author    Michaël Zasso <targos@protonmail.com>  2017-09-12 11:34:59 +0200
committer Anna Henningsen <anna@addaleax.net>  2017-09-13 16:15:18 +0200
commit    d82e1075dbc2cec2d6598ade10c1f43805f690fd (patch)
tree      ccd242b9b491dfc341d1099fe11b0ef528839877 /deps/v8/src
parent    b4b7ac6ae811b2b5a3082468115dfb5a5246fe3f (diff)
download  node-new-d82e1075dbc2cec2d6598ade10c1f43805f690fd.tar.gz
deps: update V8 to 6.1.534.36
PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/OWNERS2
-rw-r--r--deps/v8/src/PRESUBMIT.py29
-rw-r--r--deps/v8/src/accessors.cc6
-rw-r--r--deps/v8/src/address-map.cc2
-rw-r--r--deps/v8/src/allocation-site-scopes.cc83
-rw-r--r--deps/v8/src/allocation-site-scopes.h36
-rw-r--r--deps/v8/src/allocation.cc2
-rw-r--r--deps/v8/src/api-natives.cc67
-rw-r--r--deps/v8/src/api-natives.h11
-rw-r--r--deps/v8/src/api.cc758
-rw-r--r--deps/v8/src/api.h6
-rw-r--r--deps/v8/src/arguments.h4
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h12
-rw-r--r--deps/v8/src/arm/assembler-arm.cc659
-rw-r--r--deps/v8/src/arm/assembler-arm.h213
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc335
-rw-r--r--deps/v8/src/arm/codegen-arm.cc13
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc60
-rw-r--r--deps/v8/src/arm/disasm-arm.cc46
-rw-r--r--deps/v8/src/arm/frames-arm.cc9
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc62
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc987
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h1023
-rw-r--r--deps/v8/src/arm/simulator-arm.cc2
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h227
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc2468
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h1875
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc216
-rw-r--r--deps/v8/src/arm64/constants-arm64.h1036
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h204
-rw-r--r--deps/v8/src/arm64/decoder-arm64.h121
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc27
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc2517
-rw-r--r--deps/v8/src/arm64/disasm-arm64.h8
-rw-r--r--deps/v8/src/arm64/frames-arm64.cc9
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc472
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h314
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc155
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc62
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h853
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc1059
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h2272
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc3779
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h1666
-rw-r--r--deps/v8/src/arm64/simulator-logic-arm64.cc4191
-rw-r--r--deps/v8/src/arm64/utils-arm64.cc122
-rw-r--r--deps/v8/src/arm64/utils-arm64.h52
-rw-r--r--deps/v8/src/asmjs/OWNERS2
-rw-r--r--deps/v8/src/asmjs/asm-js.cc33
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc68
-rw-r--r--deps/v8/src/asmjs/asm-parser.h57
-rw-r--r--deps/v8/src/asmjs/asm-scanner.cc5
-rw-r--r--deps/v8/src/asmjs/asm-scanner.h2
-rw-r--r--deps/v8/src/asmjs/asm-types.cc1
-rw-r--r--deps/v8/src/assembler-inl.h2
-rw-r--r--deps/v8/src/assembler.cc159
-rw-r--r--deps/v8/src/assembler.h112
-rw-r--r--deps/v8/src/ast/OWNERS2
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc12
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h1
-rw-r--r--deps/v8/src/ast/ast-numbering.cc173
-rw-r--r--deps/v8/src/ast/ast-source-ranges.h236
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h18
-rw-r--r--deps/v8/src/ast/ast-type-bounds.h40
-rw-r--r--deps/v8/src/ast/ast-types.cc1308
-rw-r--r--deps/v8/src/ast/ast-types.h1017
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc46
-rw-r--r--deps/v8/src/ast/ast-value-factory.h42
-rw-r--r--deps/v8/src/ast/ast.cc177
-rw-r--r--deps/v8/src/ast/ast.h1033
-rw-r--r--deps/v8/src/ast/compile-time-value.cc9
-rw-r--r--deps/v8/src/ast/context-slot-cache.cc52
-rw-r--r--deps/v8/src/ast/modules.cc48
-rw-r--r--deps/v8/src/ast/modules.h53
-rw-r--r--deps/v8/src/ast/prettyprinter.cc85
-rw-r--r--deps/v8/src/ast/prettyprinter.h9
-rw-r--r--deps/v8/src/ast/scopes.cc108
-rw-r--r--deps/v8/src/ast/scopes.h88
-rw-r--r--deps/v8/src/ast/variables.cc24
-rw-r--r--deps/v8/src/ast/variables.h21
-rw-r--r--deps/v8/src/bailout-reason.h5
-rw-r--r--deps/v8/src/base/OWNERS4
-rw-r--r--deps/v8/src/base/atomic-utils.h173
-rw-r--r--deps/v8/src/base/atomicops.h54
-rw-r--r--deps/v8/src/base/atomicops_internals_atomicword_compat.h36
-rw-r--r--deps/v8/src/base/atomicops_internals_portable.h40
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_msvc.h71
-rw-r--r--deps/v8/src/base/bits.h63
-rw-r--r--deps/v8/src/base/build_config.h15
-rw-r--r--deps/v8/src/base/debug/stack_trace_fuchsia.cc38
-rw-r--r--deps/v8/src/base/functional.cc1
-rw-r--r--deps/v8/src/base/hashmap.h4
-rw-r--r--deps/v8/src/base/iterator.h10
-rw-r--r--deps/v8/src/base/logging.cc60
-rw-r--r--deps/v8/src/base/logging.h126
-rw-r--r--deps/v8/src/base/macros.h38
-rw-r--r--deps/v8/src/base/optional.h493
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc211
-rw-r--r--deps/v8/src/base/platform/condition-variable.h20
-rw-r--r--deps/v8/src/base/platform/mutex.cc97
-rw-r--r--deps/v8/src/base/platform/mutex.h8
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc34
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc32
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc35
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc98
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc21
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc45
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc40
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc23
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc40
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc39
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc49
-rw-r--r--deps/v8/src/base/platform/platform.h38
-rw-r--r--deps/v8/src/base/platform/time.cc3
-rw-r--r--deps/v8/src/base/safe_conversions.h1
-rw-r--r--deps/v8/src/base/template-utils.h56
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc3
-rw-r--r--deps/v8/src/bignum.cc1
-rw-r--r--deps/v8/src/bit-vector.cc18
-rw-r--r--deps/v8/src/bit-vector.h239
-rw-r--r--deps/v8/src/bootstrapper.cc1908
-rw-r--r--deps/v8/src/bootstrapper.h1
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc1165
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc1245
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc23
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc515
-rw-r--r--deps/v8/src/builtins/builtins-array.cc112
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc28
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc174
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.h7
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc80
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc392
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.h31
-rw-r--r--deps/v8/src/builtins/builtins-call.cc75
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc2
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc1357
-rw-r--r--deps/v8/src/builtins/builtins-collections.cc29
-rw-r--r--deps/v8/src/builtins/builtins-console.cc139
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc440
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h5
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc97
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.h32
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc22
-rw-r--r--deps/v8/src/builtins/builtins-date.cc6
-rw-r--r--deps/v8/src/builtins/builtins-debug-gen.cc (renamed from deps/v8/src/builtins/builtins-debug.cc)0
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h226
-rw-r--r--deps/v8/src/builtins/builtins-error.cc44
-rw-r--r--deps/v8/src/builtins/builtins-forin-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-function.cc3
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc115
-rw-r--r--deps/v8/src/builtins/builtins-global-gen.cc20
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc21
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc13
-rw-r--r--deps/v8/src/builtins/builtins-interpreter-gen.cc25
-rw-r--r--deps/v8/src/builtins/builtins-interpreter.cc40
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc45
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc269
-rw-r--r--deps/v8/src/builtins/builtins-intl.h30
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc184
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h49
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.cc30
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc1260
-rw-r--r--deps/v8/src/builtins/builtins-number.cc24
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc91
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc552
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.h39
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc20
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc215
-rw-r--r--deps/v8/src/builtins/builtins-proxy.cc33
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc241
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc5
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc420
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h11
-rw-r--r--deps/v8/src/builtins/builtins-string.cc4
-rw-r--r--deps/v8/src/builtins/builtins-typedarray-gen.cc67
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc2
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc7
-rw-r--r--deps/v8/src/builtins/builtins.cc81
-rw-r--r--deps/v8/src/builtins/builtins.h35
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc1037
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc980
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc992
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc1027
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc1017
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc16
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc984
-rw-r--r--deps/v8/src/builtins/x87/OWNERS2
-rw-r--r--deps/v8/src/builtins/x87/builtins-x87.cc3183
-rw-r--r--deps/v8/src/cancelable-task.cc26
-rw-r--r--deps/v8/src/cancelable-task.h28
-rw-r--r--deps/v8/src/char-predicates.cc50
-rw-r--r--deps/v8/src/char-predicates.h49
-rw-r--r--deps/v8/src/code-factory.cc229
-rw-r--r--deps/v8/src/code-factory.h99
-rw-r--r--deps/v8/src/code-stub-assembler.cc1312
-rw-r--r--deps/v8/src/code-stub-assembler.h232
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc503
-rw-r--r--deps/v8/src/code-stubs.cc224
-rw-r--r--deps/v8/src/code-stubs.h306
-rw-r--r--deps/v8/src/codegen.cc7
-rw-r--r--deps/v8/src/codegen.h4
-rw-r--r--deps/v8/src/compilation-cache.cc35
-rw-r--r--deps/v8/src/compilation-cache.h2
-rw-r--r--deps/v8/src/compilation-dependencies.cc10
-rw-r--r--deps/v8/src/compilation-info.cc22
-rw-r--r--deps/v8/src/compilation-info.h73
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc65
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h66
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc1
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc98
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc5
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h2
-rw-r--r--deps/v8/src/compiler.cc668
-rw-r--r--deps/v8/src/compiler.h13
-rw-r--r--deps/v8/src/compiler/OWNERS4
-rw-r--r--deps/v8/src/compiler/access-builder.cc153
-rw-r--r--deps/v8/src/compiler/access-builder.h31
-rw-r--r--deps/v8/src/compiler/access-info.cc24
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc315
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h25
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc26
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc321
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc749
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h146
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc145
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc428
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc498
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h76
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc8
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc4
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc59
-rw-r--r--deps/v8/src/compiler/branch-elimination.h9
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc53
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h3
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc399
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h36
-rw-r--r--deps/v8/src/compiler/c-linkage.cc8
-rw-r--r--deps/v8/src/compiler/check-elimination.cc76
-rw-r--r--deps/v8/src/compiler/check-elimination.h46
-rw-r--r--deps/v8/src/compiler/checkpoint-elimination.h2
-rw-r--r--deps/v8/src/compiler/code-assembler.cc52
-rw-r--r--deps/v8/src/compiler/code-assembler.h7
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h17
-rw-r--r--deps/v8/src/compiler/code-generator.cc191
-rw-r--r--deps/v8/src/compiler/code-generator.h32
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h2
-rw-r--r--deps/v8/src/compiler/common-operator.cc5
-rw-r--r--deps/v8/src/compiler/common-operator.h1
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc1
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h2
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc391
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h14
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h2
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc17
-rw-r--r--deps/v8/src/compiler/frame-states.cc140
-rw-r--r--deps/v8/src/compiler/frame-states.h70
-rw-r--r--deps/v8/src/compiler/gap-resolver.cc2
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc3
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc19
-rw-r--r--deps/v8/src/compiler/graph-reducer.h3
-rw-r--r--deps/v8/src/compiler/graph.h59
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc275
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h15
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc18
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc87
-rw-r--r--deps/v8/src/compiler/instruction-codes.h2
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc25
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.h37
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h2
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc362
-rw-r--r--deps/v8/src/compiler/instruction-selector.h21
-rw-r--r--deps/v8/src/compiler/instruction.cc49
-rw-r--r--deps/v8/src/compiler/instruction.h67
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc7
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc665
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h16
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc930
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h31
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc5
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h5
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc279
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h6
-rw-r--r--deps/v8/src/compiler/js-frame-specialization.h2
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc182
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-graph.cc13
-rw-r--r--deps/v8/src/compiler/js-graph.h6
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h2
-rw-r--r--deps/v8/src/compiler/js-inlining.cc56
-rw-r--r--deps/v8/src/compiler/js-inlining.h4
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc25
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc1344
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h93
-rw-r--r--deps/v8/src/compiler/js-operator.cc137
-rw-r--r--deps/v8/src/compiler/js-operator.h67
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc54
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h15
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc529
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h13
-rw-r--r--deps/v8/src/compiler/linkage.cc14
-rw-r--r--deps/v8/src/compiler/linkage.h5
-rw-r--r--deps/v8/src/compiler/liveness-analyzer.cc233
-rw-r--r--deps/v8/src/compiler/liveness-analyzer.h171
-rw-r--r--deps/v8/src/compiler/load-elimination.cc125
-rw-r--r--deps/v8/src/compiler/load-elimination.h48
-rw-r--r--deps/v8/src/compiler/loop-analysis.h1
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc3
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc12
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h2
-rw-r--r--deps/v8/src/compiler/machine-operator.cc91
-rw-r--r--deps/v8/src/compiler/machine-operator.h51
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc1029
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h91
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc641
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc1248
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h95
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc729
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/node-matchers.h3
-rw-r--r--deps/v8/src/compiler/node-properties.h3
-rw-r--r--deps/v8/src/compiler/opcodes.h41
-rw-r--r--deps/v8/src/compiler/operator-properties.cc5
-rw-r--r--deps/v8/src/compiler/operator.cc10
-rw-r--r--deps/v8/src/compiler/operator.h7
-rw-r--r--deps/v8/src/compiler/osr.h4
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc2
-rw-r--r--deps/v8/src/compiler/pipeline.cc200
-rw-r--r--deps/v8/src/compiler/pipeline.h6
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc189
-rw-r--r--deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc1
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc17
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc271
-rw-r--r--deps/v8/src/compiler/property-access-builder.h80
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc14
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc12
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.h2
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc39
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h4
-rw-r--r--deps/v8/src/compiler/register-allocator.cc11
-rw-r--r--deps/v8/src/compiler/register-allocator.h5
-rw-r--r--deps/v8/src/compiler/representation-change.cc43
-rw-r--r--deps/v8/src/compiler/representation-change.h1
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc145
-rw-r--r--deps/v8/src/compiler/s390/instruction-scheduler-s390.cc1
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc19
-rw-r--r--deps/v8/src/compiler/schedule.cc1
-rw-r--r--deps/v8/src/compiler/scheduler.cc6
-rw-r--r--deps/v8/src/compiler/select-lowering.h2
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc260
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h16
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc152
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h4
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc172
-rw-r--r--deps/v8/src/compiler/simplified-operator.h21
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc1
-rw-r--r--deps/v8/src/compiler/tail-call-optimization.cc80
-rw-r--r--deps/v8/src/compiler/tail-call-optimization.h41
-rw-r--r--deps/v8/src/compiler/type-cache.h1
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc36
-rw-r--r--deps/v8/src/compiler/typed-optimization.h5
-rw-r--r--deps/v8/src/compiler/typer.cc109
-rw-r--r--deps/v8/src/compiler/types.cc56
-rw-r--r--deps/v8/src/compiler/types.h59
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.h2
-rw-r--r--deps/v8/src/compiler/verifier.cc48
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc911
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h93
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc51
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc324
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h15
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc20
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc70
-rw-r--r--deps/v8/src/compiler/x87/OWNERS2
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc2772
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h144
-rw-r--r--deps/v8/src/compiler/x87/instruction-scheduler-x87.cc26
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc1881
-rw-r--r--deps/v8/src/contexts-inl.h77
-rw-r--r--deps/v8/src/contexts.cc205
-rw-r--r--deps/v8/src/contexts.h220
-rw-r--r--deps/v8/src/conversions-inl.h12
-rw-r--r--deps/v8/src/conversions.cc8
-rw-r--r--deps/v8/src/counters-inl.h2
-rw-r--r--deps/v8/src/counters.cc165
-rw-r--r--deps/v8/src/counters.h454
-rw-r--r--deps/v8/src/crankshaft/OWNERS7
-rw-r--r--deps/v8/src/crankshaft/arm/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc2397
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h2491
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc5393
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h386
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc303
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h63
-rw-r--r--deps/v8/src/crankshaft/arm64/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h73
-rw-r--r--deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc199
-rw-r--r--deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h154
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc2493
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h2849
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc5593
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h442
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc306
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h94
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.cc45
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.h42
-rw-r--r--deps/v8/src/crankshaft/hydrogen-alias-analysis.h73
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.cc479
-rw-r--r--deps/v8/src/crankshaft/hydrogen-bce.h52
-rw-r--r--deps/v8/src/crankshaft/hydrogen-canonicalize.cc59
-rw-r--r--deps/v8/src/crankshaft/hydrogen-canonicalize.h29
-rw-r--r--deps/v8/src/crankshaft/hydrogen-check-elimination.cc914
-rw-r--r--deps/v8/src/crankshaft/hydrogen-check-elimination.h74
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dce.cc106
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dce.h35
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dehoist.cc73
-rw-r--r--deps/v8/src/crankshaft/hydrogen-dehoist.h29
-rw-r--r--deps/v8/src/crankshaft/hydrogen-environment-liveness.cc232
-rw-r--r--deps/v8/src/crankshaft/hydrogen-environment-liveness.h68
-rw-r--r--deps/v8/src/crankshaft/hydrogen-escape-analysis.cc330
-rw-r--r--deps/v8/src/crankshaft/hydrogen-escape-analysis.h71
-rw-r--r--deps/v8/src/crankshaft/hydrogen-flow-engine.h220
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.cc901
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.h153
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-representation.cc163
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-representation.h35
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-types.cc56
-rw-r--r--deps/v8/src/crankshaft/hydrogen-infer-types.h37
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc4051
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h6751
-rw-r--r--deps/v8/src/crankshaft/hydrogen-load-elimination.cc512
-rw-r--r--deps/v8/src/crankshaft/hydrogen-load-elimination.h28
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc56
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-unreachable.h31
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.cc105
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.h56
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.cc286
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.h52
-rw-r--r--deps/v8/src/crankshaft/hydrogen-redundant-phi.cc67
-rw-r--r--deps/v8/src/crankshaft/hydrogen-redundant-phi.h34
-rw-r--r--deps/v8/src/crankshaft/hydrogen-removable-simulates.cc190
-rw-r--r--deps/v8/src/crankshaft/hydrogen-removable-simulates.h29
-rw-r--r--deps/v8/src/crankshaft/hydrogen-representation-changes.cc245
-rw-r--r--deps/v8/src/crankshaft/hydrogen-representation-changes.h33
-rw-r--r--deps/v8/src/crankshaft/hydrogen-sce.cc40
-rw-r--r--deps/v8/src/crankshaft/hydrogen-sce.h26
-rw-r--r--deps/v8/src/crankshaft/hydrogen-store-elimination.cc122
-rw-r--r--deps/v8/src/crankshaft/hydrogen-store-elimination.h35
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.cc76
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.h95
-rw-r--r--deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc238
-rw-r--r--deps/v8/src/crankshaft/hydrogen-uint32-analysis.h37
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc12535
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h2996
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc5155
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h387
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc490
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h86
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc2467
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h2514
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator-inl.h62
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.cc2192
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.h576
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc416
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.h110
-rw-r--r--deps/v8/src/crankshaft/lithium-inl.h116
-rw-r--r--deps/v8/src/crankshaft/lithium.cc730
-rw-r--r--deps/v8/src/crankshaft/lithium.h847
-rw-r--r--deps/v8/src/crankshaft/mips/OWNERS3
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc5417
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h405
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc298
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h59
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc2345
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h2450
-rw-r--r--deps/v8/src/crankshaft/mips64/OWNERS3
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc5609
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h408
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc299
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h59
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc2350
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h2496
-rw-r--r--deps/v8/src/crankshaft/ppc/OWNERS6
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc5688
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h344
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc287
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h58
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc2368
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h2415
-rw-r--r--deps/v8/src/crankshaft/s390/OWNERS6
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc5616
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.h342
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc280
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h58
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.cc2156
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.h2248
-rw-r--r--deps/v8/src/crankshaft/typing.cc802
-rw-r--r--deps/v8/src/crankshaft/typing.h85
-rw-r--r--deps/v8/src/crankshaft/unique.h362
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc5436
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h382
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc322
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h50
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc2470
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h2496
-rw-r--r--deps/v8/src/crankshaft/x87/OWNERS2
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc5651
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h489
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc457
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h86
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc2469
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h2508
-rw-r--r--deps/v8/src/d8-console.cc21
-rw-r--r--deps/v8/src/d8-console.h21
-rw-r--r--deps/v8/src/d8.cc276
-rw-r--r--deps/v8/src/d8.h6
-rw-r--r--deps/v8/src/date.cc1
-rw-r--r--deps/v8/src/debug/OWNERS2
-rw-r--r--deps/v8/src/debug/debug-coverage.cc333
-rw-r--r--deps/v8/src/debug/debug-coverage.h13
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc29
-rw-r--r--deps/v8/src/debug/debug-frames.cc8
-rw-r--r--deps/v8/src/debug/debug-interface.h30
-rw-r--r--deps/v8/src/debug/debug-scopes.cc169
-rw-r--r--deps/v8/src/debug/debug-scopes.h18
-rw-r--r--deps/v8/src/debug/debug.cc308
-rw-r--r--deps/v8/src/debug/debug.h56
-rw-r--r--deps/v8/src/debug/debug.js38
-rw-r--r--deps/v8/src/debug/interface-types.h84
-rw-r--r--deps/v8/src/debug/liveedit.cc17
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc2
-rw-r--r--deps/v8/src/debug/mirrors.js5
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc4
-rw-r--r--deps/v8/src/debug/x87/OWNERS2
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc157
-rw-r--r--deps/v8/src/deoptimize-reason.cc1
-rw-r--r--deps/v8/src/deoptimize-reason.h3
-rw-r--r--deps/v8/src/deoptimizer.cc1324
-rw-r--r--deps/v8/src/deoptimizer.h154
-rw-r--r--deps/v8/src/disassembler.cc193
-rw-r--r--deps/v8/src/double.h4
-rw-r--r--deps/v8/src/dtoa.cc1
-rw-r--r--deps/v8/src/effects.h335
-rw-r--r--deps/v8/src/elements-kind.cc69
-rw-r--r--deps/v8/src/elements-kind.h119
-rw-r--r--deps/v8/src/elements.cc591
-rw-r--r--deps/v8/src/elements.h3
-rw-r--r--deps/v8/src/execution.h19
-rw-r--r--deps/v8/src/extensions/ignition-statistics-extension.cc1
-rw-r--r--deps/v8/src/external-reference-table.cc22
-rw-r--r--deps/v8/src/factory.cc449
-rw-r--r--deps/v8/src/factory.h114
-rw-r--r--deps/v8/src/feedback-vector-inl.h39
-rw-r--r--deps/v8/src/feedback-vector.cc89
-rw-r--r--deps/v8/src/feedback-vector.h17
-rw-r--r--deps/v8/src/ffi/OWNERS2
-rw-r--r--deps/v8/src/ffi/ffi-compiler.cc3
-rw-r--r--deps/v8/src/field-index-inl.h12
-rw-r--r--deps/v8/src/field-index.h9
-rw-r--r--deps/v8/src/field-type.cc8
-rw-r--r--deps/v8/src/field-type.h2
-rw-r--r--deps/v8/src/flag-definitions.h158
-rw-r--r--deps/v8/src/flags.cc2
-rw-r--r--deps/v8/src/float.h57
-rw-r--r--deps/v8/src/frames-inl.h22
-rw-r--r--deps/v8/src/frames.cc112
-rw-r--r--deps/v8/src/frames.h121
-rw-r--r--deps/v8/src/full-codegen/OWNERS2
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc478
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc493
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc186
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h61
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc400
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc389
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc388
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc419
-rw-r--r--deps/v8/src/full-codegen/s390/full-codegen-s390.cc429
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc345
-rw-r--r--deps/v8/src/full-codegen/x87/OWNERS2
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc2749
-rw-r--r--deps/v8/src/gdb-jit.cc20
-rw-r--r--deps/v8/src/global-handles.cc31
-rw-r--r--deps/v8/src/global-handles.h10
-rw-r--r--deps/v8/src/globals.h208
-rw-r--r--deps/v8/src/handles.cc10
-rw-r--r--deps/v8/src/handles.h3
-rw-r--r--deps/v8/src/heap-symbols.h25
-rw-r--r--deps/v8/src/heap/OWNERS3
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h20
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc54
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h29
-rw-r--r--deps/v8/src/heap/code-stats.h11
-rw-r--r--deps/v8/src/heap/concurrent-marking-deque.h175
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc273
-rw-r--r--deps/v8/src/heap/concurrent-marking.h45
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc2
-rw-r--r--deps/v8/src/heap/embedder-tracing.h10
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc15
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h5
-rw-r--r--deps/v8/src/heap/gc-tracer.cc27
-rw-r--r--deps/v8/src/heap/gc-tracer.h153
-rw-r--r--deps/v8/src/heap/heap-inl.h181
-rw-r--r--deps/v8/src/heap/heap.cc1029
-rw-r--r--deps/v8/src/heap/heap.h281
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h8
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc10
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h7
-rw-r--r--deps/v8/src/heap/incremental-marking.cc293
-rw-r--r--deps/v8/src/heap/incremental-marking.h35
-rw-r--r--deps/v8/src/heap/item-parallel-job.h6
-rw-r--r--deps/v8/src/heap/local-allocator.h99
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h169
-rw-r--r--deps/v8/src/heap/mark-compact.cc1966
-rw-r--r--deps/v8/src/heap/mark-compact.h427
-rw-r--r--deps/v8/src/heap/marking.cc201
-rw-r--r--deps/v8/src/heap/marking.h318
-rw-r--r--deps/v8/src/heap/memory-reducer.cc1
-rw-r--r--deps/v8/src/heap/object-stats.cc36
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h951
-rw-r--r--deps/v8/src/heap/objects-visiting.cc195
-rw-r--r--deps/v8/src/heap/objects-visiting.h438
-rw-r--r--deps/v8/src/heap/page-parallel-job.h180
-rw-r--r--deps/v8/src/heap/remembered-set.h28
-rw-r--r--deps/v8/src/heap/scavenge-job.h5
-rw-r--r--deps/v8/src/heap/scavenger-inl.h200
-rw-r--r--deps/v8/src/heap/scavenger.cc531
-rw-r--r--deps/v8/src/heap/scavenger.h174
-rw-r--r--deps/v8/src/heap/sequential-marking-deque.cc6
-rw-r--r--deps/v8/src/heap/sequential-marking-deque.h19
-rw-r--r--deps/v8/src/heap/slot-set.h375
-rw-r--r--deps/v8/src/heap/spaces-inl.h110
-rw-r--r--deps/v8/src/heap/spaces.cc348
-rw-r--r--deps/v8/src/heap/spaces.h147
-rw-r--r--deps/v8/src/heap/store-buffer.cc3
-rw-r--r--deps/v8/src/heap/worklist.h354
-rw-r--r--deps/v8/src/heap/workstealing-marking-deque.h167
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h75
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc192
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h174
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc174
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h1
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc4
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc20
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc147
-rw-r--r--deps/v8/src/ia32/frames-ia32.cc9
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc62
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc408
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h432
-rw-r--r--deps/v8/src/ic/OWNERS2
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc82
-rw-r--r--deps/v8/src/ic/accessor-assembler.h4
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc43
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc1
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc11
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc1
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc813
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h26
-rw-r--r--deps/v8/src/ic/call-optimization.cc11
-rw-r--r--deps/v8/src/ic/call-optimization.h3
-rw-r--r--deps/v8/src/ic/handler-compiler.cc2
-rw-r--r--deps/v8/src/ic/handler-compiler.h1
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h1
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc10
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc1
-rw-r--r--deps/v8/src/ic/ic-inl.h5
-rw-r--r--deps/v8/src/ic/ic-state.cc363
-rw-r--r--deps/v8/src/ic/ic-state.h129
-rw-r--r--deps/v8/src/ic/ic-stats.cc8
-rw-r--r--deps/v8/src/ic/ic.cc306
-rw-r--r--deps/v8/src/ic/ic.h24
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc58
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc15
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc1
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc14
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc1
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc15
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc1
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc14
-rw-r--r--deps/v8/src/ic/s390/ic-s390.cc1
-rw-r--r--deps/v8/src/ic/stub-cache.cc15
-rw-r--r--deps/v8/src/ic/stub-cache.h1
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc11
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc1
-rw-r--r--deps/v8/src/ic/x87/OWNERS2
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc40
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc456
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc85
-rw-r--r--deps/v8/src/identity-map.cc1
-rw-r--r--deps/v8/src/inspector/BUILD.gn2
-rw-r--r--deps/v8/src/inspector/DEPS1
-rw-r--r--deps/v8/src/inspector/OWNERS3
-rw-r--r--deps/v8/src/inspector/PRESUBMIT.py13
-rw-r--r--deps/v8/src/inspector/debugger-script.js2
-rw-r--r--deps/v8/src/inspector/debugger_script_externs.js10
-rw-r--r--deps/v8/src/inspector/injected-script-native.cc89
-rw-r--r--deps/v8/src/inspector/injected-script-native.h46
-rw-r--r--deps/v8/src/inspector/injected-script.cc125
-rw-r--r--deps/v8/src/inspector/injected-script.h33
-rw-r--r--deps/v8/src/inspector/inspected-context.cc73
-rw-r--r--deps/v8/src/inspector/inspected-context.h20
-rw-r--r--deps/v8/src/inspector/inspector.gypi2
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json2
-rw-r--r--deps/v8/src/inspector/js_protocol.json6
-rw-r--r--deps/v8/src/inspector/string-16.cc35
-rw-r--r--deps/v8/src/inspector/string-16.h37
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc28
-rw-r--r--deps/v8/src/inspector/v8-console-message.h3
-rw-r--r--deps/v8/src/inspector/v8-console.cc301
-rw-r--r--deps/v8/src/inspector/v8-console.h141
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc68
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h10
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc145
-rw-r--r--deps/v8/src/inspector/v8-debugger.h10
-rw-r--r--deps/v8/src/inspector/v8-function-call.cc1
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc11
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc120
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h23
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc99
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h8
-rw-r--r--deps/v8/src/inspector/v8-internal-value-type.cc1
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc51
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc109
-rw-r--r--deps/v8/src/inspector/v8-value-copier.cc1
-rw-r--r--deps/v8/src/interface-descriptors.cc91
-rw-r--r--deps/v8/src/interface-descriptors.h83
-rw-r--r--deps/v8/src/interpreter/OWNERS2
-rw-r--r--deps/v8/src/interpreter/block-coverage-builder.h68
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc6
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc126
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h43
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc3
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc15
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h20
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1196
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h75
-rw-r--r--deps/v8/src/interpreter/bytecode-label.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-label.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.cc4
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h22
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc85
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h11
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h9
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc41
-rw-r--r--deps/v8/src/interpreter/bytecodes.h65
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc9
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc21
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h38
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc97
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h27
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc678
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc53
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.cc2
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h4
-rw-r--r--deps/v8/src/interpreter/interpreter.cc18
-rw-r--r--deps/v8/src/interpreter/setup-interpreter-internal.cc3
-rw-r--r--deps/v8/src/intl.h1
-rw-r--r--deps/v8/src/isolate-inl.h4
-rw-r--r--deps/v8/src/isolate.cc350
-rw-r--r--deps/v8/src/isolate.h179
-rw-r--r--deps/v8/src/js/OWNERS3
-rw-r--r--deps/v8/src/js/array.js641
-rw-r--r--deps/v8/src/js/collection-iterator.js178
-rw-r--r--deps/v8/src/js/collection.js460
-rw-r--r--deps/v8/src/js/intl.js175
-rw-r--r--deps/v8/src/js/macros.py63
-rw-r--r--deps/v8/src/js/max-min.js2
-rw-r--r--deps/v8/src/js/messages.js10
-rw-r--r--deps/v8/src/js/prologue.js54
-rw-r--r--deps/v8/src/js/promise.js132
-rw-r--r--deps/v8/src/js/proxy.js17
-rw-r--r--deps/v8/src/js/spread.js2
-rw-r--r--deps/v8/src/js/string.js403
-rw-r--r--deps/v8/src/js/typedarray.js338
-rw-r--r--deps/v8/src/js/v8natives.js41
-rw-r--r--deps/v8/src/js/weak-collection.js147
-rw-r--r--deps/v8/src/json-parser.cc16
-rw-r--r--deps/v8/src/json-stringifier.cc15
-rw-r--r--deps/v8/src/keys.cc23
-rw-r--r--deps/v8/src/keys.h2
-rw-r--r--deps/v8/src/label.h1
-rw-r--r--deps/v8/src/layout-descriptor-inl.h57
-rw-r--r--deps/v8/src/layout-descriptor.cc36
-rw-r--r--deps/v8/src/layout-descriptor.h11
-rw-r--r--deps/v8/src/libplatform/OWNERS4
-rw-r--r--deps/v8/src/libplatform/default-platform.cc19
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.h8
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc7
-rw-r--r--deps/v8/src/libsampler/sampler.cc8
-rw-r--r--deps/v8/src/libsampler/sampler.h14
-rw-r--r--deps/v8/src/log-utils.cc2
-rw-r--r--deps/v8/src/log.cc52
-rw-r--r--deps/v8/src/log.h5
-rw-r--r--deps/v8/src/lookup.cc61
-rw-r--r--deps/v8/src/machine-type.cc8
-rw-r--r--deps/v8/src/machine-type.h33
-rw-r--r--deps/v8/src/macro-assembler.h22
-rw-r--r--deps/v8/src/managed.h60
-rw-r--r--deps/v8/src/messages.cc84
-rw-r--r--deps/v8/src/messages.h23
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h7
-rw-r--r--deps/v8/src/mips/assembler-mips.cc382
-rw-r--r--deps/v8/src/mips/assembler-mips.h128
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc181
-rw-r--r--deps/v8/src/mips/codegen-mips.cc4
-rw-r--r--deps/v8/src/mips/constants-mips.h55
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc44
-rw-r--r--deps/v8/src/mips/disasm-mips.cc15
-rw-r--r--deps/v8/src/mips/frames-mips.cc9
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc62
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc1413
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h1431
-rw-r--r--deps/v8/src/mips/simulator-mips.cc923
-rw-r--r--deps/v8/src/mips/simulator-mips.h80
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h7
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc313
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h126
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc180
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc4
-rw-r--r--deps/v8/src/mips64/constants-mips64.h97
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc42
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc15
-rw-r--r--deps/v8/src/mips64/frames-mips64.cc9
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc62
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc1866
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h1395
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc932
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h80
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h115
-rw-r--r--deps/v8/src/objects-debug.cc221
-rw-r--r--deps/v8/src/objects-inl.h3390
-rw-r--r--deps/v8/src/objects-printer.cc297
-rw-r--r--deps/v8/src/objects.cc4048
-rw-r--r--deps/v8/src/objects.h3535
-rw-r--r--deps/v8/src/objects/arguments-inl.h57
-rw-r--r--deps/v8/src/objects/arguments.h141
-rw-r--r--deps/v8/src/objects/code-cache-inl.h2
-rw-r--r--deps/v8/src/objects/code-cache.h70
-rw-r--r--deps/v8/src/objects/compilation-cache-inl.h45
-rw-r--r--deps/v8/src/objects/compilation-cache.h20
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h63
-rw-r--r--deps/v8/src/objects/debug-objects.cc337
-rw-r--r--deps/v8/src/objects/debug-objects.h187
-rw-r--r--deps/v8/src/objects/descriptor-array.h2
-rw-r--r--deps/v8/src/objects/dictionary.h255
-rw-r--r--deps/v8/src/objects/frame-array-inl.h2
-rw-r--r--deps/v8/src/objects/frame-array.h36
-rw-r--r--deps/v8/src/objects/hash-table-inl.h20
-rw-r--r--deps/v8/src/objects/hash-table.h576
-rw-r--r--deps/v8/src/objects/literal-objects.cc2
-rw-r--r--deps/v8/src/objects/literal-objects.h4
-rw-r--r--deps/v8/src/objects/map.h248
-rw-r--r--deps/v8/src/objects/module-info.h20
-rw-r--r--deps/v8/src/objects/name-inl.h99
-rw-r--r--deps/v8/src/objects/name.h189
-rw-r--r--deps/v8/src/objects/object-macros-undef.h57
-rw-r--r--deps/v8/src/objects/object-macros.h244
-rw-r--r--deps/v8/src/objects/regexp-match-info.h2
-rw-r--r--deps/v8/src/objects/scope-info.cc69
-rw-r--r--deps/v8/src/objects/scope-info.h10
-rw-r--r--deps/v8/src/objects/script-inl.h85
-rw-r--r--deps/v8/src/objects/script.h217
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h416
-rw-r--r--deps/v8/src/objects/shared-function-info.h592
-rw-r--r--deps/v8/src/objects/string-inl.h742
-rw-r--r--deps/v8/src/objects/string-table.h53
-rw-r--r--deps/v8/src/objects/string.h877
-rw-r--r--deps/v8/src/ostreams.cc1
-rw-r--r--deps/v8/src/parsing/OWNERS2
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.cc46
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.h13
-rw-r--r--deps/v8/src/parsing/parse-info.cc9
-rw-r--r--deps/v8/src/parsing/parse-info.h18
-rw-r--r--deps/v8/src/parsing/parser-base.h527
-rw-r--r--deps/v8/src/parsing/parser.cc628
-rw-r--r--deps/v8/src/parsing/parser.h146
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc101
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc615
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.h155
-rw-r--r--deps/v8/src/parsing/preparser.cc59
-rw-r--r--deps/v8/src/parsing/preparser.h206
-rw-r--r--deps/v8/src/parsing/rewriter.cc8
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc2
-rw-r--r--deps/v8/src/parsing/scanner.cc28
-rw-r--r--deps/v8/src/parsing/scanner.h33
-rw-r--r--deps/v8/src/parsing/token.h7
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h8
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc147
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h71
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc183
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc2
-rw-r--r--deps/v8/src/ppc/constants-ppc.h9
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc21
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc2
-rw-r--r--deps/v8/src/ppc/frames-ppc.cc6
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc62
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc607
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h999
-rw-r--r--deps/v8/src/profiler/OWNERS2
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc3
-rw-r--r--deps/v8/src/profiler/circular-queue-inl.h4
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc5
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h2
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc64
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h6
-rw-r--r--deps/v8/src/profiler/profile-generator.cc19
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc3
-rw-r--r--deps/v8/src/profiler/tick-sample.cc33
-rw-r--r--deps/v8/src/profiler/unbound-queue-inl.h2
-rw-r--r--deps/v8/src/property-details.h8
-rw-r--r--deps/v8/src/property.h1
-rw-r--r--deps/v8/src/prototype.h4
-rw-r--r--deps/v8/src/regexp/OWNERS2
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc14
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc2
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc2
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.cc1
-rw-r--r--deps/v8/src/regexp/jsregexp.cc47
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc4
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc4
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc4
-rw-r--r--deps/v8/src/regexp/regexp-ast.h1
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc1
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc2
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc2
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc2
-rw-r--r--deps/v8/src/regexp/x87/OWNERS2
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc1273
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h204
-rw-r--r--deps/v8/src/register-configuration.cc3
-rw-r--r--deps/v8/src/register-configuration.h3
-rw-r--r--deps/v8/src/runtime-profiler.cc56
-rw-r--r--deps/v8/src/runtime/runtime-array.cc377
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc7
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc10
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc173
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc114
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc150
-rw-r--r--deps/v8/src/runtime/runtime-function.cc45
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc45
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc90
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc1
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc7
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc752
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc4
-rw-r--r--deps/v8/src/runtime/runtime-module.cc26
-rw-r--r--deps/v8/src/runtime/runtime-object.cc160
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc56
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc17
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc68
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc38
-rw-r--r--deps/v8/src/runtime/runtime-test.cc163
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc4
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc5
-rw-r--r--deps/v8/src/runtime/runtime.h249
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h21
-rw-r--r--deps/v8/src/s390/assembler-s390.cc142
-rw-r--r--deps/v8/src/s390/assembler-s390.h70
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc176
-rw-r--r--deps/v8/src/s390/codegen-s390.cc2
-rw-r--r--deps/v8/src/s390/constants-s390.h10
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc20
-rw-r--r--deps/v8/src/s390/disasm-s390.cc5
-rw-r--r--deps/v8/src/s390/frames-s390.cc8
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc62
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc978
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h1059
-rw-r--r--deps/v8/src/s390/simulator-s390.cc4
-rw-r--r--deps/v8/src/setup-isolate-deserialize.cc8
-rw-r--r--deps/v8/src/signature.h3
-rw-r--r--deps/v8/src/simulator.h2
-rw-r--r--deps/v8/src/snapshot/OWNERS2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc70
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc4
-rw-r--r--deps/v8/src/snapshot/serializer-common.h8
-rw-r--r--deps/v8/src/snapshot/serializer.cc7
-rw-r--r--deps/v8/src/snapshot/serializer.h26
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc6
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h5
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc3
-rw-r--r--deps/v8/src/string-builder.h8
-rw-r--r--deps/v8/src/string-hasher-inl.h5
-rw-r--r--deps/v8/src/string-stream.cc2
-rw-r--r--deps/v8/src/strtod.cc4
-rw-r--r--deps/v8/src/tracing/trace-event.h4
-rw-r--r--deps/v8/src/transitions.cc2
-rw-r--r--deps/v8/src/transitions.h10
-rw-r--r--deps/v8/src/trap-handler/OWNERS3
-rw-r--r--deps/v8/src/trap-handler/handler-shared.cc9
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h2
-rw-r--r--deps/v8/src/type-hints.cc11
-rw-r--r--deps/v8/src/type-hints.h3
-rw-r--r--deps/v8/src/type-info.cc550
-rw-r--r--deps/v8/src/type-info.h122
-rw-r--r--deps/v8/src/unicode.cc296
-rw-r--r--deps/v8/src/unicode.h17
-rw-r--r--deps/v8/src/utils-inl.h29
-rw-r--r--deps/v8/src/utils.cc5
-rw-r--r--deps/v8/src/utils.h226
-rw-r--r--deps/v8/src/v8.cc22
-rw-r--r--deps/v8/src/v8.gyp229
-rw-r--r--deps/v8/src/v8threads.cc4
-rw-r--r--deps/v8/src/value-serializer.cc50
-rw-r--r--deps/v8/src/value-serializer.h5
-rw-r--r--deps/v8/src/vm-state-inl.h1
-rw-r--r--deps/v8/src/vm-state.h1
-rw-r--r--deps/v8/src/wasm/OWNERS2
-rw-r--r--deps/v8/src/wasm/compilation-manager.cc32
-rw-r--r--deps/v8/src/wasm/compilation-manager.h44
-rw-r--r--deps/v8/src/wasm/decoder.h11
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h23
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc224
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h26
-rw-r--r--deps/v8/src/wasm/module-compiler.cc2356
-rw-r--r--deps/v8/src/wasm/module-compiler.h390
-rw-r--r--deps/v8/src/wasm/module-decoder.cc462
-rw-r--r--deps/v8/src/wasm/module-decoder.h90
-rw-r--r--deps/v8/src/wasm/signature-map.cc4
-rw-r--r--deps/v8/src/wasm/signature-map.h9
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc53
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc4
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc143
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc349
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h96
-rw-r--r--deps/v8/src/wasm/wasm-js.cc424
-rw-r--r--deps/v8/src/wasm/wasm-js.h3
-rw-r--r--deps/v8/src/wasm/wasm-limits.h6
-rw-r--r--deps/v8/src/wasm/wasm-module.cc2716
-rw-r--r--deps/v8/src/wasm/wasm-module.h231
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc574
-rw-r--r--deps/v8/src/wasm/wasm-objects.h408
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc196
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h450
-rw-r--r--deps/v8/src/wasm/wasm-result.cc17
-rw-r--r--deps/v8/src/wasm/wasm-result.h5
-rw-r--r--deps/v8/src/wasm/wasm-text.cc1
-rw-r--r--deps/v8/src/wasm/wasm-value.h86
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h16
-rw-r--r--deps/v8/src/x64/assembler-x64.cc57
-rw-r--r--deps/v8/src/x64/assembler-x64.h52
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc175
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h1
-rw-r--r--deps/v8/src/x64/codegen-x64.cc6
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc20
-rw-r--r--deps/v8/src/x64/disasm-x64.cc1
-rw-r--r--deps/v8/src/x64/frames-x64.cc9
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc62
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc593
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h689
-rw-r--r--deps/v8/src/x87/OWNERS2
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h547
-rw-r--r--deps/v8/src/x87/assembler-x87.cc2217
-rw-r--r--deps/v8/src/x87/assembler-x87.h1107
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc3490
-rw-r--r--deps/v8/src/x87/code-stubs-x87.h352
-rw-r--r--deps/v8/src/x87/codegen-x87.cc381
-rw-r--r--deps/v8/src/x87/codegen-x87.h33
-rw-r--r--deps/v8/src/x87/cpu-x87.cc43
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc428
-rw-r--r--deps/v8/src/x87/disasm-x87.cc1875
-rw-r--r--deps/v8/src/x87/frames-x87.cc36
-rw-r--r--deps/v8/src/x87/frames-x87.h78
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc384
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc2599
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h914
-rw-r--r--deps/v8/src/x87/simulator-x87.cc7
-rw-r--r--deps/v8/src/x87/simulator-x87.h52
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc20
-rw-r--r--deps/v8/src/zone/accounting-allocator.h5
-rw-r--r--deps/v8/src/zone/zone-containers.h17
-rw-r--r--deps/v8/src/zone/zone-handle-set.h48
1075 files changed, 87816 insertions, 239525 deletions
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 8bbbab6ecb..83a275c80f 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -3,3 +3,5 @@ per-file intl.*=mnita@google.com
per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/PRESUBMIT.py b/deps/v8/src/PRESUBMIT.py
new file mode 100644
index 0000000000..d928a60689
--- /dev/null
+++ b/deps/v8/src/PRESUBMIT.py
@@ -0,0 +1,29 @@
+# Copyright 2017 the V8 project authors. All rights reserved.')
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Presubmit script for //v8/src
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into depot_tools.
+"""
+
+import os
+
+
+def PostUploadHook(cl, change, output_api):
+ """git cl upload will call this hook after the issue is created/modified.
+
+ This hook adds extra try bots to the CL description in order to run layout
+ tests in addition to CQ try bots.
+ """
+ def is_api_cc(f):
+ return 'api.cc' == os.path.split(f.LocalPath())[1]
+ if not change.AffectedFiles(file_filter=is_api_cc):
+ return []
+ return output_api.EnsureCQIncludeTrybotsAreAdded(
+ cl,
+ [
+ 'master.tryserver.chromium.linux:linux_chromium_rel_ng'
+ ],
+ 'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 98f780d589..32ee1b61e3 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -649,11 +649,7 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
// Find the name of the function calling eval.
- if (!shared->name()->IsUndefined(isolate)) {
- result = Handle<Object>(shared->name(), isolate);
- } else {
- result = Handle<Object>(shared->inferred_name(), isolate);
- }
+ result = Handle<Object>(shared->name(), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 79f8e62d54..4b0d029588 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -20,6 +20,8 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
if (!root->IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
// not be referenced through the root list in the snapshot.
+ // Since we map the raw address of an root item to its root list index, the
+ // raw address must be constant, i.e. the object must be immovable.
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
Maybe<uint32_t> maybe_index = map_->Get(heap_object);
diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc
deleted file mode 100644
index 6b9fd03a21..0000000000
--- a/deps/v8/src/allocation-site-scopes.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/allocation-site-scopes.h"
-#include "src/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
- Handle<AllocationSite> scope_site;
- if (top().is_null()) {
- // We are creating the top level AllocationSite as opposed to a nested
- // AllocationSite.
- InitializeTraversal(isolate()->factory()->NewAllocationSite());
- scope_site = Handle<AllocationSite>(*top(), isolate());
- if (FLAG_trace_creation_allocation_sites) {
- PrintF("*** Creating top level AllocationSite %p\n",
- static_cast<void*>(*scope_site));
- }
- } else {
- DCHECK(!current().is_null());
- scope_site = isolate()->factory()->NewAllocationSite();
- if (FLAG_trace_creation_allocation_sites) {
- PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
- static_cast<void*>(*top()),
- static_cast<void*>(*current()),
- static_cast<void*>(*scope_site));
- }
- current()->set_nested_site(*scope_site);
- update_current_site(*scope_site);
- }
- DCHECK(!scope_site.is_null());
- return scope_site;
-}
-
-
-void AllocationSiteCreationContext::ExitScope(
- Handle<AllocationSite> scope_site,
- Handle<JSObject> object) {
- if (!object.is_null()) {
- bool top_level = !scope_site.is_null() &&
- top().is_identical_to(scope_site);
-
- scope_site->set_transition_info(*object);
- if (FLAG_trace_creation_allocation_sites) {
- if (top_level) {
- PrintF("*** Setting AllocationSite %p transition_info %p\n",
- static_cast<void*>(*scope_site),
- static_cast<void*>(*object));
- } else {
- PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
- static_cast<void*>(*top()),
- static_cast<void*>(*scope_site),
- static_cast<void*>(*object));
- }
- }
- }
-}
-
-
-bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
- if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
- if (FLAG_allocation_site_pretenuring ||
- AllocationSite::GetMode(object->GetElementsKind()) ==
- TRACK_ALLOCATION_SITE) {
- if (FLAG_trace_creation_allocation_sites) {
- PrintF("*** Creating Memento for %s %p\n",
- object->IsJSArray() ? "JSArray" : "JSObject",
- static_cast<void*>(*object));
- }
- return true;
- }
- }
- return false;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index da2b9dc45c..60614c5e01 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -7,11 +7,11 @@
#include "src/handles.h"
#include "src/objects.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
-
// AllocationSiteContext is the base class for walking and copying a nested
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
@@ -34,6 +34,8 @@ class AllocationSiteContext {
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
+ // {current_} is updated in place to not create unnecessary Handles, hence
+ // we initially need a separate handle.
current_ = Handle<AllocationSite>::New(*top_, isolate());
}
@@ -44,18 +46,6 @@ class AllocationSiteContext {
};
-// AllocationSiteCreationContext aids in the creation of AllocationSites to
-// accompany object literals.
-class AllocationSiteCreationContext : public AllocationSiteContext {
- public:
- explicit AllocationSiteCreationContext(Isolate* isolate)
- : AllocationSiteContext(isolate) { }
-
- Handle<AllocationSite> EnterNewScope();
- void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
-};
-
-
// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
// behind some/all components of a copied object literal.
class AllocationSiteUsageContext : public AllocationSiteContext {
@@ -82,10 +72,26 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
- DCHECK(object.is_null() || *object == scope_site->transition_info());
+ DCHECK(object.is_null() || *object == scope_site->boilerplate());
+ }
+
+ bool ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ &&
+ AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::ShouldTrack(object->GetElementsKind())) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
+ }
+ return false;
}
- bool ShouldCreateMemento(Handle<JSObject> object);
+ static const bool kCopying = true;
private:
Handle<AllocationSite> top_site_;
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index fde01f6447..0a39a796bc 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -53,7 +53,7 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment);
- DCHECK(base::bits::IsPowerOfTwo64(alignment));
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);
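
The DCHECK above only accepts power-of-two alignments; the change merely switches to the width-generic predicate. As a minimal standalone illustration of what such a check does (not V8's actual base::bits implementation, which is templated over unsigned integer types):

    #include <cstddef>

    // A power of two has exactly one bit set, so clearing the lowest set bit
    // with x & (x - 1) must leave zero; zero itself is rejected.
    constexpr bool IsPowerOfTwo(size_t x) {
      return x != 0 && (x & (x - 1)) == 0;
    }

    static_assert(IsPowerOfTwo(16), "valid alignment");
    static_assert(!IsPowerOfTwo(24), "not a power of two");
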
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index ef51f950a5..8a649534f8 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -39,15 +39,16 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
bool is_hidden_prototype,
bool is_prototype);
-MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
- Handle<FunctionTemplateInfo> data,
- Handle<Name> name = Handle<Name>());
+MaybeHandle<JSFunction> InstantiateFunction(
+ Isolate* isolate, Handle<FunctionTemplateInfo> data,
+ MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
-MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
- Handle<Name> name = Handle<Name>()) {
+MaybeHandle<Object> Instantiate(
+ Isolate* isolate, Handle<Object> data,
+ MaybeHandle<Name> maybe_name = MaybeHandle<Name>()) {
if (data->IsFunctionTemplateInfo()) {
- return InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(data), name);
+ return InstantiateFunction(
+ isolate, Handle<FunctionTemplateInfo>::cast(data), maybe_name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
Handle<JSReceiver>(), false, false);
@@ -250,7 +251,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
DCHECK_EQ(kData, details.kind());
v8::Intrinsic intrinsic =
- static_cast<v8::Intrinsic>(Smi::cast(properties->get(i++))->value());
+ static_cast<v8::Intrinsic>(Smi::ToInt(properties->get(i++)));
auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
@@ -311,7 +312,7 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache =
- UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
+ UnseededNumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@@ -333,14 +334,9 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
- DCHECK(entry != UnseededNumberDictionary::kNotFound);
- Handle<Object> result =
- UnseededNumberDictionary::DeleteProperty(cache, entry);
- USE(result);
- DCHECK(result->IsTrue(isolate));
- auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
- isolate->native_context()->set_slow_template_instantiations_cache(
- *new_cache);
+ DCHECK_NE(UnseededNumberDictionary::kNotFound, entry);
+ cache = UnseededNumberDictionary::DeleteEntry(cache, entry);
+ isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@@ -361,7 +357,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
bool is_hidden_prototype,
bool is_prototype) {
Handle<JSFunction> constructor;
- int serial_number = Smi::cast(info->serial_number())->value();
+ int serial_number = Smi::ToInt(info->serial_number());
if (!new_target.is_null()) {
if (IsSimpleInstantiation(isolate, *info, *new_target)) {
constructor = Handle<JSFunction>::cast(new_target);
@@ -402,7 +398,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
JSObject::New(constructor, new_target), JSObject);
- if (is_prototype) JSObject::OptimizeAsPrototype(object, FAST_PROTOTYPE);
+ if (is_prototype) JSObject::OptimizeAsPrototype(object);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@@ -450,8 +446,8 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
- Handle<Name> name) {
- int serial_number = Smi::cast(data->serial_number())->value();
+ MaybeHandle<Name> maybe_name) {
+ int serial_number = Smi::ToInt(data->serial_number());
if (serial_number) {
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, serial_number,
@@ -492,10 +488,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
}
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
- isolate, data, prototype, ApiNatives::JavaScriptObjectType);
- if (!name.is_null() && name->IsString()) {
- function->shared()->set_name(*name);
- }
+ isolate, data, prototype, ApiNatives::JavaScriptObjectType, maybe_name);
if (serial_number) {
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
@@ -538,10 +531,10 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
} // namespace
MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
- Handle<FunctionTemplateInfo> data) {
+ Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
- return ::v8::internal::InstantiateFunction(isolate, data);
+ return ::v8::internal::InstantiateFunction(isolate, data, maybe_name);
}
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
@@ -562,7 +555,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
- FAST_HOLEY_SMI_ELEMENTS);
+ HOLEY_SMI_ELEMENTS);
object_map->SetConstructor(*constructor);
object_map->set_is_access_check_needed(true);
@@ -575,7 +568,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -587,7 +580,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
PropertyAttributes attributes) {
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
- PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -600,7 +593,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
- PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kAccessor, attributes, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -621,12 +614,16 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
info->set_property_accessors(*list);
}
-
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
- Handle<Object> prototype, ApiInstanceType instance_type) {
+ Handle<Object> prototype, ApiInstanceType instance_type,
+ MaybeHandle<Name> maybe_name) {
Handle<SharedFunctionInfo> shared =
- FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj);
+ FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
+ maybe_name);
+  // To simplify things, API functions always have a shared name.
+ DCHECK(shared->has_shared_name());
+
Handle<JSFunction> result =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
@@ -695,7 +692,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
Handle<Map> map =
- isolate->factory()->NewMap(type, instance_size, FAST_HOLEY_SMI_ELEMENTS);
+ isolate->factory()->NewMap(type, instance_size, HOLEY_SMI_ELEMENTS);
JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
// Mark as undetectable if needed.
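
The api-natives changes above thread an optional name down to SharedFunctionInfo creation instead of patching shared()->set_name() after the fact. A rough standalone analogy of that "empty means no name" parameter style, using std::optional in place of V8's internal MaybeHandle (all names below are illustrative only):

    #include <optional>
    #include <string>

    // Stand-in for the function metadata; V8's SharedFunctionInfo is far richer.
    struct FunctionInfo {
      std::string name;  // stays empty when no name was supplied
    };

    // An empty optional plays the role of MaybeHandle<Name>() in the new
    // signature: the name travels with the creation call rather than being
    // assigned afterwards.
    FunctionInfo CreateFunctionInfo(
        std::optional<std::string> maybe_name = std::nullopt) {
      FunctionInfo info;
      if (maybe_name.has_value()) info.name = *maybe_name;
      return info;
    }
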
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 74d3788fd1..455be0dd06 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -20,7 +20,8 @@ class ApiNatives {
static const int kInitialFunctionCacheSize = 256;
MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
- Handle<FunctionTemplateInfo> data);
+ Handle<FunctionTemplateInfo> data,
+ MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
Handle<ObjectTemplateInfo> data,
@@ -35,10 +36,10 @@ class ApiNatives {
GlobalProxyType
};
- static Handle<JSFunction> CreateApiFunction(Isolate* isolate,
- Handle<FunctionTemplateInfo> obj,
- Handle<Object> prototype,
- ApiInstanceType instance_type);
+ static Handle<JSFunction> CreateApiFunction(
+ Isolate* isolate, Handle<FunctionTemplateInfo> obj,
+ Handle<Object> prototype, ApiInstanceType instance_type,
+ MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 818dfa1e22..10d44feeb0 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -80,6 +80,26 @@
namespace v8 {
+/*
+ * Most API methods should use one of the three macros:
+ *
+ * ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION.
+ *
+ * The latter two assume that no script is executed, and no exceptions are
+ * scheduled in addition (respectively). Creating a pending exception and
+ * removing it before returning is ok.
+ *
+ * Exceptions should be handled by invoking one of the
+ * RETURN_ON_FAILED_EXECUTION* macros.
+ *
+ * Don't use macros with DO_NOT_USE in their name.
+ *
+ * TODO(jochen): Document debugger specific macros.
+ * TODO(jochen): Document LOG_API and other RuntimeCallStats macros.
+ * TODO(jochen): All API methods should invoke one of the ENTER_V8* macros.
+ * TODO(jochen): Remove calls from API methods to DO_NOT_USE macros.
+ */
+
#define LOG_API(isolate, class_name, function_name) \
i::RuntimeCallTimerScope _runtime_timer( \
isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
@@ -87,16 +107,16 @@ namespace v8 {
#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
-#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, \
- function_name, bailout_value, \
- HandleScopeClass, do_callback) \
- if (IsExecutionTerminatingCheck(isolate)) { \
- return bailout_value; \
- } \
- HandleScopeClass handle_scope(isolate); \
- CallDepthScope<do_callback> call_depth_scope(isolate, context); \
- LOG_API(isolate, class_name, function_name); \
- ENTER_V8_DO_NOT_USE(isolate); \
+#define ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, \
+ function_name, bailout_value, \
+ HandleScopeClass, do_callback) \
+ if (IsExecutionTerminatingCheck(isolate)) { \
+ return bailout_value; \
+ } \
+ HandleScopeClass handle_scope(isolate); \
+ CallDepthScope<do_callback> call_depth_scope(isolate, context); \
+ LOG_API(isolate, class_name, function_name); \
+ i::VMState<v8::OTHER> __state__((isolate)); \
bool has_pending_exception = false
#define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T) \
@@ -105,7 +125,7 @@ namespace v8 {
} \
InternalEscapableScope handle_scope(isolate); \
CallDepthScope<false> call_depth_scope(isolate, v8::Local<v8::Context>()); \
- ENTER_V8_DO_NOT_USE(isolate); \
+ i::VMState<v8::OTHER> __state__((isolate)); \
bool has_pending_exception = false
#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
@@ -114,45 +134,26 @@ namespace v8 {
auto isolate = context.IsEmpty() \
? i::Isolate::Current() \
: reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
- PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass, do_callback);
-
-#define PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( \
- category, name, context, class_name, function_name, bailout_value, \
- HandleScopeClass, do_callback) \
- auto isolate = context.IsEmpty() \
- ? i::Isolate::Current() \
- : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name); \
- PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \
- bailout_value, HandleScopeClass, do_callback);
-
-#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, class_name, function_name, \
- T) \
- PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), class_name, \
- function_name, MaybeLocal<T>(), \
- InternalEscapableScope, false);
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, do_callback);
#define PREPARE_FOR_EXECUTION(context, class_name, function_name, T) \
PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
MaybeLocal<T>(), InternalEscapableScope, \
false)
-#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, class_name, \
- function_name, T) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
- MaybeLocal<T>(), InternalEscapableScope, \
- true)
-
-#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, class_name, function_name, T) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
- Nothing<T>(), i::HandleScope, false)
-
-#define PREPARE_FOR_EXECUTION_BOOL(context, class_name, function_name) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
- false, i::HandleScope, false)
+#define ENTER_V8(isolate, context, class_name, function_name, bailout_value, \
+ HandleScopeClass) \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, true)
#ifdef DEBUG
+#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass) \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, false); \
+ i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate))
+
#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
i::VMState<v8::OTHER> __state__((isolate)); \
i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)); \
@@ -162,6 +163,11 @@ namespace v8 {
i::VMState<v8::OTHER> __state__((isolate)); \
i::DisallowExceptions __no_exceptions__((isolate))
#else
+#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass) \
+ ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \
+ bailout_value, HandleScopeClass, false)
+
#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
i::VMState<v8::OTHER> __state__((isolate));
@@ -169,24 +175,19 @@ namespace v8 {
i::VMState<v8::OTHER> __state__((isolate));
#endif // DEBUG
-#define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
- do { \
- if (has_pending_exception) { \
- call_depth_scope.Escape(); \
- return value; \
- } \
+#define EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, value) \
+ do { \
+ if (has_pending_exception) { \
+ call_depth_scope.Escape(); \
+ return value; \
+ } \
} while (false)
-
#define RETURN_ON_FAILED_EXECUTION(T) \
- EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, MaybeLocal<T>())
-
+ EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, MaybeLocal<T>())
#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
- EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, Nothing<T>())
-
-#define RETURN_ON_FAILED_EXECUTION_BOOL() \
- EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false)
+ EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing<T>())
#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
return maybe_local.FromMaybe(Local<T>());
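
The comment block introduced above prescribes which entry macros an API method should use, and the renamed macros here enforce it. Based on the call sites rewritten further down in this same diff (Value::NumberValue is representative), a converted method has roughly this shape; the snippet is a sketch lifted from that pattern, not additional code in the commit:

    // Enter the VM with an explicit bailout value, record failure in
    // has_pending_exception, and bail out before producing a result.
    Maybe<double> Value::NumberValue(Local<Context> context) const {
      auto obj = Utils::OpenHandle(this);
      if (obj->IsNumber()) return Just(obj->Number());
      auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
      ENTER_V8(isolate, context, Value, NumberValue, Nothing<double>(),
               i::HandleScope);
      i::Handle<i::Object> num;
      has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num);
      RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
      return Just(num->Number());
    }
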
@@ -208,8 +209,8 @@ class InternalEscapableScope : public v8::EscapableHandleScope {
: v8::EscapableHandleScope(reinterpret_cast<v8::Isolate*>(isolate)) {}
};
-
-#ifdef DEBUG
+// TODO(jochen): This should be #ifdef DEBUG
+#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
auto handle_scope_implementer = isolate->handle_scope_implementer();
if (handle_scope_implementer->microtasks_policy() ==
@@ -248,7 +249,8 @@ class CallDepthScope {
}
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
if (do_callback) isolate_->FireCallCompletedCallback();
-#ifdef DEBUG
+// TODO(jochen): This should be #ifdef DEBUG
+#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
if (do_callback) CheckMicrotasksScopesConsistency(isolate_);
#endif
}
@@ -475,7 +477,8 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
- return base::VirtualMemory::ReserveRegion(length);
+ return base::VirtualMemory::ReserveRegion(length,
+ base::OS::GetRandomMmapAddr());
}
virtual void Free(void* data, size_t length,
@@ -875,7 +878,7 @@ Extension::Extension(const char* name,
}
ResourceConstraints::ResourceConstraints()
- : max_semi_space_size_(0),
+ : max_semi_space_size_in_kb_(0),
max_old_space_size_(0),
stack_limit_(NULL),
code_range_size_(0),
@@ -883,38 +886,11 @@ ResourceConstraints::ResourceConstraints()
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
-#if V8_OS_ANDROID
- // Android has higher physical memory requirements before raising the maximum
- // heap size limits since it has no swap space.
- const uint64_t low_limit = 512ul * i::MB;
- const uint64_t medium_limit = 1ul * i::GB;
- const uint64_t high_limit = 2ul * i::GB;
-#else
- const uint64_t low_limit = 512ul * i::MB;
- const uint64_t medium_limit = 768ul * i::MB;
- const uint64_t high_limit = 1ul * i::GB;
-#endif
-
- if (physical_memory <= low_limit) {
- set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice);
- set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice);
- set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSizeLowMemoryDevice);
- } else if (physical_memory <= medium_limit) {
- set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice);
- set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice);
- set_max_zone_pool_size(
- i::AccountingAllocator::kMaxPoolSizeMediumMemoryDevice);
- } else if (physical_memory <= high_limit) {
- set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice);
- set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice);
- set_max_zone_pool_size(
- i::AccountingAllocator::kMaxPoolSizeHighMemoryDevice);
- } else {
- set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice);
- set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice);
- set_max_zone_pool_size(
- i::AccountingAllocator::kMaxPoolSizeHugeMemoryDevice);
- }
+ set_max_semi_space_size_in_kb(
+ i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
+ set_max_old_space_size(
+ static_cast<int>(i::Heap::ComputeMaxOldGenerationSize(physical_memory)));
+ set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
// Reserve no more than 1/8 of the memory for the code range, but at most
@@ -925,10 +901,9 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
}
}
-
void SetResourceConstraints(i::Isolate* isolate,
const ResourceConstraints& constraints) {
- int semi_space_size = constraints.max_semi_space_size();
+ size_t semi_space_size = constraints.max_semi_space_size_in_kb();
int old_space_size = constraints.max_old_space_size();
size_t code_range_size = constraints.code_range_size();
size_t max_pool_size = constraints.max_zone_pool_size();
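
With the per-device-class table deleted above, ConfigureDefaults now derives the semi-space size (tracked in KB) and the old-generation size directly from the physical-memory figure. From the embedder's side the public entry point is unchanged; a minimal sketch against the public API, assuming the v8 headers are on the include path and a valid allocator is supplied:

    #include <v8.h>

    // Create an isolate whose heap limits are derived from the machine's
    // memory, as ConfigureDefaults now computes them from physical_memory.
    v8::Isolate* NewIsolateWithDefaultConstraints(
        v8::ArrayBuffer::Allocator* allocator, uint64_t physical_memory,
        uint64_t virtual_memory_limit) {
      v8::Isolate::CreateParams params;
      params.array_buffer_allocator = allocator;
      params.constraints.ConfigureDefaults(physical_memory,
                                           virtual_memory_limit);
      return v8::Isolate::New(params);
    }
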
@@ -1068,8 +1043,9 @@ HandleScope::~HandleScope() {
}
void* HandleScope::operator new(size_t) { base::OS::Abort(); }
-
+void* HandleScope::operator new[](size_t) { base::OS::Abort(); }
void HandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
+void HandleScope::operator delete[](void*, size_t) { base::OS::Abort(); }
int HandleScope::NumberOfHandles(Isolate* isolate) {
return i::HandleScope::NumberOfHandles(
@@ -1109,8 +1085,11 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
}
void* EscapableHandleScope::operator new(size_t) { base::OS::Abort(); }
-
+void* EscapableHandleScope::operator new[](size_t) { base::OS::Abort(); }
void EscapableHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
+void EscapableHandleScope::operator delete[](void*, size_t) {
+ base::OS::Abort();
+}
SealHandleScope::SealHandleScope(Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
@@ -1131,8 +1110,9 @@ SealHandleScope::~SealHandleScope() {
}
void* SealHandleScope::operator new(size_t) { base::OS::Abort(); }
-
+void* SealHandleScope::operator new[](size_t) { base::OS::Abort(); }
void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
+void SealHandleScope::operator delete[](void*, size_t) { base::OS::Abort(); }
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
@@ -1144,7 +1124,6 @@ void Context::Enter() {
isolate->set_context(*env);
}
-
void Context::Exit() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
@@ -1159,6 +1138,22 @@ void Context::Exit() {
isolate->set_context(impl->RestoreContext());
}
+Context::BackupIncumbentScope::BackupIncumbentScope(
+ Local<Context> backup_incumbent_context)
+ : backup_incumbent_context_(backup_incumbent_context) {
+ DCHECK(!backup_incumbent_context_.IsEmpty());
+
+ i::Handle<i::Context> env = Utils::OpenHandle(*backup_incumbent_context_);
+ i::Isolate* isolate = env->GetIsolate();
+ prev_ = isolate->top_backup_incumbent_scope();
+ isolate->set_top_backup_incumbent_scope(this);
+}
+
+Context::BackupIncumbentScope::~BackupIncumbentScope() {
+ i::Handle<i::Context> env = Utils::OpenHandle(*backup_incumbent_context_);
+ i::Isolate* isolate = env->GetIsolate();
+ isolate->set_top_backup_incumbent_scope(prev_);
+}
static void* DecodeSmiToAligned(i::Object* value, const char* location) {
Utils::ApiCheck(value->IsSmi(), location, "Not a Smi");
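
Context::BackupIncumbentScope, added above, lets an embedder state which context should count as the incumbent context while it re-enters V8 on behalf of that context. A hedged usage sketch against the public API; the helper name and surrounding setup are illustrative only:

    // While `incumbent_scope` is alive, V8 treats `incumbent` as the
    // incumbent context for the nested API call below.
    void CallOnBehalfOf(v8::Local<v8::Context> incumbent,
                        v8::Local<v8::Context> target,
                        v8::Local<v8::Function> fn,
                        v8::Local<v8::Value> recv) {
      v8::Context::BackupIncumbentScope incumbent_scope(incumbent);
      v8::MaybeLocal<v8::Value> result = fn->Call(target, recv, 0, nullptr);
      (void)result;  // error handling elided in this sketch
    }
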
@@ -1366,8 +1361,9 @@ static Local<FunctionTemplate> FunctionTemplateNew(
obj->set_undetectable(false);
obj->set_needs_access_check(false);
obj->set_accept_any_receiver(true);
- if (!signature.IsEmpty())
+ if (!signature.IsEmpty()) {
obj->set_signature(*Utils::OpenHandle(*signature));
+ }
obj->set_cached_property_name(
cached_property_name.IsEmpty()
? isolate->heap()->the_hole_value()
@@ -2062,9 +2058,10 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
MaybeLocal<Value> Script::Run(Local<Context> context) {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
- "v8", "V8.Execute", context, Script, Run, MaybeLocal<Value>(),
- InternalEscapableScope, true);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ ENTER_V8(isolate, context, Script, Run, MaybeLocal<Value>(),
+ InternalEscapableScope);
i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
@@ -2096,37 +2093,33 @@ Local<UnboundScript> Script::GetUnboundScript() {
i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
}
-bool DynamicImportResult::FinishDynamicImportSuccess(Local<Context> context,
- Local<Module> module) {
- PREPARE_FOR_EXECUTION_BOOL(context, Module, FinishDynamicImportSuccess);
- auto promise = Utils::OpenHandle(this);
- i::Handle<i::Module> module_obj = Utils::OpenHandle(*module);
- i::Handle<i::JSModuleNamespace> module_namespace =
- i::Module::GetModuleNamespace(module_obj);
- i::Handle<i::Object> argv[] = {promise, module_namespace};
- has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_resolve(),
- isolate->factory()->undefined_value(), arraysize(argv),
- argv)
- .is_null();
- RETURN_ON_FAILED_EXECUTION_BOOL();
- return true;
-}
-bool DynamicImportResult::FinishDynamicImportFailure(Local<Context> context,
- Local<Value> exception) {
- PREPARE_FOR_EXECUTION_BOOL(context, Module, FinishDynamicImportFailure);
- auto promise = Utils::OpenHandle(this);
- // We pass true to trigger the debugger's on exception handler.
- i::Handle<i::Object> argv[] = {promise, Utils::OpenHandle(*exception),
- isolate->factory()->ToBoolean(true)};
- has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_internal_reject(),
- isolate->factory()->undefined_value(), arraysize(argv),
- argv)
- .is_null();
- RETURN_ON_FAILED_EXECUTION_BOOL();
- return true;
+Module::Status Module::GetStatus() const {
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ switch (self->status()) {
+ case i::Module::kUninstantiated:
+ case i::Module::kPreInstantiating:
+ return kUninstantiated;
+ case i::Module::kInstantiating:
+ return kInstantiating;
+ case i::Module::kInstantiated:
+ return kInstantiated;
+ case i::Module::kEvaluating:
+ return kEvaluating;
+ case i::Module::kEvaluated:
+ return kEvaluated;
+ case i::Module::kErrored:
+ return kErrored;
+ }
+ UNREACHABLE();
+}
+
+Local<Value> Module::GetException() const {
+ Utils::ApiCheck(GetStatus() == kErrored, "v8::Module::GetException",
+ "Module status must be kErrored");
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ return ToApiHandle<Value>(i::handle(self->GetException(), isolate));
}
int Module::GetModuleRequestsLength() const {
@@ -2144,28 +2137,63 @@ Local<String> Module::GetModuleRequest(int i) const {
return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
}
+Location Module::GetModuleRequestLocation(int i) const {
+ CHECK_GE(i, 0);
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ i::Handle<i::FixedArray> module_request_positions(
+ self->info()->module_request_positions(), isolate);
+ CHECK_LT(i, module_request_positions->length());
+ int position = i::Smi::ToInt(module_request_positions->get(i));
+ i::Handle<i::Script> script(self->script(), isolate);
+ i::Script::PositionInfo info;
+ i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET);
+ return v8::Location(info.line, info.column);
+}
+
+Local<Value> Module::GetModuleNamespace() {
+ Utils::ApiCheck(
+ GetStatus() != kErrored && GetStatus() >= kInstantiated,
+ "v8::Module::GetModuleNamespace",
+      "GetModuleNamespace should be used on a successfully instantiated "
+ "module. The current module has not been instantiated or has errored");
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ i::Handle<i::JSModuleNamespace> module_namespace =
+ i::Module::GetModuleNamespace(self);
+ return ToApiHandle<Value>(module_namespace);
+}
+
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
bool Module::Instantiate(Local<Context> context,
Module::ResolveCallback callback) {
- PREPARE_FOR_EXECUTION_BOOL(context, Module, Instantiate);
+ return InstantiateModule(context, callback).FromMaybe(false);
+}
+
+Maybe<bool> Module::InstantiateModule(Local<Context> context,
+ Module::ResolveCallback callback) {
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Module, InstantiateModule,
+ Nothing<bool>(), i::HandleScope);
has_pending_exception =
!i::Module::Instantiate(Utils::OpenHandle(this), context, callback);
- RETURN_ON_FAILED_EXECUTION_BOOL();
- return true;
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
}
MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
- "v8", "V8.Execute", context, Module, Evaluate, MaybeLocal<Value>(),
- InternalEscapableScope, true);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ ENTER_V8(isolate, context, Module, Evaluate, MaybeLocal<Value>(),
+ InternalEscapableScope);
i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
i::Handle<i::Module> self = Utils::OpenHandle(this);
// It's an API error to call Evaluate before Instantiate.
- CHECK(self->instantiated());
+ CHECK_GE(self->status(), i::Module::kInstantiated);
Local<Value> result;
has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result);
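
Taken together, the Module hunks replace the promise-based DynamicImportResult helpers with a status-driven API: InstantiateModule reports resolution failure through Maybe<bool>, Evaluate checks the status machine, and GetStatus/GetException expose it. A hedged embedder-side sketch of the resulting flow; ResolveModule and RunModule are assumed helper names and error handling is abbreviated:

    // Assumed resolver: a real embedder would consult its own module registry.
    // Returning an empty MaybeLocal signals that resolution failed.
    v8::MaybeLocal<v8::Module> ResolveModule(v8::Local<v8::Context> context,
                                             v8::Local<v8::String> specifier,
                                             v8::Local<v8::Module> referrer) {
      return v8::MaybeLocal<v8::Module>();
    }

    v8::MaybeLocal<v8::Value> RunModule(v8::Local<v8::Context> context,
                                        v8::Local<v8::Module> module) {
      // InstantiateModule yields Nothing<bool>() when a module request fails.
      if (module->InstantiateModule(context, ResolveModule).IsNothing())
        return v8::MaybeLocal<v8::Value>();
      // The status enum added above distinguishes the errored state explicitly.
      if (module->GetStatus() == v8::Module::kErrored)
        return v8::MaybeLocal<v8::Value>();
      return module->Evaluate(context);
    }
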
@@ -2175,10 +2203,11 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound,
- UnboundScript);
+ auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
+ ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
+ CompileUnbound, MaybeLocal<UnboundScript>(),
+ InternalEscapableScope);
// Don't try to produce any kind of cache when the debugger is loaded.
if (isolate->debug()->is_loaded() &&
@@ -2354,14 +2383,9 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Function);
TRACE_EVENT0("v8", "V8.ScriptCompiler");
i::Handle<i::String> source_string;
- int parameters_end_pos = i::kNoSourcePosition;
auto factory = isolate->factory();
if (arguments_count) {
- if (i::FLAG_harmony_function_tostring) {
- source_string = factory->NewStringFromStaticChars("(function anonymous(");
- } else {
- source_string = factory->NewStringFromStaticChars("(function(");
- }
+ source_string = factory->NewStringFromStaticChars("(function(");
for (size_t i = 0; i < arguments_count; ++i) {
IsIdentifierHelper helper;
if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
@@ -2380,25 +2404,12 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
RETURN_ON_FAILED_EXECUTION(Function);
}
i::Handle<i::String> brackets;
- if (i::FLAG_harmony_function_tostring) {
- // Append linefeed and signal that text beyond the linefeed is not part of
- // the formal parameters.
- brackets = factory->NewStringFromStaticChars("\n) {\n");
- parameters_end_pos = source_string->length() + 1;
- } else {
- brackets = factory->NewStringFromStaticChars("){");
- }
+ brackets = factory->NewStringFromStaticChars("){");
has_pending_exception = !factory->NewConsString(source_string, brackets)
.ToHandle(&source_string);
RETURN_ON_FAILED_EXECUTION(Function);
} else {
- if (i::FLAG_harmony_function_tostring) {
- source_string =
- factory->NewStringFromStaticChars("(function anonymous(\n) {\n");
- parameters_end_pos = source_string->length() - 4;
- } else {
- source_string = factory->NewStringFromStaticChars("(function(){");
- }
+ source_string = factory->NewStringFromStaticChars("(function(){");
}
int scope_position = source_string->length();
@@ -2448,7 +2459,7 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
has_pending_exception =
!i::Compiler::GetFunctionFromEval(
source_string, outer_info, context, i::SLOPPY,
- i::ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos,
+ i::ONLY_SINGLE_FUNCTION_LITERAL, i::kNoSourcePosition,
eval_scope_position, eval_position, line_offset,
column_offset - scope_position, name_obj, source->resource_options)
.ToHandle(&fun);
@@ -2665,8 +2676,9 @@ v8::TryCatch::~TryCatch() {
}
void* v8::TryCatch::operator new(size_t) { base::OS::Abort(); }
-
+void* v8::TryCatch::operator new[](size_t) { base::OS::Abort(); }
void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); }
+void v8::TryCatch::operator delete[](void*, size_t) { base::OS::Abort(); }
bool v8::TryCatch::HasCaught() const {
return !reinterpret_cast<i::Object*>(exception_)->IsTheHole(isolate_);
@@ -2994,7 +3006,7 @@ Local<Array> StackTrace::AsArray() {
frames->set(i, *frame_obj);
}
return Utils::ToLocal(isolate->factory()->NewJSArrayWithElements(
- frames, i::FAST_ELEMENTS, frame_count));
+ frames, i::PACKED_ELEMENTS, frame_count));
}
@@ -3168,8 +3180,7 @@ bool NativeWeakMap::Delete(Local<Value> v8_key) {
// --- J S O N ---
MaybeLocal<Value> JSON::Parse(Isolate* v8_isolate, Local<String> json_string) {
- auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, JSON, Parse, Value);
+ PREPARE_FOR_EXECUTION(v8_isolate->GetCurrentContext(), JSON, Parse, Value);
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
i::Handle<i::String> source = i::String::Flatten(string);
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
@@ -3282,7 +3293,9 @@ void ValueSerializer::SetTreatArrayBufferViewsAsHostObjects(bool mode) {
Maybe<bool> ValueSerializer::WriteValue(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueSerializer, WriteValue, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, ValueSerializer, WriteValue, Nothing<bool>(),
+ i::HandleScope);
i::Handle<i::Object> object = Utils::OpenHandle(*value);
Maybe<bool> result = private_->serializer.WriteObject(object);
has_pending_exception = result.IsNothing();
@@ -3373,7 +3386,9 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data,
ValueDeserializer::~ValueDeserializer() { delete private_; }
Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueDeserializer, ReadHeader, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, ValueDeserializer, ReadHeader,
+ Nothing<bool>(), i::HandleScope);
// We could have aborted during the constructor.
// If so, ReadHeader is where we report it.
@@ -3626,7 +3641,7 @@ bool Value::IsInt32() const {
bool Value::IsUint32() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
+ if (obj->IsSmi()) return i::Smi::ToInt(*obj) >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
return !i::IsMinusZero(value) &&
@@ -4067,7 +4082,9 @@ bool Value::BooleanValue() const {
Maybe<double> Value::NumberValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(obj->Number());
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, NumberValue, double);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Value, NumberValue, Nothing<double>(),
+ i::HandleScope);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
@@ -4088,7 +4105,9 @@ Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
if (obj->IsNumber()) {
return Just(NumberToInt64(*obj));
}
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, IntegerValue, int64_t);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Value, IntegerValue, Nothing<int64_t>(),
+ i::HandleScope);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
@@ -4100,7 +4119,7 @@ int64_t Value::IntegerValue() const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) {
if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
+ return i::Smi::ToInt(*obj);
} else {
return static_cast<int64_t>(obj->Number());
}
@@ -4112,11 +4131,13 @@ int64_t Value::IntegerValue() const {
Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToInt32(*obj));
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Int32Value, int32_t);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Value, Int32Value, Nothing<int32_t>(),
+ i::HandleScope);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToInt32(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int32_t);
- return Just(num->IsSmi() ? i::Smi::cast(*num)->value()
+ return Just(num->IsSmi() ? i::Smi::ToInt(*num)
: static_cast<int32_t>(num->Number()));
}
@@ -4131,11 +4152,13 @@ int32_t Value::Int32Value() const {
Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToUint32(*obj));
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Uint32Value, uint32_t);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Value, Uint32Value, Nothing<uint32_t>(),
+ i::HandleScope);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToUint32(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(uint32_t);
- return Just(num->IsSmi() ? static_cast<uint32_t>(i::Smi::cast(*num)->value())
+ return Just(num->IsSmi() ? static_cast<uint32_t>(i::Smi::ToInt(*num))
: static_cast<uint32_t>(num->Number()));
}
@@ -4150,7 +4173,7 @@ uint32_t Value::Uint32Value() const {
MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
if (self->IsSmi()) {
- if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self);
+ if (i::Smi::ToInt(*self) >= 0) return Utils::Uint32ToLocal(self);
return Local<Uint32>();
}
PREPARE_FOR_EXECUTION(context, Object, ToArrayIndex, Uint32);
@@ -4176,7 +4199,7 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
Local<Uint32> Value::ToArrayIndex() const {
auto self = Utils::OpenHandle(this);
if (self->IsSmi()) {
- if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self);
+ if (i::Smi::ToInt(*self) >= 0) return Utils::Uint32ToLocal(self);
return Local<Uint32>();
}
auto context = ContextFromHeapObject(self);
@@ -4228,7 +4251,9 @@ Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
Maybe<bool> Value::InstanceOf(v8::Local<v8::Context> context,
v8::Local<v8::Object> object) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Value, InstanceOf, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Value, InstanceOf, Nothing<bool>(),
+ i::HandleScope);
auto left = Utils::OpenHandle(this);
auto right = Utils::OpenHandle(*object);
i::Handle<i::Object> result;
@@ -4240,7 +4265,8 @@ Maybe<bool> Value::InstanceOf(v8::Local<v8::Context> context,
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, Set, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
auto value_obj = Utils::OpenHandle(*value);
@@ -4260,7 +4286,8 @@ bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, Set, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
@@ -4279,7 +4306,9 @@ bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing<bool>(),
+ i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -4297,7 +4326,9 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
uint32_t index,
v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing<bool>(),
+ i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -4406,7 +4437,9 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value,
v8::PropertyAttribute attributes) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineOwnProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
+ i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -4426,7 +4459,9 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
Maybe<bool> v8::Object::DefineProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
PropertyDescriptor& descriptor) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing<bool>(),
+ i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
@@ -4455,7 +4490,9 @@ static i::MaybeHandle<i::Object> DefineObjectProperty(
Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value,
v8::PropertyAttribute attribs) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, ForceSet, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, ForceSet, Nothing<bool>(),
+ i::HandleScope);
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto key_obj = Utils::OpenHandle(*key);
auto value_obj = Utils::OpenHandle(*value);
@@ -4468,27 +4505,11 @@ Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
}
-bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
- v8::PropertyAttribute attribs) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), Object, ForceSet,
- false, i::HandleScope, false);
- i::Handle<i::JSObject> self =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- has_pending_exception =
- DefineObjectProperty(self, key_obj, value_obj,
- static_cast<i::PropertyAttributes>(attribs))
- .is_null();
- EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false);
- return true;
-}
-
-
Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrivate, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, SetPrivate, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key));
auto value_obj = Utils::OpenHandle(*value);
@@ -4556,8 +4577,9 @@ MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, GetPropertyAttributes,
- PropertyAttribute);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, GetPropertyAttributes,
+ Nothing<PropertyAttribute>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
@@ -4615,7 +4637,9 @@ Local<Value> v8::Object::GetPrototype() {
Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrototype, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, SetPrototype, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
// We do not allow exceptions thrown while setting the prototype
@@ -4726,7 +4750,9 @@ Local<String> v8::Object::GetConstructorName() {
Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
IntegrityLevel level) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetIntegrityLevel, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, SetIntegrityLevel, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
i::JSReceiver::IntegrityLevel i_level =
level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
@@ -4738,7 +4764,8 @@ Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
}
Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Delete, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
Maybe<bool> result =
@@ -4762,7 +4789,8 @@ Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Get, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
Maybe<bool> maybe = Nothing<bool>();
@@ -4795,7 +4823,8 @@ Maybe<bool> v8::Object::HasPrivate(Local<Context> context, Local<Private> key) {
Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DeleteProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
Maybe<bool> result = i::JSReceiver::DeleteElement(self, index);
has_pending_exception = result.IsNothing();
@@ -4811,7 +4840,8 @@ bool v8::Object::Delete(uint32_t index) {
Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Get, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto maybe = i::JSReceiver::HasElement(self, index);
has_pending_exception = maybe.IsNothing();
@@ -4832,7 +4862,9 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
AccessControl settings,
PropertyAttribute attributes,
bool is_special_data_property) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetAccessor, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
+ i::HandleScope);
if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false);
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
@@ -4916,7 +4948,9 @@ Maybe<bool> Object::SetNativeDataProperty(v8::Local<v8::Context> context,
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, HasOwnProperty, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_val = Utils::OpenHandle(*key);
auto result = i::JSReceiver::HasOwnProperty(self, key_val);
@@ -4926,7 +4960,9 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
}
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context, uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Object, HasOwnProperty, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
auto result = i::JSReceiver::HasOwnProperty(self, index);
has_pending_exception = result.IsNothing();
@@ -4942,7 +4978,9 @@ bool v8::Object::HasOwnProperty(Local<String> key) {
Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedProperty, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, HasRealNamedProperty,
+ Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
@@ -4962,8 +5000,9 @@ bool v8::Object::HasRealNamedProperty(Local<String> key) {
Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealIndexedProperty,
- bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, HasRealIndexedProperty,
+ Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto result = i::JSObject::HasRealElementProperty(
@@ -4982,8 +5021,9 @@ bool v8::Object::HasRealIndexedProperty(uint32_t index) {
Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedCallbackProperty,
- bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, HasRealNamedCallbackProperty,
+ Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
@@ -5048,9 +5088,10 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Maybe<PropertyAttribute>
v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
Local<Context> context, Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(
- context, Object, GetRealNamedPropertyAttributesInPrototypeChain,
- PropertyAttribute);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object,
+ GetRealNamedPropertyAttributesInPrototypeChain,
+ Nothing<PropertyAttribute>(), i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Nothing<PropertyAttribute>();
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
@@ -5101,8 +5142,9 @@ Local<Value> v8::Object::GetRealNamedProperty(Local<String> key) {
Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(
- context, Object, GetRealNamedPropertyAttributes, PropertyAttribute);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, GetRealNamedPropertyAttributes,
+ Nothing<PropertyAttribute>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
@@ -5163,9 +5205,10 @@ bool v8::Object::IsConstructor() {
MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
Local<Value> recv, int argc,
Local<Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
- "v8", "V8.Execute", context, Object, CallAsFunction, MaybeLocal<Value>(),
- InternalEscapableScope, true);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ ENTER_V8(isolate, context, Object, CallAsFunction, MaybeLocal<Value>(),
+ InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
@@ -5190,9 +5233,10 @@ Local<v8::Value> Object::CallAsFunction(v8::Local<v8::Value> recv, int argc,
MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
- "v8", "V8.Execute", context, Object, CallAsConstructor,
- MaybeLocal<Value>(), InternalEscapableScope, true);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ ENTER_V8(isolate, context, Object, CallAsConstructor, MaybeLocal<Value>(),
+ InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -5241,9 +5285,10 @@ Local<v8::Object> Function::NewInstance() const {
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
v8::Local<v8::Value> argv[]) const {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
- "v8", "V8.Execute", context, Function, NewInstance, MaybeLocal<Object>(),
- InternalEscapableScope, true);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal<Object>(),
+ InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -5266,9 +5311,10 @@ Local<v8::Object> Function::NewInstance(int argc,
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE(
- "v8", "V8.Execute", context, Function, Call, MaybeLocal<Value>(),
- InternalEscapableScope, true);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ ENTER_V8(isolate, context, Function, Call, MaybeLocal<Value>(),
+ InternalEscapableScope);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -5743,7 +5789,6 @@ class Utf8LengthHelper : public i::AllStatic {
}
}
UNREACHABLE();
- return 0;
}
static inline int Calculate(i::ConsString* current) {
@@ -6160,7 +6205,7 @@ bool Boolean::Value() const {
int64_t Integer::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
+ return i::Smi::ToInt(*obj);
} else {
return static_cast<int64_t>(obj->Number());
}
@@ -6170,7 +6215,7 @@ int64_t Integer::Value() const {
int32_t Int32::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
+ return i::Smi::ToInt(*obj);
} else {
return static_cast<int32_t>(obj->Number());
}
@@ -6180,7 +6225,7 @@ int32_t Int32::Value() const {
uint32_t Uint32::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
- return i::Smi::cast(*obj)->value();
+ return i::Smi::ToInt(*obj);
} else {
return static_cast<uint32_t>(obj->Number());
}
@@ -6286,12 +6331,16 @@ bool v8::V8::Initialize() {
return true;
}
-#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
+#if V8_OS_POSIX
bool V8::TryHandleSignal(int signum, void* info, void* context) {
+#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
return v8::internal::trap_handler::TryHandleSignal(
signum, static_cast<siginfo_t*>(info), static_cast<ucontext_t*>(context));
+#else // V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
+ return false;
+#endif
}
-#endif // V8_OS_LINUX
+#endif
bool V8::RegisterDefaultSignalHandler() {
return v8::internal::trap_handler::RegisterDefaultSignalHandler();
@@ -6500,6 +6549,11 @@ Local<Context> NewContext(
v8::MaybeLocal<Value> global_object, size_t context_snapshot_index,
v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ // TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
+ // fail.
+ // Sanity-check that the isolate is initialized and usable.
+ CHECK(isolate->builtins()->Illegal()->IsCode());
+
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
LOG_API(isolate, Context, New);
i::HandleScope scope(isolate);
@@ -7155,8 +7209,7 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
DCHECK_EQ(1, date_cache_version->length());
CHECK(date_cache_version->get(0)->IsSmi());
date_cache_version->set(
- 0,
- i::Smi::FromInt(i::Smi::cast(date_cache_version->get(0))->value() + 1));
+ 0, i::Smi::FromInt(i::Smi::ToInt(date_cache_version->get(0)) + 1));
}
@@ -7222,7 +7275,7 @@ uint32_t v8::Array::Length() const {
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
if (length->IsSmi()) {
- return i::Smi::cast(length)->value();
+ return i::Smi::ToInt(length);
} else {
return static_cast<uint32_t>(length->Number());
}
@@ -7233,7 +7286,7 @@ MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
uint32_t index) {
PREPARE_FOR_EXECUTION(context, Array, CloneElementAt, Object);
auto self = Utils::OpenHandle(this);
- if (!self->HasFastObjectElements()) return Local<Object>();
+ if (!self->HasObjectElements()) return Local<Object>();
i::FixedArray* elms = i::FixedArray::cast(self->elements());
i::Object* paragon = elms->get(index);
if (!paragon->IsJSObject()) return Local<Object>();
@@ -7304,7 +7357,8 @@ MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Has, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Map, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -7317,7 +7371,8 @@ Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Delete, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Map, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -7329,13 +7384,20 @@ Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
}
namespace {
+
+enum class MapAsArrayKind {
+ kEntries = i::JS_MAP_KEY_VALUE_ITERATOR_TYPE,
+ kKeys = i::JS_MAP_KEY_ITERATOR_TYPE,
+ kValues = i::JS_MAP_VALUE_ITERATOR_TYPE
+};
+
i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
- int offset, int kind) {
+ int offset, MapAsArrayKind kind) {
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj));
if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
int length = (table->NumberOfElements() - offset) *
- (kind == i::JSMapIterator::kKindEntries ? 2 : 1);
+ (kind == MapAsArrayKind::kEntries ? 2 : 1);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
@@ -7346,20 +7408,19 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
if (offset-- > 0) continue;
- if (kind == i::JSMapIterator::kKindEntries ||
- kind == i::JSMapIterator::kKindKeys) {
+ if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kKeys) {
result->set(result_index++, key);
}
- if (kind == i::JSMapIterator::kKindEntries ||
- kind == i::JSMapIterator::kKindValues) {
+ if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kValues) {
result->set(result_index++, table->ValueAt(i));
}
}
}
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
- return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+ return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length);
}
+
} // namespace
Local<Array> Map::AsArray() const {
@@ -7368,7 +7429,7 @@ Local<Array> Map::AsArray() const {
LOG_API(isolate, Map, AsArray);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::ToLocal(
- MapAsArray(isolate, obj->table(), 0, i::JSMapIterator::kKindEntries));
+ MapAsArray(isolate, obj->table(), 0, MapAsArrayKind::kEntries));
}
@@ -7410,7 +7471,8 @@ MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Has, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Set, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -7423,7 +7485,8 @@ Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Delete, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Set, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -7456,7 +7519,7 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
}
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
- return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+ return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length);
}
} // namespace
@@ -7495,7 +7558,9 @@ Local<Promise> Promise::Resolver::GetPromise() {
Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Promise_Resolver, Resolve, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
has_pending_exception =
@@ -7516,7 +7581,9 @@ void Promise::Resolver::Resolve(Local<Value> value) {
Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ ENTER_V8(isolate, context, Promise_Resolver, Reject, Nothing<bool>(),
+ i::HandleScope);
auto self = Utils::OpenHandle(this);
// We pass true to trigger the debugger's on exception handler.
@@ -7648,22 +7715,14 @@ MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
}
Local<String> WasmCompiledModule::GetWasmWireBytes() {
- i::Handle<i::JSObject> obj =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::WasmModuleObject> obj =
+ i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
- i::handle(i::WasmCompiledModule::cast(obj->GetEmbedderField(0)));
+ i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
i::Handle<i::String> wire_bytes(compiled_part->module_bytes());
return Local<String>::Cast(Utils::ToLocal(wire_bytes));
}
-WasmCompiledModule::TransferrableModule&
-WasmCompiledModule::TransferrableModule::operator=(
- TransferrableModule&& src) {
- compiled_code = std::move(src.compiled_code);
- wire_bytes = std::move(src.wire_bytes);
- return *this;
-}
-
// Currently, wasm modules are bound, both to Isolate and to
// the Context they were created in. The currently-supported means to
// decontextualize and then re-contextualize a module is via
@@ -7695,10 +7754,10 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::FromTransferrableModule(
}
WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
- i::Handle<i::JSObject> obj =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::WasmModuleObject> obj =
+ i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
- i::handle(i::WasmCompiledModule::cast(obj->GetEmbedderField(0)));
+ i::handle(i::WasmCompiledModule::cast(obj->compiled_module()));
std::unique_ptr<i::ScriptData> script_data =
i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
@@ -7777,13 +7836,6 @@ MaybeLocal<WasmCompiledModule> WasmModuleObjectBuilder::Finish() {
return WasmCompiledModule::Compile(isolate_, wire_bytes.get(), total_size_);
}
-WasmModuleObjectBuilder&
-WasmModuleObjectBuilder::operator=(WasmModuleObjectBuilder&& src) {
- received_buffers_ = std::move(src.received_buffers_);
- total_size_ = src.total_size_;
- return *this;
-}
-
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@@ -7815,6 +7867,11 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents;
+ contents.allocation_base_ = self->allocation_base();
+ contents.allocation_length_ = self->allocation_length();
+ contents.allocation_mode_ = self->has_guard_region()
+ ? Allocator::AllocationMode::kReservation
+ : Allocator::AllocationMode::kNormal;
contents.data_ = self->backing_store();
contents.byte_length_ = byte_length;
return contents;
@@ -8023,6 +8080,12 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
Contents contents;
contents.data_ = self->backing_store();
contents.byte_length_ = byte_length;
+ // SharedArrayBuffers never have guard regions, so their allocation and data
+ // are equivalent.
+ contents.allocation_base_ = self->backing_store();
+ contents.allocation_length_ = byte_length;
+ contents.allocation_mode_ =
+ ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
return contents;
}
@@ -8179,6 +8242,11 @@ void Isolate::ReportExternalAllocationLimitReached() {
heap->ReportExternalMemoryPressure();
}
+void Isolate::CheckMemoryPressure() {
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
+ if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
+ heap->CheckMemoryPressure();
+}
HeapProfiler* Isolate::GetHeapProfiler() {
i::HeapProfiler* heap_profiler =
@@ -8239,6 +8307,12 @@ v8::Local<v8::Context> Isolate::GetEnteredOrMicrotaskContext() {
return Utils::ToLocal(i::Handle<i::Context>::cast(last));
}
+v8::Local<v8::Context> Isolate::GetIncumbentContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Context> context = isolate->GetIncumbentContext();
+ return Utils::ToLocal(context);
+}
+
v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
ENTER_V8_DO_NOT_USE(isolate);
@@ -8294,6 +8368,12 @@ void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
isolate->heap()->SetEmbedderHeapTracer(tracer);
}
+void Isolate::SetGetExternallyAllocatedMemoryInBytesCallback(
+ GetExternallyAllocatedMemoryInBytesCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->SetGetExternallyAllocatedMemoryInBytesCallback(callback);
+}
+
void Isolate::TerminateExecution() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->stack_guard()->RequestTerminateExecution();
@@ -8392,16 +8472,17 @@ Isolate* IsolateNewImpl(internal::Isolate* isolate,
isolate->set_api_external_references(params.external_references);
isolate->set_allow_atomics_wait(params.allow_atomics_wait);
- if (params.host_import_module_dynamically_callback_ != nullptr) {
- isolate->SetHostImportModuleDynamicallyCallback(
- params.host_import_module_dynamically_callback_);
- }
-
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
+ base::ElapsedTimer timer;
+ if (i::FLAG_profile_deserialization) timer.Start();
isolate->Init(NULL);
+ if (i::FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ i::PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
+ }
}
return v8_isolate;
}
@@ -8446,6 +8527,11 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
isolate->SetAbortOnUncaughtExceptionCallback(callback);
}
+void Isolate::SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetHostImportModuleDynamicallyCallback(callback);
+}
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
@@ -8739,25 +8825,20 @@ void Isolate::SetUseCounterCallback(UseCounterCallback callback) {
void Isolate::SetCounterFunction(CounterLookupCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->stats_table()->SetCounterFunction(callback);
- isolate->InitializeLoggingAndCounters();
- isolate->counters()->ResetCounters();
+ isolate->counters()->ResetCounterFunction(callback);
}
void Isolate::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->stats_table()->SetCreateHistogramFunction(callback);
- isolate->InitializeLoggingAndCounters();
- isolate->counters()->ResetHistograms();
- isolate->counters()->InitializeHistograms();
+ isolate->counters()->ResetCreateHistogramFunction(callback);
}
void Isolate::SetAddHistogramSampleFunction(
AddHistogramSampleCallback callback) {
reinterpret_cast<i::Isolate*>(this)
- ->stats_table()
+ ->counters()
->SetAddHistogramSampleFunction(callback);
}
@@ -8893,6 +8974,13 @@ void Isolate::SetAllowCodeGenerationFromStringsCallback(
isolate->set_allow_code_gen_callback(callback);
}
+void Isolate::SetAllowCodeGenerationFromStringsCallback(
+ DeprecatedAllowCodeGenerationFromStringsCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_allow_code_gen_callback(
+ reinterpret_cast<AllowCodeGenerationFromStringsCallback>(callback));
+}
+
#define CALLBACK_SETTER(ExternalName, Type, InternalName) \
void Isolate::Set##ExternalName(Type callback) { \
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); \
@@ -8900,10 +8988,10 @@ void Isolate::SetAllowCodeGenerationFromStringsCallback(
}
CALLBACK_SETTER(WasmModuleCallback, ExtensionCallback, wasm_module_callback)
-CALLBACK_SETTER(WasmCompileCallback, ExtensionCallback, wasm_compile_callback)
CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback)
-CALLBACK_SETTER(WasmInstantiateCallback, ExtensionCallback,
- wasm_instantiate_callback)
+
+CALLBACK_SETTER(WasmCompileStreamingCallback, ApiImplementationCallback,
+ wasm_compile_streaming_callback)
bool Isolate::IsDead() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -9212,14 +9300,9 @@ void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
debug::SetLiveEditEnabled(isolate, enable);
}
-bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return internal_isolate->is_tail_call_elimination_enabled();
-}
+bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) { return false; }
void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_isolate->SetTailCallEliminationEnabled(enabled);
}
MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
@@ -9233,7 +9316,7 @@ void debug::SetContextId(Local<Context> context, int id) {
int debug::GetContextId(Local<Context> context) {
i::Object* value = Utils::OpenHandle(*context)->debug_context_id();
- return (value->IsSmi()) ? i::Smi::cast(value)->value() : 0;
+ return (value->IsSmi()) ? i::Smi::ToInt(value) : 0;
}
Local<Context> debug::GetDebugContext(Isolate* isolate) {
@@ -9413,7 +9496,7 @@ Maybe<int> debug::Script::ContextId() const {
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Object* value = script->context_data();
- if (value->IsSmi()) return Just(i::Smi::cast(value)->value());
+ if (value->IsSmi()) return Just(i::Smi::ToInt(value));
return Nothing<int>();
}
@@ -9437,7 +9520,7 @@ bool debug::Script::IsModule() const {
namespace {
int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
- return i::Smi::cast(array->get(index))->value();
+ return i::Smi::ToInt(array->get(index));
}
bool CompareBreakLocation(const i::BreakLocation& loc1,
@@ -9573,10 +9656,10 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
DCHECK_GT(compiled_module->module()->functions.size(), function_index);
i::wasm::WasmFunction& func =
compiled_module->module()->functions[function_index];
- DCHECK_GE(i::kMaxInt, func.code_start_offset);
- DCHECK_GE(i::kMaxInt, func.code_end_offset);
- return std::make_pair(static_cast<int>(func.code_start_offset),
- static_cast<int>(func.code_end_offset));
+ DCHECK_GE(i::kMaxInt, func.code.offset());
+ DCHECK_GE(i::kMaxInt, func.code.end_offset());
+ return std::make_pair(static_cast<int>(func.code.offset()),
+ static_cast<int>(func.code.end_offset()));
}
debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
@@ -9618,7 +9701,7 @@ void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
// TODO(kozyatinskiy): remove this GC once tests are dealt with.
- isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ isolate->heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask,
i::GarbageCollectionReason::kDebugger);
{
i::DisallowHeapAllocation no_gc;
@@ -9707,19 +9790,19 @@ v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
if (object->IsJSMapIterator()) {
i::Handle<i::JSMapIterator> iterator =
i::Handle<i::JSMapIterator>::cast(object);
- int iterator_kind = i::Smi::cast(iterator->kind())->value();
- *is_key_value = iterator_kind == i::JSMapIterator::kKindEntries;
+ MapAsArrayKind const kind =
+ static_cast<MapAsArrayKind>(iterator->map()->instance_type());
+ *is_key_value = kind == MapAsArrayKind::kEntries;
if (!iterator->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(MapAsArray(isolate, iterator->table(),
- i::Smi::cast(iterator->index())->value(),
- iterator_kind));
+ i::Smi::ToInt(iterator->index()), kind));
}
if (object->IsJSSetIterator()) {
i::Handle<i::JSSetIterator> it = i::Handle<i::JSSetIterator>::cast(object);
*is_key_value = false;
if (!it->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(
- SetAsArray(isolate, it->table(), i::Smi::cast(it->index())->value()));
+ SetAsArray(isolate, it->table(), i::Smi::ToInt(it->index())));
}
return v8::MaybeLocal<v8::Array>();
}
@@ -9745,11 +9828,13 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
case kObjectGetOwnPropertySymbols:
name = i::Builtins::kObjectGetOwnPropertySymbols;
break;
+ default:
+ UNREACHABLE();
}
i::Handle<i::Code> call_code(isolate->builtins()->builtin(name));
i::Handle<i::JSFunction> fun =
isolate->factory()->NewFunctionWithoutPrototype(
- isolate->factory()->empty_string(), call_code, false);
+ isolate->factory()->empty_string(), call_code, i::SLOPPY);
fun->shared()->DontAdaptArguments();
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -9774,6 +9859,18 @@ int debug::GetStackFrameId(v8::Local<v8::StackFrame> frame) {
return Utils::OpenHandle(*frame)->id();
}
+v8::Local<v8::StackTrace> debug::GetDetailedStackTrace(
+ Isolate* v8_isolate, v8::Local<v8::Object> v8_error) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::Handle<i::JSReceiver> error = Utils::OpenHandle(*v8_error);
+ if (!error->IsJSObject()) {
+ return v8::Local<v8::StackTrace>();
+ }
+ i::Handle<i::FixedArray> stack_trace =
+ isolate->GetDetailedStackTrace(i::Handle<i::JSObject>::cast(error));
+ return Utils::StackTraceToLocal(stack_trace);
+}
+
MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
i::Object* maybe_script = obj->function()->shared()->script();
@@ -9826,6 +9923,10 @@ Local<String> CpuProfileNode::GetFunctionName() const {
}
}
+int debug::Coverage::BlockData::StartOffset() const { return block_->start; }
+int debug::Coverage::BlockData::EndOffset() const { return block_->end; }
+uint32_t debug::Coverage::BlockData::Count() const { return block_->count; }
+
int debug::Coverage::FunctionData::StartOffset() const {
return function_->start;
}
@@ -9838,6 +9939,19 @@ MaybeLocal<String> debug::Coverage::FunctionData::Name() const {
return ToApiHandle<String>(function_->name);
}
+size_t debug::Coverage::FunctionData::BlockCount() const {
+ return function_->blocks.size();
+}
+
+bool debug::Coverage::FunctionData::HasBlockCoverage() const {
+ return function_->has_block_coverage;
+}
+
+debug::Coverage::BlockData debug::Coverage::FunctionData::GetBlockData(
+ size_t i) const {
+ return BlockData(&function_->blocks.at(i));
+}
+
Local<debug::Script> debug::Coverage::ScriptData::GetScript() const {
return ToApiHandle<debug::Script>(script_->script);
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 3b97e04fb2..e856a4408c 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -111,8 +111,7 @@ class RegisteredExtension {
V(NativeWeakMap, JSWeakMap) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
- V(Promise, JSPromise) \
- V(DynamicImportResult, JSPromise)
+ V(Promise, JSPromise)
class Utils {
public:
@@ -186,8 +185,6 @@ class Utils {
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
- static inline Local<DynamicImportResult> PromiseToDynamicImportResult(
- v8::internal::Handle<v8::internal::JSPromise> obj);
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
@@ -320,7 +317,6 @@ MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
-MAKE_TO_LOCAL(PromiseToDynamicImportResult, JSPromise, DynamicImportResult)
MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 1d91b20b2b..f3fcb8edb0 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -50,9 +50,7 @@ class Arguments BASE_EMBEDDED {
return Handle<S>(reinterpret_cast<S**>(value));
}
- int smi_at(int index) {
- return Smi::cast((*this)[index])->value();
- }
+ int smi_at(int index) { return Smi::ToInt((*this)[index]); }
double number_at(int index) {
return (*this)[index]->Number();
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index b5a59bb476..52218cc8ce 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -280,7 +280,7 @@ void RelocInfo::Visit(Heap* heap) {
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
- imm32_ = immediate;
+ value_.immediate = immediate;
rmode_ = rmode;
}
@@ -288,14 +288,14 @@ Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
+ value_.immediate = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
+ value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
@@ -400,11 +400,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- return !Assembler::IsMovW(Memory::int32_at(pc));
- } else {
- return !Assembler::IsMovImmed(Memory::int32_at(pc));
- }
+ return IsLdrPcImmediateOffset(Memory::int32_at(pc));
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 6932e97379..876af4e619 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -42,6 +42,7 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -372,19 +373,10 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors
-Operand::Operand(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
@@ -417,6 +409,21 @@ Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
rs_ = rs;
}
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
+}
+
+Operand Operand::EmbeddedCode(CodeStub* stub) {
+ Operand result(0, RelocInfo::CODE_TARGET);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(stub);
+ return result;
+}
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
rn_ = rn;
@@ -488,6 +495,25 @@ void NeonMemOperand::SetAlignment(int align) {
}
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) =
+ object.address();
+ }
+}
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -542,19 +568,19 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
pending_32_bit_constants_(),
- pending_64_bit_constants_() {
+ pending_64_bit_constants_(),
+ scratch_register_list_(ip.bit()) {
pending_32_bit_constants_.reserve(kMinNumPendingConstants);
pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
next_buffer_check_ = 0;
+ code_target_sharing_blocked_nesting_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
- ClearRecordedAstId();
if (CpuFeatures::IsSupported(VFP32DREGS)) {
// Register objects tend to be abstracted and survive between scopes, so
// it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
@@ -565,16 +591,19 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
Assembler::~Assembler() {
- DCHECK(const_pool_blocked_nesting_ == 0);
+ DCHECK_EQ(const_pool_blocked_nesting_, 0);
+ DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
}
-
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Emit constant pool if necessary.
int constant_pool_offset = 0;
CheckConstPool(true, false);
DCHECK(pending_32_bit_constants_.empty());
DCHECK(pending_64_bit_constants_.empty());
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -589,7 +618,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
@@ -1033,15 +1062,14 @@ void Assembler::next(Label* L) {
}
}
+namespace {
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
-static bool fits_shifter(uint32_t imm32,
- uint32_t* rotate_imm,
- uint32_t* immed_8,
- Instr* instr) {
+bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
+ Instr* instr) {
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
@@ -1055,7 +1083,7 @@ static bool fits_shifter(uint32_t imm32,
// immediate fits, change the opcode.
if (instr != NULL) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kMovMvnFlip;
return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
@@ -1069,7 +1097,7 @@ static bool fits_shifter(uint32_t imm32,
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
@@ -1077,13 +1105,13 @@ static bool fits_shifter(uint32_t imm32,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == AND ||
alu_insn == BIC) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAndBicFlip;
return true;
}
@@ -1093,26 +1121,23 @@ static bool fits_shifter(uint32_t imm32,
return false;
}
-
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Operand::must_output_reloc_info(const Assembler* assembler) const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != NULL && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled();
- } else if (RelocInfo::IsNone(rmode_)) {
+ } else if (RelocInfo::IsNone(rmode)) {
return false;
}
return true;
}
-
-static bool use_mov_immediate_load(const Operand& x,
- const Assembler* assembler) {
+bool UseMovImmediateLoad(const Operand& x, const Assembler* assembler) {
DCHECK(assembler != nullptr);
- if (x.must_output_reloc_info(assembler)) {
+ if (x.MustOutputRelocInfo(assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
} else {
@@ -1121,21 +1146,27 @@ static bool use_mov_immediate_load(const Operand& x,
}
}
+} // namespace
-int Operand::instructions_required(const Assembler* assembler,
- Instr instr) const {
+bool Operand::MustOutputRelocInfo(const Assembler* assembler) const {
+ return v8::internal::MustOutputRelocInfo(rmode_, assembler);
+}
+
+int Operand::InstructionsRequired(const Assembler* assembler,
+ Instr instr) const {
DCHECK(assembler != nullptr);
if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2;
- if (must_output_reloc_info(assembler) ||
- !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
+ if (MustOutputRelocInfo(assembler) ||
+ !FitsShifter(immediate(), &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. First account for the instructions required
// for the constant pool or immediate load
int instructions;
- if (use_mov_immediate_load(*this, assembler)) {
- // A movw / movt or mov / orr immediate load.
- instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
+ if (UseMovImmediateLoad(*this, assembler)) {
+ DCHECK(CpuFeatures::IsSupported(ARMv7));
+ // A movw / movt immediate load.
+ instructions = 2;
} else {
// A small constant pool load.
instructions = 1;
@@ -1154,22 +1185,18 @@ int Operand::instructions_required(const Assembler* assembler,
}
}
-
-void Assembler::move_32_bit_immediate(Register rd,
- const Operand& x,
- Condition cond) {
- uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_);
- }
-
- if (use_mov_immediate_load(x, this)) {
- // use_mov_immediate_load should return false when we need to output
+void Assembler::Move32BitImmediate(Register rd, const Operand& x,
+ Condition cond) {
+ if (UseMovImmediateLoad(x, this)) {
+ // UseMovImmediateLoad should return false when we need to output
// relocation info, since we prefer the constant pool for values that
// can be patched.
- DCHECK(!x.must_output_reloc_info(this));
- Register target = rd.code() == pc.code() ? ip : rd;
+ DCHECK(!x.MustOutputRelocInfo(this));
+ UseScratchRegisterScope temps(this);
+ // Re-use the destination register as a scratch if possible.
+ Register target = !rd.is(pc) ? rd : temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
+ uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
movw(target, imm32 & 0xffff, cond);
movt(target, imm32 >> 16, cond);
@@ -1178,59 +1205,100 @@ void Assembler::move_32_bit_immediate(Register rd,
mov(rd, target, LeaveCC, cond);
}
} else {
- ConstantPoolEntry::Access access =
- ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
- DCHECK(access == ConstantPoolEntry::REGULAR);
- USE(access);
+ int32_t immediate;
+ if (x.IsHeapObjectRequest()) {
+ RequestHeapObject(x.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = x.immediate();
+ }
+ ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate);
ldr(rd, MemOperand(pc, 0), cond);
}
}
-
-void Assembler::addrmod1(Instr instr,
- Register rn,
- Register rd,
- const Operand& x) {
+void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
+ const Operand& x) {
CheckBuffer();
+ uint32_t opcode = instr & kOpCodeMask;
+ bool set_flags = (instr & S) != 0;
+ DCHECK((opcode == ADC) || (opcode == ADD) || (opcode == AND) ||
+ (opcode == BIC) || (opcode == EOR) || (opcode == ORR) ||
+ (opcode == RSB) || (opcode == RSC) || (opcode == SBC) ||
+ (opcode == SUB) || (opcode == CMN) || (opcode == CMP) ||
+ (opcode == TEQ) || (opcode == TST) || (opcode == MOV) ||
+ (opcode == MVN));
+ // For comparison instructions, rd is not defined.
+ DCHECK(rd.is_valid() || (opcode == CMN) || (opcode == CMP) ||
+ (opcode == TEQ) || (opcode == TST));
+ // For move instructions, rn is not defined.
+ DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN));
+ DCHECK(rd.is_valid() || rn.is_valid());
DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
- if (!x.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (x.must_output_reloc_info(this) ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+ if (!AddrMode1TryEncodeOperand(&instr, x)) {
+ DCHECK(x.IsImmediate());
+ // Upon failure to encode, the opcode should not have changed.
+ DCHECK(opcode == (instr & kOpCodeMask));
+ Condition cond = Instruction::ConditionField(instr);
+ if ((opcode == MOV) && !set_flags) {
+ // Generate a sequence of mov instructions or a load from the constant
+ // pool only for a MOV instruction which does not set the flags.
+ DCHECK(!rn.is_valid());
+ Move32BitImmediate(rd, x, cond);
+ } else {
// The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'.
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = Instruction::ConditionField(instr);
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(rd, x, cond);
- } else {
- mov(ip, x, LeaveCC, cond);
- addrmod1(instr, rn, rd, Operand(ip));
- }
- return;
+ // it first to a scratch register and change the original instruction to
+ // use it.
+ UseScratchRegisterScope temps(this);
+ // Re-use the destination register if possible.
+ Register scratch =
+ (rd.is_valid() && !rd.is(rn) && !rd.is(pc)) ? rd : temps.Acquire();
+ mov(scratch, x, LeaveCC, cond);
+ AddrMode1(instr, rd, rn, Operand(scratch));
}
- instr |= I | rotate_imm*B8 | immed_8;
- } else if (!x.rs_.is_valid()) {
- // Immediate shift.
- instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ return;
+ }
+ if (!rd.is_valid()) {
+ // Emit a comparison instruction.
+ emit(instr | rn.code() * B16);
+ } else if (!rn.is_valid()) {
+ // Emit a move instruction. If the operand is a register-shifted register,
+ // then prevent the destination from being PC as this is unpredictable.
+ DCHECK(!x.IsRegisterShiftedRegister() || !rd.is(pc));
+ emit(instr | rd.code() * B12);
} else {
- // Register shift.
- DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
- instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+ emit(instr | rn.code() * B16 | rd.code() * B12);
}
- emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolFor(1);
}
}
+bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
+ if (x.IsImmediate()) {
+ // Immediate.
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if (x.MustOutputRelocInfo(this) ||
+ !FitsShifter(x.immediate(), &rotate_imm, &immed_8, instr)) {
+ // Let the caller handle generating multiple instructions.
+ return false;
+ }
+ *instr |= I | rotate_imm * B8 | immed_8;
+ } else if (x.IsImmediateShiftedRegister()) {
+ *instr |= x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code();
+ } else {
+ DCHECK(x.IsRegisterShiftedRegister());
+ // It is unpredictable to use the PC in this case.
+ DCHECK(!x.rm_.is(pc) && !x.rs_.is(pc));
+ *instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code();
+ }
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+ return true;
+}
+
+void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
@@ -1241,11 +1309,16 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
am ^= U;
}
if (!is_uint12(offset_12)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ // Immediate offset cannot be encoded, load it first to a scratch
+ // register.
+ UseScratchRegisterScope temps(this);
+ // Allow re-using rd for load instructions if possible.
+ bool is_load = (instr & L) == L;
+ Register scratch =
+ (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
+ mov(scratch, Operand(x.offset_), LeaveCC,
+ Instruction::ConditionField(instr));
+ AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
DCHECK(offset_12 >= 0); // no masking needed
@@ -1261,11 +1334,11 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
DCHECK(x.rn_.is_valid());
int am = x.am_;
+ bool is_load = (instr & L) == L;
if (!x.rm_.is_valid()) {
// Immediate offset.
int offset_8 = x.offset_;
@@ -1274,22 +1347,29 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
am ^= U;
}
if (!is_uint8(offset_8)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ // Immediate offset cannot be encoded, load it first to a scratch
+ // register.
+ UseScratchRegisterScope temps(this);
+ // Allow re-using rd for load instructions if possible.
+ Register scratch =
+ (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
+ mov(scratch, Operand(x.offset_), LeaveCC,
+ Instruction::ConditionField(instr));
+ AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
DCHECK(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
- // Scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed.
- DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+ // Scaled register offsets are not supported, compute the offset separately
+ // to a scratch register.
+ UseScratchRegisterScope temps(this);
+ // Allow re-using rd for load instructions if possible.
+ Register scratch =
+ (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
+ mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
} else {
// Register offset.
@@ -1300,16 +1380,14 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
DCHECK(rl != 0);
DCHECK(!rn.is(pc));
emit(instr | rn.code()*B16 | rl);
}
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
// Unindexed addressing is not encoded by this function.
DCHECK_EQ((B27 | B26),
(instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
@@ -1325,7 +1403,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- // Post-indexed addressing requires W == 1; different than in addrmod2/3.
+ // Post-indexed addressing requires W == 1; different than in AddrMode2/3.
if ((am & P) == 0)
am |= W;
@@ -1419,19 +1497,19 @@ void Assembler::blx(Label* L) {
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | AND | s, src1, dst, src2);
+ AddrMode1(cond | AND | s, dst, src1, src2);
}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | EOR | s, src1, dst, src2);
+ AddrMode1(cond | EOR | s, dst, src1, src2);
}
void Assembler::sub(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | SUB | s, src1, dst, src2);
+ AddrMode1(cond | SUB | s, dst, src1, src2);
}
void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
@@ -1441,13 +1519,13 @@ void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | RSB | s, src1, dst, src2);
+ AddrMode1(cond | RSB | s, dst, src1, src2);
}
void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | ADD | s, src1, dst, src2);
+ AddrMode1(cond | ADD | s, dst, src1, src2);
}
void Assembler::add(Register dst, Register src1, Register src2, SBit s,
@@ -1457,24 +1535,24 @@ void Assembler::add(Register dst, Register src1, Register src2, SBit s,
void Assembler::adc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | ADC | s, src1, dst, src2);
+ AddrMode1(cond | ADC | s, dst, src1, src2);
}
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | SBC | s, src1, dst, src2);
+ AddrMode1(cond | SBC | s, dst, src1, src2);
}
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | RSC | s, src1, dst, src2);
+ AddrMode1(cond | RSC | s, dst, src1, src2);
}
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TST | S, src1, r0, src2);
+ AddrMode1(cond | TST | S, no_reg, src1, src2);
}
void Assembler::tst(Register src1, Register src2, Condition cond) {
@@ -1482,12 +1560,12 @@ void Assembler::tst(Register src1, Register src2, Condition cond) {
}
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TEQ | S, src1, r0, src2);
+ AddrMode1(cond | TEQ | S, no_reg, src1, src2);
}
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMP | S, src1, r0, src2);
+ AddrMode1(cond | CMP | S, no_reg, src1, src2);
}
void Assembler::cmp(Register src1, Register src2, Condition cond) {
@@ -1502,13 +1580,13 @@ void Assembler::cmp_raw_immediate(
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMN | S, src1, r0, src2);
+ AddrMode1(cond | CMN | S, no_reg, src1, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | ORR | s, src1, dst, src2);
+ AddrMode1(cond | ORR | s, dst, src1, src2);
}
void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
@@ -1520,8 +1598,8 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
- DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
- addrmod1(cond | MOV | s, r0, dst, src);
+ DCHECK(!(src.IsRegister() && src.rm().is(dst) && s == LeaveCC && cond == al));
+ AddrMode1(cond | MOV | s, dst, no_reg, src);
}
void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
@@ -1581,17 +1659,17 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | BIC | s, src1, dst, src2);
+ AddrMode1(cond | BIC | s, dst, src1, src2);
}
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | MVN | s, r0, dst, src);
+ AddrMode1(cond | MVN | s, dst, no_reg, src);
}
void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) {
- if (src2.is_reg()) {
+ if (src2.IsRegister()) {
mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
@@ -1600,7 +1678,7 @@ void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) {
- if (src2.is_reg()) {
+ if (src2.IsRegister()) {
mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
@@ -1609,7 +1687,7 @@ void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) {
- if (src2.is_reg()) {
+ if (src2.IsRegister()) {
mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
@@ -1745,8 +1823,8 @@ void Assembler::usat(Register dst,
Condition cond) {
DCHECK(!dst.is(pc) && !src.rm_.is(pc));
DCHECK((satpos >= 0) && (satpos <= 31));
+ DCHECK(src.IsImmediateShiftedRegister());
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
- DCHECK(src.rs_.is(no_reg));
int sh = 0;
if (src.shift_op_ == ASR) {
@@ -1839,9 +1917,8 @@ void Assembler::pkhbt(Register dst,
// Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
+ DCHECK(src2.IsImmediateShiftedRegister());
DCHECK(!src2.rm().is(pc));
- DCHECK(!src2.rm().is(no_reg));
- DCHECK(src2.rs().is(no_reg));
DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
DCHECK(src2.shift_op() == LSL);
emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
@@ -1858,9 +1935,8 @@ void Assembler::pkhtb(Register dst,
// Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
+ DCHECK(src2.IsImmediateShiftedRegister());
DCHECK(!src2.rm().is(pc));
- DCHECK(!src2.rm().is(no_reg));
- DCHECK(src2.rs().is(no_reg));
DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
DCHECK(src2.shift_op() == ASR);
int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
@@ -2007,20 +2083,23 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr;
- if (!src.rm_.is_valid()) {
+ if (src.IsImmediate()) {
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_output_reloc_info(this) ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // Immediate operand cannot be encoded, load it first to register ip.
- move_32_bit_immediate(ip, src);
- msr(fields, Operand(ip), cond);
+ if (src.MustOutputRelocInfo(this) ||
+ !FitsShifter(src.immediate(), &rotate_imm, &immed_8, NULL)) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ // Immediate operand cannot be encoded, load it first to a scratch
+ // register.
+ Move32BitImmediate(scratch, src);
+ msr(fields, Operand(scratch), cond);
return;
}
instr = I | rotate_imm*B8 | immed_8;
} else {
- DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ DCHECK(src.IsRegister()); // Only rm is allowed.
instr = src.rm_.code();
}
emit(cond | instr | B24 | B21 | fields | 15*B12);
@@ -2029,42 +2108,42 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | L, dst, src);
+ AddrMode2(cond | B26 | L, dst, src);
}
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26, src, dst);
+ AddrMode2(cond | B26, src, dst);
}
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | B | L, dst, src);
+ AddrMode2(cond | B26 | B | L, dst, src);
}
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26 | B, src, dst);
+ AddrMode2(cond | B26 | B, src, dst);
}
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | H | B4, dst, src);
+ AddrMode3(cond | L | B7 | H | B4, dst, src);
}
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
- addrmod3(cond | B7 | H | B4, src, dst);
+ AddrMode3(cond | B7 | H | B4, src, dst);
}
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | B4, dst, src);
+ AddrMode3(cond | L | B7 | S6 | B4, dst, src);
}
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+ AddrMode3(cond | L | B7 | S6 | H | B4, dst, src);
}
@@ -2074,7 +2153,7 @@ void Assembler::ldrd(Register dst1, Register dst2,
DCHECK(!dst1.is(lr)); // r14.
DCHECK_EQ(0, dst1.code() % 2);
DCHECK_EQ(dst1.code() + 1, dst2.code());
- addrmod3(cond | B7 | B6 | B4, dst1, src);
+ AddrMode3(cond | B7 | B6 | B4, dst1, src);
}
@@ -2084,7 +2163,7 @@ void Assembler::strd(Register src1, Register src2,
DCHECK(!src1.is(lr)); // r14.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
- addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
+ AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Load/Store exclusive instructions.
@@ -2162,7 +2241,7 @@ void Assembler::ldm(BlockAddrMode am,
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
- addrmod4(cond | B27 | am | L, base, dst);
+ AddrMode4(cond | B27 | am | L, base, dst);
// Emit the constant pool after a function return implemented by ldm ..{..pc}.
if (cond == al && (dst & pc.bit()) != 0) {
@@ -2180,7 +2259,7 @@ void Assembler::stm(BlockAddrMode am,
Register base,
RegList src,
Condition cond) {
- addrmod4(cond | B27 | am, base, src);
+ AddrMode4(cond | B27 | am, base, src);
}
@@ -2318,7 +2397,7 @@ void Assembler::ldc(Coprocessor coproc,
const MemOperand& src,
LFlag l,
Condition cond) {
- addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+ AddrMode5(cond | B27 | B26 | l | L | coproc * B8, crd, src);
}
@@ -2370,15 +2449,18 @@ void Assembler::vldr(const DwVfpRegister dst,
emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- DCHECK(!base.is(ip));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ // Larger offsets must be handled by computing the correct address in a
+ // scratch register.
+ DCHECK(!base.is(scratch));
if (u == 1) {
- add(ip, base, Operand(offset));
+ add(scratch, base, Operand(offset));
} else {
- sub(ip, base, Operand(offset));
+ sub(scratch, base, Operand(offset));
}
- emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
+ emit(cond | 0xD * B24 | d * B22 | B20 | scratch.code() * B16 | vd * B12 |
+ 0xB * B8);
}
}
@@ -2389,9 +2471,11 @@ void Assembler::vldr(const DwVfpRegister dst,
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
- add(ip, operand.rn(),
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
- vldr(dst, ip, 0, cond);
+ vldr(dst, scratch, 0, cond);
} else {
vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -2419,15 +2503,18 @@ void Assembler::vldr(const SwVfpRegister dst,
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- DCHECK(!base.is(ip));
+ // Larger offsets must be handled by computing the correct address in a
+ // scratch register.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(!base.is(scratch));
if (u == 1) {
- add(ip, base, Operand(offset));
+ add(scratch, base, Operand(offset));
} else {
- sub(ip, base, Operand(offset));
+ sub(scratch, base, Operand(offset));
}
- emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ emit(cond | d * B22 | 0xD1 * B20 | scratch.code() * B16 | sd * B12 |
+ 0xA * B8);
}
}
@@ -2437,9 +2524,11 @@ void Assembler::vldr(const SwVfpRegister dst,
const Condition cond) {
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
- add(ip, operand.rn(),
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
- vldr(dst, ip, 0, cond);
+ vldr(dst, scratch, 0, cond);
} else {
vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -2469,15 +2558,18 @@ void Assembler::vstr(const DwVfpRegister src,
emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
((offset / 4) & 255));
} else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- DCHECK(!base.is(ip));
+ // Larger offsets must be handled by computing the correct address in a
+ // scratch register.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(!base.is(scratch));
if (u == 1) {
- add(ip, base, Operand(offset));
+ add(scratch, base, Operand(offset));
} else {
- sub(ip, base, Operand(offset));
+ sub(scratch, base, Operand(offset));
}
- emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
+ emit(cond | 0xD * B24 | d * B22 | scratch.code() * B16 | vd * B12 |
+ 0xB * B8);
}
}
@@ -2488,9 +2580,11 @@ void Assembler::vstr(const DwVfpRegister src,
DCHECK(VfpRegisterIsAvailable(src));
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
- add(ip, operand.rn(),
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
- vstr(src, ip, 0, cond);
+ vstr(src, scratch, 0, cond);
} else {
vstr(src, operand.rn(), operand.offset(), cond);
}
@@ -2518,15 +2612,18 @@ void Assembler::vstr(const SwVfpRegister src,
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- DCHECK(!base.is(ip));
+ // Larger offsets must be handled by computing the correct address in a
+ // scratch register.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(!base.is(scratch));
if (u == 1) {
- add(ip, base, Operand(offset));
+ add(scratch, base, Operand(offset));
} else {
- sub(ip, base, Operand(offset));
+ sub(scratch, base, Operand(offset));
}
- emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ emit(cond | d * B22 | 0xD0 * B20 | scratch.code() * B16 | sd * B12 |
+ 0xA * B8);
}
}
@@ -2536,9 +2633,11 @@ void Assembler::vstr(const SwVfpRegister src,
const Condition cond) {
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
- add(ip, operand.rn(),
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
- vstr(src, ip, 0, cond);
+ vstr(src, scratch, 0, cond);
} else {
vstr(src, operand.rn(), operand.offset(), cond);
}
@@ -2612,19 +2711,16 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
0xA*B8 | count);
}
-
-static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
+static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
+ uint64_t i = d.AsUint64();
*lo = i & 0xffffffff;
*hi = i >> 32;
}
-
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
-static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
+static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
// VMOV can accept an immediate of the form:
//
// +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
@@ -2670,10 +2766,10 @@ static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
return true;
}
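// [Editorial sketch, not part of this diff.] The immediate form quoted above
// means a double is representable when it equals +/- m * 2^(-n) with an
// integral m in [16, 31] and n in [0, 7]. A hypothetical checker for just that
// constraint (it does not build the bit-level encoding that
// FitsVmovFPImmediate produces):
static bool MatchesVmovImmediateForm(double d) {
  double magnitude = d < 0 ? -d : d;
  for (int n = 0; n <= 7; n++) {
    double scaled = magnitude * (1 << n);  // magnitude * 2^n
    if (scaled < 16.0 || scaled > 31.0) continue;
    if (scaled == static_cast<int>(scaled)) return true;  // integral m found
  }
  return false;
}
// Example: 1.0 (= 16 * 2^-4) and -2.5 (= 20 * 2^-3) match; 0.1 does not.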
-
-void Assembler::vmov(const SwVfpRegister dst, float imm) {
+void Assembler::vmov(const SwVfpRegister dst, Float32 imm) {
uint32_t enc;
- if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
+ if (CpuFeatures::IsSupported(VFPv3) &&
+ FitsVmovFPImmediate(Double(imm.get_scalar()), &enc)) {
CpuFeatureScope scope(this, VFPv3);
// The float can be encoded in the instruction.
//
@@ -2685,17 +2781,16 @@ void Assembler::vmov(const SwVfpRegister dst, float imm) {
dst.split_code(&vd, &d);
emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
} else {
- mov(ip, Operand(bit_cast<int32_t>(imm)));
- vmov(dst, ip);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(imm.get_bits()));
+ vmov(dst, scratch);
}
}
-
-void Assembler::vmov(const DwVfpRegister dst,
- double imm,
- const Register scratch) {
+void Assembler::vmov(const DwVfpRegister dst, Double imm,
+ const Register extra_scratch) {
DCHECK(VfpRegisterIsAvailable(dst));
- DCHECK(!scratch.is(ip));
uint32_t enc;
if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
CpuFeatureScope scope(this, VFPv3);
@@ -2725,42 +2820,42 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
- ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
- DCHECK(access == ConstantPoolEntry::REGULAR);
- USE(access);
+ ConstantPoolAddEntry(pc_offset(), imm);
vldr(dst, MemOperand(pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
if (lo == hi) {
// Move the low and high parts of the double to a D register in one
// instruction.
- mov(ip, Operand(lo));
- vmov(dst, ip, ip);
- } else if (scratch.is(no_reg)) {
- mov(ip, Operand(lo));
- vmov(dst, VmovIndexLo, ip);
+ mov(scratch, Operand(lo));
+ vmov(dst, scratch, scratch);
+ } else if (extra_scratch.is(no_reg)) {
+ // We only have one spare scratch register.
+ mov(scratch, Operand(lo));
+ vmov(dst, VmovIndexLo, scratch);
if (((lo & 0xffff) == (hi & 0xffff)) &&
CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
- movt(ip, hi >> 16);
+ movt(scratch, hi >> 16);
} else {
- mov(ip, Operand(hi));
+ mov(scratch, Operand(hi));
}
- vmov(dst, VmovIndexHi, ip);
+ vmov(dst, VmovIndexHi, scratch);
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
- mov(ip, Operand(lo));
- mov(scratch, Operand(hi));
- vmov(dst, ip, scratch);
+ mov(scratch, Operand(lo));
+ mov(extra_scratch, Operand(hi));
+ vmov(dst, scratch, extra_scratch);
}
}
}
-
void Assembler::vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
@@ -2898,7 +2993,6 @@ static bool IsSignedVFPType(VFPType type) {
return false;
default:
UNREACHABLE();
- return false;
}
}
@@ -2913,7 +3007,6 @@ static bool IsIntegerVFPType(VFPType type) {
return false;
default:
UNREACHABLE();
- return false;
}
}
@@ -2926,7 +3019,6 @@ static bool IsDoubleVFPType(VFPType type) {
return true;
default:
UNREACHABLE();
- return false;
}
}
@@ -4887,7 +4979,7 @@ int Assembler::DecodeShiftImm(Instr instr) {
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0;
uint32_t immed_8 = 0;
- bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
+ bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, NULL);
DCHECK(immed_fits);
USE(immed_fits);
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
@@ -4915,7 +5007,7 @@ bool Assembler::IsOrrImmed(Instr instr) {
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+ return FitsShifter(imm32, &dummy1, &dummy2, NULL);
}
@@ -4945,9 +5037,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -5019,7 +5109,6 @@ void Assembler::emit_code_stub_address(Code* stub) {
pc_ += sizeof(uint32_t);
}
-
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
@@ -5028,49 +5117,90 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- data = RecordedAstId().ToInt();
- ClearRecordedAstId();
- }
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
-
-ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
- RelocInfo::Mode rmode,
- intptr_t value) {
+void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
+ intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
- !(serializer_enabled() || rmode < RelocInfo::CELL);
+ (rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
- ConstantPoolEntry entry(position, value, sharing_ok);
+ ConstantPoolEntry entry(position, value,
+ sharing_ok || (rmode == RelocInfo::CODE_TARGET &&
+ IsCodeTargetSharingAllowed()));
+
+ bool shared = false;
+ if (sharing_ok) {
+ // Merge the constant, if possible.
+ for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
+ ConstantPoolEntry& current_entry = pending_32_bit_constants_[i];
+ if (!current_entry.sharing_ok()) continue;
+ if (entry.value() == current_entry.value()) {
+ entry.set_merged_index(i);
+ shared = true;
+ break;
+ }
+ }
+ }
+
+ // Share entries if allowed and possible.
+ // Null-values are placeholders and must be ignored.
+ if (rmode == RelocInfo::CODE_TARGET && IsCodeTargetSharingAllowed() &&
+ value != 0) {
+ // Sharing entries here relies on canonicalized handles - without them, we
+ // will miss the optimisation opportunity.
+ Address handle_address = reinterpret_cast<Address>(value);
+ auto existing = handle_to_index_map_.find(handle_address);
+ if (existing != handle_to_index_map_.end()) {
+ int index = existing->second;
+ entry.set_merged_index(index);
+ shared = true;
+ } else {
+ // Keep track of this code handle.
+ handle_to_index_map_[handle_address] =
+ static_cast<int>(pending_32_bit_constants_.size());
+ }
+ }
+
pending_32_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
- return ConstantPoolEntry::REGULAR;
-}
+ // Emit relocation info.
+ if (MustOutputRelocInfo(rmode, this) && !shared) {
+ RecordRelocInfo(rmode);
+ }
+}
-ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
- double value) {
+void Assembler::ConstantPoolAddEntry(int position, Double value) {
DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
if (pending_64_bit_constants_.empty()) {
first_const_pool_64_use_ = position;
}
ConstantPoolEntry entry(position, value);
+
+ // Merge the constant, if possible.
+ for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
+ ConstantPoolEntry& current_entry = pending_64_bit_constants_[i];
+ DCHECK(current_entry.sharing_ok());
+ if (entry.value() == current_entry.value()) {
+ entry.set_merged_index(i);
+ break;
+ }
+ }
pending_64_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
- return ConstantPoolEntry::REGULAR;
}
@@ -5171,29 +5301,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int size_after_marker = estimated_size_after_marker;
for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
- DCHECK(!entry.is_merged());
- for (size_t j = 0; j < i; j++) {
- if (entry.value64() == pending_64_bit_constants_[j].value64()) {
- DCHECK(!pending_64_bit_constants_[j].is_merged());
- entry.set_merged_index(j);
- size_after_marker -= kDoubleSize;
- break;
- }
- }
+ if (entry.is_merged()) size_after_marker -= kDoubleSize;
}
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
- DCHECK(!entry.is_merged());
- if (!entry.sharing_ok()) continue;
- for (size_t j = 0; j < i; j++) {
- if (entry.value() == pending_32_bit_constants_[j].value()) {
- DCHECK(!pending_32_bit_constants_[j].is_merged());
- entry.set_merged_index(j);
- size_after_marker -= kPointerSize;
- break;
- }
- }
+ if (entry.is_merged()) size_after_marker -= kPointerSize;
}
int size = size_up_to_marker + size_after_marker;
@@ -5292,6 +5405,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
pending_32_bit_constants_.clear();
pending_64_bit_constants_.clear();
+ handle_to_index_map_.clear();
+
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
@@ -5333,6 +5448,22 @@ void PatchingAssembler::FlushICache(Isolate* isolate) {
Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
}
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : available_(assembler->GetScratchRegisterList()),
+ old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *available_ = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ DCHECK(available_ != nullptr);
+ DCHECK(*available_ != 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
+ *available_ &= ~(1UL << index);
+ return Register::from_code(index);
+}
+
} // namespace internal
} // namespace v8
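This file's scratch-register handling now goes through the UseScratchRegisterScope defined just above: a scope remembers the assembler's RegList on entry, Acquire() hands out the lowest available register, and the destructor restores the list. The following standalone sketch (an editorial illustration, not part of the patch; all names are made up) mirrors that acquire/restore bookkeeping:

#include <cassert>
#include <cstdint>

// Stand-ins for the assembler's RegList (one bit per register) and Register.
using RegList = uint32_t;
struct Register { int code; };

// Plays the role of Assembler::GetScratchRegisterList().
struct ScratchPool { RegList available; };

// Same bookkeeping as UseScratchRegisterScope: remember the list on entry,
// hand out the lowest available register, restore the list on destruction.
class ScratchScope {
 public:
  explicit ScratchScope(ScratchPool* pool)
      : available_(&pool->available), old_available_(pool->available) {}
  ~ScratchScope() { *available_ = old_available_; }

  Register Acquire() {
    assert(*available_ != 0);  // a free scratch register must remain
    int index = 0;
    while (((*available_ >> index) & 1u) == 0) index++;  // lowest set bit
    *available_ &= ~(1u << index);  // reserve it for this scope
    return Register{index};
  }

 private:
  RegList* available_;
  RegList old_available_;
};

int main() {
  ScratchPool pool{1u << 9};  // e.g. only the register with code 9 is free
  {
    ScratchScope temps(&pool);
    Register scratch = temps.Acquire();
    assert(scratch.code == 9);
    assert(pool.available == 0);  // reserved while the scope is alive
  }
  assert(pool.available == (1u << 9));  // restored when the scope ends
  return 0;
}

As the header comment below notes, nested scopes rely on destructors running in reverse order of construction.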
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index a628493723..dd61bf2abb 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -45,6 +45,8 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
+#include "src/double.h"
+#include "src/float.h"
namespace v8 {
namespace internal {
@@ -501,7 +503,7 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
- explicit Operand(Handle<Object> handle);
+ explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
// rm
@@ -524,18 +526,29 @@ class Operand BASE_EMBEDDED {
// rm <shift_op> rs
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedCode(CodeStub* stub);
+
// Return true if this is a register operand.
- INLINE(bool is_reg() const) {
+ bool IsRegister() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
+ // Return true if this is a register operand shifted with an immediate.
+ bool IsImmediateShiftedRegister() const {
+ return rm_.is_valid() && !rs_.is_valid();
+ }
+ // Return true if this is a register operand shifted with a register.
+ bool IsRegisterShiftedRegister() const {
+ return rm_.is_valid() && rs_.is_valid();
+ }
// Return the number of actual instructions required to implement the given
// instruction for this particular operand. This can be a single instruction,
- // if no load into the ip register is necessary, or anything between 2 and 4
- // instructions when we need to load from the constant pool (depending upon
+ // if no load into a scratch register is necessary, or anything between 2 and
+ // 4 instructions when we need to load from the constant pool (depending upon
// whether the constant pool entry is in the small or extended section). If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
@@ -543,24 +556,46 @@ class Operand BASE_EMBEDDED {
//
// The value returned is only valid as long as no entries are added to the
// constant pool between this call and the actual instruction being emitted.
- int instructions_required(const Assembler* assembler, Instr instr = 0) const;
- bool must_output_reloc_info(const Assembler* assembler) const;
+ int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
+ bool MustOutputRelocInfo(const Assembler* assembler) const;
inline int32_t immediate() const {
- DCHECK(!rm_.is_valid());
- return imm32_;
+ DCHECK(IsImmediate());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+ }
+ bool IsImmediate() const {
+ return !rm_.is_valid();
+ }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
}
Register rm() const { return rm_; }
Register rs() const { return rs_; }
ShiftOp shift_op() const { return shift_op_; }
+
private:
Register rm_;
Register rs_;
ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- int32_t imm32_; // valid if rm_ == no_reg
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int32_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
@@ -573,8 +608,9 @@ class MemOperand BASE_EMBEDDED {
// [rn +/- offset] Offset/NegOffset
// [rn +/- offset]! PreIndex/NegPreIndex
// [rn], +/- offset PostIndex/NegPostIndex
- // offset is any signed 32-bit value; offset is first loaded to register ip if
- // it does not fit the addressing mode (12-bit unsigned and sign bit)
+ // offset is any signed 32-bit value; offset is first loaded to a scratch
+ // register if it does not fit the addressing mode (12-bit unsigned and sign
+ // bit)
explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
// [rn +/- rm] Offset/NegOffset
@@ -703,7 +739,7 @@ class Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -789,6 +825,8 @@ class Assembler : public AssemblerBase {
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
// ---------------------------------------------------------------------------
// Code generation
@@ -1131,10 +1169,10 @@ class Assembler : public AssemblerBase {
SwVfpRegister last,
Condition cond = al);
- void vmov(const SwVfpRegister dst, float imm);
+ void vmov(const SwVfpRegister dst, Float32 imm);
void vmov(const DwVfpRegister dst,
- double imm,
- const Register scratch = no_reg);
+ Double imm,
+ const Register extra_scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
@@ -1491,24 +1529,40 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
- // Debugging
+ // Class for blocking sharing of code targets in constant pool.
+ class BlockCodeTargetSharingScope {
+ public:
+ explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
+ Open(assem);
+ }
+ // This constructor does not initialize the scope. The user needs to
+ // explicitly call Open() before using it.
+ BlockCodeTargetSharingScope() : assem_(nullptr) {}
+ ~BlockCodeTargetSharingScope() {
+ Close();
+ }
+ void Open(Assembler* assem) {
+ DCHECK_NULL(assem_);
+ DCHECK_NOT_NULL(assem);
+ assem_ = assem;
+ assem_->StartBlockCodeTargetSharing();
+ }
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode);
+ private:
+ void Close() {
+ if (assem_ != nullptr) {
+ assem_->EndBlockCodeTargetSharing();
+ }
+ }
+ Assembler* assem_;
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- DCHECK(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
+ DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
+ };
- TypeFeedbackId RecordedAstId() {
- DCHECK(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
+ // Debugging
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1636,11 +1690,6 @@ class Assembler : public AssemblerBase {
}
protected:
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos
@@ -1649,8 +1698,22 @@ class Assembler : public AssemblerBase {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
+ // Prevent sharing of code target constant pool entries until
+ // EndBlockCodeTargetSharing is called. Calls to this function can be nested
+ // but must be followed by an equal number of calls to
+ // EndBlockCodeTargetSharing.
+ void StartBlockCodeTargetSharing() {
+ ++code_target_sharing_blocked_nesting_;
+ }
+
+ // Resume sharing of constant pool code target entries. Needs to be called
+ // as many times as StartBlockCodeTargetSharing to have an effect.
+ void EndBlockCodeTargetSharing() {
+ --code_target_sharing_blocked_nesting_;
+ }
+
// Prevent constant pool emission until EndBlockConstPool is called.
- // Call to this function can be nested but must be followed by an equal
+ // Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
void StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
@@ -1660,7 +1723,7 @@ class Assembler : public AssemblerBase {
}
}
- // Resume constant pool emission. Need to be called as many time as
+ // Resume constant pool emission. Needs to be called as many times as
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
@@ -1726,6 +1789,12 @@ class Assembler : public AssemblerBase {
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
+ // Map of address of handle to index in pending_32_bit_constants_.
+ std::map<Address, int> handle_to_index_map_;
+
+ // Scratch registers available for use by the Assembler.
+ RegList scratch_register_list_;
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1749,6 +1818,11 @@ class Assembler : public AssemblerBase {
static constexpr int kCheckPoolIntervalInst = 32;
static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
+ // Sharing of code target entries may be blocked in some code sequences.
+ int code_target_sharing_blocked_nesting_;
+ bool IsCodeTargetSharingAllowed() const {
+ return code_target_sharing_blocked_nesting_ == 0;
+ }
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -1766,16 +1840,21 @@ class Assembler : public AssemblerBase {
void GrowBuffer();
// 32-bit immediate values
- void move_32_bit_immediate(Register rd,
- const Operand& x,
- Condition cond = al);
+ void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
// Instruction generation
- void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
- void addrmod2(Instr instr, Register rd, const MemOperand& x);
- void addrmod3(Instr instr, Register rd, const MemOperand& x);
- void addrmod4(Instr instr, Register rn, RegList rl);
- void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+ void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
+ // Attempt to encode operand |x| for instruction |instr| and return true on
+ // success. The result will be encoded in |instr| directly. This method may
+ // change the opcode if deemed beneficial, for instance, MOV may be turned
+ // into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this method
+ // may fail is that the operand is an immediate that cannot be encoded.
+ bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);
+
+ void AddrMode2(Instr instr, Register rd, const MemOperand& x);
+ void AddrMode3(Instr instr, Register rd, const MemOperand& x);
+ void AddrMode4(Instr instr, Register rn, RegList rl);
+ void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);
// Labels
void print(Label* L);
@@ -1784,15 +1863,28 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
- ConstantPoolEntry::Access ConstantPoolAddEntry(int position,
- RelocInfo::Mode rmode,
- intptr_t value);
- ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value);
+ void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
+ intptr_t value);
+ void ConstantPoolAddEntry(int position, Double value);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
+ friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
+
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void RequestHeapObject(HeapObjectRequest request);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
};
constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
@@ -1811,6 +1903,29 @@ class PatchingAssembler : public Assembler {
void FlushICache(Isolate* isolate);
};
+// This scope utility allows scratch registers to be managed safely. The
+// Assembler's GetScratchRegisterList() is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the Assembler's list will be restored to its original
+// state, even if the list is modified by some other means. Note that this scope
+// can be nested but the destructors need to run in the reverse order of the
+// constructors. We do not have assertions for this.
+class UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ // Take a register from the list and return it.
+ Register Acquire();
+
+ private:
+ // Currently available scratch registers.
+ RegList* available_;
+ // Available scratch registers at the start of this scope.
+ RegList old_available_;
+};
} // namespace internal
} // namespace v8
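Constant pool handling changes in both files above: ConstantPoolAddEntry now merges duplicate values at the point where an entry is recorded (see assembler-arm.cc), and this header adds handle_to_index_map_ so code target entries can be shared as well. A simplified sketch of the dedup step, using stand-in types and hypothetical names rather than the real ConstantPoolEntry API:

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified pending-constant entry: the emitting position, the value,
// whether the value may be shared, and the index of an earlier identical
// entry it reuses (or -1 if it stands alone).
struct PendingEntry {
  int position;
  intptr_t value;
  bool sharing_ok;
  int merged_index;
  bool is_merged() const { return merged_index >= 0; }
};

// Mirrors the merging loop: a shareable duplicate records the index of its
// first occurrence instead of claiming another pool slot.
int AddEntry(std::vector<PendingEntry>& pending, int position, intptr_t value,
             bool sharing_ok) {
  PendingEntry entry{position, value, sharing_ok, -1};
  if (sharing_ok) {
    for (size_t i = 0; i < pending.size(); i++) {
      if (!pending[i].sharing_ok) continue;
      if (pending[i].value == value) {
        entry.merged_index = static_cast<int>(i);
        break;
      }
    }
  }
  pending.push_back(entry);
  return static_cast<int>(pending.size()) - 1;
}

int main() {
  std::vector<PendingEntry> pending;
  AddEntry(pending, 0, 0x1234, true);
  int dup = AddEntry(pending, 8, 0x1234, true);     // same value, shareable
  int priv = AddEntry(pending, 16, 0x1234, false);  // sharing not allowed
  assert(pending[dup].is_merged() && pending[dup].merged_index == 0);
  assert(!pending[priv].is_merged());
  return 0;
}

In the patch itself, code targets take the same path via handle_to_index_map_, keyed on the handle address, which is why the comment in assembler-arm.cc notes that sharing relies on canonicalized handles; CheckConstPool then only subtracts the size of entries already marked as merged.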
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index fc59f4007e..61d52f58f4 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -12,6 +12,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/counters.h"
+#include "src/double.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -51,29 +52,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- r0.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments
- for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetRegisterParameter(i));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@@ -671,7 +649,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
const int fp_argument_count = 0;
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ PrepareCallCFunction(argument_count, fp_argument_count);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
@@ -710,7 +688,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
+ __ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()), 0, 2);
@@ -731,7 +709,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0, scratch2);
+ __ vmov(double_result, Double(1.0), scratch2);
// Get absolute value of exponent.
__ cmp(scratch, Operand::Zero());
@@ -746,7 +724,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Operand::Zero());
__ b(ge, &done);
- __ vmov(double_scratch, 1.0, scratch);
+ __ vmov(double_scratch, Double(1.0), scratch);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
@@ -761,7 +739,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
+ __ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
0, 2);
@@ -781,12 +759,9 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -847,7 +822,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -911,7 +886,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ mov(r3, Operand(pending_exception_address));
__ ldr(r3, MemOperand(r3));
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
@@ -940,15 +915,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set r0 to
// contain the current pending exception, don't clobber it.
@@ -956,7 +931,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
isolate());
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(3, 0, r0);
+ __ PrepareCallCFunction(3, 0);
__ mov(r0, Operand(0));
__ mov(r1, Operand(0));
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
@@ -1006,7 +981,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, 0.0);
+ __ vmov(kDoubleRegZero, Double(0.0));
// Get address of argv, see stm above.
// r0: code entry
@@ -1028,31 +1003,38 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
StackFrame::Type marker = type();
__ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
- __ mov(r5,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(r5, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ ldr(r5, MemOperand(r5));
- __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
- ip.bit());
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
+ // Push a bad frame pointer to fail if it is used.
+ __ mov(scratch, Operand(-1));
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
+ }
+
+ Register scratch = r6;
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(r6, Operand::Zero());
+ __ ldr(scratch, MemOperand(r5));
+ __ cmp(scratch, Operand::Zero());
__ b(ne, &non_outermost_js);
__ str(fp, MemOperand(r5));
- __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
- __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- __ push(ip);
+ __ push(scratch);
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -1069,10 +1051,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
+ __ mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kPendingExceptionAddress,
+ isolate())));
}
- __ str(r0, MemOperand(ip));
+ __ str(r0, MemOperand(scratch));
__ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
@@ -1098,16 +1081,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
- __ mov(ip, Operand(construct_entry));
+ __ mov(scratch, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
- __ mov(ip, Operand(entry));
+ __ mov(scratch, Operand(entry));
}
- __ ldr(ip, MemOperand(ip)); // deref address
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(scratch, MemOperand(scratch)); // deref address
+ __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
// Branch and link to JSEntryTrampoline.
- __ Call(ip);
+ __ Call(scratch);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -1125,9 +1108,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(r3);
- __ mov(ip,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- __ str(r3, MemOperand(ip));
+ __ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
+ __ str(r3, MemOperand(scratch));
// Reset the stack to the callee saved registers.
__ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -1228,8 +1211,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ LoadRoot(r4, Heap::kmegamorphic_symbolRootIndex);
+ __ str(r4, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function
@@ -1321,8 +1304,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&got_smi_index_);
// Check for index out of range.
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(index_));
+ __ ldr(result_, FieldMemOperand(object_, String::kLengthOffset));
+ __ cmp(result_, Operand(index_));
__ b(ls, index_out_of_range_);
__ SmiUntag(index_);
@@ -1487,37 +1470,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : left
- // -- r0 : right
- // -- lr : return address
- // -----------------------------------
-
- // Load r2 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(r2, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, kExpectedAllocationSite);
- __ push(r2);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
- __ cmp(r2, ip);
- __ pop(r2);
- __ Assert(eq, kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -1852,22 +1804,22 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+ Register scratch = r2;
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op())));
- __ push(ip);
+ __ mov(scratch, Operand(Smi::FromInt(op())));
+ __ push(scratch);
__ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
__ Pop(r1, r0);
}
-
- __ Jump(r2);
+ __ Jump(scratch);
}
@@ -1949,7 +1901,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Restore the properties.
__ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask =
@@ -1957,7 +1909,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
r2.bit() | r1.bit() | r0.bit());
__ stm(db_w, sp, spill_mask);
- __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ mov(r1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2148,7 +2100,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ __ PrepareCallCFunction(argument_count);
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
DCHECK(!address.is(regs_.object()));
@@ -2173,10 +2125,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -2190,6 +2143,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
+#endif
// Get the value from the slot.
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -2238,20 +2192,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ ldr(r1, MemOperand(fp, parameter_count_offset));
- if (function_mode() == JS_FUNCTION_STUB_MODE) {
- __ add(r1, r1, Operand(1));
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ tasm->MaybeCheckConstPool();
+ PredictableCodeSizeScope predictable(tasm);
+ predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
+ tasm->push(lr);
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ tasm->pop(lr);
}
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, r1);
- __ Ret();
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -2259,8 +2209,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
- predictable.ExpectSize(masm->CallStubSize(&stub) +
- 2 * Assembler::kInstrSize);
+ predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@@ -2300,26 +2249,31 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(r5, sp);
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ and_(sp, sp, Operand(-frame_alignment));
}
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+
#if V8_HOST_ARCH_ARM
- int32_t entry_hook =
- reinterpret_cast<int32_t>(isolate()->function_entry_hook());
- __ mov(ip, Operand(entry_hook));
+ int32_t entry_hook =
+ reinterpret_cast<int32_t>(isolate()->function_entry_hook());
+ __ mov(scratch, Operand(entry_hook));
#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ mov(ip, Operand(ExternalReference(&dispatcher,
- ExternalReference::BUILTIN_CALL,
- isolate())));
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ mov(scratch,
+ Operand(ExternalReference(
+ &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
#endif
- __ Call(ip);
+ __ Call(scratch);
+ }
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
@@ -2338,8 +2292,8 @@ static void CreateArrayDispatch(MacroAssembler* masm,
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
@@ -2362,24 +2316,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// r0 - number of arguments
// r1 - constructor?
// sp[0] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ tst(r3, Operand(1));
- __ b(ne, &normal_sequence);
- }
-
- // look at the first argument
- __ ldr(r5, MemOperand(sp, 0));
- __ cmp(r5, Operand::Zero());
- __ b(eq, &normal_sequence);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2389,13 +2331,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ // is the low bit set? If so, we are holey and that is good.
+ Label normal_sequence;
+ __ tst(r3, Operand(1));
+ __ b(ne, &normal_sequence);
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
@@ -2410,13 +2351,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+ __ ldr(r4, FieldMemOperand(
+ r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+ __ str(r4, FieldMemOperand(
+ r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
@@ -2434,13 +2377,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2454,7 +2397,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2522,7 +2465,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
+ __ ldr(r3, FieldMemOperand(
+ r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(r3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2596,21 +2540,21 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
- __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
+ __ cmp(r3, Operand(HOLEY_ELEMENTS));
__ Assert(eq,
kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
- __ cmp(r3, Operand(FAST_ELEMENTS));
+ __ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -2666,7 +2610,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r0);
+ __ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(ExternalReference::log_enter_external_function(isolate),
1);
@@ -2682,7 +2626,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r0);
+ __ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(ExternalReference::log_leave_external_function(isolate),
1);
@@ -2707,8 +2651,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
__ sub(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
- __ ldr(ip, MemOperand(r9, kLimitOffset));
- __ cmp(r5, ip);
+ __ ldr(r6, MemOperand(r9, kLimitOffset));
+ __ cmp(r5, r6);
__ b(ne, &delete_allocated_handles);
// Leave the API exit frame.
@@ -2727,8 +2671,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ ldr(r5, MemOperand(ip));
+ __ mov(r6, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
@@ -2742,7 +2686,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&delete_allocated_handles);
__ str(r5, MemOperand(r9, kLimitOffset));
__ mov(r4, r0);
- __ PrepareCallCFunction(1, r5);
+ __ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
1);
@@ -2798,20 +2742,22 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// call data
__ push(call_data);
- Register scratch = call_data;
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Register scratch0 = call_data;
+ Register scratch1 = r5;
+ __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
// return value
- __ push(scratch);
+ __ push(scratch0);
// return value default
- __ push(scratch);
+ __ push(scratch0);
// isolate
- __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ push(scratch);
+ __ mov(scratch1,
+ Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ push(scratch1);
// holder
__ push(holder);
// Prepare arguments.
- __ mov(scratch, sp);
+ __ mov(scratch0, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -2820,18 +2766,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
+ DCHECK(!api_function_address.is(r0) && !scratch0.is(r0));
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
- __ str(scratch, MemOperand(r0, 0 * kPointerSize));
+ __ str(scratch0, MemOperand(r0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
- __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
+ __ add(scratch1, scratch0,
+ Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
+ __ str(scratch1, MemOperand(r0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
- __ mov(ip, Operand(argc()));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
+ __ mov(scratch0, Operand(argc()));
+ __ str(scratch0, MemOperand(r0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
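
The array constructor stubs above depend on the renamed element kinds keeping their numeric layout: packed kinds sit on even values and each holey kind is one above its packed counterpart, so the packed-to-holey transition is a single increment (the add(r3, r3, Operand(1)) above) and "is it holey" is a test of the low bit. A minimal standalone sketch of that invariant, with enum values copied from the STATIC_ASSERTs in the hunk rather than from the V8 header:

#include <cassert>

// Mirrors the values asserted above; illustrative only, not v8/src/elements-kind.h.
enum ElementsKind {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4,
  HOLEY_DOUBLE_ELEMENTS = 5,
};

inline bool IsHoleyKind(ElementsKind kind) { return (kind & 1) != 0; }

inline ElementsKind ToHoleyKind(ElementsKind kind) {
  // Same effect as the generated "add r3, r3, #1" applied to a packed kind.
  return IsHoleyKind(kind) ? kind : static_cast<ElementsKind>(kind + 1);
}

int main() {
  assert(ToHoleyKind(PACKED_SMI_ELEMENTS) == HOLEY_SMI_ELEMENTS);
  assert(ToHoleyKind(PACKED_ELEMENTS) == HOLEY_ELEMENTS);
  assert(ToHoleyKind(PACKED_DOUBLE_ELEMENTS) == HOLEY_DOUBLE_ELEMENTS);
  assert(!IsHoleyKind(PACKED_ELEMENTS) && IsHoleyKind(HOLEY_ELEMENTS));
  return 0;
}
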
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index db6068df9e..1fc4dca381 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -143,7 +143,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
} else {
- Register temp2 = ip;
+ UseScratchRegisterScope temps(&masm);
+ Register temp2 = temps.Acquire();
Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC);
@@ -167,7 +168,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
@@ -219,8 +220,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
__ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
__ Ret();
} else {
+ UseScratchRegisterScope temps(&masm);
+
Register temp1 = r3;
- Register temp2 = ip;
+ Register temp2 = temps.Acquire();
Register temp3 = lr;
Register temp4 = r4;
Label loop;
@@ -256,7 +259,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@@ -284,7 +287,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
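
The codegen changes above follow a pattern that recurs throughout this commit: instead of hard-coding ip as an implicit scratch register, code acquires a temporary through UseScratchRegisterScope, so the assembler can track which scratch registers are in use and hand them back when the scope ends. A rough standalone model of that shape (an RAII checkout from a free list, assumed for illustration; this is not the real V8 class):

#include <cassert>
#include <cstdint>

class ScratchScopeModel {
 public:
  explicit ScratchScopeModel(uint32_t* available)
      : available_(available), acquired_(0) {}
  // Everything taken inside the scope is returned automatically on exit.
  ~ScratchScopeModel() { *available_ |= acquired_; }

  int Acquire() {
    for (int code = 0; code < 16; ++code) {
      uint32_t bit = 1u << code;
      if (*available_ & bit) {
        *available_ &= ~bit;
        acquired_ |= bit;
        return code;  // register number handed out for the scope's lifetime
      }
    }
    assert(false && "no scratch register left");
    return -1;
  }

 private:
  uint32_t* available_;
  uint32_t acquired_;
};

int main() {
  uint32_t available = 1u << 12;  // say only ip (r12) starts out free
  {
    ScratchScopeModel temps(&available);
    int scratch = temps.Acquire();
    assert(scratch == 12 && available == 0);  // reserved while the scope lives
  }
  assert(available == (1u << 12));  // released when the scope is destroyed
  return 0;
}
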
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index b33b977879..8138f53c7e 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -87,24 +87,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler());
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(r0.code(), params);
- output_frame->SetRegister(r1.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -129,9 +111,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS,
CpuFeatureScope::kDontCheckSupported);
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
+ __ CheckFor32DRegs(scratch);
// Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
@@ -148,8 +132,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
- __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- __ str(fp, MemOperand(ip));
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(ExternalReference(
+ IsolateAddressId::kCEntryFPAddress, isolate())));
+ __ str(fp, MemOperand(scratch));
+ }
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
@@ -167,7 +156,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6, r5);
+ __ PrepareCallCFunction(6);
__ mov(r0, Operand(0));
Label context_check;
__ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -248,7 +237,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute the output frame in the deoptimizer.
__ push(r0); // Preserve deoptimizer object across call.
// r0: deoptimizer object; r1: scratch.
- __ PrepareCallCFunction(1, r1);
+ __ PrepareCallCFunction(1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
@@ -311,15 +300,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
- __ pop(ip); // remove sp
- __ pop(ip); // remove lr
__ InitializeRootRegister();
- __ pop(ip); // remove pc
- __ pop(ip); // get continuation, leave pc on stack
- __ pop(lr);
- __ Jump(ip);
+ // Remove sp, lr and pc.
+ __ Drop(3);
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ pop(scratch); // get continuation, leave pc on stack
+ __ pop(lr);
+ __ Jump(scratch);
+ }
__ stop("Unreachable.");
}
@@ -332,13 +324,15 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
// need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(masm(), ARMv7);
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ movw(ip, i);
+ __ movw(scratch, i);
__ b(&done);
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
@@ -354,14 +348,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ mov(ip, Operand(i & 0xff)); // Set the low byte.
+ __ mov(scratch, Operand(i & 0xff)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
// Generate the secondary table, to set the high byte.
for (int high = 1; high <= high_fix_max; high++) {
__ bind(&high_fixes[high]);
- __ orr(ip, ip, Operand(high << 8));
+ __ orr(scratch, scratch, Operand(high << 8));
// If this isn't the last entry, emit a branch to the end of the table.
// The last entry can just fall through.
if (high < high_fix_max) __ b(&high_fixes[0]);
@@ -371,7 +365,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// through with no additional branch.
__ bind(&high_fixes[0]);
}
- __ push(ip);
+ __ push(scratch);
}
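
The pre-ARMv7 path of the deoptimization table above keeps each entry at a fixed two-instruction size by materialising only the low byte of the entry index (an 8-bit mov immediate) and branching to a shared secondary table that ORs in the high byte. A small standalone check of that split and reassembly:

#include <cassert>
#include <initializer_list>

int LowByte(int index) { return index & 0xff; }   // set by the primary entry
int HighSlot(int index) { return index >> 8; }    // selects the secondary entry
int Reassemble(int low, int high_slot) {
  return low | (high_slot << 8);                  // the orr in the secondary table
}

int main() {
  for (int index : {0, 1, 255, 256, 257, 1000, 4095}) {
    assert(Reassemble(LowByte(index), HighSlot(index)) == index);
  }
  return 0;
}
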
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 0b8fee10f4..7f63b193b0 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -343,7 +343,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 5;
}
UNREACHABLE();
- return -1;
}
@@ -416,8 +415,8 @@ void Decoder::FormatNeonList(int Vd, int type) {
void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "[r%d", Rn);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "[%s",
+ converter_.NameOfCPURegister(Rn));
if (align != 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
":%d", (1 << align) << 6);
@@ -427,8 +426,8 @@ void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
} else if (Rm == 13) {
Print("]!");
} else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "], r%d", Rm);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "], %s",
+ converter_.NameOfCPURegister(Rm));
}
}
@@ -686,7 +685,8 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return -1;
}
}
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p", addr);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p",
+ static_cast<void*>(addr));
return 1;
}
case 'S':
@@ -705,7 +705,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
}
UNREACHABLE();
- return -1;
}
@@ -1559,6 +1558,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
+ const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
if (instr->Bit(23) == 0) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
@@ -1570,31 +1570,30 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else {
int vd = instr->VFPNRegValue(kDoublePrecision);
- int rt = instr->RtValue();
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmov.8 d%d[%d], r%d", vd, i, rt);
+ "vmov.8 d%d[%d], %s", vd, i, rt_name);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmov.16 d%d[%d], r%d", vd, i, rt);
+ "vmov.16 d%d[%d], %s", vd, i, rt_name);
} else {
Unknown(instr);
}
}
} else {
int size = 32;
- if (instr->Bit(5) != 0)
+ if (instr->Bit(5) != 0) {
size = 16;
- else if (instr->Bit(22) != 0)
+ } else if (instr->Bit(22) != 0) {
size = 8;
+ }
int Vd = instr->VFPNRegValue(kSimd128Precision);
- int Rt = instr->RtValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vdup.%i q%d, r%d", size, Vd, Rt);
+ "vdup.%i q%d, %s", size, Vd, rt_name);
}
} else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
@@ -1607,19 +1606,20 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else {
char sign = instr->Bit(23) != 0 ? 'u' : 's';
- int rt = instr->RtValue();
+ const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
int vn = instr->VFPNRegValue(kDoublePrecision);
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmov.%c8 r%d, d%d[%d]", sign, rt, vn, i);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c8 %s, d%d[%d]",
+ sign, rt_name, vn, i);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 r%d, d%d[%d]",
- sign, rt, vn, i);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 %s, d%d[%d]",
+ sign, rt_name, vn, i);
} else {
Unknown(instr);
}
@@ -2424,17 +2424,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
case 0xA:
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
- int Rn = instr->Bits(19, 16);
+ const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
int offset = instr->Bits(11, 0);
if (offset == 0) {
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name);
} else if (instr->Bit(23) == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [r%d, #-%d]", Rn, offset);
+ "pld [%s, #-%d]", rn_name, offset);
} else {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [r%d, #+%d]", Rn, offset);
+ "pld [%s, #+%d]", rn_name, offset);
}
} else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
int option = instr->Bits(3, 0);
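
The disassembler hunks above switch from printing raw register numbers ("r%d") to NameOfCPURegister(), so listings show the conventional role of the high registers. For reference, the usual AAPCS-style naming on ARM (an approximation of what a name converter might print, not copied from V8):

#include <cstdio>

const char* RegisterName(int code) {
  // r9/r10 are also written sb/sl by some tools; shown here as plain names.
  static const char* kNames[16] = {"r0", "r1", "r2", "r3", "r4",  "r5",
                                   "r6", "r7", "r8", "r9", "r10", "fp",
                                   "ip", "sp", "lr", "pc"};
  return (code >= 0 && code < 16) ? kNames[code] : "<invalid>";
}

int main() {
  // Prints "pld [fp, #-4]" where the old format string gave "pld [r11, #-4]".
  std::printf("pld [%s, #-4]\n", RegisterName(11));
  return 0;
}
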
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 8529bb541c..b0e2c1454d 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -21,15 +21,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index f2fb703b9f..c042ade156 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -49,6 +49,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return r0; }
+
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
@@ -155,6 +157,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r2 : arguments list (FixedArray)
+ // r4 : arguments list length (untagged)
+ Register registers[] = {r1, r0, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -164,6 +176,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r2 : the object to spread
+ Register registers[] = {r1, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r1 : the target to call
+ // r2 : the arguments list
+ Register registers[] = {r1, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : arguments list (FixedArray)
+ // r4 : arguments list length (untagged)
+ Register registers[] = {r1, r3, r0, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -174,6 +214,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the object to spread
+ Register registers[] = {r1, r3, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the arguments list
+ Register registers[] = {r1, r3, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -378,8 +437,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r0, // the value to pass to the generator
r1, // the JSGeneratorObject to resume
- r2, // the resume mode (tagged)
- r3, // SuspendFlags (tagged)
+ r2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
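
The new varargs descriptors above fix the ARM register assignments for the Call/Construct-with-arguments-list paths. Restated as plain data for quick reference (register codes r0=0 ... r4=4, taken from the comments in the hunk; the descriptor code above is the authoritative source):

struct VarargsCallConvention {
  int target;          // r1 : the target to call
  int argument_count;  // r0 : number of arguments (on the stack, not including receiver)
  int arguments_list;  // r2 : arguments list (FixedArray)
  int list_length;     // r4 : arguments list length (untagged)
};

constexpr VarargsCallConvention kCallVarargsOnArm{1, 0, 2, 4};
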
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 7256086b1d..4fda72574a 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -14,6 +14,7 @@
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
+#include "src/double.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -25,55 +26,39 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- isolate_(isolate),
+ : TurboAssembler(isolate, buffer, size, create_code_object),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
}
+void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
-void MacroAssembler::Jump(Register target, Condition cond) {
- bx(target, cond);
-}
-
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
mov(pc, Operand(target, rmode), LeaveCC, cond);
}
-
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
- AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+ Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond);
}
-
-int MacroAssembler::CallSize(Register target, Condition cond) {
+int TurboAssembler::CallSize(Register target, Condition cond) {
return kInstrSize;
}
-
-void MacroAssembler::Call(Register target, Condition cond) {
+void TurboAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
@@ -82,22 +67,19 @@ void MacroAssembler::Call(Register target, Condition cond) {
DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
-
-int MacroAssembler::CallSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
+int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
Instr mov_instr = cond | MOV | LeaveCC;
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
return kInstrSize +
- mov_operand.instructions_required(this, mov_instr) * kInstrSize;
+ mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize;
}
-
-int MacroAssembler::CallStubSize(
- CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
- return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+int TurboAssembler::CallStubSize() {
+ return CallSize(Handle<Code>(), RelocInfo::CODE_TARGET, al);
}
-void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
TargetAddressStorageMode mode,
bool check_constant_pool) {
// Check if we have to emit the constant pool before we block it.
@@ -118,6 +100,9 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
int expected_size = CallSize(target, rmode, cond);
#endif
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -138,29 +123,17 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
}
}
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
+int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
- AllowDeferredHandleDereference using_raw_address;
- return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
+ return CallSize(code.address(), rmode, cond);
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id, Condition cond,
- TargetAddressStorageMode mode,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
- Label start;
- bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
// 'code' is always generated ARM code, never THUMB code
- AllowDeferredHandleDereference embedding_raw_address;
- Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
+ Call(code.address(), rmode, cond, mode);
}
void MacroAssembler::CallDeoptimizer(Address target) {
@@ -168,6 +141,9 @@ void MacroAssembler::CallDeoptimizer(Address target) {
uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
+ // Use ip directly instead of using UseScratchRegisterScope, as we do not
+ // preserve scratch registers across calls.
+
// We use blx, like a call, but it does not return here. The link register is
// used by the deoptimizer to work out what called it.
if (CpuFeatures::IsSupported(ARMv7)) {
@@ -198,22 +174,19 @@ int MacroAssembler::CallDeoptimizerSize() {
return 3 * kInstrSize;
}
-void MacroAssembler::Ret(Condition cond) {
- bx(lr, cond);
-}
-
+void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
-void MacroAssembler::Drop(int count, Condition cond) {
+void TurboAssembler::Drop(int count, Condition cond) {
if (count > 0) {
add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
}
}
-void MacroAssembler::Drop(Register count, Condition cond) {
+void TurboAssembler::Drop(Register count, Condition cond) {
add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}
-void MacroAssembler::Ret(int drop, Condition cond) {
+void TurboAssembler::Ret(int drop, Condition cond) {
Drop(drop, cond);
Ret(cond);
}
@@ -234,53 +207,63 @@ void MacroAssembler::Swap(Register reg1,
}
}
+void TurboAssembler::Call(Label* target) { bl(target); }
-void MacroAssembler::Call(Label* target) {
- bl(target);
+void TurboAssembler::Push(Handle<HeapObject> handle) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(handle));
+ push(scratch);
}
-
-void MacroAssembler::Push(Handle<Object> handle) {
- mov(ip, Operand(handle));
- push(ip);
+void TurboAssembler::Push(Smi* smi) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(smi));
+ push(scratch);
}
-void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+void MacroAssembler::PushObject(Handle<Object> handle) {
+ if (handle->IsHeapObject()) {
+ Push(Handle<HeapObject>::cast(handle));
+ } else {
+ Push(Smi::cast(*handle));
+ }
+}
-void MacroAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
+void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
+void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
mov(dst, Operand(value));
}
-
-void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+void TurboAssembler::Move(Register dst, Register src, Condition cond) {
if (!dst.is(src)) {
mov(dst, src, LeaveCC, cond);
}
}
-void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
+void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
Condition cond) {
if (!dst.is(src)) {
vmov(dst, src, cond);
}
}
-void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
+void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
Condition cond) {
if (!dst.is(src)) {
vmov(dst, src, cond);
}
}
-void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
+void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
}
-void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
DCHECK(VfpRegisterIsAvailable(srcdst0));
@@ -297,7 +280,7 @@ void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
}
}
-void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
+void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
if (!srcdst0.is(srcdst1)) {
vswp(srcdst0, srcdst1);
}
@@ -309,23 +292,24 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
CpuFeatureScope scope(this, ARMv7);
mls(dst, src1, src2, srcA, cond);
} else {
- DCHECK(!srcA.is(ip));
- mul(ip, src1, src2, LeaveCC, cond);
- sub(dst, srcA, ip, LeaveCC, cond);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(!srcA.is(scratch));
+ mul(scratch, src1, src2, LeaveCC, cond);
+ sub(dst, srcA, scratch, LeaveCC, cond);
}
}
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
- if (!src2.is_reg() &&
- !src2.must_output_reloc_info(this) &&
+ if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
src2.immediate() == 0) {
mov(dst, Operand::Zero(), LeaveCC, cond);
- } else if (!(src2.instructions_required(this) == 1) &&
- !src2.must_output_reloc_info(this) &&
+ } else if (!(src2.InstructionsRequired(this) == 1) &&
+ !src2.MustOutputRelocInfo(this) &&
CpuFeatures::IsSupported(ARMv7) &&
- base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
+ base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
CpuFeatureScope scope(this, ARMv7);
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
@@ -395,8 +379,7 @@ void MacroAssembler::Bfi(Register dst,
}
}
-
-void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
+void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
@@ -446,9 +429,7 @@ void MacroAssembler::Store(Register src,
}
}
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond) {
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@@ -522,9 +503,8 @@ void MacroAssembler::RecordWriteField(
}
}
-
-// Will clobber 4 registers: object, map, dst, ip. The
-// register 'object' contains a heap object pointer.
+// Will clobber 3 registers: object, map and dst. The register 'object' contains
+// a heap object pointer. A scratch register also needs to be available.
void MacroAssembler::RecordWriteForMap(Register object,
Register map,
Register dst,
@@ -541,8 +521,10 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
if (emit_debug_code()) {
- ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
- cmp(ip, map);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ cmp(scratch, map);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
@@ -582,7 +564,11 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+ {
+ UseScratchRegisterScope temps(this);
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
+ temps.Acquire(), dst);
+ }
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
@@ -592,10 +578,9 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
}
-
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
+// Will clobber 3 registers: object, address, and value. The register 'object'
+// contains a heap object pointer. The heap object tag is shifted away.
+// A scratch register also needs to be available.
void MacroAssembler::RecordWrite(
Register object,
Register address,
@@ -607,8 +592,10 @@ void MacroAssembler::RecordWrite(
PointersToHereCheck pointers_to_here_check_for_value) {
DCHECK(!object.is(value));
if (emit_debug_code()) {
- ldr(ip, MemOperand(address));
- cmp(ip, value);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ldr(scratch, MemOperand(address));
+ cmp(scratch, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
@@ -653,8 +640,11 @@ void MacroAssembler::RecordWrite(
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
- value);
+ {
+ UseScratchRegisterScope temps(this);
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
+ temps.Acquire(), value);
+ }
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
@@ -681,8 +671,8 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
if (emit_debug_code()) {
add(scratch, js_function, Operand(offset - kHeapObjectTag));
- ldr(ip, MemOperand(scratch));
- cmp(ip, code_entry);
+ ldr(scratch, MemOperand(scratch));
+ cmp(scratch, code_entry);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
@@ -706,7 +696,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
stm(db_w, sp, (kCallerSaved | lr.bit()));
int argument_count = 3;
- PrepareCallCFunction(argument_count, code_entry);
+ PrepareCallCFunction(argument_count);
mov(r0, js_function);
mov(r1, dst);
@@ -741,14 +731,16 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
bind(&ok);
}
// Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- ldr(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- str(address, MemOperand(scratch, kPointerSize, PostIndex));
- // Write back new top of buffer.
- str(scratch, MemOperand(ip));
+ {
+ UseScratchRegisterScope temps(this);
+ Register store_buffer = temps.Acquire();
+ mov(store_buffer, Operand(ExternalReference::store_buffer_top(isolate())));
+ ldr(scratch, MemOperand(store_buffer));
+ // Store pointer to buffer and increment buffer top.
+ str(address, MemOperand(scratch, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ str(scratch, MemOperand(store_buffer));
+ }
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
@@ -768,7 +760,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
-void MacroAssembler::PushCommonFrame(Register marker_reg) {
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
if (marker_reg.code() > fp.code()) {
stm(db_w, sp, fp.bit() | lr.bit());
@@ -797,7 +789,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
}
}
-void MacroAssembler::PushStandardFrame(Register function_reg) {
+void TurboAssembler::PushStandardFrame(Register function_reg) {
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
fp.bit() | lr.bit());
@@ -927,7 +919,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
}
-void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
@@ -936,38 +928,35 @@ void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
vsub(dst, src, kDoubleRegZero, cond);
}
-
-void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
+void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
+void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
const float src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const double src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
-
-void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
+void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Register fpscr_flags,
const Condition cond) {
@@ -976,7 +965,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
+void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
const float src2,
const Register fpscr_flags,
const Condition cond) {
@@ -985,8 +974,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond) {
@@ -995,7 +983,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const double src2,
const Register fpscr_flags,
const Condition cond) {
@@ -1004,23 +992,20 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
-
-void MacroAssembler::Vmov(const DwVfpRegister dst,
- const double imm,
+void MacroAssembler::Vmov(const DwVfpRegister dst, Double imm,
const Register scratch) {
- int64_t imm_bits = bit_cast<int64_t>(imm);
+ uint64_t imm_bits = imm.AsUint64();
// Handle special values first.
- if (imm_bits == bit_cast<int64_t>(0.0)) {
+ if (imm_bits == Double(0.0).AsUint64()) {
vmov(dst, kDoubleRegZero);
- } else if (imm_bits == bit_cast<int64_t>(-0.0)) {
+ } else if (imm_bits == Double(-0.0).AsUint64()) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
}
}
-
-void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
+void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
if (src.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
vmov(dst, loc.high());
@@ -1029,8 +1014,7 @@ void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
}
}
-
-void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
+void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
if (dst.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
vmov(loc.high(), src);
@@ -1039,8 +1023,7 @@ void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
}
}
-
-void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
+void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
if (src.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
vmov(dst, loc.low());
@@ -1049,8 +1032,7 @@ void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
}
}
-
-void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
+void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
if (dst.code() < 16) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
vmov(loc.low(), src);
@@ -1059,7 +1041,7 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
}
-void MacroAssembler::VmovExtended(Register dst, int src_code) {
+void TurboAssembler::VmovExtended(Register dst, int src_code) {
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
if (src_code & 0x1) {
@@ -1069,7 +1051,7 @@ void MacroAssembler::VmovExtended(Register dst, int src_code) {
}
}
-void MacroAssembler::VmovExtended(int dst_code, Register src) {
+void TurboAssembler::VmovExtended(int dst_code, Register src) {
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
@@ -1079,7 +1061,7 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) {
}
}
-void MacroAssembler::VmovExtended(int dst_code, int src_code) {
+void TurboAssembler::VmovExtended(int dst_code, int src_code) {
if (src_code == dst_code) return;
if (src_code < SwVfpRegister::kMaxNumRegisters &&
@@ -1143,7 +1125,7 @@ void MacroAssembler::VmovExtended(int dst_code, int src_code) {
}
}
-void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
+void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
if (dst_code < SwVfpRegister::kMaxNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
@@ -1155,7 +1137,7 @@ void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
}
}
-void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
+void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
if (src_code < SwVfpRegister::kMaxNumRegisters) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
@@ -1166,7 +1148,7 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
}
}
-void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
+void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
NeonDataType dt, int lane) {
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
@@ -1178,7 +1160,7 @@ void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
vmov(dt, dst, double_source, double_lane);
}
-void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
+void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
NeonDataType dt, int lane) {
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
@@ -1187,13 +1169,13 @@ void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
vmov(dt, dst, src, double_lane);
}
-void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
+void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
int lane) {
int s_code = src.code() * 4 + lane;
VmovExtended(dst.code(), s_code);
}
-void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
Register src_lane, NeonDataType dt, int lane) {
Move(dst, src);
int size = NeonSz(dt); // 0, 1, 2
@@ -1206,14 +1188,14 @@ void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
vmov(dt, double_dst, double_lane, src_lane);
}
-void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
SwVfpRegister src_lane, int lane) {
Move(dst, src);
int s_code = dst.code() * 4 + lane;
VmovExtended(s_code, src_lane.code());
}
-void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_high, src_low));
@@ -1236,7 +1218,7 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high,
bind(&done);
}
-void MacroAssembler::LslPair(Register dst_low, Register dst_high,
+void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_high, src_low));
@@ -1259,7 +1241,7 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high,
}
}
-void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1283,7 +1265,7 @@ void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
bind(&done);
}
-void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
+void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1306,7 +1288,7 @@ void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
}
}
-void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1329,7 +1311,7 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
bind(&done);
}
-void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
+void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -1352,12 +1334,14 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
}
}
-void MacroAssembler::StubPrologue(StackFrame::Type type) {
- mov(ip, Operand(StackFrame::TypeToMarker(type)));
- PushCommonFrame(ip);
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(StackFrame::TypeToMarker(type)));
+ PushCommonFrame(scratch);
}
-void MacroAssembler::Prologue(bool code_pre_aging) {
+void TurboAssembler::Prologue(bool code_pre_aging) {
{ PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
@@ -1381,20 +1365,20 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
+void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
- mov(ip, Operand(StackFrame::TypeToMarker(type)));
- PushCommonFrame(ip);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(StackFrame::TypeToMarker(type)));
+ PushCommonFrame(scratch);
if (type == StackFrame::INTERNAL) {
- mov(ip, Operand(CodeObject()));
- push(ip);
+ mov(scratch, Operand(CodeObject()));
+ push(scratch);
}
}
-
-int MacroAssembler::LeaveFrame(StackFrame::Type type) {
+int TurboAssembler::LeaveFrame(StackFrame::Type type) {
// r0: preserved
// r1: preserved
// r2: preserved
@@ -1424,31 +1408,35 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
- PushCommonFrame(ip);
+ mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
+ PushCommonFrame(scratch);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
- mov(ip, Operand::Zero());
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ mov(scratch, Operand::Zero());
+ str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- mov(ip, Operand(CodeObject()));
- str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+ mov(scratch, Operand(CodeObject()));
+ str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- str(cp, MemOperand(ip));
+ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
+ str(fp, MemOperand(scratch));
+ mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ str(cp, MemOperand(scratch));
// Optionally save all double registers.
if (save_doubles) {
- SaveFPRegs(sp, ip);
+ SaveFPRegs(sp, scratch);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
// DwVfpRegister::kMaxNumRegisters * kDoubleSize,
@@ -1460,17 +1448,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
}
// Set the exit frame sp value to point just before the return address
// location.
- add(ip, sp, Operand(kPointerSize));
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ add(scratch, sp, Operand(kPointerSize));
+ str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-int MacroAssembler::ActivationFrameAlignment() {
+int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1491,6 +1479,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
// Optionally restore all double registers.
if (save_doubles) {
@@ -1498,22 +1488,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
- RestoreFPRegs(r3, ip);
+ RestoreFPRegs(r3, scratch);
}
// Clear top frame.
mov(r3, Operand::Zero());
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- str(r3, MemOperand(ip));
+ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
+ str(r3, MemOperand(scratch));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
+ mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
+ isolate())));
+ ldr(cp, MemOperand(scratch));
}
#ifdef DEBUG
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- str(r3, MemOperand(ip));
+ mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
+ str(r3, MemOperand(scratch));
#endif
// Tear down the exit frame, pop the arguments, and return.
@@ -1528,8 +1521,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-
-void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
+void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1539,11 +1531,11 @@ void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
// On ARM this is just a synonym to make the purpose clear.
-void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
+void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
}
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
@@ -1778,7 +1770,6 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- SmiUntag(expected_reg);
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
@@ -1850,7 +1841,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
- mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ mov(r6,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
ldr(r5, MemOperand(r6));
push(r5);
@@ -1860,11 +1852,14 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r1);
- mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- str(r1, MemOperand(ip));
+ str(r1, MemOperand(scratch));
}
@@ -1907,7 +1902,6 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1919,7 +1913,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1939,13 +1933,12 @@ void MacroAssembler::Allocate(int object_size,
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- DCHECK(result.code() < ip.code());
+
+ UseScratchRegisterScope temps(this);
// Set up allocation top address register.
Register top_address = scratch1;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
+ Register alloc_limit = temps.Acquire();
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
@@ -1980,8 +1973,8 @@ void MacroAssembler::Allocate(int object_size,
}
// Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. We must preserve the ip register at this
- // point, so we cannot just use add().
+ // to calculate the new top. We have already acquired the scratch register at
+ // this point, so we cannot just use add().
DCHECK(object_size > 0);
Register source = result;
int shift = 0;
@@ -1993,7 +1986,7 @@ void MacroAssembler::Allocate(int object_size,
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
- DCHECK(bits_operand.instructions_required(this) == 1);
+ DCHECK(bits_operand.InstructionsRequired(this) == 1);
add(result_end, source, bits_operand);
source = result_end;
}
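The loop above folds the object size into result_end in pieces that each encode as a single ARM immediate (an 8-bit value at an even shift), since the usual scratch register is already spoken for. A standalone sketch of that chunking rule, with a hypothetical size and helper name:

// Standalone sketch (not V8 code): split a positive constant into chunks that
// each fit an ARM data-processing immediate, mirroring the loop in Allocate().
#include <cassert>
#include <cstdio>
#include <vector>

std::vector<int> SplitIntoArmImmediates(int object_size) {
  assert(object_size > 0);
  std::vector<int> chunks;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // Skip zero bit pairs so the shift stays even.
    } else {
      int bits = object_size & (0xff << shift);  // At most 8 bits at this shift.
      object_size -= bits;
      shift += 8;
      chunks.push_back(bits);  // Would become one single-instruction add.
    }
  }
  return chunks;
}

int main() {
  const int size = 0x1234;  // Hypothetical allocation size in bytes.
  int sum = 0;
  for (int chunk : SplitIntoArmImmediates(size)) {
    std::printf("add result_end, source, #0x%x\n", chunk);
    sum += chunk;
  }
  assert(sum == size);  // The chunks reassemble the original constant.
  return 0;
}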
@@ -2002,10 +1995,7 @@ void MacroAssembler::Allocate(int object_size,
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- str(result_end, MemOperand(top_address));
- }
+ str(result_end, MemOperand(top_address));
// Tag object.
add(result, result, Operand(kHeapObjectTag));
@@ -2015,7 +2005,6 @@ void MacroAssembler::Allocate(int object_size,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2029,8 +2018,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, ip));
- DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK(!AreAliased(object_size, result, scratch, result_end));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
@@ -2044,13 +2032,12 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- DCHECK(result.code() < ip.code());
+
+ UseScratchRegisterScope temps(this);
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
+ Register alloc_limit = temps.Acquire();
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -2100,118 +2087,9 @@ void MacroAssembler::Allocate(Register object_size, Register result,
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- str(result_end, MemOperand(top_address));
- }
-
- // Tag object.
- add(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, Register scratch,
- AllocationFlags flags) {
- // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
- // is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, ip));
- DCHECK(!AreAliased(result_end, result, scratch, ip));
- DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- Register top_address = scratch;
- mov(top_address, Operand(allocation_top));
- ldr(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top using result. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
- } else {
- add(result_end, result, Operand(object_size), SetCC);
- }
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- tst(result_end, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace);
- }
- // The top pointer is not updated for allocation folding dominators.
- str(result_end, MemOperand(top_address));
-
- add(result, result, Operand(kHeapObjectTag));
-}
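For reference, the DOUBLE_ALIGNMENT path in the removed code pads a 4-byte-aligned allocation top with a one-word filler so the object itself starts on an 8-byte boundary. A standalone sketch of that adjustment, assuming 4-byte pointers and 8-byte double alignment:

// Standalone sketch (not V8 code): when the current top is only 4-byte
// aligned, one filler word is written and the top advances past it, so the
// object starts double-aligned. Constants are illustrative.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPointerSize = 4;
constexpr uintptr_t kDoubleAlignmentMask = 8 - 1;

uintptr_t AlignAllocationTop(uintptr_t top, bool* filler_needed) {
  *filler_needed = (top & kDoubleAlignmentMask) != 0;  // Misaligned by 4 bytes?
  return *filler_needed ? top + kPointerSize : top;    // Skip the filler word.
}

int main() {
  bool filler = false;
  assert(AlignAllocationTop(0x1000, &filler) == 0x1000 && !filler);
  assert(AlignAllocationTop(0x1004, &filler) == 0x1008 && filler);
  return 0;
}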
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK(!AreAliased(result, scratch1, scratch2, ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
- ldr(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top using result. Object size may be in words so a shift is
- // required to get the number of bytes. We must preserve the ip register at
- // this point, so we cannot just use add().
- DCHECK(object_size > 0);
- Register source = result;
- int shift = 0;
- while (object_size != 0) {
- if (((object_size >> shift) & 0x03) == 0) {
- shift += 2;
- } else {
- int bits = object_size & (0xff << shift);
- object_size -= bits;
- shift += 8;
- Operand bits_operand(bits);
- DCHECK(bits_operand.instructions_required(this) == 1);
- add(result_end, source, bits_operand);
- source = result_end;
- }
- }
-
- // The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
+ // Tag object.
add(result, result, Operand(kHeapObjectTag));
}
@@ -2219,7 +2097,8 @@ void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
- const Register temp = type_reg.is(no_reg) ? ip : type_reg;
+ UseScratchRegisterScope temps(this);
+ const Register temp = type_reg.is(no_reg) ? temps.Acquire() : type_reg;
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, temp, type);
@@ -2229,11 +2108,6 @@ void MacroAssembler::CompareObjectType(Register object,
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
- // Registers map and type_reg can be ip. These two lines assert
- // that ip can be used with the two instructions (the constants
- // will never need ip).
- STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
@@ -2241,9 +2115,11 @@ void MacroAssembler::CompareInstanceType(Register map,
void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
- DCHECK(!obj.is(ip));
- LoadRoot(ip, index);
- cmp(obj, ip);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(!obj.is(scratch));
+ LoadRoot(scratch, index);
+ cmp(obj, scratch);
}
void MacroAssembler::CompareMap(Register obj,
@@ -2277,18 +2153,17 @@ void MacroAssembler::CheckMap(Register obj,
bind(&success);
}
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
+void MacroAssembler::CheckMap(Register obj, Register scratch,
+ Heap::RootListIndex index, Label* fail,
SmiCheckType smi_check_type) {
+ UseScratchRegisterScope temps(this);
+ Register root_register = temps.Acquire();
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(ip, index);
- cmp(scratch, ip);
+ LoadRoot(root_register, index);
+ cmp(scratch, root_register);
b(ne, fail);
}
@@ -2319,21 +2194,49 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
}
void MacroAssembler::CallStub(CodeStub* stub,
- TypeFeedbackId ast_id,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond,
- CAN_INLINE_TARGET_ADDRESS, false);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, CAN_INLINE_TARGET_ADDRESS,
+ false);
}
+void TurboAssembler::CallStubDelayed(CodeStub* stub) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+ Label start;
+ bind(&start);
+
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+ // constant pool availability (e.g., whether the constant pool is full or not).
+ int expected_size = CallStubSize();
+#endif
+
+ // Call sequence on V7 or later may be:
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or for pre-V7 or values that may be back-patched
+ // to avoid ICache flushes:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address
+
+ mov(ip, Operand::EmbeddedCode(stub));
+ blx(ip, al);
+
+ DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
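The comment in CallStubDelayed describes the ARMv7 sequence that builds the 32-bit call target from two 16-bit halves. A standalone sanity check of that split, with a hypothetical target address:

// Standalone sketch (not V8 code): movw loads the low 16 bits, movt the high
// 16 bits; together they reconstruct the full 32-bit call target.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t target = 0xBEEF1234;          // Hypothetical call target.
  uint16_t low = target & 0xFFFF;        // movw ip, #0x1234
  uint16_t high = target >> 16;          // movt ip, #0xBEEF
  uint32_t ip = (uint32_t{high} << 16) | low;
  assert(ip == target);
  return 0;
}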
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
@@ -2342,8 +2245,10 @@ void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
vmov(value.low(), smi);
vcvt_f64_s32(value, 1);
} else {
- SmiUntag(ip, smi);
- vmov(value.low(), ip);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ SmiUntag(scratch, smi);
+ vmov(value.low(), scratch);
vcvt_f64_s32(value, value.low());
}
}
@@ -2415,22 +2320,24 @@ void MacroAssembler::TryInt32Floor(Register result,
bind(&exception);
}
-void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
LowDwVfpRegister double_scratch = kScratchDoubleReg;
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
- sub(ip, result, Operand(1));
- cmp(ip, Operand(0x7ffffffe));
+ sub(scratch, result, Operand(1));
+ cmp(scratch, Operand(0x7ffffffe));
b(lt, done);
}
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DwVfpRegister double_input) {
+void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DwVfpRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -2440,8 +2347,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
- DoubleToIStub stub(isolate(), sp, result, 0, true, true);
- CallStub(&stub);
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
add(sp, sp, Operand(kDoubleSize));
pop(lr);
@@ -2449,48 +2355,6 @@ void MacroAssembler::TruncateDoubleToI(Register result,
bind(&done);
}
-
-void MacroAssembler::TruncateHeapNumberToI(Register result,
- Register object) {
- Label done;
- LowDwVfpRegister double_scratch = kScratchDoubleReg;
- DCHECK(!result.is(object));
-
- vldr(double_scratch,
- MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
- TryInlineTruncateDoubleToI(result, double_scratch, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- push(lr);
- DoubleToIStub stub(isolate(),
- object,
- result,
- HeapNumber::kValueOffset - kHeapObjectTag,
- true,
- true);
- CallStub(&stub);
- pop(lr);
-
- bind(&done);
-}
-
-
-void MacroAssembler::TruncateNumberToI(Register object,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Label* not_number) {
- Label done;
- DCHECK(!result.is(object));
-
- UntagAndJumpIfSmi(result, object, &done);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
- TruncateHeapNumberToI(result, object);
-
- bind(&done);
-}
-
-
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -2510,6 +2374,17 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
and_(dst, src, Operand((1 << num_least_bits) - 1));
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(f->nargs));
+ mov(r1, Operand(ExternalReference(f, isolate())));
+ CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
+}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
@@ -2567,16 +2442,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch1, Operand(value));
- mov(scratch2, Operand(ExternalReference(counter)));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0);
@@ -2600,15 +2465,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-
-void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code())
Check(cond, reason);
}
-
-
-void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+void TurboAssembler::Check(Condition cond, BailoutReason reason) {
Label L;
b(cond, &L);
Abort(reason);
@@ -2616,8 +2478,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
bind(&L);
}
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -2633,13 +2494,10 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
Move(r1, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
+ if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
@@ -2698,7 +2556,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-void MacroAssembler::InitializeRootRegister() {
+void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
@@ -2759,7 +2617,7 @@ void MacroAssembler::NonNegativeSmiTst(Register value) {
tst(value, Operand(kSmiTagMask | kSmiSignMask));
}
-void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
@@ -2795,6 +2653,17 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAFixedArray);
+ push(object);
+ CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFixedArray);
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -2821,8 +2690,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAGeneratorObject);
@@ -2832,17 +2700,14 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
push(object);
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label async, do_check;
- tst(flags, Operand(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
- b(ne, &async);
-
// Check if JSGeneratorObject
- CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE);
- jmp(&do_check);
+ Label do_check;
+ Register instance_type = object;
+ CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
+ b(eq, &do_check);
- bind(&async);
- // Check if JSAsyncGeneratorObject
- CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
+ cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
bind(&do_check);
// Restore generator object to register and perform assertion
@@ -2975,25 +2840,12 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
str(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
- Register end_address,
- Register filler) {
- Label loop, entry;
- b(&entry);
- bind(&loop);
- str(filler, MemOperand(current_address, kPointerSize, PostIndex));
- bind(&entry);
- cmp(current_address, end_address);
- b(lo, &loop);
-}
-
-
void MacroAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
@@ -3019,7 +2871,7 @@ void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
}
template <typename T>
-void MacroAssembler::FloatMaxHelper(T result, T left, T right,
+void TurboAssembler::FloatMaxHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
@@ -3050,7 +2902,7 @@ void MacroAssembler::FloatMaxHelper(T result, T left, T right,
}
template <typename T>
-void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
+void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
DCHECK(!left.is(right));
// ARMv8: At least one of left and right is a NaN.
@@ -3063,7 +2915,7 @@ void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
}
template <typename T>
-void MacroAssembler::FloatMinHelper(T result, T left, T right,
+void TurboAssembler::FloatMinHelper(T result, T left, T right,
Label* out_of_line) {
// This trivial case is caught sooner, so that the out-of-line code can be
// completely avoided.
@@ -3109,7 +2961,7 @@ void MacroAssembler::FloatMinHelper(T result, T left, T right,
}
template <typename T>
-void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
+void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
DCHECK(!left.is(right));
// At least one of left and right is a NaN. Use vadd to propagate the NaN
@@ -3117,42 +2969,42 @@ void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
vadd(result, left, right);
}
-void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
+void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right, Label* out_of_line) {
FloatMaxHelper(result, left, right, out_of_line);
}
-void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
+void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right, Label* out_of_line) {
FloatMinHelper(result, left, right, out_of_line);
}
-void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
+void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right, Label* out_of_line) {
FloatMaxHelper(result, left, right, out_of_line);
}
-void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
+void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right, Label* out_of_line) {
FloatMinHelper(result, left, right, out_of_line);
}
-void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right) {
FloatMaxOutOfLineHelper(result, left, right);
}
-void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
SwVfpRegister right) {
FloatMinOutOfLineHelper(result, left, right);
}
-void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right) {
FloatMaxOutOfLineHelper(result, left, right);
}
-void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
DwVfpRegister right) {
FloatMinOutOfLineHelper(result, left, right);
}
@@ -3174,8 +3026,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 4;
-
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (use_eabi_hardfloat()) {
@@ -3197,55 +3048,19 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
return stack_passed_words;
}
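CalculateStackPassedWords decides how many argument words end up on the stack. A rough standalone model of that computation, assuming four core argument registers, doubles in core register pairs for soft-float, and eight VFP argument registers for hard-float; the real code follows the AAPCS in more detail:

// Standalone sketch (not V8 code): an approximation of the spill count, under
// the assumptions stated above.
#include <cstdio>

int StackPassedWords(int num_reg_args, int num_double_args, bool hardfloat) {
  const int kCoreArgRegs = 4;    // r0-r3 (assumption for this sketch).
  const int kDoubleArgRegs = 8;  // d0-d7 (assumption for this sketch).
  int words = 0;
  if (hardfloat) {
    if (num_double_args > kDoubleArgRegs)
      words += 2 * (num_double_args - kDoubleArgRegs);
  } else {
    num_reg_args += 2 * num_double_args;  // Doubles use core register pairs.
  }
  if (num_reg_args > kCoreArgRegs) words += num_reg_args - kCoreArgRegs;
  return words;
}

int main() {
  std::printf("%d\n", StackPassedWords(6, 0, true));   // Two integer args spill.
  std::printf("%d\n", StackPassedWords(2, 3, false));  // 2 + 6 core slots, 4 spill.
  return 0;
}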
-
-void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- Label is_object;
- SmiTst(string);
- Check(ne, kNonObject);
-
- ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- cmp(ip, Operand(encoding_mask));
- Check(eq, kUnexpectedStringType);
-
- // The index is assumed to be untagged coming in, tag it to compare with the
- // string length without using a temp register, it is restored at the end of
- // this function.
- Label index_tag_ok, index_tag_bad;
- TrySmiTag(index, index, &index_tag_bad);
- b(&index_tag_ok);
- bind(&index_tag_bad);
- Abort(kIndexIsTooLarge);
- bind(&index_tag_ok);
-
- ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- cmp(index, ip);
- Check(lt, kIndexIsTooLarge);
-
- cmp(index, Operand(Smi::kZero));
- Check(ge, kIndexIsNegative);
-
- SmiUntag(index, index);
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- int num_double_arguments,
- Register scratch) {
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments) {
int frame_alignment = ActivationFrameAlignment();
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -3253,14 +3068,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
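PrepareCallCFunction rounds sp down to the required power-of-two alignment after reserving room for the stack-passed arguments plus one slot that will hold the old sp. The same arithmetic as a standalone sketch, assuming a 32-bit pointer size:

// Standalone sketch (not V8 code): returns the new, aligned sp; *saved_sp_slot
// is where the generated code stores the old sp so it can be restored later.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPointerSize = 4;  // Assumption: 32-bit ARM.

uintptr_t PrepareStack(uintptr_t sp, int stack_passed_arguments,
                       uintptr_t frame_alignment, uintptr_t* saved_sp_slot) {
  assert((frame_alignment & (frame_alignment - 1)) == 0);  // Power of two.
  sp -= (stack_passed_arguments + 1) * kPointerSize;  // Args + saved-sp slot.
  sp &= ~(frame_alignment - 1);                       // and_(sp, sp, -align).
  *saved_sp_slot = sp + stack_passed_arguments * kPointerSize;
  return sp;
}

int main() {
  uintptr_t slot = 0;
  uintptr_t sp = PrepareStack(0x7ffc, 2, 8, &slot);
  assert(sp % 8 == 0 && slot == sp + 2 * kPointerSize);
  return 0;
}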
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- Register scratch) {
- PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-
-void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
+void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
DCHECK(src.is(d0));
if (!use_eabi_hardfloat()) {
vmov(r0, r1, src);
@@ -3269,12 +3077,11 @@ void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
// On ARM this is just a synonym to make the purpose clear.
-void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
+void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
MovToFloatParameter(src);
}
-
-void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
+void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(src1.is(d0));
DCHECK(src2.is(d1));
@@ -3284,35 +3091,30 @@ void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
}
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- mov(ip, Operand(function));
- CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(function));
+ CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunctionHelper(Register function,
+void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
@@ -3325,7 +3127,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
tst(sp, Operand(frame_alignment_mask));
b(eq, &alignment_as_expected);
@@ -3350,13 +3152,8 @@ void MacroAssembler::CallCFunctionHelper(Register function,
}
}
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+ Condition cc, Label* condition_met) {
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
@@ -3385,19 +3182,22 @@ void MacroAssembler::HasColor(Register object,
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color, word_boundary;
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(ip, Operand(mask_scratch));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ ldr(scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(scratch, Operand(mask_scratch));
b(first_bit == 1 ? eq : ne, &other_color);
// Shift left 1 by adding.
add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
b(eq, &word_boundary);
- tst(ip, Operand(mask_scratch));
+ tst(scratch, Operand(mask_scratch));
b(second_bit == 1 ? ne : eq, has_color);
jmp(&other_color);
bind(&word_boundary);
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- tst(ip, Operand(1));
+ ldr(scratch,
+ MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+ tst(scratch, Operand(1));
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
@@ -3410,17 +3210,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
- mov(ip, Operand(1));
- mov(mask_reg, Operand(ip, LSL, mask_reg));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Ubfx(scratch, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ add(bitmap_reg, bitmap_reg, Operand(scratch, LSL, kPointerSizeLog2));
+ mov(scratch, Operand(1));
+ mov(mask_reg, Operand(scratch, LSL, mask_reg));
}
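GetMarkBits turns an object address into a page base, a bitmap cell index and a bit mask. A standalone sketch of that index arithmetic, assuming 4-byte pointers, 512 KB pages and 32-bit bitmap cells (illustrative constants, not necessarily V8's exact layout):

// Standalone sketch (not V8 code): each pointer-sized word of a page gets one
// bit in the page's marking bitmap; bits are grouped into 32-bit cells.
#include <cstdint>
#include <cstdio>

constexpr int kPointerSizeLog2 = 2;   // 4-byte pointers (assumption).
constexpr int kBitsPerCellLog2 = 5;   // 32-bit bitmap cells.
constexpr int kPageSizeBits = 19;     // 512 KB pages (assumption).

void GetMarkBits(uintptr_t addr, uintptr_t* page, int* cell_index,
                 uint32_t* mask) {
  const uintptr_t page_offset_mask = (uintptr_t{1} << kPageSizeBits) - 1;
  *page = addr & ~page_offset_mask;                       // Page start.
  int bit_in_cell = (addr >> kPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1);
  const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
  *cell_index = static_cast<int>((addr & page_offset_mask) >> kLowBits);
  *mask = uint32_t{1} << bit_in_cell;
}

int main() {
  uintptr_t page = 0;
  int cell = 0;
  uint32_t mask = 0;
  GetMarkBits(0x81234u, &page, &cell, &mask);
  std::printf("page=%#lx cell=%d mask=%#x\n",
              static_cast<unsigned long>(page), cell,
              static_cast<unsigned>(mask));
  return 0;
}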
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register mask_scratch, Register load_scratch,
Label* value_is_white) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
@@ -3442,26 +3244,6 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
}
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DwVfpRegister input_reg,
- LowDwVfpRegister double_scratch) {
- Label done;
-
- // Handle inputs >= 255 (including +infinity).
- Vmov(double_scratch, 255.0, result_reg);
- mov(result_reg, Operand(255));
- VFPCompareAndSetFlags(input_reg, double_scratch);
- b(ge, &done);
-
- // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
- // rounding mode will provide the correct result.
- vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, double_scratch.low());
-
- bind(&done);
-}
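The removed ClampDoubleToUint8 clamps to 255 from above and relies on a round-to-nearest unsigned conversion to take negatives (and NaN) to 0. A standalone approximation of that behaviour in portable C++; exact rounding-mode details may differ from the VFP sequence:

// Standalone sketch (not V8 code).
#include <cassert>
#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8(double input) {
  if (input >= 255.0) return 255;          // Handles +infinity too.
  if (std::isnan(input)) return 0;         // Unsigned conversion of NaN is 0.
  double rounded = std::nearbyint(input);  // Round to nearest (FPSCR RN).
  if (rounded <= 0.0) return 0;            // Unsigned conversion saturates.
  return static_cast<uint8_t>(rounded);
}

int main() {
  assert(ClampDoubleToUint8(300.0) == 255);
  assert(ClampDoubleToUint8(-4.2) == 0);
  assert(ClampDoubleToUint8(127.6) == 128);
  return 0;
}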
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -3556,51 +3338,6 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
b(ne, &next);
}
-void MacroAssembler::TestJSArrayForAllocationMemento(
- Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found) {
- Label map_check;
- Label top_check;
- ExternalReference new_space_allocation_top_adr =
- ExternalReference::new_space_allocation_top_address(isolate());
- const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoLastWordOffset =
- kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
-
- // Bail out if the object is not in new space.
- JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
- // If the object is in new space, we need to check whether it is on the same
- // page as the current top.
- add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
- mov(ip, Operand(new_space_allocation_top_adr));
- ldr(ip, MemOperand(ip));
- eor(scratch_reg, scratch_reg, Operand(ip));
- tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
- b(eq, &top_check);
- // The object is on a different page than allocation top. Bail out if the
- // object sits on the page boundary as no memento can follow and we cannot
- // touch the memory following it.
- add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
- eor(scratch_reg, scratch_reg, Operand(receiver_reg));
- tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
- b(ne, no_memento_found);
- // Continue with the actual map check.
- jmp(&map_check);
- // If top is on the same page as the current object, we need to check whether
- // we are below top.
- bind(&top_check);
- add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
- mov(ip, Operand(new_space_allocation_top_adr));
- ldr(ip, MemOperand(ip));
- cmp(scratch_reg, ip);
- b(ge, no_memento_found);
- // Memento map check.
- bind(&map_check);
- ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
- cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
-}
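The removed memento check decides whether two addresses share a page by XOR-ing them and testing the bits above the page offset. A standalone sketch of that test, assuming 512 KB, power-of-two-aligned pages:

// Standalone sketch (not V8 code): two addresses are on the same page exactly
// when their XOR has no bits set above the page offset.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // 512 KB.

bool OnSamePage(uintptr_t a, uintptr_t b) {
  return ((a ^ b) & ~kPageAlignmentMask) == 0;
}

int main() {
  assert(OnSamePage(0x00080010, 0x0008fff0));   // Same 512 KB page.
  assert(!OnSamePage(0x00080010, 0x00100010));  // Different pages.
  return 0;
}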
-
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
@@ -3623,7 +3360,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return candidate;
}
UNREACHABLE();
- return no_reg;
}
#ifdef DEBUG
@@ -3699,29 +3435,6 @@ void CodePatcher::EmitCondition(Condition cond) {
masm_.emit(instr);
}
-
-void MacroAssembler::TruncatingDiv(Register result,
- Register dividend,
- int32_t divisor) {
- DCHECK(!dividend.is(result));
- DCHECK(!dividend.is(ip));
- DCHECK(!result.is(ip));
- base::MagicNumbersForDivision<uint32_t> mag =
- base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
- mov(ip, Operand(mag.multiplier));
- bool neg = (mag.multiplier & (1U << 31)) != 0;
- if (divisor > 0 && neg) {
- smmla(result, dividend, ip, dividend);
- } else {
- smmul(result, dividend, ip);
- if (divisor < 0 && !neg && mag.multiplier > 0) {
- sub(result, result, Operand(dividend));
- }
- }
- if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
- add(result, result, Operand(dividend, LSR, 31));
-}
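The removed TruncatingDiv implements signed division by a constant with a magic multiplier (smmul/smmla plus fix-ups). A standalone sketch for the illustrative divisor 3, whose multiplier is 0x55555556 with shift 0:

// Standalone sketch (not V8 code): the high half of the 64-bit product plus
// the sign bit of the dividend gives the quotient truncated toward zero.
#include <cassert>
#include <cstdint>

int32_t TruncatingDivBy3(int32_t dividend) {
  const uint32_t kMagic = 0x55555556;  // Multiplier for divisor 3, shift 0.
  int32_t high = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * static_cast<int32_t>(kMagic)) >> 32);
  return high + (static_cast<uint32_t>(dividend) >> 31);  // add ..., LSR #31
}

int main() {
  assert(TruncatingDivBy3(10) == 3);
  assert(TruncatingDivBy3(-10) == -3);  // Truncates toward zero.
  assert(TruncatingDivBy3(0) == 0);
  return 0;
}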
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 506364686f..7d4d7344a4 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -86,255 +86,48 @@ enum TargetAddressStorageMode {
NEVER_INLINE_TARGET_ADDRESS
};
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
+class TurboAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
-
- int jit_cookie() const { return jit_cookie_; }
-
- Isolate* isolate() const { return isolate_; }
-
- // Returns the size of a call in instructions. Note, the value returned is
- // only valid as long as no entries are added to the constant pool between
- // checking the call size and emitting the actual call.
- static int CallSize(Register target, Condition cond = al);
- int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- int CallStubSize(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
-
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(Register target, Condition cond = al);
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Call(Register target, Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
- bool check_constant_pool = true);
- void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
- bool check_constant_pool = true);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
- void Ret(Condition cond = al);
-
- // Used for patching in calls to the deoptimizer.
- void CallDeoptimizer(Address target);
- static int CallDeoptimizerSize();
-
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = al);
- void Drop(Register count, Condition cond = al);
-
- void Ret(int drop, Condition cond = al);
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1,
- Register reg2,
- Register scratch = no_reg,
- Condition cond = al);
-
- void Mls(Register dst, Register src1, Register src2, Register srcA,
- Condition cond = al);
- void And(Register dst, Register src1, const Operand& src2,
- Condition cond = al);
- void Ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- void Sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- // The scratch register is not used for ARMv7.
- // scratch can be the same register as src (in which case it is trashed), but
- // not the same as dst.
- void Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond = al);
- void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
-
- void Call(Label* target);
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
-
- // Register move. May do nothing if the registers are identical.
- void Move(Register dst, Smi* smi);
- void Move(Register dst, Handle<Object> value);
- void Move(Register dst, Register src, Condition cond = al);
- void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
- Condition cond = al) {
- if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
- mov(dst, src, sbit, cond);
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}
- void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
- void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
- void Move(QwNeonRegister dst, QwNeonRegister src);
- // Register swap.
- void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
- void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
-
- void Load(Register dst, const MemOperand& src, Representation r);
- void Store(Register src, const MemOperand& dst, Representation r);
-
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond = al);
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond = al);
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
+ Isolate* isolate() const { return isolate_; }
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
}
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg = false);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type);
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ // Push a fixed frame, consisting of lr, fp
+ void PushCommonFrame(Register marker_reg = no_reg);
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- lr_status,
- save_fp,
- remembered_set_action,
- smi_check,
- pointers_to_here_check_for_value);
- }
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue(bool code_pre_aging);
- // Notify the garbage collector that we wrote a code entry into a
- // JSFunction. Only scratch is clobbered by the operation.
- void RecordWriteCodeEntryField(Register js_function, Register code_entry,
- Register scratch);
+ // Push a standard frame, consisting of lr, fp, context and JS function
+ void PushStandardFrame(Register function_reg);
- void RecordWriteForMap(
- Register object,
- Register map,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp);
+ void InitializeRootRegister();
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
+ void Push(Register src) { push(src); }
- // Push a handle.
- void Push(Handle<Object> handle);
+ void Push(Handle<HeapObject> handle);
void Push(Smi* smi);
// Push two registers. Pushes leftmost register first (to highest address).
@@ -363,17 +156,12 @@ class MacroAssembler: public Assembler {
}
// Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1,
- Register src2,
- Register src3,
- Register src4,
+ void Push(Register src1, Register src2, Register src3, Register src4,
Condition cond = al) {
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
- stm(db_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+ stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
cond);
} else {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -418,6 +206,8 @@ class MacroAssembler: public Assembler {
}
}
+ void Pop(Register dst) { pop(dst); }
+
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
DCHECK(!src1.is(src2));
@@ -446,18 +236,13 @@ class MacroAssembler: public Assembler {
}
// Pop four registers. Pops rightmost register first (from lower address).
- void Pop(Register src1,
- Register src2,
- Register src3,
- Register src4,
+ void Pop(Register src1, Register src2, Register src3, Register src4,
Condition cond = al) {
DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
- ldm(ia_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+ ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
cond);
} else {
ldr(src4, MemOperand(sp, 4, PostIndex), cond);
@@ -473,45 +258,108 @@ class MacroAssembler: public Assembler {
}
}
- // Push a fixed frame, consisting of lr, fp
- void PushCommonFrame(Register marker_reg = no_reg);
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored in
+ // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+ // are word sized. If double arguments are used, this function assumes that
+ // all double arguments are stored before core registers; otherwise the
+ // correct alignment of the double values is not guaranteed.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments,
+ int num_double_registers = 0);
- // Push a standard frame, consisting of lr, fp, context and JS function
- void PushStandardFrame(Register function_reg);
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
- void PopCommonFrame(Register marker_reg = no_reg);
+ // There are two ways of passing double arguments on ARM, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DwVfpRegister src);
+ void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
+ void MovToFloatResult(DwVfpRegister src);
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
- // Load two consecutive registers with two consecutive memory locations.
- void Ldrd(Register dst1,
- Register dst2,
- const MemOperand& src,
- Condition cond = al);
+ void MovFromFloatParameter(DwVfpRegister dst);
+ void MovFromFloatResult(DwVfpRegister dst);
- // Store two consecutive registers to two consecutive memory locations.
- void Strd(Register src1,
- Register src2,
- const MemOperand& dst,
- Condition cond = al);
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
- // If the value is a NaN, canonicalize the value else, do nothing.
- void VFPCanonicalizeNaN(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void VFPCanonicalizeNaN(const DwVfpRegister value,
- const Condition cond = al) {
- VFPCanonicalizeNaN(value, value, cond);
- }
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason msg);
+
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ void LslPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void LslPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void LsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void LsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void AsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void AsrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
+ // Returns the size of a call in instructions. Note, the value returned is
+ // only valid as long as no entries are added to the constant pool between
+ // checking the call size and emitting the actual call.
+ static int CallSize(Register target, Condition cond = al);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ Condition cond = al);
+ int CallStubSize();
+
+ void CallStubDelayed(CodeStub* stub);
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+ void Call(Register target, Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
+ bool check_constant_pool = true);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
+ bool check_constant_pool = true);
+ void Call(Label* target);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = al);
+ void Drop(Register count, Condition cond = al);
+
+ void Ret(Condition cond = al);
+ void Ret(int drop, Condition cond = al);
// Compare single values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
@@ -520,13 +368,142 @@ class MacroAssembler: public Assembler {
const Condition cond = al);
// Compare double values and move the result to the normal condition flags.
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
+ void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2,
const Condition cond = al);
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
+ void VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2,
const Condition cond = al);
+ // If the value is a NaN, canonicalize the value else, do nothing.
+ void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src,
+ const Condition cond = al);
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al) {
+ VFPCanonicalizeNaN(value, value, cond);
+ }
+
+ void VmovHigh(Register dst, DwVfpRegister src);
+ void VmovHigh(DwVfpRegister dst, Register src);
+ void VmovLow(Register dst, DwVfpRegister src);
+ void VmovLow(DwVfpRegister dst, Register src);
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+
+ void Jump(Register target, Condition cond = al);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
+ // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
+ // code. The specific behaviour depends on supported instructions.
+ //
+ // These functions assume (and assert) that !left.is(right). It is permitted
+ // for the result to alias either input register.
+ void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
+ Label* out_of_line);
+ void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
+ Label* out_of_line);
+ void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
+ Label* out_of_line);
+ void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right);
+ void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
+ SwVfpRegister right);
+ void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right);
+ void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
+ DwVfpRegister right);
+
+ void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
+ void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
+ void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
+ void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
+ NeonDataType dt, int lane);
+ void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+ SwVfpRegister src_lane, int lane);
+
+ // Register move. May do nothing if the registers are identical.
+ void Move(Register dst, Smi* smi);
+ void Move(Register dst, Handle<HeapObject> value);
+ void Move(Register dst, Register src, Condition cond = al);
+ void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
+ Condition cond = al) {
+ if (!src.IsRegister() || !src.rm().is(dst) || sbit != LeaveCC) {
+ mov(dst, src, sbit, cond);
+ }
+ }
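A minimal usage sketch of the elision rule in the inline body above, assuming a TurboAssembler* named masm and the ARM core registers r0/r1:

    masm->Move(r0, Operand(r0));         // same register, flags untouched: no instruction emitted
    masm->Move(r0, Operand(r0), SetCC);  // flags requested, so "movs r0, r0" is still emitted
    masm->Move(r0, Operand(r1));         // emits "mov r0, r1"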
+ void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
+ void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
+ void Move(QwNeonRegister dst, QwNeonRegister src);
+
+ // Simulate s-register moves for imaginary s32 - s63 registers.
+ void VmovExtended(Register dst, int src_code);
+ void VmovExtended(int dst_code, Register src);
+ // Move between s-registers and imaginary s-registers.
+ void VmovExtended(int dst_code, int src_code);
+ void VmovExtended(int dst_code, const MemOperand& src);
+ void VmovExtended(const MemOperand& dst, int src_code);
+
+ // Register swap.
+ void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+ void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
+
+ void SmiUntag(Register reg, SBit s = LeaveCC) {
+ mov(reg, Operand::SmiUntag(reg), s);
+ }
+ void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
+ mov(dst, Operand::SmiUntag(src), s);
+ }
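A brief sketch of what these helpers are expected to expand to, assuming a TurboAssembler* masm; on the 32-bit ARM port smis carry a one-bit tag, so Operand::SmiUntag is an arithmetic shift right by kSmiTagSize:

    masm->SmiUntag(r0, r1);         // expected to emit "mov r0, r1, asr #1"
    masm->SmiUntag(r2, r3, SetCC);  // "movs r2, r3, asr #1", also updating the flags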
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond = al);
+
+ // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if the result is saturated. On return,
+ // 'result' either holds the answer or has been clobbered on the fall-through path.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DwVfpRegister double_input);
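A minimal sketch of the fast-path/slow-path pairing these two helpers are documented for, assuming a TurboAssembler* masm, a Zone* zone, and the input value in d0:

    Label done;
    masm->TryInlineTruncateDoubleToI(r0, d0, &done);  // jumps to done on success
    masm->TruncateDoubleToIDelayed(zone, r0, d0);     // fall-through: saturated, take the slow path
    masm->bind(&done);                                // r0 now holds the ToInt32 result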
+
+ // Returns whether the hard-float EABI variant is in use for double arguments.
+ bool use_eabi_hardfloat() {
+#ifdef __arm__
+ return base::OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
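A sketch of how this predicate is typically consulted when passing a double argument to C code, assuming a TurboAssembler* masm with the value already in d2:

    if (masm->use_eabi_hardfloat()) {
      masm->vmov(d0, d2);      // hard-float ABI: the double travels in d0
    } else {
      masm->vmov(r0, r1, d2);  // soft-float ABI: the double travels in the r0/r1 pair
    }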
+
+ private:
+ bool has_frame_ = false;
+ Isolate* const isolate_;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
@@ -541,48 +518,194 @@ class MacroAssembler: public Assembler {
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
+ void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
const Register fpscr_flags,
const Condition cond = al);
- void Vmov(const DwVfpRegister dst,
- const double imm,
- const Register scratch = no_reg);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- void VmovHigh(Register dst, DwVfpRegister src);
- void VmovHigh(DwVfpRegister dst, Register src);
- void VmovLow(Register dst, DwVfpRegister src);
- void VmovLow(DwVfpRegister dst, Register src);
+ // Implementation helpers for FloatMin and FloatMax.
+ template <typename T>
+ void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
+ template <typename T>
+ void FloatMinHelper(T result, T left, T right, Label* out_of_line);
+ template <typename T>
+ void FloatMaxOutOfLineHelper(T result, T left, T right);
+ template <typename T>
+ void FloatMinOutOfLineHelper(T result, T left, T right);
- // Simulate s-register moves for imaginary s32 - s63 registers.
- void VmovExtended(Register dst, int src_code);
- void VmovExtended(int dst_code, Register src);
- // Move between s-registers and imaginary s-registers.
- void VmovExtended(int dst_code, int src_code);
- void VmovExtended(int dst_code, const MemOperand& src);
- void VmovExtended(const MemOperand& dst, int src_code);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
- void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
- void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
- void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
- void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
- NeonDataType dt, int lane);
- void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
- SwVfpRegister src_lane, int lane);
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+};
- void LslPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
- void LslPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
- void LsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
- void LsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
- void AsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
- void AsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public TurboAssembler {
+ public:
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
+
+ int jit_cookie() const { return jit_cookie_; }
+
+ // Used for patching in calls to the deoptimizer.
+ void CallDeoptimizer(Address target);
+ static int CallDeoptimizerSize();
+
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_ra_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg,
+ Condition cond = al);
+
+ void Mls(Register dst, Register src1, Register src2, Register srcA,
+ Condition cond = al);
+ void And(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+ void Ubfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ void Sbfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ // The scratch register is not used for ARMv7.
+ // scratch can be the same register as src (in which case it is trashed), but
+ // not the same as dst.
+ void Bfi(Register dst, Register src, Register scratch, int lsb, int width,
+ Condition cond = al);
+
+ void PushObject(Handle<Object> object);
+
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
+
+ // Store an object to the root table.
+ void StoreRoot(Register source, Heap::RootListIndex index,
+ Condition cond = al);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object, Register value,
+ Register address);
+
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+ lr_status, save_fp, remembered_set_action, smi_check,
+ pointers_to_here_check_for_value);
+ }
+
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
+ void RecordWriteForMap(Register object, Register map, Register dst,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ void PopCommonFrame(Register marker_reg = no_reg);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ // Load two consecutive registers with two consecutive memory locations.
+ void Ldrd(Register dst1,
+ Register dst2,
+ const MemOperand& src,
+ Condition cond = al);
+
+ // Store two consecutive registers to two consecutive memory locations.
+ void Strd(Register src1,
+ Register src2,
+ const MemOperand& dst,
+ Condition cond = al);
+
+ void Vmov(const DwVfpRegister dst, Double imm,
+ const Register scratch = no_reg);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
@@ -618,10 +741,6 @@ class MacroAssembler: public Assembler {
LowDwVfpRegister double_scratch1,
Label* not_int32);
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue(bool code_pre_aging);
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0,
@@ -634,9 +753,6 @@ class MacroAssembler: public Assembler {
bool restore_context,
bool argument_count_is_length = false);
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
@@ -657,20 +773,9 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
- void InitializeRootRegister();
-
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -778,15 +883,6 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register scratch1,
- Register scratch2, AllocationFlags flags);
-
- void FastAllocate(Register object_size, Register result, Register result_end,
- Register scratch, AllocationFlags flags);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -809,12 +905,6 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // Initialize fields with filler values. Fields starting at |current_address|
- // not including |end_address| are overwritten with the value in |filler|. At
- // the end the loop, |current_address| takes the value of |end_address|.
- void InitializeFieldsWithFiller(Register current_address,
- Register end_address, Register filler);
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -830,7 +920,7 @@ class MacroAssembler: public Assembler {
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
- // Type_reg can be no_reg. In that case ip is used.
+ // Type_reg can be no_reg. In that case a scratch register is used.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
@@ -882,11 +972,13 @@ class MacroAssembler: public Assembler {
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
- // Uses the ip register as scratch.
+ // Acquires a scratch register.
void CompareRoot(Register obj, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index) {
- LoadRoot(ip, index);
- Push(ip);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Push(scratch);
}
// Compare the object in a register to a value and jump if they are equal.
@@ -940,36 +1032,6 @@ class MacroAssembler: public Assembler {
Label* done,
Label* exact);
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
- void TryInlineTruncateDoubleToI(Register result,
- DwVfpRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Register result, DwVfpRegister double_input);
-
- // Performs a truncating conversion of a heap number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
- // must be different registers. Exits with 'result' holding the answer.
- void TruncateHeapNumberToI(Register result, Register object);
-
- // Converts the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
- // different registers.
- void TruncateNumberToI(Register object,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Label* not_int32);
-
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
@@ -982,38 +1044,11 @@ class MacroAssembler: public Assembler {
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
- // Perform a floating-point min or max operation with the
- // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
- // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
- // code. The specific behaviour depends on supported instructions.
- //
- // These functions assume (and assert) that !left.is(right). It is permitted
- // for the result to alias either input register.
- void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
- Label* out_of_line);
- void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
- Label* out_of_line);
- void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
- Label* out_of_line);
- void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
- Label* out_of_line);
-
- // Generate out-of-line cases for the macros above.
- void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
- SwVfpRegister right);
- void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
- SwVfpRegister right);
- void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
- DwVfpRegister right);
- void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
- DwVfpRegister right);
-
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
// Call a code stub.
@@ -1048,106 +1083,18 @@ class MacroAssembler: public Assembler {
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, non-register arguments must be stored in
- // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized. If double arguments are used, this function assumes that
- // all double arguments are stored before core registers; otherwise the
- // correct alignment of the double values is not guaranteed.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
-
- // There are two ways of passing double arguments on ARM, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void MovToFloatParameter(DwVfpRegister src);
- void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
- void MovToFloatResult(DwVfpRegister src);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void MovFromFloatParameter(DwVfpRegister dst);
- void MovFromFloatResult(DwVfpRegister dst);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
-
- // Emit code for a truncating division by a constant. The dividend register is
- // unchanged and ip gets clobbered. Dividend and result must be different.
- void TruncatingDiv(Register result, Register dividend, int32_t divisor);
-
// ---------------------------------------------------------------------------
// StatsCounter support
- void SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
void IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // EABI variant for double arguments in use.
- bool use_eabi_hardfloat() {
-#ifdef __arm__
- return base::OS::ArmUsingHardFloat();
-#elif USE_EABI_HARDFLOAT
- return true;
-#else
- return false;
-#endif
- }
-
// ---------------------------------------------------------------------------
// Number utilities
@@ -1182,19 +1129,14 @@ class MacroAssembler: public Assembler {
TrySmiTag(reg, reg, not_a_smi);
}
void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
- SmiTag(ip, src, SetCC);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ SmiTag(scratch, src, SetCC);
b(vs, not_a_smi);
- mov(reg, ip);
+ mov(reg, scratch);
}
- void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand::SmiUntag(reg), s);
- }
- void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand::SmiUntag(src), s);
- }
-
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
@@ -1202,8 +1144,6 @@ class MacroAssembler: public Assembler {
// Test if the register contains a smi (Z == 0 (eq) if true).
void SmiTst(Register value);
void NonNegativeSmiTst(Register value);
- // Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label);
// Jump if either of the registers contain a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
// Jump if either of the registers contain a non-smi.
@@ -1215,6 +1155,9 @@ class MacroAssembler: public Assembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -1222,9 +1165,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register suspend_flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1268,19 +1211,8 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
-
-
void ClampUint8(Register output_reg, Register input_reg);
- void ClampDoubleToUint8(Register result_reg,
- DwVfpRegister input_reg,
- LowDwVfpRegister double_scratch);
-
-
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
@@ -1308,12 +1240,6 @@ class MacroAssembler: public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg = false);
- // Returns the pc offset at which the frame ends.
- int LeaveFrame(StackFrame::Type type);
-
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -1321,23 +1247,7 @@ class MacroAssembler: public Assembler {
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to eq.
- void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found);
-
private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -1364,21 +1274,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- // Implementation helpers for FloatMin and FloatMax.
- template <typename T>
- void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
- template <typename T>
- void FloatMinHelper(T result, T left, T right, Label* out_of_line);
- template <typename T>
- void FloatMaxOutOfLineHelper(T result, T left, T right);
- template <typename T>
- void FloatMinOutOfLineHelper(T result, T left, T right);
-
- bool generating_stub_;
- bool has_frame_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
int jit_cookie_;
// Needs access to SafepointRegisterStackIndex for compiled frame
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 1f7e146692..dc279ceb44 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -3225,7 +3225,6 @@ void Simulator::DecodeType7(Instruction* instr) {
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
-
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
@@ -3749,7 +3748,6 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode,
(val <= (min_int - 1.0));
default:
UNREACHABLE();
- return true;
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index e865b634b5..fbc4ac41fb 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -16,7 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-bool CpuFeatures::SupportsWasmSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return true; }
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
@@ -57,6 +57,15 @@ inline int CPURegister::SizeInBytes() const {
return reg_size / 8;
}
+inline bool CPURegister::Is8Bits() const {
+ DCHECK(IsValid());
+ return reg_size == 8;
+}
+
+inline bool CPURegister::Is16Bits() const {
+ DCHECK(IsValid());
+ return reg_size == 16;
+}
inline bool CPURegister::Is32Bits() const {
DCHECK(IsValid());
@@ -69,9 +78,13 @@ inline bool CPURegister::Is64Bits() const {
return reg_size == 64;
}
+inline bool CPURegister::Is128Bits() const {
+ DCHECK(IsValid());
+ return reg_size == 128;
+}
inline bool CPURegister::IsValid() const {
- if (IsValidRegister() || IsValidFPRegister()) {
+ if (IsValidRegister() || IsValidVRegister()) {
DCHECK(!IsNone());
return true;
} else {
@@ -87,14 +100,14 @@ inline bool CPURegister::IsValidRegister() const {
((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}
-
-inline bool CPURegister::IsValidFPRegister() const {
- return IsFPRegister() &&
- ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
- (reg_code < kNumberOfFPRegisters);
+inline bool CPURegister::IsValidVRegister() const {
+ return IsVRegister() &&
+ ((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
+ (reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
+ (reg_size == kQRegSizeInBits)) &&
+ (reg_code < kNumberOfVRegisters);
}
-
inline bool CPURegister::IsNone() const {
// kNoRegister types should always have size 0 and code 0.
DCHECK((reg_type != kNoRegister) || (reg_code == 0));
@@ -120,11 +133,7 @@ inline bool CPURegister::IsRegister() const {
return reg_type == kRegister;
}
-
-inline bool CPURegister::IsFPRegister() const {
- return reg_type == kFPRegister;
-}
-
+inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size == other.reg_size) && (reg_type == other.reg_type);
@@ -200,7 +209,7 @@ inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return csp;
} else {
- DCHECK(code < kNumberOfRegisters);
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
}
}
@@ -210,23 +219,40 @@ inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wcsp;
} else {
- DCHECK(code < kNumberOfRegisters);
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
}
}
+inline VRegister VRegister::BRegFromCode(unsigned code) {
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return VRegister::Create(code, kBRegSizeInBits);
+}
-inline FPRegister FPRegister::SRegFromCode(unsigned code) {
- DCHECK(code < kNumberOfFPRegisters);
- return FPRegister::Create(code, kSRegSizeInBits);
+inline VRegister VRegister::HRegFromCode(unsigned code) {
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return VRegister::Create(code, kHRegSizeInBits);
}
+inline VRegister VRegister::SRegFromCode(unsigned code) {
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return VRegister::Create(code, kSRegSizeInBits);
+}
-inline FPRegister FPRegister::DRegFromCode(unsigned code) {
- DCHECK(code < kNumberOfFPRegisters);
- return FPRegister::Create(code, kDRegSizeInBits);
+inline VRegister VRegister::DRegFromCode(unsigned code) {
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return VRegister::Create(code, kDRegSizeInBits);
}
+inline VRegister VRegister::QRegFromCode(unsigned code) {
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return VRegister::Create(code, kQRegSizeInBits);
+}
+
+inline VRegister VRegister::VRegFromCode(unsigned code) {
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return VRegister::Create(code, kVRegSizeInBits);
+}
inline Register CPURegister::W() const {
DCHECK(IsValidRegister());
@@ -239,16 +265,34 @@ inline Register CPURegister::X() const {
return Register::XRegFromCode(reg_code);
}
+inline VRegister CPURegister::V() const {
+ DCHECK(IsValidVRegister());
+ return VRegister::VRegFromCode(reg_code);
+}
+
+inline VRegister CPURegister::B() const {
+ DCHECK(IsValidVRegister());
+ return VRegister::BRegFromCode(reg_code);
+}
+
+inline VRegister CPURegister::H() const {
+ DCHECK(IsValidVRegister());
+ return VRegister::HRegFromCode(reg_code);
+}
-inline FPRegister CPURegister::S() const {
- DCHECK(IsValidFPRegister());
- return FPRegister::SRegFromCode(reg_code);
+inline VRegister CPURegister::S() const {
+ DCHECK(IsValidVRegister());
+ return VRegister::SRegFromCode(reg_code);
}
+inline VRegister CPURegister::D() const {
+ DCHECK(IsValidVRegister());
+ return VRegister::DRegFromCode(reg_code);
+}
-inline FPRegister CPURegister::D() const {
- DCHECK(IsValidFPRegister());
- return FPRegister::DRegFromCode(reg_code);
+inline VRegister CPURegister::Q() const {
+ DCHECK(IsValidVRegister());
+ return VRegister::QRegFromCode(reg_code);
}
@@ -310,7 +354,6 @@ Immediate::Immediate(T t, RelocInfo::Mode rmode)
STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}
-
// Operand.
template<typename T>
Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
@@ -325,7 +368,6 @@ Operand::Operand(T t, RelocInfo::Mode rmode)
: immediate_(t, rmode),
reg_(NoReg) {}
-
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
: immediate_(0),
reg_(reg),
@@ -352,9 +394,21 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
+bool Operand::IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(heap_object_request_.has_value(), reg_.Is(NoReg));
+ DCHECK_IMPLIES(heap_object_request_.has_value(),
+ immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT ||
+ immediate_.rmode() == RelocInfo::CODE_TARGET);
+ return heap_object_request_.has_value();
+}
+
+HeapObjectRequest Operand::heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return *heap_object_request_;
+}
bool Operand::IsImmediate() const {
- return reg_.Is(NoReg);
+ return reg_.Is(NoReg) && !IsHeapObjectRequest();
}
@@ -383,6 +437,13 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
+Immediate Operand::immediate_for_heap_object_request() const {
+ DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
+ immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
+ (heap_object_request().kind() == HeapObjectRequest::kCodeStub &&
+ immediate_.rmode() == RelocInfo::CODE_TARGET));
+ return immediate_;
+}
Immediate Operand::immediate() const {
DCHECK(IsImmediate());
@@ -491,7 +552,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
- DCHECK(addrmode == Offset);
+ DCHECK((addrmode == Offset) || (addrmode == PostIndex));
regoffset_ = offset.reg();
shift_ = offset.shift();
@@ -877,21 +938,20 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x : LDR_w;
} else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? LDR_d : LDR_s;
- }
-}
-
-
-LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
- const CPURegister& rt2) {
- DCHECK(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? LDP_x : LDP_w;
- } else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? LDP_d : LDP_s;
+ DCHECK(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSizeInBits:
+ return LDR_b;
+ case kHRegSizeInBits:
+ return LDR_h;
+ case kSRegSizeInBits:
+ return LDR_s;
+ case kDRegSizeInBits:
+ return LDR_d;
+ default:
+ DCHECK(rt.IsQ());
+ return LDR_q;
+ }
}
}
@@ -901,11 +961,29 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? STR_x : STR_w;
} else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? STR_d : STR_s;
+ DCHECK(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSizeInBits:
+ return STR_b;
+ case kHRegSizeInBits:
+ return STR_h;
+ case kSRegSizeInBits:
+ return STR_s;
+ case kDRegSizeInBits:
+ return STR_d;
+ default:
+ DCHECK(rt.IsQ());
+ return STR_q;
+ }
}
}
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
+ return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
+ LoadStorePairLBit);
+}
LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
@@ -914,8 +992,16 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
if (rt.IsRegister()) {
return rt.Is64Bits() ? STP_x : STP_w;
} else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? STP_d : STP_s;
+ DCHECK(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kSRegSizeInBits:
+ return STP_s;
+ case kDRegSizeInBits:
+ return STP_d;
+ default:
+ DCHECK(rt.IsQ());
+ return STP_q;
+ }
}
}
@@ -924,7 +1010,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
} else {
- DCHECK(rt.IsFPRegister());
+ DCHECK(rt.IsVRegister());
return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
}
}
@@ -945,7 +1031,6 @@ Instr Assembler::Flags(FlagsUpdate S) {
return 0 << FlagsUpdate_offset;
}
UNREACHABLE();
- return 0;
}
@@ -1108,9 +1193,8 @@ Instr Assembler::ImmLS(int imm9) {
return truncate_to_int9(imm9) << ImmLS_offset;
}
-
-Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
- DCHECK(((imm7 >> size) << size) == imm7);
+Instr Assembler::ImmLSPair(int imm7, unsigned size) {
+ DCHECK_EQ((imm7 >> size) << size, imm7);
int scaled_imm7 = imm7 >> size;
DCHECK(is_int7(scaled_imm7));
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
@@ -1152,10 +1236,17 @@ Instr Assembler::ImmBarrierType(int imm2) {
return imm2 << ImmBarrierType_offset;
}
-
-LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
- DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
- return static_cast<LSDataSize>(op >> SizeLS_offset);
+unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
+ DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+ unsigned size = static_cast<Instr>(op >> LSSize_offset);
+ if ((op & LSVector_mask) != 0) {
+ // Vector register memory operations encode the access size in the "size"
+ // and "opc" fields.
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
+ size = kQRegSizeLog2;
+ }
+ }
+ return size;
}
@@ -1170,11 +1261,7 @@ Instr Assembler::ShiftMoveWide(int shift) {
return shift << ShiftMoveWide_offset;
}
-
-Instr Assembler::FPType(FPRegister fd) {
- return fd.Is64Bits() ? FP64 : FP32;
-}
-
+Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }
Instr Assembler::FPScale(unsigned scale) {
DCHECK(is_uint6(scale));
@@ -1205,18 +1292,6 @@ inline void Assembler::CheckBuffer() {
}
}
-
-TypeFeedbackId Assembler::RecordedAstId() {
- DCHECK(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
-}
-
-
-void Assembler::ClearRecordedAstId() {
- recorded_ast_id_ = TypeFeedbackId::None();
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index ec12e77274..1cabc01cec 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -34,12 +34,12 @@
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/register-configuration.h"
namespace v8 {
namespace internal {
-
// -----------------------------------------------------------------------------
// CpuFeatures implementation.
@@ -89,8 +89,8 @@ CPURegister CPURegList::PopHighestIndex() {
void CPURegList::RemoveCalleeSaved() {
if (type() == CPURegister::kRegister) {
Remove(GetCalleeSaved(RegisterSizeInBits()));
- } else if (type() == CPURegister::kFPRegister) {
- Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kVRegister) {
+ Remove(GetCalleeSavedV(RegisterSizeInBits()));
} else {
DCHECK(type() == CPURegister::kNoRegister);
DCHECK(IsEmpty());
@@ -103,9 +103,8 @@ CPURegList CPURegList::GetCalleeSaved(int size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
}
-
-CPURegList CPURegList::GetCalleeSavedFP(int size) {
- return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+CPURegList CPURegList::GetCalleeSavedV(int size) {
+ return CPURegList(CPURegister::kVRegister, size, 8, 15);
}
@@ -116,11 +115,10 @@ CPURegList CPURegList::GetCallerSaved(int size) {
return list;
}
-
-CPURegList CPURegList::GetCallerSavedFP(int size) {
+CPURegList CPURegList::GetCallerSavedV(int size) {
// Registers d0-d7 and d16-d31 are caller-saved.
- CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
- list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+ CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
return list;
}
@@ -220,7 +218,6 @@ Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
return candidate;
}
UNREACHABLE();
- return NoReg;
}
@@ -240,7 +237,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
if (regs[i].IsRegister()) {
number_of_valid_regs++;
unique_regs |= regs[i].Bit();
- } else if (regs[i].IsFPRegister()) {
+ } else if (regs[i].IsVRegister()) {
number_of_valid_fpregs++;
unique_fpregs |= regs[i].Bit();
} else {
@@ -277,20 +274,43 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
return match;
}
+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ DCHECK(reg1.IsValid());
+ return (!reg2.IsValid() || reg2.IsSameFormat(reg1)) &&
+ (!reg3.IsValid() || reg3.IsSameFormat(reg1)) &&
+ (!reg4.IsValid() || reg4.IsSameFormat(reg1));
+}
-void Immediate::InitializeHandle(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ DCHECK(reg1.IsValid());
+ if (!reg2.IsValid()) {
+ DCHECK(!reg3.IsValid() && !reg4.IsValid());
+ return true;
+ } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) {
+ return false;
+ }
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- value_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
- value_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE64;
+ if (!reg3.IsValid()) {
+ DCHECK(!reg4.IsValid());
+ return true;
+ } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) {
+ return false;
}
+
+ if (!reg4.IsValid()) {
+ return true;
+ } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) {
+ return false;
+ }
+
+ return true;
+}
+
+void Immediate::InitializeHandle(Handle<HeapObject> handle) {
+ value_ = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
@@ -304,36 +324,52 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return !RelocInfo::IsNone(rmode);
}
+bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
+ int offset) {
+ auto existing = entry_map.find(data);
+ if (existing == entry_map.end()) {
+ entry_map[data] = static_cast<int>(entries_.size());
+ entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
+ return true;
+ }
+ int index = existing->second;
+ entries_[index].second.push_back(offset);
+ return false;
+}
// Constant Pool.
-void ConstPool::RecordEntry(intptr_t data,
- RelocInfo::Mode mode) {
+bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::CODE_AGE_SEQUENCE &&
mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
mode != RelocInfo::DEOPT_INLINING_ID &&
mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
+
+ bool write_reloc_info = true;
+
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
first_use_ = offset;
}
- std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
if (CanBeShared(mode)) {
- shared_entries_.insert(entry);
- if (shared_entries_.count(entry.first) == 1) {
- shared_entries_count++;
- }
+ write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
+ } else if (mode == RelocInfo::CODE_TARGET &&
+ assm_->IsCodeTargetSharingAllowed() && raw_data != 0) {
+ // A zero data value is a placeholder and must not be shared.
+ write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
} else {
- unique_entries_.push_back(entry);
+ entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
}
if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
// Request constant pool emission after the next instruction.
assm_->SetNextConstPoolCheckIn(1);
}
+
+ return write_reloc_info;
}
@@ -442,8 +478,8 @@ void ConstPool::Emit(bool require_jump) {
void ConstPool::Clear() {
shared_entries_.clear();
- shared_entries_count = 0;
- unique_entries_.clear();
+ handle_to_index_map_.clear();
+ entries_.clear();
first_use_ = -1;
}
@@ -453,8 +489,7 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::NONE32);
return RelocInfo::IsNone(mode) ||
- (!assm_->serializer_enabled() &&
- (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
+ (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
}
@@ -512,43 +547,19 @@ void ConstPool::EmitGuard() {
void ConstPool::EmitEntries() {
DCHECK(IsAligned(assm_->pc_offset(), 8));
- typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
- SharedEntriesIterator value_it;
- // Iterate through the keys (constant pool values).
- for (value_it = shared_entries_.begin();
- value_it != shared_entries_.end();
- value_it = shared_entries_.upper_bound(value_it->first)) {
- std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
- uint64_t data = value_it->first;
- range = shared_entries_.equal_range(data);
- SharedEntriesIterator offset_it;
- // Iterate through the offsets of a given key.
- for (offset_it = range.first; offset_it != range.second; offset_it++) {
- Instruction* instr = assm_->InstructionAt(offset_it->second);
+ // Emit entries.
+ for (const auto& entry : entries_) {
+ for (const auto& pc : entry.second) {
+ Instruction* instr = assm_->InstructionAt(pc);
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
}
- assm_->dc64(data);
- }
- shared_entries_.clear();
- shared_entries_count = 0;
-
- // Emit unique entries.
- std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
- for (unique_it = unique_entries_.begin();
- unique_it != unique_entries_.end();
- unique_it++) {
- Instruction* instr = assm_->InstructionAt(unique_it->second);
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
- assm_->dc64(unique_it->first);
+ assm_->dc64(entry.first);
}
- unique_entries_.clear();
- first_use_ = -1;
+ Clear();
}
@@ -556,26 +567,28 @@ void ConstPool::EmitEntries() {
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
constpool_(this),
- recorded_ast_id_(TypeFeedbackId::None()),
unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
+ code_target_sharing_blocked_nesting_ = 0;
Reset();
}
Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
- DCHECK(const_pool_blocked_nesting_ == 0);
- DCHECK(veneer_pool_blocked_nesting_ == 0);
+ DCHECK_EQ(const_pool_blocked_nesting_, 0);
+ DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
+ DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
}
void Assembler::Reset() {
#ifdef DEBUG
DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
- DCHECK(const_pool_blocked_nesting_ == 0);
- DCHECK(veneer_pool_blocked_nesting_ == 0);
+ DCHECK_EQ(const_pool_blocked_nesting_, 0);
+ DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
+ DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_, 0, pc_ - buffer_);
#endif
@@ -586,15 +599,33 @@ void Assembler::Reset() {
next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
no_const_pool_before_ = 0;
- ClearRecordedAstId();
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ Memory::Address_at(target_pointer_address_at(pc)) = object.address();
+ }
+}
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
if (desc) {
desc->buffer = reinterpret_cast<byte*>(buffer_);
@@ -612,7 +643,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -1683,6 +1714,32 @@ void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}
+Operand Operand::EmbeddedNumber(double number) {
+ int32_t smi;
+ if (DoubleToSmiInteger(number, &smi)) {
+ return Operand(Immediate(Smi::FromInt(smi)));
+ }
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.heap_object_request_.emplace(number);
+ DCHECK(result.IsHeapObjectRequest());
+ return result;
+}
+
+Operand Operand::EmbeddedCode(CodeStub* stub) {
+ Operand result(0, RelocInfo::CODE_TARGET);
+ result.heap_object_request_.emplace(stub);
+ DCHECK(result.IsHeapObjectRequest());
+ return result;
+}
+
+void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
+ if (operand.IsHeapObjectRequest()) {
+ RequestHeapObject(operand.heap_object_request());
+ ldr(rt, operand.immediate_for_heap_object_request());
+ } else {
+ ldr(rt, operand.immediate());
+ }
+}
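A minimal sketch of the deferred-allocation path this overload enables, assuming an Assembler* assm loading into x0 and x1:

    assm->ldr(x0, Operand::EmbeddedNumber(1.5));  // not smi-representable: records a HeapObjectRequest;
                                                  // the literal slot is patched with the HeapNumber in GetCode()
    assm->ldr(x1, Operand::EmbeddedNumber(2.0));  // smi-representable: lowered to an ordinary Smi immediate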
void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
// Currently we only support 64-bit literals.
@@ -1773,6 +1830,440 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
+void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3DifferentOp vop) {
+ DCHECK(AreSameFormat(vn, vm));
+ DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
+ (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEON3DifferentW(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3DifferentOp vop) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
+ (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
+ (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
+ Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEON3DifferentHN(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3DifferentOp vop) {
+ DCHECK(AreSameFormat(vm, vn));
+ DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
+ Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+#define NEON_3DIFF_LONG_LIST(V) \
+ V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \
+ V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \
+ V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \
+ V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \
+ V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \
+ V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \
+ V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \
+ V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \
+ V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \
+ V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \
+ V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \
+ V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \
+ V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \
+ V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \
+ V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \
+ V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \
+ V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \
+ V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm) { \
+ DCHECK(AS); \
+ NEON3DifferentL(vd, vn, vm, OP); \
+ }
+NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define NEON_3DIFF_HN_LIST(V) \
+ V(addhn, NEON_ADDHN, vd.IsD()) \
+ V(addhn2, NEON_ADDHN2, vd.IsQ()) \
+ V(raddhn, NEON_RADDHN, vd.IsD()) \
+ V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
+ V(subhn, NEON_SUBHN, vd.IsD()) \
+ V(subhn2, NEON_SUBHN2, vd.IsQ()) \
+ V(rsubhn, NEON_RSUBHN, vd.IsD()) \
+ V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm) { \
+ DCHECK(AS); \
+ NEON3DifferentHN(vd, vn, vm, OP); \
+ }
+NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::NEONPerm(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEONPermOp op) {
+ DCHECK(AreSameFormat(vd, vn, vm));
+ DCHECK(!vd.Is1D());
+ Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::trn1(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN1);
+}
+
+void Assembler::trn2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN2);
+}
+
+void Assembler::uzp1(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP1);
+}
+
+void Assembler::uzp2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP2);
+}
+
+void Assembler::zip1(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP1);
+}
+
+void Assembler::zip2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP2);
+}
+
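A brief illustrative note on the permute group above (not part of this commit; assm is a hypothetical Assembler*, registers arbitrary): all six forms take same-format operands, and zip1/zip2, for example, interleave the low and high halves of their sources.

  // With v1.4s = {a0, a1, a2, a3} and v2.4s = {b0, b1, b2, b3}:
  assm->zip1(v0.V4S(), v1.V4S(), v2.V4S());  // v0.4s = {a0, b0, a1, b1}
  assm->zip2(v0.V4S(), v1.V4S(), v2.V4S());  // v0.4s = {a2, b2, a3, b3}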
+void Assembler::NEONShiftImmediate(const VRegister& vd, const VRegister& vn,
+ NEONShiftImmediateOp op, int immh_immb) {
+ DCHECK(AreSameFormat(vd, vn));
+ Instr q, scalar;
+ if (vn.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn,
+ int shift, NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ DCHECK((shift >= 0) && (shift < laneSizeInBits));
+ NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
+}
+
+void Assembler::NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn, int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ DCHECK((shift >= 1) && (shift <= laneSizeInBits));
+ NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
+}
+
+void Assembler::NEONShiftImmediateL(const VRegister& vd, const VRegister& vn,
+ int shift, NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ DCHECK((shift >= 0) && (shift < laneSizeInBits));
+ int immh_immb = (laneSizeInBits + shift) << 16;
+
+ DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
+ Instr q;
+ q = vn.IsD() ? 0 : NEON_Q;
+ Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONShiftImmediateN(const VRegister& vd, const VRegister& vn,
+ int shift, NEONShiftImmediateOp op) {
+ Instr q, scalar;
+ int laneSizeInBits = vd.LaneSizeInBits();
+ DCHECK((shift >= 1) && (shift <= laneSizeInBits));
+ int immh_immb = (2 * laneSizeInBits - shift) << 16;
+
+ if (vn.IsScalar()) {
+ DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
+ scalar = 0;
+ q = vd.IsD() ? 0 : NEON_Q;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
+}
+
+void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
+}
+
+void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
+}
+
+void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
+}
+
+void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
+}
+
+void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
+ sshll(vd, vn, 0);
+}
+
+void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
+ sshll2(vd, vn, 0);
+}
+
+void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
+ ushll(vd, vn, 0);
+}
+
+void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
+ ushll2(vd, vn, 0);
+}
+
+void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
+}
+
+void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
+}
+
+void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
+}
+
+void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
+}
+
+void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
+}
+
+void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
+}
+
+void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
+}
+
+void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
+}
+
+void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
+}
+
+void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+void Assembler::uaddw(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW);
+}
+
+void Assembler::uaddw2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
+}
+
+void Assembler::saddw(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW);
+}
+
+void Assembler::saddw2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
+}
+
+void Assembler::usubw(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW);
+}
+
+void Assembler::usubw2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
+}
+
+void Assembler::ssubw(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
+}
+
+void Assembler::ssubw2(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
+}
+
void Assembler::mov(const Register& rd, const Register& rm) {
// Moves involving the stack pointer are encoded as add immediate with
// second operand of zero. Otherwise, orr with first operand zr is
@@ -1784,342 +2275,1641 @@ void Assembler::mov(const Register& rd, const Register& rm) {
}
}
+void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1:
+ format = NEON_16B;
+ DCHECK(rn.IsW());
+ break;
+ case 2:
+ format = NEON_8H;
+ DCHECK(rn.IsW());
+ break;
+ case 4:
+ format = NEON_4S;
+ DCHECK(rn.IsW());
+ break;
+ default:
+ DCHECK_EQ(lane_size, 8);
+ DCHECK(rn.IsX());
+ format = NEON_2D;
+ break;
+ }
-void Assembler::mvn(const Register& rd, const Operand& operand) {
- orn(rd, AppropriateZeroRegFor(rd), operand);
+ DCHECK((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
}
+void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
+ DCHECK_GE(vn.SizeInBytes(), 4);
+ umov(rd, vn, vn_index);
+}
-void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
- DCHECK(rt.Is64Bits());
- Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
+ // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ switch (lane_size) {
+ case 1:
+ format = NEON_16B;
+ break;
+ case 2:
+ format = NEON_8H;
+ break;
+ default:
+ DCHECK_EQ(lane_size, 4);
+ DCHECK(rd.IsX());
+ format = NEON_4S;
+ break;
+ }
+ q = rd.IsW() ? 0 : NEON_Q;
+ DCHECK((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
}
+void Assembler::cls(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
+}
-void Assembler::msr(SystemRegister sysreg, const Register& rt) {
- DCHECK(rt.Is64Bits());
- Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+void Assembler::clz(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
}
+void Assembler::cnt(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
+}
-void Assembler::hint(SystemHint code) {
- Emit(HINT | ImmHint(code) | Rt(xzr));
+void Assembler::rev16(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
}
+void Assembler::rev32(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
+ Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
+}
-void Assembler::dmb(BarrierDomain domain, BarrierType type) {
- Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+void Assembler::rev64(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
}
+void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
+}
-void Assembler::dsb(BarrierDomain domain, BarrierType type) {
- Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+void Assembler::urecpe(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
}
+void Assembler::NEONAddlp(const VRegister& vd, const VRegister& vn,
+ NEON2RegMiscOp op) {
+ DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) || (op == NEON_SADALP) ||
+ (op == NEON_UADALP));
-void Assembler::isb() {
- Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+ DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
+ (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
}
+void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADDLP);
+}
-void Assembler::fmov(FPRegister fd, double imm) {
- DCHECK(fd.Is64Bits());
- DCHECK(IsImmFP64(imm));
- Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
+void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADDLP);
}
+void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADALP);
+}
-void Assembler::fmov(FPRegister fd, float imm) {
- DCHECK(fd.Is32Bits());
- DCHECK(IsImmFP32(imm));
- Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
+void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADALP);
}
+void Assembler::NEONAcrossLanesL(const VRegister& vd, const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
+ (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
+ (vn.Is4S() && vd.Is1D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
-void Assembler::fmov(Register rd, FPRegister fn) {
- DCHECK(rd.SizeInBits() == fn.SizeInBits());
- FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
- Emit(op | Rd(rd) | Rn(fn));
+void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_SADDLV);
}
+void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_UADDLV);
+}
-void Assembler::fmov(FPRegister fd, Register rn) {
- DCHECK(fd.SizeInBits() == rn.SizeInBits());
- FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
- Emit(op | Rd(fd) | Rn(rn));
+void Assembler::NEONAcrossLanes(const VRegister& vd, const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
+ (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
+ (vn.Is4S() && vd.Is1S()));
+ if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+ } else {
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+ }
}
+#define NEON_ACROSSLANES_LIST(V) \
+ V(fmaxv, NEON_FMAXV, vd.Is1S()) \
+ V(fminv, NEON_FMINV, vd.Is1S()) \
+ V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
+ V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
+ V(addv, NEON_ADDV, true) \
+ V(smaxv, NEON_SMAXV, true) \
+ V(sminv, NEON_SMINV, true) \
+ V(umaxv, NEON_UMAXV, true) \
+ V(uminv, NEON_UMINV, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
+ DCHECK(AS); \
+ NEONAcrossLanes(vd, vn, OP); \
+ }
+NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
+ ins(vd, vd_index, rn);
+}
+
+void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
+ // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ switch (lane_size) {
+ case 1:
+ format = NEON_16B;
+ DCHECK(rd.IsW());
+ break;
+ case 2:
+ format = NEON_8H;
+ DCHECK(rd.IsW());
+ break;
+ case 4:
+ format = NEON_4S;
+ DCHECK(rd.IsW());
+ break;
+ default:
+ DCHECK_EQ(lane_size, 8);
+ DCHECK(rd.IsX());
+ format = NEON_2D;
+ q = NEON_Q;
+ break;
+ }
-void Assembler::fmov(FPRegister fd, FPRegister fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
+ DCHECK((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
}
+void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
+ DCHECK(vd.IsScalar());
+ dup(vd, vn, vn_index);
+}
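As an illustrative sketch of the lane-move forms defined above (not part of this commit; assm is a hypothetical Assembler*, registers arbitrary): per the vn.VxT()/vn.T() convention noted in the comments, umov and ins pair b, h and s lanes with W registers and d lanes with X registers.

  assm->umov(w0, v1.V8H(), 3);  // zero-extend halfword lane 3 into w0.
  assm->mov(x2, v1.V2D(), 1);   // alias of umov for a 64-bit lane.
  assm->ins(v3.V4S(), 2, w4);   // insert w4 into S lane 2 (also reachable as mov).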
-void Assembler::fadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FADD);
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+ DCHECK(!vd.Is1D());
+ DCHECK_EQ(vd.Is2D(), rn.IsX());
+ Instr q = vd.IsD() ? 0 : NEON_Q;
+ Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
}
+void Assembler::ins(const VRegister& vd, int vd_index, const VRegister& vn,
+ int vn_index) {
+ DCHECK(AreSameFormat(vd, vn));
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1:
+ format = NEON_16B;
+ break;
+ case 2:
+ format = NEON_8H;
+ break;
+ case 4:
+ format = NEON_4S;
+ break;
+ default:
+ DCHECK_EQ(lane_size, 8);
+ format = NEON_2D;
+ break;
+ }
-void Assembler::fsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FSUB);
+ DCHECK((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ DCHECK((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
+ ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
}
+void Assembler::NEONTable(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEONTableOp op) {
+ DCHECK(vd.Is16B() || vd.Is8B());
+ DCHECK(vn.Is16B());
+ DCHECK(AreSameFormat(vd, vm));
+ Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
+}
-void Assembler::fmul(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMUL);
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBL_1v);
}
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+ const VRegister& vn2, const VRegister& vm) {
+ USE(vn2);
+ DCHECK(AreSameFormat(vn, vn2));
+ DCHECK(AreConsecutive(vn, vn2));
+ NEONTable(vd, vn, vm, NEON_TBL_2v);
+}
-void Assembler::fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+ const VRegister& vn2, const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2);
+ USE(vn3);
+ DCHECK(AreSameFormat(vn, vn2, vn3));
+ DCHECK(AreConsecutive(vn, vn2, vn3));
+ NEONTable(vd, vn, vm, NEON_TBL_3v);
}
+void Assembler::tbl(const VRegister& vd, const VRegister& vn,
+ const VRegister& vn2, const VRegister& vn3,
+ const VRegister& vn4, const VRegister& vm) {
+ USE(vn2);
+ USE(vn3);
+ USE(vn4);
+ DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
+ DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
+ NEONTable(vd, vn, vm, NEON_TBL_4v);
+}
-void Assembler::fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBX_1v);
}
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+ const VRegister& vn2, const VRegister& vm) {
+ USE(vn2);
+ DCHECK(AreSameFormat(vn, vn2));
+ DCHECK(AreConsecutive(vn, vn2));
+ NEONTable(vd, vn, vm, NEON_TBX_2v);
+}
-void Assembler::fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+ const VRegister& vn2, const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2);
+ USE(vn3);
+ DCHECK(AreSameFormat(vn, vn2, vn3));
+ DCHECK(AreConsecutive(vn, vn2, vn3));
+ NEONTable(vd, vn, vm, NEON_TBX_3v);
}
+void Assembler::tbx(const VRegister& vd, const VRegister& vn,
+ const VRegister& vn2, const VRegister& vn3,
+ const VRegister& vn4, const VRegister& vm) {
+ USE(vn2);
+ USE(vn3);
+ USE(vn4);
+ DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
+ DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
+ NEONTable(vd, vn, vm, NEON_TBX_4v);
+}
-void Assembler::fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn,
+ int vn_index) {
+ ins(vd, vd_index, vn, vn_index);
}
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
-void Assembler::fdiv(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FDIV);
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ DCHECK(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ DCHECK(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); }
-void Assembler::fmax(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMAX);
+// NEON structure loads and stores.
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
+ Instr addr_field = RnSP(addr.base());
+
+ if (addr.IsPostIndex()) {
+ static_assert(NEONLoadStoreMultiStructPostIndex ==
+ static_cast<NEONLoadStoreMultiStructPostIndexOp>(
+ NEONLoadStoreSingleStructPostIndex),
+ "Opcodes must match for NEON post index memop.");
+
+ addr_field |= NEONLoadStoreMultiStructPostIndex;
+ if (addr.offset() == 0) {
+ addr_field |= RmNot31(addr.regoffset());
+ } else {
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_field |= (0x1f << Rm_offset);
+ }
+ } else {
+ DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
+ }
+ return addr_field;
}
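For clarity, an illustrative sketch of the addressing modes this helper encodes (not part of this commit; assm is a hypothetical Assembler*, registers arbitrary): plain offset addressing must use offset 0, immediate post-index uses an offset implied by the number of registers transferred and is encoded with rm = 31, and post-index by a core register places that register in rm.

  assm->ld1(v0.V16B(), MemOperand(x0));                 // offset mode, offset must be 0.
  assm->ld1(v0.V16B(), MemOperand(x0, 16, PostIndex));  // immediate post-index: x0 += 16 (one Q register).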
+void Assembler::LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr, Instr op) {
+#ifdef DEBUG
+ // Assert that addressing mode is either offset (with immediate 0), post
+ // index by immediate of the size of the register list, or post index by a
+ // value in a core register.
+ if (addr.IsImmediateOffset()) {
+ DCHECK_EQ(addr.offset(), 0);
+ } else {
+ int offset = vt.SizeInBytes();
+ switch (op) {
+ case NEON_LD1_1v:
+ case NEON_ST1_1v:
+ offset *= 1;
+ break;
+ case NEONLoadStoreSingleStructLoad1:
+ case NEONLoadStoreSingleStructStore1:
+ case NEON_LD1R:
+ offset = (offset / vt.LaneCount()) * 1;
+ break;
+
+ case NEON_LD1_2v:
+ case NEON_ST1_2v:
+ case NEON_LD2:
+ case NEON_ST2:
+ offset *= 2;
+ break;
+ case NEONLoadStoreSingleStructLoad2:
+ case NEONLoadStoreSingleStructStore2:
+ case NEON_LD2R:
+ offset = (offset / vt.LaneCount()) * 2;
+ break;
+
+ case NEON_LD1_3v:
+ case NEON_ST1_3v:
+ case NEON_LD3:
+ case NEON_ST3:
+ offset *= 3;
+ break;
+ case NEONLoadStoreSingleStructLoad3:
+ case NEONLoadStoreSingleStructStore3:
+ case NEON_LD3R:
+ offset = (offset / vt.LaneCount()) * 3;
+ break;
-void Assembler::fmaxnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMAXNM);
+ case NEON_LD1_4v:
+ case NEON_ST1_4v:
+ case NEON_LD4:
+ case NEON_ST4:
+ offset *= 4;
+ break;
+ case NEONLoadStoreSingleStructLoad4:
+ case NEONLoadStoreSingleStructStore4:
+ case NEON_LD4R:
+ offset = (offset / vt.LaneCount()) * 4;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ DCHECK(!addr.regoffset().Is(NoReg) || addr.offset() == offset);
+ }
+#else
+ USE(vt);
+ USE(addr);
+ USE(op);
+#endif
}
+void Assembler::LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ DCHECK(vt.IsVector() || vt.Is1D());
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
-void Assembler::fmin(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMIN);
+void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
}
+void Assembler::ld1(const VRegister& vt, const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_LD1_1v);
+}
-void Assembler::fminnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- FPDataProcessing2Source(fd, fn, fm, FMINNM);
+void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD1_2v);
}
+void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD1_3v);
+}
-void Assembler::fabs(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FABS);
+void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD1_4v);
}
+void Assembler::ld2(const VRegister& vt, const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD2);
+}
-void Assembler::fneg(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FNEG);
+void Assembler::ld2(const VRegister& vt, const VRegister& vt2, int lane,
+ const MemOperand& src) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
}
+void Assembler::ld2r(const VRegister& vt, const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
+}
-void Assembler::fsqrt(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FSQRT);
+void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD3);
}
+void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, int lane, const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
+}
-void Assembler::frinta(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTA);
+void Assembler::ld3r(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
}
+void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD4);
+}
-void Assembler::frintm(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTM);
+void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4, int lane,
+ const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
}
+void Assembler::ld4r(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
+}
-void Assembler::frintn(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTN);
+void Assembler::st1(const VRegister& vt, const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_ST1_1v);
}
+void Assembler::st1(const VRegister& vt, const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_ST1_2v);
+}
-void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTP);
+void Assembler::st1(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_ST1_3v);
}
+void Assembler::st1(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_ST1_4v);
+}
-void Assembler::frintz(const FPRegister& fd,
- const FPRegister& fn) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- FPDataProcessing1Source(fd, fn, FRINTZ);
+void Assembler::st2(const VRegister& vt, const VRegister& vt2,
+ const MemOperand& dst) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, dst, NEON_ST2);
}
+void Assembler::st2(const VRegister& vt, const VRegister& vt2, int lane,
+ const MemOperand& dst) {
+ USE(vt2);
+ DCHECK(AreSameFormat(vt, vt2));
+ DCHECK(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
+}
-void Assembler::fcmp(const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(fn.SizeInBits() == fm.SizeInBits());
- Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+void Assembler::st3(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const MemOperand& dst) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, dst, NEON_ST3);
+}
+
+void Assembler::st3(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, int lane, const MemOperand& dst) {
+ USE(vt2);
+ USE(vt3);
+ DCHECK(AreSameFormat(vt, vt2, vt3));
+ DCHECK(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
+}
+
+void Assembler::st4(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4,
+ const MemOperand& dst) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, dst, NEON_ST4);
+}
+
+void Assembler::st4(const VRegister& vt, const VRegister& vt2,
+ const VRegister& vt3, const VRegister& vt4, int lane,
+ const MemOperand& dst) {
+ USE(vt2);
+ USE(vt3);
+ USE(vt4);
+ DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
+ DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
+}
+
+void Assembler::LoadStoreStructSingle(const VRegister& vt, uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+
+ // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ unsigned lane_size = vt.LaneSizeInBytes();
+ DCHECK_LT(lane, kQRegSize / lane_size);
+
+ // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
+ // S and size fields.
+ lane *= lane_size;
+
+ // Encodings for S[0]/D[0] and S[2]/D[1] are distinguished using the least-
+ // significant bit of the size field, so we increment lane here to account for
+ // that.
+ if (lane_size == 8) lane++;
+
+ Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
+ Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
+ Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
+
+ Instr instr = op;
+ switch (lane_size) {
+ case 1:
+ instr |= NEONLoadStoreSingle_b;
+ break;
+ case 2:
+ instr |= NEONLoadStoreSingle_h;
+ break;
+ case 4:
+ instr |= NEONLoadStoreSingle_s;
+ break;
+ default:
+ DCHECK_EQ(lane_size, 8U);
+ instr |= NEONLoadStoreSingle_d;
+ }
+
+ Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
+}
+
+void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) {
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
+}
+
+void Assembler::ld1r(const VRegister& vt, const MemOperand& src) {
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
+}
+
+void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
+}
+
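To illustrate the single-structure forms above (not part of this commit; assm is a hypothetical Assembler*, registers arbitrary): the lane variants move exactly one element, and the ld1r/ld2r/ld3r/ld4r variants replicate one loaded element to every lane of the destination.

  assm->ld1(v0.V4S(), 2, MemOperand(x0));   // load one S element into lane 2 of v0.
  assm->ld1r(v1.V4S(), MemOperand(x0));     // load one S element, replicate to all four lanes.
  assm->st1(v0.V16B(), 5, MemOperand(x1));  // store byte lane 5 of v0.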
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
-void Assembler::fcmp(const FPRegister& fn,
- double value) {
+void Assembler::fmov(const VRegister& vd, double imm) {
+ if (vd.IsScalar()) {
+ DCHECK(vd.Is1D());
+ Emit(FMOV_d_imm | Rd(vd) | ImmFP(imm));
+ } else {
+ DCHECK(vd.Is2D());
+ Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
+ Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+void Assembler::fmov(const VRegister& vd, float imm) {
+ if (vd.IsScalar()) {
+ DCHECK(vd.Is1S());
+ Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm));
+ } else {
+ DCHECK(vd.Is2S() || vd.Is4S());
+ Instr op = NEONModifiedImmediate_MOVI;
+ Instr q = vd.Is4S() ? NEON_Q : 0;
+ Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+void Assembler::fmov(const Register& rd, const VRegister& fn) {
+ DCHECK_EQ(rd.SizeInBits(), fn.SizeInBits());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(fn));
+}
+
+void Assembler::fmov(const VRegister& vd, const Register& rn) {
+ DCHECK_EQ(vd.SizeInBits(), rn.SizeInBits());
+ FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(vd) | Rn(rn));
+}
+
+void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
+ DCHECK_EQ(vd.SizeInBits(), vn.SizeInBits());
+ Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
+}
+
+void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
+ DCHECK((index == 1) && vd.Is1D() && rn.IsX());
+ USE(index);
+ Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
+}
+
+void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
+ DCHECK((index == 1) && vn.Is1D() && rd.IsX());
+ USE(index);
+ Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
+}
+
+void Assembler::fmadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+}
+
+void Assembler::fmsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+}
+
+void Assembler::fnmadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+}
+
+void Assembler::fnmsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+}
+
+void Assembler::fnmul(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ DCHECK(AreSameSizeAndType(vd, vn, vm));
+ Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fcmp(const VRegister& fn, const VRegister& fm) {
+ DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
+ Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+}
+
+void Assembler::fcmp(const VRegister& fn, double value) {
USE(value);
// Although the fcmp instruction can strictly only take an immediate value of
// +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
// affect the result of the comparison.
- DCHECK(value == 0.0);
+ DCHECK_EQ(value, 0.0);
Emit(FPType(fn) | FCMP_zero | Rn(fn));
}
-
-void Assembler::fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond) {
- DCHECK(fn.SizeInBits() == fm.SizeInBits());
+void Assembler::fccmp(const VRegister& fn, const VRegister& fm,
+ StatusFlags nzcv, Condition cond) {
+ DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}
-
-void Assembler::fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond) {
- DCHECK(fd.SizeInBits() == fn.SizeInBits());
- DCHECK(fd.SizeInBits() == fm.SizeInBits());
+void Assembler::fcsel(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, Condition cond) {
+ DCHECK_EQ(fd.SizeInBits(), fn.SizeInBits());
+ DCHECK_EQ(fd.SizeInBits(), fm.SizeInBits());
Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}
-
-void Assembler::FPConvertToInt(const Register& rd,
- const FPRegister& fn,
- FPIntegerConvertOp op) {
- Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
+void Assembler::NEONFPConvertToInt(const Register& rd, const VRegister& vn,
+ Instr op) {
+ Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
}
+void Assembler::NEONFPConvertToInt(const VRegister& vd, const VRegister& vn,
+ Instr op) {
+ if (vn.IsScalar()) {
+ DCHECK((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ }
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+}
-void Assembler::fcvt(const FPRegister& fd,
- const FPRegister& fn) {
- if (fd.Is64Bits()) {
- // Convert float to double.
- DCHECK(fn.Is32Bits());
- FPDataProcessing1Source(fd, fn, FCVT_ds);
+void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
+ FPDataProcessing1SourceOp op;
+ if (vd.Is1D()) {
+ DCHECK(vn.Is1S() || vn.Is1H());
+ op = vn.Is1S() ? FCVT_ds : FCVT_dh;
+ } else if (vd.Is1S()) {
+ DCHECK(vn.Is1D() || vn.Is1H());
+ op = vn.Is1D() ? FCVT_sd : FCVT_sh;
} else {
- // Convert double to float.
- DCHECK(fn.Is64Bits());
- FPDataProcessing1Source(fd, fn, FCVT_sd);
+ DCHECK(vd.Is1H());
+ DCHECK(vn.Is1D() || vn.Is1S());
+ op = vn.Is1D() ? FCVT_hd : FCVT_hs;
}
+ FPDataProcessing1Source(vd, vn, op);
}
+void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
-void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTAU);
+void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
}
+void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
-void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTAS);
+void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
}
+void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
+ Instr format = 1 << NEONSize_offset;
+ if (vd.IsScalar()) {
+ DCHECK(vd.Is1S() && vn.Is1D());
+ Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
+ } else {
+ DCHECK(vd.Is2S() && vn.Is2D());
+ Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+ }
+}
+
+void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.Is4S() && vn.Is2D());
+ Instr format = 1 << NEONSize_offset;
+ Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+}
+
+#define NEON_FP2REGMISC_FCVT_LIST(V) \
+ V(fcvtnu, NEON_FCVTNU, FCVTNU) \
+ V(fcvtns, NEON_FCVTNS, FCVTNS) \
+ V(fcvtpu, NEON_FCVTPU, FCVTPU) \
+ V(fcvtps, NEON_FCVTPS, FCVTPS) \
+ V(fcvtmu, NEON_FCVTMU, FCVTMU) \
+ V(fcvtms, NEON_FCVTMS, FCVTMS) \
+ V(fcvtau, NEON_FCVTAU, FCVTAU) \
+ V(fcvtas, NEON_FCVTAS, FCVTAS)
+
+#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \
+ void Assembler::FN(const Register& rd, const VRegister& vn) { \
+ NEONFPConvertToInt(rd, vn, SCA_OP); \
+ } \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
+ NEONFPConvertToInt(vd, vn, VEC_OP); \
+ }
+NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
+#undef DEFINE_ASM_FUNCS
-void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTMU);
+void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
+ DCHECK_GE(fbits, 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_SCVTF);
+ } else {
+ DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
+ }
+}
+
+void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
+ DCHECK_GE(fbits, 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_UCVTF);
+ } else {
+ DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
+ }
+}
+
+void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
+ DCHECK_GE(fbits, 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+void Assembler::ucvtf(const VRegister& fd, const Register& rn, int fbits) {
+ DCHECK_GE(fbits, 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
}
+void Assembler::NEON3Same(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3SameOp vop) {
+ DCHECK(AreSameFormat(vd, vn, vm));
+ DCHECK(vd.IsVector() || !vd.IsQ());
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONFP3Same(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, Instr op) {
+ DCHECK(AreSameFormat(vd, vn, vm));
+ Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+#define NEON_FP2REGMISC_LIST(V) \
+ V(fabs, NEON_FABS, FABS) \
+ V(fneg, NEON_FNEG, FNEG) \
+ V(fsqrt, NEON_FSQRT, FSQRT) \
+ V(frintn, NEON_FRINTN, FRINTN) \
+ V(frinta, NEON_FRINTA, FRINTA) \
+ V(frintp, NEON_FRINTP, FRINTP) \
+ V(frintm, NEON_FRINTM, FRINTM) \
+ V(frintx, NEON_FRINTX, FRINTX) \
+ V(frintz, NEON_FRINTZ, FRINTZ) \
+ V(frinti, NEON_FRINTI, FRINTI) \
+ V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
+ V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
+ Instr op; \
+ if (vd.IsScalar()) { \
+ DCHECK(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP2RegMisc(vd, vn, op); \
+ }
+NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
+ DCHECK((vd.Is8H() && vn.Is8B() && shift == 8) ||
+ (vd.Is4S() && vn.Is4H() && shift == 16) ||
+ (vd.Is2D() && vn.Is2S() && shift == 32));
+ USE(shift);
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
-void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTMS);
+void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
+ USE(shift);
+ DCHECK((vd.Is8H() && vn.Is16B() && shift == 8) ||
+ (vd.Is4S() && vn.Is8H() && shift == 16) ||
+ (vd.Is2D() && vn.Is4S() && shift == 32));
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
}
+void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
+ NEON2RegMiscOp vop, double value) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK_EQ(value, 0.0);
+ USE(value);
-void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTNU);
+ Instr op = vop;
+ if (vd.IsScalar()) {
+ DCHECK(vd.Is1S() || vd.Is1D());
+ op |= NEON_Q | NEONScalar;
+ } else {
+ DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());
+ }
+
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
}
+void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
+}
-void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTNS);
+void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
}
+void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
+}
-void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTZU);
+void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
}
+void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
+}
-void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
- FPConvertToInt(rd, fn, FCVTZS);
+void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsScalar());
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is1S() || vd.Is1D());
+ Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
}
+void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) {
+ DCHECK(vn.Is1S() || vn.Is1D());
+ DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
-void Assembler::scvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits) {
+void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
+ DCHECK_GE(fbits, 0);
if (fbits == 0) {
- Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
} else {
- Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
- Rd(fd));
+ DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
}
}
+void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) {
+ DCHECK(vn.Is1S() || vn.Is1D());
+ DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
-void Assembler::ucvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits) {
+void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
+ DCHECK_GE(fbits, 0);
if (fbits == 0) {
- Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
} else {
- Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
- Rd(fd));
+ DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
}
}
+void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
+ Instr op) {
+ DCHECK(AreSameFormat(vd, vn));
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEON2RegMisc(const VRegister& vd, const VRegister& vn,
+ NEON2RegMiscOp vop, int value) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK_EQ(value, 0);
+ USE(value);
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
+}
+
+void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
+}
+
+void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
+}
+
+void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
+}
+
+void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
+}
+
+#define NEON_3SAME_LIST(V) \
+ V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
+ V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
+ V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
+ V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
+ V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
+ V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
+ V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
+ V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
+ V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
+ V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
+ V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
+ V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
+ V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
+ V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
+ V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
+ V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
+ V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
+ V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
+ V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
+ V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
+ V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
+ V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
+ V(uqadd, NEON_UQADD, true) \
+ V(sqadd, NEON_SQADD, true) \
+ V(uqsub, NEON_UQSUB, true) \
+ V(sqsub, NEON_SQSUB, true) \
+ V(sqshl, NEON_SQSHL, true) \
+ V(uqshl, NEON_UQSHL, true) \
+ V(sqrshl, NEON_SQRSHL, true) \
+ V(uqrshl, NEON_UQRSHL, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm) { \
+ DCHECK(AS); \
+ NEON3Same(vd, vn, vm, OP); \
+ }
+NEON_3SAME_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
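
For reference, DEFINE_ASM_FUNC stamps out one three-operand emitter per
NEON_3SAME_LIST entry; the add entry, for example, expands to roughly:

    void Assembler::add(const VRegister& vd, const VRegister& vn,
                        const VRegister& vm) {
      DCHECK(vd.IsVector() || vd.Is1D());
      NEON3Same(vd, vn, vm, NEON_ADD);
    }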
+
+#define NEON_FP3SAME_LIST(V) \
+ V(fadd, NEON_FADD, FADD) \
+ V(fsub, NEON_FSUB, FSUB) \
+ V(fmul, NEON_FMUL, FMUL) \
+ V(fdiv, NEON_FDIV, FDIV) \
+ V(fmax, NEON_FMAX, FMAX) \
+ V(fmaxnm, NEON_FMAXNM, FMAXNM) \
+ V(fmin, NEON_FMIN, FMIN) \
+ V(fminnm, NEON_FMINNM, FMINNM) \
+ V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \
+ V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \
+ V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
+ V(fabd, NEON_FABD, NEON_FABD_scalar) \
+ V(fmla, NEON_FMLA, 0) \
+ V(fmls, NEON_FMLS, 0) \
+ V(facge, NEON_FACGE, NEON_FACGE_scalar) \
+ V(facgt, NEON_FACGT, NEON_FACGT_scalar) \
+ V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \
+ V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \
+ V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \
+ V(faddp, NEON_FADDP, 0) \
+ V(fmaxp, NEON_FMAXP, 0) \
+ V(fminp, NEON_FMINP, 0) \
+ V(fmaxnmp, NEON_FMAXNMP, 0) \
+ V(fminnmp, NEON_FMINNMP, 0)
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm) { \
+ Instr op; \
+ if ((SCA_OP != 0) && vd.IsScalar()) { \
+ DCHECK(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ DCHECK(vd.IsVector()); \
+ DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP3Same(vd, vn, vm, op); \
+ }
+NEON_FP3SAME_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::addp(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is1D() && vn.Is2D()));
+ Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
+ DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
+}
+
+void Assembler::mov(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ orr(vd.V8B(), vn.V8B(), vn.V8B());
+ } else {
+ DCHECK(vd.IsQ());
+ orr(vd.V16B(), vn.V16B(), vn.V16B());
+ }
+}
+
+void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
+}
+
+void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
+ const int shift_amount) {
+ DCHECK((shift == LSL) || (shift == MSL));
+ if (vd.Is2D() || vd.Is1D()) {
+ DCHECK_EQ(shift_amount, 0);
+ int imm8 = 0;
+ for (int i = 0; i < 8; ++i) {
+ int byte = (imm >> (i * 8)) & 0xff;
+ DCHECK((byte == 0) || (byte == 0xff));
+ if (byte == 0xff) {
+ imm8 |= (1 << i);
+ }
+ }
+ Instr q = vd.Is2D() ? NEON_Q : 0;
+ Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
+ ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+ } else if (shift == LSL) {
+ NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ } else {
+ NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ }
+}
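
As a worked example of the 2D/1D path above, every byte of the 64-bit
immediate must be 0x00 or 0xff, and the resulting per-byte mask becomes imm8
(illustrative call, any V register would do):

    // 0x00ff00ff00ff00ff has 0xff in bytes 0, 2, 4 and 6, so the loop above
    // yields imm8 = 0b01010101 = 0x55, emitted with cmode 0xe.
    movi(v0.V2D(), 0x00ff00ff00ff00ffULL);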
+
+void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ not_(vd.V8B(), vn.V8B());
+ } else {
+ DCHECK(vd.IsQ());
+ not_(vd.V16B(), vn.V16B());
+ }
+}
+
+void Assembler::mvni(const VRegister& vd, const int imm8, Shift shift,
+ const int shift_amount) {
+ DCHECK((shift == LSL) || (shift == MSL));
+ if (shift == LSL) {
+ NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
+ } else {
+ NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
+ }
+}
+
+void Assembler::NEONFPByElement(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int vm_index,
+ NEONByIndexedElementOp vop) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
+ (vd.Is1D() && vm.Is1D()));
+ DCHECK((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)));
+
+ Instr op = vop;
+ int index_num_bits = vm.Is1S() ? 2 : 1;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ }
+
+ Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) |
+ Rn(vn) | Rd(vd));
+}
+
+void Assembler::NEONByElement(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int vm_index,
+ NEONByIndexedElementOp vop) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
+ (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
+ DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
+ Rd(vd));
+}
+
+void Assembler::NEONByElementL(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int vm_index,
+ NEONByIndexedElementOp vop) {
+ DCHECK((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
+ (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
+ (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
+ (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
+ (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
+ (vd.Is1D() && vn.Is1S() && vm.Is1S()));
+
+ DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
+ Rd(vd));
+}
+
+#define NEON_BYELEMENT_LIST(V) \
+ V(mul, NEON_MUL_byelement, vn.IsVector()) \
+ V(mla, NEON_MLA_byelement, vn.IsVector()) \
+ V(mls, NEON_MLS_byelement, vn.IsVector()) \
+ V(sqdmulh, NEON_SQDMULH_byelement, true) \
+ V(sqrdmulh, NEON_SQRDMULH_byelement, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm, int vm_index) { \
+ DCHECK(AS); \
+ NEONByElement(vd, vn, vm, vm_index, OP); \
+ }
+NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define NEON_FPBYELEMENT_LIST(V) \
+ V(fmul, NEON_FMUL_byelement) \
+ V(fmla, NEON_FMLA_byelement) \
+ V(fmls, NEON_FMLS_byelement) \
+ V(fmulx, NEON_FMULX_byelement)
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm, int vm_index) { \
+ NEONFPByElement(vd, vn, vm, vm_index, OP); \
+ }
+NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define NEON_BYELEMENT_LONG_LIST(V) \
+ V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+ void Assembler::FN(const VRegister& vd, const VRegister& vn, \
+ const VRegister& vm, int vm_index) { \
+ DCHECK(AS); \
+ NEONByElementL(vd, vn, vm, vm_index, OP); \
+ }
+NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SUQADD);
+}
+
+void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_USQADD);
+}
+
+void Assembler::abs(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_ABS);
+}
+
+void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQABS);
+}
+
+void Assembler::neg(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_NEG);
+}
+
+void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQNEG);
+}
+
+void Assembler::NEONXtn(const VRegister& vd, const VRegister& vn,
+ NEON2RegMiscOp vop) {
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
+ format = VFormat(vd);
+ }
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() && vd.IsD());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
+void Assembler::not_(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
+ DCHECK(AreSameFormat(vd, vn));
+ DCHECK(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+void Assembler::ext(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int index) {
+ DCHECK(AreSameFormat(vd, vn, vm));
+ DCHECK(vd.Is8B() || vd.Is16B());
+ DCHECK((0 <= index) && (index < vd.LaneCount()));
+ Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
+ Instr q, scalar;
+
+ // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1:
+ format = NEON_16B;
+ break;
+ case 2:
+ format = NEON_8H;
+ break;
+ case 4:
+ format = NEON_4S;
+ break;
+ default:
+ DCHECK_EQ(lane_size, 8);
+ format = NEON_2D;
+ break;
+ }
+
+ if (vd.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ DCHECK(!vd.Is1D());
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
+ Rd(vd));
+}
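
Both vn forms mentioned in the comment above are accepted; sketches of
typical calls (register choices are arbitrary):

    dup(v0.V4S(), v1.V4S(), 2);  // broadcast lane 2 of v1 to every S lane of v0
    dup(s0, v1.V4S(), 3);        // scalar form: copy lane 3 of v1 into s0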
void Assembler::dcptr(Label* label) {
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -2164,31 +3954,13 @@ void Assembler::dcptr(Label* label) {
}
}
-
-// Note:
// Below, a difference in case for the same letter indicates a
-// negated bit.
-// If b is 1, then B is 0.
-Instr Assembler::ImmFP32(float imm) {
- DCHECK(IsImmFP32(imm));
- // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
- uint32_t bits = float_to_rawbits(imm);
- // bit7: a000.0000
- uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
- // bit6: 0b00.0000
- uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
- // bit5_to_0: 00cd.efgh
- uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
-
- return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
-}
-
-
-Instr Assembler::ImmFP64(double imm) {
+// negated bit. If b is 1, then B is 0.
+uint32_t Assembler::FPToImm8(double imm) {
DCHECK(IsImmFP64(imm));
// bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000
- uint64_t bits = double_to_rawbits(imm);
+ uint64_t bits = bit_cast<uint64_t>(imm);
// bit7: a000.0000
uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
@@ -2196,14 +3968,16 @@ Instr Assembler::ImmFP64(double imm) {
// bit5_to_0: 00cd.efgh
uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
- return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}
+Instr Assembler::ImmFP(double imm) { return FPToImm8(imm) << ImmFP_offset; }
+Instr Assembler::ImmNEONFP(double imm) {
+ return ImmNEONabcdefgh(FPToImm8(imm));
+}
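
A worked example of the imm8 extraction above:

    // bit_cast<uint64_t>(1.0) == 0x3ff0000000000000
    //   bit7      = 0     (sign bit clear)
    //   bit6      = 0x40  (bit 61 of the exponent is set)
    //   bit5_to_0 = 0x30  ((bits >> 48) & 0x3f)
    // so FPToImm8(1.0) == 0x70, the imm8 used to encode "fmov d0, #1.0".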
// Code generation helpers.
-void Assembler::MoveWide(const Register& rd,
- uint64_t imm,
- int shift,
+void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
MoveWideImmediateOp mov_op) {
// Ignore the top 32 bits of an immediate if we're moving to a W register.
if (rd.Is32Bits()) {
@@ -2245,13 +4019,9 @@ void Assembler::MoveWide(const Register& rd,
ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
-
-void Assembler::AddSub(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op) {
- DCHECK(rd.SizeInBits() == rn.SizeInBits());
+void Assembler::AddSub(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S, AddSubOp op) {
+ DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
DCHECK(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
@@ -2260,8 +4030,8 @@ void Assembler::AddSub(const Register& rd,
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
- DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
- DCHECK(operand.shift() != ROR);
+ DCHECK_EQ(operand.reg().SizeInBits(), rd.SizeInBits());
+ DCHECK_NE(operand.shift(), ROR);
// For instructions of the form:
// add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
@@ -2283,39 +4053,34 @@ void Assembler::AddSub(const Register& rd,
}
}
-
-void Assembler::AddSubWithCarry(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+void Assembler::AddSubWithCarry(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op) {
- DCHECK(rd.SizeInBits() == rn.SizeInBits());
- DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
+ DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
+ DCHECK_EQ(rd.SizeInBits(), operand.reg().SizeInBits());
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
DCHECK(!operand.NeedsRelocation(this));
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}
-
void Assembler::hlt(int code) {
DCHECK(is_uint16(code));
Emit(HLT | ImmException(code));
}
-
void Assembler::brk(int code) {
DCHECK(is_uint16(code));
Emit(BRK | ImmException(code));
}
-
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
- DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
- STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ static_assert(sizeof(pad) == kInstructionSize,
+ "Size of padding must match instruction size.");
EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
}
@@ -2349,7 +4114,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
#endif
if (params & BREAK) {
- hlt(kImmExceptionIsDebug);
+ brk(0);
}
}
@@ -2432,33 +4197,75 @@ void Assembler::DataProcessing1Source(const Register& rd,
Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}
-
-void Assembler::FPDataProcessing1Source(const FPRegister& fd,
- const FPRegister& fn,
+void Assembler::FPDataProcessing1Source(const VRegister& vd,
+ const VRegister& vn,
FPDataProcessing1SourceOp op) {
- Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
+ Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
}
-
-void Assembler::FPDataProcessing2Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
+void Assembler::FPDataProcessing2Source(const VRegister& fd,
+ const VRegister& fn,
+ const VRegister& fm,
FPDataProcessing2SourceOp op) {
DCHECK(fd.SizeInBits() == fn.SizeInBits());
DCHECK(fd.SizeInBits() == fm.SizeInBits());
Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}
-
-void Assembler::FPDataProcessing3Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa,
+void Assembler::FPDataProcessing3Source(const VRegister& fd,
+ const VRegister& fn,
+ const VRegister& fm,
+ const VRegister& fa,
FPDataProcessing3SourceOp op) {
DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}
+void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op) {
+ DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
+ vd.Is4S());
+ DCHECK((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
+ (left_shift == 24));
+ DCHECK(is_uint8(imm8));
+
+ int cmode_1, cmode_2, cmode_3;
+ if (vd.Is8B() || vd.Is16B()) {
+ DCHECK_EQ(op, NEONModifiedImmediate_MOVI);
+ cmode_1 = 1;
+ cmode_2 = 1;
+ cmode_3 = 1;
+ } else {
+ cmode_1 = (left_shift >> 3) & 1;
+ cmode_2 = left_shift >> 4;
+ cmode_3 = 0;
+ if (vd.Is4H() || vd.Is8H()) {
+ DCHECK((left_shift == 0) || (left_shift == 8));
+ cmode_3 = 1;
+ }
+ }
+ int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
+
+ Instr q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
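
To illustrate the cmode computation above: a 4H or 8H destination with
left_shift == 8 gives cmode_1 = 1, cmode_2 = 0 and cmode_3 = 1, so
cmode = 0b1010 (the 16-bit shifted-immediate MOVI form), while byte
destinations always produce cmode = 0b1110.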
+
+void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op) {
+ DCHECK(vd.Is2S() || vd.Is4S());
+ DCHECK((shift_amount == 8) || (shift_amount == 16));
+ DCHECK(is_uint8(imm8));
+
+ int cmode_0 = (shift_amount >> 4) & 1;
+ int cmode = 0xc | cmode_0;
+
+ Instr q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
void Assembler::EmitShift(const Register& rd,
const Register& rn,
@@ -2558,7 +4365,7 @@ void Assembler::LoadStore(const CPURegister& rt,
Instr memop = op | Rt(rt) | RnSP(addr.base());
if (addr.IsImmediateOffset()) {
- LSDataSize size = CalcLSDataSize(op);
+ unsigned size = CalcLSDataSize(op);
if (IsImmLSScaled(addr.offset(), size)) {
int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode.
@@ -2611,14 +4418,12 @@ bool Assembler::IsImmLSUnscaled(int64_t offset) {
return is_int9(offset);
}
-
-bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
+bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_uint12(offset >> size);
}
-
-bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
+bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_int7(offset >> size);
}
@@ -2628,6 +4433,8 @@ bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstructionSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
+ DCHECK_GT(offset, 0);
+ offset >>= kLoadLiteralScaleLog2;
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
@@ -2759,7 +4566,7 @@ bool Assembler::IsImmLogical(uint64_t value,
}
// If the repeat period d is not a power of two, it can't be encoded.
- if (!IS_POWER_OF_TWO(d)) {
+ if (!base::bits::IsPowerOfTwo(d)) {
return false;
}
@@ -2849,7 +4656,7 @@ bool Assembler::IsImmConditionalCompare(int64_t immediate) {
bool Assembler::IsImmFP32(float imm) {
// Valid values will have the form:
// aBbb.bbbc.defg.h000.0000.0000.0000.0000
- uint32_t bits = float_to_rawbits(imm);
+ uint32_t bits = bit_cast<uint32_t>(imm);
// bits[19..0] are cleared.
if ((bits & 0x7ffff) != 0) {
return false;
@@ -2874,7 +4681,7 @@ bool Assembler::IsImmFP64(double imm) {
// Valid values will have the form:
// aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000
- uint64_t bits = double_to_rawbits(imm);
+ uint64_t bits = bit_cast<uint64_t>(imm);
// bits[47..0] are cleared.
if ((bits & 0xffffffffffffL) != 0) {
return false;
@@ -2908,9 +4715,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -2957,6 +4762,8 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ bool write_reloc_info = true;
+
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
@@ -2972,27 +4779,20 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
- constpool_.RecordEntry(data, rmode);
+ write_reloc_info = constpool_.RecordEntry(data, rmode);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
}
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNone(rmode) && write_reloc_info) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(reinterpret_cast<byte*>(pc_), rmode,
- RecordedAstId().ToInt(), NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
+ reloc_info_writer.Write(&rinfo);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index e4ca410abd..cc9315458d 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -13,6 +13,7 @@
#include "src/arm64/constants-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
+#include "src/base/optional.h"
#include "src/globals.h"
#include "src/utils.h"
@@ -55,7 +56,9 @@ namespace internal {
#define SIMD128_REGISTERS(V) \
V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
- V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
+ V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15) \
+ V(q16) V(q17) V(q18) V(q19) V(q20) V(q21) V(q22) V(q23) \
+ V(q24) V(q25) V(q26) V(q27) V(q28) V(q29) V(q30) V(q31)
#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
@@ -67,11 +70,10 @@ namespace internal {
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
-// Some CPURegister methods can return Register and FPRegister types, so we
+// Some CPURegister methods can return Register and VRegister types, so we
// need to declare them in advance.
struct Register;
-struct FPRegister;
-
+struct VRegister;
struct CPURegister {
enum Code {
@@ -87,17 +89,22 @@ struct CPURegister {
// which are always zero-initialized before any constructors are called.
kInvalid = 0,
kRegister,
- kFPRegister,
+ kVRegister,
kNoRegister
};
constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {}
- constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type)
- : reg_code(reg_code), reg_size(reg_size), reg_type(reg_type) {}
+ constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type,
+ int lane_count = 1)
+ : reg_code(reg_code),
+ reg_size(reg_size),
+ reg_type(reg_type),
+ lane_count(lane_count) {}
- static CPURegister Create(int code, int size, RegisterType type) {
- CPURegister r = {code, size, type};
+ static CPURegister Create(int reg_code, int reg_size, RegisterType reg_type,
+ int lane_count = 1) {
+ CPURegister r = {reg_code, reg_size, reg_type, lane_count};
return r;
}
@@ -106,12 +113,15 @@ struct CPURegister {
RegList Bit() const;
int SizeInBits() const;
int SizeInBytes() const;
+ bool Is8Bits() const;
+ bool Is16Bits() const;
bool Is32Bits() const;
bool Is64Bits() const;
+ bool Is128Bits() const;
bool IsValid() const;
bool IsValidOrNone() const;
bool IsValidRegister() const;
- bool IsValidFPRegister() const;
+ bool IsValidVRegister() const;
bool IsNone() const;
bool Is(const CPURegister& other) const;
bool Aliases(const CPURegister& other) const;
@@ -120,12 +130,34 @@ struct CPURegister {
bool IsSP() const;
bool IsRegister() const;
- bool IsFPRegister() const;
+ bool IsVRegister() const;
+
+ bool IsFPRegister() const { return IsS() || IsD(); }
+
+ bool IsW() const { return IsValidRegister() && Is32Bits(); }
+ bool IsX() const { return IsValidRegister() && Is64Bits(); }
+
+ // These assertions ensure that the size and type of the register are as
+ // described. They do not consider the number of lanes that make up a vector.
+  // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but
+  // IsD() does not imply Is1D() or Is8B().
+  // Check the number of lanes, i.e. the format of the vector, using methods
+  // such as Is8B(), Is1D(), etc. in the VRegister class.
+ bool IsV() const { return IsVRegister(); }
+ bool IsB() const { return IsV() && Is8Bits(); }
+ bool IsH() const { return IsV() && Is16Bits(); }
+ bool IsS() const { return IsV() && Is32Bits(); }
+ bool IsD() const { return IsV() && Is64Bits(); }
+ bool IsQ() const { return IsV() && Is128Bits(); }
Register X() const;
Register W() const;
- FPRegister D() const;
- FPRegister S() const;
+ VRegister V() const;
+ VRegister B() const;
+ VRegister H() const;
+ VRegister D() const;
+ VRegister S() const;
+ VRegister Q() const;
bool IsSameSizeAndType(const CPURegister& other) const;
@@ -136,6 +168,7 @@ struct CPURegister {
int reg_code;
int reg_size;
RegisterType reg_type;
+ int lane_count;
};
@@ -190,7 +223,7 @@ struct Register : public CPURegister {
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
-struct FPRegister : public CPURegister {
+struct VRegister : public CPURegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -199,41 +232,123 @@ struct FPRegister : public CPURegister {
kCode_no_reg = -1
};
- static FPRegister Create(int code, int size) {
- return FPRegister(
- CPURegister::Create(code, size, CPURegister::kFPRegister));
+ static VRegister Create(int reg_code, int reg_size, int lane_count = 1) {
+ DCHECK(base::bits::IsPowerOfTwo(lane_count) && (lane_count <= 16));
+ VRegister v(CPURegister::Create(reg_code, reg_size, CPURegister::kVRegister,
+ lane_count));
+ DCHECK(v.IsValidVRegister());
+ return v;
+ }
+
+ static VRegister Create(int reg_code, VectorFormat format) {
+ int reg_size = RegisterSizeInBitsFromFormat(format);
+ int reg_count = IsVectorFormat(format) ? LaneCountFromFormat(format) : 1;
+ return VRegister::Create(reg_code, reg_size, reg_count);
}
- constexpr FPRegister() : CPURegister() {}
+ constexpr VRegister() : CPURegister() {}
- constexpr explicit FPRegister(const CPURegister& r) : CPURegister(r) {}
+ constexpr explicit VRegister(const CPURegister& r) : CPURegister(r) {}
bool IsValid() const {
- DCHECK(IsFPRegister() || IsNone());
- return IsValidFPRegister();
+ DCHECK(IsVRegister() || IsNone());
+ return IsValidVRegister();
+ }
+
+ static VRegister BRegFromCode(unsigned code);
+ static VRegister HRegFromCode(unsigned code);
+ static VRegister SRegFromCode(unsigned code);
+ static VRegister DRegFromCode(unsigned code);
+ static VRegister QRegFromCode(unsigned code);
+ static VRegister VRegFromCode(unsigned code);
+
+ VRegister V8B() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 8);
+ }
+ VRegister V16B() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 16);
+ }
+ VRegister V4H() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 4);
+ }
+ VRegister V8H() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 8);
+ }
+ VRegister V2S() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 2);
+ }
+ VRegister V4S() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 4);
+ }
+ VRegister V2D() const {
+ return VRegister::Create(code(), kQRegSizeInBits, 2);
+ }
+ VRegister V1D() const {
+ return VRegister::Create(code(), kDRegSizeInBits, 1);
+ }
+
+ bool Is8B() const { return (Is64Bits() && (lane_count == 8)); }
+ bool Is16B() const { return (Is128Bits() && (lane_count == 16)); }
+ bool Is4H() const { return (Is64Bits() && (lane_count == 4)); }
+ bool Is8H() const { return (Is128Bits() && (lane_count == 8)); }
+ bool Is2S() const { return (Is64Bits() && (lane_count == 2)); }
+ bool Is4S() const { return (Is128Bits() && (lane_count == 4)); }
+ bool Is1D() const { return (Is64Bits() && (lane_count == 1)); }
+ bool Is2D() const { return (Is128Bits() && (lane_count == 2)); }
+
+ // For consistency, we assert the number of lanes of these scalar registers,
+ // even though there are no vectors of equivalent total size with which they
+ // could alias.
+ bool Is1B() const {
+ DCHECK(!(Is8Bits() && IsVector()));
+ return Is8Bits();
+ }
+ bool Is1H() const {
+ DCHECK(!(Is16Bits() && IsVector()));
+ return Is16Bits();
+ }
+ bool Is1S() const {
+ DCHECK(!(Is32Bits() && IsVector()));
+ return Is32Bits();
+ }
+
+ bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSizeInBits; }
+ bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSizeInBits; }
+ bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSizeInBits; }
+ bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSizeInBits; }
+
+ bool IsScalar() const { return lane_count == 1; }
+ bool IsVector() const { return lane_count > 1; }
+
+ bool IsSameFormat(const VRegister& other) const {
+ return (reg_size == other.reg_size) && (lane_count == other.lane_count);
}
- static FPRegister SRegFromCode(unsigned code);
- static FPRegister DRegFromCode(unsigned code);
+ int LaneCount() const { return lane_count; }
+
+ unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count; }
+
+ unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; }
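  // Illustrative arithmetic for the helpers above: q0.V4S() has
  // SizeInBytes() == 16 and LaneCount() == 4, so LaneSizeInBytes() == 4 and
  // LaneSizeInBits() == 32; d0.V8B() works out to one byte per lane.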
// Start of V8 compatibility section ---------------------
- static constexpr int kMaxNumRegisters = kNumberOfFPRegisters;
+ static constexpr int kMaxNumRegisters = kNumberOfVRegisters;
STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
- // Crankshaft can use all the FP registers except:
+ // Crankshaft can use all the V registers except:
// - d15 which is used to keep the 0 double value
// - d30 which is used in crankshaft as a double scratch register
// - d31 which is used in the MacroAssembler as a double scratch register
- static FPRegister from_code(int code) {
+ static VRegister from_code(int code) {
// Always return a D register.
- return FPRegister::Create(code, kDRegSizeInBits);
+ return VRegister::Create(code, kDRegSizeInBits);
}
// End of V8 compatibility section -----------------------
};
-
-STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
-STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
+static_assert(sizeof(CPURegister) == sizeof(Register),
+ "CPURegister must be same size as Register");
+static_assert(sizeof(CPURegister) == sizeof(VRegister),
+ "CPURegister must be same size as VRegister");
#define DEFINE_REGISTER(register_class, name, code, size, type) \
constexpr register_class name { CPURegister(code, size, type) }
@@ -241,10 +356,10 @@ STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
constexpr register_class alias = name
// No*Reg is used to indicate an unused argument, or an error case. Note that
-// these all compare equal (using the Is() method). The Register and FPRegister
+// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
-DEFINE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(VRegister, NoVReg, 0, 0, CPURegister::kNoRegister);
DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
// v8 compatibility.
@@ -261,17 +376,25 @@ DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
CPURegister::kRegister);
-#define DEFINE_FPREGISTERS(N) \
- DEFINE_REGISTER(FPRegister, s##N, N, kSRegSizeInBits, \
- CPURegister::kFPRegister); \
- DEFINE_REGISTER(FPRegister, d##N, N, kDRegSizeInBits, \
- CPURegister::kFPRegister);
-GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
-#undef DEFINE_FPREGISTERS
+#define DEFINE_VREGISTERS(N) \
+ DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits, \
+ CPURegister::kVRegister); \
+ DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits, \
+ CPURegister::kVRegister); \
+ DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits, \
+ CPURegister::kVRegister); \
+ DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits, \
+ CPURegister::kVRegister); \
+ DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits, \
+ CPURegister::kVRegister); \
+ DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits, CPURegister::kVRegister);
+GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
+#undef DEFINE_VREGISTERS
#undef DEFINE_REGISTER
// Registers aliases.
+ALIAS_REGISTER(VRegister, v8_, v8); // Avoid conflicts with namespace v8.
ALIAS_REGISTER(Register, ip0, x16);
ALIAS_REGISTER(Register, ip1, x17);
ALIAS_REGISTER(Register, wip0, w16);
@@ -294,13 +417,17 @@ ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);
// Keeps the 0 double value.
-ALIAS_REGISTER(FPRegister, fp_zero, d15);
+ALIAS_REGISTER(VRegister, fp_zero, d15);
+// MacroAssembler fixed V Registers.
+ALIAS_REGISTER(VRegister, fp_fixed1, d27);
+ALIAS_REGISTER(VRegister, fp_fixed2, d28);
+ALIAS_REGISTER(VRegister, fp_fixed3, d29); // same as Crankshaft scratch.
// Crankshaft double scratch register.
-ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
-// MacroAssembler double scratch registers.
-ALIAS_REGISTER(FPRegister, fp_scratch, d30);
-ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
-ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
+ALIAS_REGISTER(VRegister, crankshaft_fp_scratch, d29);
+// MacroAssembler scratch V registers.
+ALIAS_REGISTER(VRegister, fp_scratch, d30);
+ALIAS_REGISTER(VRegister, fp_scratch1, d30);
+ALIAS_REGISTER(VRegister, fp_scratch2, d31);
#undef ALIAS_REGISTER
@@ -335,11 +462,24 @@ bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
-typedef FPRegister FloatRegister;
-typedef FPRegister DoubleRegister;
-
-// TODO(arm64) Define SIMD registers.
-typedef FPRegister Simd128Register;
+// AreSameFormat returns true if all of the specified VRegisters have the same
+// vector format. Arguments set to NoVReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+// AreConsecutive returns true if all of the specified VRegisters are
+// consecutive in the register file. Arguments may be set to NoVReg, and if so,
+// subsequent arguments must also be NoVReg. At least one argument (reg1) must
+// be valid (not NoVReg).
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
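
Illustrative behaviour, following the comments above (register choices are
arbitrary):

    // AreSameFormat(v0.V4S(), v1.V4S())                -> true
    // AreSameFormat(v0.V4S(), v1.V2D())                -> false, formats differ
    // AreConsecutive(v3.V16B(), v4.V16B(), v5.V16B())  -> true, codes 3, 4, 5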
+
+typedef VRegister FloatRegister;
+typedef VRegister DoubleRegister;
+typedef VRegister Simd128Register;
// -----------------------------------------------------------------------------
// Lists of registers.
@@ -363,10 +503,10 @@ class CPURegList {
CPURegList(CPURegister::RegisterType type, int size, int first_reg,
int last_reg)
: size_(size), type_(type) {
- DCHECK(((type == CPURegister::kRegister) &&
- (last_reg < kNumberOfRegisters)) ||
- ((type == CPURegister::kFPRegister) &&
- (last_reg < kNumberOfFPRegisters)));
+ DCHECK(
+ ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kVRegister) &&
+ (last_reg < kNumberOfVRegisters)));
DCHECK(last_reg >= first_reg);
list_ = (1UL << (last_reg + 1)) - 1;
list_ &= ~((1UL << first_reg) - 1);
@@ -419,11 +559,13 @@ class CPURegList {
// AAPCS64 callee-saved registers.
static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
- static CPURegList GetCalleeSavedFP(int size = kDRegSizeInBits);
+ static CPURegList GetCalleeSavedV(int size = kDRegSizeInBits);
// AAPCS64 caller-saved registers. Note that this includes lr.
+ // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+  // 64 bits being caller-saved.
static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
- static CPURegList GetCallerSavedFP(int size = kDRegSizeInBits);
+ static CPURegList GetCallerSavedV(int size = kDRegSizeInBits);
// Registers saved as safepoints.
static CPURegList GetSafepointSavedRegisters();
@@ -474,17 +616,16 @@ class CPURegList {
bool IsValid() const {
const RegList kValidRegisters = 0x8000000ffffffff;
- const RegList kValidFPRegisters = 0x0000000ffffffff;
+ const RegList kValidVRegisters = 0x0000000ffffffff;
switch (type_) {
case CPURegister::kRegister:
return (list_ & kValidRegisters) == list_;
- case CPURegister::kFPRegister:
- return (list_ & kValidFPRegisters) == list_;
+ case CPURegister::kVRegister:
+ return (list_ & kValidVRegisters) == list_;
case CPURegister::kNoRegister:
return list_ == 0;
default:
UNREACHABLE();
- return false;
}
}
};
@@ -492,12 +633,11 @@ class CPURegList {
// AAPCS64 callee-saved registers.
#define kCalleeSaved CPURegList::GetCalleeSaved()
-#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
-
+#define kCalleeSavedV CPURegList::GetCalleeSavedV()
// AAPCS64 caller-saved registers. Note that this includes lr.
#define kCallerSaved CPURegList::GetCallerSaved()
-#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+#define kCallerSavedV CPURegList::GetCallerSavedV()
// -----------------------------------------------------------------------------
// Immediates.
@@ -518,7 +658,7 @@ class Immediate {
RelocInfo::Mode rmode() const { return rmode_; }
private:
- void InitializeHandle(Handle<Object> value);
+ void InitializeHandle(Handle<HeapObject> value);
int64_t value_;
RelocInfo::Mode rmode_;
@@ -551,6 +691,13 @@ class Operand {
Extend extend,
unsigned shift_amount = 0);
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedCode(CodeStub* stub);
+
+ inline bool IsHeapObjectRequest() const;
+ inline HeapObjectRequest heap_object_request() const;
+ inline Immediate immediate_for_heap_object_request() const;
+
template<typename T>
inline explicit Operand(Handle<T> handle);
@@ -586,6 +733,7 @@ class Operand {
inline static Operand UntagSmiAndScale(Register smi, int scale);
private:
+ base::Optional<HeapObjectRequest> heap_object_request_;
Immediate immediate_;
Register reg_;
Shift shift_;
@@ -652,17 +800,11 @@ class MemOperand {
class ConstPool {
public:
- explicit ConstPool(Assembler* assm)
- : assm_(assm),
- first_use_(-1),
- shared_entries_count(0) {}
- void RecordEntry(intptr_t data, RelocInfo::Mode mode);
- int EntryCount() const {
- return shared_entries_count + static_cast<int>(unique_entries_.size());
- }
- bool IsEmpty() const {
- return shared_entries_.empty() && unique_entries_.empty();
- }
+ explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
+ // Returns true when we need to write RelocInfo and false when we do not.
+ bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
+ int EntryCount() const { return static_cast<int>(entries_.size()); }
+ bool IsEmpty() const { return entries_.empty(); }
// Distance in bytes between the current pc and the first instruction
// using the pool. If there are no pending entries return kMaxInt.
int DistanceToFirstUse();
@@ -686,16 +828,29 @@ class ConstPool {
void EmitGuard();
void EmitEntries();
+ typedef std::map<uint64_t, int> SharedEntryMap;
+ // Adds a shared entry to entries_, using 'entry_map' to determine whether we
+ // already track this entry. Returns true if this is the first time we add
+ // this entry, false otherwise.
+ bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
+
Assembler* assm_;
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_use_;
- // values, pc offset(s) of entries which can be shared.
- std::multimap<uint64_t, int> shared_entries_;
- // Number of distinct literal in shared entries.
- int shared_entries_count;
- // values, pc offset of entries which cannot be shared.
- std::vector<std::pair<uint64_t, int> > unique_entries_;
+
+ // Map of data to index in entries_ for shared entries.
+ SharedEntryMap shared_entries_;
+
+ // Map of address of handle to index in entries_. We need to keep track of
+ // code targets separately from other shared entries, as they can be
+ // relocated.
+ SharedEntryMap handle_to_index_map_;
+
+ // Values, pc offset(s) of entries. Use a vector to preserve the order of
+ // insertion, as the serializer expects code target RelocInfo to point to
+  // constant pool addresses in ascending order.
+ std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
};
@@ -741,7 +896,7 @@ class Assembler : public AssemblerBase {
//
// The descriptor (desc) can be NULL. In that case, the code is finalized as
// usual, but the descriptor is not populated.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -857,7 +1012,7 @@ class Assembler : public AssemblerBase {
   // Prevent constant pool emission until EndBlockConstPool is called.
// Call to this function can be nested but must be followed by an equal
- // number of call to EndBlockConstpool.
+  // number of calls to EndBlockConstPool.
void StartBlockConstPool();
   // Resume constant pool emission. Need to be called as many times as
@@ -872,7 +1027,7 @@ class Assembler : public AssemblerBase {
// Prevent veneer pool emission until EndBlockVeneerPool is called.
// Call to this function can be nested but must be followed by an equal
- // number of call to EndBlockConstpool.
+  // number of calls to EndBlockVeneerPool.
void StartBlockVeneerPool();
   // Resume constant pool emission. Need to be called as many times as
@@ -925,7 +1080,6 @@ class Assembler : public AssemblerBase {
// the marker and branch over the data.
void RecordConstPool(int size);
-
// Instruction set functions ------------------------------------------------
// Branch / Jump instructions.
@@ -1064,9 +1218,101 @@ class Assembler : public AssemblerBase {
const Register& rn,
const Operand& operand);
+ // Bitwise and.
+ void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Bit clear immediate.
+ void bic(const VRegister& vd, const int imm8, const int left_shift = 0);
+
+ // Bit clear.
+ void bic(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Bitwise insert if false.
+ void bif(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Bitwise insert if true.
+ void bit(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Bitwise select.
+ void bsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Polynomial multiply.
+ void pmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Vector move immediate.
+ void movi(const VRegister& vd, const uint64_t imm, Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Bitwise not.
+ void mvn(const VRegister& vd, const VRegister& vn);
+
+ // Vector move inverted immediate.
+ void mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Signed saturating accumulate of unsigned value.
+ void suqadd(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned saturating accumulate of signed value.
+ void usqadd(const VRegister& vd, const VRegister& vn);
+
+ // Absolute value.
+ void abs(const VRegister& vd, const VRegister& vn);
+
+ // Signed saturating absolute value.
+ void sqabs(const VRegister& vd, const VRegister& vn);
+
+ // Negate.
+ void neg(const VRegister& vd, const VRegister& vn);
+
+ // Signed saturating negate.
+ void sqneg(const VRegister& vd, const VRegister& vn);
+
+ // Bitwise not.
+ void not_(const VRegister& vd, const VRegister& vn);
+
+ // Extract narrow.
+ void xtn(const VRegister& vd, const VRegister& vn);
+
+ // Extract narrow (second part).
+ void xtn2(const VRegister& vd, const VRegister& vn);
+
+ // Signed saturating extract narrow.
+ void sqxtn(const VRegister& vd, const VRegister& vn);
+
+ // Signed saturating extract narrow (second part).
+ void sqxtn2(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned saturating extract narrow.
+ void uqxtn(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned saturating extract narrow (second part).
+ void uqxtn2(const VRegister& vd, const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow.
+ void sqxtun(const VRegister& vd, const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow (second part).
+ void sqxtun2(const VRegister& vd, const VRegister& vn);
+
+ // Move register to register.
+ void mov(const VRegister& vd, const VRegister& vn);
+
+  // Bitwise or not.
+ void orn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Bitwise exclusive or.
+ void eor(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
// Bitwise or (A | B).
void orr(const Register& rd, const Register& rn, const Operand& operand);
+ // Bitwise or.
+ void orr(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Bitwise or immediate.
+ void orr(const VRegister& vd, const int imm8, const int left_shift = 0);
+
   // Bitwise or not (A | ~B).
void orn(const Register& rd, const Register& rn, const Operand& operand);
@@ -1361,6 +1607,7 @@ class Assembler : public AssemblerBase {
// Load literal to register.
void ldr(const CPURegister& rt, const Immediate& imm);
+ void ldr(const CPURegister& rt, const Operand& operand);
// Load-acquire word.
void ldar(const Register& rt, const Register& rn);
@@ -1473,147 +1720,1080 @@ class Assembler : public AssemblerBase {
mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
}
+ // Add.
+ void add(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned halving add.
+ void uhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Subtract.
+ void sub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed halving add.
+ void shadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Multiply by scalar element.
+ void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Multiply-add by scalar element.
+ void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Multiply-subtract by scalar element.
+ void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element.
+ void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element (second part).
+ void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element.
+ void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element (second part).
+ void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element.
+ void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element (second part).
+ void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element.
+ void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element (second part).
+ void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element.
+ void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element (second part).
+ void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element.
+ void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element (second part).
+ void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Add narrow returning high half.
+ void addhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Add narrow returning high half (second part).
+ void addhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Signed saturating doubling long multiply by element.
+ void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+  // Signed saturating doubling long multiply by element (second part).
+ void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element.
+ void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element (second part).
+ void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element.
+ void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element (second part).
+ void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Compare bitwise to zero.
+ void cmeq(const VRegister& vd, const VRegister& vn, int value);
+
+ // Compare signed greater than or equal to zero.
+ void cmge(const VRegister& vd, const VRegister& vn, int value);
+
+ // Compare signed greater than zero.
+ void cmgt(const VRegister& vd, const VRegister& vn, int value);
+
+ // Compare signed less than or equal to zero.
+ void cmle(const VRegister& vd, const VRegister& vn, int value);
+
+ // Compare signed less than zero.
+ void cmlt(const VRegister& vd, const VRegister& vn, int value);
+
+ // Unsigned rounding halving add.
+ void urhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Compare equal.
+ void cmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Compare signed greater than or equal.
+ void cmge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Compare signed greater than.
+ void cmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Compare unsigned higher.
+ void cmhi(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Compare unsigned higher or same.
+ void cmhs(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Compare bitwise test bits nonzero.
+ void cmtst(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed shift left by register.
+ void sshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned shift left by register.
+ void ushl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract.
+ void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract (second part).
+ void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling long multiply.
+ void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling long multiply (second part).
+ void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling multiply returning high half.
+ void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating rounding doubling multiply returning high half.
+ void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling multiply element returning high half.
+ void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating rounding doubling multiply element returning high half.
+ void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+  // Unsigned long multiply.
+ void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned long multiply (second part).
+ void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Rounding add narrow returning high half.
+ void raddhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Subtract narrow returning high half.
+ void subhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Subtract narrow returning high half (second part).
+ void subhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Rounding add narrow returning high half (second part).
+ void raddhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Rounding subtract narrow returning high half.
+ void rsubhn(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Rounding subtract narrow returning high half (second part).
+ void rsubhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating shift left by register.
+ void sqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned saturating shift left by register.
+ void uqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed rounding shift left by register.
+ void srshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned rounding shift left by register.
+ void urshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating rounding shift left by register.
+ void sqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned saturating rounding shift left by register.
+ void uqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed absolute difference.
+ void sabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate.
+ void uaba(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Shift left by immediate and insert.
+ void sli(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Shift right by immediate and insert.
+ void sri(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed maximum.
+ void smax(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed pairwise maximum.
+ void smaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Add across vector.
+ void addv(const VRegister& vd, const VRegister& vn);
+
+ // Signed add long across vector.
+ void saddlv(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned add long across vector.
+ void uaddlv(const VRegister& vd, const VRegister& vn);
+
+ // FP maximum number across vector.
+ void fmaxnmv(const VRegister& vd, const VRegister& vn);
+
+ // FP maximum across vector.
+ void fmaxv(const VRegister& vd, const VRegister& vn);
+
+ // FP minimum number across vector.
+ void fminnmv(const VRegister& vd, const VRegister& vn);
+
+ // FP minimum across vector.
+ void fminv(const VRegister& vd, const VRegister& vn);
+
+ // Signed maximum across vector.
+ void smaxv(const VRegister& vd, const VRegister& vn);
+
+ // Signed minimum.
+ void smin(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // Signed pairwise minimum.
+ void sminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed minimum across vector.
+ void sminv(const VRegister& vd, const VRegister& vn);
+
+ // One-element structure store from one register.
+ void st1(const VRegister& vt, const MemOperand& src);
+
+ // One-element structure store from two registers.
+ void st1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
+
+ // One-element structure store from three registers.
+ void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure store from four registers.
+ void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src);
+
+ // One-element single structure store from one lane.
+ void st1(const VRegister& vt, int lane, const MemOperand& src);
+
+ // Two-element structure store from two registers.
+ void st2(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
+
+ // Two-element single structure store from two lanes.
+ void st2(const VRegister& vt, const VRegister& vt2, int lane,
+ const MemOperand& src);
+
+ // Three-element structure store from three registers.
+ void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure store from three lanes.
+ void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ int lane, const MemOperand& src);
+
+ // Four-element structure store from four registers.
+ void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src);
+
+ // Four-element single structure store from four lanes.
+ void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, int lane, const MemOperand& src);
+
+ // Unsigned add long.
+ void uaddl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned add long (second part).
+ void uaddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned add wide.
+ void uaddw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned add wide (second part).
+ void uaddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed add long.
+ void saddl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed add long (second part).
+ void saddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed add wide.
+ void saddw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed add wide (second part).
+ void saddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned subtract long.
+ void usubl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned subtract long (second part).
+ void usubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned subtract wide.
+ void usubw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed subtract long.
+ void ssubl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed subtract long (second part).
+ void ssubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed integer subtract wide.
+ void ssubw(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed integer subtract wide (second part).
+ void ssubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned subtract wide (second part).
+ void usubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned maximum.
+ void umax(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned pairwise maximum.
+ void umaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned maximum across vector.
+ void umaxv(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned minimum.
+ void umin(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned pairwise minimum.
+ void uminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned minimum across vector.
+ void uminv(const VRegister& vd, const VRegister& vn);
+
+ // Transpose vectors (primary).
+ void trn1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Transpose vectors (secondary).
+ void trn2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unzip vectors (primary).
+ void uzp1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unzip vectors (secondary).
+ void uzp2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Zip vectors (primary).
+ void zip1(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Zip vectors (secondary).
+ void zip2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed shift right by immediate.
+ void sshr(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned shift right by immediate.
+ void ushr(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed rounding shift right by immediate.
+ void srshr(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned rounding shift right by immediate.
+ void urshr(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed shift right by immediate and accumulate.
+ void ssra(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned shift right by immediate and accumulate.
+ void usra(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed rounding shift right by immediate and accumulate.
+ void srsra(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned rounding shift right by immediate and accumulate.
+ void ursra(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Shift right narrow by immediate.
+ void shrn(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Shift right narrow by immediate (second part).
+ void shrn2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Rounding shift right narrow by immediate.
+ void rshrn(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Rounding shift right narrow by immediate (second part).
+ void rshrn2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned saturating shift right narrow by immediate.
+ void uqshrn(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned saturating shift right narrow by immediate (second part).
+ void uqshrn2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate.
+ void uqrshrn(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate (second part).
+ void uqrshrn2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating shift right narrow by immediate.
+ void sqshrn(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating shift right narrow by immediate (second part).
+ void sqshrn2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating rounded shift right narrow by immediate.
+ void sqrshrn(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating rounded shift right narrow by immediate (second part).
+ void sqrshrn2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate.
+ void sqshrun(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate (second part).
+ void sqshrun2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed sat rounded shift right unsigned narrow by immediate.
+ void sqrshrun(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed sat rounded shift right unsigned narrow by immediate (second part).
+ void sqrshrun2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // FP reciprocal step.
+ void frecps(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP reciprocal estimate.
+ void frecpe(const VRegister& vd, const VRegister& vn);
+
+ // FP reciprocal square root estimate.
+ void frsqrte(const VRegister& vd, const VRegister& vn);
+
+ // FP reciprocal square root step.
+ void frsqrts(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed absolute difference and accumulate long.
+ void sabal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed absolute difference and accumulate long (second part).
+ void sabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long.
+ void uabal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long (second part).
+ void uabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed absolute difference long.
+ void sabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed absolute difference long (second part).
+ void sabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned absolute difference long.
+ void uabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned absolute difference long (second part).
+ void uabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Polynomial multiply long.
+ void pmull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Polynomial multiply long (second part).
+ void pmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed long multiply-add.
+ void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed long multiply-add (second part).
+ void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned long multiply-add.
+ void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned long multiply-add (second part).
+ void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed long multiply-sub.
+ void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed long multiply-sub (second part).
+ void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned long multiply-sub.
+ void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned long multiply-sub (second part).
+ void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed long multiply.
+ void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed long multiply (second part).
+ void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add.
+ void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add (second part).
+ void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned absolute difference.
+ void uabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed absolute difference and accumulate.
+ void saba(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
// FP instructions.
// Move immediate to FP register.
- void fmov(FPRegister fd, double imm);
- void fmov(FPRegister fd, float imm);
+ void fmov(const VRegister& fd, double imm);
+ void fmov(const VRegister& fd, float imm);
// Move FP register to register.
- void fmov(Register rd, FPRegister fn);
+ void fmov(const Register& rd, const VRegister& fn);
// Move register to FP register.
- void fmov(FPRegister fd, Register rn);
+ void fmov(const VRegister& fd, const Register& rn);
// Move FP register to FP register.
- void fmov(FPRegister fd, FPRegister fn);
+ void fmov(const VRegister& fd, const VRegister& fn);
+
+ // Move 64-bit register to top half of 128-bit FP register.
+ void fmov(const VRegister& vd, int index, const Register& rn);
+
+ // Move top half of 128-bit FP register to 64-bit register.
+ void fmov(const Register& rd, const VRegister& vn, int index);
// FP add.
- void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// FP subtract.
- void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// FP multiply.
- void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
- // FP fused multiply and add.
- void fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP fused multiply and subtract.
- void fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP fused multiply, add and negate.
- void fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
-
- // FP fused multiply, subtract and negate.
- void fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
+ void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP compare equal to zero.
+ void fcmeq(const VRegister& vd, const VRegister& vn, double imm);
+
+ // FP greater than zero.
+ void fcmgt(const VRegister& vd, const VRegister& vn, double imm);
+
+ // FP greater than or equal to zero.
+ void fcmge(const VRegister& vd, const VRegister& vn, double imm);
+
+ // FP less than or equal to zero.
+ void fcmle(const VRegister& vd, const VRegister& vn, double imm);
+
+  // FP less than zero.
+ void fcmlt(const VRegister& vd, const VRegister& vn, double imm);
+
+ // FP absolute difference.
+ void fabd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP pairwise add vector.
+ void faddp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP pairwise add scalar.
+ void faddp(const VRegister& vd, const VRegister& vn);
+
+ // FP pairwise maximum scalar.
+ void fmaxp(const VRegister& vd, const VRegister& vn);
+
+ // FP pairwise maximum number scalar.
+ void fmaxnmp(const VRegister& vd, const VRegister& vn);
+
+ // FP pairwise minimum number scalar.
+ void fminnmp(const VRegister& vd, const VRegister& vn);
+
+ // FP vector multiply accumulate.
+ void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP vector multiply subtract.
+ void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP vector multiply extended.
+ void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP absolute greater than or equal.
+ void facge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP absolute greater than.
+ void facgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP multiply by element.
+ void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-add to accumulator by element.
+ void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-sub from accumulator by element.
+ void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // FP multiply extended by element.
+ void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int vm_index);
+
+ // FP compare equal.
+ void fcmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP greater than.
+ void fcmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP greater than or equal.
+ void fcmge(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP pairwise maximum vector.
+ void fmaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP pairwise minimum vector.
+ void fminp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP pairwise minimum scalar.
+ void fminp(const VRegister& vd, const VRegister& vn);
+
+ // FP pairwise maximum number vector.
+ void fmaxnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP pairwise minimum number vector.
+ void fminnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-add.
+ void fmadd(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract.
+ void fmsub(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-add and negate.
+ void fnmadd(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract and negate.
+ void fnmsub(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ const VRegister& va);
+
+ // FP multiply-negate scalar.
+ void fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP reciprocal exponent scalar.
+ void frecpx(const VRegister& vd, const VRegister& vn);
// FP divide.
- void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// FP maximum.
- void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// FP minimum.
- void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm);
   // FP maximum number.
- void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm);
   // FP minimum number.
- void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+ void fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm);
// FP absolute.
- void fabs(const FPRegister& fd, const FPRegister& fn);
+ void fabs(const VRegister& vd, const VRegister& vn);
// FP negate.
- void fneg(const FPRegister& fd, const FPRegister& fn);
+ void fneg(const VRegister& vd, const VRegister& vn);
// FP square root.
- void fsqrt(const FPRegister& fd, const FPRegister& fn);
+ void fsqrt(const VRegister& vd, const VRegister& vn);
- // FP round to integer (nearest with ties to away).
- void frinta(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer nearest with ties to away.
+ void frinta(const VRegister& vd, const VRegister& vn);
- // FP round to integer (toward minus infinity).
- void frintm(const FPRegister& fd, const FPRegister& fn);
+  // FP round to integer, using the current rounding mode.
+ void frinti(const VRegister& vd, const VRegister& vn);
- // FP round to integer (nearest with ties to even).
- void frintn(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer toward minus infinity.
+ void frintm(const VRegister& vd, const VRegister& vn);
- // FP round to integer (towards plus infinity).
- void frintp(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer nearest with ties to even.
+ void frintn(const VRegister& vd, const VRegister& vn);
- // FP round to integer (towards zero.)
- void frintz(const FPRegister& fd, const FPRegister& fn);
+ // FP round to integer towards plus infinity.
+ void frintp(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer exact, using the current rounding mode.
+ void frintx(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer towards zero.
+ void frintz(const VRegister& vd, const VRegister& vn);
// FP compare registers.
- void fcmp(const FPRegister& fn, const FPRegister& fm);
+ void fcmp(const VRegister& vn, const VRegister& vm);
// FP compare immediate.
- void fcmp(const FPRegister& fn, double value);
+ void fcmp(const VRegister& vn, double value);
// FP conditional compare.
- void fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
+ void fccmp(const VRegister& vn, const VRegister& vm, StatusFlags nzcv,
Condition cond);
// FP conditional select.
- void fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
+ void fcsel(const VRegister& vd, const VRegister& vn, const VRegister& vm,
Condition cond);
- // Common FP Convert function
- void FPConvertToInt(const Register& rd,
- const FPRegister& fn,
- FPIntegerConvertOp op);
+ // Common FP Convert functions.
+ void NEONFPConvertToInt(const Register& rd, const VRegister& vn, Instr op);
+ void NEONFPConvertToInt(const VRegister& vd, const VRegister& vn, Instr op);
+
+ // FP convert between precisions.
+ void fcvt(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision.
+ void fcvtl(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision (second part).
+ void fcvtl2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision.
+ void fcvtn(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision (second part).
+ void fcvtn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd.
+ void fcvtxn(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd (second part).
+ void fcvtxn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+  void fcvtns(const VRegister& vd, const VRegister& vn);
- // FP convert between single and double precision.
- void fcvt(const FPRegister& fd, const FPRegister& fn);
+ // FP convert to unsigned integer, nearest with ties to even.
+  void fcvtnu(const VRegister& vd, const VRegister& vn);
- // Convert FP to unsigned integer (nearest with ties to away).
- void fcvtau(const Register& rd, const FPRegister& fn);
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
- // Convert FP to signed integer (nearest with ties to away).
- void fcvtas(const Register& rd, const FPRegister& fn);
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
- // Convert FP to unsigned integer (round towards -infinity).
- void fcvtmu(const Register& rd, const FPRegister& fn);
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
- // Convert FP to signed integer (round towards -infinity).
- void fcvtms(const Register& rd, const FPRegister& fn);
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
- // Convert FP to unsigned integer (nearest with ties to even).
- void fcvtnu(const Register& rd, const FPRegister& fn);
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const Register& rd, const VRegister& vn);
- // Convert FP to signed integer (nearest with ties to even).
- void fcvtns(const Register& rd, const FPRegister& fn);
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const Register& rd, const VRegister& vn);
- // Convert FP to unsigned integer (round towards zero).
- void fcvtzu(const Register& rd, const FPRegister& fn);
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const VRegister& vd, const VRegister& vn);
- // Convert FP to signed integer (rounf towards zero).
- void fcvtzs(const Register& rd, const FPRegister& fn);
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const VRegister& vd, const VRegister& vn);
// Convert signed integer or fixed point to FP.
- void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+ void scvtf(const VRegister& fd, const Register& rn, int fbits = 0);
// Convert unsigned integer or fixed point to FP.
- void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+ void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert signed integer or fixed-point to FP.
+ void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Convert unsigned integer or fixed-point to FP.
+ void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Extract vector from pair of vectors.
+ void ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int index);
+
+ // Duplicate vector element to vector or scalar.
+ void dup(const VRegister& vd, const VRegister& vn, int vn_index);
+
+ // Duplicate general-purpose register to vector.
+ void dup(const VRegister& vd, const Register& rn);
+
+ // Insert vector element from general-purpose register.
+ void ins(const VRegister& vd, int vd_index, const Register& rn);
+
+ // Move general-purpose register to a vector element.
+ void mov(const VRegister& vd, int vd_index, const Register& rn);
+
+ // Unsigned move vector element to general-purpose register.
+ void umov(const Register& rd, const VRegister& vn, int vn_index);
+
+ // Move vector element to general-purpose register.
+ void mov(const Register& rd, const VRegister& vn, int vn_index);
+
+ // Move vector element to scalar.
+ void mov(const VRegister& vd, const VRegister& vn, int vn_index);
+
+ // Insert vector element from another vector element.
+ void ins(const VRegister& vd, int vd_index, const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to another vector element.
+ void mov(const VRegister& vd, int vd_index, const VRegister& vn,
+ int vn_index);
+
+ // Signed move vector element to general-purpose register.
+ void smov(const Register& rd, const VRegister& vn, int vn_index);
+
+ // One-element structure load to one register.
+ void ld1(const VRegister& vt, const MemOperand& src);
+
+ // One-element structure load to two registers.
+ void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
+
+ // One-element structure load to three registers.
+ void ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure load to four registers.
+ void ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src);
+
+ // One-element single structure load to one lane.
+ void ld1(const VRegister& vt, int lane, const MemOperand& src);
+
+ // One-element single structure load to all lanes.
+ void ld1r(const VRegister& vt, const MemOperand& src);
+
+ // Two-element structure load.
+ void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
+
+ // Two-element single structure load to one lane.
+ void ld2(const VRegister& vt, const VRegister& vt2, int lane,
+ const MemOperand& src);
+
+ // Two-element single structure load to all lanes.
+ void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src);
+
+ // Three-element structure load.
+ void ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure load to one lane.
+ void ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ int lane, const MemOperand& src);
+
+ // Three-element single structure load to all lanes.
+ void ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src);
+
+ // Four-element structure load.
+ void ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src);
+
+ // Four-element single structure load to one lane.
+ void ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, int lane, const MemOperand& src);
+
+ // Four-element single structure load to all lanes.
+ void ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src);
+
+ // Count leading sign bits.
+ void cls(const VRegister& vd, const VRegister& vn);
+
+ // Count leading zero bits (vector).
+ void clz(const VRegister& vd, const VRegister& vn);
+
+ // Population count per byte.
+ void cnt(const VRegister& vd, const VRegister& vn);
+
+ // Reverse bit order.
+ void rbit(const VRegister& vd, const VRegister& vn);
+
+ // Reverse elements in 16-bit halfwords.
+ void rev16(const VRegister& vd, const VRegister& vn);
+
+ // Reverse elements in 32-bit words.
+ void rev32(const VRegister& vd, const VRegister& vn);
+
+ // Reverse elements in 64-bit doublewords.
+ void rev64(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned reciprocal square root estimate.
+ void ursqrte(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned reciprocal estimate.
+ void urecpe(const VRegister& vd, const VRegister& vn);
+
+ // Signed pairwise long add and accumulate.
+ void sadalp(const VRegister& vd, const VRegister& vn);
+
+ // Signed pairwise long add.
+ void saddlp(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned pairwise long add.
+ void uaddlp(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned pairwise long add and accumulate.
+ void uadalp(const VRegister& vd, const VRegister& vn);
+
+ // Shift left by immediate.
+ void shl(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating shift left by immediate.
+ void sqshl(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed saturating shift left unsigned by immediate.
+ void sqshlu(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned saturating shift left by immediate.
+ void uqshl(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed shift left long by immediate.
+ void sshll(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed shift left long by immediate (second part).
+ void sshll2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Signed extend long.
+ void sxtl(const VRegister& vd, const VRegister& vn);
+
+ // Signed extend long (second part).
+ void sxtl2(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned shift left long by immediate.
+ void ushll(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned shift left long by immediate (second part).
+ void ushll2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Shift left long by element size.
+ void shll(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Shift left long by element size (second part).
+ void shll2(const VRegister& vd, const VRegister& vn, int shift);
+
+ // Unsigned extend long.
+ void uxtl(const VRegister& vd, const VRegister& vn);
+
+ // Unsigned extend long (second part).
+ void uxtl2(const VRegister& vd, const VRegister& vn);
+
+ // Signed rounding halving add.
+ void srhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned halving sub.
+ void uhsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed halving sub.
+ void shsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned saturating add.
+ void uqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating add.
+ void sqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Unsigned saturating subtract.
+ void uqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Signed saturating subtract.
+ void sqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Add pairwise.
+ void addp(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Add pair of elements scalar.
+ void addp(const VRegister& vd, const VRegister& vn);
+
+ // Multiply-add to accumulator.
+ void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Multiply-subtract to accumulator.
+ void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Multiply.
+ void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Table lookup from one register.
+ void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Table lookup from two registers.
+ void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup from three registers.
+ void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vm);
+
+ // Table lookup from four registers.
+ void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vn4, const VRegister& vm);
+
+ // Table lookup extension from one register.
+ void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // Table lookup extension from two registers.
+ void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup extension from three registers.
+ void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vm);
+
+ // Table lookup extension from four registers.
+ void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vn4, const VRegister& vm);
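+
+  // Hedged usage sketch, not part of the original change: how a caller might
+  // drive a few of the NEON emitters declared above. The lane-arrangement
+  // helpers such as V2S(), V2D() and V4S(), the register names, and "assm"
+  // (an Assembler* in scope) are assumptions taken from the VRegister support
+  // added elsewhere in this patch.
+  //
+  //   assm->smull(v3.V2D(), v4.V2S(), v5.V2S());  // widening signed multiply
+  //   assm->umax(v0.V4S(), v1.V4S(), v2.V4S());   // lane-wise unsigned maximum
+  //   assm->umaxv(s0, v0.V4S());                  // reduce to the maximum lane
+  //   assm->ushr(v6.V4S(), v7.V4S(), 3);          // shift each lane right by 3
+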
// Instruction functions used only for test, debug, and patching.
// Emit raw instructions in the instruction stream.
@@ -1663,37 +2843,43 @@ class Assembler : public AssemblerBase {
// Register encoding.
static Instr Rd(CPURegister rd) {
- DCHECK(rd.code() != kSPRegInternalCode);
+ DCHECK_NE(rd.code(), kSPRegInternalCode);
return rd.code() << Rd_offset;
}
static Instr Rn(CPURegister rn) {
- DCHECK(rn.code() != kSPRegInternalCode);
+ DCHECK_NE(rn.code(), kSPRegInternalCode);
return rn.code() << Rn_offset;
}
static Instr Rm(CPURegister rm) {
- DCHECK(rm.code() != kSPRegInternalCode);
+ DCHECK_NE(rm.code(), kSPRegInternalCode);
return rm.code() << Rm_offset;
}
+ static Instr RmNot31(CPURegister rm) {
+ DCHECK_NE(rm.code(), kSPRegInternalCode);
+ DCHECK(!rm.IsZero());
+ return Rm(rm);
+ }
+
static Instr Ra(CPURegister ra) {
- DCHECK(ra.code() != kSPRegInternalCode);
+ DCHECK_NE(ra.code(), kSPRegInternalCode);
return ra.code() << Ra_offset;
}
static Instr Rt(CPURegister rt) {
- DCHECK(rt.code() != kSPRegInternalCode);
+ DCHECK_NE(rt.code(), kSPRegInternalCode);
return rt.code() << Rt_offset;
}
static Instr Rt2(CPURegister rt2) {
- DCHECK(rt2.code() != kSPRegInternalCode);
+ DCHECK_NE(rt2.code(), kSPRegInternalCode);
return rt2.code() << Rt2_offset;
}
static Instr Rs(CPURegister rs) {
- DCHECK(rs.code() != kSPRegInternalCode);
+ DCHECK_NE(rs.code(), kSPRegInternalCode);
return rs.code() << Rs_offset;
}
@@ -1749,17 +2935,174 @@ class Assembler : public AssemblerBase {
// MemOperand offset encoding.
inline static Instr ImmLSUnsigned(int imm12);
inline static Instr ImmLS(int imm9);
- inline static Instr ImmLSPair(int imm7, LSDataSize size);
+ inline static Instr ImmLSPair(int imm7, unsigned size);
inline static Instr ImmShiftLS(unsigned shift_amount);
inline static Instr ImmException(int imm16);
inline static Instr ImmSystemRegister(int imm15);
inline static Instr ImmHint(int imm7);
inline static Instr ImmBarrierDomain(int imm2);
inline static Instr ImmBarrierType(int imm2);
- inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+ inline static unsigned CalcLSDataSize(LoadStoreOp op);
+
+ // Instruction bits for vector format in data processing operations.
+ static Instr VFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.LaneCount()) {
+ case 2:
+ return NEON_2S;
+ case 4:
+ return NEON_4H;
+ case 8:
+ return NEON_8B;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(vd.Is128Bits());
+ switch (vd.LaneCount()) {
+ case 2:
+ return NEON_2D;
+ case 4:
+ return NEON_4S;
+ case 8:
+ return NEON_8H;
+ case 16:
+ return NEON_16B;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+
+ // Instruction bits for vector format in floating point data processing
+ // operations.
+ static Instr FPFormat(VRegister vd) {
+ if (vd.LaneCount() == 1) {
+ // Floating point scalar formats.
+ DCHECK(vd.Is32Bits() || vd.Is64Bits());
+ return vd.Is64Bits() ? FP64 : FP32;
+ }
+
+ // Two lane floating point vector formats.
+ if (vd.LaneCount() == 2) {
+ DCHECK(vd.Is64Bits() || vd.Is128Bits());
+ return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S;
+ }
+
+ // Four lane floating point vector format.
+ DCHECK((vd.LaneCount() == 4) && vd.Is128Bits());
+ return NEON_FP_4S;
+ }
+
+ // Instruction bits for vector format in load and store operations.
+ static Instr LSVFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.LaneCount()) {
+ case 1:
+ return LS_NEON_1D;
+ case 2:
+ return LS_NEON_2S;
+ case 4:
+ return LS_NEON_4H;
+ case 8:
+ return LS_NEON_8B;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(vd.Is128Bits());
+ switch (vd.LaneCount()) {
+ case 2:
+ return LS_NEON_2D;
+ case 4:
+ return LS_NEON_4S;
+ case 8:
+ return LS_NEON_8H;
+ case 16:
+ return LS_NEON_16B;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+
+ // Instruction bits for scalar format in data processing operations.
+ static Instr SFormat(VRegister vd) {
+ DCHECK(vd.IsScalar());
+ switch (vd.SizeInBytes()) {
+ case 1:
+ return NEON_B;
+ case 2:
+ return NEON_H;
+ case 4:
+ return NEON_S;
+ case 8:
+ return NEON_D;
+ default:
+ UNREACHABLE();
+ }
+ }
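+
+  // Illustrative mapping, not part of the original change: what the format
+  // helpers above resolve to for a few common register shapes. The NEON_*,
+  // LS_NEON_* and FP* constants and the V16B()/V2S()/V8H()/V4S() helpers are
+  // assumed from constants-arm64.h and the VRegister changes in this patch.
+  //
+  //   VFormat(v0.V16B())   -> NEON_16B     (128-bit, sixteen 8-bit lanes)
+  //   VFormat(v0.V2S())    -> NEON_2S      (64-bit, two 32-bit lanes)
+  //   LSVFormat(v0.V8H())  -> LS_NEON_8H   (128-bit, eight 16-bit lanes)
+  //   FPFormat(d0)         -> FP64         (scalar double)
+  //   FPFormat(v0.V4S())   -> NEON_FP_4S   (four single-precision lanes)
+  //   SFormat(s0)          -> NEON_S       (scalar with a 4-byte lane)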
+
+ static Instr ImmNEONHLM(int index, int num_bits) {
+ int h, l, m;
+ if (num_bits == 3) {
+ DCHECK(is_uint3(index));
+ h = (index >> 2) & 1;
+ l = (index >> 1) & 1;
+ m = (index >> 0) & 1;
+ } else if (num_bits == 2) {
+ DCHECK(is_uint2(index));
+ h = (index >> 1) & 1;
+ l = (index >> 0) & 1;
+ m = 0;
+ } else {
+ DCHECK(is_uint1(index) && (num_bits == 1));
+ h = (index >> 0) & 1;
+ l = 0;
+ m = 0;
+ }
+ return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
+ }
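+
+  // Worked example for ImmNEONHLM above (illustrative, not part of the
+  // original change): a 3-bit lane index of 5 (0b101) splits into h = 1,
+  // l = 0, m = 1, which are then placed into the NEONH, NEONL and NEONM
+  // fields of the instruction word.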
+
+ static Instr ImmNEONExt(int imm4) {
+ DCHECK(is_uint4(imm4));
+ return imm4 << ImmNEONExt_offset;
+ }
+
+ static Instr ImmNEON5(Instr format, int index) {
+ DCHECK(is_uint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm5 = (index << (s + 1)) | (1 << s);
+ return imm5 << ImmNEON5_offset;
+ }
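+
+  // Worked example for ImmNEON5 above (illustrative, not part of the original
+  // change): for an S-sized lane (4 bytes, so s = 2), index 3 encodes as
+  // imm5 = (3 << 3) | (1 << 2) = 0b11100 before being shifted into place.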
+
+ static Instr ImmNEON4(Instr format, int index) {
+ DCHECK(is_uint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm4 = index << s;
+ return imm4 << ImmNEON4_offset;
+ }
+
+ static Instr ImmNEONabcdefgh(int imm8) {
+ DCHECK(is_uint8(imm8));
+ Instr instr;
+ instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
+ instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
+ return instr;
+ }
+
+ static Instr NEONCmode(int cmode) {
+ DCHECK(is_uint4(cmode));
+ return cmode << NEONCmode_offset;
+ }
+
+ static Instr NEONModImmOp(int op) {
+ DCHECK(is_uint1(op));
+ return op << NEONModImmOp_offset;
+ }
static bool IsImmLSUnscaled(int64_t offset);
- static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+ static bool IsImmLSScaled(int64_t offset, unsigned size);
static bool IsImmLLiteral(int64_t offset);
// Move immediates encoding.
@@ -1767,12 +3110,12 @@ class Assembler : public AssemblerBase {
inline static Instr ShiftMoveWide(int shift);
// FP Immediates.
- static Instr ImmFP32(float imm);
- static Instr ImmFP64(double imm);
+ static Instr ImmFP(double imm);
+ static Instr ImmNEONFP(double imm);
inline static Instr FPScale(unsigned scale);
// FP register type.
- inline static Instr FPType(FPRegister fd);
+ inline static Instr FPType(VRegister fd);
// Class for scoping postponing the constant pool generation.
class BlockConstPoolScope {
@@ -1840,16 +3183,56 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
+ // Class for blocking sharing of code targets in constant pool.
+ class BlockCodeTargetSharingScope {
+ public:
+ explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
+ Open(assem);
+ }
+ // This constructor does not initialize the scope. The user needs to
+ // explicitly call Open() before using it.
+ BlockCodeTargetSharingScope() : assem_(nullptr) {}
+ ~BlockCodeTargetSharingScope() { Close(); }
+ void Open(Assembler* assem) {
+ DCHECK_NULL(assem_);
+ DCHECK_NOT_NULL(assem);
+ assem_ = assem;
+ assem_->StartBlockCodeTargetSharing();
+ }
+
+ private:
+ void Close() {
+ if (assem_ != nullptr) {
+ assem_->EndBlockCodeTargetSharing();
+ }
+ }
+ Assembler* assem_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
+ };
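+
+  // Hedged usage sketch, not part of the original change ("assm" is an
+  // assumed Assembler* in scope):
+  //
+  //   {
+  //     Assembler::BlockCodeTargetSharingScope scope(assm);
+  //     // Emit calls whose constant-pool code targets must stay distinct.
+  //   }
+  //
+  // Construction calls StartBlockCodeTargetSharing(); leaving the scope calls
+  // the matching EndBlockCodeTargetSharing().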
+
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
void LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op);
-
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
- static bool IsImmLSPair(int64_t offset, LSDataSize size);
+ void LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op);
+ void LoadStoreStruct1(const VRegister& vt, int reg_count,
+ const MemOperand& addr);
+ void LoadStoreStructSingle(const VRegister& vt, uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructVerify(const VRegister& vt, const MemOperand& addr,
+ Instr op);
+
+ static bool IsImmLSPair(int64_t offset, unsigned size);
void Logical(const Register& rd,
const Register& rn,
@@ -1913,7 +3296,19 @@ class Assembler : public AssemblerBase {
Label* label,
Instruction* label_veneer = NULL);
+ // Prevent sharing of code target constant pool entries until
+ // EndBlockCodeTargetSharing is called. Calls to this function can be nested
+  // but must be followed by an equal number of calls to
+ // EndBlockCodeTargetSharing.
+  void StartBlockCodeTargetSharing() {
+    ++code_target_sharing_blocked_nesting_;
+  }
+
+ // Resume sharing of constant pool code target entries. Needs to be called
+ // as many times as StartBlockCodeTargetSharing to have an effect.
+  void EndBlockCodeTargetSharing() {
+    --code_target_sharing_blocked_nesting_;
+  }
+
private:
+ static uint32_t FPToImm8(double imm);
+
// Instruction helpers.
void MoveWide(const Register& rd,
uint64_t imm,
@@ -1942,18 +3337,66 @@ class Assembler : public AssemblerBase {
const Register& rm,
const Register& ra,
DataProcessing3SourceOp op);
- void FPDataProcessing1Source(const FPRegister& fd,
- const FPRegister& fn,
+ void FPDataProcessing1Source(const VRegister& fd, const VRegister& fn,
FPDataProcessing1SourceOp op);
- void FPDataProcessing2Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
+ void FPDataProcessing2Source(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm,
FPDataProcessing2SourceOp op);
- void FPDataProcessing3Source(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa,
+ void FPDataProcessing3Source(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa,
FPDataProcessing3SourceOp op);
+ void NEONAcrossLanesL(const VRegister& vd, const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONAcrossLanes(const VRegister& vd, const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op);
+ void NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op);
+ void NEON3Same(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ NEON3SameOp vop);
+ void NEONFP3Same(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, Instr op);
+ void NEON3DifferentL(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3DifferentOp vop);
+ void NEON3DifferentW(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3DifferentOp vop);
+ void NEON3DifferentHN(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, NEON3DifferentOp vop);
+ void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
+ NEON2RegMiscOp vop, double value = 0.0);
+ void NEON2RegMisc(const VRegister& vd, const VRegister& vn,
+ NEON2RegMiscOp vop, int value = 0);
+ void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, Instr op);
+ void NEONAddlp(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp op);
+ void NEONPerm(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ NEONPermOp op);
+ void NEONFPByElement(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElement(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElementL(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm, int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONShiftImmediate(const VRegister& vd, const VRegister& vn,
+ NEONShiftImmediateOp op, int immh_immb);
+ void NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn,
+ int shift, NEONShiftImmediateOp op);
+ void NEONShiftRightImmediate(const VRegister& vd, const VRegister& vn,
+ int shift, NEONShiftImmediateOp op);
+ void NEONShiftImmediateL(const VRegister& vd, const VRegister& vn, int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateN(const VRegister& vd, const VRegister& vn, int shift,
+ NEONShiftImmediateOp op);
+ void NEONXtn(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp vop);
+ void NEONTable(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ NEONTableOp op);
+
+ Instr LoadStoreStructAddrModeField(const MemOperand& addr);
// Label helpers.
@@ -2044,6 +3487,12 @@ class Assembler : public AssemblerBase {
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
+ // Sharing of code target entries may be blocked in some code sequences.
+ int code_target_sharing_blocked_nesting_;
+ bool IsCodeTargetSharingAllowed() const {
+ return code_target_sharing_blocked_nesting_ == 0;
+ }
+
// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -2064,22 +3513,7 @@ class Assembler : public AssemblerBase {
// The pending constant pool.
ConstPool constpool_;
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
- inline TypeFeedbackId RecordedAstId();
- inline void ClearRecordedAstId();
-
protected:
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- DCHECK(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
@@ -2089,6 +3523,22 @@ class Assembler : public AssemblerBase {
static constexpr int kGap = 128;
public:
+#ifdef DEBUG
+ // Functions used for testing.
+ int GetConstantPoolEntriesSizeForTesting() const {
+ // Do not include branch over the pool.
+ return constpool_.EntryCount() * kPointerSize;
+ }
+
+ static constexpr int GetCheckConstPoolIntervalForTesting() {
+ return kCheckConstPoolInterval;
+ }
+
+ static constexpr int GetApproxMaxDistToConstPoolForTesting() {
+ return kApproxMaxDistToConstPool;
+ }
+#endif
+
class FarBranchInfo {
public:
FarBranchInfo(int offset, Label* label)
@@ -2148,6 +3598,19 @@ class Assembler : public AssemblerBase {
// the length of the label chain.
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void RequestHeapObject(HeapObjectRequest request);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
+
private:
friend class EnsureSpace;
friend class ConstPool;
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index c3c3367b10..0628a2c923 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -38,32 +38,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK((param_count == 0) ||
- x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
-
- // Push arguments
- MacroAssembler::PushPopQueue queue(masm);
- for (int i = 0; i < param_count; ++i) {
- queue.Queue(descriptor.GetRegisterParameter(i));
- }
- queue.PushQueued();
-
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
@@ -147,8 +121,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
- FPRegister double_scratch,
- Label* slow, Condition cond) {
+ VRegister double_scratch, Label* slow,
+ Condition cond) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -292,12 +266,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// See call site for description.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register left,
- Register right,
- FPRegister left_d,
- FPRegister right_d,
- Label* slow,
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left,
+ Register right, VRegister left_d,
+ VRegister right_d, Label* slow,
bool strict) {
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
@@ -476,8 +447,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// In case 3, we have found out that we were dealing with a number-number
// comparison. The double values of the numbers have been loaded, right into
// rhs_d, left into lhs_d.
- FPRegister rhs_d = d0;
- FPRegister lhs_d = d1;
+ VRegister rhs_d = d0;
+ VRegister lhs_d = d1;
EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
@@ -613,7 +584,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CPURegList saved_regs = kCallerSaved;
- CPURegList saved_fp_regs = kCallerSavedFP;
+ CPURegList saved_fp_regs = kCallerSavedV;
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -686,12 +657,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Register exponent_integer = MathPowIntegerDescriptor::exponent();
DCHECK(exponent_integer.is(x12));
Register saved_lr = x19;
- FPRegister result_double = d0;
- FPRegister base_double = d0;
- FPRegister exponent_double = d1;
- FPRegister base_double_copy = d2;
- FPRegister scratch1_double = d6;
- FPRegister scratch0_double = d7;
+ VRegister result_double = d0;
+ VRegister base_double = d0;
+ VRegister exponent_double = d1;
+ VRegister base_double_copy = d2;
+ VRegister scratch1_double = d6;
+ VRegister scratch0_double = d7;
// A fast-path for integer exponents.
Label exponent_is_smi, exponent_is_integer;
@@ -803,14 +774,11 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// CEntryStub.
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1046,15 +1014,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set x0 to
// contain the current pending exception, don't clobber it.
@@ -1142,7 +1110,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
__ Mov(x12, StackFrame::TypeToMarker(marker));
- __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
+ __ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
__ Push(x13, x12, xzr, x10);
@@ -1152,7 +1120,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
Label non_outermost_js, done;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ Mov(x10, ExternalReference(js_entry_sp));
__ Ldr(x11, MemOperand(x10));
__ Cbnz(x11, &non_outermost_js);
@@ -1191,8 +1159,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
+ __ Mov(x10, Operand(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate())));
}
__ Str(code_entry, MemOperand(x10));
__ LoadRoot(x0, Heap::kExceptionRootIndex);
@@ -1252,7 +1220,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ Pop(x10);
- __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
+ __ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()));
__ Str(x10, MemOperand(x11));
// Reset the stack to the callee saved registers.
@@ -1582,8 +1550,8 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
Register result = x0;
Register rhs = x0;
Register lhs = x1;
- FPRegister rhs_d = d0;
- FPRegister lhs_d = d1;
+ VRegister rhs_d = d0;
+ VRegister lhs_d = d1;
if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
@@ -2009,32 +1977,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : left
- // -- x0 : right
- // -- lr : return address
- // -----------------------------------
-
- // Load x2 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ AssertNotSmi(x2, kExpectedAllocationSite);
- __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
- kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
Register address,
Register scratch)
@@ -2042,7 +1984,7 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
- saved_fp_regs_(kCallerSavedFP) {
+ saved_fp_regs_(kCallerSavedV) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
@@ -2131,10 +2073,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// If the object is not black we don't have to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -2148,6 +2091,8 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ Bind(&on_black);
+#endif
+
// Get the value from the slot.
Register val = regs_.scratch0();
__ Ldr(val, MemOperand(regs_.address()));
@@ -2225,26 +2170,25 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ Ldr(x1, MemOperand(fp, parameter_count_offset));
- if (function_mode() == JS_FUNCTION_STUB_MODE) {
- __ Add(x1, x1, 1);
- }
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ Drop(x1);
- // Return to IC Miss stub, continuation still on stack.
- __ Ret();
-}
-
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ Assembler::BlockConstPoolScope no_const_pools(tasm);
+ DontEmitDebugCodeScope no_debug_code(tasm);
+ Label entry_hook_call_start;
+ tasm->Bind(&entry_hook_call_start);
+ tasm->Push(lr);
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+ kProfileEntryHookCallSize);
+ tasm->Pop(lr);
+ }
+}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
@@ -2257,7 +2201,6 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
-
__ Pop(lr);
}
}
@@ -2397,7 +2340,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ PushCPURegList(spill_list);
- __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ Mov(x1, Operand(name));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2543,23 +2486,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Register allocation_site = x2;
Register kind = x3;
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // Is the low bit set? If so, the array is holey.
- __ Tbnz(kind, 0, &normal_sequence);
- }
-
- // Look at the last argument.
- // TODO(jbramley): What does a 0 argument represent?
- __ Peek(x10, 0);
- __ Cbz(x10, &normal_sequence);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2569,13 +2501,11 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ Bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ // Is the low bit set? If so, the array is holey.
+ Label normal_sequence;
+ __ Tbnz(kind, 0, &normal_sequence);
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ Orr(kind, kind, 1);
@@ -2591,11 +2521,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field; upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ Ldr(x11, FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
+ __ Ldr(x11,
+ FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
- __ Str(x11, FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
+ __ Str(x11,
+ FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Bind(&normal_sequence);
int last_index =
@@ -2619,13 +2551,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2639,7 +2571,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2718,9 +2650,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
- __ Ldrsw(kind,
- UntagSmiFieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
+ __ Ldrsw(kind, UntagSmiFieldMemOperand(
+ allocation_site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
@@ -2809,17 +2741,17 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ Cmp(x3, FAST_ELEMENTS);
- __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Cmp(x3, PACKED_ELEMENTS);
+ __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
__ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
- __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ Bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
// The number of registers that CallApiFunctionAndReturn will need to save on
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index ddaa30e984..dc2e55cf82 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -33,13 +33,13 @@ const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
-const int kNumberOfFPRegisters = 32;
+const int kNumberOfVRegisters = 32;
// Callee saved registers are x19-x30(lr).
const int kNumberOfCalleeSavedRegisters = 11;
const int kFirstCalleeSavedRegisterIndex = 19;
// Callee saved FP registers are d8-d15.
-const int kNumberOfCalleeSavedFPRegisters = 8;
-const int kFirstCalleeSavedFPRegisterIndex = 8;
+const int kNumberOfCalleeSavedVRegisters = 8;
+const int kFirstCalleeSavedVRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
const unsigned kJSCalleeSavedRegList = 0x03f80000;
const int kWRegSizeInBits = 32;
@@ -58,6 +58,17 @@ const int kDRegSizeInBits = 64;
const int kDRegSizeInBitsLog2 = 6;
const int kDRegSize = kDRegSizeInBits >> 3;
const int kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
+const int kDRegSizeInBytesLog2 = kDRegSizeInBitsLog2 - 3;
+const int kBRegSizeInBits = 8;
+const int kBRegSize = kBRegSizeInBits >> 3;
+const int kHRegSizeInBits = 16;
+const int kHRegSize = kHRegSizeInBits >> 3;
+const int kQRegSizeInBits = 128;
+const int kQRegSizeInBitsLog2 = 7;
+const int kQRegSize = kQRegSizeInBits >> 3;
+const int kQRegSizeLog2 = kQRegSizeInBitsLog2 - 3;
+const int kVRegSizeInBits = kQRegSizeInBits;
+const int kVRegSize = kVRegSizeInBits >> 3;
const int64_t kWRegMask = 0x00000000ffffffffL;
const int64_t kXRegMask = 0xffffffffffffffffL;
const int64_t kSRegMask = 0x00000000ffffffffL;
@@ -110,12 +121,27 @@ const unsigned kDoubleWordSize = 64;
const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
const unsigned kQuadWordSize = 128;
const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
+const int kMaxLanesPerVector = 16;
+
+const unsigned kAddressTagOffset = 56;
+const unsigned kAddressTagWidth = 8;
+const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
+ << kAddressTagOffset;
+static_assert(kAddressTagMask == UINT64_C(0xff00000000000000),
+ "AddressTagMask must represent most-significant eight bits.");
+
// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kDoubleExponentBias = 1023;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
+const unsigned kFloatExponentBias = 127;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+const unsigned kFloat16ExponentBias = 15;
+
+typedef uint16_t float16;
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
@@ -126,7 +152,7 @@ const unsigned kFloatExponentBits = 8;
V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
/* store second source. */ \
- V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \
+ V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \
V_(PrefetchMode, 4, 0, Bits) \
\
/* Common bits */ \
@@ -181,8 +207,22 @@ const unsigned kFloatExponentBits = 8;
V_(ImmLS, 20, 12, SignedBits) \
V_(ImmLSUnsigned, 21, 10, Bits) \
V_(ImmLSPair, 21, 15, SignedBits) \
- V_(SizeLS, 31, 30, Bits) \
V_(ImmShiftLS, 12, 12, Bits) \
+ V_(LSOpc, 23, 22, Bits) \
+ V_(LSVector, 26, 26, Bits) \
+ V_(LSSize, 31, 30, Bits) \
+ \
+ /* NEON generic fields */ \
+ V_(NEONQ, 30, 30, Bits) \
+ V_(NEONSize, 23, 22, Bits) \
+ V_(NEONLSSize, 11, 10, Bits) \
+ V_(NEONS, 12, 12, Bits) \
+ V_(NEONL, 21, 21, Bits) \
+ V_(NEONM, 20, 20, Bits) \
+ V_(NEONH, 11, 11, Bits) \
+ V_(ImmNEONExt, 14, 11, Bits) \
+ V_(ImmNEON5, 20, 16, Bits) \
+ V_(ImmNEON4, 14, 11, Bits) \
\
/* Other immediates */ \
V_(ImmUncondBranch, 25, 0, SignedBits) \
@@ -206,7 +246,21 @@ const unsigned kFloatExponentBits = 8;
V_(LoadStoreXNotExclusive, 23, 23, Bits) \
V_(LoadStoreXAcquireRelease, 15, 15, Bits) \
V_(LoadStoreXSizeLog2, 31, 30, Bits) \
- V_(LoadStoreXPair, 21, 21, Bits)
+ V_(LoadStoreXPair, 21, 21, Bits) \
+ \
+ /* NEON load/store */ \
+ V_(NEONLoad, 22, 22, Bits) \
+ \
+ /* NEON Modified Immediate fields */ \
+ V_(ImmNEONabc, 18, 16, Bits) \
+ V_(ImmNEONdefgh, 9, 5, Bits) \
+ V_(NEONModImmOp, 29, 29, Bits) \
+ V_(NEONCmode, 15, 12, Bits) \
+ \
+ /* NEON Shift Immediate fields */ \
+ V_(ImmNEONImmhImmb, 22, 16, Bits) \
+ V_(ImmNEONImmh, 22, 19, Bits) \
+ V_(ImmNEONImmb, 18, 16, Bits)
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \
@@ -297,7 +351,6 @@ inline Condition CommuteCondition(Condition cond) {
// invalid as it doesn't necessarily make sense to reverse it (consider
// 'mi' for instance).
UNREACHABLE();
- return nv;
}
}
@@ -338,7 +391,8 @@ enum Shift {
LSL = 0x0,
LSR = 0x1,
ASR = 0x2,
- ROR = 0x3
+ ROR = 0x3,
+ MSL = 0x4
};
enum Extend {
@@ -411,6 +465,10 @@ enum SystemRegister {
// default: printf("Unknown instruction\n");
// }
+// Used to corrupt encodings by setting all bits when orred. Although currently
+// unallocated in AArch64, this encoding is not guaranteed to be undefined
+// indefinitely.
+const uint32_t kUnallocatedInstruction = 0xffffffff;
// Generic fields.
enum GenericInstrField {
@@ -420,6 +478,47 @@ enum GenericInstrField {
FP64 = 0x00400000
};
+enum NEONFormatField {
+ NEONFormatFieldMask = 0x40C00000,
+ NEON_Q = 0x40000000,
+ NEON_8B = 0x00000000,
+ NEON_16B = NEON_8B | NEON_Q,
+ NEON_4H = 0x00400000,
+ NEON_8H = NEON_4H | NEON_Q,
+ NEON_2S = 0x00800000,
+ NEON_4S = NEON_2S | NEON_Q,
+ NEON_1D = 0x00C00000,
+ NEON_2D = 0x00C00000 | NEON_Q
+};
+
+enum NEONFPFormatField {
+ NEONFPFormatFieldMask = 0x40400000,
+ NEON_FP_2S = FP32,
+ NEON_FP_4S = FP32 | NEON_Q,
+ NEON_FP_2D = FP64 | NEON_Q
+};
+
+enum NEONLSFormatField {
+ NEONLSFormatFieldMask = 0x40000C00,
+ LS_NEON_8B = 0x00000000,
+ LS_NEON_16B = LS_NEON_8B | NEON_Q,
+ LS_NEON_4H = 0x00000400,
+ LS_NEON_8H = LS_NEON_4H | NEON_Q,
+ LS_NEON_2S = 0x00000800,
+ LS_NEON_4S = LS_NEON_2S | NEON_Q,
+ LS_NEON_1D = 0x00000C00,
+ LS_NEON_2D = LS_NEON_1D | NEON_Q
+};
+
+enum NEONScalarFormatField {
+ NEONScalarFormatFieldMask = 0x00C00000,
+ NEONScalar = 0x10000000,
+ NEON_B = 0x00000000,
+ NEON_H = 0x00400000,
+ NEON_S = 0x00800000,
+ NEON_D = 0x00C00000
+};
+
// PC relative addressing.
enum PCRelAddressingOp {
PCRelAddressingFixed = 0x10000000,
@@ -713,16 +812,12 @@ enum LoadStorePairAnyOp {
LoadStorePairAnyFixed = 0x28000000
};
-#define LOAD_STORE_PAIR_OP_LIST(V) \
- V(STP, w, 0x00000000), \
- V(LDP, w, 0x00400000), \
- V(LDPSW, x, 0x40400000), \
- V(STP, x, 0x80000000), \
- V(LDP, x, 0x80400000), \
- V(STP, s, 0x04000000), \
- V(LDP, s, 0x04400000), \
- V(STP, d, 0x44000000), \
- V(LDP, d, 0x44400000)
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000) \
+ , V(LDP, w, 0x00400000), V(LDPSW, x, 0x40400000), V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), V(STP, s, 0x04000000), V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), V(LDP, d, 0x44400000), V(STP, q, 0x84000000), \
+ V(LDP, q, 0x84400000)
// Load/store pair (post, pre and offset.)
enum LoadStorePairOp {
@@ -777,25 +872,34 @@ enum LoadLiteralOp {
LDR_d_lit = LoadLiteralFixed | 0x44000000
};
-#define LOAD_STORE_OP_LIST(V) \
- V(ST, RB, w, 0x00000000), \
- V(ST, RH, w, 0x40000000), \
- V(ST, R, w, 0x80000000), \
- V(ST, R, x, 0xC0000000), \
- V(LD, RB, w, 0x00400000), \
- V(LD, RH, w, 0x40400000), \
- V(LD, R, w, 0x80400000), \
- V(LD, R, x, 0xC0400000), \
- V(LD, RSB, x, 0x00800000), \
- V(LD, RSH, x, 0x40800000), \
- V(LD, RSW, x, 0x80800000), \
- V(LD, RSB, w, 0x00C00000), \
- V(LD, RSH, w, 0x40C00000), \
- V(ST, R, s, 0x84000000), \
- V(ST, R, d, 0xC4000000), \
- V(LD, R, s, 0x84400000), \
- V(LD, R, d, 0xC4400000)
-
+// clang-format off
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, b, 0x04000000), \
+ V(ST, R, h, 0x44000000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(ST, R, q, 0x04800000), \
+ V(LD, R, b, 0x04400000), \
+ V(LD, R, h, 0x44400000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000), \
+ V(LD, R, q, 0x04C00000)
+
+// clang-format on
// Load/store unscaled offset.
enum LoadStoreUnscaledOffsetOp {
@@ -810,11 +914,10 @@ enum LoadStoreUnscaledOffsetOp {
// Load/store (post, pre, offset and unsigned.)
enum LoadStoreOp {
- LoadStoreOpMask = 0xC4C00000,
- #define LOAD_STORE(A, B, C, D) \
- A##B##_##C = D
+ LoadStoreMask = 0xC4C00000,
+#define LOAD_STORE(A, B, C, D) A##B##_##C = D
LOAD_STORE_OP_LIST(LOAD_STORE),
- #undef LOAD_STORE
+#undef LOAD_STORE
PRFM = 0xC0800000
};
@@ -1063,42 +1166,46 @@ enum FPImmediateOp {
enum FPDataProcessing1SourceOp {
FPDataProcessing1SourceFixed = 0x1E204000,
FPDataProcessing1SourceFMask = 0x5F207C00,
- FPDataProcessing1SourceMask = 0xFFFFFC00,
- FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
- FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
- FMOV = FMOV_s,
- FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
- FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
- FABS = FABS_s,
- FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
- FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
- FNEG = FNEG_s,
- FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
- FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
- FSQRT = FSQRT_s,
- FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
- FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000,
+ FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000,
+ FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000,
+ FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000,
FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
- FRINTN = FRINTN_s,
+ FRINTN = FRINTN_s,
FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
- FRINTP = FRINTP_s,
+ FRINTP = FRINTP_s,
FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
- FRINTM = FRINTM_s,
+ FRINTM = FRINTM_s,
FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
- FRINTZ = FRINTZ_s,
+ FRINTZ = FRINTZ_s,
FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
- FRINTA = FRINTA_s,
+ FRINTA = FRINTA_s,
FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
- FRINTX = FRINTX_s,
+ FRINTX = FRINTX_s,
FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
- FRINTI = FRINTI_s
+ FRINTI = FRINTI_s
};
// Floating point data processing 2 source.
@@ -1154,71 +1261,73 @@ enum FPDataProcessing3SourceOp {
enum FPIntegerConvertOp {
FPIntegerConvertFixed = 0x1E200000,
FPIntegerConvertFMask = 0x5F20FC00,
- FPIntegerConvertMask = 0xFFFFFC00,
- FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
FCVTNS_ws = FCVTNS,
FCVTNS_xs = FCVTNS | SixtyFourBits,
FCVTNS_wd = FCVTNS | FP64,
FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
- FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
FCVTNU_ws = FCVTNU,
FCVTNU_xs = FCVTNU | SixtyFourBits,
FCVTNU_wd = FCVTNU | FP64,
FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
- FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
FCVTPS_ws = FCVTPS,
FCVTPS_xs = FCVTPS | SixtyFourBits,
FCVTPS_wd = FCVTPS | FP64,
FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
- FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
FCVTPU_ws = FCVTPU,
FCVTPU_xs = FCVTPU | SixtyFourBits,
FCVTPU_wd = FCVTPU | FP64,
FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
- FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
FCVTMS_ws = FCVTMS,
FCVTMS_xs = FCVTMS | SixtyFourBits,
FCVTMS_wd = FCVTMS | FP64,
FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
- FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
FCVTMU_ws = FCVTMU,
FCVTMU_xs = FCVTMU | SixtyFourBits,
FCVTMU_wd = FCVTMU | FP64,
FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
- FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
FCVTZS_ws = FCVTZS,
FCVTZS_xs = FCVTZS | SixtyFourBits,
FCVTZS_wd = FCVTZS | FP64,
FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
- FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
FCVTZU_ws = FCVTZU,
FCVTZU_xs = FCVTZU | SixtyFourBits,
FCVTZU_wd = FCVTZU | FP64,
FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
- SCVTF = FPIntegerConvertFixed | 0x00020000,
- SCVTF_sw = SCVTF,
- SCVTF_sx = SCVTF | SixtyFourBits,
- SCVTF_dw = SCVTF | FP64,
- SCVTF_dx = SCVTF | SixtyFourBits | FP64,
- UCVTF = FPIntegerConvertFixed | 0x00030000,
- UCVTF_sw = UCVTF,
- UCVTF_sx = UCVTF | SixtyFourBits,
- UCVTF_dw = UCVTF | FP64,
- UCVTF_dx = UCVTF | SixtyFourBits | FP64,
- FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
FCVTAS_ws = FCVTAS,
FCVTAS_xs = FCVTAS | SixtyFourBits,
FCVTAS_wd = FCVTAS | FP64,
FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
- FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
FCVTAU_ws = FCVTAU,
FCVTAU_xs = FCVTAU | SixtyFourBits,
FCVTAU_wd = FCVTAU | FP64,
FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
- FMOV_ws = FPIntegerConvertFixed | 0x00060000,
- FMOV_sw = FPIntegerConvertFixed | 0x00070000,
- FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
- FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
+ FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000
};
// Conversion between fixed point and floating point.
@@ -1248,6 +1357,757 @@ enum FPFixedPointConvertOp {
UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
};
+// NEON instructions with two register operands.
+enum NEON2RegMiscOp {
+ NEON2RegMiscFixed = 0x0E200800,
+ NEON2RegMiscFMask = 0x9F3E0C00,
+ NEON2RegMiscMask = 0xBF3FFC00,
+ NEON2RegMiscUBit = 0x20000000,
+ NEON_REV64 = NEON2RegMiscFixed | 0x00000000,
+ NEON_REV32 = NEON2RegMiscFixed | 0x20000000,
+ NEON_REV16 = NEON2RegMiscFixed | 0x00001000,
+ NEON_SADDLP = NEON2RegMiscFixed | 0x00002000,
+ NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit,
+ NEON_SUQADD = NEON2RegMiscFixed | 0x00003000,
+ NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit,
+ NEON_CLS = NEON2RegMiscFixed | 0x00004000,
+ NEON_CLZ = NEON2RegMiscFixed | 0x20004000,
+ NEON_CNT = NEON2RegMiscFixed | 0x00005000,
+ NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000,
+ NEON_SADALP = NEON2RegMiscFixed | 0x00006000,
+ NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit,
+ NEON_SQABS = NEON2RegMiscFixed | 0x00007000,
+ NEON_SQNEG = NEON2RegMiscFixed | 0x20007000,
+ NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000,
+ NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000,
+ NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000,
+ NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000,
+ NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000,
+ NEON_ABS = NEON2RegMiscFixed | 0x0000B000,
+ NEON_NEG = NEON2RegMiscFixed | 0x2000B000,
+ NEON_XTN = NEON2RegMiscFixed | 0x00012000,
+ NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000,
+ NEON_SHLL = NEON2RegMiscFixed | 0x20013000,
+ NEON_SQXTN = NEON2RegMiscFixed | 0x00014000,
+ NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit,
+
+ NEON2RegMiscOpcode = 0x0001F000,
+ NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode,
+ NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode,
+ NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode,
+ NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode,
+
+ // These instructions use only one bit of the size field. The other bit is
+ // used to distinguish between instructions.
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000,
+ NEON_FABS = NEON2RegMiscFixed | 0x0080F000,
+ NEON_FNEG = NEON2RegMiscFixed | 0x2080F000,
+ NEON_FCVTN = NEON2RegMiscFixed | 0x00016000,
+ NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000,
+ NEON_FCVTL = NEON2RegMiscFixed | 0x00017000,
+ NEON_FRINTN = NEON2RegMiscFixed | 0x00018000,
+ NEON_FRINTA = NEON2RegMiscFixed | 0x20018000,
+ NEON_FRINTP = NEON2RegMiscFixed | 0x00818000,
+ NEON_FRINTM = NEON2RegMiscFixed | 0x00019000,
+ NEON_FRINTX = NEON2RegMiscFixed | 0x20019000,
+ NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000,
+ NEON_FRINTI = NEON2RegMiscFixed | 0x20819000,
+ NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000,
+ NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit,
+ NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000,
+ NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit,
+ NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000,
+ NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit,
+ NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000,
+ NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit,
+ NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000,
+ NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit,
+ NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000,
+ NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000,
+ NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit,
+ NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000,
+ NEON_URECPE = NEON2RegMiscFixed | 0x0081C000,
+ NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000,
+ NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000,
+ NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000,
+ NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000,
+ NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000,
+ NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000,
+ NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000,
+
+ NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode,
+ NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode
+};
+
+// NEON instructions with three same-type operands.
+enum NEON3SameOp {
+ NEON3SameFixed = 0x0E200400,
+ NEON3SameFMask = 0x9F200400,
+ NEON3SameMask = 0xBF20FC00,
+ NEON3SameUBit = 0x20000000,
+ NEON_ADD = NEON3SameFixed | 0x00008000,
+ NEON_ADDP = NEON3SameFixed | 0x0000B800,
+ NEON_SHADD = NEON3SameFixed | 0x00000000,
+ NEON_SHSUB = NEON3SameFixed | 0x00002000,
+ NEON_SRHADD = NEON3SameFixed | 0x00001000,
+ NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800,
+ NEON_CMGE = NEON3SameFixed | 0x00003800,
+ NEON_CMGT = NEON3SameFixed | 0x00003000,
+ NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT,
+ NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE,
+ NEON_CMTST = NEON3SameFixed | 0x00008800,
+ NEON_MLA = NEON3SameFixed | 0x00009000,
+ NEON_MLS = NEON3SameFixed | 0x20009000,
+ NEON_MUL = NEON3SameFixed | 0x00009800,
+ NEON_PMUL = NEON3SameFixed | 0x20009800,
+ NEON_SRSHL = NEON3SameFixed | 0x00005000,
+ NEON_SQSHL = NEON3SameFixed | 0x00004800,
+ NEON_SQRSHL = NEON3SameFixed | 0x00005800,
+ NEON_SSHL = NEON3SameFixed | 0x00004000,
+ NEON_SMAX = NEON3SameFixed | 0x00006000,
+ NEON_SMAXP = NEON3SameFixed | 0x0000A000,
+ NEON_SMIN = NEON3SameFixed | 0x00006800,
+ NEON_SMINP = NEON3SameFixed | 0x0000A800,
+ NEON_SABD = NEON3SameFixed | 0x00007000,
+ NEON_SABA = NEON3SameFixed | 0x00007800,
+ NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD,
+ NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA,
+ NEON_SQADD = NEON3SameFixed | 0x00000800,
+ NEON_SQSUB = NEON3SameFixed | 0x00002800,
+ NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000,
+ NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD,
+ NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB,
+ NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD,
+ NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX,
+ NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP,
+ NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN,
+ NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP,
+ NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL,
+ NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD,
+ NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL,
+ NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL,
+ NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB,
+ NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL,
+ NEON_SQDMULH = NEON3SameFixed | 0x0000B000,
+ NEON_SQRDMULH = NEON3SameFixed | 0x2000B000,
+
+ // NEON floating point instructions with three same-type operands.
+ NEON3SameFPFixed = NEON3SameFixed | 0x0000C000,
+ NEON3SameFPFMask = NEON3SameFMask | 0x0000C000,
+ NEON3SameFPMask = NEON3SameMask | 0x00800000,
+ NEON_FADD = NEON3SameFixed | 0x0000D000,
+ NEON_FSUB = NEON3SameFixed | 0x0080D000,
+ NEON_FMUL = NEON3SameFixed | 0x2000D800,
+ NEON_FDIV = NEON3SameFixed | 0x2000F800,
+ NEON_FMAX = NEON3SameFixed | 0x0000F000,
+ NEON_FMAXNM = NEON3SameFixed | 0x0000C000,
+ NEON_FMAXP = NEON3SameFixed | 0x2000F000,
+ NEON_FMAXNMP = NEON3SameFixed | 0x2000C000,
+ NEON_FMIN = NEON3SameFixed | 0x0080F000,
+ NEON_FMINNM = NEON3SameFixed | 0x0080C000,
+ NEON_FMINP = NEON3SameFixed | 0x2080F000,
+ NEON_FMINNMP = NEON3SameFixed | 0x2080C000,
+ NEON_FMLA = NEON3SameFixed | 0x0000C800,
+ NEON_FMLS = NEON3SameFixed | 0x0080C800,
+ NEON_FMULX = NEON3SameFixed | 0x0000D800,
+ NEON_FRECPS = NEON3SameFixed | 0x0000F800,
+ NEON_FRSQRTS = NEON3SameFixed | 0x0080F800,
+ NEON_FABD = NEON3SameFixed | 0x2080D000,
+ NEON_FADDP = NEON3SameFixed | 0x2000D000,
+ NEON_FCMEQ = NEON3SameFixed | 0x0000E000,
+ NEON_FCMGE = NEON3SameFixed | 0x2000E000,
+ NEON_FCMGT = NEON3SameFixed | 0x2080E000,
+ NEON_FACGE = NEON3SameFixed | 0x2000E800,
+ NEON_FACGT = NEON3SameFixed | 0x2080E800,
+
+ // NEON logical instructions with three same-type operands.
+ NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800,
+ NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800,
+ NEON3SameLogicalMask = 0xBFE0FC00,
+ NEON3SameLogicalFormatMask = NEON_Q,
+ NEON_AND = NEON3SameLogicalFixed | 0x00000000,
+ NEON_ORR = NEON3SameLogicalFixed | 0x00A00000,
+ NEON_ORN = NEON3SameLogicalFixed | 0x00C00000,
+ NEON_EOR = NEON3SameLogicalFixed | 0x20000000,
+ NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
+ NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
+ NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
+ NEON_BSL = NEON3SameLogicalFixed | 0x20400000
+};
+
+// NEON instructions with three different-type operands.
+enum NEON3DifferentOp {
+ NEON3DifferentFixed = 0x0E200000,
+ NEON3DifferentFMask = 0x9F200C00,
+ NEON3DifferentMask = 0xFF20FC00,
+ NEON_ADDHN = NEON3DifferentFixed | 0x00004000,
+ NEON_ADDHN2 = NEON_ADDHN | NEON_Q,
+ NEON_PMULL = NEON3DifferentFixed | 0x0000E000,
+ NEON_PMULL2 = NEON_PMULL | NEON_Q,
+ NEON_RADDHN = NEON3DifferentFixed | 0x20004000,
+ NEON_RADDHN2 = NEON_RADDHN | NEON_Q,
+ NEON_RSUBHN = NEON3DifferentFixed | 0x20006000,
+ NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q,
+ NEON_SABAL = NEON3DifferentFixed | 0x00005000,
+ NEON_SABAL2 = NEON_SABAL | NEON_Q,
+ NEON_SABDL = NEON3DifferentFixed | 0x00007000,
+ NEON_SABDL2 = NEON_SABDL | NEON_Q,
+ NEON_SADDL = NEON3DifferentFixed | 0x00000000,
+ NEON_SADDL2 = NEON_SADDL | NEON_Q,
+ NEON_SADDW = NEON3DifferentFixed | 0x00001000,
+ NEON_SADDW2 = NEON_SADDW | NEON_Q,
+ NEON_SMLAL = NEON3DifferentFixed | 0x00008000,
+ NEON_SMLAL2 = NEON_SMLAL | NEON_Q,
+ NEON_SMLSL = NEON3DifferentFixed | 0x0000A000,
+ NEON_SMLSL2 = NEON_SMLSL | NEON_Q,
+ NEON_SMULL = NEON3DifferentFixed | 0x0000C000,
+ NEON_SMULL2 = NEON_SMULL | NEON_Q,
+ NEON_SSUBL = NEON3DifferentFixed | 0x00002000,
+ NEON_SSUBL2 = NEON_SSUBL | NEON_Q,
+ NEON_SSUBW = NEON3DifferentFixed | 0x00003000,
+ NEON_SSUBW2 = NEON_SSUBW | NEON_Q,
+ NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000,
+ NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q,
+ NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000,
+ NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q,
+ NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000,
+ NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q,
+ NEON_SUBHN = NEON3DifferentFixed | 0x00006000,
+ NEON_SUBHN2 = NEON_SUBHN | NEON_Q,
+ NEON_UABAL = NEON_SABAL | NEON3SameUBit,
+ NEON_UABAL2 = NEON_UABAL | NEON_Q,
+ NEON_UABDL = NEON_SABDL | NEON3SameUBit,
+ NEON_UABDL2 = NEON_UABDL | NEON_Q,
+ NEON_UADDL = NEON_SADDL | NEON3SameUBit,
+ NEON_UADDL2 = NEON_UADDL | NEON_Q,
+ NEON_UADDW = NEON_SADDW | NEON3SameUBit,
+ NEON_UADDW2 = NEON_UADDW | NEON_Q,
+ NEON_UMLAL = NEON_SMLAL | NEON3SameUBit,
+ NEON_UMLAL2 = NEON_UMLAL | NEON_Q,
+ NEON_UMLSL = NEON_SMLSL | NEON3SameUBit,
+ NEON_UMLSL2 = NEON_UMLSL | NEON_Q,
+ NEON_UMULL = NEON_SMULL | NEON3SameUBit,
+ NEON_UMULL2 = NEON_UMULL | NEON_Q,
+ NEON_USUBL = NEON_SSUBL | NEON3SameUBit,
+ NEON_USUBL2 = NEON_USUBL | NEON_Q,
+ NEON_USUBW = NEON_SSUBW | NEON3SameUBit,
+ NEON_USUBW2 = NEON_USUBW | NEON_Q
+};
+
+// NEON instructions operating across vectors.
+enum NEONAcrossLanesOp {
+ NEONAcrossLanesFixed = 0x0E300800,
+ NEONAcrossLanesFMask = 0x9F3E0C00,
+ NEONAcrossLanesMask = 0xBF3FFC00,
+ NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000,
+ NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000,
+ NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000,
+ NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000,
+ NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000,
+ NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000,
+ NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000,
+
+ // NEON floating point across instructions.
+ NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x0000C000,
+ NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x0000C000,
+ NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x00800000,
+
+ NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000,
+ NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000,
+ NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000,
+ NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000
+};
+
+// NEON instructions with indexed element operand.
+enum NEONByIndexedElementOp {
+ NEONByIndexedElementFixed = 0x0F000000,
+ NEONByIndexedElementFMask = 0x9F000400,
+ NEONByIndexedElementMask = 0xBF00F400,
+ NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000,
+ NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000,
+ NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000,
+ NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000,
+ NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000,
+ NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000,
+ NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000,
+ NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000,
+ NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000,
+ NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000,
+ NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000,
+ NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000,
+ NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000,
+ NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000,
+
+ // Floating point instructions.
+ NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000,
+ NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000,
+ NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000,
+ NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000
+};
+
+// NEON modified immediate.
+enum NEONModifiedImmediateOp {
+ NEONModifiedImmediateFixed = 0x0F000400,
+ NEONModifiedImmediateFMask = 0x9FF80400,
+ NEONModifiedImmediateOpBit = 0x20000000,
+ NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000,
+ NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000,
+ NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000,
+ NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000
+};
+
+// NEON extract.
+enum NEONExtractOp {
+ NEONExtractFixed = 0x2E000000,
+ NEONExtractFMask = 0xBF208400,
+ NEONExtractMask = 0xBFE08400,
+ NEON_EXT = NEONExtractFixed | 0x00000000
+};
+
+enum NEONLoadStoreMultiOp {
+ NEONLoadStoreMultiL = 0x00400000,
+ NEONLoadStoreMulti1_1v = 0x00007000,
+ NEONLoadStoreMulti1_2v = 0x0000A000,
+ NEONLoadStoreMulti1_3v = 0x00006000,
+ NEONLoadStoreMulti1_4v = 0x00002000,
+ NEONLoadStoreMulti2 = 0x00008000,
+ NEONLoadStoreMulti3 = 0x00004000,
+ NEONLoadStoreMulti4 = 0x00000000
+};
+
+// NEON load/store multiple structures.
+enum NEONLoadStoreMultiStructOp {
+ NEONLoadStoreMultiStructFixed = 0x0C000000,
+ NEONLoadStoreMultiStructFMask = 0xBFBF0000,
+ NEONLoadStoreMultiStructMask = 0xBFFFF000,
+ NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed,
+ NEONLoadStoreMultiStructLoad =
+ NEONLoadStoreMultiStructFixed | NEONLoadStoreMultiL,
+ NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v,
+ NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v,
+ NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v,
+ NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v,
+ NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2,
+ NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3,
+ NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4,
+ NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v,
+ NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v,
+ NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v,
+ NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v,
+ NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2,
+ NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3,
+ NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4
+};
+
+// NEON load/store multiple structures with post-index addressing.
+enum NEONLoadStoreMultiStructPostIndexOp {
+ NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
+ NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
+ NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000,
+ NEONLoadStoreMultiStructPostIndex = 0x00800000,
+ NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
+};
+
+enum NEONLoadStoreSingleOp {
+ NEONLoadStoreSingle1 = 0x00000000,
+ NEONLoadStoreSingle2 = 0x00200000,
+ NEONLoadStoreSingle3 = 0x00002000,
+ NEONLoadStoreSingle4 = 0x00202000,
+ NEONLoadStoreSingleL = 0x00400000,
+ NEONLoadStoreSingle_b = 0x00000000,
+ NEONLoadStoreSingle_h = 0x00004000,
+ NEONLoadStoreSingle_s = 0x00008000,
+ NEONLoadStoreSingle_d = 0x00008400,
+ NEONLoadStoreSingleAllLanes = 0x0000C000,
+ NEONLoadStoreSingleLenMask = 0x00202000
+};
+
+// NEON load/store single structure.
+enum NEONLoadStoreSingleStructOp {
+ NEONLoadStoreSingleStructFixed = 0x0D000000,
+ NEONLoadStoreSingleStructFMask = 0xBF9F0000,
+ NEONLoadStoreSingleStructMask = 0xBFFFE000,
+ NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructLoad =
+ NEONLoadStoreSingleStructFixed | NEONLoadStoreSingleL,
+ NEONLoadStoreSingleStructLoad1 =
+ NEONLoadStoreSingle1 | NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad2 =
+ NEONLoadStoreSingle2 | NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad3 =
+ NEONLoadStoreSingle3 | NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad4 =
+ NEONLoadStoreSingle4 | NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructStore1 =
+ NEONLoadStoreSingle1 | NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore2 =
+ NEONLoadStoreSingle2 | NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore3 =
+ NEONLoadStoreSingle3 | NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore4 =
+ NEONLoadStoreSingle4 | NEONLoadStoreSingleStructFixed,
+ NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b,
+ NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h,
+ NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s,
+ NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d,
+ NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes,
+ NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b,
+ NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h,
+ NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s,
+ NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d,
+
+ NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b,
+ NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h,
+ NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s,
+ NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d,
+ NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes,
+ NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b,
+ NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h,
+ NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s,
+ NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d,
+
+ NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b,
+ NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h,
+ NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s,
+ NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d,
+ NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes,
+ NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b,
+ NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h,
+ NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s,
+ NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d,
+
+ NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b,
+ NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h,
+ NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s,
+ NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d,
+ NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes,
+ NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b,
+ NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h,
+ NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s,
+ NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d
+};
+
+// NEON load/store single structure with post-index addressing.
+enum NEONLoadStoreSingleStructPostIndexOp {
+ NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
+ NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
+ NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000,
+ NEONLoadStoreSingleStructPostIndex = 0x00800000,
+ NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex
+};
+
+// NEON register copy.
+enum NEONCopyOp {
+ NEONCopyFixed = 0x0E000400,
+ NEONCopyFMask = 0x9FE08400,
+ NEONCopyMask = 0x3FE08400,
+ NEONCopyInsElementMask = NEONCopyMask | 0x40000000,
+ NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800,
+ NEONCopyDupElementMask = NEONCopyMask | 0x20007800,
+ NEONCopyDupGeneralMask = NEONCopyDupElementMask,
+ NEONCopyUmovMask = NEONCopyMask | 0x20007800,
+ NEONCopySmovMask = NEONCopyMask | 0x20007800,
+ NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000,
+ NEON_INS_GENERAL = NEONCopyFixed | 0x40001800,
+ NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000,
+ NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800,
+ NEON_SMOV = NEONCopyFixed | 0x00002800,
+ NEON_UMOV = NEONCopyFixed | 0x00003800
+};
+
+// NEON scalar instructions with indexed element operand.
+enum NEONScalarByIndexedElementOp {
+ NEONScalarByIndexedElementFixed = 0x5F000000,
+ NEONScalarByIndexedElementFMask = 0xDF000400,
+ NEONScalarByIndexedElementMask = 0xFF00F400,
+ NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement,
+ NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement,
+ NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement,
+ NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement,
+ NEON_SQRDMULH_byelement_scalar =
+ NEON_Q | NEONScalar | NEON_SQRDMULH_byelement,
+
+ // Floating point instructions.
+ NEONScalarByIndexedElementFPFixed =
+ NEONScalarByIndexedElementFixed | 0x00800000,
+ NEONScalarByIndexedElementFPMask =
+ NEONScalarByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement,
+ NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement,
+ NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement,
+ NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement
+};
+
+// NEON shift immediate.
+enum NEONShiftImmediateOp {
+ NEONShiftImmediateFixed = 0x0F000400,
+ NEONShiftImmediateFMask = 0x9F800400,
+ NEONShiftImmediateMask = 0xBF80FC00,
+ NEONShiftImmediateUBit = 0x20000000,
+ NEON_SHL = NEONShiftImmediateFixed | 0x00005000,
+ NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000,
+ NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000,
+ NEON_SLI = NEONShiftImmediateFixed | 0x20005000,
+ NEON_SRI = NEONShiftImmediateFixed | 0x20004000,
+ NEON_SHRN = NEONShiftImmediateFixed | 0x00008000,
+ NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800,
+ NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000,
+ NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800,
+ NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000,
+ NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800,
+ NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000,
+ NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800,
+ NEON_SSHR = NEONShiftImmediateFixed | 0x00000000,
+ NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000,
+ NEON_USHR = NEONShiftImmediateFixed | 0x20000000,
+ NEON_URSHR = NEONShiftImmediateFixed | 0x20002000,
+ NEON_SSRA = NEONShiftImmediateFixed | 0x00001000,
+ NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000,
+ NEON_USRA = NEONShiftImmediateFixed | 0x20001000,
+ NEON_URSRA = NEONShiftImmediateFixed | 0x20003000,
+ NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000,
+ NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000,
+ NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000,
+ NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800,
+ NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800,
+ NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000,
+ NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000
+};
+
+// NEON scalar register copy.
+enum NEONScalarCopyOp {
+ NEONScalarCopyFixed = 0x5E000400,
+ NEONScalarCopyFMask = 0xDFE08400,
+ NEONScalarCopyMask = 0xFFE0FC00,
+ NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT
+};
+
+// NEON scalar pairwise instructions.
+enum NEONScalarPairwiseOp {
+ NEONScalarPairwiseFixed = 0x5E300800,
+ NEONScalarPairwiseFMask = 0xDF3E0C00,
+ NEONScalarPairwiseMask = 0xFFB1F800,
+ NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000,
+ NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000,
+ NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000,
+ NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000,
+ NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000,
+ NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000
+};
+
+// NEON scalar shift immediate.
+enum NEONScalarShiftImmediateOp {
+ NEONScalarShiftImmediateFixed = 0x5F000400,
+ NEONScalarShiftImmediateFMask = 0xDF800400,
+ NEONScalarShiftImmediateMask = 0xFF80FC00,
+ NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL,
+ NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI,
+ NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI,
+ NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR,
+ NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR,
+ NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR,
+ NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR,
+ NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA,
+ NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA,
+ NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA,
+ NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA,
+ NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN,
+ NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN,
+ NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN,
+ NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN,
+ NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN,
+ NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN,
+ NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU,
+ NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm,
+ NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm,
+ NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm,
+ NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm,
+ NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm,
+ NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm
+};
+
+// NEON table.
+enum NEONTableOp {
+ NEONTableFixed = 0x0E000000,
+ NEONTableFMask = 0xBF208C00,
+ NEONTableExt = 0x00001000,
+ NEONTableMask = 0xBF20FC00,
+ NEON_TBL_1v = NEONTableFixed | 0x00000000,
+ NEON_TBL_2v = NEONTableFixed | 0x00002000,
+ NEON_TBL_3v = NEONTableFixed | 0x00004000,
+ NEON_TBL_4v = NEONTableFixed | 0x00006000,
+ NEON_TBX_1v = NEON_TBL_1v | NEONTableExt,
+ NEON_TBX_2v = NEON_TBL_2v | NEONTableExt,
+ NEON_TBX_3v = NEON_TBL_3v | NEONTableExt,
+ NEON_TBX_4v = NEON_TBL_4v | NEONTableExt
+};
+
+// NEON perm.
+enum NEONPermOp {
+ NEONPermFixed = 0x0E000800,
+ NEONPermFMask = 0xBF208C00,
+ NEONPermMask = 0x3F20FC00,
+ NEON_UZP1 = NEONPermFixed | 0x00001000,
+ NEON_TRN1 = NEONPermFixed | 0x00002000,
+ NEON_ZIP1 = NEONPermFixed | 0x00003000,
+ NEON_UZP2 = NEONPermFixed | 0x00005000,
+ NEON_TRN2 = NEONPermFixed | 0x00006000,
+ NEON_ZIP2 = NEONPermFixed | 0x00007000
+};
+
+// NEON scalar instructions with two register operands.
+enum NEONScalar2RegMiscOp {
+ NEONScalar2RegMiscFixed = 0x5E200800,
+ NEONScalar2RegMiscFMask = 0xDF3E0C00,
+ NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
+ NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero,
+ NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero,
+ NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero,
+ NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero,
+ NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero,
+ NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS,
+ NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS,
+ NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG,
+ NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG,
+ NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN,
+ NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN,
+ NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN,
+ NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD,
+ NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD,
+
+ NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode,
+ NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode,
+
+ NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000,
+ NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE,
+ NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE,
+ NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF,
+ NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF,
+ NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero,
+ NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero,
+ NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero,
+ NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero,
+ NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero,
+ NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000,
+ NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS,
+ NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU,
+ NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS,
+ NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU,
+ NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS,
+ NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU,
+ NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS,
+ NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU,
+ NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS,
+ NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU,
+ NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN
+};
+
+// NEON scalar instructions with three same-type operands.
+enum NEONScalar3SameOp {
+ NEONScalar3SameFixed = 0x5E200400,
+ NEONScalar3SameFMask = 0xDF200400,
+ NEONScalar3SameMask = 0xFF20FC00,
+ NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD,
+ NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ,
+ NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE,
+ NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT,
+ NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI,
+ NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS,
+ NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST,
+ NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB,
+ NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD,
+ NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD,
+ NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB,
+ NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB,
+ NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL,
+ NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL,
+ NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL,
+ NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL,
+ NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL,
+ NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL,
+ NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL,
+ NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL,
+ NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH,
+ NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH,
+
+ // NEON floating point scalar instructions with three same-type operands.
+ NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000,
+ NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000,
+ NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000,
+ NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE,
+ NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT,
+ NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ,
+ NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE,
+ NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT,
+ NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX,
+ NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS,
+ NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS,
+ NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD
+};
+
+// NEON scalar instructions with three different-type operands.
+enum NEONScalar3DiffOp {
+ NEONScalar3DiffFixed = 0x5E200000,
+ NEONScalar3DiffFMask = 0xDF200C00,
+ NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask,
+ NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL,
+ NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL,
+ NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL
+};
+
// Unimplemented and unallocated instructions. These are defined to make fixed
// bit assertion easier.
enum UnimplementedOp {
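
Note on the enum layout above (illustrative only, not part of the patch): each NEON op group declares a Fixed pattern plus an FMask selecting the bits common to every encoding in the group, and the full group Mask isolates the individual operation; scalar variants reuse the vector encoding with the Q and scalar bits OR-ed in (e.g. NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL). A minimal C++ sketch of that classification scheme, assuming the enums above are in scope:

  #include <cstdint>

  // True iff the 32-bit instruction word belongs to the NEON shift-immediate
  // group: the bits selected by the FMask must match the group's Fixed pattern.
  inline bool IsNEONShiftImmediate(uint32_t bits) {
    return (bits & NEONShiftImmediateFMask) == NEONShiftImmediateFixed;
  }

  // Within a group, comparing (bits & GroupMask) against a named constant such
  // as NEON_SSHR picks out the individual operation, which is exactly how the
  // decoder's instr->Mask(...) switches below use these values.
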
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 2405f87830..6718bd3d68 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -213,6 +213,11 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
+ if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
+ DecodeNEONLoadStore(instr);
+ return;
+ }
+
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
@@ -226,8 +231,6 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
} else {
V::VisitLoadStoreAcquireRelease(instr);
}
- } else {
- DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
@@ -513,16 +516,14 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
- DecodeAdvSIMDDataProcessing(instr);
+ DecodeNEONVectorDataProcessing(instr);
} else {
- if (instr->Bit(29) == 1) {
+ if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeNEONScalarDataProcessing(instr);
} else {
- if (instr->Bits(31, 30) == 0x3) {
- V::VisitUnallocated(instr);
- } else if (instr->Bits(31, 30) == 0x1) {
- DecodeAdvSIMDDataProcessing(instr);
- } else {
+ if (instr->Bit(29) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
@@ -629,25 +630,190 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPDataProcessing3Source(instr);
}
}
+ } else {
+ V::VisitUnallocated(instr);
}
}
}
}
-
-template<typename V>
-void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
- // TODO(all): Implement Advanced SIMD load/store instruction decode.
+template <typename V>
+void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
DCHECK(instr->Bits(29, 25) == 0x6);
- V::VisitUnimplemented(instr);
+ if (instr->Bit(31) == 0) {
+ if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
+ V::VisitUnallocated(instr);
+ return;
+ }
+
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(20, 16) == 0) {
+ if (instr->Bit(24) == 0) {
+ V::VisitNEONLoadStoreMultiStruct(instr);
+ } else {
+ V::VisitNEONLoadStoreSingleStruct(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(24) == 0) {
+ V::VisitNEONLoadStoreMultiStructPostIndex(instr);
+ } else {
+ V::VisitNEONLoadStoreSingleStructPostIndex(instr);
+ }
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
}
+template <typename V>
+void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
+ DCHECK(instr->Bits(28, 25) == 0x7);
+ if (instr->Bit(31) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ V::VisitNEONTable(instr);
+ } else {
+ V::VisitNEONPerm(instr);
+ }
+ } else {
+ V::VisitNEONExtract(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ V::VisitNEONCopy(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ V::VisitNEON3Different(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ V::VisitNEON2RegMisc(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x2) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ V::VisitNEONAcrossLanes(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ V::VisitNEON3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ V::VisitNEONByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(22, 19) == 0) {
+ V::VisitNEONModifiedImmediate(instr);
+ } else {
+ V::VisitNEONShiftImmediate(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+}
-template<typename V>
-void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
- // TODO(all): Implement Advanced SIMD data processing instruction decode.
- DCHECK(instr->Bits(27, 25) == 0x7);
- V::VisitUnimplemented(instr);
+template <typename V>
+void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
+ DCHECK(instr->Bits(28, 25) == 0xF);
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ V::VisitNEONScalarCopy(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ V::VisitNEONScalar3Diff(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ V::VisitNEONScalar2RegMisc(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ V::VisitNEONScalarPairwise(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ V::VisitNEONScalar3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ V::VisitNEONScalarByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitNEONScalarShiftImmediate(instr);
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ }
}
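
The three new Decode* entry points above are selected purely by fixed bit fields, as their DCHECKs record: bits 29:25 == 0x6 for NEON load/store, bits 28:25 == 0x7 for vector data processing, and bits 28:25 == 0xF for scalar data processing. A standalone sketch of that partition, kept separate from the decoder itself (illustrative only, not part of the patch):

  #include <cstdint>

  enum class NEONGroup { kNone, kLoadStore, kVectorDP, kScalarDP };

  // Extract instruction bits hi..lo inclusive, mirroring Instruction::Bits().
  inline uint32_t FieldBits(uint32_t word, int hi, int lo) {
    return (word >> lo) & ((1u << (hi - lo + 1)) - 1u);
  }

  inline NEONGroup ClassifyNEON(uint32_t word) {
    if (FieldBits(word, 29, 25) == 0x6) return NEONGroup::kLoadStore;  // DecodeNEONLoadStore
    if (FieldBits(word, 28, 25) == 0x7) return NEONGroup::kVectorDP;   // DecodeNEONVectorDataProcessing
    if (FieldBits(word, 28, 25) == 0xF) return NEONGroup::kScalarDP;   // DecodeNEONScalarDataProcessing
    return NEONGroup::kNone;
  }
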
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
index a17b324412..a89bf38980 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -16,50 +16,72 @@ namespace internal {
// List macro containing all visitors needed by the decoder class.
-#define VISITOR_LIST(V) \
- V(PCRelAddressing) \
- V(AddSubImmediate) \
- V(LogicalImmediate) \
- V(MoveWideImmediate) \
- V(Bitfield) \
- V(Extract) \
- V(UnconditionalBranch) \
- V(UnconditionalBranchToRegister) \
- V(CompareBranch) \
- V(TestBranch) \
- V(ConditionalBranch) \
- V(System) \
- V(Exception) \
- V(LoadStorePairPostIndex) \
- V(LoadStorePairOffset) \
- V(LoadStorePairPreIndex) \
- V(LoadLiteral) \
- V(LoadStoreUnscaledOffset) \
- V(LoadStorePostIndex) \
- V(LoadStorePreIndex) \
- V(LoadStoreRegisterOffset) \
- V(LoadStoreUnsignedOffset) \
- V(LoadStoreAcquireRelease) \
- V(LogicalShifted) \
- V(AddSubShifted) \
- V(AddSubExtended) \
- V(AddSubWithCarry) \
- V(ConditionalCompareRegister) \
- V(ConditionalCompareImmediate) \
- V(ConditionalSelect) \
- V(DataProcessing1Source) \
- V(DataProcessing2Source) \
- V(DataProcessing3Source) \
- V(FPCompare) \
- V(FPConditionalCompare) \
- V(FPConditionalSelect) \
- V(FPImmediate) \
- V(FPDataProcessing1Source) \
- V(FPDataProcessing2Source) \
- V(FPDataProcessing3Source) \
- V(FPIntegerConvert) \
- V(FPFixedPointConvert) \
- V(Unallocated) \
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LoadStoreAcquireRelease) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(NEON2RegMisc) \
+ V(NEON3Different) \
+ V(NEON3Same) \
+ V(NEONAcrossLanes) \
+ V(NEONByIndexedElement) \
+ V(NEONCopy) \
+ V(NEONExtract) \
+ V(NEONLoadStoreMultiStruct) \
+ V(NEONLoadStoreMultiStructPostIndex) \
+ V(NEONLoadStoreSingleStruct) \
+ V(NEONLoadStoreSingleStructPostIndex) \
+ V(NEONModifiedImmediate) \
+ V(NEONScalar2RegMisc) \
+ V(NEONScalar3Diff) \
+ V(NEONScalar3Same) \
+ V(NEONScalarByIndexedElement) \
+ V(NEONScalarCopy) \
+ V(NEONScalarPairwise) \
+ V(NEONScalarShiftImmediate) \
+ V(NEONShiftImmediate) \
+ V(NEONTable) \
+ V(NEONPerm) \
+ V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
@@ -109,6 +131,8 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
+ void VisitNEONShiftImmediate(const Instruction* instr);
+
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
@@ -173,12 +197,17 @@ class Decoder : public V {
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
- void DecodeAdvSIMDLoadStore(Instruction* instr);
+ void DecodeNEONLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
- void DecodeAdvSIMDDataProcessing(Instruction* instr);
+ void DecodeNEONVectorDataProcessing(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) scalar data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0xF.
+ void DecodeNEONScalarDataProcessing(Instruction* instr);
};
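
Because the visitor methods are generated from VISITOR_LIST, the NEON entries added above flow into every visitor (disassembler, simulator, and other tools) automatically. A hedged sketch of that X-macro expansion, assuming decoder-arm64.h is included so VISITOR_LIST and Instruction are visible (a real visitor would derive from DecoderVisitor; this is not part of the patch):

  class NoOpVisitor {
   public:
  // One no-op VisitName method per V(Name) entry, including the new NEON ones.
  #define DECLARE(A) \
    void Visit##A(Instruction* instr) { static_cast<void>(instr); }
    VISITOR_LIST(DECLARE)
  #undef DECLARE
  };
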
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index a178e1d95e..dac144d3d1 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -87,26 +87,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler());
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(x0.code(), params);
- output_frame->SetRegister(x1.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
-
-
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@@ -118,13 +98,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable double registers.
CPURegList saved_double_registers(
- CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
- CPURegister::kFPRegister, kSRegSizeInBits,
+ CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);
@@ -133,7 +113,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
- __ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ Str(fp, MemOperand(x3));
const int kSavedRegistersAreaSize =
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index e3ef4595d8..288cfe4705 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -11,6 +11,7 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
+#include "src/arm64/utils-arm64.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
@@ -94,9 +95,9 @@ void DisassemblingDecoder::VisitAddSubShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm'HDP";
- const char *form_cmp = "'Rn, 'Rm'HDP";
- const char *form_neg = "'Rd, 'Rm'HDP";
+ const char* form = "'Rd, 'Rn, 'Rm'NDP";
+ const char* form_cmp = "'Rn, 'Rm'NDP";
+ const char* form_neg = "'Rd, 'Rm'NDP";
switch (instr->Mask(AddSubShiftedMask)) {
case ADD_w_shift:
@@ -286,7 +287,7 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm'HLo";
+ const char* form = "'Rd, 'Rn, 'Rm'NLo";
switch (instr->Mask(LogicalShiftedMask)) {
case AND_w:
@@ -304,7 +305,7 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
mnemonic = "ands";
if (rd_is_zr) {
mnemonic = "tst";
- form = "'Rn, 'Rm'HLo";
+ form = "'Rn, 'Rm'NLo";
}
break;
}
@@ -322,7 +323,7 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
mnemonic = "orn";
if (rn_is_zr) {
mnemonic = "mvn";
- form = "'Rd, 'Rm'HLo";
+ form = "'Rd, 'Rm'NLo";
}
break;
}
@@ -527,7 +528,9 @@ void DisassemblingDecoder::VisitPCRelAddressing(Instruction* instr) {
void DisassemblingDecoder::VisitConditionalBranch(Instruction* instr) {
switch (instr->Mask(ConditionalBranchMask)) {
- case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+ case B_cond:
+ Format(instr, "b.'CBrn", "'TImmCond");
+ break;
default: UNREACHABLE();
}
}
@@ -556,7 +559,7 @@ void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
void DisassemblingDecoder::VisitUnconditionalBranch(Instruction* instr) {
const char *mnemonic = "";
- const char *form = "'BImmUncn";
+ const char* form = "'TImmUncn";
switch (instr->Mask(UnconditionalBranchMask)) {
case B: mnemonic = "b"; break;
@@ -689,7 +692,7 @@ void DisassemblingDecoder::VisitDataProcessing3Source(Instruction* instr) {
void DisassemblingDecoder::VisitCompareBranch(Instruction* instr) {
const char *mnemonic = "";
- const char *form = "'Rt, 'BImmCmpa";
+ const char* form = "'Rt, 'TImmCmpa";
switch (instr->Mask(CompareBranchMask)) {
case CBZ_w:
@@ -708,7 +711,7 @@ void DisassemblingDecoder::VisitTestBranch(Instruction* instr) {
// disassembled as Wt, otherwise Xt. As the top bit of the immediate is
// encoded in bit 31 of the instruction, we can reuse the Rt form, which
// uses bit 31 (normally "sf") to choose the register size.
- const char *form = "'Rt, 'IS, 'BImmTest";
+ const char* form = "'Rt, 'IS, 'TImmTest";
switch (instr->Mask(TestBranchMask)) {
case TBZ: mnemonic = "tbz"; break;
@@ -738,25 +741,30 @@ void DisassemblingDecoder::VisitMoveWideImmediate(Instruction* instr) {
Format(instr, mnemonic, form);
}
-
-#define LOAD_STORE_LIST(V) \
- V(STRB_w, "strb", "'Wt") \
- V(STRH_w, "strh", "'Wt") \
- V(STR_w, "str", "'Wt") \
- V(STR_x, "str", "'Xt") \
- V(LDRB_w, "ldrb", "'Wt") \
- V(LDRH_w, "ldrh", "'Wt") \
- V(LDR_w, "ldr", "'Wt") \
- V(LDR_x, "ldr", "'Xt") \
- V(LDRSB_x, "ldrsb", "'Xt") \
- V(LDRSH_x, "ldrsh", "'Xt") \
- V(LDRSW_x, "ldrsw", "'Xt") \
- V(LDRSB_w, "ldrsb", "'Wt") \
- V(LDRSH_w, "ldrsh", "'Wt") \
- V(STR_s, "str", "'St") \
- V(STR_d, "str", "'Dt") \
- V(LDR_s, "ldr", "'St") \
- V(LDR_d, "ldr", "'Dt")
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_b, "str", "'Bt") \
+ V(STR_h, "str", "'Ht") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_b, "ldr", "'Bt") \
+ V(LDR_h, "ldr", "'Ht") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt") \
+ V(STR_q, "str", "'Qt") \
+ V(LDR_q, "ldr", "'Qt")
void DisassemblingDecoder::VisitLoadStorePreIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
@@ -861,17 +869,18 @@ void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) {
Format(instr, mnemonic, form);
}
-
#define LOAD_STORE_PAIR_LIST(V) \
- V(STP_w, "stp", "'Wt, 'Wt2", "4") \
- V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
- V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
- V(STP_x, "stp", "'Xt, 'Xt2", "8") \
- V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
- V(STP_s, "stp", "'St, 'St2", "4") \
- V(LDP_s, "ldp", "'St, 'St2", "4") \
- V(STP_d, "stp", "'Dt, 'Dt2", "8") \
- V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
+ V(STP_w, "stp", "'Wt, 'Wt2", "2") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "3") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \
+ V(STP_s, "stp", "'St, 'St2", "2") \
+ V(LDP_s, "ldp", "'St, 'St2", "2") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "3") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \
+ V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \
+ V(STP_q, "stp", "'Qt, 'Qt2", "4")
void DisassemblingDecoder::VisitLoadStorePairPostIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
@@ -1010,6 +1019,22 @@ void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) {
#undef FORMAT
case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ case FCVT_hs:
+ mnemonic = "fcvt";
+ form = "'Hd, 'Sn";
+ break;
+ case FCVT_sh:
+ mnemonic = "fcvt";
+ form = "'Sd, 'Hn";
+ break;
+ case FCVT_dh:
+ mnemonic = "fcvt";
+ form = "'Dd, 'Hn";
+ break;
+ case FCVT_hd:
+ mnemonic = "fcvt";
+ form = "'Hd, 'Dn";
+ break;
default: form = "(FPDataProcessing1Source)";
}
Format(instr, mnemonic, form);
@@ -1083,6 +1108,14 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
case FMOV_sw:
case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FMOV_d1_x:
+ mnemonic = "fmov";
+ form = "'Vd.D[1], 'Rn";
+ break;
+ case FMOV_x_d1:
+ mnemonic = "fmov";
+ form = "'Rd, 'Vn.D[1]";
+ break;
case FCVTAS_ws:
case FCVTAS_xs:
case FCVTAS_wd:
@@ -1115,6 +1148,20 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
case FCVTZS_wd:
case FCVTZS_xs:
case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case FCVTPU_xd:
+ case FCVTPU_ws:
+ case FCVTPU_wd:
+ case FCVTPU_xs:
+ mnemonic = "fcvtpu";
+ form = form_rf;
+ break;
+ case FCVTPS_xd:
+ case FCVTPS_wd:
+ case FCVTPS_xs:
+ case FCVTPS_ws:
+ mnemonic = "fcvtps";
+ form = form_rf;
+ break;
case SCVTF_sw:
case SCVTF_sx:
case SCVTF_dw:
@@ -1234,21 +1281,2066 @@ void DisassemblingDecoder::VisitException(Instruction* instr) {
Format(instr, mnemonic, form);
}
+void DisassemblingDecoder::VisitNEON3Same(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND:
+ mnemonic = "and";
+ break;
+ case NEON_ORR:
+ mnemonic = "orr";
+ if (instr->Rm() == instr->Rn()) {
+ mnemonic = "mov";
+ form = "'Vd.%s, 'Vn.%s";
+ }
+ break;
+ case NEON_ORN:
+ mnemonic = "orn";
+ break;
+ case NEON_EOR:
+ mnemonic = "eor";
+ break;
+ case NEON_BIC:
+ mnemonic = "bic";
+ break;
+ case NEON_BIF:
+ mnemonic = "bif";
+ break;
+ case NEON_BIT:
+ mnemonic = "bit";
+ break;
+ case NEON_BSL:
+ mnemonic = "bsl";
+ break;
+ default:
+ form = "(NEON3Same)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ } else {
+ static const char* mnemonics[] = {
+ "shadd", "uhadd", "shadd", "uhadd",
+ "sqadd", "uqadd", "sqadd", "uqadd",
+ "srhadd", "urhadd", "srhadd", "urhadd",
+ NULL, NULL, NULL, NULL, // Handled by logical cases above.
+ "shsub", "uhsub", "shsub", "uhsub",
+ "sqsub", "uqsub", "sqsub", "uqsub",
+ "cmgt", "cmhi", "cmgt", "cmhi",
+ "cmge", "cmhs", "cmge", "cmhs",
+ "sshl", "ushl", "sshl", "ushl",
+ "sqshl", "uqshl", "sqshl", "uqshl",
+ "srshl", "urshl", "srshl", "urshl",
+ "sqrshl", "uqrshl", "sqrshl", "uqrshl",
+ "smax", "umax", "smax", "umax",
+ "smin", "umin", "smin", "umin",
+ "sabd", "uabd", "sabd", "uabd",
+ "saba", "uaba", "saba", "uaba",
+ "add", "sub", "add", "sub",
+ "cmtst", "cmeq", "cmtst", "cmeq",
+ "mla", "mls", "mla", "mls",
+ "mul", "pmul", "mul", "pmul",
+ "smaxp", "umaxp", "smaxp", "umaxp",
+ "sminp", "uminp", "sminp", "uminp",
+ "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh",
+ "addp", "unallocated", "addp", "unallocated",
+ "fmaxnm", "fmaxnmp", "fminnm", "fminnmp",
+ "fmla", "unallocated", "fmls", "unallocated",
+ "fadd", "faddp", "fsub", "fabd",
+ "fmulx", "fmul", "unallocated", "unallocated",
+ "fcmeq", "fcmge", "unallocated", "fcmgt",
+ "unallocated", "facge", "unallocated", "facgt",
+ "fmax", "fmaxp", "fmin", "fminp",
+ "frecps", "fdiv", "frsqrts", "unallocated"};
+
+ // Operation is determined by the opcode bits (15-11), the top bit of
+ // size (23) and the U bit (29).
+ unsigned index =
+ (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) | instr->Bit(29);
+ DCHECK_LT(index, arraysize(mnemonics));
+ mnemonic = mnemonics[index];
+ // Assert that index is not one of the previously handled logical
+ // instructions.
+ DCHECK_NOT_NULL(mnemonic);
+
+ if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEON2RegMisc(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Vd.%s, 'Vn.%s";
+ const char* form_cmp_zero = "'Vd.%s, 'Vn.%s, #0";
+ const char* form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0";
+ NEONFormatDecoder nfd(instr);
+
+ static const NEONFormatMap map_lp_ta = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
+
+ static const NEONFormatMap map_cvt_ta = {{22}, {NF_4S, NF_2D}};
+
+ static const NEONFormatMap map_cvt_tb = {{22, 30},
+ {NF_4H, NF_8H, NF_2S, NF_4S}};
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64:
+ mnemonic = "rev64";
+ break;
+ case NEON_REV32:
+ mnemonic = "rev32";
+ break;
+ case NEON_REV16:
+ mnemonic = "rev16";
+ break;
+ case NEON_SADDLP:
+ mnemonic = "saddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADDLP:
+ mnemonic = "uaddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SUQADD:
+ mnemonic = "suqadd";
+ break;
+ case NEON_USQADD:
+ mnemonic = "usqadd";
+ break;
+ case NEON_CLS:
+ mnemonic = "cls";
+ break;
+ case NEON_CLZ:
+ mnemonic = "clz";
+ break;
+ case NEON_CNT:
+ mnemonic = "cnt";
+ break;
+ case NEON_SADALP:
+ mnemonic = "sadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADALP:
+ mnemonic = "uadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SQABS:
+ mnemonic = "sqabs";
+ break;
+ case NEON_SQNEG:
+ mnemonic = "sqneg";
+ break;
+ case NEON_CMGT_zero:
+ mnemonic = "cmgt";
+ form = form_cmp_zero;
+ break;
+ case NEON_CMGE_zero:
+ mnemonic = "cmge";
+ form = form_cmp_zero;
+ break;
+ case NEON_CMEQ_zero:
+ mnemonic = "cmeq";
+ form = form_cmp_zero;
+ break;
+ case NEON_CMLE_zero:
+ mnemonic = "cmle";
+ form = form_cmp_zero;
+ break;
+ case NEON_CMLT_zero:
+ mnemonic = "cmlt";
+ form = form_cmp_zero;
+ break;
+ case NEON_ABS:
+ mnemonic = "abs";
+ break;
+ case NEON_NEG:
+ mnemonic = "neg";
+ break;
+ case NEON_RBIT_NOT:
+ switch (instr->FPType()) {
+ case 0:
+ mnemonic = "mvn";
+ break;
+ case 1:
+ mnemonic = "rbit";
+ break;
+ default:
+ form = "(NEON2RegMisc)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ break;
+ }
+ } else {
+ // These instructions all use a one bit size field, except XTN, SQXTUN,
+ // SHLL, SQXTN and UQXTN, which use a two bit size field.
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS:
+ mnemonic = "fabs";
+ break;
+ case NEON_FNEG:
+ mnemonic = "fneg";
+ break;
+ case NEON_FCVTN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTXN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTL:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtl2" : "fcvtl";
+ nfd.SetFormatMap(0, &map_cvt_ta);
+ nfd.SetFormatMap(1, &map_cvt_tb);
+ break;
+ case NEON_FRINTN:
+ mnemonic = "frintn";
+ break;
+ case NEON_FRINTA:
+ mnemonic = "frinta";
+ break;
+ case NEON_FRINTP:
+ mnemonic = "frintp";
+ break;
+ case NEON_FRINTM:
+ mnemonic = "frintm";
+ break;
+ case NEON_FRINTX:
+ mnemonic = "frintx";
+ break;
+ case NEON_FRINTZ:
+ mnemonic = "frintz";
+ break;
+ case NEON_FRINTI:
+ mnemonic = "frinti";
+ break;
+ case NEON_FCVTNS:
+ mnemonic = "fcvtns";
+ break;
+ case NEON_FCVTNU:
+ mnemonic = "fcvtnu";
+ break;
+ case NEON_FCVTPS:
+ mnemonic = "fcvtps";
+ break;
+ case NEON_FCVTPU:
+ mnemonic = "fcvtpu";
+ break;
+ case NEON_FCVTMS:
+ mnemonic = "fcvtms";
+ break;
+ case NEON_FCVTMU:
+ mnemonic = "fcvtmu";
+ break;
+ case NEON_FCVTZS:
+ mnemonic = "fcvtzs";
+ break;
+ case NEON_FCVTZU:
+ mnemonic = "fcvtzu";
+ break;
+ case NEON_FCVTAS:
+ mnemonic = "fcvtas";
+ break;
+ case NEON_FCVTAU:
+ mnemonic = "fcvtau";
+ break;
+ case NEON_FSQRT:
+ mnemonic = "fsqrt";
+ break;
+ case NEON_SCVTF:
+ mnemonic = "scvtf";
+ break;
+ case NEON_UCVTF:
+ mnemonic = "ucvtf";
+ break;
+ case NEON_URSQRTE:
+ mnemonic = "ursqrte";
+ break;
+ case NEON_URECPE:
+ mnemonic = "urecpe";
+ break;
+ case NEON_FRSQRTE:
+ mnemonic = "frsqrte";
+ break;
+ case NEON_FRECPE:
+ mnemonic = "frecpe";
+ break;
+ case NEON_FCMGT_zero:
+ mnemonic = "fcmgt";
+ form = form_fcmp_zero;
+ break;
+ case NEON_FCMGE_zero:
+ mnemonic = "fcmge";
+ form = form_fcmp_zero;
+ break;
+ case NEON_FCMEQ_zero:
+ mnemonic = "fcmeq";
+ form = form_fcmp_zero;
+ break;
+ case NEON_FCMLE_zero:
+ mnemonic = "fcmle";
+ form = form_fcmp_zero;
+ break;
+ case NEON_FCMLT_zero:
+ mnemonic = "fcmlt";
+ form = form_fcmp_zero;
+ break;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN:
+ mnemonic = "xtn";
+ break;
+ case NEON_SQXTN:
+ mnemonic = "sqxtn";
+ break;
+ case NEON_UQXTN:
+ mnemonic = "uqxtn";
+ break;
+ case NEON_SQXTUN:
+ mnemonic = "sqxtun";
+ break;
+ case NEON_SHLL:
+ mnemonic = "shll";
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.IntegerFormatMap());
+ switch (instr->NEONSize()) {
+ case 0:
+ form = "'Vd.%s, 'Vn.%s, #8";
+ break;
+ case 1:
+ form = "'Vd.%s, 'Vn.%s, #16";
+ break;
+ case 2:
+ form = "'Vd.%s, 'Vn.%s, #32";
+ break;
+ default:
+ Format(instr, "unallocated", "(NEON2RegMisc)");
+ return;
+ }
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ return;
+ } else {
+ form = "(NEON2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEON3Different(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+
+ NEONFormatDecoder nfd(instr);
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+
+ // Ignore the Q bit. Appending a "2" suffix is handled later.
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) {
+ case NEON_PMULL:
+ mnemonic = "pmull";
+ break;
+ case NEON_SABAL:
+ mnemonic = "sabal";
+ break;
+ case NEON_SABDL:
+ mnemonic = "sabdl";
+ break;
+ case NEON_SADDL:
+ mnemonic = "saddl";
+ break;
+ case NEON_SMLAL:
+ mnemonic = "smlal";
+ break;
+ case NEON_SMLSL:
+ mnemonic = "smlsl";
+ break;
+ case NEON_SMULL:
+ mnemonic = "smull";
+ break;
+ case NEON_SSUBL:
+ mnemonic = "ssubl";
+ break;
+ case NEON_SQDMLAL:
+ mnemonic = "sqdmlal";
+ break;
+ case NEON_SQDMLSL:
+ mnemonic = "sqdmlsl";
+ break;
+ case NEON_SQDMULL:
+ mnemonic = "sqdmull";
+ break;
+ case NEON_UABAL:
+ mnemonic = "uabal";
+ break;
+ case NEON_UABDL:
+ mnemonic = "uabdl";
+ break;
+ case NEON_UADDL:
+ mnemonic = "uaddl";
+ break;
+ case NEON_UMLAL:
+ mnemonic = "umlal";
+ break;
+ case NEON_UMLSL:
+ mnemonic = "umlsl";
+ break;
+ case NEON_UMULL:
+ mnemonic = "umull";
+ break;
+ case NEON_USUBL:
+ mnemonic = "usubl";
+ break;
+ case NEON_SADDW:
+ mnemonic = "saddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_SSUBW:
+ mnemonic = "ssubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_UADDW:
+ mnemonic = "uaddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_USUBW:
+ mnemonic = "usubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_ADDHN:
+ mnemonic = "addhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RADDHN:
+ mnemonic = "raddhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RSUBHN:
+ mnemonic = "rsubhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_SUBHN:
+ mnemonic = "subhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ default:
+ form = "(NEON3Different)";
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONAcrossLanes(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, 'Vn.%s";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(),
+ NEONFormatDecoder::IntegerFormatMap());
+
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.FPFormatMap());
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV:
+ mnemonic = "fmaxv";
+ break;
+ case NEON_FMINV:
+ mnemonic = "fminv";
+ break;
+ case NEON_FMAXNMV:
+ mnemonic = "fmaxnmv";
+ break;
+ case NEON_FMINNMV:
+ mnemonic = "fminnmv";
+ break;
+ default:
+ form = "(NEONAcrossLanes)";
+ break;
+ }
+ } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) {
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV:
+ mnemonic = "addv";
+ break;
+ case NEON_SMAXV:
+ mnemonic = "smaxv";
+ break;
+ case NEON_SMINV:
+ mnemonic = "sminv";
+ break;
+ case NEON_UMAXV:
+ mnemonic = "umaxv";
+ break;
+ case NEON_UMINV:
+ mnemonic = "uminv";
+ break;
+ case NEON_SADDLV:
+ mnemonic = "saddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ case NEON_UADDLV:
+ mnemonic = "uaddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ default:
+ form = "(NEONAcrossLanes)";
+ break;
+ }
+ }
+ Format(instr, mnemonic,
+ nfd.Substitute(form, NEONFormatDecoder::kPlaceholder,
+ NEONFormatDecoder::kFormat));
+}
+
+void DisassemblingDecoder::VisitNEONByIndexedElement(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ bool l_instr = false;
+ bool fp_instr = false;
+
+ const char* form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]";
+
+ static const NEONFormatMap map_ta = {{23, 22}, {NF_UNDEF, NF_4S, NF_2D}};
+ NEONFormatDecoder nfd(instr, &map_ta, NEONFormatDecoder::IntegerFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_SMULL_byelement:
+ mnemonic = "smull";
+ l_instr = true;
+ break;
+ case NEON_UMULL_byelement:
+ mnemonic = "umull";
+ l_instr = true;
+ break;
+ case NEON_SMLAL_byelement:
+ mnemonic = "smlal";
+ l_instr = true;
+ break;
+ case NEON_UMLAL_byelement:
+ mnemonic = "umlal";
+ l_instr = true;
+ break;
+ case NEON_SMLSL_byelement:
+ mnemonic = "smlsl";
+ l_instr = true;
+ break;
+ case NEON_UMLSL_byelement:
+ mnemonic = "umlsl";
+ l_instr = true;
+ break;
+ case NEON_SQDMULL_byelement:
+ mnemonic = "sqdmull";
+ l_instr = true;
+ break;
+ case NEON_SQDMLAL_byelement:
+ mnemonic = "sqdmlal";
+ l_instr = true;
+ break;
+ case NEON_SQDMLSL_byelement:
+ mnemonic = "sqdmlsl";
+ l_instr = true;
+ break;
+ case NEON_MUL_byelement:
+ mnemonic = "mul";
+ break;
+ case NEON_MLA_byelement:
+ mnemonic = "mla";
+ break;
+ case NEON_MLS_byelement:
+ mnemonic = "mls";
+ break;
+ case NEON_SQDMULH_byelement:
+ mnemonic = "sqdmulh";
+ break;
+ case NEON_SQRDMULH_byelement:
+ mnemonic = "sqrdmulh";
+ break;
+ default:
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement:
+ mnemonic = "fmul";
+ fp_instr = true;
+ break;
+ case NEON_FMLA_byelement:
+ mnemonic = "fmla";
+ fp_instr = true;
+ break;
+ case NEON_FMLS_byelement:
+ mnemonic = "fmls";
+ fp_instr = true;
+ break;
+ case NEON_FMULX_byelement:
+ mnemonic = "fmulx";
+ fp_instr = true;
+ break;
+ }
+ }
+
+ if (l_instr) {
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ } else if (fp_instr) {
+ nfd.SetFormatMap(0, nfd.FPFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ } else {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ }
+}
+
+void DisassemblingDecoder::VisitNEONCopy(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "(NEONCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(),
+ NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]";
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Vd.%s['IVInsIndex1], 'Xn";
+ } else {
+ form = "'Vd.%s['IVInsIndex1], 'Wn";
+ }
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
+ if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) {
+ mnemonic = "mov";
+ } else {
+ mnemonic = "umov";
+ }
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Xd, 'Vn.%s['IVInsIndex1]";
+ } else {
+ form = "'Wd, 'Vn.%s['IVInsIndex1]";
+ }
+ } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) {
+ mnemonic = "smov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Rdq, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ mnemonic = "dup";
+ form = "'Vd.%s, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ mnemonic = "dup";
+ if (nfd.GetVectorFormat() == kFormat2D) {
+ form = "'Vd.%s, 'Xn";
+ } else {
+ form = "'Vd.%s, 'Wn";
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONExtract(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "(NEONExtract)";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ mnemonic = "ext";
+ form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
+ const char* mnemonic = NULL;
+ const char* form = NULL;
+ const char* form_1v = "{'Vt.%1$s}, ['Xns]";
+ const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
+ const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
+ const char* form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
+ case NEON_LD1_1v:
+ mnemonic = "ld1";
+ form = form_1v;
+ break;
+ case NEON_LD1_2v:
+ mnemonic = "ld1";
+ form = form_2v;
+ break;
+ case NEON_LD1_3v:
+ mnemonic = "ld1";
+ form = form_3v;
+ break;
+ case NEON_LD1_4v:
+ mnemonic = "ld1";
+ form = form_4v;
+ break;
+ case NEON_LD2:
+ mnemonic = "ld2";
+ form = form_2v;
+ break;
+ case NEON_LD3:
+ mnemonic = "ld3";
+ form = form_3v;
+ break;
+ case NEON_LD4:
+ mnemonic = "ld4";
+ form = form_4v;
+ break;
+ case NEON_ST1_1v:
+ mnemonic = "st1";
+ form = form_1v;
+ break;
+ case NEON_ST1_2v:
+ mnemonic = "st1";
+ form = form_2v;
+ break;
+ case NEON_ST1_3v:
+ mnemonic = "st1";
+ form = form_3v;
+ break;
+ case NEON_ST1_4v:
+ mnemonic = "st1";
+ form = form_4v;
+ break;
+ case NEON_ST2:
+ mnemonic = "st2";
+ form = form_2v;
+ break;
+ case NEON_ST3:
+ mnemonic = "st3";
+ form = form_3v;
+ break;
+ case NEON_ST4:
+ mnemonic = "st4";
+ form = form_4v;
+ break;
+ default:
+ break;
+ }
+
+ // Work out unallocated encodings.
+ bool allocated = (mnemonic != NULL);
+ switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
+ case NEON_LD2:
+ case NEON_LD3:
+ case NEON_LD4:
+ case NEON_ST2:
+ case NEON_ST3:
+ case NEON_ST4:
+ // LD[2-4] and ST[2-4] cannot use .1d format.
+ allocated = (instr->NEONQ() != 0) || (instr->NEONLSSize() != 3);
+ break;
+ default:
+ break;
+ }
+ if (allocated) {
+ DCHECK_NOT_NULL(mnemonic);
+ DCHECK_NOT_NULL(form);
+ } else {
+ mnemonic = "unallocated";
+ form = "(NEONLoadStoreMultiStruct)";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
+ Instruction* instr) {
+ const char* mnemonic = NULL;
+ const char* form = NULL;
+ const char* form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
+ const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
+ const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
+ const char* form_4v =
+ "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_1v_post:
+ mnemonic = "ld1";
+ form = form_1v;
+ break;
+ case NEON_LD1_2v_post:
+ mnemonic = "ld1";
+ form = form_2v;
+ break;
+ case NEON_LD1_3v_post:
+ mnemonic = "ld1";
+ form = form_3v;
+ break;
+ case NEON_LD1_4v_post:
+ mnemonic = "ld1";
+ form = form_4v;
+ break;
+ case NEON_LD2_post:
+ mnemonic = "ld2";
+ form = form_2v;
+ break;
+ case NEON_LD3_post:
+ mnemonic = "ld3";
+ form = form_3v;
+ break;
+ case NEON_LD4_post:
+ mnemonic = "ld4";
+ form = form_4v;
+ break;
+ case NEON_ST1_1v_post:
+ mnemonic = "st1";
+ form = form_1v;
+ break;
+ case NEON_ST1_2v_post:
+ mnemonic = "st1";
+ form = form_2v;
+ break;
+ case NEON_ST1_3v_post:
+ mnemonic = "st1";
+ form = form_3v;
+ break;
+ case NEON_ST1_4v_post:
+ mnemonic = "st1";
+ form = form_4v;
+ break;
+ case NEON_ST2_post:
+ mnemonic = "st2";
+ form = form_2v;
+ break;
+ case NEON_ST3_post:
+ mnemonic = "st3";
+ form = form_3v;
+ break;
+ case NEON_ST4_post:
+ mnemonic = "st4";
+ form = form_4v;
+ break;
+ default:
+ break;
+ }
+
+ // Work out unallocated encodings.
+ bool allocated = (mnemonic != NULL);
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD2_post:
+ case NEON_LD3_post:
+ case NEON_LD4_post:
+ case NEON_ST2_post:
+ case NEON_ST3_post:
+ case NEON_ST4_post:
+ // LD[2-4] and ST[2-4] cannot use .1d format.
+ allocated = (instr->NEONQ() != 0) || (instr->NEONLSSize() != 3);
+ break;
+ default:
+ break;
+ }
+ if (allocated) {
+ DCHECK_NOT_NULL(mnemonic);
+ DCHECK_NOT_NULL(form);
+ } else {
+ mnemonic = "unallocated";
+ form = "(NEONLoadStoreMultiStructPostIndex)";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
+ const char* mnemonic = NULL;
+ const char* form = NULL;
+
+ const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
+ const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
+ const char* form_1s = "{'Vt.s}['IVLSLane2], ['Xns]";
+ const char* form_1d = "{'Vt.d}['IVLSLane3], ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
+ case NEON_LD1_b:
+ mnemonic = "ld1";
+ form = form_1b;
+ break;
+ case NEON_LD1_h:
+ mnemonic = "ld1";
+ form = form_1h;
+ break;
+ case NEON_LD1_s:
+ mnemonic = "ld1";
+ static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
+ "LSB of size distinguishes S and D registers.");
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b:
+ mnemonic = "st1";
+ form = form_1b;
+ break;
+ case NEON_ST1_h:
+ mnemonic = "st1";
+ form = form_1h;
+ break;
+ case NEON_ST1_s:
+ mnemonic = "st1";
+ static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
+ "LSB of size distinguishes S and D registers.");
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns]";
+ break;
+ case NEON_LD2_b:
+ case NEON_ST2_b:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD2_h:
+ case NEON_ST2_h:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD2_s:
+ case NEON_ST2_s:
+ static_assert((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d,
+ "LSB of size distinguishes S and D registers.");
+ static_assert((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d,
+ "LSB of size distinguishes S and D registers.");
+ mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0) {
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]";
+ } else {
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]";
+ }
+ break;
+ case NEON_LD2R:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns]";
+ break;
+ case NEON_LD3_b:
+ case NEON_ST3_b:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD3_h:
+ case NEON_ST3_h:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD3_s:
+ case NEON_ST3_s:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0) {
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]";
+ } else {
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]";
+ }
+ break;
+ case NEON_LD3R:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]";
+ break;
+ case NEON_LD4_b:
+ case NEON_ST4_b:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD4_h:
+ case NEON_ST4_h:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD4_s:
+ case NEON_ST4_s:
+ static_assert((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d,
+ "LSB of size distinguishes S and D registers.");
+ static_assert((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d,
+ "LSB of size distinguishes S and D registers.");
+ mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0) {
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]";
+ } else {
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]";
+ }
+ break;
+ case NEON_LD4R:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ break;
+ default:
+ break;
+ }
+
+ // Work out unallocated encodings.
+ bool allocated = (mnemonic != NULL);
+ switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
+ case NEON_LD1_h:
+ case NEON_LD2_h:
+ case NEON_LD3_h:
+ case NEON_LD4_h:
+ case NEON_ST1_h:
+ case NEON_ST2_h:
+ case NEON_ST3_h:
+ case NEON_ST4_h:
+ DCHECK(allocated);
+ allocated = ((instr->NEONLSSize() & 1) == 0);
+ break;
+ case NEON_LD1_s:
+ case NEON_LD2_s:
+ case NEON_LD3_s:
+ case NEON_LD4_s:
+ case NEON_ST1_s:
+ case NEON_ST2_s:
+ case NEON_ST3_s:
+ case NEON_ST4_s:
+ DCHECK(allocated);
+ allocated = (instr->NEONLSSize() <= 1) &&
+ ((instr->NEONLSSize() == 0) || (instr->NEONS() == 0));
+ break;
+ case NEON_LD1R:
+ case NEON_LD2R:
+ case NEON_LD3R:
+ case NEON_LD4R:
+ DCHECK(allocated);
+ allocated = (instr->NEONS() == 0);
+ break;
+ default:
+ break;
+ }
+ if (allocated) {
+ DCHECK_NOT_NULL(mnemonic);
+ DCHECK_NOT_NULL(form);
+ } else {
+ mnemonic = "unallocated";
+ form = "(NEONLoadStoreSingleStruct)";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex(
+ Instruction* instr) {
+ const char* mnemonic = NULL;
+ const char* form = NULL;
+
+ const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
+ const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
+ const char* form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4";
+ const char* form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b_post:
+ mnemonic = "ld1";
+ form = form_1b;
+ break;
+ case NEON_LD1_h_post:
+ mnemonic = "ld1";
+ form = form_1h;
+ break;
+ case NEON_LD1_s_post:
+ mnemonic = "ld1";
+ static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
+ "LSB of size distinguishes S and D registers.");
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b_post:
+ mnemonic = "st1";
+ form = form_1b;
+ break;
+ case NEON_ST1_h_post:
+ mnemonic = "st1";
+ form = form_1h;
+ break;
+ case NEON_ST1_s_post:
+ mnemonic = "st1";
+ static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
+ "LSB of size distinguishes S and D registers.");
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R_post:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns], 'Xmz1";
+ break;
+ case NEON_LD2_b_post:
+ case NEON_ST2_b_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2";
+ break;
+ case NEON_ST2_h_post:
+ case NEON_LD2_h_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD2_s_post:
+ case NEON_ST2_s_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16";
+ break;
+ case NEON_LD2R_post:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2";
+ break;
+ case NEON_LD3_b_post:
+ case NEON_ST3_b_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3";
+ break;
+ case NEON_LD3_h_post:
+ case NEON_ST3_h_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6";
+ break;
+ case NEON_LD3_s_post:
+ case NEON_ST3_s_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmb24";
+ break;
+ case NEON_LD3R_post:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3";
+ break;
+ case NEON_LD4_b_post:
+ case NEON_ST4_b_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD4_h_post:
+ case NEON_ST4_h_post:
+ mnemonic = (instr->NEONLoad()) == 1 ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8";
+ break;
+ case NEON_LD4_s_post:
+ case NEON_ST4_s_post:
+ mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32";
+ break;
+ case NEON_LD4R_post:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4";
+ break;
+ default:
+ break;
+ }
+
+ // Work out unallocated encodings.
+ bool allocated = (mnemonic != NULL);
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_h_post:
+ case NEON_LD2_h_post:
+ case NEON_LD3_h_post:
+ case NEON_LD4_h_post:
+ case NEON_ST1_h_post:
+ case NEON_ST2_h_post:
+ case NEON_ST3_h_post:
+ case NEON_ST4_h_post:
+ DCHECK(allocated);
+ allocated = ((instr->NEONLSSize() & 1) == 0);
+ break;
+ case NEON_LD1_s_post:
+ case NEON_LD2_s_post:
+ case NEON_LD3_s_post:
+ case NEON_LD4_s_post:
+ case NEON_ST1_s_post:
+ case NEON_ST2_s_post:
+ case NEON_ST3_s_post:
+ case NEON_ST4_s_post:
+ DCHECK(allocated);
+ allocated = (instr->NEONLSSize() <= 1) &&
+ ((instr->NEONLSSize() == 0) || (instr->NEONS() == 0));
+ break;
+ case NEON_LD1R_post:
+ case NEON_LD2R_post:
+ case NEON_LD3R_post:
+ case NEON_LD4R_post:
+ DCHECK(allocated);
+ allocated = (instr->NEONS() == 0);
+ break;
+ default:
+ break;
+ }
+ if (allocated) {
+ DCHECK_NOT_NULL(mnemonic);
+ DCHECK_NOT_NULL(form);
+ } else {
+ mnemonic = "unallocated";
+ form = "(NEONLoadStoreSingleStructPostIndex)";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONModifiedImmediate(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1";
+
+ int cmode = instr->NEONCmode();
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op = instr->NEONModImmOp();
+
+ static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}};
+ static const NEONFormatMap map_h = {{30}, {NF_4H, NF_8H}};
+ static const NEONFormatMap map_s = {{30}, {NF_2S, NF_4S}};
+ NEONFormatDecoder nfd(instr, &map_b);
+
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<3> == '1'.
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_h);
+ } else { // cmode<2> == '1'.
+ if (cmode_1 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2";
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<1> == '1'.
+ if (cmode_0 == 0) {
+ mnemonic = "movi";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImm8";
+ } else {
+ form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm";
+ }
+ } else { // cmode<0> == '1'
+ mnemonic = "fmov";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImmFPSingle";
+ nfd.SetFormatMap(0, &map_s);
+ } else {
+ if (q == 1) {
+ form = "'Vt.2d, 'IVMIImmFPDouble";
+ } else {
+ mnemonic = "unallocated";
+ form = "(NEONModifiedImmediate)";
+ }
+ }
+ }
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONPerm(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1:
+ mnemonic = "trn1";
+ break;
+ case NEON_TRN2:
+ mnemonic = "trn2";
+ break;
+ case NEON_UZP1:
+ mnemonic = "uzp1";
+ break;
+ case NEON_UZP2:
+ mnemonic = "uzp2";
+ break;
+ case NEON_ZIP1:
+ mnemonic = "zip1";
+ break;
+ case NEON_ZIP2:
+ mnemonic = "zip2";
+ break;
+ default:
+ form = "(NEONPerm)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONScalar2RegMisc(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, %sn";
+ const char* form_0 = "%sd, %sn, #0";
+ const char* form_fp0 = "%sd, %sn, #0.0";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMGT_zero_scalar:
+ mnemonic = "cmgt";
+ form = form_0;
+ break;
+ case NEON_CMGE_zero_scalar:
+ mnemonic = "cmge";
+ form = form_0;
+ break;
+ case NEON_CMLE_zero_scalar:
+ mnemonic = "cmle";
+ form = form_0;
+ break;
+ case NEON_CMLT_zero_scalar:
+ mnemonic = "cmlt";
+ form = form_0;
+ break;
+ case NEON_CMEQ_zero_scalar:
+ mnemonic = "cmeq";
+ form = form_0;
+ break;
+ case NEON_NEG_scalar:
+ mnemonic = "neg";
+ break;
+ case NEON_SQNEG_scalar:
+ mnemonic = "sqneg";
+ break;
+ case NEON_ABS_scalar:
+ mnemonic = "abs";
+ break;
+ case NEON_SQABS_scalar:
+ mnemonic = "sqabs";
+ break;
+ case NEON_SUQADD_scalar:
+ mnemonic = "suqadd";
+ break;
+ case NEON_USQADD_scalar:
+ mnemonic = "usqadd";
+ break;
+ default:
+ form = "(NEONScalar2RegMisc)";
+ }
+ } else {
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRSQRTE_scalar:
+ mnemonic = "frsqrte";
+ break;
+ case NEON_FRECPE_scalar:
+ mnemonic = "frecpe";
+ break;
+ case NEON_SCVTF_scalar:
+ mnemonic = "scvtf";
+ break;
+ case NEON_UCVTF_scalar:
+ mnemonic = "ucvtf";
+ break;
+ case NEON_FCMGT_zero_scalar:
+ mnemonic = "fcmgt";
+ form = form_fp0;
+ break;
+ case NEON_FCMGE_zero_scalar:
+ mnemonic = "fcmge";
+ form = form_fp0;
+ break;
+ case NEON_FCMLE_zero_scalar:
+ mnemonic = "fcmle";
+ form = form_fp0;
+ break;
+ case NEON_FCMLT_zero_scalar:
+ mnemonic = "fcmlt";
+ form = form_fp0;
+ break;
+ case NEON_FCMEQ_zero_scalar:
+ mnemonic = "fcmeq";
+ form = form_fp0;
+ break;
+ case NEON_FRECPX_scalar:
+ mnemonic = "frecpx";
+ break;
+ case NEON_FCVTNS_scalar:
+ mnemonic = "fcvtns";
+ break;
+ case NEON_FCVTNU_scalar:
+ mnemonic = "fcvtnu";
+ break;
+ case NEON_FCVTPS_scalar:
+ mnemonic = "fcvtps";
+ break;
+ case NEON_FCVTPU_scalar:
+ mnemonic = "fcvtpu";
+ break;
+ case NEON_FCVTMS_scalar:
+ mnemonic = "fcvtms";
+ break;
+ case NEON_FCVTMU_scalar:
+ mnemonic = "fcvtmu";
+ break;
+ case NEON_FCVTZS_scalar:
+ mnemonic = "fcvtzs";
+ break;
+ case NEON_FCVTZU_scalar:
+ mnemonic = "fcvtzu";
+ break;
+ case NEON_FCVTAS_scalar:
+ mnemonic = "fcvtas";
+ break;
+ case NEON_FCVTAU_scalar:
+ mnemonic = "fcvtau";
+ break;
+ case NEON_FCVTXN_scalar:
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ mnemonic = "fcvtxn";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.ScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.LongScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar:
+ mnemonic = "sqxtn";
+ break;
+ case NEON_UQXTN_scalar:
+ mnemonic = "uqxtn";
+ break;
+ case NEON_SQXTUN_scalar:
+ mnemonic = "sqxtun";
+ break;
+ default:
+ form = "(NEONScalar2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+void DisassemblingDecoder::VisitNEONScalar3Diff(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar:
+ mnemonic = "sqdmlal";
+ break;
+ case NEON_SQDMLSL_scalar:
+ mnemonic = "sqdmlsl";
+ break;
+ case NEON_SQDMULL_scalar:
+ mnemonic = "sqdmull";
+ break;
+ default:
+ form = "(NEONScalar3Diff)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+void DisassemblingDecoder::VisitNEONScalar3Same(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FACGE_scalar:
+ mnemonic = "facge";
+ break;
+ case NEON_FACGT_scalar:
+ mnemonic = "facgt";
+ break;
+ case NEON_FCMEQ_scalar:
+ mnemonic = "fcmeq";
+ break;
+ case NEON_FCMGE_scalar:
+ mnemonic = "fcmge";
+ break;
+ case NEON_FCMGT_scalar:
+ mnemonic = "fcmgt";
+ break;
+ case NEON_FMULX_scalar:
+ mnemonic = "fmulx";
+ break;
+ case NEON_FRECPS_scalar:
+ mnemonic = "frecps";
+ break;
+ case NEON_FRSQRTS_scalar:
+ mnemonic = "frsqrts";
+ break;
+ case NEON_FABD_scalar:
+ mnemonic = "fabd";
+ break;
+ default:
+ form = "(NEONScalar3Same)";
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar:
+ mnemonic = "add";
+ break;
+ case NEON_SUB_scalar:
+ mnemonic = "sub";
+ break;
+ case NEON_CMEQ_scalar:
+ mnemonic = "cmeq";
+ break;
+ case NEON_CMGE_scalar:
+ mnemonic = "cmge";
+ break;
+ case NEON_CMGT_scalar:
+ mnemonic = "cmgt";
+ break;
+ case NEON_CMHI_scalar:
+ mnemonic = "cmhi";
+ break;
+ case NEON_CMHS_scalar:
+ mnemonic = "cmhs";
+ break;
+ case NEON_CMTST_scalar:
+ mnemonic = "cmtst";
+ break;
+ case NEON_UQADD_scalar:
+ mnemonic = "uqadd";
+ break;
+ case NEON_SQADD_scalar:
+ mnemonic = "sqadd";
+ break;
+ case NEON_UQSUB_scalar:
+ mnemonic = "uqsub";
+ break;
+ case NEON_SQSUB_scalar:
+ mnemonic = "sqsub";
+ break;
+ case NEON_USHL_scalar:
+ mnemonic = "ushl";
+ break;
+ case NEON_SSHL_scalar:
+ mnemonic = "sshl";
+ break;
+ case NEON_UQSHL_scalar:
+ mnemonic = "uqshl";
+ break;
+ case NEON_SQSHL_scalar:
+ mnemonic = "sqshl";
+ break;
+ case NEON_URSHL_scalar:
+ mnemonic = "urshl";
+ break;
+ case NEON_SRSHL_scalar:
+ mnemonic = "srshl";
+ break;
+ case NEON_UQRSHL_scalar:
+ mnemonic = "uqrshl";
+ break;
+ case NEON_SQRSHL_scalar:
+ mnemonic = "sqrshl";
+ break;
+ case NEON_SQDMULH_scalar:
+ mnemonic = "sqdmulh";
+ break;
+ case NEON_SQRDMULH_scalar:
+ mnemonic = "sqrdmulh";
+ break;
+ default:
+ form = "(NEONScalar3Same)";
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+void DisassemblingDecoder::VisitNEONScalarByIndexedElement(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, %sn, 'Ve.%s['IVByElemIndex]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ bool long_instr = false;
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar:
+ mnemonic = "sqdmull";
+ long_instr = true;
+ break;
+ case NEON_SQDMLAL_byelement_scalar:
+ mnemonic = "sqdmlal";
+ long_instr = true;
+ break;
+ case NEON_SQDMLSL_byelement_scalar:
+ mnemonic = "sqdmlsl";
+ long_instr = true;
+ break;
+ case NEON_SQDMULH_byelement_scalar:
+ mnemonic = "sqdmulh";
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ mnemonic = "sqrdmulh";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar:
+ mnemonic = "fmul";
+ break;
+ case NEON_FMLA_byelement_scalar:
+ mnemonic = "fmla";
+ break;
+ case NEON_FMLS_byelement_scalar:
+ mnemonic = "fmls";
+ break;
+ case NEON_FMULX_byelement_scalar:
+ mnemonic = "fmulx";
+ break;
+ default:
+ form = "(NEONScalarByIndexedElement)";
+ }
+ }
+
+ if (long_instr) {
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ }
+
+ Format(instr, mnemonic,
+ nfd.Substitute(form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat));
+}
+
+void DisassemblingDecoder::VisitNEONScalarCopy(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "(NEONScalarCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ mnemonic = "mov";
+ form = "%sd, 'Vn.%s['IVInsIndex1]";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat));
+}
+
+void DisassemblingDecoder::VisitNEONScalarPairwise(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, 'Vn.%s";
+ NEONFormatMap map = {{22}, {NF_2S, NF_2D}};
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map);
+
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar:
+ mnemonic = "addp";
+ break;
+ case NEON_FADDP_scalar:
+ mnemonic = "faddp";
+ break;
+ case NEON_FMAXP_scalar:
+ mnemonic = "fmaxp";
+ break;
+ case NEON_FMAXNMP_scalar:
+ mnemonic = "fmaxnmp";
+ break;
+ case NEON_FMINP_scalar:
+ mnemonic = "fminp";
+ break;
+ case NEON_FMINNMP_scalar:
+ mnemonic = "fminnmp";
+ break;
+ default:
+ form = "(NEONScalarPairwise)";
+ }
+ Format(instr, mnemonic,
+ nfd.Substitute(form, NEONFormatDecoder::kPlaceholder,
+ NEONFormatDecoder::kFormat));
+}
+
+void DisassemblingDecoder::VisitNEONScalarShiftImmediate(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "%sd, %sn, 'Is1";
+ const char* form_2 = "%sd, %sn, 'Is2";
+
+ static const NEONFormatMap map_shift = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, NF_D, NF_D, NF_D,
+ NF_D, NF_D, NF_D, NF_D, NF_D}};
+ static const NEONFormatMap map_shift_narrow = {
+ {21, 20, 19}, {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}};
+ NEONFormatDecoder nfd(instr, &map_shift);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_FCVTZU_imm_scalar:
+ mnemonic = "fcvtzu";
+ break;
+ case NEON_FCVTZS_imm_scalar:
+ mnemonic = "fcvtzs";
+ break;
+ case NEON_SCVTF_imm_scalar:
+ mnemonic = "scvtf";
+ break;
+ case NEON_UCVTF_imm_scalar:
+ mnemonic = "ucvtf";
+ break;
+ case NEON_SRI_scalar:
+ mnemonic = "sri";
+ break;
+ case NEON_SSHR_scalar:
+ mnemonic = "sshr";
+ break;
+ case NEON_USHR_scalar:
+ mnemonic = "ushr";
+ break;
+ case NEON_SRSHR_scalar:
+ mnemonic = "srshr";
+ break;
+ case NEON_URSHR_scalar:
+ mnemonic = "urshr";
+ break;
+ case NEON_SSRA_scalar:
+ mnemonic = "ssra";
+ break;
+ case NEON_USRA_scalar:
+ mnemonic = "usra";
+ break;
+ case NEON_SRSRA_scalar:
+ mnemonic = "srsra";
+ break;
+ case NEON_URSRA_scalar:
+ mnemonic = "ursra";
+ break;
+ case NEON_SHL_scalar:
+ mnemonic = "shl";
+ form = form_2;
+ break;
+ case NEON_SLI_scalar:
+ mnemonic = "sli";
+ form = form_2;
+ break;
+ case NEON_SQSHLU_scalar:
+ mnemonic = "sqshlu";
+ form = form_2;
+ break;
+ case NEON_SQSHL_imm_scalar:
+ mnemonic = "sqshl";
+ form = form_2;
+ break;
+ case NEON_UQSHL_imm_scalar:
+ mnemonic = "uqshl";
+ form = form_2;
+ break;
+ case NEON_UQSHRN_scalar:
+ mnemonic = "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_UQRSHRN_scalar:
+ mnemonic = "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRN_scalar:
+ mnemonic = "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRN_scalar:
+ mnemonic = "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRUN_scalar:
+ mnemonic = "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRUN_scalar:
+ mnemonic = "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ default:
+ form = "(NEONScalarShiftImmediate)";
+ }
+ } else {
+ form = "(NEONScalarShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+void DisassemblingDecoder::VisitNEONShiftImmediate(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Vd.%s, 'Vn.%s, 'Is1";
+ const char* form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2";
+ const char* form_xtl = "'Vd.%s, 'Vn.%s";
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_shift_ta = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map_shift_tb = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}};
+
+ NEONFormatDecoder nfd(instr, &map_shift_tb);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SQSHLU:
+ mnemonic = "sqshlu";
+ form = form_shift_2;
+ break;
+ case NEON_SQSHL_imm:
+ mnemonic = "sqshl";
+ form = form_shift_2;
+ break;
+ case NEON_UQSHL_imm:
+ mnemonic = "uqshl";
+ form = form_shift_2;
+ break;
+ case NEON_SHL:
+ mnemonic = "shl";
+ form = form_shift_2;
+ break;
+ case NEON_SLI:
+ mnemonic = "sli";
+ form = form_shift_2;
+ break;
+ case NEON_SCVTF_imm:
+ mnemonic = "scvtf";
+ break;
+ case NEON_UCVTF_imm:
+ mnemonic = "ucvtf";
+ break;
+ case NEON_FCVTZU_imm:
+ mnemonic = "fcvtzu";
+ break;
+ case NEON_FCVTZS_imm:
+ mnemonic = "fcvtzs";
+ break;
+ case NEON_SRI:
+ mnemonic = "sri";
+ break;
+ case NEON_SSHR:
+ mnemonic = "sshr";
+ break;
+ case NEON_USHR:
+ mnemonic = "ushr";
+ break;
+ case NEON_SRSHR:
+ mnemonic = "srshr";
+ break;
+ case NEON_URSHR:
+ mnemonic = "urshr";
+ break;
+ case NEON_SSRA:
+ mnemonic = "ssra";
+ break;
+ case NEON_USRA:
+ mnemonic = "usra";
+ break;
+ case NEON_SRSRA:
+ mnemonic = "srsra";
+ break;
+ case NEON_URSRA:
+ mnemonic = "ursra";
+ break;
+ case NEON_SHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_RSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SSHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl";
+ } else { // sshll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll";
+ }
+ break;
+ case NEON_USHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl";
+ } else { // ushll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll";
+ }
+ break;
+ default:
+ form = "(NEONShiftImmediate)";
+ }
+ } else {
+ form = "(NEONShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+void DisassemblingDecoder::VisitNEONTable(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
+ const char* form = "(NEONTable)";
+ const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s";
+ const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s";
+ const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ const char form_4v[] =
+ "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}};
+ NEONFormatDecoder nfd(instr, &map_b);
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v:
+ mnemonic = "tbl";
+ form = form_1v;
+ break;
+ case NEON_TBL_2v:
+ mnemonic = "tbl";
+ form = form_2v;
+ break;
+ case NEON_TBL_3v:
+ mnemonic = "tbl";
+ form = form_3v;
+ break;
+ case NEON_TBL_4v:
+ mnemonic = "tbl";
+ form = form_4v;
+ break;
+ case NEON_TBX_1v:
+ mnemonic = "tbx";
+ form = form_1v;
+ break;
+ case NEON_TBX_2v:
+ mnemonic = "tbx";
+ form = form_2v;
+ break;
+ case NEON_TBX_3v:
+ mnemonic = "tbx";
+ form = form_3v;
+ break;
+ case NEON_TBX_4v:
+ mnemonic = "tbx";
+ form = form_4v;
+ break;
+ default:
+ break;
+ }
+
+ char re_form[sizeof(form_4v)];
+ int reg_num = instr->Rn();
+ snprintf(re_form, sizeof(re_form), form, (reg_num + 1) % kNumberOfVRegisters,
+ (reg_num + 2) % kNumberOfVRegisters,
+ (reg_num + 3) % kNumberOfVRegisters);
+
+ Format(instr, mnemonic, nfd.Substitute(re_form));
+}
void DisassemblingDecoder::VisitUnimplemented(Instruction* instr) {
Format(instr, "unimplemented", "(Unimplemented)");
}
-
void DisassemblingDecoder::VisitUnallocated(Instruction* instr) {
Format(instr, "unallocated", "(Unallocated)");
}
-
void DisassemblingDecoder::ProcessOutput(Instruction* /*instr*/) {
// The base disasm does nothing more than disassembling into a buffer.
}
+void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
+ DCHECK(reg.IsValid());
+ char reg_char;
+
+ if (reg.IsRegister()) {
+ reg_char = reg.Is64Bits() ? 'x' : 'w';
+ } else {
+ DCHECK(reg.IsVRegister());
+ switch (reg.SizeInBits()) {
+ case kBRegSizeInBits:
+ reg_char = 'b';
+ break;
+ case kHRegSizeInBits:
+ reg_char = 'h';
+ break;
+ case kSRegSizeInBits:
+ reg_char = 's';
+ break;
+ case kDRegSizeInBits:
+ reg_char = 'd';
+ break;
+ default:
+ DCHECK(reg.Is128Bits());
+ reg_char = 'q';
+ }
+ }
+
+ if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) {
+ // Filter special registers
+ if (reg.IsX() && (reg.code() == 27)) {
+ AppendToOutput("cp");
+ } else if (reg.IsX() && (reg.code() == 28)) {
+ AppendToOutput("jssp");
+ } else if (reg.IsX() && (reg.code() == 29)) {
+ AppendToOutput("fp");
+ } else if (reg.IsX() && (reg.code() == 30)) {
+ AppendToOutput("lr");
+ } else {
+ // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
+ AppendToOutput("%c%d", reg_char, reg.code());
+ }
+ } else if (reg.Aliases(csp)) {
+ // Disassemble w31/x31 as stack pointer wcsp/csp.
+ AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_char);
+ }
+}
void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic,
const char* format) {
@@ -1265,7 +3357,6 @@ void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic,
ProcessOutput(instr);
}
-
void DisassemblingDecoder::Substitute(Instruction* instr, const char* string) {
char chr = *string++;
while (chr != '\0') {
@@ -1278,56 +3369,123 @@ void DisassemblingDecoder::Substitute(Instruction* instr, const char* string) {
}
}
-
int DisassemblingDecoder::SubstituteField(Instruction* instr,
const char* format) {
switch (format[0]) {
+ // NB. The remaining substitution prefix characters are: GJKUZ.
case 'R': // Register. X or W, selected by sf bit.
- case 'F': // FP Register. S or D, selected by type field.
+ case 'F': // FP register. S or D, selected by type field.
+ case 'V': // Vector register, V, vector format.
case 'W':
case 'X':
+ case 'B':
+ case 'H':
case 'S':
- case 'D': return SubstituteRegisterField(instr, format);
- case 'I': return SubstituteImmediateField(instr, format);
- case 'L': return SubstituteLiteralField(instr, format);
- case 'H': return SubstituteShiftField(instr, format);
- case 'P': return SubstitutePrefetchField(instr, format);
- case 'C': return SubstituteConditionField(instr, format);
- case 'E': return SubstituteExtendField(instr, format);
- case 'A': return SubstitutePCRelAddressField(instr, format);
- case 'B': return SubstituteBranchTargetField(instr, format);
- case 'O': return SubstituteLSRegOffsetField(instr, format);
- case 'M': return SubstituteBarrierField(instr, format);
- default: {
+ case 'D':
+ case 'Q':
+ return SubstituteRegisterField(instr, format);
+ case 'I':
+ return SubstituteImmediateField(instr, format);
+ case 'L':
+ return SubstituteLiteralField(instr, format);
+ case 'N':
+ return SubstituteShiftField(instr, format);
+ case 'P':
+ return SubstitutePrefetchField(instr, format);
+ case 'C':
+ return SubstituteConditionField(instr, format);
+ case 'E':
+ return SubstituteExtendField(instr, format);
+ case 'A':
+ return SubstitutePCRelAddressField(instr, format);
+ case 'T':
+ return SubstituteBranchTargetField(instr, format);
+ case 'O':
+ return SubstituteLSRegOffsetField(instr, format);
+ case 'M':
+ return SubstituteBarrierField(instr, format);
+ default:
UNREACHABLE();
- return 1;
- }
}
}
-
int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
const char* format) {
+ char reg_prefix = format[0];
unsigned reg_num = 0;
unsigned field_len = 2;
+
switch (format[1]) {
- case 'd': reg_num = instr->Rd(); break;
- case 'n': reg_num = instr->Rn(); break;
- case 'm': reg_num = instr->Rm(); break;
- case 'a': reg_num = instr->Ra(); break;
- case 't': {
- if (format[2] == '2') {
- reg_num = instr->Rt2();
+ case 'd':
+ reg_num = instr->Rd();
+ if (format[2] == 'q') {
+ reg_prefix = instr->NEONQ() ? 'X' : 'W';
field_len = 3;
+ }
+ break;
+ case 'n':
+ reg_num = instr->Rn();
+ break;
+ case 'm':
+ reg_num = instr->Rm();
+ switch (format[2]) {
+ // Handle registers tagged with b (bytes), z (instruction), or
+ // r (registers), used for address updates in
+ // NEON load/store instructions.
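+        // Illustrative example (not part of the original comment): for the
+        // form "'Xmz1" with Rm == 31, the post-index amount is printed as an
+        // immediate scaled by the element size, e.g. "#4" when
+        // NEONLSSize() == 2 (an S-sized element).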
+ case 'r':
+ case 'b':
+ case 'z': {
+ field_len = 3;
+ char* eimm;
+ int imm = static_cast<int>(strtol(&format[3], &eimm, 10));
+ field_len += eimm - &format[3];
+ if (reg_num == 31) {
+ switch (format[2]) {
+ case 'z':
+ imm *= (1 << instr->NEONLSSize());
+ break;
+ case 'r':
+ imm *= (instr->NEONQ() == 0) ? kDRegSize : kQRegSize;
+ break;
+ case 'b':
+ break;
+ }
+ AppendToOutput("#%d", imm);
+ return field_len;
+ }
+ break;
+ }
+ }
+ break;
+ case 'e':
+ // This is register Rm, but using a 4-bit specifier. Used in NEON
+ // by-element instructions.
+ reg_num = (instr->Rm() & 0xf);
+ break;
+ case 'a':
+ reg_num = instr->Ra();
+ break;
+ case 't':
+ reg_num = instr->Rt();
+ if (format[0] == 'V') {
+ if ((format[2] >= '2') && (format[2] <= '4')) {
+ // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4.
+ reg_num = (reg_num + format[2] - '1') % 32;
+ field_len = 3;
+ }
} else {
- reg_num = instr->Rt();
+ if (format[2] == '2') {
+ // Handle register specifier Rt2.
+ reg_num = instr->Rt2();
+ field_len = 3;
+ }
}
break;
- }
case 's':
reg_num = instr->Rs();
break;
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
// Increase field length for registers tagged as stack.
@@ -1335,58 +3493,78 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
field_len = 3;
}
- char reg_type;
- if (format[0] == 'R') {
- // Register type is R: use sf bit to choose X and W.
- reg_type = instr->SixtyFourBits() ? 'x' : 'w';
- } else if (format[0] == 'F') {
- // Floating-point register: use type field to choose S or D.
- reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
- } else {
- // Register type is specified. Make it lower case.
- reg_type = format[0] + 0x20;
+ CPURegister::RegisterType reg_type;
+ unsigned reg_size;
+
+ if (reg_prefix == 'R') {
+ reg_prefix = instr->SixtyFourBits() ? 'X' : 'W';
+ } else if (reg_prefix == 'F') {
+ reg_prefix = ((instr->FPType() & 1) == 0) ? 'S' : 'D';
}
- if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
- // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+ switch (reg_prefix) {
+ case 'W':
+ reg_type = CPURegister::kRegister;
+ reg_size = kWRegSizeInBits;
+ break;
+ case 'X':
+ reg_type = CPURegister::kRegister;
+ reg_size = kXRegSizeInBits;
+ break;
+ case 'B':
+ reg_type = CPURegister::kVRegister;
+ reg_size = kBRegSizeInBits;
+ break;
+ case 'H':
+ reg_type = CPURegister::kVRegister;
+ reg_size = kHRegSizeInBits;
+ break;
+ case 'S':
+ reg_type = CPURegister::kVRegister;
+ reg_size = kSRegSizeInBits;
+ break;
+ case 'D':
+ reg_type = CPURegister::kVRegister;
+ reg_size = kDRegSizeInBits;
+ break;
+ case 'Q':
+ reg_type = CPURegister::kVRegister;
+ reg_size = kQRegSizeInBits;
+ break;
+ case 'V':
+ AppendToOutput("v%d", reg_num);
+ return field_len;
+ default:
+ UNREACHABLE();
+ reg_type = CPURegister::kRegister;
+ reg_size = kXRegSizeInBits;
+ }
- // Filter special registers
- if ((reg_type == 'x') && (reg_num == 27)) {
- AppendToOutput("cp");
- } else if ((reg_type == 'x') && (reg_num == 28)) {
- AppendToOutput("jssp");
- } else if ((reg_type == 'x') && (reg_num == 29)) {
- AppendToOutput("fp");
- } else if ((reg_type == 'x') && (reg_num == 30)) {
- AppendToOutput("lr");
- } else {
- AppendToOutput("%c%d", reg_type, reg_num);
- }
- } else if (format[2] == 's') {
- // Disassemble w31/x31 as stack pointer wcsp/csp.
- AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
- } else {
- // Disassemble w31/x31 as zero register wzr/xzr.
- AppendToOutput("%czr", reg_type);
+ if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) &&
+ (format[2] == 's')) {
+ reg_num = kSPRegInternalCode;
}
+ AppendRegisterNameToOutput(CPURegister::Create(reg_num, reg_size, reg_type));
+
return field_len;
}
-
int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
const char* format) {
DCHECK(format[0] == 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
- if (format[5] == 'I') {
+ if (format[5] == 'I' || format[5] == 'N') {
uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
<< (16 * instr->ShiftMoveWide());
+ if (format[5] == 'N') imm = ~imm;
+ if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff);
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK(format[5] == 'L');
- AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
+ AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
}
@@ -1409,15 +3587,15 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
if (instr->ImmLSPair() != 0) {
// format[3] is the scale value. Convert to a number.
- int scale = format[3] - 0x30;
+ int scale = 1 << (format[3] - '0');
AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
}
return 4;
}
case 'U': { // ILU - Immediate Load/Store Unsigned.
if (instr->ImmLSUnsigned() != 0) {
- AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned()
- << instr->SizeLS());
+ int shift = instr->SizeLS();
+ AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift);
}
return 3;
}
@@ -1473,13 +3651,120 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
instr->ImmTestBranchBit40());
return 2;
}
+ case 's': { // Is - Shift (immediate).
+ switch (format[2]) {
+ case '1': { // Is1 - SSHR.
+ int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh());
+ shift -= instr->ImmNEONImmhImmb();
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ case '2': { // Is2 - SLI.
+ int shift = instr->ImmNEONImmhImmb();
+ shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh());
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ default: {
+ UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
case 'D': { // IDebug - HLT and BRK instructions.
AppendToOutput("#0x%x", instr->ImmException());
return 6;
}
+ case 'V': { // Immediate Vector.
+ switch (format[2]) {
+ case 'E': { // IVExtract.
+ AppendToOutput("#%" PRId64, instr->ImmNEONExt());
+ return 9;
+ }
+ case 'B': { // IVByElemIndex.
+ int vm_index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ vm_index = (vm_index << 1) | instr->NEONM();
+ }
+ AppendToOutput("%d", vm_index);
+ return strlen("IVByElemIndex");
+ }
+ case 'I': { // INS element.
+ if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
+ unsigned rd_index, rn_index;
+ unsigned imm5 = instr->ImmNEON5();
+ unsigned imm4 = instr->ImmNEON4();
+ int tz = CountTrailingZeros(imm5, 32);
+ if (tz <= 3) { // Defined for 0 <= tz <= 3 only.
+ rd_index = imm5 >> (tz + 1);
+ rn_index = imm4 >> tz;
+ if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
+ AppendToOutput("%d", rd_index);
+ return strlen("IVInsIndex1");
+ } else if (strncmp(format, "IVInsIndex2",
+ strlen("IVInsIndex2")) == 0) {
+ AppendToOutput("%d", rn_index);
+ return strlen("IVInsIndex2");
+ }
+ }
+ return 0;
+ }
+ }
+ case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
+ AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
+ return 9;
+ }
+ case 'M': { // Modified Immediate cases.
+ if (strncmp(format, "IVMIImmFPSingle", strlen("IVMIImmFPSingle")) ==
+ 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP32());
+ return strlen("IVMIImmFPSingle");
+ } else if (strncmp(format, "IVMIImmFPDouble",
+ strlen("IVMIImmFPDouble")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP64());
+ return strlen("IVMIImmFPDouble");
+ } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ AppendToOutput("#0x%" PRIx64, imm8);
+ return strlen("IVMIImm8");
+ } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ uint64_t imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1 << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ AppendToOutput("#0x%" PRIx64, imm);
+ return strlen("IVMIImm");
+ } else if (strncmp(format, "IVMIShiftAmt1",
+ strlen("IVMIShiftAmt1")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 * ((cmode >> 1) & 3);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt1");
+ } else if (strncmp(format, "IVMIShiftAmt2",
+ strlen("IVMIShiftAmt2")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 << (cmode & 1);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt2");
+ } else {
+ UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ default: {
+ UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
default: {
+ printf("%s", format);
UNREACHABLE();
- return 0;
}
}
}
@@ -1515,7 +3800,6 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
}
default: {
UNREACHABLE();
- return 0;
}
}
}
@@ -1542,14 +3826,14 @@ int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
const char* format) {
- DCHECK(format[0] == 'H');
- DCHECK(instr->ShiftDP() <= 0x3);
+ DCHECK_EQ(format[0], 'N');
+ DCHECK_LE(instr->ShiftDP(), 0x3);
switch (format[1]) {
- case 'D': { // HDP.
+ case 'D': { // NDP.
DCHECK(instr->ShiftDP() != ROR);
} // Fall through.
- case 'L': { // HLo.
+ case 'L': { // NLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
@@ -1559,7 +3843,6 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
}
default:
UNREACHABLE();
- return 0;
}
}
@@ -1608,17 +3891,17 @@ int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
- DCHECK(strncmp(format, "BImm", 4) == 0);
+ DCHECK_EQ(strncmp(format, "TImm", 4), 0);
int64_t offset = 0;
switch (format[5]) {
- // BImmUncn - unconditional branch immediate.
+ // TImmUncn - unconditional branch immediate.
case 'n': offset = instr->ImmUncondBranch(); break;
- // BImmCond - conditional branch immediate.
+ // TImmCond - conditional branch immediate.
case 'o': offset = instr->ImmCondBranch(); break;
- // BImmCmpa - compare and branch immediate.
+ // TImmCmpa - compare and branch immediate.
case 'm': offset = instr->ImmCmpBranch(); break;
- // BImmTest - test and branch immediate.
+ // TImmTest - test and branch immediate.
case 'e': offset = instr->ImmTestBranch(); break;
default: UNREACHABLE();
}
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index 4b477bc438..c12d53b7e6 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -5,6 +5,7 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
+#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
@@ -29,6 +30,13 @@ class DisassemblingDecoder : public DecoderVisitor {
protected:
virtual void ProcessOutput(Instruction* instr);
+ // Default output functions. The functions below implement a default way of
+ // printing elements in the disassembly. A sub-class can override these to
+ // customize the disassembly output.
+
+ // Prints the name of a register.
+ virtual void AppendRegisterNameToOutput(const CPURegister& reg);
+
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
index bf2fde119e..68e8d757c8 100644
--- a/deps/v8/src/arm64/frames-arm64.cc
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -19,15 +19,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 4b419d6dbd..f4dbd75533 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -21,7 +21,7 @@ bool Instruction::IsLoad() const {
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
- LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case LDRB_w:
case LDRH_w:
@@ -32,8 +32,12 @@ bool Instruction::IsLoad() const {
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
+ case LDR_b:
+ case LDR_h:
case LDR_s:
- case LDR_d: return true;
+ case LDR_d:
+ case LDR_q:
+ return true;
default: return false;
}
}
@@ -48,14 +52,18 @@ bool Instruction::IsStore() const {
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
- LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
+ case STR_b:
+ case STR_h:
case STR_s:
- case STR_d: return true;
+ case STR_d:
+ case STR_q:
+ return true;
default: return false;
}
}
@@ -136,46 +144,50 @@ uint64_t Instruction::ImmLogical() {
}
}
UNREACHABLE();
- return 0;
}
-
-float Instruction::ImmFP32() {
- // ImmFP: abcdefgh (8 bits)
- // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
- // where B is b ^ 1
- uint32_t bits = ImmFP();
- uint32_t bit7 = (bits >> 7) & 0x1;
- uint32_t bit6 = (bits >> 6) & 0x1;
- uint32_t bit5_to_0 = bits & 0x3f;
- uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
-
- return rawbits_to_float(result);
+uint32_t Instruction::ImmNEONabcdefgh() const {
+ return ImmNEONabc() << 5 | ImmNEONdefgh();
}
+float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }
+
+double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }
-double Instruction::ImmFP64() {
- // ImmFP: abcdefgh (8 bits)
- // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
- // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
- // where B is b ^ 1
- uint32_t bits = ImmFP();
- uint64_t bit7 = (bits >> 7) & 0x1;
- uint64_t bit6 = (bits >> 6) & 0x1;
- uint64_t bit5_to_0 = bits & 0x3f;
- uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }
- return rawbits_to_double(result);
+double Instruction::ImmNEONFP64() const {
+ return Imm8ToFP64(ImmNEONabcdefgh());
}
+unsigned CalcLSDataSize(LoadStoreOp op) {
+ DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
+ kInstructionSize * 8);
+ unsigned size = static_cast<Instr>(op) >> LSSize_offset;
+ if ((op & LSVector_mask) != 0) {
+ // Vector register memory operations encode the access size in the "size"
+ // and "opc" fields.
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
+ size = kQRegSizeLog2;
+ }
+ }
+ return size;
+}
-LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+unsigned CalcLSPairDataSize(LoadStorePairOp op) {
+ static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
+ static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
switch (op) {
+ case STP_q:
+ case LDP_q:
+ return kQRegSizeLog2;
case STP_x:
case LDP_x:
case STP_d:
- case LDP_d: return LSDoubleWord;
- default: return LSWord;
+ case LDP_d:
+ return kXRegSizeLog2;
+ default:
+ return kWRegSizeLog2;
}
}
@@ -334,7 +346,405 @@ uint64_t InstructionSequence::InlineData() const {
return payload;
}
+VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
+ DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
+ vform == kFormatH || vform == kFormatS || vform == kFormatD);
+ switch (vform) {
+ case kFormat8H:
+ return kFormat8B;
+ case kFormat4S:
+ return kFormat4H;
+ case kFormat2D:
+ return kFormat2S;
+ case kFormatH:
+ return kFormatB;
+ case kFormatS:
+ return kFormatH;
+ case kFormatD:
+ return kFormatS;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
+ DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
+ vform == kFormatB || vform == kFormatH || vform == kFormatS);
+ switch (vform) {
+ case kFormat8B:
+ return kFormat8H;
+ case kFormat4H:
+ return kFormat4S;
+ case kFormat2S:
+ return kFormat2D;
+ case kFormatB:
+ return kFormatH;
+ case kFormatH:
+ return kFormatS;
+ case kFormatS:
+ return kFormatD;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatFillQ(VectorFormat vform) {
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return kFormat16B;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return kFormat8H;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return kFormat4S;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return kFormat2D;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
+ switch (vform) {
+ case kFormat4H:
+ return kFormat8B;
+ case kFormat8H:
+ return kFormat16B;
+ case kFormat2S:
+ return kFormat4H;
+ case kFormat4S:
+ return kFormat8H;
+ case kFormat1D:
+ return kFormat2S;
+ case kFormat2D:
+ return kFormat4S;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
+ DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
+ switch (vform) {
+ case kFormat8B:
+ return kFormat16B;
+ case kFormat4H:
+ return kFormat8H;
+ case kFormat2S:
+ return kFormat4S;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
+ DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
+ switch (vform) {
+ case kFormat16B:
+ return kFormat8B;
+ case kFormat8H:
+ return kFormat4H;
+ case kFormat4S:
+ return kFormat2S;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat ScalarFormatFromLaneSize(int laneSize) {
+ switch (laneSize) {
+ case 8:
+ return kFormatB;
+ case 16:
+ return kFormatH;
+ case 32:
+ return kFormatS;
+ case 64:
+ return kFormatD;
+ default:
+ UNREACHABLE();
+ }
+}
+
+VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
+ return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
+}
+
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
+ return RegisterSizeInBitsFromFormat(vform) / 8;
+}
+
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ return kBRegSizeInBits;
+ case kFormatH:
+ return kHRegSizeInBits;
+ case kFormatS:
+ return kSRegSizeInBits;
+ case kFormatD:
+ return kDRegSizeInBits;
+ case kFormat8B:
+ case kFormat4H:
+ case kFormat2S:
+ case kFormat1D:
+ return kDRegSizeInBits;
+ default:
+ return kQRegSizeInBits;
+ }
+}
+
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return 8;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return 16;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return 32;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return 64;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int LaneSizeInBytesFromFormat(VectorFormat vform) {
+ return LaneSizeInBitsFromFormat(vform) / 8;
+}
+
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return 0;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return 1;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return 2;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return 3;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int LaneCountFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormat16B:
+ return 16;
+ case kFormat8B:
+ case kFormat8H:
+ return 8;
+ case kFormat4H:
+ case kFormat4S:
+ return 4;
+ case kFormat2S:
+ case kFormat2D:
+ return 2;
+ case kFormat1D:
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD:
+ return 1;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int MaxLaneCountFromFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B:
+ return 16;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H:
+ return 8;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S:
+ return 4;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D:
+ return 2;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Does 'vform' indicate a vector format or a scalar format?
+bool IsVectorFormat(VectorFormat vform) {
+ DCHECK_NE(vform, kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD:
+ return false;
+ default:
+ return true;
+ }
+}
+
+int64_t MaxIntFromFormat(VectorFormat vform) {
+ return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+int64_t MinIntFromFormat(VectorFormat vform) {
+ return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+uint64_t MaxUintFromFormat(VectorFormat vform) {
+ return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(IntegerFormatMap());
+}
+
+NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format);
+}
+
+NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1);
+}
+
+NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1,
+ const NEONFormatMap* format2) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1, format2);
+}
+
+void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
+ const NEONFormatMap* format1,
+ const NEONFormatMap* format2) {
+ DCHECK_NOT_NULL(format0);
+ formats_[0] = format0;
+ formats_[1] = (format1 == NULL) ? formats_[0] : format1;
+ formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+}
+
+void NEONFormatDecoder::SetFormatMap(unsigned index,
+ const NEONFormatMap* format) {
+ DCHECK_LT(index, arraysize(formats_));
+ DCHECK_NOT_NULL(format);
+ formats_[index] = format;
+}
+const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
+ return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+}
+
+const char* NEONFormatDecoder::Substitute(const char* string,
+ SubstitutionMode mode0,
+ SubstitutionMode mode1,
+ SubstitutionMode mode2) {
+ snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
+ GetSubstitute(1, mode1), GetSubstitute(2, mode2));
+ return form_buffer_;
+}
+
+const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
+ if ((instrbits_ & NEON_Q) != 0) {
+ snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
+ return mne_buffer_;
+ }
+ return mnemonic;
+}
+
+VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
+ return GetVectorFormat(formats_[format_index]);
+}
+
+VectorFormat NEONFormatDecoder::GetVectorFormat(
+ const NEONFormatMap* format_map) {
+ static const VectorFormat vform[] = {
+ kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
+ kFormat2S, kFormat4S, kFormat1D, kFormat2D, kFormatB,
+ kFormatH, kFormatS, kFormatD};
+ DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
+ return vform[GetNEONFormat(format_map)];
+}
+
+const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
+ if (mode == kFormat) {
+ return NEONFormatAsString(GetNEONFormat(formats_[index]));
+ }
+ DCHECK_EQ(mode, kPlaceholder);
+ return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
+}
+
+NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
+ return format_map->map[PickBits(format_map->bits)];
+}
+
+const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
+ static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
+ "2s", "4s", "1d", "2d", "b",
+ "h", "s", "d"};
+ DCHECK_LT(format, arraysize(formats));
+ return formats[format];
+}
+
+const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
+ DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
+ (format == NF_D) || (format == NF_UNDEF));
+ static const char* formats[] = {
+ "undefined", "undefined", "undefined", "undefined", "undefined",
+ "undefined", "undefined", "undefined", "undefined", "'B",
+ "'H", "'S", "'D"};
+ return formats[format];
+}
+
+uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
+ uint8_t result = 0;
+ for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
+ if (bits[b] == 0) break;
+ result <<= 1;
+ result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
+ }
+ return result;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 6110a14722..b6b38166bf 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -23,13 +23,17 @@ typedef uint32_t Instr;
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(ARM64_DEFINE_FP_STATICS)
+#define DEFINE_FLOAT16(name, value) extern const uint16_t name = value
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
+#define DEFINE_FLOAT16(name, value) extern const float16 name
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(ARM64_DEFINE_FP_STATICS)
+DEFINE_FLOAT16(kFP16PositiveInfinity, 0x7c00);
+DEFINE_FLOAT16(kFP16NegativeInfinity, 0xfc00);
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
@@ -47,19 +51,14 @@ DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
// The default NaN values (for FPCR.DN=1).
DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
+DEFINE_FLOAT16(kFP16DefaultNaN, 0x7e00);
+#undef DEFINE_FLOAT16
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
-
-enum LSDataSize {
- LSByte = 0,
- LSHalfword = 1,
- LSWord = 2,
- LSDoubleWord = 3
-};
-
-LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+unsigned CalcLSDataSize(LoadStoreOp op);
+unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
@@ -82,9 +81,10 @@ enum FPRounding {
FPNegativeInfinity = 0x2,
FPZero = 0x3,
- // The final rounding mode is only available when explicitly specified by the
- // instruction (such as with fcvta). It cannot be set in FPCR.
- FPTieAway
+ // The final rounding modes are only available when explicitly specified by
+ // the instruction (such as with fcvta). They cannot be set in FPCR.
+ FPTieAway,
+ FPRoundOdd
};
enum Reg31Mode {
@@ -152,14 +152,29 @@ class Instruction {
}
uint64_t ImmLogical();
+ unsigned ImmNEONabcdefgh() const;
float ImmFP32();
double ImmFP64();
+ float ImmNEONFP32() const;
+ double ImmNEONFP64() const;
- LSDataSize SizeLSPair() const {
+ unsigned SizeLS() const {
+ return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+ }
+
+ unsigned SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
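+  // Illustrative note (not part of the original change): NEONLSIndex() below
+  // rebuilds the lane index from Q:S:size. For byte-sized single-structure
+  // accesses access_size_shift is 0, giving lane indices 0-15; for H-sized
+  // accesses it is 1, giving indices 0-7.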
+ int NEONLSIndex(int access_size_shift) const {
+ int q = NEONQ();
+ int s = NEONS();
+ int size = NEONLSSize();
+ int index = (q << 3) | (s << 2) | size;
+ return index >> access_size_shift;
+ }
+
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
@@ -181,6 +196,33 @@ class Instruction {
return BranchType() != UnknownBranchType;
}
+ static float Imm8ToFP32(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
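+    // Worked example (illustrative, not in the original comment): imm8 = 0x70
+    // (a = 0, b = 1, cdefgh = 110000) expands to 0x3f800000, i.e. 1.0f.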
+ uint32_t bits = imm8;
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return bit_cast<float>(result);
+ }
+
+ static double Imm8ToFP64(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
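+    // Worked example (illustrative, not in the original comment): the same
+    // imm8 = 0x70 expands to 0x3ff0000000000000, i.e. 1.0.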
+ uint32_t bits = imm8;
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return bit_cast<double>(result);
+ }
+
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
@@ -300,7 +342,6 @@ class Instruction {
return ImmTestBranch_width;
default:
UNREACHABLE();
- return 0;
}
}
@@ -417,6 +458,48 @@ class Instruction {
void SetBranchImmTarget(Instruction* target);
};
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+ kFormatUndefined = 0xffffffff,
+ kFormat8B = NEON_8B,
+ kFormat16B = NEON_16B,
+ kFormat4H = NEON_4H,
+ kFormat8H = NEON_8H,
+ kFormat2S = NEON_2S,
+ kFormat4S = NEON_4S,
+ kFormat1D = NEON_1D,
+ kFormat2D = NEON_2D,
+
+ // Scalar formats. We add the scalar bit to distinguish between scalar and
+ // vector enumerations; the bit is always set in the encoding of scalar ops
+ // and always clear for vector ops. Although kFormatD and kFormat1D appear
+ // to be the same, their meaning is subtly different. The first is a scalar
+ // operation, the second a vector operation that only affects one lane.
+ kFormatB = NEON_B | NEONScalar,
+ kFormatH = NEON_H | NEONScalar,
+ kFormatS = NEON_S | NEONScalar,
+ kFormatD = NEON_D | NEONScalar
+};
+
+VectorFormat VectorFormatHalfWidth(VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
+VectorFormat VectorFormatFillQ(VectorFormat vform);
+VectorFormat ScalarFormatFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
@@ -504,7 +587,7 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize;
//
// For example:
//
-// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
+// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
@@ -517,24 +600,201 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
- NO_PARAM = 0,
- BREAK = 1 << 0,
- LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
- LOG_REGS = 1 << 2, // Log general purpose registers.
- LOG_FP_REGS = 1 << 3, // Log floating-point registers.
- LOG_SYS_REGS = 1 << 4, // Log the status flags.
- LOG_WRITE = 1 << 5, // Log any memory write.
-
- LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
- LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
+ NO_PARAM = 0,
+ BREAK = 1 << 0,
+ LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
+ LOG_REGS = 1 << 2, // Log general purpose registers.
+ LOG_VREGS = 1 << 3, // Log NEON and floating-point registers.
+ LOG_SYS_REGS = 1 << 4, // Log the status flags.
+ LOG_WRITE = 1 << 5, // Log any memory write.
+
+ LOG_NONE = 0,
+ LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
// Trace control.
- TRACE_ENABLE = 1 << 6,
- TRACE_DISABLE = 2 << 6,
+ TRACE_ENABLE = 1 << 6,
+ TRACE_DISABLE = 2 << 6,
TRACE_OVERRIDE = 3 << 6
};
+enum NEONFormat {
+ NF_UNDEF = 0,
+ NF_8B = 1,
+ NF_16B = 2,
+ NF_4H = 3,
+ NF_8H = 4,
+ NF_2S = 5,
+ NF_4S = 6,
+ NF_1D = 7,
+ NF_2D = 8,
+ NF_B = 9,
+ NF_H = 10,
+ NF_S = 11,
+ NF_D = 12
+};
+
+static const unsigned kNEONFormatMaxBits = 6;
+struct NEONFormatMap {
+ // The bit positions in the instruction to consider.
+ uint8_t bits[kNEONFormatMaxBits];
+
+ // Mapping from concatenated bits to format.
+ NEONFormat map[1 << kNEONFormatMaxBits];
+};
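// Minimal sketch (not from the V8 sources) of how a NEONFormatMap is meant to
// be read, assuming the first entry of |bits| ends up as the most significant
// bit of the concatenated index. With the integer map below ({23, 22, 30}),
// size = 0b01 and Q = 1 concatenate to 0b011 = 3, which selects NF_8H.
#include <cstdint>

enum SketchFormat { S_8B, S_16B, S_4H, S_8H, S_2S, S_4S, S_UNDEF, S_2D };

SketchFormat DecodeIntegerFormat(uint32_t instrbits) {
  const uint8_t bits[] = {23, 22, 30};  // size<1>, size<0>, Q
  const SketchFormat map[] = {S_8B, S_16B, S_4H,    S_8H,
                              S_2S, S_4S,  S_UNDEF, S_2D};
  unsigned index = 0;
  for (uint8_t b : bits) {
    index = (index << 1) | ((instrbits >> b) & 1);  // concatenate selected bits
  }
  return map[index];
}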
+
+class NEONFormatDecoder {
+ public:
+ enum SubstitutionMode { kPlaceholder, kFormat };
+
+ // Construct a format decoder with increasingly specific format maps for each
+ // substitution. If no format map is specified, the default is the integer
+ // format map.
+ explicit NEONFormatDecoder(const Instruction* instr);
+ NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
+ NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
+ const NEONFormatMap* format1);
+ NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
+ const NEONFormatMap* format1, const NEONFormatMap* format2);
+
+ // Set the format mapping for all or individual substitutions.
+ void SetFormatMaps(const NEONFormatMap* format0,
+ const NEONFormatMap* format1 = NULL,
+ const NEONFormatMap* format2 = NULL);
+ void SetFormatMap(unsigned index, const NEONFormatMap* format);
+
+ // Substitute %s in the input string with the placeholder string for each
+ // register, i.e. "'B", "'H", etc.
+ const char* SubstitutePlaceholders(const char* string);
+
+ // Substitute %s in the input string with a new string based on the
+ // substitution mode.
+ const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
+ SubstitutionMode mode1 = kFormat,
+ SubstitutionMode mode2 = kFormat);
+
+ // Append a "2" to a mnemonic string based on the state of the Q bit.
+ const char* Mnemonic(const char* mnemonic);
+
+ VectorFormat GetVectorFormat(int format_index = 0);
+ VectorFormat GetVectorFormat(const NEONFormatMap* format_map);
+
+ // Built in mappings for common cases.
+
+ // The integer format map uses three bits (Q, size<1:0>) to encode the
+ // "standard" set of NEON integer vector formats.
+ static const NEONFormatMap* IntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
+ return &map;
+ }
+
+ // The long integer format map uses two bits (size<1:0>) to encode the
+ // long set of NEON integer vector formats. These are used in narrow, wide
+ // and long operations.
+ static const NEONFormatMap* LongIntegerFormatMap() {
+ static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
+ return &map;
+ }
+
+ // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+ // formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap* FPFormatMap() {
+ // The FP format map assumes two bits (Q, size<0>) are used to encode the
+ // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap map = {{22, 30},
+ {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
+ return &map;
+ }
+
+ // The load/store format map uses three bits (Q, 11, 10) to encode the
+ // set of NEON vector formats.
+ static const NEONFormatMap* LoadStoreFormatMap() {
+ static const NEONFormatMap map = {
+ {11, 10, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
+ return &map;
+ }
+
+ // The logical format map uses one bit (Q) to encode the NEON vector format:
+ // NF_8B, NF_16B.
+ static const NEONFormatMap* LogicalFormatMap() {
+ static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
+ return &map;
+ }
+
+ // The triangular format map uses between two and five bits to encode the NEON
+ // vector format:
+ // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+ // x1000->2S, x1001->4S, 10001->2D, all others undefined.
+ static const NEONFormatMap* TriangularFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+ NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+ NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+ NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
+ return &map;
+ }
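  // Reading the triangular table above (assuming the first listed bit, 19, is
  // the most significant of the five-bit index and Q the least significant):
  // an instruction with <19:16> = 0b0001 and Q = 0 indexes entry 0b00010 = 2,
  // i.e. NF_8B (the "xxx10" row), while <19:16> = 0b1000 with Q = 1 indexes
  // entry 0b10001 = 17, i.e. NF_2D.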
+
+ // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+ // formats: NF_B, NF_H, NF_S, NF_D.
+ static const NEONFormatMap* ScalarFormatMap() {
+ static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
+ return &map;
+ }
+
+ // The long scalar format map uses two bits (size<1:0>) to encode the longer
+ // NEON scalar formats: NF_H, NF_S, NF_D.
+ static const NEONFormatMap* LongScalarFormatMap() {
+ static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
+ return &map;
+ }
+
+ // The FP scalar format map assumes one bit (size<0>) is used to encode the
+ // NEON FP scalar formats: NF_S, NF_D.
+ static const NEONFormatMap* FPScalarFormatMap() {
+ static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
+ return &map;
+ }
+
+ // The triangular scalar format map uses between one and four bits to encode
+ // the NEON scalar formats:
+ // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+ static const NEONFormatMap* TriangularScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16},
+ {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
+ NF_B, NF_S, NF_B, NF_H, NF_B}};
+ return &map;
+ }
+
+ private:
+ // Get a pointer to a string that represents the format or placeholder for
+ // the specified substitution index, based on the format map and instruction.
+ const char* GetSubstitute(int index, SubstitutionMode mode);
+
+ // Get the NEONFormat enumerated value for bits obtained from the
+ // instruction based on the specified format mapping.
+ NEONFormat GetNEONFormat(const NEONFormatMap* format_map);
+
+ // Convert a NEONFormat into a string.
+ static const char* NEONFormatAsString(NEONFormat format);
+
+ // Convert a NEONFormat into a register placeholder string.
+ static const char* NEONFormatAsPlaceholder(NEONFormat format);
+
+ // Select bits from instrbits_ defined by the bits array, concatenate them,
+ // and return the value.
+ uint8_t PickBits(const uint8_t bits[]);
+
+ Instr instrbits_;
+ const NEONFormatMap* formats_[3];
+ char form_buffer_[64];
+ char mne_buffer_[16];
+};
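// Rough usage sketch for the decoder above (the format string and register
// placeholders are illustrative assumptions, not taken verbatim from the
// disassembler):
//   NEONFormatDecoder nfd(instr);               // integer format map by default
//   const char* args = nfd.Substitute("'Vd.%s, 'Vn.%s, 'Vm.%s");
//   // With size = 01 and Q = 1 the integer map resolves each %s to "8h".
//   // For narrowing/widening forms, nfd.Mnemonic("addhn") is expected to
//   // return "addhn2" when the Q bit is set, matching the "...2" variants.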
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index c6e27f8ee3..2ed67ba57c 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -377,7 +377,7 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
- switch (instr->Mask(LoadStoreOpMask)) {
+ switch (instr->Mask(LoadStoreMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
@@ -595,6 +595,159 @@ void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
counter->Increment();
}
+void Instrument::VisitNEON2RegMisc(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEON3Different(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEON3Same(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONAcrossLanes(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONByIndexedElement(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONCopy(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONExtract(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONModifiedImmediate(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONPerm(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalar2RegMisc(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalar3Diff(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalar3Same(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalarByIndexedElement(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalarCopy(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalarPairwise(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONScalarShiftImmediate(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONShiftImmediate(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+void Instrument::VisitNEONTable(Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
void Instrument::VisitUnallocated(Instruction* instr) {
Update();
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 887adddf29..29078ed5d2 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -49,6 +49,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return x0; }
+
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
@@ -174,6 +176,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x2 : arguments list (FixedArray)
+ // x4 : arguments list length (untagged)
+ Register registers[] = {x1, x0, x2, x4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
@@ -183,6 +195,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x2 : the object to spread
+ Register registers[] = {x1, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1 : the target to call
+ // x2 : the arguments list
+ Register registers[] = {x1, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : arguments list (FixedArray)
+ // x4 : arguments list length (untagged)
+ Register registers[] = {x1, x3, x0, x2, x4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
@@ -193,6 +233,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : the object to spread
+ Register registers[] = {x1, x3, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : the arguments list
+ Register registers[] = {x1, x3, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
@@ -407,8 +466,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x0, // the value to pass to the generator
x1, // the JSGeneratorObject to resume
- x2, // the resume mode (tagged)
- x3 // SuspendFlags (tagged)
+ x2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index e2fbc8f4af..2815f31881 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -35,36 +35,28 @@ MemOperand UntagSmiMemOperand(Register object, int offset) {
return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
}
-
-void MacroAssembler::And(const Register& rd,
- const Register& rn,
+void TurboAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, AND);
}
-
-void MacroAssembler::Ands(const Register& rd,
- const Register& rn,
+void TurboAssembler::Ands(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ANDS);
}
-
-void MacroAssembler::Tst(const Register& rn,
- const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Tst(const Register& rn, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
}
-
-void MacroAssembler::Bic(const Register& rd,
- const Register& rn,
+void TurboAssembler::Bic(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, BIC);
}
@@ -73,53 +65,42 @@ void MacroAssembler::Bic(const Register& rd,
void MacroAssembler::Bics(const Register& rd,
const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, BICS);
}
-
-void MacroAssembler::Orr(const Register& rd,
- const Register& rn,
+void TurboAssembler::Orr(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ORR);
}
-
-void MacroAssembler::Orn(const Register& rd,
- const Register& rn,
+void TurboAssembler::Orn(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, ORN);
}
-
-void MacroAssembler::Eor(const Register& rd,
- const Register& rn,
+void TurboAssembler::Eor(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, EOR);
}
-
-void MacroAssembler::Eon(const Register& rd,
- const Register& rn,
+void TurboAssembler::Eon(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
LogicalMacro(rd, rn, operand, EON);
}
-
-void MacroAssembler::Ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
+ DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
} else {
@@ -132,7 +113,7 @@ void MacroAssembler::Ccmn(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
} else {
@@ -140,11 +121,9 @@ void MacroAssembler::Ccmn(const Register& rn,
}
}
-
-void MacroAssembler::Add(const Register& rd,
- const Register& rn,
+void TurboAssembler::Add(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
@@ -153,10 +132,9 @@ void MacroAssembler::Add(const Register& rd,
}
}
-void MacroAssembler::Adds(const Register& rd,
- const Register& rn,
+void TurboAssembler::Adds(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
@@ -165,11 +143,9 @@ void MacroAssembler::Adds(const Register& rd,
}
}
-
-void MacroAssembler::Sub(const Register& rd,
- const Register& rn,
+void TurboAssembler::Sub(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
@@ -178,11 +154,9 @@ void MacroAssembler::Sub(const Register& rd,
}
}
-
-void MacroAssembler::Subs(const Register& rd,
- const Register& rn,
+void TurboAssembler::Subs(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
@@ -191,22 +165,18 @@ void MacroAssembler::Subs(const Register& rd,
}
}
-
-void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Cmn(const Register& rn, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
Adds(AppropriateZeroRegFor(rn), rn, operand);
}
-
-void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Cmp(const Register& rn, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
-
-void MacroAssembler::Neg(const Register& rd,
- const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Neg(const Register& rd, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
if (operand.IsImmediate()) {
Mov(rd, -operand.ImmediateValue());
@@ -215,18 +185,14 @@ void MacroAssembler::Neg(const Register& rd,
}
}
-
-void MacroAssembler::Negs(const Register& rd,
- const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Negs(const Register& rd, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
Subs(rd, AppropriateZeroRegFor(rd), operand);
}
-
-void MacroAssembler::Adc(const Register& rd,
- const Register& rn,
+void TurboAssembler::Adc(const Register& rd, const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
@@ -235,7 +201,7 @@ void MacroAssembler::Adc(const Register& rd,
void MacroAssembler::Adcs(const Register& rd,
const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
@@ -244,7 +210,7 @@ void MacroAssembler::Adcs(const Register& rd,
void MacroAssembler::Sbc(const Register& rd,
const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
@@ -253,7 +219,7 @@ void MacroAssembler::Sbc(const Register& rd,
void MacroAssembler::Sbcs(const Register& rd,
const Register& rn,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
@@ -261,7 +227,7 @@ void MacroAssembler::Sbcs(const Register& rd,
void MacroAssembler::Ngc(const Register& rd,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
Sbc(rd, zr, operand);
@@ -270,41 +236,38 @@ void MacroAssembler::Ngc(const Register& rd,
void MacroAssembler::Ngcs(const Register& rd,
const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
Sbcs(rd, zr, operand);
}
-
-void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Mvn(const Register& rd, uint64_t imm) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Mov(rd, ~imm);
}
-
-#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
-void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
- DCHECK(allow_macro_instructions_); \
- LoadStoreMacro(REG, addr, OP); \
-}
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+ void TurboAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ DCHECK(allow_macro_instructions()); \
+ LoadStoreMacro(REG, addr, OP); \
+ }
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
-
#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
- void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
+ void TurboAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
const MemOperand& addr) { \
- DCHECK(allow_macro_instructions_); \
+ DCHECK(allow_macro_instructions()); \
LoadStorePairMacro(REG, REG2, addr, OP); \
}
LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
#define DECLARE_FUNCTION(FN, OP) \
- void MacroAssembler::FN(const Register& rt, const Register& rn) { \
- DCHECK(allow_macro_instructions_); \
+ void TurboAssembler::FN(const Register& rt, const Register& rn) { \
+ DCHECK(allow_macro_instructions()); \
OP(rt, rn); \
}
LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
@@ -313,47 +276,39 @@ LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#define DECLARE_FUNCTION(FN, OP) \
void MacroAssembler::FN(const Register& rs, const Register& rt, \
const Register& rn) { \
- DCHECK(allow_macro_instructions_); \
+ DCHECK(allow_macro_instructions()); \
OP(rs, rt, rn); \
}
STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
-void MacroAssembler::Asr(const Register& rd,
- const Register& rn,
+void TurboAssembler::Asr(const Register& rd, const Register& rn,
unsigned shift) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
asr(rd, rn, shift);
}
-
-void MacroAssembler::Asr(const Register& rd,
- const Register& rn,
+void TurboAssembler::Asr(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
asrv(rd, rn, rm);
}
-
-void MacroAssembler::B(Label* label) {
+void TurboAssembler::B(Label* label) {
b(label);
CheckVeneerPool(false, false);
}
-
-void MacroAssembler::B(Condition cond, Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::B(Condition cond, Label* label) {
+ DCHECK(allow_macro_instructions());
B(label, cond);
}
-
-void MacroAssembler::Bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
+void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
bfi(rd, rn, lsb, width);
}
@@ -363,40 +318,35 @@ void MacroAssembler::Bfxil(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
bfxil(rd, rn, lsb, width);
}
-
-void MacroAssembler::Bind(Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Bind(Label* label) {
+ DCHECK(allow_macro_instructions());
bind(label);
}
-
-void MacroAssembler::Bl(Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Bl(Label* label) {
+ DCHECK(allow_macro_instructions());
bl(label);
}
-
-void MacroAssembler::Blr(const Register& xn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Blr(const Register& xn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
blr(xn);
}
-
-void MacroAssembler::Br(const Register& xn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Br(const Register& xn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
br(xn);
}
-
-void MacroAssembler::Brk(int code) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Brk(int code) {
+ DCHECK(allow_macro_instructions());
brk(code);
}
@@ -404,7 +354,7 @@ void MacroAssembler::Brk(int code) {
void MacroAssembler::Cinc(const Register& rd,
const Register& rn,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
cinc(rd, rn, cond);
@@ -414,31 +364,27 @@ void MacroAssembler::Cinc(const Register& rd,
void MacroAssembler::Cinv(const Register& rd,
const Register& rn,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
cinv(rd, rn, cond);
}
-
-void MacroAssembler::Cls(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Cls(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
cls(rd, rn);
}
-
-void MacroAssembler::Clz(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Clz(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
clz(rd, rn);
}
-
-void MacroAssembler::Cneg(const Register& rd,
- const Register& rn,
+void TurboAssembler::Cneg(const Register& rd, const Register& rn,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
cneg(rd, rn, cond);
@@ -449,7 +395,7 @@ void MacroAssembler::Cneg(const Register& rd,
// due to the truncation side-effect when used on W registers.
void MacroAssembler::CzeroX(const Register& rd,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP() && rd.Is64Bits());
DCHECK((cond != al) && (cond != nv));
csel(rd, xzr, rd, cond);
@@ -461,7 +407,7 @@ void MacroAssembler::CzeroX(const Register& rd,
void MacroAssembler::CmovX(const Register& rd,
const Register& rn,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
DCHECK(rd.Is64Bits() && rn.Is64Bits());
DCHECK((cond != al) && (cond != nv));
@@ -470,9 +416,8 @@ void MacroAssembler::CmovX(const Register& rd,
}
}
-
-void MacroAssembler::Cset(const Register& rd, Condition cond) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Cset(const Register& rd, Condition cond) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
cset(rd, cond);
@@ -480,18 +425,15 @@ void MacroAssembler::Cset(const Register& rd, Condition cond) {
void MacroAssembler::Csetm(const Register& rd, Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csetm(rd, cond);
}
-
-void MacroAssembler::Csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Csinc(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csinc(rd, rn, rm, cond);
@@ -502,7 +444,7 @@ void MacroAssembler::Csinv(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csinv(rd, rn, rm, cond);
@@ -513,7 +455,7 @@ void MacroAssembler::Csneg(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csneg(rd, rn, rm, cond);
@@ -521,19 +463,18 @@ void MacroAssembler::Csneg(const Register& rd,
void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
dmb(domain, type);
}
void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
dsb(domain, type);
}
-
-void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) {
+ DCHECK(allow_macro_instructions());
debug(message, code, params);
}
@@ -542,47 +483,39 @@ void MacroAssembler::Extr(const Register& rd,
const Register& rn,
const Register& rm,
unsigned lsb) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
extr(rd, rn, rm, lsb);
}
-
-void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fabs(const VRegister& fd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
fabs(fd, fn);
}
-
-void MacroAssembler::Fadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fadd(fd, fn, fm);
}
-
-void MacroAssembler::Fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fccmp(const VRegister& fn, const VRegister& fm,
+ StatusFlags nzcv, Condition cond) {
+ DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
fccmp(fn, fm, nzcv, cond);
}
-
-void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcmp(const VRegister& fn, const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fcmp(fn, fm);
}
-
-void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcmp(const VRegister& fn, double value) {
+ DCHECK(allow_macro_instructions());
if (value != 0.0) {
UseScratchRegisterScope temps(this);
- FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ VRegister tmp = temps.AcquireSameSizeAs(fn);
Fmov(tmp, value);
fcmp(fn, tmp);
} else {
@@ -590,364 +523,281 @@ void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
}
}
-
-void MacroAssembler::Fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, Condition cond) {
+ DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
fcsel(fd, fn, fm, cond);
}
-
-void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvt(const VRegister& fd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
fcvt(fd, fn);
}
-
-void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtas(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtas(rd, fn);
}
-
-void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtau(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtau(rd, fn);
}
-
-void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtms(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtms(rd, fn);
}
-
-void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtmu(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtmu(rd, fn);
}
-
-void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtns(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtns(rd, fn);
}
-
-void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtnu(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtnu(rd, fn);
}
-
-void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtzs(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtzs(rd, fn);
}
-void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fcvtzu(const Register& rd, const VRegister& fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fcvtzu(rd, fn);
}
-
-void MacroAssembler::Fdiv(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fdiv(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fdiv(fd, fn, fm);
}
-
-void MacroAssembler::Fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fmadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ DCHECK(allow_macro_instructions());
fmadd(fd, fn, fm, fa);
}
-
-void MacroAssembler::Fmax(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmax(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fmax(fd, fn, fm);
}
-
-void MacroAssembler::Fmaxnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fmaxnm(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fmaxnm(fd, fn, fm);
}
-
-void MacroAssembler::Fmin(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmin(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fmin(fd, fn, fm);
}
-
-void MacroAssembler::Fminnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fminnm(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fminnm(fd, fn, fm);
}
-
-void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmov(VRegister fd, VRegister fn) {
+ DCHECK(allow_macro_instructions());
// Only emit an instruction if fd and fn are different, and they are both D
// registers. fmov(s0, s0) is not a no-op because it clears the top word of
// d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
- // top of q0, but FPRegister does not currently support Q registers.
+ // top of q0, but VRegister does not currently support Q registers.
if (!fd.Is(fn) || !fd.Is64Bits()) {
fmov(fd, fn);
}
}
-
-void MacroAssembler::Fmov(FPRegister fd, Register rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmov(VRegister fd, Register rn) {
+ DCHECK(allow_macro_instructions());
fmov(fd, rn);
}
+void TurboAssembler::Fmov(VRegister vd, double imm) {
+ DCHECK(allow_macro_instructions());
-void MacroAssembler::Fmov(FPRegister fd, double imm) {
- DCHECK(allow_macro_instructions_);
- if (fd.Is32Bits()) {
- Fmov(fd, static_cast<float>(imm));
+ if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+ Fmov(vd, static_cast<float>(imm));
return;
}
- DCHECK(fd.Is64Bits());
+ DCHECK(vd.Is1D() || vd.Is2D());
if (IsImmFP64(imm)) {
- fmov(fd, imm);
- } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
- fmov(fd, xzr);
+ fmov(vd, imm);
} else {
- Ldr(fd, imm);
+ uint64_t bits = bit_cast<uint64_t>(imm);
+ if (vd.IsScalar()) {
+ if (bits == 0) {
+ fmov(vd, xzr);
+ } else {
+ Ldr(vd, imm);
+ }
+ } else {
+ // TODO(all): consider NEON support for load literal.
+ Movi(vd, bits);
+ }
}
}
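// What the dispatch above is expected to select for a few inputs (a sketch;
// the exact emitted sequences depend on IsImmFP64 and the literal pool):
//   __ Fmov(d0, 1.0);        // FP8-encodable        -> fmov d0, #1.0
//   __ Fmov(d1, 0.0);        // zero, not encodable  -> fmov d1, xzr
//   __ Fmov(d2, 1.1);        // not encodable        -> Ldr d2, <literal>
//   __ Fmov(v3.V2D(), 1.1);  // vector destination   -> Movi v3.2d, <bits>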
-
-void MacroAssembler::Fmov(FPRegister fd, float imm) {
- DCHECK(allow_macro_instructions_);
- if (fd.Is64Bits()) {
- Fmov(fd, static_cast<double>(imm));
+void TurboAssembler::Fmov(VRegister vd, float imm) {
+ DCHECK(allow_macro_instructions());
+ if (vd.Is1D() || vd.Is2D()) {
+ Fmov(vd, static_cast<double>(imm));
return;
}
- DCHECK(fd.Is32Bits());
+ DCHECK(vd.Is1S() || vd.Is2S() || vd.Is4S());
if (IsImmFP32(imm)) {
- fmov(fd, imm);
- } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
- fmov(fd, wzr);
+ fmov(vd, imm);
} else {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireW();
- // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
- Mov(tmp, float_to_rawbits(imm));
- Fmov(fd, tmp);
+ uint32_t bits = bit_cast<uint32_t>(imm);
+ if (vd.IsScalar()) {
+ if (bits == 0) {
+ fmov(vd, wzr);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireW();
+ // TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
+ Mov(tmp, bit_cast<uint32_t>(imm));
+ Fmov(vd, tmp);
+ }
+ } else {
+ // TODO(all): consider NEON support for load literal.
+ Movi(vd, bits);
+ }
}
}
-
-void MacroAssembler::Fmov(Register rd, FPRegister fn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmov(Register rd, VRegister fn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
fmov(rd, fn);
}
-
-void MacroAssembler::Fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fmsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ DCHECK(allow_macro_instructions());
fmsub(fd, fn, fm, fa);
}
-
-void MacroAssembler::Fmul(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmul(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fmul(fd, fn, fm);
}
-
-void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- fneg(fd, fn);
-}
-
-
-void MacroAssembler::Fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fnmadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ DCHECK(allow_macro_instructions());
fnmadd(fd, fn, fm, fa);
}
-
-void MacroAssembler::Fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa) {
- DCHECK(allow_macro_instructions_);
+void MacroAssembler::Fnmsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa) {
+ DCHECK(allow_macro_instructions());
fnmsub(fd, fn, fm, fa);
}
-
-void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- frinta(fd, fn);
-}
-
-
-void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- frintm(fd, fn);
-}
-
-
-void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- frintn(fd, fn);
-}
-
-
-void MacroAssembler::Frintp(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- frintp(fd, fn);
-}
-
-
-void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- frintz(fd, fn);
-}
-
-
-void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
- DCHECK(allow_macro_instructions_);
- fsqrt(fd, fn);
-}
-
-
-void MacroAssembler::Fsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm) {
+ DCHECK(allow_macro_instructions());
fsub(fd, fn, fm);
}
void MacroAssembler::Hint(SystemHint code) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
hint(code);
}
void MacroAssembler::Hlt(int code) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
hlt(code);
}
void MacroAssembler::Isb() {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
isb();
}
-
-void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
- DCHECK(allow_macro_instructions_);
- ldr(rt, imm);
+void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
+ ldr(rt, operand);
}
-
-void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
+ DCHECK(allow_macro_instructions());
DCHECK(rt.Is64Bits());
- ldr(rt, Immediate(double_to_rawbits(imm)));
+ ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
}
-
-void MacroAssembler::Lsl(const Register& rd,
- const Register& rn,
+void TurboAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lsl(rd, rn, shift);
}
-
-void MacroAssembler::Lsl(const Register& rd,
- const Register& rn,
+void TurboAssembler::Lsl(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lslv(rd, rn, rm);
}
-
-void MacroAssembler::Lsr(const Register& rd,
- const Register& rn,
+void TurboAssembler::Lsr(const Register& rd, const Register& rn,
unsigned shift) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lsr(rd, rn, shift);
}
-
-void MacroAssembler::Lsr(const Register& rd,
- const Register& rn,
+void TurboAssembler::Lsr(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
lsrv(rd, rn, rm);
}
-
-void MacroAssembler::Madd(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Madd(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
madd(rd, rn, rm, ra);
}
-
-void MacroAssembler::Mneg(const Register& rd,
- const Register& rn,
+void TurboAssembler::Mneg(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
mneg(rd, rn, rm);
}
-
-void MacroAssembler::Mov(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Mov(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
// Emit a register move only if the registers are distinct, or if they are
// not X registers. Note that mov(w0, w0) is not a no-op because it clears
@@ -959,53 +809,45 @@ void MacroAssembler::Mov(const Register& rd, const Register& rn) {
void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
movk(rd, imm, shift);
}
-
-void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rt.IsZero());
mrs(rt, sysreg);
}
void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
msr(sysreg, rt);
}
-
-void MacroAssembler::Msub(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Msub(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
msub(rd, rn, rm, ra);
}
-
-void MacroAssembler::Mul(const Register& rd,
- const Register& rn,
+void TurboAssembler::Mul(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
mul(rd, rn, rm);
}
-
-void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Rbit(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rbit(rd, rn);
}
-
-void MacroAssembler::Ret(const Register& xn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Ret(const Register& xn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
ret(xn);
CheckVeneerPool(false, false);
@@ -1013,39 +855,33 @@ void MacroAssembler::Ret(const Register& xn) {
void MacroAssembler::Rev(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rev(rd, rn);
}
-
-void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Rev16(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rev16(rd, rn);
}
-
-void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Rev32(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rev32(rd, rn);
}
-
-void MacroAssembler::Ror(const Register& rd,
- const Register& rs,
+void TurboAssembler::Ror(const Register& rd, const Register& rs,
unsigned shift) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
ror(rd, rs, shift);
}
-
-void MacroAssembler::Ror(const Register& rd,
- const Register& rn,
+void TurboAssembler::Ror(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
rorv(rd, rn, rm);
}
@@ -1055,34 +891,27 @@ void MacroAssembler::Sbfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sbfiz(rd, rn, lsb, width);
}
-
-void MacroAssembler::Sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
+void TurboAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sbfx(rd, rn, lsb, width);
}
-
-void MacroAssembler::Scvtf(const FPRegister& fd,
- const Register& rn,
+void TurboAssembler::Scvtf(const VRegister& fd, const Register& rn,
unsigned fbits) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
scvtf(fd, rn, fbits);
}
-
-void MacroAssembler::Sdiv(const Register& rd,
- const Register& rn,
+void TurboAssembler::Sdiv(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sdiv(rd, rn, rm);
}
@@ -1092,7 +921,7 @@ void MacroAssembler::Smaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smaddl(rd, rn, rm, ra);
}
@@ -1102,16 +931,14 @@ void MacroAssembler::Smsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smsubl(rd, rn, rm, ra);
}
-
-void MacroAssembler::Smull(const Register& rd,
- const Register& rn,
+void TurboAssembler::Smull(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smull(rd, rn, rm);
}
@@ -1120,73 +947,59 @@ void MacroAssembler::Smull(const Register& rd,
void MacroAssembler::Smulh(const Register& rd,
const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smulh(rd, rn, rm);
}
-
-void MacroAssembler::Umull(const Register& rd, const Register& rn,
+void TurboAssembler::Umull(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, xzr);
}
-
-void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Sxtb(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sxtb(rd, rn);
}
-
-void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Sxth(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sxth(rd, rn);
}
-
-void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Sxtw(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
sxtw(rd, rn);
}
-
-void MacroAssembler::Ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
+void TurboAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
ubfiz(rd, rn, lsb, width);
}
-
-void MacroAssembler::Ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
+void TurboAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
ubfx(rd, rn, lsb, width);
}
-
-void MacroAssembler::Ucvtf(const FPRegister& fd,
- const Register& rn,
+void TurboAssembler::Ucvtf(const VRegister& fd, const Register& rn,
unsigned fbits) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
ucvtf(fd, rn, fbits);
}
-
-void MacroAssembler::Udiv(const Register& rd,
- const Register& rn,
+void TurboAssembler::Udiv(const Register& rd, const Register& rn,
const Register& rm) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
udiv(rd, rn, rm);
}
@@ -1196,7 +1009,7 @@ void MacroAssembler::Umaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, ra);
}
@@ -1206,28 +1019,25 @@ void MacroAssembler::Umsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umsubl(rd, rn, rm, ra);
}
-
-void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Uxtb(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
uxtb(rd, rn);
}
-
-void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Uxth(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
uxth(rd, rn);
}
-
-void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
uxtw(rd, rn);
}
@@ -1236,13 +1046,13 @@ void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
- DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
-void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
- DCHECK(!csp.Is(sp_));
+void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
+ DCHECK(!csp.Is(StackPointer()));
if (!TmpList()->IsEmpty()) {
Sub(csp, StackPointer(), space);
} else {
@@ -1276,18 +1086,16 @@ void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
AssertStackConsistency();
}
-
-void MacroAssembler::SyncSystemStackPointer() {
+void TurboAssembler::SyncSystemStackPointer() {
DCHECK(emit_debug_code());
- DCHECK(!csp.Is(sp_));
+ DCHECK(!csp.Is(StackPointer()));
{ InstructionAccurateScope scope(this);
mov(csp, StackPointer());
}
AssertStackConsistency();
}
-
-void MacroAssembler::InitializeRootRegister() {
+void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Mov(root, Operand(roots_array_start));
@@ -1304,8 +1112,7 @@ void MacroAssembler::SmiTag(Register dst, Register src) {
void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
-
-void MacroAssembler::SmiUntag(Register dst, Register src) {
+void TurboAssembler::SmiUntag(Register dst, Register src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
@@ -1315,12 +1122,9 @@ void MacroAssembler::SmiUntag(Register dst, Register src) {
Asr(dst, src, kSmiShift);
}
+void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
-void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
-
-
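
The STATIC_ASSERT above pins the 64-bit Smi layout this code depends on: the payload sits in the upper kSmiValueSize bits, so untagging is a single arithmetic shift. A minimal standalone sketch, assuming the usual arm64 split of kSmiShift == 32 with a 32-bit payload (constants restated here for illustration, not pulled from the V8 headers):

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed arm64 layout: payload in the high 32 bits

int64_t SmiTagModel(int32_t value) {
  // Tag: move the payload into the upper half (via unsigned to avoid UB on shifts).
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}

int32_t SmiUntagModel(int64_t smi) {
  // Untag: arithmetic shift right, like the Asr above.
  return static_cast<int32_t>(smi >> kSmiShift);
}

int main() {
  assert(SmiUntagModel(SmiTagModel(-42)) == -42);
  assert(SmiTagModel(1) == (int64_t{1} << 32));
  return 0;
}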
-void MacroAssembler::SmiUntagToDouble(FPRegister dst,
- Register src,
+void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src,
UntagMode mode) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
@@ -1329,9 +1133,7 @@ void MacroAssembler::SmiUntagToDouble(FPRegister dst,
Scvtf(dst, src, kSmiShift);
}
-
-void MacroAssembler::SmiUntagToFloat(FPRegister dst,
- Register src,
+void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src,
UntagMode mode) {
DCHECK(dst.Is32Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
@@ -1356,9 +1158,7 @@ void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
Push(src1.W(), wzr, src2.W(), wzr);
}
-
-void MacroAssembler::JumpIfSmi(Register value,
- Label* smi_label,
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
// Check if the tag bit is set.
@@ -1442,7 +1242,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
Bic(untagged_obj, obj, kHeapObjectTag);
}
-void MacroAssembler::jmp(Label* L) { B(L); }
+void TurboAssembler::jmp(Label* L) { B(L); }
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
@@ -1463,17 +1263,29 @@ void MacroAssembler::IsObjectJSStringType(Register object,
}
}
-
-void MacroAssembler::Push(Handle<Object> handle) {
+void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, Operand(handle));
Push(tmp);
}
-void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+void TurboAssembler::Push(Smi* smi) {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, Operand(smi));
+ Push(tmp);
+}
+
+void MacroAssembler::PushObject(Handle<Object> handle) {
+ if (handle->IsHeapObject()) {
+ Push(Handle<HeapObject>::cast(handle));
+ } else {
+ Push(Smi::cast(*handle));
+ }
+}
-void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
+void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
uint64_t size = count * unit_size;
@@ -1490,10 +1302,9 @@ void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
Sub(StackPointer(), StackPointer(), size);
}
-
-void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
+void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- DCHECK(base::bits::IsPowerOfTwo64(unit_size));
+ DCHECK(base::bits::IsPowerOfTwo(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1512,7 +1323,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
+ DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
@@ -1529,8 +1340,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
Sub(StackPointer(), StackPointer(), size);
}
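
Claim and Drop (and their SMI variants) only accept power-of-two unit sizes so the multiply by unit_size can be folded into the operand shift; the SMI variants subtract kSmiShift from that shift because the incoming count is still tagged. A small sketch of the scaling arithmetic, using the GCC/Clang builtin __builtin_ctzll to stand in for CountTrailingZeros (an assumption made for the illustration):

#include <cassert>
#include <cstdint>

// count * unit_size rewritten as a shift; valid because unit_size is a power
// of two, so the shift amount is its trailing-zero count.
uint64_t ScaleByUnitSize(uint64_t count, uint64_t unit_size) {
  assert(unit_size != 0 && (unit_size & (unit_size - 1)) == 0);
  unsigned shift = static_cast<unsigned>(__builtin_ctzll(unit_size));
  return count << shift;  // the value that Operand(count, LSL, shift) describes
}

int main() {
  assert(ScaleByUnitSize(3, 8) == 24);
  assert(ScaleByUnitSize(5, 16) == 80);
  return 0;
}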
-
-void MacroAssembler::Drop(int64_t count, uint64_t unit_size) {
+void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
uint64_t size = count * unit_size;
@@ -1550,10 +1360,9 @@ void MacroAssembler::Drop(int64_t count, uint64_t unit_size) {
}
}
-
-void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
+void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return;
- DCHECK(base::bits::IsPowerOfTwo64(unit_size));
+ DCHECK(base::bits::IsPowerOfTwo(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift);
@@ -1575,7 +1384,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
+ DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR),
@@ -1613,8 +1422,7 @@ void MacroAssembler::CompareAndBranch(const Register& lhs,
}
}
-
-void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
+void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
@@ -1627,8 +1435,7 @@ void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
}
}
-
-void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
+void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 2282c941ba..acecfb950c 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -27,38 +27,16 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
unsigned buffer_size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, buffer_size),
- generating_stub_(false),
-#if DEBUG
- allow_macro_instructions_(true),
-#endif
- has_frame_(false),
- isolate_(isolate),
- use_real_aborts_(true),
- sp_(jssp),
- tmp_list_(DefaultTmpList()),
- fptmp_list_(DefaultFPTmpList()) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
-}
-
-
-CPURegList MacroAssembler::DefaultTmpList() {
- return CPURegList(ip0, ip1);
-}
+ : TurboAssembler(isolate, buffer, buffer_size, create_code_object) {}
+CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }
-CPURegList MacroAssembler::DefaultFPTmpList() {
+CPURegList TurboAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
-
-void MacroAssembler::LogicalMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op) {
+void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
+ const Operand& operand, LogicalOp op) {
UseScratchRegisterScope temps(this);
if (operand.NeedsRelocation(this)) {
@@ -165,9 +143,8 @@ void MacroAssembler::LogicalMacro(const Register& rd,
}
}
-
-void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
+ DCHECK(allow_macro_instructions());
DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
DCHECK(!rd.IsZero());
@@ -244,11 +221,9 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
}
}
-
-void MacroAssembler::Mov(const Register& rd,
- const Operand& operand,
+void TurboAssembler::Mov(const Register& rd, const Operand& operand,
DiscardMoveMode discard_mode) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
// Provide a swap register for instructions that need to write into the
@@ -257,7 +232,7 @@ void MacroAssembler::Mov(const Register& rd,
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
if (operand.NeedsRelocation(this)) {
- Ldr(dst, operand.immediate());
+ Ldr(dst, operand);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
@@ -300,9 +275,174 @@ void MacroAssembler::Mov(const Register& rd,
}
}
+void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
+ DCHECK(is_uint16(imm));
+ int byte1 = (imm & 0xff);
+ int byte2 = ((imm >> 8) & 0xff);
+ if (byte1 == byte2) {
+ movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
+ } else if (byte1 == 0) {
+ movi(vd, byte2, LSL, 8);
+ } else if (byte2 == 0) {
+ movi(vd, byte1);
+ } else if (byte1 == 0xff) {
+ mvni(vd, ~byte2 & 0xff, LSL, 8);
+ } else if (byte2 == 0xff) {
+ mvni(vd, ~byte1 & 0xff);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ movz(temp, imm);
+ dup(vd, temp);
+ }
+}
+
+void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
+ DCHECK(is_uint32(imm));
-void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
- DCHECK(allow_macro_instructions_);
+ uint8_t bytes[sizeof(imm)];
+ memcpy(bytes, &imm, sizeof(imm));
+
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 4; ++i) {
+ if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
+ all0orff = false;
+ break;
+ }
+ }
+
+ if (all0orff == true) {
+ movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is non-zero.
+ for (int i = 0; i < 4; i++) {
+ if ((imm & (0xff << (i * 8))) == imm) {
+ movi(vd, bytes[i], LSL, i * 8);
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is not 0xff.
+ for (int i = 0; i < 4; i++) {
+ uint32_t mask = ~(0xff << (i * 8));
+ if ((imm & mask) == mask) {
+ mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
+ return;
+ }
+ }
+
+ // Immediate is of the form 0x00MMFFFF.
+ if ((imm & 0xff00ffff) == 0x0000ffff) {
+ movi(vd, bytes[2], MSL, 16);
+ return;
+ }
+
+ // Immediate is of the form 0x0000MMFF.
+ if ((imm & 0xffff00ff) == 0x000000ff) {
+ movi(vd, bytes[1], MSL, 8);
+ return;
+ }
+
+ // Immediate is of the form 0xFFMM0000.
+ if ((imm & 0xff00ffff) == 0xff000000) {
+ mvni(vd, ~bytes[2] & 0xff, MSL, 16);
+ return;
+ }
+ // Immediate is of the form 0xFFFFMM00.
+ if ((imm & 0xffff00ff) == 0xffff0000) {
+ mvni(vd, ~bytes[1] & 0xff, MSL, 8);
+ return;
+ }
+
+ // Top and bottom 16-bits are equal.
+ if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
+ Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ Mov(temp, imm);
+ dup(vd, temp);
+ }
+}
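
Movi32bitHelper is essentially a classifier over the immediate's byte pattern, picking the cheapest single-instruction NEON encoding before falling back to a scratch register and dup. A host-side sketch of the same decision ladder (illustration only; the real work is the movi/mvni emission above):

#include <cstdint>
#include <cstdio>

// Mirrors the order of checks in Movi32bitHelper for a 32-bit immediate.
const char* ClassifyMovi32(uint32_t imm) {
  bool all0orff = true;                       // every byte 0x00 or 0xff
  for (int i = 0; i < 4; ++i) {
    uint32_t byte = (imm >> (i * 8)) & 0xff;
    if (byte != 0 && byte != 0xff) all0orff = false;
  }
  if (all0orff) return "byte-mask movi on the replicated 64-bit value";
  for (int i = 0; i < 4; ++i) {               // exactly one non-zero byte
    if ((imm & (0xffu << (i * 8))) == imm) return "movi byte, LSL #(8*i)";
  }
  for (int i = 0; i < 4; ++i) {               // exactly one byte differs from 0xff
    uint32_t mask = ~(0xffu << (i * 8));
    if ((imm & mask) == mask) return "mvni ~byte, LSL #(8*i)";
  }
  if ((imm & 0xff00ffff) == 0x0000ffff) return "movi byte, MSL #16";   // 0x00MMffff
  if ((imm & 0xffff00ff) == 0x000000ff) return "movi byte, MSL #8";    // 0x0000MMff
  if ((imm & 0xff00ffff) == 0xff000000) return "mvni ~byte, MSL #16";  // 0xffMM0000
  if ((imm & 0xffff00ff) == 0xffff0000) return "mvni ~byte, MSL #8";   // 0xffffMM00
  if (((imm >> 16) & 0xffff) == (imm & 0xffff)) return "16-bit helper on a half";
  return "general case: mov to a W scratch register, then dup";
}

int main() {
  std::printf("%s\n", ClassifyMovi32(0x00004200));  // single non-zero byte
  return 0;
}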
+
+void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 8; ++i) {
+ int byteval = (imm >> (i * 8)) & 0xff;
+ if (byteval != 0 && byteval != 0xff) {
+ all0orff = false;
+ break;
+ }
+ }
+ if (all0orff == true) {
+ movi(vd, imm);
+ return;
+ }
+ }
+
+ // Top and bottom 32-bits are equal.
+ if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
+ Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, imm);
+ if (vd.Is1D()) {
+ mov(vd.D(), 0, temp);
+ } else {
+ dup(vd.V2D(), temp);
+ }
+ }
+}
+
+void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
+ int shift_amount) {
+ DCHECK(allow_macro_instructions());
+ if (shift_amount != 0 || shift != LSL) {
+ movi(vd, imm, shift, shift_amount);
+ } else if (vd.Is8B() || vd.Is16B()) {
+ // 8-bit immediate.
+ DCHECK(is_uint8(imm));
+ movi(vd, imm);
+ } else if (vd.Is4H() || vd.Is8H()) {
+ // 16-bit immediate.
+ Movi16bitHelper(vd, imm);
+ } else if (vd.Is2S() || vd.Is4S()) {
+ // 32-bit immediate.
+ Movi32bitHelper(vd, imm);
+ } else {
+ // 64-bit immediate.
+ Movi64bitHelper(vd, imm);
+ }
+}
+
+void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
+ // TODO(all): Move 128-bit values in a more efficient way.
+ DCHECK(vd.Is128Bits());
+ UseScratchRegisterScope temps(this);
+ Movi(vd.V2D(), lo);
+ Register temp = temps.AcquireX();
+ Mov(temp, hi);
+ Ins(vd.V2D(), 1, temp);
+}
+
+void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
+ DCHECK(allow_macro_instructions());
if (operand.NeedsRelocation(this)) {
Ldr(rd, operand.immediate());
@@ -324,8 +464,7 @@ void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
}
}
-
-unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
DCHECK((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
@@ -340,7 +479,7 @@ unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
-bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
@@ -348,15 +487,13 @@ bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
-bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
return IsImmMovz(~imm, reg_size);
}
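
Put together, the movz/movn test reduces to counting clear 16-bit half-words. A standalone restatement of the check (a sketch mirroring the functions above):

#include <cassert>
#include <cstdint>

unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  unsigned count = 0;
  for (unsigned i = 0; i < reg_size / 16; ++i) {
    if (((imm >> (i * 16)) & 0xffff) == 0) ++count;
  }
  return count;
}

// movz can materialise the value when at most one half-word is non-zero;
// movn covers the complementary patterns (at most one half-word not 0xffff).
bool IsImmMovz(uint64_t imm, unsigned reg_size) {
  return CountClearHalfWords(imm, reg_size) >= (reg_size / 16) - 1;
}
bool IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}

int main() {
  assert(IsImmMovz(0x0000123400000000ULL, 64));   // one non-zero half-word
  assert(IsImmMovn(0xffffffffffff1234ULL, 64));   // one half-word differs from 0xffff
  assert(!IsImmMovz(0x0000123400005678ULL, 64));  // needs movz + movk
  return 0;
}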
-
-void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+void TurboAssembler::ConditionalCompareMacro(const Register& rn,
const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
+ StatusFlags nzcv, Condition cond,
ConditionalCompareOp op) {
DCHECK((cond != al) && (cond != nv));
if (operand.NeedsRelocation(this)) {
@@ -387,7 +524,7 @@ void MacroAssembler::Csel(const Register& rd,
const Register& rn,
const Operand& operand,
Condition cond) {
- DCHECK(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
if (operand.IsImmediate()) {
@@ -419,8 +556,7 @@ void MacroAssembler::Csel(const Register& rd,
}
}
-
-bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst,
int64_t imm) {
unsigned n, imm_s, imm_r;
int reg_size = dst.SizeInBits();
@@ -442,7 +578,7 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
return false;
}
-Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
int64_t imm,
PreShiftImmMode mode) {
int reg_size = dst.SizeInBits();
@@ -485,11 +621,8 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
return Operand(dst);
}
-
-void MacroAssembler::AddSubMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
!operand.NeedsRelocation(this) && (S == LeaveFlags)) {
@@ -534,11 +667,9 @@ void MacroAssembler::AddSubMacro(const Register& rd,
}
}
-
-void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+ const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
UseScratchRegisterScope temps(this);
@@ -585,12 +716,10 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
}
}
-
-void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op) {
+void TurboAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr, LoadStoreOp op) {
int64_t offset = addr.offset();
- LSDataSize size = CalcLSDataSize(op);
+ unsigned size = CalcLSDataSize(op);
// Check if an immediate offset fits in the immediate field of the
// appropriate instruction. If not, emit two instructions to perform
@@ -617,7 +746,7 @@ void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
}
}
-void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+void TurboAssembler::LoadStorePairMacro(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairOp op) {
@@ -625,7 +754,7 @@ void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
DCHECK(!addr.IsRegisterOffset());
int64_t offset = addr.offset();
- LSDataSize size = CalcLSPairDataSize(op);
+ unsigned size = CalcLSPairDataSize(op);
// Check if the offset fits in the immediate field of the appropriate
// instruction. If not, emit two instructions to perform the operation.
@@ -695,9 +824,8 @@ void MacroAssembler::Store(const Register& rt,
}
}
-
-bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
- Label *label, ImmBranchType b_type) {
+bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
+ Label* label, ImmBranchType b_type) {
bool need_longer_range = false;
// There are two situations in which we care about the offset being out of
// range:
@@ -721,9 +849,8 @@ bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
return need_longer_range;
}
-
-void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
+ DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
if (hint == kAdrNear) {
@@ -756,8 +883,7 @@ void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
}
}
-
-void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
@@ -776,9 +902,8 @@ void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
}
}
-
-void MacroAssembler::B(Label* label, Condition cond) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::B(Label* label, Condition cond) {
+ DCHECK(allow_macro_instructions());
DCHECK((cond != al) && (cond != nv));
Label done;
@@ -794,9 +919,8 @@ void MacroAssembler::B(Label* label, Condition cond) {
bind(&done);
}
-
-void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ DCHECK(allow_macro_instructions());
Label done;
bool need_extra_instructions =
@@ -811,9 +935,8 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
bind(&done);
}
-
-void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ DCHECK(allow_macro_instructions());
Label done;
bool need_extra_instructions =
@@ -828,9 +951,8 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
bind(&done);
}
-
-void MacroAssembler::Cbnz(const Register& rt, Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Cbnz(const Register& rt, Label* label) {
+ DCHECK(allow_macro_instructions());
Label done;
bool need_extra_instructions =
@@ -845,9 +967,8 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) {
bind(&done);
}
-
-void MacroAssembler::Cbz(const Register& rt, Label* label) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Cbz(const Register& rt, Label* label) {
+ DCHECK(allow_macro_instructions());
Label done;
bool need_extra_instructions =
@@ -865,11 +986,9 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) {
// Pseudo-instructions.
-
-void MacroAssembler::Abs(const Register& rd, const Register& rm,
- Label* is_not_representable,
- Label* is_representable) {
- DCHECK(allow_macro_instructions_);
+void TurboAssembler::Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable, Label* is_representable) {
+ DCHECK(allow_macro_instructions());
DCHECK(AreSameSizeAndType(rd, rm));
Cmp(rm, 1);
@@ -891,8 +1010,7 @@ void MacroAssembler::Abs(const Register& rd, const Register& rm,
// Abstracted stack operations.
-
-void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
@@ -903,8 +1021,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
PushHelper(count, size, src0, src1, src2, src3);
}
-
-void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
@@ -918,8 +1035,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
PushHelper(count - 4, size, src4, src5, src6, src7);
}
-
-void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
@@ -934,8 +1050,7 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
PopPostamble(count, size);
}
-
-void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5,
const CPURegister& dst6, const CPURegister& dst7) {
@@ -953,8 +1068,7 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
PopPostamble(count, size);
}
-
-void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
+void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
PushPreamble(size);
@@ -1016,8 +1130,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
queued_.clear();
}
-
-void MacroAssembler::PushCPURegList(CPURegList registers) {
+void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
@@ -1035,8 +1148,7 @@ void MacroAssembler::PushCPURegList(CPURegList registers) {
}
}
-
-void MacroAssembler::PopCPURegList(CPURegList registers) {
+void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
// Pop up to four registers at a time because if the current stack pointer is
@@ -1138,9 +1250,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
}
}
-
-void MacroAssembler::PushHelper(int count, int size,
- const CPURegister& src0,
+void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
const CPURegister& src1,
const CPURegister& src2,
const CPURegister& src3) {
@@ -1178,11 +1288,8 @@ void MacroAssembler::PushHelper(int count, int size,
}
}
-
-void MacroAssembler::PopHelper(int count, int size,
- const CPURegister& dst0,
- const CPURegister& dst1,
- const CPURegister& dst2,
+void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
+ const CPURegister& dst1, const CPURegister& dst2,
const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
@@ -1219,8 +1326,7 @@ void MacroAssembler::PopHelper(int count, int size,
}
}
-
-void MacroAssembler::PushPreamble(Operand total_size) {
+void TurboAssembler::PushPreamble(Operand total_size) {
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
@@ -1239,8 +1345,7 @@ void MacroAssembler::PushPreamble(Operand total_size) {
}
}
-
-void MacroAssembler::PopPostamble(Operand total_size) {
+void TurboAssembler::PopPostamble(Operand total_size) {
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
@@ -1259,14 +1364,14 @@ void MacroAssembler::PopPostamble(Operand total_size) {
}
}
-void MacroAssembler::PushPreamble(int count, int size) {
+void TurboAssembler::PushPreamble(int count, int size) {
PushPreamble(count * size);
}
-void MacroAssembler::PopPostamble(int count, int size) {
+void TurboAssembler::PopPostamble(int count, int size) {
PopPostamble(count * size);
}
-void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
+void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
@@ -1289,9 +1394,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Ldr(dst, MemOperand(StackPointer(), offset));
}
-
-void MacroAssembler::PokePair(const CPURegister& src1,
- const CPURegister& src2,
+void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
@@ -1355,8 +1458,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
-
-void MacroAssembler::AssertStackConsistency() {
+void TurboAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_aborts() since non-real aborts cause too
// much code to be generated.
if (emit_debug_code() && use_real_aborts()) {
@@ -1388,7 +1490,7 @@ void MacroAssembler::AssertStackConsistency() {
}
}
-void MacroAssembler::AssertCspAligned() {
+void TurboAssembler::AssertCspAligned() {
if (emit_debug_code() && use_real_aborts()) {
// TODO(titzer): use a real assert for alignment check?
UseScratchRegisterScope scope(this);
@@ -1397,7 +1499,7 @@ void MacroAssembler::AssertCspAligned() {
}
}
-void MacroAssembler::AssertFPCRState(Register fpcr) {
+void TurboAssembler::AssertFPCRState(Register fpcr) {
if (emit_debug_code()) {
Label unexpected_mode, done;
UseScratchRegisterScope temps(this);
@@ -1421,9 +1523,8 @@ void MacroAssembler::AssertFPCRState(Register fpcr) {
}
}
-
-void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
- const FPRegister& src) {
+void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
+ const VRegister& src) {
AssertFPCRState();
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
@@ -1432,8 +1533,7 @@ void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
Fsub(dst, src, fp_zero);
}
-
-void MacroAssembler::LoadRoot(CPURegister destination,
+void TurboAssembler::LoadRoot(CPURegister destination,
Heap::RootListIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
@@ -1447,35 +1547,18 @@ void MacroAssembler::StoreRoot(Register source,
Str(source, MemOperand(root, index << kPointerSizeLog2));
}
-
-void MacroAssembler::LoadTrueFalseRoots(Register true_root,
- Register false_root) {
- STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
- Ldp(true_root, false_root,
- MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- Mov(result, Operand(object));
-}
-
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ Move(result, Handle<HeapObject>::cast(object));
} else {
- DCHECK(object->IsSmi());
- Mov(result, Operand(object));
+ Mov(result, Operand(Smi::cast(*object)));
}
}
-void MacroAssembler::Move(Register dst, Register src) { Mov(dst, src); }
-void MacroAssembler::Move(Register dst, Handle<Object> x) {
- LoadObject(dst, x);
-}
-void MacroAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
+void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
+void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
@@ -1496,12 +1579,6 @@ void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
}
-void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
- EnumLengthUntagged(dst, map);
- SmiTag(dst, dst);
-}
-
-
void MacroAssembler::LoadAccessor(Register dst, Register holder,
int accessor_index,
AccessorComponent accessor) {
@@ -1570,51 +1647,6 @@ void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
B(ne, &next);
}
-
-void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
- Register scratch1,
- Register scratch2,
- Label* no_memento_found) {
- Label map_check;
- Label top_check;
- ExternalReference new_space_allocation_top_adr =
- ExternalReference::new_space_allocation_top_address(isolate());
- const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoLastWordOffset =
- kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
-
- // Bail out if the object is not in new space.
- JumpIfNotInNewSpace(receiver, no_memento_found);
- Add(scratch1, receiver, kMementoLastWordOffset);
- // If the object is in new space, we need to check whether it is on the same
- // page as the current top.
- Mov(scratch2, new_space_allocation_top_adr);
- Ldr(scratch2, MemOperand(scratch2));
- Eor(scratch2, scratch1, scratch2);
- Tst(scratch2, ~Page::kPageAlignmentMask);
- B(eq, &top_check);
- // The object is on a different page than allocation top. Bail out if the
- // object sits on the page boundary as no memento can follow and we cannot
- // touch the memory following it.
- Eor(scratch2, scratch1, receiver);
- Tst(scratch2, ~Page::kPageAlignmentMask);
- B(ne, no_memento_found);
- // Continue with the actual map check.
- jmp(&map_check);
- // If top is on the same page as the current object, we need to check whether
- // we are below top.
- bind(&top_check);
- Mov(scratch2, new_space_allocation_top_adr);
- Ldr(scratch2, MemOperand(scratch2));
- Cmp(scratch1, scratch2);
- B(ge, no_memento_found);
- // Memento map check.
- bind(&map_check);
- Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
- Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
@@ -1624,8 +1656,7 @@ void MacroAssembler::InNewSpace(Register object,
MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
-
-void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
+void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@@ -1642,6 +1673,17 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAFixedArray);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE);
+ Check(eq, kOperandIsNotAFixedArray);
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -1668,8 +1710,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
@@ -1681,16 +1722,11 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
// Load instance type
Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- Label async, do_check;
- STATIC_ASSERT(static_cast<int>(SuspendFlags::kGeneratorTypeMask) == 4);
- DCHECK(!temp.is(flags));
- B(&async, reg_bit_set, flags, 2);
-
+ Label do_check;
// Check if JSGeneratorObject
Cmp(temp, JS_GENERATOR_OBJECT_TYPE);
- jmp(&do_check);
+ B(eq, &do_check);
- bind(&async);
// Check if JSAsyncGeneratorObject
Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -1712,8 +1748,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-
-void MacroAssembler::AssertPositiveOrZero(Register value) {
+void TurboAssembler::AssertPositiveOrZero(Register value) {
if (emit_debug_code()) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
@@ -1723,16 +1758,42 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, Operand::EmbeddedCode(stub));
+ Blr(temp);
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, kCallSizeWithRelocation);
+#endif
}
+void MacroAssembler::CallStub(CodeStub* stub) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
void MacroAssembler::TailCallStub(CodeStub* stub) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, f->nargs);
+ Mov(x1, ExternalReference(f, isolate()));
+ CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
+}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
@@ -1783,7 +1844,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-int MacroAssembler::ActivationFrameAlignment() {
+int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1799,14 +1860,12 @@ int MacroAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_ARM64
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_of_reg_args) {
CallCFunction(function, num_of_reg_args, 0);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_of_reg_args,
int num_of_double_args) {
UseScratchRegisterScope temps(this);
@@ -1817,8 +1876,7 @@ void MacroAssembler::CallCFunction(ExternalReference function,
static const int kRegisterPassedArguments = 8;
-void MacroAssembler::CallCFunction(Register function,
- int num_of_reg_args,
+void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
int num_of_double_args) {
DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
@@ -1903,13 +1961,9 @@ void MacroAssembler::CallCFunction(Register function,
}
}
+void TurboAssembler::Jump(Register target) { Br(target); }
-void MacroAssembler::Jump(Register target) {
- Br(target);
-}
-
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
if (cond == nv) return;
UseScratchRegisterScope temps(this);
@@ -1921,23 +1975,19 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Bind(&done);
}
-
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+ Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond);
}
-
-void MacroAssembler::Call(Register target) {
+void TurboAssembler::Call(Register target) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
@@ -1951,8 +2001,7 @@ void MacroAssembler::Call(Register target) {
#endif
}
-
-void MacroAssembler::Call(Label* target) {
+void TurboAssembler::Call(Label* target) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
@@ -1966,10 +2015,9 @@ void MacroAssembler::Call(Label* target) {
#endif
}
-
-// MacroAssembler::CallSize is sensitive to changes in this function, as it
+// TurboAssembler::CallSize is sensitive to changes in this function, as it
// requires to know how many instructions are used to branch to the target.
-void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
@@ -1999,43 +2047,31 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
#endif
}
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
#ifdef DEBUG
Label start_call;
Bind(&start_call);
#endif
- if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
-
- AllowDeferredHandleDereference embedding_raw_address;
- Call(reinterpret_cast<Address>(code.location()), rmode);
+ Call(code.address(), rmode);
#ifdef DEBUG
// Check the size of the code generated.
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode));
#endif
}
-
-int MacroAssembler::CallSize(Register target) {
+int TurboAssembler::CallSize(Register target) {
USE(target);
return kInstructionSize;
}
-
-int MacroAssembler::CallSize(Label* target) {
+int TurboAssembler::CallSize(Label* target) {
USE(target);
return kInstructionSize;
}
-
-int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
+int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
@@ -2048,12 +2084,8 @@ int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
}
}
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
- USE(ast_id);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
DCHECK(rmode != RelocInfo::NONE32);
@@ -2100,10 +2132,8 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
-
-void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
+void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
+ VRegister scratch_d,
Label* on_successful_conversion,
Label* on_failed_conversion) {
// Convert to an int and back again, then compare with the original value.
@@ -2119,101 +2149,6 @@ void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
}
}
-
-void MacroAssembler::TestForMinusZero(DoubleRegister input) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
- // cause overflow.
- Fmov(temp, input);
- Cmp(temp, 1);
-}
-
-
-void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
- Label* on_negative_zero) {
- TestForMinusZero(input);
- B(vs, on_negative_zero);
-}
-
-
-void MacroAssembler::JumpIfMinusZero(Register input,
- Label* on_negative_zero) {
- DCHECK(input.Is64Bits());
- // Floating point value is in an integer register. Detect -0.0 by subtracting
- // 1 (cmp), which will cause overflow.
- Cmp(input, 1);
- B(vs, on_negative_zero);
-}
-
-
-void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
- // Clamp the value to [0..255].
- Cmp(input.W(), Operand(input.W(), UXTB));
- // If input < input & 0xff, it must be < 0, so saturate to 0.
- Csel(output.W(), wzr, input.W(), lt);
- // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
- Csel(output.W(), output.W(), 255, le);
-}
-
-
-void MacroAssembler::ClampInt32ToUint8(Register in_out) {
- ClampInt32ToUint8(in_out, in_out);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register output,
- DoubleRegister input,
- DoubleRegister dbl_scratch) {
- // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
- // - Inputs lower than 0 (including -infinity) produce 0.
- // - Inputs higher than 255 (including +infinity) produce 255.
- // Also, it seems that PIXEL types use round-to-nearest rather than
- // round-towards-zero.
-
- // Squash +infinity before the conversion, since Fcvtnu will normally
- // convert it to 0.
- Fmov(dbl_scratch, 255);
- Fmin(dbl_scratch, dbl_scratch, input);
-
- // Convert double to unsigned integer. Values less than zero become zero.
- // Values greater than 255 have already been clamped to 255.
- Fcvtnu(output, dbl_scratch);
-}
-
-void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
- Register end_address,
- Register filler) {
- DCHECK(!current_address.Is(csp));
- UseScratchRegisterScope temps(this);
- Register distance_in_words = temps.AcquireX();
- Label done;
-
- // Calculate the distance. If it's <= zero then there's nothing to do.
- Subs(distance_in_words, end_address, current_address);
- B(le, &done);
-
- // There's at least one field to fill, so do this unconditionally.
- Str(filler, MemOperand(current_address));
-
-  // If the distance_in_words consists of an odd number of words, we advance
-  // start_address by one word, otherwise the pairs loop will overwrite the
- // field that was stored above.
- And(distance_in_words, distance_in_words, kPointerSize);
- Add(current_address, current_address, distance_in_words);
-
- // Store filler to memory in pairs.
- Label loop, entry;
- B(&entry);
- Bind(&loop);
- Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
- Bind(&entry);
- Cmp(current_address, end_address);
- B(lo, &loop);
-
- Bind(&done);
-}
-
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -2243,7 +2178,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
B(ne, not_unique_name);
}
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
@@ -2529,8 +2464,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
InvokeFunction(x1, expected, actual, flag, call_wrapper);
}
-
-void MacroAssembler::TryConvertDoubleToInt64(Register result,
+void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
// Try to convert with an FPU convert instruction. It's trivial to compute
@@ -2554,9 +2488,8 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result,
B(vc, done);
}
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DoubleRegister double_input) {
+void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input) {
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2577,13 +2510,11 @@ void MacroAssembler::TruncateDoubleToI(Register result,
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
- DoubleToIStub stub(isolate(),
- jssp,
- result,
- 0,
- true, // is_truncating
- true); // skip_fastpath
- CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+ auto stub = new (zone) DoubleToIStub(nullptr, jssp, result, 0,
+ true, // is_truncating
+ true); // skip_fastpath
+ // DoubleToIStub preserves any registers it needs to clobber.
+ CallStubDelayed(stub);
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
Pop(xzr, lr); // xzr to drop the double input on the stack.
@@ -2600,45 +2531,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
Uxtw(result.W(), result.W());
}
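
The delayed variant keeps the same shape as before: try the inline conversion via TryConvertDoubleToInt64, and only push lr plus the input and call the (now lazily constructed) DoubleToIStub when that fails. A rough scalar model of the fast-path test, offered as an assumption-level sketch rather than what the stub itself does:

#include <cassert>
#include <cmath>
#include <cstdint>

// Attempts the cheap conversion; returns false for NaN and for values whose
// truncation does not fit in an int64, which is when the slow path is taken.
bool TryTruncateDoubleToInt64(double input, int64_t* result) {
  const double kTwoPow63 = std::ldexp(1.0, 63);
  if (!(input >= -kTwoPow63 && input < kTwoPow63)) return false;
  *result = static_cast<int64_t>(std::trunc(input));
  return true;
}

int main() {
  int64_t out = 0;
  assert(TryTruncateDoubleToInt64(3.9, &out) && out == 3);
  assert(!TryTruncateDoubleToInt64(1e19, &out));  // out of range: slow path
  return 0;
}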
-
-void MacroAssembler::TruncateHeapNumberToI(Register result,
- Register object) {
- Label done;
- DCHECK(!result.is(object));
- DCHECK(jssp.Is(StackPointer()));
-
- Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Try to convert the double to an int64. If successful, the bottom 32 bits
- // contain our truncated int32 result.
- TryConvertDoubleToInt64(result, fp_scratch, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- Push(lr);
- DoubleToIStub stub(isolate(),
- object,
- result,
- HeapNumber::kValueOffset - kHeapObjectTag,
- true, // is_truncating
- true); // skip_fastpath
- CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
- Pop(lr);
-
- Bind(&done);
-}
-
-void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
- UseScratchRegisterScope temps(this);
- frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
- Register temp = temps.AcquireX();
- Mov(temp, StackFrame::TypeToMarker(type));
- Push(lr, fp);
- Mov(fp, StackPointer());
- Claim(frame_slots);
- str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
-}
-
-void MacroAssembler::Prologue(bool code_pre_aging) {
+void TurboAssembler::Prologue(bool code_pre_aging) {
if (code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
__ EmitCodeAgeSequence(stub);
@@ -2653,15 +2546,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on arm64.
- UNREACHABLE();
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
@@ -2700,8 +2585,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
}
}
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(csp, fp);
@@ -2719,14 +2603,14 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::ExitFramePreserveFPRegs() {
- PushCPURegList(kCallerSavedFP);
+ PushCPURegList(kCallerSavedV);
}
void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
- CPURegList saved_fp_regs = kCallerSavedFP;
+ CPURegList saved_fp_regs = kCallerSavedV;
DCHECK(saved_fp_regs.Count() % 2 == 0);
int offset = ExitFrameConstants::kLastExitFrameField;
@@ -2778,11 +2662,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
// Save the frame pointer and context pointer in the top frame.
- Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
Str(fp, MemOperand(scratch));
- Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
- isolate())));
+ Mov(scratch,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Str(cp, MemOperand(scratch));
STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
@@ -2838,19 +2722,19 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// Restore the context pointer from the top frame.
if (restore_context) {
- Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
Ldr(cp, MemOperand(scratch));
}
if (emit_debug_code()) {
// Also emit debug code to clear the cp in the top frame.
- Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
Str(xzr, MemOperand(scratch));
}
// Clear the frame pointer from the top frame.
- Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
Str(xzr, MemOperand(scratch));
@@ -2865,16 +2749,6 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
}
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- Mov(scratch1, value);
- Mov(scratch2, ExternalReference(counter));
- Str(scratch1.W(), MemOperand(scratch2));
- }
-}
-
-
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value != 0);
@@ -2929,7 +2803,7 @@ void MacroAssembler::PushStackHandler() {
// (See JSEntryStub::GenerateBody().)
// Link the current handler as the next handler.
- Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
Ldr(x10, MemOperand(x11));
Push(x10);
@@ -2941,7 +2815,7 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Pop(x10);
- Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
Str(x10, MemOperand(x11));
}
@@ -2954,7 +2828,6 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3018,10 +2891,7 @@ void MacroAssembler::Allocate(int object_size,
Ccmp(result_end, alloc_limit, NoFlag, cc);
B(hi, gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- Str(result_end, MemOperand(top_address));
- }
+ Str(result_end, MemOperand(top_address));
// Tag the object.
ObjectTag(result, result);
@@ -3100,83 +2970,9 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Ccmp(result_end, alloc_limit, NoFlag, cc);
B(hi, gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- Str(result_end, MemOperand(top_address));
- }
-
- // Tag the object.
- ObjectTag(result, result);
-}
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
-
- DCHECK(!AreAliased(result, scratch1, scratch2));
- DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK(0 == (object_size & kObjectAlignmentMask));
-
- ExternalReference heap_allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch1;
- Register result_end = scratch2;
- Mov(top_address, Operand(heap_allocation_top));
- Ldr(result, MemOperand(top_address));
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on ARM64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- // Calculate new top and write it back.
- Adds(result_end, result, object_size);
- Str(result_end, MemOperand(top_address));
-
- ObjectTag(result, result);
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, Register scratch,
- AllocationFlags flags) {
- // |object_size| and |result_end| may overlap, other registers must not.
- DCHECK(!AreAliased(object_size, result, scratch));
- DCHECK(!AreAliased(result_end, result, scratch));
- DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
- result_end.Is64Bits());
-
- ExternalReference heap_allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch;
- Mov(top_address, heap_allocation_top);
- Ldr(result, MemOperand(top_address));
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on ARM64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- // Calculate new top and write it back.
- if ((flags & SIZE_IN_WORDS) != 0) {
- Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
- } else {
- Adds(result_end, result, object_size);
- }
Str(result_end, MemOperand(top_address));
- if (emit_debug_code()) {
- Tst(result_end, kObjectAlignmentMask);
- Check(eq, kUnalignedAllocationInNewSpace);
- }
-
+ // Tag the object.
ObjectTag(result, result);
}
@@ -3205,7 +3001,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
if (!heap_number_map.IsValid()) {
// If we have a valid value register, use the same type of register to store
// the map so we can use STP to store both in one instruction.
- if (value.IsValid() && value.IsFPRegister()) {
+ if (value.IsValid() && value.IsVRegister()) {
heap_number_map = temps.AcquireD();
} else {
heap_number_map = scratch1;
@@ -3214,7 +3010,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
if (emit_debug_code()) {
Register map;
- if (heap_number_map.IsFPRegister()) {
+ if (heap_number_map.IsVRegister()) {
map = scratch1;
Fmov(map, DoubleRegister(heap_number_map));
} else {
@@ -3265,7 +3061,7 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
Str(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -3373,16 +3169,6 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
JumpIfSmi(value, miss);
}
-
-void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- Tst(temp, mask);
-}
-
-
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
__ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
@@ -3477,43 +3263,10 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
-}
-
-void MacroAssembler::EmitSeqStringSetCharCheck(
- Register string,
- Register index,
- SeqStringSetCharCheckIndexType index_type,
- Register scratch,
- uint32_t encoding_mask) {
- DCHECK(!AreAliased(string, index, scratch));
-
- if (index_type == kIndexIsSmi) {
- AssertSmi(index);
- }
-
- // Check that string is an object.
- AssertNotSmi(string, kNonObject);
-
- // Check that string has an appropriate map.
- Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
- Cmp(scratch, encoding_mask);
- Check(eq, kUnexpectedStringType);
-
- Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
- Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
- Check(lt, kIndexIsTooLarge);
-
- DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
- Cmp(index, 0);
- Check(ge, kIndexIsNegative);
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame() || !stub->SometimesSetsUpAFrame();
}
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -3672,22 +3425,6 @@ void MacroAssembler::PushSafepointRegisters() {
PushXRegList(kSafepointSavedRegisters);
}
-
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- PushSafepointRegisters();
- PushCPURegList(CPURegList(
- CPURegister::kFPRegister, kDRegSizeInBits,
- RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- PopCPURegList(CPURegList(
- CPURegister::kFPRegister, kDRegSizeInBits,
- RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
- PopSafepointRegisters();
-}
-
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
@@ -3722,7 +3459,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
} else {
// This register has no safepoint register slot.
UNREACHABLE();
- return -1;
}
}
@@ -3738,19 +3474,16 @@ void MacroAssembler::CheckPageFlag(const Register& object,
}
}
-void MacroAssembler::CheckPageFlagSet(const Register& object,
- const Register& scratch,
- int mask,
+void TurboAssembler::CheckPageFlagSet(const Register& object,
+ const Register& scratch, int mask,
Label* if_any_set) {
And(scratch, object, ~Page::kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAnySet(scratch, mask, if_any_set);
}
-
-void MacroAssembler::CheckPageFlagClear(const Register& object,
- const Register& scratch,
- int mask,
+void TurboAssembler::CheckPageFlagClear(const Register& object,
+ const Register& scratch, int mask,
Label* if_all_clear) {
And(scratch, object, ~Page::kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
@@ -4068,22 +3801,12 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Tbz(load_scratch, 0, value_is_white);
}
-
-void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code()) {
Check(cond, reason);
}
}
-
-
-void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
- if (emit_debug_code()) {
- CheckRegisterIsClear(reg, reason);
- }
-}
-
-
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index,
BailoutReason reason) {
@@ -4093,23 +3816,7 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
}
}
-
-
-void MacroAssembler::AssertIsString(const Register& object) {
- if (emit_debug_code()) {
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- STATIC_ASSERT(kSmiTag == 0);
- Tst(object, kSmiTagMask);
- Check(ne, kOperandIsNotAString);
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
- Check(lo, kOperandIsNotAString);
- }
-}
-
-
-void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+void TurboAssembler::Check(Condition cond, BailoutReason reason) {
Label ok;
B(cond, &ok);
Abort(reason);
@@ -4117,17 +3824,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
Bind(&ok);
}
-
-void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
- Label ok;
- Cbz(reg, &ok);
- Abort(reason);
- // Will not return here.
- Bind(&ok);
-}
-
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
RecordComment("Abort message: ");
RecordComment(GetBailoutReason(reason));
@@ -4154,9 +3851,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
// Avoid infinite recursion; Push contains some assertions that use Abort.
NoUseRealAbortsScope no_real_aborts(this);
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
Move(x1, Smi::FromInt(static_cast<int>(reason)));
if (!has_frame_) {
@@ -4235,7 +3929,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
static const CPURegList kPCSVarargs =
CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
static const CPURegList kPCSVarargsFP =
- CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
+ CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1);
// We can use caller-saved registers as scratch values, except for the
// arguments and the PCS registers where they might need to go.
@@ -4244,7 +3938,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
tmp_list.Remove(kPCSVarargs);
tmp_list.Remove(arg0, arg1, arg2, arg3);
- CPURegList fp_tmp_list = kCallerSavedFP;
+ CPURegList fp_tmp_list = kCallerSavedV;
fp_tmp_list.Remove(kPCSVarargsFP);
fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
@@ -4269,7 +3963,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We might only need a W register here. We need to know the size of the
// argument so we can properly encode it for the simulator call.
if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
- } else if (args[i].IsFPRegister()) {
+ } else if (args[i].IsVRegister()) {
// In C, floats are always cast to doubles for varargs calls.
pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
} else {
@@ -4291,8 +3985,8 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Mov(new_arg, old_arg);
args[i] = new_arg;
} else {
- FPRegister old_arg = FPRegister(args[i]);
- FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
+ VRegister old_arg = VRegister(args[i]);
+ VRegister new_arg = temps.AcquireSameSizeAs(old_arg);
Fmov(new_arg, old_arg);
args[i] = new_arg;
}
@@ -4306,11 +4000,11 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
if (pcs[i].IsRegister()) {
Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
} else {
- DCHECK(pcs[i].IsFPRegister());
+ DCHECK(pcs[i].IsVRegister());
if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
- Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
+ Fmov(VRegister(pcs[i]), VRegister(args[i]));
} else {
- Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
+ Fcvt(VRegister(pcs[i]), VRegister(args[i]));
}
}
}
@@ -4343,11 +4037,10 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
CallPrintf(arg_count, pcs);
}
-
-void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
- // A call to printf needs special handling for the simulator, since the system
- // printf function will use a different instruction set and the procedure-call
- // standard will not be compatible.
+void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
+// A call to printf needs special handling for the simulator, since the system
+// printf function will use a different instruction set and the procedure-call
+// standard will not be compatible.
#ifdef USE_SIMULATOR
{ InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
hlt(kImmExceptionIsPrintf);
@@ -4398,11 +4091,11 @@ void MacroAssembler::Printf(const char * format,
// If csp is the stack pointer, PushCPURegList asserts that the size of each
// list is a multiple of 16 bytes.
PushCPURegList(kCallerSaved);
- PushCPURegList(kCallerSavedFP);
+ PushCPURegList(kCallerSavedV);
// We can use caller-saved registers as scratch values (except for argN).
CPURegList tmp_list = kCallerSaved;
- CPURegList fp_tmp_list = kCallerSavedFP;
+ CPURegList fp_tmp_list = kCallerSavedV;
tmp_list.Remove(arg0, arg1, arg2, arg3);
fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
TmpList()->set_list(tmp_list.list());
@@ -4421,7 +4114,7 @@ void MacroAssembler::Printf(const char * format,
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
Add(arg_sp, StackPointer(),
- kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
+ kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
@@ -4445,15 +4138,14 @@ void MacroAssembler::Printf(const char * format,
}
}
- PopCPURegList(kCallerSavedFP);
+ PopCPURegList(kCallerSavedV);
PopCPURegList(kCallerSaved);
TmpList()->set_list(old_tmp_list);
FPTmpList()->set_list(old_fp_tmp_list);
}
-
-void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
+void TurboAssembler::EmitFrameSetupForCodeAgePatching() {
// TODO(jbramley): Other architectures use the internal memcpy to copy the
// sequence. If this is a performance bottleneck, we should consider caching
// the sequence and copying it in the same way.
@@ -4463,9 +4155,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
EmitFrameSetupForCodeAgePatching(this);
}
-
-
-void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
+void TurboAssembler::EmitCodeAgeSequence(Code* stub) {
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
DCHECK(jssp.Is(StackPointer()));
@@ -4476,8 +4166,7 @@ void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
#undef __
#define __ assm->
-
-void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
+void TurboAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) {
Label start;
__ bind(&start);
@@ -4494,9 +4183,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
__ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
-
-void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
- Code * stub) {
+void TurboAssembler::EmitCodeAgeSequence(Assembler* assm, Code* stub) {
Label start;
__ bind(&start);
// When the stub is called, the sequence is replaced with the young sequence
@@ -4526,25 +4213,6 @@ bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
return is_young;
}
-
-void MacroAssembler::TruncatingDiv(Register result,
- Register dividend,
- int32_t divisor) {
- DCHECK(!AreAliased(result, dividend));
- DCHECK(result.Is32Bits() && dividend.Is32Bits());
- base::MagicNumbersForDivision<uint32_t> mag =
- base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
- Mov(result, mag.multiplier);
- Smull(result.X(), dividend, result);
- Asr(result.X(), result.X(), 32);
- bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
- if (divisor > 0 && neg) Add(result, result, dividend);
- if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
- if (mag.shift > 0) Asr(result, result, mag.shift);
- Add(result, result, Operand(dividend, LSR, 31));
-}
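
TruncatingDiv, deleted above, emitted signed division by a constant using a precomputed magic multiplier (base::SignedDivisionByConstant): multiply, keep the high 32 bits, apply the sign fix-ups, shift, and add the dividend's sign bit. Below is a self-contained C++ model of that sequence; the magic/shift pair is the classic one for divisor 7 and is used purely for illustration.

#include <cassert>
#include <cstdint>

// C++ model of the instruction sequence TruncatingDiv emitted
// (Smull, Asr #32, sign fix-ups, Asr #shift, add the dividend's sign bit).
int32_t TruncatingDivBy7(int32_t dividend) {
  const uint32_t magic = 0x92492493u;  // multiplier for divisor 7
  const int shift = 2;
  // Smull + Asr #32: signed 32x32 -> 64 multiply, keep the high 32 bits.
  int64_t product =
      static_cast<int64_t>(static_cast<int32_t>(magic)) * dividend;
  int32_t quotient = static_cast<int32_t>(product >> 32);
  // Divisor > 0 and multiplier negative: add the dividend back.
  quotient += dividend;
  quotient >>= shift;                                 // Asr by mag.shift
  quotient += static_cast<uint32_t>(dividend) >> 31;  // round toward zero
  return quotient;
}

int main() {
  for (int32_t n = -100000; n <= 100000; ++n) {
    assert(TruncatingDivBy7(n) == n / 7);
  }
  return 0;
}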
-
-
#undef __
@@ -4559,10 +4227,9 @@ Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
return Register::Create(code, reg.SizeInBits());
}
-
-FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) {
int code = AcquireNextAvailable(availablefp_).code();
- return FPRegister::Create(code, reg.SizeInBits());
+ return VRegister::Create(code, reg.SizeInBits());
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 6c77dd5b01..12f7516f6b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -52,15 +52,15 @@ namespace internal {
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
-#define LS_MACRO_LIST(V) \
- V(Ldrb, Register&, rt, LDRB_w) \
- V(Strb, Register&, rt, STRB_w) \
- V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
- V(Ldrh, Register&, rt, LDRH_w) \
- V(Strh, Register&, rt, STRH_w) \
- V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
- V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
- V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
V(Ldrsw, Register&, rt, LDRSW_x)
#define LSPAIR_MACRO_LIST(V) \
@@ -177,429 +177,563 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
-class MacroAssembler : public Assembler {
+class TurboAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size),
+ isolate_(isolate),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ tmp_list_(DefaultTmpList()),
+ fptmp_list_(DefaultFPTmpList()),
+ sp_(jssp),
+ use_real_aborts_(true) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
+
+ // The Abort method should call a V8 runtime function, but the CallRuntime
+ // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
+ // use a simpler abort mechanism that doesn't depend on CEntryStub.
+ //
+ // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
+ // being generated.
+ bool use_real_aborts() const { return use_real_aborts_; }
+
+ class NoUseRealAbortsScope {
+ public:
+ explicit NoUseRealAbortsScope(TurboAssembler* tasm)
+ : saved_(tasm->use_real_aborts_), tasm_(tasm) {
+ tasm_->use_real_aborts_ = false;
+ }
+ ~NoUseRealAbortsScope() { tasm_->use_real_aborts_ = saved_; }
+
+ private:
+ bool saved_;
+ TurboAssembler* tasm_;
+ };
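
As the comment above explains, Abort normally reaches the runtime through CEntryStub, so while CEntryStub itself is being generated a simpler abort path has to be used; NoUseRealAbortsScope is the RAII guard that clears the flag for a region of code generation and restores it on exit. The following self-contained model shows the pattern; the TurboAssemblerModel type is a stand-in, not the real class.

#include <cassert>

struct TurboAssemblerModel {
  bool use_real_aborts = true;

  // RAII guard: save the flag, clear it for the scope's lifetime, restore it.
  struct NoUseRealAbortsScope {
    explicit NoUseRealAbortsScope(TurboAssemblerModel* tasm)
        : saved_(tasm->use_real_aborts), tasm_(tasm) {
      tasm_->use_real_aborts = false;
    }
    ~NoUseRealAbortsScope() { tasm_->use_real_aborts = saved_; }

   private:
    bool saved_;
    TurboAssemblerModel* tasm_;
  };
};

int main() {
  TurboAssemblerModel tasm;
  {
    TurboAssemblerModel::NoUseRealAbortsScope scope(&tasm);
    assert(!tasm.use_real_aborts);  // simple abort path while the stub is built
  }
  assert(tasm.use_real_aborts);  // restored on scope exit
  return 0;
}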
+
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
Isolate* isolate() const { return isolate_; }
- Handle<Object> CodeObject() {
+ Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
- // Instruction set functions ------------------------------------------------
- // Logical macros.
- inline void And(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Ands(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Bic(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Bics(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Orr(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Orn(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Eor(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Eon(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Tst(const Register& rn, const Operand& operand);
- void LogicalMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op);
+#if DEBUG
+ void set_allow_macro_instructions(bool value) {
+ allow_macro_instructions_ = value;
+ }
+ bool allow_macro_instructions() const { return allow_macro_instructions_; }
+#endif
- // Add and sub macros.
- inline void Add(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Adds(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Sub(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Subs(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Cmn(const Register& rn, const Operand& operand);
- inline void Cmp(const Register& rn, const Operand& operand);
- inline void Neg(const Register& rd,
- const Operand& operand);
- inline void Negs(const Register& rd,
- const Operand& operand);
+ // Set the current stack pointer, but don't generate any code.
+ inline void SetStackPointer(const Register& stack_pointer) {
+ DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
- void AddSubMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on arm64.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
- // Add/sub with carry macros.
- inline void Adc(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Adcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Sbc(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Sbcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
- inline void Ngc(const Register& rd,
- const Operand& operand);
- inline void Ngcs(const Register& rd,
- const Operand& operand);
- void AddSubWithCarryMacro(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubWithCarryOp op);
+ inline void InitializeRootRegister();
- // Move macros.
- void Mov(const Register& rd,
- const Operand& operand,
+ void Mov(const Register& rd, const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
void Mov(const Register& rd, uint64_t imm);
- inline void Mvn(const Register& rd, uint64_t imm);
- void Mvn(const Register& rd, const Operand& operand);
- static bool IsImmMovn(uint64_t imm, unsigned reg_size);
- static bool IsImmMovz(uint64_t imm, unsigned reg_size);
- static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+ inline void Mov(const Register& rd, const Register& rm);
+ void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
+ int vn_index) {
+ DCHECK(allow_macro_instructions());
+ mov(vd, vd_index, vn, vn_index);
+ }
+ void Mov(const VRegister& vd, const VRegister& vn, int index) {
+ DCHECK(allow_macro_instructions());
+ mov(vd, vn, index);
+ }
+ void Mov(const VRegister& vd, int vd_index, const Register& rn) {
+ DCHECK(allow_macro_instructions());
+ mov(vd, vd_index, rn);
+ }
+ void Mov(const Register& rd, const VRegister& vn, int vn_index) {
+ DCHECK(allow_macro_instructions());
+ mov(rd, vn, vn_index);
+ }
- // Try to move an immediate into the destination register in a single
- // instruction. Returns true for success, and updates the contents of dst.
- // Returns false, otherwise.
- bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+ // This is required for compatibility with architecture independent code.
+ // Remove if not needed.
+ void Move(Register dst, Register src);
+ void Move(Register dst, Handle<HeapObject> x);
+ void Move(Register dst, Smi* src);
- // Move an immediate into register dst, and return an Operand object for use
- // with a subsequent instruction that accepts a shift. The value moved into
- // dst is not necessarily equal to imm; it may have had a shifting operation
- // applied to it that will be subsequently undone by the shift applied in the
- // Operand.
- Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
- PreShiftImmMode mode);
+// NEON by element instructions.
+#define NEON_BYELEMENT_MACRO_LIST(V) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmul, Fmul) \
+ V(fmulx, Fmulx) \
+ V(mul, Mul) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
+ int vm_index) { \
+ DCHECK(allow_macro_instructions()); \
+ ASM(vd, vn, vm, vm_index); \
+ }
+ NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
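
These NEON wrappers are generated with an X-macro: the *_MACRO_LIST macro enumerates (assembler mnemonic, MacroAssembler name) pairs, and DEFINE_MACRO_ASM_FUNC is expanded once per pair so every wrapper gets the same check-then-forward body. A small self-contained sketch of the technique, with hypothetical names:

#include <iostream>

// The list macro enumerates (lower_case, CamelCase) pairs.
#define DEMO_OP_LIST(V) \
  V(add, Add)           \
  V(mul, Mul)

// "Raw" operations, standing in for the assembler mnemonics.
int add(int a, int b) { return a + b; }
int mul(int a, int b) { return a * b; }

// Generator: one wrapper per list entry, mirroring the DCHECK-plus-forward
// shape of DEFINE_MACRO_ASM_FUNC.
#define DEFINE_DEMO_FUNC(ASM, MASM)     \
  int MASM(int a, int b) {              \
    std::cout << "emitting " #ASM "\n"; \
    return ASM(a, b);                   \
  }
DEMO_OP_LIST(DEFINE_DEMO_FUNC)
#undef DEFINE_DEMO_FUNC

int main() {
  std::cout << Add(2, 3) << " " << Mul(2, 3) << "\n";  // prints 5 6
  return 0;
}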
+
+// NEON 2 vector register instructions.
+#define NEON_2VREG_MACRO_LIST(V) \
+ V(abs, Abs) \
+ V(addp, Addp) \
+ V(addv, Addv) \
+ V(cls, Cls) \
+ V(clz, Clz) \
+ V(cnt, Cnt) \
+ V(faddp, Faddp) \
+ V(fcvtas, Fcvtas) \
+ V(fcvtau, Fcvtau) \
+ V(fcvtms, Fcvtms) \
+ V(fcvtmu, Fcvtmu) \
+ V(fcvtns, Fcvtns) \
+ V(fcvtnu, Fcvtnu) \
+ V(fcvtps, Fcvtps) \
+ V(fcvtpu, Fcvtpu) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxnmv, Fmaxnmv) \
+ V(fmaxp, Fmaxp) \
+ V(fmaxv, Fmaxv) \
+ V(fminnmp, Fminnmp) \
+ V(fminnmv, Fminnmv) \
+ V(fminp, Fminp) \
+ V(fminv, Fminv) \
+ V(fneg, Fneg) \
+ V(frecpe, Frecpe) \
+ V(frecpx, Frecpx) \
+ V(frinta, Frinta) \
+ V(frinti, Frinti) \
+ V(frintm, Frintm) \
+ V(frintn, Frintn) \
+ V(frintp, Frintp) \
+ V(frintx, Frintx) \
+ V(frintz, Frintz) \
+ V(frsqrte, Frsqrte) \
+ V(fsqrt, Fsqrt) \
+ V(mov, Mov) \
+ V(mvn, Mvn) \
+ V(neg, Neg) \
+ V(not_, Not) \
+ V(rbit, Rbit) \
+ V(rev16, Rev16) \
+ V(rev32, Rev32) \
+ V(rev64, Rev64) \
+ V(sadalp, Sadalp) \
+ V(saddlp, Saddlp) \
+ V(saddlv, Saddlv) \
+ V(smaxv, Smaxv) \
+ V(sminv, Sminv) \
+ V(sqabs, Sqabs) \
+ V(sqneg, Sqneg) \
+ V(sqxtn2, Sqxtn2) \
+ V(sqxtn, Sqxtn) \
+ V(sqxtun2, Sqxtun2) \
+ V(sqxtun, Sqxtun) \
+ V(suqadd, Suqadd) \
+ V(sxtl2, Sxtl2) \
+ V(sxtl, Sxtl) \
+ V(uadalp, Uadalp) \
+ V(uaddlp, Uaddlp) \
+ V(uaddlv, Uaddlv) \
+ V(umaxv, Umaxv) \
+ V(uminv, Uminv) \
+ V(uqxtn2, Uqxtn2) \
+ V(uqxtn, Uqxtn) \
+ V(urecpe, Urecpe) \
+ V(ursqrte, Ursqrte) \
+ V(usqadd, Usqadd) \
+ V(uxtl2, Uxtl2) \
+ V(uxtl, Uxtl) \
+ V(xtn2, Xtn2) \
+ V(xtn, Xtn)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, const VRegister& vn) { \
+ DCHECK(allow_macro_instructions()); \
+ ASM(vd, vn); \
+ }
+ NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
+#undef NEON_2VREG_MACRO_LIST
+
+// NEON 2 vector register with immediate instructions.
+#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fcmle, Fcmle) \
+ V(fcmlt, Fcmlt)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
+ DCHECK(allow_macro_instructions()); \
+ ASM(vd, vn, imm); \
+ }
+ NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
+
+// NEON 3 vector register instructions.
+#define NEON_3VREG_MACRO_LIST(V) \
+ V(add, Add) \
+ V(addhn2, Addhn2) \
+ V(addhn, Addhn) \
+ V(addp, Addp) \
+ V(and_, And) \
+ V(bic, Bic) \
+ V(bif, Bif) \
+ V(bit, Bit) \
+ V(bsl, Bsl) \
+ V(cmeq, Cmeq) \
+ V(cmge, Cmge) \
+ V(cmgt, Cmgt) \
+ V(cmhi, Cmhi) \
+ V(cmhs, Cmhs) \
+ V(cmtst, Cmtst) \
+ V(eor, Eor) \
+ V(fabd, Fabd) \
+ V(facge, Facge) \
+ V(facgt, Facgt) \
+ V(faddp, Faddp) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxp, Fmaxp) \
+ V(fminnmp, Fminnmp) \
+ V(fminp, Fminp) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(frecps, Frecps) \
+ V(frsqrts, Frsqrts) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(mul, Mul) \
+ V(orn, Orn) \
+ V(orr, Orr) \
+ V(pmull2, Pmull2) \
+ V(pmull, Pmull) \
+ V(pmul, Pmul) \
+ V(raddhn2, Raddhn2) \
+ V(raddhn, Raddhn) \
+ V(rsubhn2, Rsubhn2) \
+ V(rsubhn, Rsubhn) \
+ V(sabal2, Sabal2) \
+ V(sabal, Sabal) \
+ V(saba, Saba) \
+ V(sabdl2, Sabdl2) \
+ V(sabdl, Sabdl) \
+ V(sabd, Sabd) \
+ V(saddl2, Saddl2) \
+ V(saddl, Saddl) \
+ V(saddw2, Saddw2) \
+ V(saddw, Saddw) \
+ V(shadd, Shadd) \
+ V(shsub, Shsub) \
+ V(smaxp, Smaxp) \
+ V(smax, Smax) \
+ V(sminp, Sminp) \
+ V(smin, Smin) \
+ V(smlal2, Smlal2) \
+ V(smlal, Smlal) \
+ V(smlsl2, Smlsl2) \
+ V(smlsl, Smlsl) \
+ V(smull2, Smull2) \
+ V(smull, Smull) \
+ V(sqadd, Sqadd) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqdmull, Sqdmull) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqrshl, Sqrshl) \
+ V(sqshl, Sqshl) \
+ V(sqsub, Sqsub) \
+ V(srhadd, Srhadd) \
+ V(srshl, Srshl) \
+ V(sshl, Sshl) \
+ V(ssubl2, Ssubl2) \
+ V(ssubl, Ssubl) \
+ V(ssubw2, Ssubw2) \
+ V(ssubw, Ssubw) \
+ V(subhn2, Subhn2) \
+ V(subhn, Subhn) \
+ V(sub, Sub) \
+ V(trn1, Trn1) \
+ V(trn2, Trn2) \
+ V(uabal2, Uabal2) \
+ V(uabal, Uabal) \
+ V(uaba, Uaba) \
+ V(uabdl2, Uabdl2) \
+ V(uabdl, Uabdl) \
+ V(uabd, Uabd) \
+ V(uaddl2, Uaddl2) \
+ V(uaddl, Uaddl) \
+ V(uaddw2, Uaddw2) \
+ V(uaddw, Uaddw) \
+ V(uhadd, Uhadd) \
+ V(uhsub, Uhsub) \
+ V(umaxp, Umaxp) \
+ V(umax, Umax) \
+ V(uminp, Uminp) \
+ V(umin, Umin) \
+ V(umlal2, Umlal2) \
+ V(umlal, Umlal) \
+ V(umlsl2, Umlsl2) \
+ V(umlsl, Umlsl) \
+ V(umull2, Umull2) \
+ V(umull, Umull) \
+ V(uqadd, Uqadd) \
+ V(uqrshl, Uqrshl) \
+ V(uqshl, Uqshl) \
+ V(uqsub, Uqsub) \
+ V(urhadd, Urhadd) \
+ V(urshl, Urshl) \
+ V(ushl, Ushl) \
+ V(usubl2, Usubl2) \
+ V(usubl, Usubl) \
+ V(usubw2, Usubw2) \
+ V(usubw, Usubw) \
+ V(uzp1, Uzp1) \
+ V(uzp2, Uzp2) \
+ V(zip1, Zip1) \
+ V(zip2, Zip2)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
+ DCHECK(allow_macro_instructions()); \
+ ASM(vd, vn, vm); \
+ }
+ NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
- // Conditional macros.
- inline void Ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond);
- inline void Ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond);
- void ConditionalCompareMacro(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
- ConditionalCompareOp op);
- void Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond);
+ void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
+ DCHECK(allow_macro_instructions());
+ bic(vd, imm8, left_shift);
+ }
- // Load/store macros.
-#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
- inline void FN(const REGTYPE REG, const MemOperand& addr);
- LS_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
+  // This is required for compatibility with architecture independent code.
+ inline void jmp(Label* L);
- void LoadStoreMacro(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op);
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+ inline void B(Label* label);
+ inline void B(Condition cond, Label* label);
+ void B(Label* label, Condition cond);
-#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
- inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
- LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
- void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& addr, LoadStorePairOp op);
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
-// Load-acquire/store-release macros.
-#define DECLARE_FUNCTION(FN, OP) \
- inline void FN(const Register& rt, const Register& rn);
- LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
+ bool AllowThisStubCall(CodeStub* stub);
+ void CallStubDelayed(CodeStub* stub);
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-#define DECLARE_FUNCTION(FN, OP) \
- inline void FN(const Register& rs, const Register& rt, const Register& rn);
- STLX_MACRO_LIST(DECLARE_FUNCTION)
-#undef DECLARE_FUNCTION
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+  // Neither |callee_args_count| nor |caller_args_count_reg| includes the
+  // receiver. |callee_args_count| is not modified; |caller_args_count_reg|
+  // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
- // V8-specific load/store helpers.
- void Load(const Register& rt, const MemOperand& addr, Representation r);
- void Store(const Register& rt, const MemOperand& addr, Representation r);
+ inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register smi);
- enum AdrHint {
- // The target must be within the immediate range of adr.
- kAdrNear,
- // The target may be outside of the immediate range of adr. Additional
- // instructions may be emitted.
- kAdrFar
- };
- void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
+
+ void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+
+ inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
+
+ // Print a message to stderr and abort execution.
+ void Abort(BailoutReason reason);
+
+ // If emit_debug_code() is true, emit a run-time check to ensure that
+ // StackPointer() does not point below the system stack pointer.
+ //
+ // Whilst it is architecturally legal for StackPointer() to point below csp,
+ // it can be evidence of a potential bug because the ABI forbids accesses
+ // below csp.
+ //
+ // If StackPointer() is the system stack pointer (csp), then csp will be
+ // dereferenced to cause the processor (or simulator) to abort if it is not
+ // properly aligned.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertStackConsistency();
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
- // Branch type inversion relies on these relations.
- STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
- (reg_bit_clear == (reg_bit_set ^ 1)) &&
- (always == (never ^ 1)));
-
- void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+ // Try to move an immediate into the destination register in a single
+ // instruction. Returns true for success, and updates the contents of dst.
+ // Returns false, otherwise.
+ bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
- inline void B(Label* label);
- inline void B(Condition cond, Label* label);
- void B(Label* label, Condition cond);
- inline void Bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
inline void Bind(Label* label);
- inline void Bl(Label* label);
- inline void Blr(const Register& xn);
- inline void Br(const Register& xn);
- inline void Brk(int code);
- void Cbnz(const Register& rt, Label* label);
- void Cbz(const Register& rt, Label* label);
- inline void Cinc(const Register& rd, const Register& rn, Condition cond);
- inline void Cinv(const Register& rd, const Register& rn, Condition cond);
- inline void Cls(const Register& rd, const Register& rn);
- inline void Clz(const Register& rd, const Register& rn);
- inline void Cneg(const Register& rd, const Register& rn, Condition cond);
- inline void CzeroX(const Register& rd, Condition cond);
- inline void CmovX(const Register& rd, const Register& rn, Condition cond);
- inline void Cset(const Register& rd, Condition cond);
- inline void Csetm(const Register& rd, Condition cond);
- inline void Csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
- inline void Csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
- inline void Csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond);
- inline void Dmb(BarrierDomain domain, BarrierType type);
- inline void Dsb(BarrierDomain domain, BarrierType type);
- inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
- inline void Extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb);
- inline void Fabs(const FPRegister& fd, const FPRegister& fn);
- inline void Fadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fccmp(const FPRegister& fn,
- const FPRegister& fm,
- StatusFlags nzcv,
- Condition cond);
- inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
- inline void Fcmp(const FPRegister& fn, double value);
- inline void Fcsel(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- Condition cond);
- inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
- inline void Fcvtas(const Register& rd, const FPRegister& fn);
- inline void Fcvtau(const Register& rd, const FPRegister& fn);
- inline void Fcvtms(const Register& rd, const FPRegister& fn);
- inline void Fcvtmu(const Register& rd, const FPRegister& fn);
- inline void Fcvtns(const Register& rd, const FPRegister& fn);
- inline void Fcvtnu(const Register& rd, const FPRegister& fn);
- inline void Fcvtzs(const Register& rd, const FPRegister& fn);
- inline void Fcvtzu(const Register& rd, const FPRegister& fn);
- inline void Fdiv(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Fmax(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmaxnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmin(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fminnm(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fmov(FPRegister fd, FPRegister fn);
- inline void Fmov(FPRegister fd, Register rn);
- // Provide explicit double and float interfaces for FP immediate moves, rather
- // than relying on implicit C++ casts. This allows signalling NaNs to be
- // preserved when the immediate matches the format of fd. Most systems convert
- // signalling NaNs to quiet NaNs when converting between float and double.
- inline void Fmov(FPRegister fd, double imm);
- inline void Fmov(FPRegister fd, float imm);
- // Provide a template to allow other types to be converted automatically.
- template<typename T>
- void Fmov(FPRegister fd, T imm) {
- DCHECK(allow_macro_instructions_);
- Fmov(fd, static_cast<double>(imm));
+
+ static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ static CPURegList DefaultTmpList();
+ static CPURegList DefaultFPTmpList();
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ inline const Register& StackPointer() const { return sp_; }
+
+ // Move macros.
+ inline void Mvn(const Register& rd, uint64_t imm);
+ void Mvn(const Register& rd, const Operand& operand);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
+ void LogicalMacro(const Register& rd, const Register& rn,
+ const Operand& operand, LogicalOp op);
+ void AddSubMacro(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S, AddSubOp op);
+ inline void Orr(const Register& rd, const Register& rn,
+ const Operand& operand);
+ void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
+ DCHECK(allow_macro_instructions());
+ orr(vd, imm8, left_shift);
}
- inline void Fmov(Register rd, FPRegister fn);
- inline void Fmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Fmul(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Fneg(const FPRegister& fd, const FPRegister& fn);
- inline void Fnmadd(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Fnmsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm,
- const FPRegister& fa);
- inline void Frinta(const FPRegister& fd, const FPRegister& fn);
- inline void Frintm(const FPRegister& fd, const FPRegister& fn);
- inline void Frintn(const FPRegister& fd, const FPRegister& fn);
- inline void Frintp(const FPRegister& fd, const FPRegister& fn);
- inline void Frintz(const FPRegister& fd, const FPRegister& fn);
- inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
- inline void Fsub(const FPRegister& fd,
- const FPRegister& fn,
- const FPRegister& fm);
- inline void Hint(SystemHint code);
- inline void Hlt(int code);
- inline void Isb();
- inline void Ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src);
+ inline void Orn(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Eor(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Eon(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void And(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Ands(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Tst(const Register& rn, const Operand& operand);
+ inline void Bic(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Blr(const Register& xn);
+ inline void Cmp(const Register& rn, const Operand& operand);
+ inline void Subs(const Register& rd, const Register& rn,
+ const Operand& operand);
+
+ // Emits a runtime assert that the CSP is aligned.
+ void AssertCspAligned();
+
// Load a literal from the inline constant pool.
- inline void Ldr(const CPURegister& rt, const Immediate& imm);
+ inline void Ldr(const CPURegister& rt, const Operand& imm);
// Helper function for double immediate.
inline void Ldr(const CPURegister& rt, double imm);
- inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
- inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
- inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
- inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
- inline void Madd(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
- inline void Mov(const Register& rd, const Register& rm);
- inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
- inline void Mrs(const Register& rt, SystemRegister sysreg);
- inline void Msr(SystemRegister sysreg, const Register& rt);
- inline void Msub(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Mul(const Register& rd, const Register& rn, const Register& rm);
- inline void Nop() { nop(); }
- inline void Rbit(const Register& rd, const Register& rn);
- inline void Ret(const Register& xn = lr);
- inline void Rev(const Register& rd, const Register& rn);
- inline void Rev16(const Register& rd, const Register& rn);
- inline void Rev32(const Register& rd, const Register& rn);
- inline void Ror(const Register& rd, const Register& rs, unsigned shift);
- inline void Ror(const Register& rd, const Register& rn, const Register& rm);
- inline void Sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Scvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits = 0);
- inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
- inline void Smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Smull(const Register& rd,
- const Register& rn,
- const Register& rm);
- inline void Smulh(const Register& rd,
- const Register& rn,
- const Register& rm);
- inline void Umull(const Register& rd, const Register& rn, const Register& rm);
- inline void Stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst);
- inline void Sxtb(const Register& rd, const Register& rn);
- inline void Sxth(const Register& rd, const Register& rn);
- inline void Sxtw(const Register& rd, const Register& rn);
- void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
- void Tbz(const Register& rt, unsigned bit_pos, Label* label);
- inline void Ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width);
- inline void Ucvtf(const FPRegister& fd,
- const Register& rn,
- unsigned fbits = 0);
- inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
- inline void Umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra);
- inline void Uxtb(const Register& rd, const Register& rn);
- inline void Uxth(const Register& rd, const Register& rn);
- inline void Uxtw(const Register& rd, const Register& rn);
- // Pseudo-instructions ------------------------------------------------------
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // In debug mode, both of these will write invalid data into the claimed or
+ // dropped space.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ //
+ // Note that unit_size must be specified in bytes. For variants which take a
+ // Register count, the unit size must be a power of two.
+ inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
+ inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
+ inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
+ inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
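
Claim and Drop only move the stack pointer; the amount is count * unit_size bytes, and with csp as the stack pointer the total has to stay a multiple of 16. A trivial self-contained model of that arithmetic (kXRegSize is redefined locally for the sketch):

#include <cstdint>

constexpr uint64_t kXRegSize = 8;

constexpr uint64_t ClaimBytes(int64_t count, uint64_t unit_size = kXRegSize) {
  return static_cast<uint64_t>(count) * unit_size;
}

int main() {
  static_assert(ClaimBytes(3) == 24, "three X registers");
  static_assert(ClaimBytes(4, 16) == 64, "four 16-byte slots");
  static_assert(ClaimBytes(4, 16) % 16 == 0, "csp needs 16-byte multiples");
  return 0;
}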
+
+ // Re-synchronizes the system stack pointer (csp) with the current stack
+ // pointer (according to StackPointer()).
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void SyncSystemStackPointer();
- // Compute rd = abs(rm).
- // This function clobbers the condition flags. On output the overflow flag is
- // set iff the negation overflowed.
+ // Push the system stack pointer (csp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
//
- // If rm is the minimum representable value, the result is not representable.
- // Handlers for each case can be specified using the relevant labels.
- void Abs(const Register& rd, const Register& rm,
- Label * is_not_representable = NULL,
- Label * is_representable = NULL);
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed. The amount pushed will be increased as necessary
+ // to ensure csp remains aligned to 16 bytes.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void BumpSystemStackPointer(const Operand& space);
+
+ // Add and sub macros.
+ inline void Add(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Adds(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Sub(const Register& rd, const Register& rn,
+ const Operand& operand);
+
+ // Abort execution if argument is not a positive or zero integer, enabled via
+ // --debug-code.
+ void AssertPositiveOrZero(Register value);
+
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ inline void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
// Push or pop up to 4 registers of the same width to or from the stack,
// using the current stack pointer as set by SetStackPointer.
@@ -639,7 +773,15 @@ class MacroAssembler : public Assembler {
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5 = NoReg,
const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
- void Push(const Register& src0, const FPRegister& src1);
+ void Push(const Register& src0, const VRegister& src1);
+
+ // This is a convenience method for pushing a single Handle<Object>.
+ inline void Push(Handle<HeapObject> object);
+ inline void Push(Smi* smi);
+
+ // Aliases of Push and Pop, required for V8 compatibility.
+ inline void push(Register src) { Push(src); }
+ inline void pop(Register dst) { Pop(dst); }
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
@@ -654,6 +796,774 @@ class MacroAssembler : public Assembler {
void PushCPURegList(CPURegList registers);
void PopCPURegList(CPURegList registers);
+ // Move an immediate into register dst, and return an Operand object for use
+ // with a subsequent instruction that accepts a shift. The value moved into
+ // dst is not necessarily equal to imm; it may have had a shifting operation
+ // applied to it that will be subsequently undone by the shift applied in the
+ // Operand.
+ Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
+ PreShiftImmMode mode);
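
MoveImmediateForShiftedOp exploits the arm64 shifted-immediate forms: when an immediate has trailing zero bits, a smaller value can be moved into dst and the consuming instruction re-applies the shift, which is why the value left in dst may differ from imm. A toy self-contained model of the pre-shift idea (not the real encoding rules):

#include <cassert>
#include <cstdint>

struct ShiftedOperand {
  uint64_t base;   // value placed in the destination register
  unsigned shift;  // left shift the consuming instruction will apply
};

// Strip trailing zero bits (capped at 12, loosely mirroring the 12-bit
// shifted immediates on arm64) and remember the shift for the consumer.
ShiftedOperand PreShiftImmediate(uint64_t imm) {
  unsigned shift = 0;
  while (imm != 0 && (imm & 1) == 0 && shift < 12) {
    imm >>= 1;
    ++shift;
  }
  return {imm, shift};
}

int main() {
  ShiftedOperand op = PreShiftImmediate(0x3F000);
  assert((op.base << op.shift) == 0x3F000);  // the operand shift undoes the pre-shift
  return 0;
}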
+
+ void CheckPageFlagSet(const Register& object, const Register& scratch,
+ int mask, Label* if_any_set);
+
+ void CheckPageFlagClear(const Register& object, const Register& scratch,
+ int mask, Label* if_all_clear);
+
+ // Perform necessary maintenance operations before a push or after a pop.
+ //
+ // Note that size is specified in bytes.
+ void PushPreamble(Operand total_size);
+ void PopPostamble(Operand total_size);
+
+ void PushPreamble(int count, int size);
+ void PopPostamble(int count, int size);
+
+ // Test the bits of register defined by bit_pattern, and branch if ANY of
+ // those bits are set. May corrupt the status flags.
+ inline void TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern, Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ALL of
+  // those bits are clear (i.e. not set). May corrupt the status flags.
+ inline void TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern, Label* label);
+
+ inline void Brk(int code);
+
+ inline void JumpIfSmi(Register value, Label* smi_label,
+ Label* not_smi_label = NULL);
+
+ inline void Fmov(VRegister fd, VRegister fn);
+ inline void Fmov(VRegister fd, Register rn);
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of fd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ inline void Fmov(VRegister fd, double imm);
+ inline void Fmov(VRegister fd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template <typename T>
+ void Fmov(VRegister fd, T imm) {
+ DCHECK(allow_macro_instructions());
+ Fmov(fd, static_cast<double>(imm));
+ }
+ inline void Fmov(Register rd, VRegister fn);
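
The explicit float and double Fmov overloads exist because an implicit float-to-double conversion in C++ typically quiets a signalling NaN, so the exact bit pattern requested would be lost. A self-contained demonstration of that quieting effect:

#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  uint32_t snan_bits = 0x7F800001;  // float signalling NaN (quiet bit clear)
  float snan;
  std::memcpy(&snan, &snan_bits, sizeof snan);

  double promoted = snan;  // implicit conversion; typically quiets the NaN
  uint64_t promoted_bits;
  std::memcpy(&promoted_bits, &promoted, sizeof promoted_bits);

  // On common implementations the quiet bit (bit 51 of the double) is now
  // set, so the original signalling payload is not preserved.
  std::cout << std::hex << promoted_bits << "\n";
  return 0;
}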
+
+ void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
+ int shift_amount = 0);
+ void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
+
+ void Jump(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+
+ void Call(Register target);
+ void Call(Label* target);
+ void Call(Address target, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
+
+ // For every Call variant, there is a matching CallSize function that returns
+ // the size (in bytes) of the call sequence.
+ static int CallSize(Register target);
+ static int CallSize(Label* target);
+ static int CallSize(Address target, RelocInfo::Mode rmode);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
+
+ // Calls a C function.
+ // The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_reg_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input);
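
The truncation described above is the ECMA-262 ToInt32 conversion used by the JS bitwise operators: truncate toward zero, reduce modulo 2^32, and reinterpret as a signed 32-bit value. A self-contained reference model of that semantic (not the delayed code-stub machinery itself):

#include <cassert>
#include <cmath>
#include <cstdint>

// Reference model of ECMA-262 ToInt32.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;  // NaN and +/-Infinity map to 0
  double truncated = std::trunc(value);
  // fmod keeps the dividend's sign; converting a negative value through
  // uint64_t wraps modulo 2^64, which then reduces correctly to 32 bits.
  uint64_t bits = static_cast<uint64_t>(
      static_cast<int64_t>(std::fmod(truncated, 4294967296.0)));
  return static_cast<int32_t>(static_cast<uint32_t>(bits));
}

int main() {
  assert(ToInt32(3.9) == 3);
  assert(ToInt32(-3.9) == -3);
  assert(ToInt32(4294967296.0 + 5.0) == 5);   // 2^32 + 5 -> 5
  assert(ToInt32(2147483648.0) == INT32_MIN); // 2^31 wraps to INT32_MIN
  return 0;
}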
+
+ inline void Mul(const Register& rd, const Register& rn, const Register& rm);
+
+ inline void Fcvtzs(const Register& rd, const VRegister& fn);
+ void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
+ DCHECK(allow_macro_instructions());
+ fcvtzs(vd, vn, fbits);
+ }
+
+ inline void Fcvtzu(const Register& rd, const VRegister& fn);
+ void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
+ DCHECK(allow_macro_instructions());
+ fcvtzu(vd, vn, fbits);
+ }
+
+ inline void Madd(const Register& rd, const Register& rn, const Register& rm,
+ const Register& ra);
+ inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
+ inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Msub(const Register& rd, const Register& rn, const Register& rm,
+ const Register& ra);
+
+ inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
+ inline void Umull(const Register& rd, const Register& rn, const Register& rm);
+ inline void Smull(const Register& rd, const Register& rn, const Register& rm);
+
+ inline void Sxtb(const Register& rd, const Register& rn);
+ inline void Sxth(const Register& rd, const Register& rn);
+ inline void Sxtw(const Register& rd, const Register& rn);
+ inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
+ unsigned width);
+ inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
+ unsigned width);
+ inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
+ inline void Ror(const Register& rd, const Register& rs, unsigned shift);
+ inline void Ror(const Register& rd, const Register& rn, const Register& rm);
+ inline void Cmn(const Register& rn, const Operand& operand);
+ inline void Fadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fcmp(const VRegister& fn, const VRegister& fm);
+ inline void Fcmp(const VRegister& fn, double value);
+ inline void Fabs(const VRegister& fd, const VRegister& fn);
+ inline void Fmul(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fdiv(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fmax(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fmin(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Rbit(const Register& rd, const Register& rn);
+
+ enum AdrHint {
+ // The target must be within the immediate range of adr.
+ kAdrNear,
+ // The target may be outside of the immediate range of adr. Additional
+ // instructions may be emitted.
+ kAdrFar
+ };
+ void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
+
+ // Add/sub with carry macros.
+ inline void Adc(const Register& rd, const Register& rn,
+ const Operand& operand);
+
+ // Conditional macros.
+ inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
+ Condition cond);
+
+ inline void Clz(const Register& rd, const Register& rn);
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Poke(const CPURegister& src, const Operand& offset);
+
+ // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
+
+ inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
+ unsigned width);
+
+ inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
+ unsigned width);
+
+ inline void Scvtf(const VRegister& fd, const Register& rn,
+ unsigned fbits = 0);
+ void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
+ DCHECK(allow_macro_instructions());
+ scvtf(vd, vn, fbits);
+ }
+ inline void Ucvtf(const VRegister& fd, const Register& rn,
+ unsigned fbits = 0);
+ void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
+ DCHECK(allow_macro_instructions());
+ ucvtf(vd, vn, fbits);
+ }
+
+ void AssertFPCRState(Register fpcr = NoReg);
+ void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
+ void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
+
+ inline void Cset(const Register& rd, Condition cond);
+ inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
+ Condition cond);
+ inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
+ Condition cond);
+
+ inline void Fcvt(const VRegister& fd, const VRegister& fn);
+
+ int ActivationFrameAlignment();
+
+ void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
+ int vn_index) {
+ DCHECK(allow_macro_instructions());
+ ins(vd, vd_index, vn, vn_index);
+ }
+ void Ins(const VRegister& vd, int vd_index, const Register& rn) {
+ DCHECK(allow_macro_instructions());
+ ins(vd, vd_index, rn);
+ }
+
+ inline void Bl(Label* label);
+ inline void Br(const Register& xn);
+
+ inline void Uxtb(const Register& rd, const Register& rn);
+ inline void Uxth(const Register& rd, const Register& rn);
+ inline void Uxtw(const Register& rd, const Register& rn);
+
+ void Dup(const VRegister& vd, const VRegister& vn, int index) {
+ DCHECK(allow_macro_instructions());
+ dup(vd, vn, index);
+ }
+ void Dup(const VRegister& vd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
+ dup(vd, rn);
+ }
+
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+ inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
+ LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
+ V(rshrn, Rshrn) \
+ V(rshrn2, Rshrn2) \
+ V(shl, Shl) \
+ V(shll, Shll) \
+ V(shll2, Shll2) \
+ V(shrn, Shrn) \
+ V(shrn2, Shrn2) \
+ V(sli, Sli) \
+ V(sqrshrn, Sqrshrn) \
+ V(sqrshrn2, Sqrshrn2) \
+ V(sqrshrun, Sqrshrun) \
+ V(sqrshrun2, Sqrshrun2) \
+ V(sqshl, Sqshl) \
+ V(sqshlu, Sqshlu) \
+ V(sqshrn, Sqshrn) \
+ V(sqshrn2, Sqshrn2) \
+ V(sqshrun, Sqshrun) \
+ V(sqshrun2, Sqshrun2) \
+ V(sri, Sri) \
+ V(srshr, Srshr) \
+ V(srsra, Srsra) \
+ V(sshll, Sshll) \
+ V(sshll2, Sshll2) \
+ V(sshr, Sshr) \
+ V(ssra, Ssra) \
+ V(uqrshrn, Uqrshrn) \
+ V(uqrshrn2, Uqrshrn2) \
+ V(uqshl, Uqshl) \
+ V(uqshrn, Uqshrn) \
+ V(uqshrn2, Uqshrn2) \
+ V(urshr, Urshr) \
+ V(ursra, Ursra) \
+ V(ushll, Ushll) \
+ V(ushll2, Ushll2) \
+ V(ushr, Ushr) \
+ V(usra, Usra)
+
+#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
+ DCHECK(allow_macro_instructions()); \
+ ASM(vd, vn, shift); \
+ }
+ NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+#undef DEFINE_MACRO_ASM_FUNC
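+ // For example, the list above expands, via DEFINE_MACRO_ASM_FUNC, to members
+ // such as:
+ //   void Sshr(const VRegister& vd, const VRegister& vn, int shift) {
+ //     DCHECK(allow_macro_instructions());
+ //     sshr(vd, vn, shift);
+ //   }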
+
+ void Umov(const Register& rd, const VRegister& vn, int vn_index) {
+ DCHECK(allow_macro_instructions());
+ umov(rd, vn, vn_index);
+ }
+ void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbl(vd, vn, vm);
+ }
+ void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbl(vd, vn, vn2, vm);
+ }
+ void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbl(vd, vn, vn2, vn3, vm);
+ }
+ void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbl(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
+ int index) {
+ DCHECK(allow_macro_instructions());
+ ext(vd, vn, vm, index);
+ }
+
+ void Smov(const Register& rd, const VRegister& vn, int vn_index) {
+ DCHECK(allow_macro_instructions());
+ smov(rd, vn, vn_index);
+ }
+
+// Load-acquire/store-release macros.
+#define DECLARE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rt, const Register& rn);
+ LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ // Load an object from the root table.
+ void LoadRoot(CPURegister destination, Heap::RootListIndex index);
+
+ inline void Ret(const Register& xn = lr);
+
+ // Perform a conversion from a double to a signed int64. If the input fits in
+ // range of the 64-bit result, execution branches to done. Otherwise,
+ // execution falls through, and the sign of the result can be used to
+ // determine if overflow was towards positive or negative infinity.
+ //
+ // On successful conversion, the least significant 32 bits of the result are
+ // equivalent to the ECMA-262 operation "ToInt32".
+ //
+ // Only public for the test code in test-code-stubs-arm64.cc.
+ void TryConvertDoubleToInt64(Register result, DoubleRegister input,
+ Label* done);
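+ // A typical call site (illustrative only; 'result' and 'input' are assumed
+ // registers of the appropriate types):
+ //   Label done;
+ //   TryConvertDoubleToInt64(result, input, &done);
+ //   // Fall-through: the conversion did not fit; the sign of 'result' tells
+ //   // whether overflow was towards positive or negative infinity.
+ //   bind(&done);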
+
+ inline void Mrs(const Register& rt, SystemRegister sysreg);
+
+ // Generates function prologue code.
+ void Prologue(bool code_pre_aging);
+
+ // Code ageing support functions.
+
+ // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
+ // function as old, it replaces some of the function prologue (generated by
+ // FullCodeGenerator::Generate) with a call to a special stub (ultimately
+ // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
+ // function prologue to its initial young state (indicating that it has been
+ // recently run) and continues. A young function is therefore one which has a
+ // normal frame setup sequence, and an old function has a code age sequence
+ // which calls a code ageing stub.
+
+ // Set up a basic stack frame for young code (or code exempt from ageing) with
+ // type FUNCTION. It may be patched later for code ageing support. This is
+ // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
+ //
+ // This function takes an Assembler so it can be called from either a
+ // MacroAssembler or a PatchingAssembler context.
+ static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
+
+ // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
+ void EmitFrameSetupForCodeAgePatching();
+
+ // Emit a code age sequence that calls the relevant code age stub. The code
+ // generated by this sequence is expected to replace the code generated by
+ // EmitFrameSetupForCodeAgePatching, and represents an old function.
+ //
+ // If stub is NULL, this function generates the code age sequence but omits
+ // the stub address that is normally embedded in the instruction stream. This
+ // can be used by debug code to verify code age sequences.
+ static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
+
+ // Call EmitCodeAgeSequence from a MacroAssembler context.
+ void EmitCodeAgeSequence(Code* stub);
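+ // Putting the two together (illustrative only; 'assm' and 'stub' are an
+ // assumed Assembler and code age stub):
+ //   // Emitted for a young function:
+ //   EmitFrameSetupForCodeAgePatching(&assm);
+ //   // Patched in later, when the function is marked as old:
+ //   EmitCodeAgeSequence(&assm, stub);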
+
+ void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
+ DCHECK(allow_macro_instructions());
+ cmgt(vd, vn, imm);
+ }
+ void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
+ DCHECK(allow_macro_instructions());
+ cmge(vd, vn, imm);
+ }
+ void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
+ DCHECK(allow_macro_instructions());
+ cmeq(vd, vn, imm);
+ }
+
+ inline void Neg(const Register& rd, const Operand& operand);
+ inline void Negs(const Register& rd, const Operand& operand);
+
+ // Compute rd = abs(rm).
+ // This function clobbers the condition flags. On output the overflow flag is
+ // set iff the negation overflowed.
+ //
+ // If rm is the minimum representable value, the result is not representable.
+ // Handlers for each case can be specified using the relevant labels.
+ void Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable = NULL, Label* is_representable = NULL);
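+ // A minimal usage sketch (illustrative only), handling only the
+ // non-representable case:
+ //   Label not_representable;
+ //   Abs(x0, x1, &not_representable);
+ //   // Fall-through: x0 holds abs(x1).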
+
+ inline void Cls(const Register& rd, const Register& rn);
+ inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+ inline void Rev16(const Register& rd, const Register& rn);
+ inline void Rev32(const Register& rd, const Register& rn);
+ inline void Fcvtns(const Register& rd, const VRegister& fn);
+ inline void Fcvtnu(const Register& rd, const VRegister& fn);
+ inline void Fcvtms(const Register& rd, const VRegister& fn);
+ inline void Fcvtmu(const Register& rd, const VRegister& fn);
+ inline void Fcvtas(const Register& rd, const VRegister& fn);
+ inline void Fcvtau(const Register& rd, const VRegister& fn);
+
+ protected:
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size, const CPURegister& src0,
+ const CPURegister& src1, const CPURegister& src2,
+ const CPURegister& src3);
+ void PopHelper(int count, int size, const CPURegister& dst0,
+ const CPURegister& dst1, const CPURegister& dst2,
+ const CPURegister& dst3);
+
+ void ConditionalCompareMacro(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond,
+ ConditionalCompareOp op);
+
+ void AddSubWithCarryMacro(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack (csp) must be prepared by the caller as for a normal
+ // AAPCS64 call to 'printf'.
+ //
+ // The 'args' argument should point to an array of variable arguments in their
+ // proper PCS registers (and in calling order). The argument registers can
+ // have mixed types. The format string (x0) should not be included.
+ void CallPrintf(int arg_count = 0, const CPURegister* args = NULL);
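+ // A minimal sketch (illustrative only; assumes the format string has already
+ // been placed in x0 and expects one integer followed by one double):
+ //   CPURegister args[] = {x1, d0};
+ //   CallPrintf(2, args);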
+
+ private:
+ bool has_frame_ = false;
+ Isolate* const isolate_;
+#if DEBUG
+ // Tell whether any of the macro instructions can be used. When false, the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ bool allow_macro_instructions_;
+#endif
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ bool use_real_aborts_;
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound, it returns true if the label (or the previous link
+ // in the label chain) is out of range. In that case the caller is responsible
+ // for generating appropriate code.
+ // Otherwise it returns false.
+ // This function also checks whether veneers need to be emitted.
+ bool NeedExtraInstructionsOrRegisterBranch(Label* label,
+ ImmBranchType branch_type);
+
+ void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+ void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
+ LoadStoreOp op);
+
+ void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& addr, LoadStorePairOp op);
+};
+
+class MacroAssembler : public TurboAssembler {
+ public:
+ MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
+ CodeObjectRequired create_code_object);
+
+ // Instruction set functions ------------------------------------------------
+ // Logical macros.
+ inline void Bics(const Register& rd, const Register& rn,
+ const Operand& operand);
+
+ inline void Adcs(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Sbc(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Sbcs(const Register& rd, const Register& rn,
+ const Operand& operand);
+ inline void Ngc(const Register& rd, const Operand& operand);
+ inline void Ngcs(const Register& rd, const Operand& operand);
+
+ inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
+ Condition cond);
+ void Csel(const Register& rd, const Register& rn, const Operand& operand,
+ Condition cond);
+
+#define DECLARE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const Register& rt, const Register& rn);
+ STLX_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ // V8-specific load/store helpers.
+ void Load(const Register& rt, const MemOperand& addr, Representation r);
+ void Store(const Register& rt, const MemOperand& addr, Representation r);
+
+ // Branch type inversion relies on these relations.
+ STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
+
+ inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
+ unsigned width);
+ inline void Cinc(const Register& rd, const Register& rn, Condition cond);
+ inline void Cinv(const Register& rd, const Register& rn, Condition cond);
+ inline void CzeroX(const Register& rd, Condition cond);
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
+ inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
+ Condition cond);
+ inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
+ Condition cond);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Extr(const Register& rd, const Register& rn, const Register& rm,
+ unsigned lsb);
+ inline void Fcsel(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, Condition cond);
+ void Fcvtl(const VRegister& vd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ fcvtl(vd, vn);
+ }
+ void Fcvtl2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ fcvtl2(vd, vn);
+ }
+ void Fcvtn(const VRegister& vd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ fcvtn(vd, vn);
+ }
+ void Fcvtn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ fcvtn2(vd, vn);
+ }
+ void Fcvtxn(const VRegister& vd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ fcvtxn(vd, vn);
+ }
+ void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ fcvtxn2(vd, vn);
+ }
+ inline void Fmadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa);
+ inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fminnm(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm);
+ inline void Fmsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa);
+ inline void Fnmadd(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa);
+ inline void Fnmsub(const VRegister& fd, const VRegister& fn,
+ const VRegister& fm, const VRegister& fa);
+ inline void Hint(SystemHint code);
+ inline void Hlt(int code);
+ inline void Isb();
+ inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
+ inline void Msr(SystemRegister sysreg, const Register& rt);
+ inline void Nop() { nop(); }
+ void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
+ const int shift_amount = 0) {
+ DCHECK(allow_macro_instructions());
+ mvni(vd, imm8, shift, shift_amount);
+ }
+ inline void Rev(const Register& rd, const Register& rn);
+ inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
+ unsigned width);
+ inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
+ const Register& ra);
+ inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
+ const Register& ra);
+ inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
+ inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
+ const Register& ra);
+ inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
+ const Register& ra);
+
+ void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
+ DCHECK(allow_macro_instructions());
+ cmle(vd, vn, imm);
+ }
+ void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
+ DCHECK(allow_macro_instructions());
+ cmlt(vd, vn, imm);
+ }
+
+ void Ld1(const VRegister& vt, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld1(vt, src);
+ }
+ void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld1(vt, vt2, src);
+ }
+ void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld1(vt, vt2, vt3, src);
+ }
+ void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld1(vt, vt2, vt3, vt4, src);
+ }
+ void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld1(vt, lane, src);
+ }
+ void Ld1r(const VRegister& vt, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld1r(vt, src);
+ }
+ void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld2(vt, vt2, src);
+ }
+ void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
+ const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld2(vt, vt2, lane, src);
+ }
+ void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld2r(vt, vt2, src);
+ }
+ void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld3(vt, vt2, vt3, src);
+ }
+ void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ int lane, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld3(vt, vt2, vt3, lane, src);
+ }
+ void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld3r(vt, vt2, vt3, src);
+ }
+ void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld4(vt, vt2, vt3, vt4, src);
+ }
+ void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, int lane, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld4(vt, vt2, vt3, vt4, lane, src);
+ }
+ void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& src) {
+ DCHECK(allow_macro_instructions());
+ ld4r(vt, vt2, vt3, vt4, src);
+ }
+ void St1(const VRegister& vt, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt, int lane, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, lane, dst);
+ }
+ void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st2(vt, vt2, dst);
+ }
+ void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st3(vt, vt2, vt3, dst);
+ }
+ void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st4(vt, vt2, vt3, vt4, dst);
+ }
+ void St2(const VRegister& vt, const VRegister& vt2, int lane,
+ const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st2(vt, vt2, lane, dst);
+ }
+ void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ int lane, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st3(vt, vt2, vt3, lane, dst);
+ }
+ void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, int lane, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st4(vt, vt2, vt3, vt4, lane, dst);
+ }
+ void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbx(vd, vn, vm);
+ }
+ void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbx(vd, vn, vn2, vm);
+ }
+ void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbx(vd, vn, vn2, vn3, vm);
+ }
+ void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
+ const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
+ DCHECK(allow_macro_instructions());
+ tbx(vd, vn, vn2, vn3, vn4, vm);
+ }
+
+ void LoadObject(Register result, Handle<Object> object);
+
inline void PushSizeRegList(RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
PushCPURegList(CPURegList(type, reg_size, registers));
@@ -675,33 +1585,23 @@ class MacroAssembler : public Assembler {
PopSizeRegList(regs, kWRegSizeInBits);
}
inline void PushDRegList(RegList regs) {
- PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
}
inline void PopDRegList(RegList regs) {
- PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
}
inline void PushSRegList(RegList regs) {
- PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
}
inline void PopSRegList(RegList regs) {
- PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
}
// Push the specified register 'count' times.
void PushMultipleTimes(CPURegister src, Register count);
void PushMultipleTimes(CPURegister src, int count);
- // This is a convenience method for pushing a single Handle<Object>.
- inline void Push(Handle<Object> handle);
- inline void Push(Smi* smi);
-
- // Aliases of Push and Pop, required for V8 compatibility.
- inline void push(Register src) {
- Push(src);
- }
- inline void pop(Register dst) {
- Pop(dst);
- }
+ inline void PushObject(Handle<Object> handle);
// Sometimes callers need to push or pop multiple registers in a way that is
// difficult to structure efficiently for fixed Push or Pop calls. This scope
@@ -736,25 +1636,12 @@ class MacroAssembler : public Assembler {
std::vector<CPURegister> queued_;
};
- // Poke 'src' onto the stack. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
- void Poke(const CPURegister& src, const Operand& offset);
-
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
void Peek(const CPURegister& dst, const Operand& offset);
- // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
- // with 'src2' at a higher address than 'src1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
- void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
-
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
// higher address than 'dst1'. The offset is in bytes.
@@ -775,24 +1662,6 @@ class MacroAssembler : public Assembler {
UNIMPLEMENTED();
}
- // Claim or drop stack space without actually accessing memory.
- //
- // In debug mode, both of these will write invalid data into the claimed or
- // dropped space.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then it
- // must be aligned to 16 bytes and the size claimed or dropped must be a
- // multiple of 16 bytes.
- //
- // Note that unit_size must be specified in bytes. For variants which take a
- // Register count, the unit size must be a power of two.
- inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
- inline void Claim(const Register& count,
- uint64_t unit_size = kXRegSize);
- inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
- inline void Drop(const Register& count,
- uint64_t unit_size = kXRegSize);
-
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
// register.
inline void ClaimBySMI(const Register& count_smi,
@@ -807,18 +1676,6 @@ class MacroAssembler : public Assembler {
Condition cond,
Label* label);
- // Test the bits of register defined by bit_pattern, and branch if ANY of
- // those bits are set. May corrupt the status flags.
- inline void TestAndBranchIfAnySet(const Register& reg,
- const uint64_t bit_pattern,
- Label* label);
-
- // Test the bits of register defined by bit_pattern, and branch if ALL of
- // those bits are clear (ie. not set.) May corrupt the status flags.
- inline void TestAndBranchIfAllClear(const Register& reg,
- const uint64_t bit_pattern,
- Label* label);
-
// Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no
// side effects.
@@ -836,23 +1693,6 @@ class MacroAssembler : public Assembler {
// it will be encoded in the event marker.
inline void AnnotateInstrumentation(const char* marker_name);
- // If emit_debug_code() is true, emit a run-time check to ensure that
- // StackPointer() does not point below the system stack pointer.
- //
- // Whilst it is architecturally legal for StackPointer() to point below csp,
- // it can be evidence of a potential bug because the ABI forbids accesses
- // below csp.
- //
- // If StackPointer() is the system stack pointer (csp), then csp will be
- // dereferenced to cause the processor (or simulator) to abort if it is not
- // properly aligned.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertStackConsistency();
-
- // Emits a runtime assert that the CSP is aligned.
- void AssertCspAligned();
-
// Preserve the callee-saved registers (as defined by AAPCS64).
//
// Higher-numbered registers are pushed before lower-numbered registers, and
@@ -880,77 +1720,21 @@ class MacroAssembler : public Assembler {
// ActivationFrameAlignment().
void PopCalleeSavedRegisters();
- // Set the current stack pointer, but don't generate any code.
- inline void SetStackPointer(const Register& stack_pointer) {
- DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
- sp_ = stack_pointer;
- }
-
- // Return the current stack pointer, as set by SetStackPointer.
- inline const Register& StackPointer() const {
- return sp_;
- }
-
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
// current stack pointer.
inline void AlignAndSetCSPForFrame();
- // Push the system stack pointer (csp) down to allow the same to be done to
- // the current stack pointer (according to StackPointer()). This must be
- // called _before_ accessing the memory.
- //
- // This is necessary when pushing or otherwise adding things to the stack, to
- // satisfy the AAPCS64 constraint that the memory below the system stack
- // pointer is not accessed. The amount pushed will be increased as necessary
- // to ensure csp remains aligned to 16 bytes.
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void BumpSystemStackPointer(const Operand& space);
-
- // Re-synchronizes the system stack pointer (csp) with the current stack
- // pointer (according to StackPointer()).
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void SyncSystemStackPointer();
-
// Helpers ------------------------------------------------------------------
- // Root register.
- inline void InitializeRootRegister();
-
- void AssertFPCRState(Register fpcr = NoReg);
- void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
- void CanonicalizeNaN(const FPRegister& reg) {
- CanonicalizeNaN(reg, reg);
- }
- // Load an object from the root table.
- void LoadRoot(CPURegister destination,
- Heap::RootListIndex index);
// Store an object to the root table.
void StoreRoot(Register source,
Heap::RootListIndex index);
- // Load both TrueValue and FalseValue roots.
- void LoadTrueFalseRoots(Register true_root, Register false_root);
-
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object);
-
static int SafepointRegisterStackIndex(int reg_code);
- // This is required for compatibility with architecture independant code.
- // Remove if not needed.
- void Move(Register dst, Register src);
- void Move(Register dst, Handle<Object> x);
- void Move(Register dst, Smi* src);
-
void LoadInstanceDescriptors(Register map,
Register descriptors);
void EnumLengthUntagged(Register dst, Register map);
- void EnumLengthSmi(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
@@ -971,22 +1755,15 @@ class MacroAssembler : public Assembler {
inline void SmiTag(Register dst, Register src);
inline void SmiTag(Register smi);
- inline void SmiUntag(Register dst, Register src);
- inline void SmiUntag(Register smi);
- inline void SmiUntagToDouble(FPRegister dst,
- Register src,
+ inline void SmiUntagToDouble(VRegister dst, Register src,
UntagMode mode = kNotSpeculativeUntag);
- inline void SmiUntagToFloat(FPRegister dst,
- Register src,
+ inline void SmiUntagToFloat(VRegister dst, Register src,
UntagMode mode = kNotSpeculativeUntag);
// Tag and push in one step.
inline void SmiTagAndPush(Register src);
inline void SmiTagAndPush(Register src1, Register src2);
- inline void JumpIfSmi(Register value,
- Label* smi_label,
- Label* not_smi_label = NULL);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1,
Register value2,
@@ -1005,17 +1782,19 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
- void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register suspend_flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
@@ -1025,58 +1804,18 @@ class MacroAssembler : public Assembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- // Abort execution if argument is not a positive or zero integer, enabled via
- // --debug-code.
- void AssertPositiveOrZero(Register value);
-
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
- // Sets the vs flag if the input is -0.0.
- void TestForMinusZero(DoubleRegister input);
-
- // Jump to label if the input double register contains -0.0.
- void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
-
- // Jump to label if the input integer register contains the double precision
- // floating point representation of -0.0.
- void JumpIfMinusZero(Register input, Label* on_negative_zero);
-
- // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
- // output.
- void ClampInt32ToUint8(Register in_out);
- void ClampInt32ToUint8(Register output, Register input);
-
- // Saturate a double in input to an unsigned 8-bit integer in output.
- void ClampDoubleToUint8(Register output,
- DoubleRegister input,
- DoubleRegister dbl_scratch);
-
- // Try to represent a double as a signed 32-bit int.
- // This succeeds if the result compares equal to the input, so inputs of -0.0
- // are represented as 0 and handled as a success.
- //
- // On output the Z flag is set if the operation was successful.
- void TryRepresentDoubleAsInt32(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
- Label* on_successful_conversion = NULL,
- Label* on_failed_conversion = NULL) {
- DCHECK(as_int.Is32Bits());
- TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
- on_failed_conversion);
- }
-
// Try to represent a double as a signed 64-bit int.
// This succeeds if the result compares equal to the input, so inputs of -0.0
// are represented as 0 and handled as a success.
//
// On output the Z flag is set if the operation was successful.
- void TryRepresentDoubleAsInt64(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
+ void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
+ VRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
DCHECK(as_int.Is64Bits());
@@ -1084,14 +1823,6 @@ class MacroAssembler : public Assembler {
on_failed_conversion);
}
- // ---- Object Utilities ----
-
- // Initialize fields with filler values. Fields starting at |current_address|
- // not including |end_address| are overwritten with the value in |filler|. At
- // the end the loop, |current_address| takes the value of |end_address|.
- void InitializeFieldsWithFiller(Register current_address,
- Register end_address, Register filler);
-
// ---- String Utilities ----
// Checks if both instance types are sequential one-byte strings and jumps to
@@ -1104,10 +1835,7 @@ class MacroAssembler : public Assembler {
// ---- Calling / Jumping helpers ----
- // This is required for compatibility in architecture indepenedant code.
- inline void jmp(Label* L);
-
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void CallStub(CodeStub* stub);
void TailCallStub(CodeStub* stub);
void CallRuntime(const Runtime::Function* f,
@@ -1134,22 +1862,6 @@ class MacroAssembler : public Assembler {
void TailCallRuntime(Runtime::FunctionId fid);
- int ActivationFrameAlignment();
-
- // Calls a C function.
- // The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function,
- int num_reg_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
@@ -1159,36 +1871,6 @@ class MacroAssembler : public Assembler {
int num_arguments);
- void Jump(Register target);
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
- void Call(Register target);
- void Call(Label* target);
- void Call(Address target, RelocInfo::Mode rmode);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // For every Call variant, there is a matching CallSize function that returns
- // the size (in bytes) of the call sequence.
- static int CallSize(Register target);
- static int CallSize(Label* target);
- static int CallSize(Address target, RelocInfo::Mode rmode);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1);
-
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1229,70 +1911,8 @@ class MacroAssembler : public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
-
- // ---- Floating point helpers ----
-
- // Perform a conversion from a double to a signed int64. If the input fits in
- // range of the 64-bit result, execution branches to done. Otherwise,
- // execution falls through, and the sign of the result can be used to
- // determine if overflow was towards positive or negative infinity.
- //
- // On successful conversion, the least significant 32 bits of the result are
- // equivalent to the ECMA-262 operation "ToInt32".
- //
- // Only public for the test code in test-code-stubs-arm64.cc.
- void TryConvertDoubleToInt64(Register result,
- DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Register result, DoubleRegister double_input);
-
- // Performs a truncating conversion of a heap number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
- // must be different registers. Exits with 'result' holding the answer.
- void TruncateHeapNumberToI(Register result, Register object);
-
- // Converts the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
- // different registers.
- void TruncateNumberToI(Register object,
- Register result,
- Register heap_number_map,
- Label* not_int32);
-
// ---- Code generation helpers ----
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() const { return generating_stub_; }
-#if DEBUG
- void set_allow_macro_instructions(bool value) {
- allow_macro_instructions_ = value;
- }
- bool allow_macro_instructions() const { return allow_macro_instructions_; }
-#endif
- bool use_real_aborts() const { return use_real_aborts_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() const { return has_frame_; }
- bool AllowThisStubCall(CodeStub* stub);
-
- class NoUseRealAbortsScope {
- public:
- explicit NoUseRealAbortsScope(MacroAssembler* masm) :
- saved_(masm->use_real_aborts_), masm_(masm) {
- masm_->use_real_aborts_ = false;
- }
- ~NoUseRealAbortsScope() {
- masm_->use_real_aborts_ = saved_;
- }
- private:
- bool saved_;
- MacroAssembler* masm_;
- };
-
// Frame restart support
void MaybeDropFrames();
@@ -1325,25 +1945,14 @@ class MacroAssembler : public Assembler {
Label* gc_required,
AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(Register object_size, Register result, Register result_end,
- Register scratch, AllocationFlags flags);
-
- void FastAllocate(int object_size, Register result, Register scratch1,
- Register scratch2, AllocationFlags flags);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
// All registers are clobbered.
// If no heap_number_map register is provided, the function will take care of
// loading it.
- void AllocateHeapNumber(Register result,
- Label* gc_required,
- Register scratch1,
- Register scratch2,
- CPURegister value = NoFPReg,
+ void AllocateHeapNumber(Register result, Label* gc_required,
+ Register scratch1, Register scratch2,
+ CPURegister value = NoVReg,
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
@@ -1444,10 +2053,6 @@ class MacroAssembler : public Assembler {
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
- // Test the bitfield of the heap object map with mask and set the condition
- // flags. The object register is preserved.
- void TestMapBitfield(Register object, uint64_t mask);
-
// Load the elements kind field from a map, and return it in the result
// register.
void LoadElementsKindFromMap(Register result, Register map);
@@ -1497,12 +2102,6 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support.
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- SeqStringSetCharCheckIndexType index_type,
- Register scratch,
- uint32_t encoding_mask);
-
// Hash the interger value in 'key' register.
// It uses the same algorithm as ComputeIntegerHash in utils.h.
void GetNumberHash(Register key, Register scratch);
@@ -1513,11 +2112,6 @@ class MacroAssembler : public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
- void LeaveFrame(StackFrame::Type type);
-
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -1526,27 +2120,12 @@ class MacroAssembler : public Assembler {
Register scratch2, Register scratch3, Register scratch4,
Label* call_runtime);
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver should point to the array object.
- // If allocation info is present, the Z flag is set (so that the eq
- // condition will pass).
- void TestJSArrayForAllocationMemento(Register receiver,
- Register scratch1,
- Register scratch2,
- Label* no_memento_found);
-
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
void ExitFramePreserveFPRegs();
void ExitFrameRestoreFPRegs();
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type, int frame_slots);
- void Prologue(bool code_pre_aging);
-
// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
//
@@ -1601,15 +2180,9 @@ class MacroAssembler : public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Emit code for a truncating division by a constant. The dividend register is
- // unchanged. Dividend and result must be different.
- void TruncatingDiv(Register result, Register dividend, int32_t divisor);
-
// ---------------------------------------------------------------------------
// StatsCounter support
- void SetCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
@@ -1637,9 +2210,6 @@ class MacroAssembler : public Assembler {
void PushSafepointRegisters();
void PopSafepointRegisters();
- void PushSafepointRegistersAndDoubles();
- void PopSafepointRegistersAndDoubles();
-
// Store value in register src in the safepoint stack slot for register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
@@ -1650,16 +2220,6 @@ class MacroAssembler : public Assembler {
void CheckPageFlag(const Register& object, const Register& scratch, int mask,
Condition cc, Label* condition_met);
- void CheckPageFlagSet(const Register& object,
- const Register& scratch,
- int mask,
- Label* if_any_set);
-
- void CheckPageFlagClear(const Register& object,
- const Register& scratch,
- int mask,
- Label* if_all_clear);
-
// Check if object is in new space and jump accordingly.
// Register 'object' is preserved.
void JumpIfNotInNewSpace(Register object,
@@ -1772,10 +2332,6 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Debugging.
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason);
- void AssertRegisterIsClear(Register reg, BailoutReason reason);
void AssertRegisterIsRoot(
Register reg,
Heap::RootListIndex index,
@@ -1787,18 +2343,6 @@ class MacroAssembler : public Assembler {
// If emit_debug_code() is false, this emits no code.
void AssertHasValidColor(const Register& reg);
- // Abort if 'object' register doesn't point to a string object.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertIsString(const Register& object);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason);
- void CheckRegisterIsClear(Register reg, BailoutReason reason);
-
- // Print a message to stderr and abort execution.
- void Abort(BailoutReason reason);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
@@ -1807,16 +2351,10 @@ class MacroAssembler : public Assembler {
Register map,
Register scratch);
- CPURegList* TmpList() { return &tmp_list_; }
- CPURegList* FPTmpList() { return &fptmp_list_; }
-
- static CPURegList DefaultTmpList();
- static CPURegList DefaultFPTmpList();
-
// Like printf, but print at run-time from generated code.
//
// The caller must ensure that arguments for floating-point placeholders
- // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
+ // (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
// At the moment it is only possible to print the value of csp if it is the
@@ -1848,78 +2386,12 @@ class MacroAssembler : public Assembler {
const CPURegister& arg2 = NoCPUReg,
const CPURegister& arg3 = NoCPUReg);
- // Code ageing support functions.
-
- // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
- // function as old, it replaces some of the function prologue (generated by
- // FullCodeGenerator::Generate) with a call to a special stub (ultimately
- // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
- // function prologue to its initial young state (indicating that it has been
- // recently run) and continues. A young function is therefore one which has a
- // normal frame setup sequence, and an old function has a code age sequence
- // which calls a code ageing stub.
-
- // Set up a basic stack frame for young code (or code exempt from ageing) with
- // type FUNCTION. It may be patched later for code ageing support. This is
- // done by to Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
- //
- // This function takes an Assembler so it can be called from either a
- // MacroAssembler or a PatchingAssembler context.
- static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
-
- // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
- void EmitFrameSetupForCodeAgePatching();
-
- // Emit a code age sequence that calls the relevant code age stub. The code
- // generated by this sequence is expected to replace the code generated by
- // EmitFrameSetupForCodeAgePatching, and represents an old function.
- //
- // If stub is NULL, this function generates the code age sequence but omits
- // the stub address that is normally embedded in the instruction stream. This
- // can be used by debug code to verify code age sequences.
- static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
-
- // Call EmitCodeAgeSequence from a MacroAssembler context.
- void EmitCodeAgeSequence(Code* stub);
-
 // Return true if the sequence is a young sequence generated by
// EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
// sequence is a code age sequence (emitted by EmitCodeAgeSequence).
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
- // Perform necessary maintenance operations before a push or after a pop.
- //
- // Note that size is specified in bytes.
- void PushPreamble(Operand total_size);
- void PopPostamble(Operand total_size);
-
- void PushPreamble(int count, int size);
- void PopPostamble(int count, int size);
-
private:
- // The actual Push and Pop implementations. These don't generate any code
- // other than that required for the push or pop. This allows
- // (Push|Pop)CPURegList to bundle together run-time assertions for a large
- // block of registers.
- //
- // Note that size is per register, and is specified in bytes.
- void PushHelper(int count, int size,
- const CPURegister& src0, const CPURegister& src1,
- const CPURegister& src2, const CPURegister& src3);
- void PopHelper(int count, int size,
- const CPURegister& dst0, const CPURegister& dst1,
- const CPURegister& dst2, const CPURegister& dst3);
-
- // Call Printf. On a native build, a simple call will be generated, but if the
- // simulator is being used then a suitable pseudo-instruction is used. The
- // arguments and stack (csp) must be prepared by the caller as for a normal
- // AAPCS64 call to 'printf'.
- //
- // The 'args' argument should point to an array of variable arguments in their
- // proper PCS registers (and in calling order). The argument registers can
- // have mixed types. The format string (x0) should not be included.
- void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Condition cond, // eq for new space, ne otherwise.
@@ -1934,40 +2406,11 @@ class MacroAssembler : public Assembler {
// important it must be checked separately.
//
// On output the Z flag is set if the operation was successful.
- void TryRepresentDoubleAsInt(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
+ void TryRepresentDoubleAsInt(Register as_int, VRegister value,
+ VRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL);
- bool generating_stub_;
-#if DEBUG
- // Tell whether any of the macro instruction can be used. When false the
- // MacroAssembler will assert if a method which can emit a variable number
- // of instructions is called.
- bool allow_macro_instructions_;
-#endif
- bool has_frame_;
- Isolate* isolate_;
-
- // The Abort method should call a V8 runtime function, but the CallRuntime
- // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
- // use a simpler abort mechanism that doesn't depend on CEntryStub.
- //
- // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
- // being generated.
- bool use_real_aborts_;
-
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // The register to use as a stack pointer for stack operations.
- Register sp_;
-
- // Scratch registers available for use by the MacroAssembler.
- CPURegList tmp_list_;
- CPURegList fptmp_list_;
-
public:
// Far branches resolving.
//
@@ -1981,17 +2424,6 @@ class MacroAssembler : public Assembler {
 // branch instructions with a range of +-128MB. If that becomes too little
// (!), the mechanism can be extended to generate special veneers for really
// far targets.
-
- // Helps resolve branching to labels potentially out of range.
- // If the label is not bound, it registers the information necessary to later
- // be able to emit a veneer for this branch if necessary.
- // If the label is bound, it returns true if the label (or the previous link
- // in the label chain) is out of range. In that case the caller is responsible
- // for generating appropriate code.
- // Otherwise it returns false.
- // This function also checks wether veneers need to be emitted.
- bool NeedExtraInstructionsOrRegisterBranch(Label *label,
- ImmBranchType branch_type);
};
@@ -2001,39 +2433,39 @@ class MacroAssembler : public Assembler {
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
public:
- explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
- : masm_(masm)
+ explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
+ : tasm_(tasm)
#ifdef DEBUG
,
size_(count * kInstructionSize)
#endif
{
// Before blocking the const pool, see if it needs to be emitted.
- masm_->CheckConstPool(false, true);
- masm_->CheckVeneerPool(false, true);
+ tasm_->CheckConstPool(false, true);
+ tasm_->CheckVeneerPool(false, true);
- masm_->StartBlockPools();
+ tasm_->StartBlockPools();
#ifdef DEBUG
if (count != 0) {
- masm_->bind(&start_);
+ tasm_->bind(&start_);
}
- previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
- masm_->set_allow_macro_instructions(false);
+ previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
+ tasm_->set_allow_macro_instructions(false);
#endif
}
~InstructionAccurateScope() {
- masm_->EndBlockPools();
+ tasm_->EndBlockPools();
#ifdef DEBUG
if (start_.is_bound()) {
- DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
}
- masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+ tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
}
private:
- MacroAssembler* masm_;
+ TurboAssembler* tasm_;
#ifdef DEBUG
size_t size_;
Label start_;
@@ -2041,23 +2473,24 @@ class InstructionAccurateScope BASE_EMBEDDED {
#endif
};
-
// This scope utility allows scratch registers to be managed safely. The
-// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
-// original state, even if the lists were modified by some other means.
+// original state, even if the lists were modified by some other means. Note
+// that this scope can be nested, but the destructors need to run in the
+// reverse order of the constructors. We do not have assertions for this.
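+//
+// A minimal usage sketch (illustrative only; 'tasm' is an assumed pointer to
+// the enclosing TurboAssembler):
+//   {
+//     UseScratchRegisterScope temps(tasm);
+//     Register scratch = temps.AcquireX();
+//     // ... use 'scratch' freely here ...
+//   }  // 'scratch' is returned to TmpList() when the scope ends.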
class UseScratchRegisterScope {
public:
- explicit UseScratchRegisterScope(MacroAssembler* masm)
- : available_(masm->TmpList()),
- availablefp_(masm->FPTmpList()),
+ explicit UseScratchRegisterScope(TurboAssembler* tasm)
+ : available_(tasm->TmpList()),
+ availablefp_(tasm->FPTmpList()),
old_available_(available_->list()),
old_availablefp_(availablefp_->list()) {
- DCHECK(available_->type() == CPURegister::kRegister);
- DCHECK(availablefp_->type() == CPURegister::kFPRegister);
+ DCHECK_EQ(available_->type(), CPURegister::kRegister);
+ DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
}
~UseScratchRegisterScope();
@@ -2066,15 +2499,18 @@ class UseScratchRegisterScope {
// automatically when the scope ends.
Register AcquireW() { return AcquireNextAvailable(available_).W(); }
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
- FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
- FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireV(VectorFormat format) {
+ return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
+ }
Register UnsafeAcquire(const Register& reg) {
return Register(UnsafeAcquire(available_, reg));
}
Register AcquireSameSizeAs(const Register& reg);
- FPRegister AcquireSameSizeAs(const FPRegister& reg);
+ VRegister AcquireSameSizeAs(const VRegister& reg);
private:
static CPURegister AcquireNextAvailable(CPURegList* available);
@@ -2083,11 +2519,11 @@ class UseScratchRegisterScope {
// Available scratch registers.
CPURegList* available_; // kRegister
- CPURegList* availablefp_; // kFPRegister
+ CPURegList* availablefp_; // kVRegister
// The state of the available lists at the start of this scope.
RegList old_available_; // kRegister
- RegList old_availablefp_; // kFPRegister
+ RegList old_availablefp_; // kVRegister
};
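A minimal usage sketch of this scope, as an editorial illustration rather than part of the diff (it assumes a TurboAssembler* named tasm and code that needs one integer and one NEON scratch register):

  {
    UseScratchRegisterScope temps(tasm);
    Register scratch = temps.AcquireX();          // Borrow a 64-bit scratch register.
    VRegister lanes = temps.AcquireV(kFormat4S);  // Borrow a NEON register viewed as 4x32-bit lanes.
    // ... emit code that clobbers scratch and lanes ...
  }  // TmpList() and FPTmpList() are restored here, returning both registers.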
MemOperand ContextMemOperand(Register context, int index = 0);
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index fb0e614982..231f4efd98 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -5,6 +5,7 @@
#include <stdlib.h>
#include <cmath>
#include <cstdarg>
+#include <type_traits>
#if V8_TARGET_ARCH_ARM64
@@ -43,14 +44,15 @@ namespace internal {
#define MAGENTA "35"
#define CYAN "36"
#define WHITE "37"
+
typedef char const * const TEXT_COLOUR;
TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
-TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
-TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
+TEXT_COLOUR clr_vreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
+TEXT_COLOUR clr_vreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
@@ -94,7 +96,6 @@ SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
default:
UNREACHABLE();
- return SimSystemRegister();
}
}
@@ -231,20 +232,20 @@ void Simulator::CheckPCSComplianceAndRun() {
#ifdef DEBUG
CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
- CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
+ CHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count());
int64_t saved_registers[kNumberOfCalleeSavedRegisters];
- uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
+ uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters];
CPURegList register_list = kCalleeSaved;
- CPURegList fpregister_list = kCalleeSavedFP;
+ CPURegList fpregister_list = kCalleeSavedV;
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
// x31 is not a caller saved register, so no need to specify if we want
// the stack or zero.
saved_registers[i] = xreg(register_list.PopLowestIndex().code());
}
- for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
saved_fpregisters[i] =
dreg_bits(fpregister_list.PopLowestIndex().code());
}
@@ -256,11 +257,11 @@ void Simulator::CheckPCSComplianceAndRun() {
CHECK_EQ(original_stack, sp());
// Check that callee-saved registers have been preserved.
register_list = kCalleeSaved;
- fpregister_list = kCalleeSavedFP;
+ fpregister_list = kCalleeSavedV;
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
}
- for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
DCHECK(saved_fpregisters[i] ==
dreg_bits(fpregister_list.PopLowestIndex().code()));
}
@@ -275,11 +276,11 @@ void Simulator::CheckPCSComplianceAndRun() {
// In theory d0 to d7 can be used for return values, but V8 only uses d0
// for now.
- fpregister_list = kCallerSavedFP;
+ fpregister_list = kCallerSavedV;
fpregister_list.Remove(d0);
CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
- CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue);
#endif
}
@@ -294,7 +295,7 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
set_xreg(code, value | code);
}
} else {
- DCHECK(list->type() == CPURegister::kFPRegister);
+ DCHECK_EQ(list->type(), CPURegister::kVRegister);
while (!list->IsEmpty()) {
unsigned code = list->PopLowestIndex().code();
set_dreg_bits(code, value | code);
@@ -306,10 +307,10 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
void Simulator::CorruptAllCallerSavedCPURegisters() {
// CorruptRegisters alters its parameter, so copy the lists first.
CPURegList register_list = kCallerSaved;
- CPURegList fpregister_list = kCallerSavedFP;
+ CPURegList fpregister_list = kCallerSavedV;
CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
- CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue);
}
#endif
@@ -417,7 +418,7 @@ void Simulator::ResetState() {
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
set_xreg(i, 0xbadbeef);
}
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
// Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
set_dreg_bits(i, 0x7ff000007f800001UL);
}
@@ -444,6 +445,10 @@ Simulator::~Simulator() {
void Simulator::Run() {
+ // Flush any written registers before executing anything, so that
+ // manually-set registers are logged _before_ the first instruction.
+ LogAllWrittenRegisters();
+
pc_modified_ = false;
while (pc_ != kEndOfSimAddress) {
ExecuteInstruction();
@@ -840,8 +845,9 @@ const char* Simulator::vreg_names[] = {
const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
- STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1));
- DCHECK(code < kNumberOfRegisters);
+ static_assert(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1),
+ "Array must be large enough to hold all register names.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
// The modulo operator has no effect here, but it silences a broken GCC
// warning about out-of-bounds array accesses.
code %= kNumberOfRegisters;
@@ -855,8 +861,9 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
- STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1));
- DCHECK(code < kNumberOfRegisters);
+ static_assert(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1),
+ "Array must be large enough to hold all register names.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
code %= kNumberOfRegisters;
// If the code represents the stack pointer, index the name after zr.
@@ -868,23 +875,70 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::SRegNameForCode(unsigned code) {
- STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters);
- DCHECK(code < kNumberOfFPRegisters);
- return sreg_names[code % kNumberOfFPRegisters];
+ static_assert(arraysize(Simulator::sreg_names) == kNumberOfVRegisters,
+ "Array must be large enough to hold all register names.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return sreg_names[code % kNumberOfVRegisters];
}
const char* Simulator::DRegNameForCode(unsigned code) {
- STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters);
- DCHECK(code < kNumberOfFPRegisters);
- return dreg_names[code % kNumberOfFPRegisters];
+ static_assert(arraysize(Simulator::dreg_names) == kNumberOfVRegisters,
+ "Array must be large enough to hold all register names.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return dreg_names[code % kNumberOfVRegisters];
}
const char* Simulator::VRegNameForCode(unsigned code) {
- STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters);
- DCHECK(code < kNumberOfFPRegisters);
- return vreg_names[code % kNumberOfFPRegisters];
+ static_assert(arraysize(Simulator::vreg_names) == kNumberOfVRegisters,
+ "Array must be large enough to hold all register names.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ return vreg_names[code % kNumberOfVRegisters];
+}
+
+void LogicVRegister::ReadUintFromMem(VectorFormat vform, int index,
+ uint64_t addr) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8:
+ register_.Insert(index, SimMemory::Read<uint8_t>(addr));
+ break;
+ case 16:
+ register_.Insert(index, SimMemory::Read<uint16_t>(addr));
+ break;
+ case 32:
+ register_.Insert(index, SimMemory::Read<uint32_t>(addr));
+ break;
+ case 64:
+ register_.Insert(index, SimMemory::Read<uint64_t>(addr));
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+void LogicVRegister::WriteUintToMem(VectorFormat vform, int index,
+ uint64_t addr) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8:
+ SimMemory::Write<uint8_t>(addr, static_cast<uint8_t>(Uint(vform, index)));
+ break;
+ case 16:
+ SimMemory::Write<uint16_t>(addr,
+ static_cast<uint16_t>(Uint(vform, index)));
+ break;
+ case 32:
+ SimMemory::Write<uint32_t>(addr,
+ static_cast<uint32_t>(Uint(vform, index)));
+ break;
+ case 64:
+ SimMemory::Write<uint64_t>(addr, Uint(vform, index));
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
}
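A sketch of how the lane-size dispatch above is driven in practice (editorial illustration only; it assumes code inside a Simulator member, a LogicVRegister wrapping a SimVRegister as at the call sites elsewhere in this file, and a hypothetical address addr):

  SimVRegister& v0 = vreg(0);
  LogicVRegister lanes(v0);
  // kFormat8H has 16-bit lanes, so the switch selects SimMemory::Read<uint16_t>
  // and inserts the loaded value into lane 3 of v0.
  lanes.ReadUintFromMem(kFormat8H, 3, addr);
  // The store path mirrors this: the 16-bit value held in lane 3 is written back.
  lanes.WriteUintToMem(kFormat8H, 3, addr);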
@@ -895,7 +949,7 @@ int Simulator::CodeFromName(const char* name) {
return i;
}
}
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
if ((strcmp(vreg_names[i], name) == 0) ||
(strcmp(dreg_names[i], name) == 0) ||
(strcmp(sreg_names[i], name) == 0)) {
@@ -964,7 +1018,7 @@ void Simulator::AddSubWithCarry(Instruction* instr) {
template <typename T>
T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
- typedef typename make_unsigned<T>::type unsignedT;
+ typedef typename std::make_unsigned<T>::type unsignedT;
if (amount == 0) {
return value;
@@ -1038,16 +1092,6 @@ void Simulator::Extract(Instruction* instr) {
}
-template<> double Simulator::FPDefaultNaN<double>() const {
- return kFP64DefaultNaN;
-}
-
-
-template<> float Simulator::FPDefaultNaN<float>() const {
- return kFP32DefaultNaN;
-}
-
-
void Simulator::FPCompare(double val0, double val1) {
AssertSupportedFPCR();
@@ -1067,6 +1111,108 @@ void Simulator::FPCompare(double val0, double val1) {
LogSystemRegister(NZCV);
}
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
+ size_t reg_size, size_t lane_size) {
+ DCHECK_GE(reg_size, lane_size);
+
+ uint32_t format = 0;
+ if (reg_size != lane_size) {
+ switch (reg_size) {
+ default:
+ UNREACHABLE();
+ case kQRegSize:
+ format = kPrintRegAsQVector;
+ break;
+ case kDRegSize:
+ format = kPrintRegAsDVector;
+ break;
+ }
+ }
+
+ switch (lane_size) {
+ default:
+ UNREACHABLE();
+ case kQRegSize:
+ format |= kPrintReg1Q;
+ break;
+ case kDRegSize:
+ format |= kPrintReg1D;
+ break;
+ case kSRegSize:
+ format |= kPrintReg1S;
+ break;
+ case kHRegSize:
+ format |= kPrintReg1H;
+ break;
+ case kBRegSize:
+ format |= kPrintReg1B;
+ break;
+ }
+
+ // These sizes would be duplicate case labels.
+ static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
+ static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
+ static_assert(kPrintXReg == kPrintReg1D,
+ "X and D register printing code is shared.");
+ static_assert(kPrintWReg == kPrintReg1S,
+ "W and S register printing code is shared.");
+
+ return static_cast<PrintRegisterFormat>(format);
+}
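Two worked instances of the composition above, derived from the cases shown rather than from the enum's numeric values:

  // A full Q register viewed as 32-bit lanes: reg_size != lane_size, so the
  // Q-vector flag is set, giving kPrintRegAsQVector | kPrintReg1S (four S lanes).
  //   GetPrintRegisterFormatForSize(kQRegSize, kSRegSize)
  // A plain X register: reg_size == lane_size == kDRegSize, so no vector flag
  // is set and the result is kPrintReg1D, which the static_asserts at the end
  // of the function guarantee is the same value as kPrintXReg.
  //   GetPrintRegisterFormatForSize(kXRegSize, kXRegSize)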
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
+ VectorFormat vform) {
+ switch (vform) {
+ default:
+ UNREACHABLE();
+ case kFormat16B:
+ return kPrintReg16B;
+ case kFormat8B:
+ return kPrintReg8B;
+ case kFormat8H:
+ return kPrintReg8H;
+ case kFormat4H:
+ return kPrintReg4H;
+ case kFormat4S:
+ return kPrintReg4S;
+ case kFormat2S:
+ return kPrintReg2S;
+ case kFormat2D:
+ return kPrintReg2D;
+ case kFormat1D:
+ return kPrintReg1D;
+
+ case kFormatB:
+ return kPrintReg1B;
+ case kFormatH:
+ return kPrintReg1H;
+ case kFormatS:
+ return kPrintReg1S;
+ case kFormatD:
+ return kPrintReg1D;
+ }
+}
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
+ VectorFormat vform) {
+ switch (vform) {
+ default:
+ UNREACHABLE();
+ case kFormat4S:
+ return kPrintReg4SFP;
+ case kFormat2S:
+ return kPrintReg2SFP;
+ case kFormat2D:
+ return kPrintReg2DFP;
+ case kFormat1D:
+ return kPrintReg1DFP;
+
+ case kFormatS:
+ return kPrintReg1SFP;
+ case kFormatD:
+ return kPrintReg1DFP;
+ }
+}
void Simulator::SetBreakpoint(Instruction* location) {
for (unsigned i = 0; i < breakpoints_.size(); i++) {
@@ -1130,6 +1276,18 @@ void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
}
}
+void Simulator::PrintWrittenRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
+ }
+}
+
+void Simulator::PrintWrittenVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
+ }
+}
void Simulator::PrintSystemRegisters() {
PrintSystemRegister(NZCV);
@@ -1143,58 +1301,217 @@ void Simulator::PrintRegisters() {
}
}
-
-void Simulator::PrintFPRegisters() {
- for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
- PrintFPRegister(i);
+void Simulator::PrintVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ PrintVRegister(i, kPrintReg1Q);
}
}
void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+ registers_[code].NotifyRegisterLogged();
+
// Don't print writes into xzr.
if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
return;
}
- // The template is "# x<code>:value".
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n",
- clr_reg_name, XRegNameForCode(code, r31mode),
- clr_reg_value, reg<uint64_t>(code, r31mode), clr_normal);
+ // The template for all x and w registers:
+ // "# x{code}: 0x{value}"
+ // "# w{code}: 0x{value}"
+
+ PrintRegisterRawHelper(code, r31mode);
+ fprintf(stream_, "\n");
}
+// Print a register's name and raw value.
+//
+// The `bytes` and `lsb` arguments can be used to limit the bytes that are
+// printed. These arguments are intended for use in cases where the register
+// hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a floating-point interpretation or a memory access annotation).
+void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
+ // The template for vector types:
+ // "# v{code}: 0xffeeddccbbaa99887766554433221100".
+ // An example with bytes=4 and lsb=8:
+ // "# v{code}: 0xbbaa9988 ".
+ fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
+ clr_vreg_value);
+
+ int msb = lsb + bytes - 1;
+ int byte = kQRegSize - 1;
+
+ // Print leading padding spaces. (Two spaces per byte.)
+ while (byte > msb) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+
+ // Print the specified part of the value, byte by byte.
+ qreg_t rawbits = qreg(code);
+ fprintf(stream_, "0x");
+ while (byte >= lsb) {
+ fprintf(stream_, "%02x", rawbits.val[byte]);
+ byte--;
+ }
+
+ // Print trailing padding spaces.
+ while (byte >= 0) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+ fprintf(stream_, "%s", clr_normal);
+}
+
+// Print each of the specified lanes of a register as a float or double value.
+//
+// The `lane_count` and `rightmost_lane` arguments can be used to limit the
+// lanes that are printed. These arguments are intended for use in cases where
+// the register hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintVRegisterFPHelper(unsigned code,
+ unsigned lane_size_in_bytes,
+ int lane_count, int rightmost_lane) {
+ DCHECK((lane_size_in_bytes == kSRegSize) ||
+ (lane_size_in_bytes == kDRegSize));
+
+ unsigned msb = (lane_count + rightmost_lane) * lane_size_in_bytes;
+ DCHECK_LE(msb, static_cast<unsigned>(kQRegSize));
+
+ // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
+ // name is used:
+ // " (s{code}: {value})"
+ // " (d{code}: {value})"
+ // For vector types, "..." is used to represent one or more omitted lanes.
+ // " (..., {value}, {value}, ...)"
+ if ((lane_count == 1) && (rightmost_lane == 0)) {
+ const char* name = (lane_size_in_bytes == kSRegSize)
+ ? SRegNameForCode(code)
+ : DRegNameForCode(code);
+ fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
+ } else {
+ if (msb < (kQRegSize - 1)) {
+ fprintf(stream_, " (..., ");
+ } else {
+ fprintf(stream_, " (");
+ }
+ }
+
+ // Print the list of values.
+ const char* separator = "";
+ int leftmost_lane = rightmost_lane + lane_count - 1;
+ for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
+ double value = (lane_size_in_bytes == kSRegSize)
+ ? vreg(code).Get<float>(lane)
+ : vreg(code).Get<double>(lane);
+ fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
+ separator = ", ";
+ }
+
+ if (rightmost_lane > 0) {
+ fprintf(stream_, ", ...");
+ }
+ fprintf(stream_, ")");
+}
+
+// Print a register's name and raw value.
+//
+// Only the least-significant `size_in_bytes` bytes of the register are printed,
+// but the value is aligned as if the whole register had been printed.
+//
+// For typical register updates, size_in_bytes should be set to kXRegSize
+// -- the default -- so that the whole register is printed. Other values of
+// size_in_bytes are intended for use when the register hasn't actually been
+// updated (such as in PrintWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes) {
+ // The template for all supported sizes.
+ // "# x{code}: 0xffeeddccbbaa9988"
+ // "# w{code}: 0xbbaa9988"
+ // "# w{code}<15:0>: 0x9988"
+ // "# w{code}<7:0>: 0x88"
+ unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;
+
+ const char* name = "";
+ const char* suffix = "";
+ switch (size_in_bytes) {
+ case kXRegSize:
+ name = XRegNameForCode(code, r31mode);
+ break;
+ case kWRegSize:
+ name = WRegNameForCode(code, r31mode);
+ break;
+ case 2:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<15:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ case 1:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<7:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);
+
+ // Print leading padding spaces.
+ DCHECK_LT(padding_chars, kXRegSize * 2U);
+ for (unsigned i = 0; i < padding_chars; i++) {
+ putc(' ', stream_);
+ }
+
+ // Print the specified bits in hexadecimal format.
+ uint64_t bits = reg<uint64_t>(code, r31mode);
+ bits &= kXRegMask >> ((kXRegSize - size_in_bytes) * 8);
+ static_assert(sizeof(bits) == kXRegSize,
+ "X registers and uint64_t must be the same size.");
-void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) {
- // The template is "# v<code>:bits (d<code>:value, ...)".
+ int chars = size_in_bytes * 2;
+ fprintf(stream_, "%s0x%0*" PRIx64 "%s", clr_reg_value, chars, bits,
+ clr_normal);
+}
- DCHECK(sizes != 0);
- DCHECK((sizes & kPrintAllFPRegValues) == sizes);
+void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
+ vregisters_[code].NotifyRegisterLogged();
- // Print the raw bits.
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (",
- clr_fpreg_name, VRegNameForCode(code),
- clr_fpreg_value, fpreg<uint64_t>(code), clr_normal);
+ int lane_size_log2 = format & kPrintRegLaneSizeMask;
- // Print all requested value interpretations.
- bool need_separator = false;
- if (sizes & kPrintDRegValue) {
- fprintf(stream_, "%s%s%s: %s%g%s",
- need_separator ? ", " : "",
- clr_fpreg_name, DRegNameForCode(code),
- clr_fpreg_value, fpreg<double>(code), clr_normal);
- need_separator = true;
+ int reg_size_log2;
+ if (format & kPrintRegAsQVector) {
+ reg_size_log2 = kQRegSizeLog2;
+ } else if (format & kPrintRegAsDVector) {
+ reg_size_log2 = kDRegSizeLog2;
+ } else {
+ // Scalar types.
+ reg_size_log2 = lane_size_log2;
}
- if (sizes & kPrintSRegValue) {
- fprintf(stream_, "%s%s%s: %s%g%s",
- need_separator ? ", " : "",
- clr_fpreg_name, SRegNameForCode(code),
- clr_fpreg_value, fpreg<float>(code), clr_normal);
- need_separator = true;
+ int lane_count = 1 << (reg_size_log2 - lane_size_log2);
+ int lane_size = 1 << lane_size_log2;
+
+ // The template for vector types:
+ // "# v{code}: 0x{rawbits} (..., {value}, ...)".
+ // The template for scalar types:
+ // "# v{code}: 0x{rawbits} ({reg}:{value})".
+ // The values in parentheses after the bit representations are floating-point
+ // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.
+
+ PrintVRegisterRawHelper(code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(code, lane_size, lane_count);
}
- // End the value list.
- fprintf(stream_, ")\n");
+ fprintf(stream_, "\n");
}
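To make the alignment arithmetic in PrintRegisterRawHelper above concrete, here is how a 16-bit value in w1 would be traced (a hand-worked sketch following the code, not captured simulator output):

  // PrintRegisterRawHelper(1, Reg31IsZeroRegister, 2):
  //   padding_chars = (kXRegSize - 2) * 2 - strlen("<15:0>") = 12 - 6 = 6
  //   bits &= kXRegMask >> ((kXRegSize - 2) * 8)   // keep only the low 16 bits
  //   chars = 2 * 2 = 4 hex digits
  // which produces a line shaped like:
  //   #    w1<15:0>:       0x9988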
@@ -1226,109 +1543,61 @@ void Simulator::PrintSystemRegister(SystemRegister id) {
}
}
+void Simulator::PrintRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ registers_[reg_code].NotifyRegisterLogged();
-void Simulator::PrintRead(uintptr_t address,
- size_t size,
- unsigned reg_code) {
- USE(size); // Size is unused here.
-
- // The template is "# x<code>:value <- address".
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
- clr_reg_name, XRegNameForCode(reg_code),
- clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+ USE(format);
+ // The template is "# {reg}: 0x{value} <- {address}".
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
clr_memory_address, address, clr_normal);
}
+void Simulator::PrintVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane) {
+ vregisters_[reg_code].NotifyRegisterLogged();
-void Simulator::PrintReadFP(uintptr_t address,
- size_t size,
- unsigned reg_code) {
- // The template is "# reg:bits (reg:value) <- address".
- switch (size) {
- case kSRegSize:
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)",
- clr_fpreg_name, VRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
- clr_fpreg_name, SRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
- break;
- case kDRegSize:
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
- clr_fpreg_name, VRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
- clr_fpreg_name, DRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
- break;
- default:
- UNREACHABLE();
+ // The template is "# v{code}: 0x{rawbits} <- {address}".
+ PrintVRegisterRawHelper(reg_code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
+ GetPrintRegLaneCount(format), lane);
}
-
fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
clr_memory_address, address, clr_normal);
}
+void Simulator::PrintWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ DCHECK_EQ(GetPrintRegLaneCount(format), 1U);
-void Simulator::PrintWrite(uintptr_t address,
- size_t size,
- unsigned reg_code) {
- // The template is "# reg:value -> address". To keep the trace tidy and
- // readable, the value is aligned with the values in the register trace.
- switch (size) {
- case kByteSizeInBytes:
- fprintf(stream_, "# %s%5s<7:0>: %s0x%02" PRIx8 "%s",
- clr_reg_name, WRegNameForCode(reg_code),
- clr_reg_value, reg<uint8_t>(reg_code), clr_normal);
- break;
- case kHalfWordSizeInBytes:
- fprintf(stream_, "# %s%5s<15:0>: %s0x%04" PRIx16 "%s",
- clr_reg_name, WRegNameForCode(reg_code),
- clr_reg_value, reg<uint16_t>(reg_code), clr_normal);
- break;
- case kWRegSize:
- fprintf(stream_, "# %s%5s: %s0x%08" PRIx32 "%s",
- clr_reg_name, WRegNameForCode(reg_code),
- clr_reg_value, reg<uint32_t>(reg_code), clr_normal);
- break;
- case kXRegSize:
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
- clr_reg_name, XRegNameForCode(reg_code),
- clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
- break;
- default:
- UNREACHABLE();
- }
-
+ // The template is "# {reg}: 0x{value} -> {address}". To keep the trace tidy
+ // and readable, the value is aligned with the values in the register trace.
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
+ GetPrintRegSizeInBytes(format));
fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
clr_memory_address, address, clr_normal);
}
-
-void Simulator::PrintWriteFP(uintptr_t address,
- size_t size,
- unsigned reg_code) {
- // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy
- // and readable, the value is aligned with the values in the register trace.
- switch (size) {
- case kSRegSize:
- fprintf(stream_, "# %s%5s<31:0>: %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)",
- clr_fpreg_name, VRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<uint32_t>(reg_code), clr_normal,
- clr_fpreg_name, SRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
- break;
- case kDRegSize:
- fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
- clr_fpreg_name, VRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
- clr_fpreg_name, DRegNameForCode(reg_code),
- clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
- break;
- default:
- UNREACHABLE();
+void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane) {
+ // The templates:
+ // "# v{code}: 0x{rawbits} -> {address}"
+ // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}".
+ // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}"
+ // Because this trace doesn't represent a change to the source register's
+ // value, only the relevant part of the value is printed. To keep the trace
+ // tidy and readable, the raw value is aligned with the other values in the
+ // register trace.
+ int lane_count = GetPrintRegLaneCount(format);
+ int lane_size = GetPrintRegLaneSizeInBytes(format);
+ int reg_size = GetPrintRegSizeInBytes(format);
+ PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
}
-
fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
clr_memory_address, address, clr_normal);
}
@@ -1650,13 +1919,14 @@ void Simulator::LoadStoreHelper(Instruction* instr,
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t stack = 0;
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (instr->IsLoad()) {
- local_monitor_.NotifyLoad(address);
- } else {
- local_monitor_.NotifyStore(address);
- global_monitor_.Pointer()->NotifyStore_Locked(address,
- &global_monitor_processor_);
+ {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (instr->IsLoad()) {
+ local_monitor_.NotifyLoad();
+ } else {
+ local_monitor_.NotifyStore();
+ global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ }
}
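The rewritten block above narrows the critical section: the LockGuard now lives in its own scope, so the global monitor mutex is released before the writeback and the memory access themselves are simulated. The RAII shape, reduced to a sketch:

  {
    base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
    // ... notify the local and global monitors while the mutex is held ...
  }  // lock_guard is destroyed here and the mutex released.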
// Handle the writeback for stores before the store. On a CPU the writeback
@@ -1674,10 +1944,10 @@ void Simulator::LoadStoreHelper(Instruction* instr,
stack = sp();
}
- LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
switch (op) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
- // LOG_FP_REGS). We will print a more detailed log.
+ // LOG_VREGS). We will print a more detailed log.
case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
@@ -1687,33 +1957,55 @@ void Simulator::LoadStoreHelper(Instruction* instr,
case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
+ case LDR_b:
+ set_breg_no_log(srcdst, MemoryRead<uint8_t>(address));
+ break;
+ case LDR_h:
+ set_hreg_no_log(srcdst, MemoryRead<uint16_t>(address));
+ break;
case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
+ case LDR_q:
+ set_qreg_no_log(srcdst, MemoryRead<qreg_t>(address));
+ break;
case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
+ case STR_b:
+ MemoryWrite<uint8_t>(address, breg(srcdst));
+ break;
+ case STR_h:
+ MemoryWrite<uint16_t>(address, hreg(srcdst));
+ break;
case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
+ case STR_q:
+ MemoryWrite<qreg_t>(address, qreg(srcdst));
+ break;
default: UNIMPLEMENTED();
}
// Print a detailed trace (including the memory address) instead of the basic
// register:value trace generated by set_*reg().
- size_t access_size = 1 << instr->SizeLS();
+ unsigned access_size = 1 << instr->SizeLS();
if (instr->IsLoad()) {
if ((op == LDR_s) || (op == LDR_d)) {
- LogReadFP(address, access_size, srcdst);
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
} else {
- LogRead(address, access_size, srcdst);
+ LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
}
} else {
if ((op == STR_s) || (op == STR_d)) {
- LogWriteFP(address, access_size, srcdst);
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
} else {
- LogWrite(address, access_size, srcdst);
+ LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
}
}
@@ -1761,17 +2053,14 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
uintptr_t address2 = address + access_size;
uintptr_t stack = 0;
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- if (instr->IsLoad()) {
- local_monitor_.NotifyLoad(address);
- local_monitor_.NotifyLoad(address2);
- } else {
- local_monitor_.NotifyStore(address);
- local_monitor_.NotifyStore(address2);
- global_monitor_.Pointer()->NotifyStore_Locked(address,
- &global_monitor_processor_);
- global_monitor_.Pointer()->NotifyStore_Locked(address2,
- &global_monitor_processor_);
+ {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (instr->IsLoad()) {
+ local_monitor_.NotifyLoad();
+ } else {
+ local_monitor_.NotifyStore();
+ global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ }
}
// Handle the writeback for stores before the store. On a CPU the writeback
@@ -1797,61 +2086,73 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
switch (op) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
- // LOG_FP_REGS). We will print a more detailed log.
+ // LOG_VREGS). We will print a more detailed log.
case LDP_w: {
- DCHECK(access_size == kWRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
break;
}
case LDP_s: {
- DCHECK(access_size == kSRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize));
set_sreg_no_log(rt, MemoryRead<float>(address));
set_sreg_no_log(rt2, MemoryRead<float>(address2));
break;
}
case LDP_x: {
- DCHECK(access_size == kXRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize));
set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
break;
}
case LDP_d: {
- DCHECK(access_size == kDRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize));
set_dreg_no_log(rt, MemoryRead<double>(address));
set_dreg_no_log(rt2, MemoryRead<double>(address2));
break;
}
+ case LDP_q: {
+ DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
+ set_qreg(rt, MemoryRead<qreg_t>(address), NoRegLog);
+ set_qreg(rt2, MemoryRead<qreg_t>(address2), NoRegLog);
+ break;
+ }
case LDPSW_x: {
- DCHECK(access_size == kWRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
set_xreg_no_log(rt, MemoryRead<int32_t>(address));
set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
break;
}
case STP_w: {
- DCHECK(access_size == kWRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
MemoryWrite<uint32_t>(address, wreg(rt));
MemoryWrite<uint32_t>(address2, wreg(rt2));
break;
}
case STP_s: {
- DCHECK(access_size == kSRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize));
MemoryWrite<float>(address, sreg(rt));
MemoryWrite<float>(address2, sreg(rt2));
break;
}
case STP_x: {
- DCHECK(access_size == kXRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize));
MemoryWrite<uint64_t>(address, xreg(rt));
MemoryWrite<uint64_t>(address2, xreg(rt2));
break;
}
case STP_d: {
- DCHECK(access_size == kDRegSize);
+ DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize));
MemoryWrite<double>(address, dreg(rt));
MemoryWrite<double>(address2, dreg(rt2));
break;
}
+ case STP_q: {
+ DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
+ MemoryWrite<qreg_t>(address, qreg(rt));
+ MemoryWrite<qreg_t>(address2, qreg(rt2));
+ break;
+ }
default: UNREACHABLE();
}
@@ -1859,19 +2160,25 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
// register:value trace generated by set_*reg().
if (instr->IsLoad()) {
if ((op == LDP_s) || (op == LDP_d)) {
- LogReadFP(address, access_size, rt);
- LogReadFP(address2, access_size, rt2);
+ LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(access_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if (op == LDP_q) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSize(access_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
} else {
- LogRead(address, access_size, rt);
- LogRead(address2, access_size, rt2);
+ LogRead(address, rt, GetPrintRegisterFormatForSize(access_size));
+ LogRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
}
} else {
if ((op == STP_s) || (op == STP_d)) {
- LogWriteFP(address, access_size, rt);
- LogWriteFP(address2, access_size, rt2);
+ LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(access_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if (op == STP_q) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
} else {
- LogWrite(address, access_size, rt);
- LogWrite(address2, access_size, rt2);
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
+ LogWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
}
}
@@ -1897,27 +2204,29 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
- base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
- local_monitor_.NotifyLoad(address);
+ {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoad();
+ }
switch (instr->Mask(LoadLiteralMask)) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
- // LOG_FP_REGS), then print a more detailed log.
+ // LOG_VREGS), then print a more detailed log.
case LDR_w_lit:
set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
- LogRead(address, kWRegSize, rt);
+ LogRead(address, rt, kPrintWReg);
break;
case LDR_x_lit:
set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
- LogRead(address, kXRegSize, rt);
+ LogRead(address, rt, kPrintXReg);
break;
case LDR_s_lit:
set_sreg_no_log(rt, MemoryRead<float>(address));
- LogReadFP(address, kSRegSize, rt);
+ LogVRead(address, rt, kPrintSReg);
break;
case LDR_d_lit:
set_dreg_no_log(rt, MemoryRead<double>(address));
- LogReadFP(address, kDRegSize, rt);
+ LogVRead(address, rt, kPrintDReg);
break;
default: UNREACHABLE();
}
@@ -1992,7 +2301,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
global_monitor_.Pointer()->NotifyLoadExcl_Locked(
address, &global_monitor_processor_);
} else {
- local_monitor_.NotifyLoad(address);
+ local_monitor_.NotifyLoad();
}
switch (op) {
case LDAR_b:
@@ -2010,7 +2319,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
default:
UNIMPLEMENTED();
}
- LogRead(address, access_size, rt);
+ LogRead(address, rt, GetPrintRegisterFormatForSize(access_size));
} else {
if (is_exclusive) {
unsigned rs = instr->Rs();
@@ -2031,15 +2340,14 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
default:
UNIMPLEMENTED();
}
- LogWrite(address, access_size, rt);
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
set_wreg(rs, 0);
} else {
set_wreg(rs, 1);
}
} else {
- local_monitor_.NotifyStore(address);
- global_monitor_.Pointer()->NotifyStore_Locked(address,
- &global_monitor_processor_);
+ local_monitor_.NotifyStore();
+ global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
switch (op) {
case STLR_b:
MemoryWrite<uint8_t>(address, wreg(rt));
@@ -2210,7 +2518,7 @@ void Simulator::DataProcessing2Source(Instruction* instr) {
}
case UDIV_w:
case UDIV_x: {
- typedef typename make_unsigned<T>::type unsignedT;
+ typedef typename std::make_unsigned<T>::type unsignedT;
unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn()));
unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm()));
if (rm == 0) {
@@ -2315,7 +2623,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
template <typename T>
void Simulator::BitfieldHelper(Instruction* instr) {
- typedef typename make_unsigned<T>::type unsignedT;
+ typedef typename std::make_unsigned<T>::type unsignedT;
T reg_size = sizeof(T) * 8;
T R = instr->ImmR();
T S = instr->ImmS();
@@ -2528,62 +2836,22 @@ void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
}
-int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kWMaxInt) {
- return kWMaxInt;
- } else if (value < kWMinInt) {
- return kWMinInt;
- }
- return std::isnan(value) ? 0 : static_cast<int32_t>(value);
-}
-
-
-int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kXMaxInt) {
- return kXMaxInt;
- } else if (value < kXMinInt) {
- return kXMinInt;
- }
- return std::isnan(value) ? 0 : static_cast<int64_t>(value);
-}
-
-
-uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kWMaxUInt) {
- return kWMaxUInt;
- } else if (value < 0.0) {
- return 0;
- }
- return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
-}
-
-
-uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
- value = FPRoundInt(value, rmode);
- if (value >= kXMaxUInt) {
- return kXMaxUInt;
- } else if (value < 0.0) {
- return 0;
- }
- return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
-}
-
-
void Simulator::VisitFPCompare(Instruction* instr) {
AssertSupportedFPCR();
- unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
- : kSRegSizeInBits;
- double fn_val = fpreg(reg_size, instr->Rn());
-
switch (instr->Mask(FPCompareMask)) {
case FCMP_s:
- case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
+ FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
+ break;
+ case FCMP_d:
+ FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
+ break;
case FCMP_s_zero:
- case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
+ FPCompare(sreg(instr->Rn()), 0.0f);
+ break;
+ case FCMP_d_zero:
+ FPCompare(dreg(instr->Rn()), 0.0);
+ break;
default: UNIMPLEMENTED();
}
}
@@ -2594,13 +2862,16 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) {
switch (instr->Mask(FPConditionalCompareMask)) {
case FCCMP_s:
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
case FCCMP_d: {
if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
- // If the condition passes, set the status flags to the result of
- // comparing the operands.
- unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
- : kSRegSizeInBits;
- FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
+ FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
} else {
// If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv());
@@ -2634,479 +2905,147 @@ void Simulator::VisitFPConditionalSelect(Instruction* instr) {
void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
AssertSupportedFPCR();
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ bool inexact_exception = false;
+
unsigned fd = instr->Rd();
unsigned fn = instr->Rn();
switch (instr->Mask(FPDataProcessing1SourceMask)) {
- case FMOV_s: set_sreg(fd, sreg(fn)); break;
- case FMOV_d: set_dreg(fd, dreg(fn)); break;
- case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
- case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
- case FNEG_s: set_sreg(fd, -sreg(fn)); break;
- case FNEG_d: set_dreg(fd, -dreg(fn)); break;
- case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
- case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
- case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
- case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
- case FRINTM_s:
- set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break;
- case FRINTM_d:
- set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break;
- case FRINTP_s:
- set_sreg(fd, FPRoundInt(sreg(fn), FPPositiveInfinity));
+ case FMOV_s:
+ set_sreg(fd, sreg(fn));
+ return;
+ case FMOV_d:
+ set_dreg(fd, dreg(fn));
+ return;
+ case FABS_s:
+ case FABS_d:
+ fabs_(vform, vreg(fd), vreg(fn));
+ // Explicitly log the register update whilst we have type information.
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform));
+ return;
+ case FNEG_s:
+ case FNEG_d:
+ fneg(vform, vreg(fd), vreg(fn));
+ // Explicitly log the register update whilst we have type information.
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform));
+ return;
+ case FCVT_ds:
+ set_dreg(fd, FPToDouble(sreg(fn)));
+ return;
+ case FCVT_sd:
+ set_sreg(fd, FPToFloat(dreg(fn), FPTieEven));
+ return;
+ case FCVT_hs:
+ set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven));
+ return;
+ case FCVT_sh:
+ set_sreg(fd, FPToFloat(hreg(fn)));
+ return;
+ case FCVT_dh:
+ set_dreg(fd, FPToDouble(FPToFloat(hreg(fn))));
+ return;
+ case FCVT_hd:
+ set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven));
+ return;
+ case FSQRT_s:
+ case FSQRT_d:
+ fsqrt(vform, rd, rn);
+ // Explicitly log the register update whilst we have type information.
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform));
+ return;
+ case FRINTI_s:
+ case FRINTI_d:
+ break; // Use FPCR rounding mode.
+ case FRINTX_s:
+ case FRINTX_d:
+ inexact_exception = true;
break;
- case FRINTP_d:
- set_dreg(fd, FPRoundInt(dreg(fn), FPPositiveInfinity));
- break;
- case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
- case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
- case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
- case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
- case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
- case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-// Assemble the specified IEEE-754 components into the target type and apply
-// appropriate rounding.
-// sign: 0 = positive, 1 = negative
-// exponent: Unbiased IEEE-754 exponent.
-// mantissa: The mantissa of the input. The top bit (which is not encoded for
-// normal IEEE-754 values) must not be omitted. This bit has the
-// value 'pow(2, exponent)'.
-//
-// The input value is assumed to be a normalized value. That is, the input may
-// not be infinity or NaN. If the source value is subnormal, it must be
-// normalized before calling this function such that the highest set bit in the
-// mantissa has the value 'pow(2, exponent)'.
-//
-// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
-// calling a templated FPRound.
-template <class T, int ebits, int mbits>
-static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
- FPRounding round_mode) {
- DCHECK((sign == 0) || (sign == 1));
-
- // Only the FPTieEven rounding mode is implemented.
- DCHECK(round_mode == FPTieEven);
- USE(round_mode);
-
- // Rounding can promote subnormals to normals, and normals to infinities. For
- // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
- // encodable as a float, but rounding based on the low-order mantissa bits
- // could make it overflow. With ties-to-even rounding, this value would become
- // an infinity.
-
- // ---- Rounding Method ----
- //
- // The exponent is irrelevant in the rounding operation, so we treat the
- // lowest-order bit that will fit into the result ('onebit') as having
- // the value '1'. Similarly, the highest-order bit that won't fit into
- // the result ('halfbit') has the value '0.5'. The 'point' sits between
- // 'onebit' and 'halfbit':
- //
- // These bits fit into the result.
- // |---------------------|
- // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- // ||
- // / |
- // / halfbit
- // onebit
- //
- // For subnormal outputs, the range of representable bits is smaller and
- // the position of onebit and halfbit depends on the exponent of the
- // input, but the method is otherwise similar.
- //
- // onebit(frac)
- // |
- // | halfbit(frac) halfbit(adjusted)
- // | / /
- // | | |
- // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
- // 0b00.0... -> 0b00.0... -> 0b00
- // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
- // 0b00.1... -> 0b00.1... -> 0b01
- // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
- // 0b01.0... -> 0b01.0... -> 0b01
- // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
- // 0b01.1... -> 0b01.1... -> 0b10
- // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
- // 0b10.0... -> 0b10.0... -> 0b10
- // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
- // 0b10.1... -> 0b10.1... -> 0b11
- // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
- // ... / | / |
- // / | / |
- // / |
- // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / |
- //
- // mantissa = (mantissa >> shift) + halfbit(adjusted);
-
- static const int mantissa_offset = 0;
- static const int exponent_offset = mantissa_offset + mbits;
- static const int sign_offset = exponent_offset + ebits;
- STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
-
- // Bail out early for zero inputs.
- if (mantissa == 0) {
- return static_cast<T>(sign << sign_offset);
- }
-
- // If all bits in the exponent are set, the value is infinite or NaN.
- // This is true for all binary IEEE-754 formats.
- static const int infinite_exponent = (1 << ebits) - 1;
- static const int max_normal_exponent = infinite_exponent - 1;
-
- // Apply the exponent bias to encode it for the result. Doing this early makes
- // it easy to detect values that will be infinite or subnormal.
- exponent += max_normal_exponent >> 1;
-
- if (exponent > max_normal_exponent) {
- // Overflow: The input is too large for the result type to represent. The
- // FPTieEven rounding mode handles overflows using infinities.
- exponent = infinite_exponent;
- mantissa = 0;
- return static_cast<T>((sign << sign_offset) |
- (exponent << exponent_offset) |
- (mantissa << mantissa_offset));
- }
-
- // Calculate the shift required to move the top mantissa bit to the proper
- // place in the destination type.
- const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
- int shift = highest_significant_bit - mbits;
-
- if (exponent <= 0) {
- // The output will be subnormal (before rounding).
-
- // For subnormal outputs, the shift must be adjusted by the exponent. The +1
- // is necessary because the exponent of a subnormal value (encoded as 0) is
- // the same as the exponent of the smallest normal value (encoded as 1).
- shift += -exponent + 1;
-
- // Handle inputs that would produce a zero output.
- //
- // Shifts higher than highest_significant_bit+1 will always produce a zero
- // result. A shift of exactly highest_significant_bit+1 might produce a
- // non-zero result after rounding.
- if (shift > (highest_significant_bit + 1)) {
- // The result will always be +/-0.0.
- return static_cast<T>(sign << sign_offset);
- }
-
- // Properly encode the exponent for a subnormal output.
- exponent = 0;
- } else {
- // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
- // normal values.
- mantissa &= ~(1UL << highest_significant_bit);
- }
-
- if (shift > 0) {
- // We have to shift the mantissa to the right. Some precision is lost, so we
- // need to apply rounding.
- uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
- uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
- uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
- T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
-
- T result =
- static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
- ((mantissa >> shift) << mantissa_offset));
-
- // A very large mantissa can overflow during rounding. If this happens, the
- // exponent should be incremented and the mantissa set to 1.0 (encoded as
- // 0). Applying halfbit_adjusted after assembling the float has the nice
- // side-effect that this case is handled for free.
- //
- // This also handles cases where a very large finite value overflows to
- // infinity, or where a very large subnormal value overflows to become
- // normal.
- return result + halfbit_adjusted;
- } else {
- // We have to shift the mantissa to the left (or not at all). The input
- // mantissa is exactly representable in the output mantissa, so apply no
- // rounding correction.
- return static_cast<T>((sign << sign_offset) |
- (exponent << exponent_offset) |
- ((mantissa << -shift) << mantissa_offset));
- }
-}
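As a hand-worked instance of the rounding table in the comment of this removed function, take shift == 1, i.e. one excess mantissa bit:

  // mantissa = 0b101 (the "0b10.1 (exact)" row):
  //   onebit  = (0b101 >> 1) & 1 = 0
  //   halfbit =  0b101       & 1 = 1
  //   adjusted = 0b101 - (1 & ~0) = 0b100, so halfbit_adjusted = 0
  //   result mantissa = (0b101 >> 1) + 0 = 0b10   (the tie rounds down to even)
  // mantissa = 0b011 (the "0b01.1 (exact)" row):
  //   onebit = 1, halfbit = 1, adjusted = 0b011, so halfbit_adjusted = 1
  //   result mantissa = (0b011 >> 1) + 1 = 0b10   (the tie rounds up to even)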
-
-
-// See FPRound for a description of this function.
-static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
- uint64_t mantissa, FPRounding round_mode) {
- int64_t bits =
- FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
- exponent,
- mantissa,
- round_mode);
- return rawbits_to_double(bits);
-}
-
-
-// See FPRound for a description of this function.
-static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
- uint64_t mantissa, FPRounding round_mode) {
- int32_t bits =
- FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
- exponent,
- mantissa,
- round_mode);
- return rawbits_to_float(bits);
-}
-
-
-double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
- if (src >= 0) {
- return UFixedToDouble(src, fbits, round);
- } else {
- // This works for all negative values, including INT64_MIN.
- return -UFixedToDouble(-src, fbits, round);
- }
-}
-
-
-double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
- // An input of 0 is a special case because the result is effectively
- // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
- if (src == 0) {
- return 0.0;
- }
-
- // Calculate the exponent. The highest significant bit will have the value
- // 2^exponent.
- const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
- const int64_t exponent = highest_significant_bit - fbits;
-
- return FPRoundToDouble(0, exponent, src, round);
-}
-
-
-float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
- if (src >= 0) {
- return UFixedToFloat(src, fbits, round);
- } else {
- // This works for all negative values, including INT64_MIN.
- return -UFixedToFloat(-src, fbits, round);
- }
-}
-
-
-float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
- // An input of 0 is a special case because the result is effectively
- // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
- if (src == 0) {
- return 0.0f;
- }
-
- // Calculate the exponent. The highest significant bit will have the value
- // 2^exponent.
- const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
- const int32_t exponent = highest_significant_bit - fbits;
-
- return FPRoundToFloat(0, exponent, src, round);
-}
-
-
-double Simulator::FPRoundInt(double value, FPRounding round_mode) {
- if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
- (value == kFP64NegativeInfinity)) {
- return value;
- } else if (std::isnan(value)) {
- return FPProcessNaN(value);
- }
-
- double int_result = floor(value);
- double error = value - int_result;
- switch (round_mode) {
- case FPTieAway: {
- // Take care of correctly handling the range ]-0.5, -0.0], which must
- // yield -0.0.
- if ((-0.5 < value) && (value < 0.0)) {
- int_result = -0.0;
-
- } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
- // If the error is greater than 0.5, or is equal to 0.5 and the integer
- // result is positive, round up.
- int_result++;
- }
+ case FRINTA_s:
+ case FRINTA_d:
+ fpcr_rounding = FPTieAway;
break;
- }
- case FPTieEven: {
- // Take care of correctly handling the range [-0.5, -0.0], which must
- // yield -0.0.
- if ((-0.5 <= value) && (value < 0.0)) {
- int_result = -0.0;
-
- // If the error is greater than 0.5, or is equal to 0.5 and the integer
- // result is odd, round up.
- } else if ((error > 0.5) ||
- ((error == 0.5) && (modulo(int_result, 2) != 0))) {
- int_result++;
- }
+ case FRINTM_s:
+ case FRINTM_d:
+ fpcr_rounding = FPNegativeInfinity;
break;
- }
- case FPZero: {
- // If value > 0 then we take floor(value)
- // otherwise, ceil(value)
- if (value < 0) {
- int_result = ceil(value);
- }
+ case FRINTN_s:
+ case FRINTN_d:
+ fpcr_rounding = FPTieEven;
break;
- }
- case FPNegativeInfinity: {
- // We always use floor(value).
+ case FRINTP_s:
+ case FRINTP_d:
+ fpcr_rounding = FPPositiveInfinity;
break;
- }
- case FPPositiveInfinity: {
- int_result = ceil(value);
+ case FRINTZ_s:
+ case FRINTZ_d:
+ fpcr_rounding = FPZero;
break;
- }
- default: UNIMPLEMENTED();
- }
- return int_result;
-}
-
-
-double Simulator::FPToDouble(float value) {
- switch (std::fpclassify(value)) {
- case FP_NAN: {
- if (fpcr().DN()) return kFP64DefaultNaN;
-
- // Convert NaNs as the processor would:
- // - The sign is propagated.
- // - The payload (mantissa) is transferred entirely, except that the top
- // bit is forced to '1', making the result a quiet NaN. The unused
- // (low-order) payload bits are set to 0.
- uint32_t raw = float_to_rawbits(value);
-
- uint64_t sign = raw >> 31;
- uint64_t exponent = (1 << 11) - 1;
- uint64_t payload = unsigned_bitextract_64(21, 0, raw);
- payload <<= (52 - 23); // The unused low-order bits should be 0.
- payload |= (1L << 51); // Force a quiet NaN.
-
- return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
- }
-
- case FP_ZERO:
- case FP_NORMAL:
- case FP_SUBNORMAL:
- case FP_INFINITE: {
- // All other inputs are preserved in a standard cast, because every value
- // representable using an IEEE-754 float is also representable using an
- // IEEE-754 double.
- return static_cast<double>(value);
- }
- }
-
- UNREACHABLE();
- return static_cast<double>(value);
-}
-
-
-float Simulator::FPToFloat(double value, FPRounding round_mode) {
- // Only the FPTieEven rounding mode is implemented.
- DCHECK(round_mode == FPTieEven);
- USE(round_mode);
-
- switch (std::fpclassify(value)) {
- case FP_NAN: {
- if (fpcr().DN()) return kFP32DefaultNaN;
-
- // Convert NaNs as the processor would:
- // - The sign is propagated.
- // - The payload (mantissa) is transferred as much as possible, except
- // that the top bit is forced to '1', making the result a quiet NaN.
- uint64_t raw = double_to_rawbits(value);
-
- uint32_t sign = raw >> 63;
- uint32_t exponent = (1 << 8) - 1;
- uint32_t payload =
- static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
- payload |= (1 << 22); // Force a quiet NaN.
-
- return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
- }
-
- case FP_ZERO:
- case FP_INFINITE: {
- // In a C++ cast, any value representable in the target type will be
- // unchanged. This is always the case for +/-0.0 and infinities.
- return static_cast<float>(value);
- }
-
- case FP_NORMAL:
- case FP_SUBNORMAL: {
- // Convert double-to-float as the processor would, assuming that FPCR.FZ
- // (flush-to-zero) is not set.
- uint64_t raw = double_to_rawbits(value);
- // Extract the IEEE-754 double components.
- uint32_t sign = raw >> 63;
- // Extract the exponent and remove the IEEE-754 encoding bias.
- int32_t exponent =
- static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
- // Extract the mantissa and add the implicit '1' bit.
- uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
- if (std::fpclassify(value) == FP_NORMAL) {
- mantissa |= (1UL << 52);
- }
- return FPRoundToFloat(sign, exponent, mantissa, round_mode);
- }
+ default:
+ UNIMPLEMENTED();
}
- UNREACHABLE();
- return value;
+ // Only FRINT* instructions fall through the switch above.
+ frint(vform, rd, rn, fpcr_rounding, inexact_exception);
+  // Explicitly log the register update whilst we have type information.
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform));
}
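// The FRINT cases above only pick a rounding mode; the arithmetic itself is
// done by frint(). A minimal, self-contained sketch of how the FPTieAway and
// FPTieEven modes differ at halfway cases (plain C++, not simulator code;
// the helper names are illustrative only):
#include <cassert>
#include <cmath>

double RoundTieAway(double value) {
  // std::round() rounds halfway cases away from zero, matching FPTieAway.
  return std::round(value);
}

double RoundTieEven(double value) {
  double lower = std::floor(value);
  double error = value - lower;
  // Round up when the error exceeds 0.5, or equals 0.5 and the lower
  // neighbour is odd, so that ties land on the even neighbour.
  if ((error > 0.5) || ((error == 0.5) && (std::fmod(lower, 2.0) != 0.0))) {
    lower += 1.0;
  }
  return lower;
}

void RoundingModeExample() {
  assert(RoundTieAway(2.5) == 3.0);  // Ties go away from zero.
  assert(RoundTieEven(2.5) == 2.0);  // Ties go to the even neighbour.
}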
-
void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
AssertSupportedFPCR();
- unsigned fd = instr->Rd();
- unsigned fn = instr->Rn();
- unsigned fm = instr->Rm();
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
- // Fmaxnm and Fminnm have special NaN handling.
switch (instr->Mask(FPDataProcessing2SourceMask)) {
- case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
- case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
- case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
- case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
- default:
- break; // Fall through.
- }
-
- if (FPProcessNaNs(instr)) return;
-
- switch (instr->Mask(FPDataProcessing2SourceMask)) {
- case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
- case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
- case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
- case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
- case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
- case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
- case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
- case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
- case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
- case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
- case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
- case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
+ case FADD_s:
+ case FADD_d:
+ fadd(vform, rd, rn, rm);
+ break;
+ case FSUB_s:
+ case FSUB_d:
+ fsub(vform, rd, rn, rm);
+ break;
+ case FMUL_s:
+ case FMUL_d:
+ fmul(vform, rd, rn, rm);
+ break;
+ case FNMUL_s:
+ case FNMUL_d:
+ fnmul(vform, rd, rn, rm);
+ break;
+ case FDIV_s:
+ case FDIV_d:
+ fdiv(vform, rd, rn, rm);
+ break;
+ case FMAX_s:
+ case FMAX_d:
+ fmax(vform, rd, rn, rm);
+ break;
+ case FMIN_s:
+ case FMIN_d:
+ fmin(vform, rd, rn, rm);
+ break;
case FMAXNM_s:
case FMAXNM_d:
+ fmaxnm(vform, rd, rn, rm);
+ break;
case FMINNM_s:
case FMINNM_d:
- // These were handled before the standard FPProcessNaNs() stage.
+ fminnm(vform, rd, rn, rm);
+ break;
+ default:
UNREACHABLE();
- default: UNIMPLEMENTED();
}
+ // Explicitly log the register update whilst we have type information.
+ LogVRegister(instr->Rd(), GetPrintRegisterFormatFP(vform));
}
-
void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
AssertSupportedFPCR();
@@ -3117,10 +3056,18 @@ void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
switch (instr->Mask(FPDataProcessing3SourceMask)) {
// fd = fa +/- (fn * fm)
- case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
- case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
- case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
- case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ case FMADD_s:
+ set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FMSUB_s:
+ set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FMADD_d:
+ set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ case FMSUB_d:
+ set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm)));
+ break;
// Negated variants of the above.
case FNMADD_s:
set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
@@ -3134,232 +3081,11 @@ void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
case FNMSUB_d:
set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
break;
- default: UNIMPLEMENTED();
- }
-}
-
-
-template <typename T>
-T Simulator::FPAdd(T op1, T op2) {
- // NaNs should be handled elsewhere.
- DCHECK(!std::isnan(op1) && !std::isnan(op2));
-
- if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
- // inf + -inf returns the default NaN.
- return FPDefaultNaN<T>();
- } else {
- // Other cases should be handled by standard arithmetic.
- return op1 + op2;
- }
-}
-
-
-template <typename T>
-T Simulator::FPDiv(T op1, T op2) {
- // NaNs should be handled elsewhere.
- DCHECK(!std::isnan(op1) && !std::isnan(op2));
-
- if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
- // inf / inf and 0.0 / 0.0 return the default NaN.
- return FPDefaultNaN<T>();
- } else {
- // Other cases should be handled by standard arithmetic.
- return op1 / op2;
- }
-}
-
-
-template <typename T>
-T Simulator::FPMax(T a, T b) {
- // NaNs should be handled elsewhere.
- DCHECK(!std::isnan(a) && !std::isnan(b));
-
- if ((a == 0.0) && (b == 0.0) &&
- (copysign(1.0, a) != copysign(1.0, b))) {
- // a and b are zero, and the sign differs: return +0.0.
- return 0.0;
- } else {
- return (a > b) ? a : b;
- }
-}
-
-
-template <typename T>
-T Simulator::FPMaxNM(T a, T b) {
- if (IsQuietNaN(a) && !IsQuietNaN(b)) {
- a = kFP64NegativeInfinity;
- } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
- b = kFP64NegativeInfinity;
- }
-
- T result = FPProcessNaNs(a, b);
- return std::isnan(result) ? result : FPMax(a, b);
-}
-
-template <typename T>
-T Simulator::FPMin(T a, T b) {
- // NaNs should be handled elsewhere.
- DCHECK(!std::isnan(a) && !std::isnan(b));
-
- if ((a == 0.0) && (b == 0.0) &&
- (copysign(1.0, a) != copysign(1.0, b))) {
- // a and b are zero, and the sign differs: return -0.0.
- return -0.0;
- } else {
- return (a < b) ? a : b;
- }
-}
-
-
-template <typename T>
-T Simulator::FPMinNM(T a, T b) {
- if (IsQuietNaN(a) && !IsQuietNaN(b)) {
- a = kFP64PositiveInfinity;
- } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
- b = kFP64PositiveInfinity;
- }
-
- T result = FPProcessNaNs(a, b);
- return std::isnan(result) ? result : FPMin(a, b);
-}
-
-
-template <typename T>
-T Simulator::FPMul(T op1, T op2) {
- // NaNs should be handled elsewhere.
- DCHECK(!std::isnan(op1) && !std::isnan(op2));
-
- if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
- // inf * 0.0 returns the default NaN.
- return FPDefaultNaN<T>();
- } else {
- // Other cases should be handled by standard arithmetic.
- return op1 * op2;
- }
-}
-
-
-template<typename T>
-T Simulator::FPMulAdd(T a, T op1, T op2) {
- T result = FPProcessNaNs3(a, op1, op2);
-
- T sign_a = copysign(1.0, a);
- T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
- bool isinf_prod = std::isinf(op1) || std::isinf(op2);
- bool operation_generates_nan =
- (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
- (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
- (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
-
- if (std::isnan(result)) {
- // Generated NaNs override quiet NaNs propagated from a.
- if (operation_generates_nan && IsQuietNaN(a)) {
- return FPDefaultNaN<T>();
- } else {
- return result;
- }
- }
-
- // If the operation would produce a NaN, return the default NaN.
- if (operation_generates_nan) {
- return FPDefaultNaN<T>();
- }
-
- // Work around broken fma implementations for exact zero results: The sign of
- // exact 0.0 results is positive unless both a and op1 * op2 are negative.
- if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
- return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
- }
-
- result = FusedMultiplyAdd(op1, op2, a);
- DCHECK(!std::isnan(result));
-
- // Work around broken fma implementations for rounded zero results: If a is
- // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
- if ((a == 0.0) && (result == 0.0)) {
- return copysign(0.0, sign_prod);
- }
-
- return result;
-}
-
-
-template <typename T>
-T Simulator::FPSqrt(T op) {
- if (std::isnan(op)) {
- return FPProcessNaN(op);
- } else if (op < 0.0) {
- return FPDefaultNaN<T>();
- } else {
- lazily_initialize_fast_sqrt(isolate_);
- return fast_sqrt(op, isolate_);
- }
-}
-
-
-template <typename T>
-T Simulator::FPSub(T op1, T op2) {
- // NaNs should be handled elsewhere.
- DCHECK(!std::isnan(op1) && !std::isnan(op2));
-
- if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
- // inf - inf returns the default NaN.
- return FPDefaultNaN<T>();
- } else {
- // Other cases should be handled by standard arithmetic.
- return op1 - op2;
- }
-}
-
-
-template <typename T>
-T Simulator::FPProcessNaN(T op) {
- DCHECK(std::isnan(op));
- return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
-}
-
-
-template <typename T>
-T Simulator::FPProcessNaNs(T op1, T op2) {
- if (IsSignallingNaN(op1)) {
- return FPProcessNaN(op1);
- } else if (IsSignallingNaN(op2)) {
- return FPProcessNaN(op2);
- } else if (std::isnan(op1)) {
- DCHECK(IsQuietNaN(op1));
- return FPProcessNaN(op1);
- } else if (std::isnan(op2)) {
- DCHECK(IsQuietNaN(op2));
- return FPProcessNaN(op2);
- } else {
- return 0.0;
- }
-}
-
-
-template <typename T>
-T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
- if (IsSignallingNaN(op1)) {
- return FPProcessNaN(op1);
- } else if (IsSignallingNaN(op2)) {
- return FPProcessNaN(op2);
- } else if (IsSignallingNaN(op3)) {
- return FPProcessNaN(op3);
- } else if (std::isnan(op1)) {
- DCHECK(IsQuietNaN(op1));
- return FPProcessNaN(op1);
- } else if (std::isnan(op2)) {
- DCHECK(IsQuietNaN(op2));
- return FPProcessNaN(op2);
- } else if (std::isnan(op3)) {
- DCHECK(IsQuietNaN(op3));
- return FPProcessNaN(op3);
- } else {
- return 0.0;
+ default:
+ UNIMPLEMENTED();
}
}
-
bool Simulator::FPProcessNaNs(Instruction* instr) {
unsigned fd = instr->Rd();
unsigned fn = instr->Rn();
@@ -3469,31 +3195,24 @@ bool Simulator::PrintValue(const char* desc) {
}
int i = CodeFromName(desc);
- STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
- if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
+ static_assert(kNumberOfRegisters == kNumberOfVRegisters,
+ "Must be same number of Registers as VRegisters.");
+ if (i < 0 || static_cast<unsigned>(i) >= kNumberOfVRegisters) return false;
if (desc[0] == 'v') {
PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
- clr_fpreg_name, VRegNameForCode(i),
- clr_fpreg_value, double_to_rawbits(dreg(i)),
- clr_normal,
- clr_fpreg_name, DRegNameForCode(i),
- clr_fpreg_value, dreg(i),
- clr_fpreg_name, SRegNameForCode(i),
- clr_fpreg_value, sreg(i),
- clr_normal);
+ clr_vreg_name, VRegNameForCode(i), clr_vreg_value,
+ bit_cast<uint64_t>(dreg(i)), clr_normal, clr_vreg_name,
+ DRegNameForCode(i), clr_vreg_value, dreg(i), clr_vreg_name,
+ SRegNameForCode(i), clr_vreg_value, sreg(i), clr_normal);
return true;
} else if (desc[0] == 'd') {
- PrintF(stream_, "%s %s:%s %g%s\n",
- clr_fpreg_name, DRegNameForCode(i),
- clr_fpreg_value, dreg(i),
- clr_normal);
+ PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, DRegNameForCode(i),
+ clr_vreg_value, dreg(i), clr_normal);
return true;
} else if (desc[0] == 's') {
- PrintF(stream_, "%s %s:%s %g%s\n",
- clr_fpreg_name, SRegNameForCode(i),
- clr_fpreg_value, sreg(i),
- clr_normal);
+ PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, SRegNameForCode(i),
+ clr_vreg_value, sreg(i), clr_normal);
return true;
} else if (desc[0] == 'w') {
PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n",
@@ -3619,7 +3338,7 @@ void Simulator::Debug() {
if (argc == 2) {
if (strcmp(arg1, "all") == 0) {
PrintRegisters();
- PrintFPRegisters();
+ PrintVRegisters();
} else {
if (!PrintValue(arg1)) {
PrintF("%s unrecognized\n", arg1);
@@ -3845,7 +3564,9 @@ void Simulator::VisitException(Instruction* instr) {
set_log_parameters(log_parameters() | parameters);
if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
if (parameters & LOG_REGS) { PrintRegisters(); }
- if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
+ if (parameters & LOG_VREGS) {
+ PrintVRegisters();
+ }
break;
case TRACE_DISABLE:
set_log_parameters(log_parameters() & ~parameters);
@@ -3861,7 +3582,7 @@ void Simulator::VisitException(Instruction* instr) {
// Print the requested information.
if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
if (parameters & LOG_REGS) PrintRegisters();
- if (parameters & LOG_FP_REGS) PrintFPRegisters();
+ if (parameters & LOG_VREGS) PrintVRegisters();
}
// The stop parameters are inlined in the code. Skip them:
@@ -3892,12 +3613,2131 @@ void Simulator::VisitException(Instruction* instr) {
}
break;
}
+ case BRK:
+ base::OS::DebugBreak();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::VisitNEON2RegMisc(Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ // Format mapping for "long pair" instructions, [su]addlp, [su]adalp.
+ static const NEONFormatMap map_lp = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
+ VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
+
+ static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
+ VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
+
+ static const NEONFormatMap map_fcvtn = {{22, 30},
+ {NF_4H, NF_8H, NF_2S, NF_4S}};
+ VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64:
+ rev64(vf, rd, rn);
+ break;
+ case NEON_REV32:
+ rev32(vf, rd, rn);
+ break;
+ case NEON_REV16:
+ rev16(vf, rd, rn);
+ break;
+ case NEON_SUQADD:
+ suqadd(vf, rd, rn);
+ break;
+ case NEON_USQADD:
+ usqadd(vf, rd, rn);
+ break;
+ case NEON_CLS:
+ cls(vf, rd, rn);
+ break;
+ case NEON_CLZ:
+ clz(vf, rd, rn);
+ break;
+ case NEON_CNT:
+ cnt(vf, rd, rn);
+ break;
+ case NEON_SQABS:
+ abs(vf, rd, rn).SignedSaturate(vf);
+ break;
+ case NEON_SQNEG:
+ neg(vf, rd, rn).SignedSaturate(vf);
+ break;
+ case NEON_CMGT_zero:
+ cmp(vf, rd, rn, 0, gt);
+ break;
+ case NEON_CMGE_zero:
+ cmp(vf, rd, rn, 0, ge);
+ break;
+ case NEON_CMEQ_zero:
+ cmp(vf, rd, rn, 0, eq);
+ break;
+ case NEON_CMLE_zero:
+ cmp(vf, rd, rn, 0, le);
+ break;
+ case NEON_CMLT_zero:
+ cmp(vf, rd, rn, 0, lt);
+ break;
+ case NEON_ABS:
+ abs(vf, rd, rn);
+ break;
+ case NEON_NEG:
+ neg(vf, rd, rn);
+ break;
+ case NEON_SADDLP:
+ saddlp(vf_lp, rd, rn);
+ break;
+ case NEON_UADDLP:
+ uaddlp(vf_lp, rd, rn);
+ break;
+ case NEON_SADALP:
+ sadalp(vf_lp, rd, rn);
+ break;
+ case NEON_UADALP:
+ uadalp(vf_lp, rd, rn);
+ break;
+ case NEON_RBIT_NOT:
+ vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->FPType()) {
+ case 0:
+ not_(vf, rd, rn);
+ break;
+ case 1:
+ rbit(vf, rd, rn);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ bool inexact_exception = false;
+
+ // These instructions all use a one bit size field, except XTN, SQXTUN,
+ // SHLL, SQXTN and UQXTN, which use a two bit size field.
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS:
+ fabs_(fpf, rd, rn);
+ return;
+ case NEON_FNEG:
+ fneg(fpf, rd, rn);
+ return;
+ case NEON_FSQRT:
+ fsqrt(fpf, rd, rn);
+ return;
+ case NEON_FCVTL:
+ if (instr->Mask(NEON_Q)) {
+ fcvtl2(vf_fcvtl, rd, rn);
+ } else {
+ fcvtl(vf_fcvtl, rd, rn);
+ }
+ return;
+ case NEON_FCVTN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtn(vf_fcvtn, rd, rn);
+ }
+ return;
+ case NEON_FCVTXN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtxn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtxn(vf_fcvtn, rd, rn);
+ }
+ return;
+
+ // The following instructions break from the switch statement, rather
+ // than return.
+ case NEON_FRINTI:
+ break; // Use FPCR rounding mode.
+ case NEON_FRINTX:
+ inexact_exception = true;
+ break;
+ case NEON_FRINTA:
+ fpcr_rounding = FPTieAway;
+ break;
+ case NEON_FRINTM:
+ fpcr_rounding = FPNegativeInfinity;
+ break;
+ case NEON_FRINTN:
+ fpcr_rounding = FPTieEven;
+ break;
+ case NEON_FRINTP:
+ fpcr_rounding = FPPositiveInfinity;
+ break;
+ case NEON_FRINTZ:
+ fpcr_rounding = FPZero;
+ break;
+
+ // The remaining cases return to the caller.
+ case NEON_FCVTNS:
+ fcvts(fpf, rd, rn, FPTieEven);
+ return;
+ case NEON_FCVTNU:
+ fcvtu(fpf, rd, rn, FPTieEven);
+ return;
+ case NEON_FCVTPS:
+ fcvts(fpf, rd, rn, FPPositiveInfinity);
+ return;
+ case NEON_FCVTPU:
+ fcvtu(fpf, rd, rn, FPPositiveInfinity);
+ return;
+ case NEON_FCVTMS:
+ fcvts(fpf, rd, rn, FPNegativeInfinity);
+ return;
+ case NEON_FCVTMU:
+ fcvtu(fpf, rd, rn, FPNegativeInfinity);
+ return;
+ case NEON_FCVTZS:
+ fcvts(fpf, rd, rn, FPZero);
+ return;
+ case NEON_FCVTZU:
+ fcvtu(fpf, rd, rn, FPZero);
+ return;
+ case NEON_FCVTAS:
+ fcvts(fpf, rd, rn, FPTieAway);
+ return;
+ case NEON_FCVTAU:
+ fcvtu(fpf, rd, rn, FPTieAway);
+ return;
+ case NEON_SCVTF:
+ scvtf(fpf, rd, rn, 0, fpcr_rounding);
+ return;
+ case NEON_UCVTF:
+ ucvtf(fpf, rd, rn, 0, fpcr_rounding);
+ return;
+ case NEON_URSQRTE:
+ ursqrte(fpf, rd, rn);
+ return;
+ case NEON_URECPE:
+ urecpe(fpf, rd, rn);
+ return;
+ case NEON_FRSQRTE:
+ frsqrte(fpf, rd, rn);
+ return;
+ case NEON_FRECPE:
+ frecpe(fpf, rd, rn, fpcr_rounding);
+ return;
+ case NEON_FCMGT_zero:
+ fcmp_zero(fpf, rd, rn, gt);
+ return;
+ case NEON_FCMGE_zero:
+ fcmp_zero(fpf, rd, rn, ge);
+ return;
+ case NEON_FCMEQ_zero:
+ fcmp_zero(fpf, rd, rn, eq);
+ return;
+ case NEON_FCMLE_zero:
+ fcmp_zero(fpf, rd, rn, le);
+ return;
+ case NEON_FCMLT_zero:
+ fcmp_zero(fpf, rd, rn, lt);
+ return;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN:
+ xtn(vf, rd, rn);
+ return;
+ case NEON_SQXTN:
+ sqxtn(vf, rd, rn);
+ return;
+ case NEON_UQXTN:
+ uqxtn(vf, rd, rn);
+ return;
+ case NEON_SQXTUN:
+ sqxtun(vf, rd, rn);
+ return;
+ case NEON_SHLL:
+ vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+ if (instr->Mask(NEON_Q)) {
+ shll2(vf, rd, rn);
+ } else {
+ shll(vf, rd, rn);
+ }
+ return;
+ default:
+ UNIMPLEMENTED();
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
+ }
+}
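// A small sketch of how NEONFormatMap tables like map_lp and map_fcvtn at
// the top of VisitNEON2RegMisc are read, assuming the convention that the
// listed instruction bits are concatenated with the first entry as the most
// significant bit of the table index (illustrative helper, not the decoder):
#include <cstdint>

int FormatIndexFromBits(uint32_t instruction, const int* bit_positions,
                        int num_bits) {
  int index = 0;
  for (int i = 0; i < num_bits; ++i) {
    index = (index << 1) | ((instruction >> bit_positions[i]) & 1);
  }
  return index;
}
// With map_fcvtn = {{22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}}, an instruction
// with bit 22 set and bit 30 clear yields index 2 and so selects NF_2S.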
+void Simulator::VisitNEON3Same(Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND:
+ and_(vf, rd, rn, rm);
+ break;
+ case NEON_ORR:
+ orr(vf, rd, rn, rm);
+ break;
+ case NEON_ORN:
+ orn(vf, rd, rn, rm);
+ break;
+ case NEON_EOR:
+ eor(vf, rd, rn, rm);
+ break;
+ case NEON_BIC:
+ bic(vf, rd, rn, rm);
+ break;
+ case NEON_BIF:
+ bif(vf, rd, rn, rm);
+ break;
+ case NEON_BIT:
+ bit(vf, rd, rn, rm);
+ break;
+ case NEON_BSL:
+ bsl(vf, rd, rn, rm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ switch (instr->Mask(NEON3SameFPMask)) {
+ case NEON_FADD:
+ fadd(vf, rd, rn, rm);
+ break;
+ case NEON_FSUB:
+ fsub(vf, rd, rn, rm);
+ break;
+ case NEON_FMUL:
+ fmul(vf, rd, rn, rm);
+ break;
+ case NEON_FDIV:
+ fdiv(vf, rd, rn, rm);
+ break;
+ case NEON_FMAX:
+ fmax(vf, rd, rn, rm);
+ break;
+ case NEON_FMIN:
+ fmin(vf, rd, rn, rm);
+ break;
+ case NEON_FMAXNM:
+ fmaxnm(vf, rd, rn, rm);
+ break;
+ case NEON_FMINNM:
+ fminnm(vf, rd, rn, rm);
+ break;
+ case NEON_FMLA:
+ fmla(vf, rd, rn, rm);
+ break;
+ case NEON_FMLS:
+ fmls(vf, rd, rn, rm);
+ break;
+ case NEON_FMULX:
+ fmulx(vf, rd, rn, rm);
+ break;
+ case NEON_FACGE:
+ fabscmp(vf, rd, rn, rm, ge);
+ break;
+ case NEON_FACGT:
+ fabscmp(vf, rd, rn, rm, gt);
+ break;
+ case NEON_FCMEQ:
+ fcmp(vf, rd, rn, rm, eq);
+ break;
+ case NEON_FCMGE:
+ fcmp(vf, rd, rn, rm, ge);
+ break;
+ case NEON_FCMGT:
+ fcmp(vf, rd, rn, rm, gt);
+ break;
+ case NEON_FRECPS:
+ frecps(vf, rd, rn, rm);
+ break;
+ case NEON_FRSQRTS:
+ frsqrts(vf, rd, rn, rm);
+ break;
+ case NEON_FABD:
+ fabd(vf, rd, rn, rm);
+ break;
+ case NEON_FADDP:
+ faddp(vf, rd, rn, rm);
+ break;
+ case NEON_FMAXP:
+ fmaxp(vf, rd, rn, rm);
+ break;
+ case NEON_FMAXNMP:
+ fmaxnmp(vf, rd, rn, rm);
+ break;
+ case NEON_FMINP:
+ fminp(vf, rd, rn, rm);
+ break;
+ case NEON_FMINNMP:
+ fminnmp(vf, rd, rn, rm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+ switch (instr->Mask(NEON3SameMask)) {
+ case NEON_ADD:
+ add(vf, rd, rn, rm);
+ break;
+ case NEON_ADDP:
+ addp(vf, rd, rn, rm);
+ break;
+ case NEON_CMEQ:
+ cmp(vf, rd, rn, rm, eq);
+ break;
+ case NEON_CMGE:
+ cmp(vf, rd, rn, rm, ge);
+ break;
+ case NEON_CMGT:
+ cmp(vf, rd, rn, rm, gt);
+ break;
+ case NEON_CMHI:
+ cmp(vf, rd, rn, rm, hi);
+ break;
+ case NEON_CMHS:
+ cmp(vf, rd, rn, rm, hs);
+ break;
+ case NEON_CMTST:
+ cmptst(vf, rd, rn, rm);
+ break;
+ case NEON_MLS:
+ mls(vf, rd, rn, rm);
+ break;
+ case NEON_MLA:
+ mla(vf, rd, rn, rm);
+ break;
+ case NEON_MUL:
+ mul(vf, rd, rn, rm);
+ break;
+ case NEON_PMUL:
+ pmul(vf, rd, rn, rm);
+ break;
+ case NEON_SMAX:
+ smax(vf, rd, rn, rm);
+ break;
+ case NEON_SMAXP:
+ smaxp(vf, rd, rn, rm);
+ break;
+ case NEON_SMIN:
+ smin(vf, rd, rn, rm);
+ break;
+ case NEON_SMINP:
+ sminp(vf, rd, rn, rm);
+ break;
+ case NEON_SUB:
+ sub(vf, rd, rn, rm);
+ break;
+ case NEON_UMAX:
+ umax(vf, rd, rn, rm);
+ break;
+ case NEON_UMAXP:
+ umaxp(vf, rd, rn, rm);
+ break;
+ case NEON_UMIN:
+ umin(vf, rd, rn, rm);
+ break;
+ case NEON_UMINP:
+ uminp(vf, rd, rn, rm);
+ break;
+ case NEON_SSHL:
+ sshl(vf, rd, rn, rm);
+ break;
+ case NEON_USHL:
+ ushl(vf, rd, rn, rm);
+ break;
+ case NEON_SABD:
+ AbsDiff(vf, rd, rn, rm, true);
+ break;
+ case NEON_UABD:
+ AbsDiff(vf, rd, rn, rm, false);
+ break;
+ case NEON_SABA:
+ saba(vf, rd, rn, rm);
+ break;
+ case NEON_UABA:
+ uaba(vf, rd, rn, rm);
+ break;
+ case NEON_UQADD:
+ add(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQADD:
+ add(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSUB:
+ sub(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSUB:
+ sub(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_SQDMULH:
+ sqdmulh(vf, rd, rn, rm);
+ break;
+ case NEON_SQRDMULH:
+ sqrdmulh(vf, rd, rn, rm);
+ break;
+ case NEON_UQSHL:
+ ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSHL:
+ sshl(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_URSHL:
+ ushl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_SRSHL:
+ sshl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_UQRSHL:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ case NEON_UHADD:
+ add(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_URHADD:
+ add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
+ break;
+ case NEON_SHADD:
+ add(vf, rd, rn, rm).Halve(vf);
+ break;
+ case NEON_SRHADD:
+ add(vf, rd, rn, rm).Halve(vf).Round(vf);
+ break;
+ case NEON_UHSUB:
+ sub(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_SHSUB:
+ sub(vf, rd, rn, rm).Halve(vf);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
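// A minimal illustration of the saturating behaviour applied above through
// UnsignedSaturate(): lane results that would overflow are clamped to the
// lane maximum rather than wrapping (plain C++, byte lanes only):
#include <algorithm>
#include <cstdint>

uint8_t UnsignedSaturatingAddByte(uint8_t a, uint8_t b) {
  int sum = static_cast<int>(a) + static_cast<int>(b);
  return static_cast<uint8_t>(std::min(sum, 255));
}
// Example: UQADD on byte lanes gives 200 + 100 = 255, whereas a plain ADD
// would wrap to 44.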
+
+void Simulator::VisitNEON3Different(Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEON3DifferentMask)) {
+ case NEON_PMULL:
+ pmull(vf_l, rd, rn, rm);
+ break;
+ case NEON_PMULL2:
+ pmull2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UADDL:
+ uaddl(vf_l, rd, rn, rm);
+ break;
+ case NEON_UADDL2:
+ uaddl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SADDL:
+ saddl(vf_l, rd, rn, rm);
+ break;
+ case NEON_SADDL2:
+ saddl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_USUBL:
+ usubl(vf_l, rd, rn, rm);
+ break;
+ case NEON_USUBL2:
+ usubl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SSUBL:
+ ssubl(vf_l, rd, rn, rm);
+ break;
+ case NEON_SSUBL2:
+ ssubl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SABAL:
+ sabal(vf_l, rd, rn, rm);
+ break;
+ case NEON_SABAL2:
+ sabal2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UABAL:
+ uabal(vf_l, rd, rn, rm);
+ break;
+ case NEON_UABAL2:
+ uabal2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SABDL:
+ sabdl(vf_l, rd, rn, rm);
+ break;
+ case NEON_SABDL2:
+ sabdl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UABDL:
+ uabdl(vf_l, rd, rn, rm);
+ break;
+ case NEON_UABDL2:
+ uabdl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SMLAL:
+ smlal(vf_l, rd, rn, rm);
+ break;
+ case NEON_SMLAL2:
+ smlal2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UMLAL:
+ umlal(vf_l, rd, rn, rm);
+ break;
+ case NEON_UMLAL2:
+ umlal2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SMLSL:
+ smlsl(vf_l, rd, rn, rm);
+ break;
+ case NEON_SMLSL2:
+ smlsl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UMLSL:
+ umlsl(vf_l, rd, rn, rm);
+ break;
+ case NEON_UMLSL2:
+ umlsl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SMULL:
+ smull(vf_l, rd, rn, rm);
+ break;
+ case NEON_SMULL2:
+ smull2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UMULL:
+ umull(vf_l, rd, rn, rm);
+ break;
+ case NEON_UMULL2:
+ umull2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SQDMLAL:
+ sqdmlal(vf_l, rd, rn, rm);
+ break;
+ case NEON_SQDMLAL2:
+ sqdmlal2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SQDMLSL:
+ sqdmlsl(vf_l, rd, rn, rm);
+ break;
+ case NEON_SQDMLSL2:
+ sqdmlsl2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SQDMULL:
+ sqdmull(vf_l, rd, rn, rm);
+ break;
+ case NEON_SQDMULL2:
+ sqdmull2(vf_l, rd, rn, rm);
+ break;
+ case NEON_UADDW:
+ uaddw(vf_l, rd, rn, rm);
+ break;
+ case NEON_UADDW2:
+ uaddw2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SADDW:
+ saddw(vf_l, rd, rn, rm);
+ break;
+ case NEON_SADDW2:
+ saddw2(vf_l, rd, rn, rm);
+ break;
+ case NEON_USUBW:
+ usubw(vf_l, rd, rn, rm);
+ break;
+ case NEON_USUBW2:
+ usubw2(vf_l, rd, rn, rm);
+ break;
+ case NEON_SSUBW:
+ ssubw(vf_l, rd, rn, rm);
+ break;
+ case NEON_SSUBW2:
+ ssubw2(vf_l, rd, rn, rm);
+ break;
+ case NEON_ADDHN:
+ addhn(vf, rd, rn, rm);
+ break;
+ case NEON_ADDHN2:
+ addhn2(vf, rd, rn, rm);
+ break;
+ case NEON_RADDHN:
+ raddhn(vf, rd, rn, rm);
+ break;
+ case NEON_RADDHN2:
+ raddhn2(vf, rd, rn, rm);
+ break;
+ case NEON_SUBHN:
+ subhn(vf, rd, rn, rm);
+ break;
+ case NEON_SUBHN2:
+ subhn2(vf, rd, rn, rm);
+ break;
+ case NEON_RSUBHN:
+ rsubhn(vf, rd, rn, rm);
+ break;
+ case NEON_RSUBHN2:
+ rsubhn2(vf, rd, rn, rm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::VisitNEONAcrossLanes(Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ // The input operand's VectorFormat is passed for these instructions.
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV:
+ fmaxv(vf, rd, rn);
+ break;
+ case NEON_FMINV:
+ fminv(vf, rd, rn);
+ break;
+ case NEON_FMAXNMV:
+ fmaxnmv(vf, rd, rn);
+ break;
+ case NEON_FMINNMV:
+ fminnmv(vf, rd, rn);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV:
+ addv(vf, rd, rn);
+ break;
+ case NEON_SMAXV:
+ smaxv(vf, rd, rn);
+ break;
+ case NEON_SMINV:
+ sminv(vf, rd, rn);
+ break;
+ case NEON_UMAXV:
+ umaxv(vf, rd, rn);
+ break;
+ case NEON_UMINV:
+ uminv(vf, rd, rn);
+ break;
+ case NEON_SADDLV:
+ saddlv(vf, rd, rn);
+ break;
+ case NEON_UADDLV:
+ uaddlv(vf, rd, rn);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf_r = nfd.GetVectorFormat();
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ ByElementOp Op = NULL;
+
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_MUL_byelement:
+ Op = &Simulator::mul;
+ vf = vf_r;
+ break;
+ case NEON_MLA_byelement:
+ Op = &Simulator::mla;
+ vf = vf_r;
+ break;
+ case NEON_MLS_byelement:
+ Op = &Simulator::mls;
+ vf = vf_r;
+ break;
+ case NEON_SQDMULH_byelement:
+ Op = &Simulator::sqdmulh;
+ vf = vf_r;
+ break;
+ case NEON_SQRDMULH_byelement:
+ Op = &Simulator::sqrdmulh;
+ vf = vf_r;
+ break;
+ case NEON_SMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smull2;
+ } else {
+ Op = &Simulator::smull;
+ }
+ break;
+ case NEON_UMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umull2;
+ } else {
+ Op = &Simulator::umull;
+ }
+ break;
+ case NEON_SMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlal2;
+ } else {
+ Op = &Simulator::smlal;
+ }
+ break;
+ case NEON_UMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlal2;
+ } else {
+ Op = &Simulator::umlal;
+ }
+ break;
+ case NEON_SMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlsl2;
+ } else {
+ Op = &Simulator::smlsl;
+ }
+ break;
+ case NEON_UMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlsl2;
+ } else {
+ Op = &Simulator::umlsl;
+ }
+ break;
+ case NEON_SQDMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmull2;
+ } else {
+ Op = &Simulator::sqdmull;
+ }
+ break;
+ case NEON_SQDMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlal2;
+ } else {
+ Op = &Simulator::sqdmlal;
+ }
+ break;
+ case NEON_SQDMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlsl2;
+ } else {
+ Op = &Simulator::sqdmlsl;
+ }
+ break;
+ default:
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+
+ vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement:
+ Op = &Simulator::fmul;
+ break;
+ case NEON_FMLA_byelement:
+ Op = &Simulator::fmla;
+ break;
+ case NEON_FMLS_byelement:
+ Op = &Simulator::fmls;
+ break;
+ case NEON_FMULX_byelement:
+ Op = &Simulator::fmulx;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
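// A tiny sketch of the lane index assembly used above, assuming the same
// H/L/M bit layout: for half-word elements the index is H:L:M and only the
// low four bits of Rm name the register (illustrative helper only):
int ByElementIndexForHalfWords(int h, int l, int m) {
  return (((h << 1) | l) << 1) | m;  // index = H:L:M
}
// Example: H = 1, L = 0, M = 1 selects lane 5 of the by-element operand.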
+
+void Simulator::VisitNEONCopy(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ int imm5 = instr->ImmNEON5();
+ int lsb = LowestSetBitPosition(imm5);
+ int reg_index = imm5 >> lsb;
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ int imm4 = instr->ImmNEON4();
+ DCHECK_GE(lsb, 1);
+ int rn_index = imm4 >> (lsb - 1);
+ ins_element(vf, rd, reg_index, rn, rn_index);
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
+ uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
+ value &= MaxUintFromFormat(vf);
+ set_xreg(instr->Rd(), value);
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
+ int64_t value = LogicVRegister(rn).Int(vf, reg_index);
+ if (instr->NEONQ()) {
+ set_xreg(instr->Rd(), value);
+ } else {
+ DCHECK(is_int32(value));
+ set_wreg(instr->Rd(), static_cast<int32_t>(value));
+ }
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ dup_element(vf, rd, rn, reg_index);
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ dup_immediate(vf, rd, xreg(instr->Rn()));
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::VisitNEONExtract(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ int index = instr->ImmNEONExt();
+ ext(vf, rd, rn, rm, index);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
+ int reg_size = RegisterSizeInBytesFromFormat(vf);
+
+ int reg[4];
+ uint64_t addr[4];
+ for (int i = 0; i < 4; i++) {
+ reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
+ addr[i] = addr_base + (i * reg_size);
+ }
+ int count = 1;
+ bool log_read = true;
+
+ // Bit 23 determines whether this is an offset or post-index addressing mode.
+ // In offset mode, bits 20 to 16 should be zero; these bits encode the
+  // register or immediate in post-index mode.
+ if ((instr->Bit(23) == 0) && (instr->Bits(20, 16) != 0)) {
+ UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_4v:
+ case NEON_LD1_4v_post:
+ ld1(vf, vreg(reg[3]), addr[3]);
+ count++; // Fall through.
+ case NEON_LD1_3v:
+ case NEON_LD1_3v_post:
+ ld1(vf, vreg(reg[2]), addr[2]);
+ count++; // Fall through.
+ case NEON_LD1_2v:
+ case NEON_LD1_2v_post:
+ ld1(vf, vreg(reg[1]), addr[1]);
+ count++; // Fall through.
+ case NEON_LD1_1v:
+ case NEON_LD1_1v_post:
+ ld1(vf, vreg(reg[0]), addr[0]);
+ break;
+ case NEON_ST1_4v:
+ case NEON_ST1_4v_post:
+ st1(vf, vreg(reg[3]), addr[3]);
+ count++; // Fall through.
+ case NEON_ST1_3v:
+ case NEON_ST1_3v_post:
+ st1(vf, vreg(reg[2]), addr[2]);
+ count++; // Fall through.
+ case NEON_ST1_2v:
+ case NEON_ST1_2v_post:
+ st1(vf, vreg(reg[1]), addr[1]);
+ count++; // Fall through.
+ case NEON_ST1_1v:
+ case NEON_ST1_1v_post:
+ st1(vf, vreg(reg[0]), addr[0]);
+ log_read = false;
+ break;
+ case NEON_LD2_post:
+ case NEON_LD2:
+ ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_ST2:
+ case NEON_ST2_post:
+ st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ log_read = false;
+ break;
+ case NEON_LD3_post:
+ case NEON_LD3:
+ ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST3:
+ case NEON_ST3_post:
+ st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ log_read = false;
+ break;
+ case NEON_LD4_post:
+ case NEON_LD4:
+ ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
+ count = 4;
+ break;
+ case NEON_ST4:
+ case NEON_ST4_post:
+ st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
+ count = 4;
+ log_read = false;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (log_read) {
+ local_monitor_.NotifyLoad();
+ } else {
+ local_monitor_.NotifyStore();
+ global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ }
+ }
+
+ // Explicitly log the register update whilst we have type information.
+ for (int i = 0; i < count; i++) {
+ // For de-interleaving loads, only print the base address.
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
+ GetPrintRegisterFormatForSize(reg_size, lane_size));
+ if (log_read) {
+ LogVRead(addr_base, reg[i], format);
+ } else {
+ LogVWrite(addr_base, reg[i], format);
+ }
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+    // The immediate post-index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_base +=
+ (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count : xreg(rm);
+ set_xreg(instr->Rn(), addr_base);
+ } else {
+ DCHECK_EQ(addr_mode, Offset);
+ }
+}
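// A minimal sketch of the post-index update rule implemented above, assuming
// the same rm == 31 convention: the immediate form advances the base by
// (register size in bytes) * (number of registers), otherwise Xm is added
// (hypothetical helper, not part of the simulator):
#include <cstdint>

uint64_t PostIndexAdvance(uint64_t addr_base, int reg_size_in_bytes,
                          int reg_count, int rm, uint64_t xreg_rm) {
  return addr_base + ((rm == 31)
                          ? static_cast<uint64_t>(reg_size_in_bytes) * reg_count
                          : xreg_rm);
}
// Example: LD1 {v0.16B, v1.16B}, [x0], #32 uses a 16-byte register size and
// a count of 2, so the base register advances by 32 bytes.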
+
+void Simulator::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, Offset);
+}
+
+void Simulator::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, PostIndex);
+}
+
+void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
+ int rt = instr->Rt();
+
+ // Bit 23 determines whether this is an offset or post-index addressing mode.
+ // In offset mode, bits 20 to 16 should be zero; these bits encode the
+  // register or immediate in post-index mode.
+ DCHECK_IMPLIES(instr->Bit(23) == 0, instr->Bits(20, 16) == 0);
+
+ bool do_load = false;
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf_t = nfd.GetVectorFormat();
+
+ VectorFormat vf = kFormat16B;
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b:
+ case NEON_LD1_b_post:
+ case NEON_LD2_b:
+ case NEON_LD2_b_post:
+ case NEON_LD3_b:
+ case NEON_LD3_b_post:
+ case NEON_LD4_b:
+ case NEON_LD4_b_post:
+ do_load = true; // Fall through.
+ case NEON_ST1_b:
+ case NEON_ST1_b_post:
+ case NEON_ST2_b:
+ case NEON_ST2_b_post:
+ case NEON_ST3_b:
+ case NEON_ST3_b_post:
+ case NEON_ST4_b:
+ case NEON_ST4_b_post:
+ break;
+
+ case NEON_LD1_h:
+ case NEON_LD1_h_post:
+ case NEON_LD2_h:
+ case NEON_LD2_h_post:
+ case NEON_LD3_h:
+ case NEON_LD3_h_post:
+ case NEON_LD4_h:
+ case NEON_LD4_h_post:
+ do_load = true; // Fall through.
+ case NEON_ST1_h:
+ case NEON_ST1_h_post:
+ case NEON_ST2_h:
+ case NEON_ST2_h_post:
+ case NEON_ST3_h:
+ case NEON_ST3_h_post:
+ case NEON_ST4_h:
+ case NEON_ST4_h_post:
+ vf = kFormat8H;
+ break;
+
+ case NEON_LD1_s:
+ case NEON_LD1_s_post:
+ case NEON_LD2_s:
+ case NEON_LD2_s_post:
+ case NEON_LD3_s:
+ case NEON_LD3_s_post:
+ case NEON_LD4_s:
+ case NEON_LD4_s_post:
+ do_load = true; // Fall through.
+ case NEON_ST1_s:
+ case NEON_ST1_s_post:
+ case NEON_ST2_s:
+ case NEON_ST2_s_post:
+ case NEON_ST3_s:
+ case NEON_ST3_s_post:
+ case NEON_ST4_s:
+ case NEON_ST4_s_post: {
+ static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
+ "LSB of size distinguishes S and D registers.");
+ static_assert(
+ (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post,
+ "LSB of size distinguishes S and D registers.");
+ static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
+ "LSB of size distinguishes S and D registers.");
+ static_assert(
+ (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post,
+ "LSB of size distinguishes S and D registers.");
+ vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
+ break;
+ }
+
+ case NEON_LD1R:
+ case NEON_LD1R_post: {
+ vf = vf_t;
+ ld1r(vf, vreg(rt), addr);
+ do_load = true;
+ break;
+ }
+
+ case NEON_LD2R:
+ case NEON_LD2R_post: {
+ vf = vf_t;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ ld2r(vf, vreg(rt), vreg(rt2), addr);
+ do_load = true;
+ break;
+ }
+
+ case NEON_LD3R:
+ case NEON_LD3R_post: {
+ vf = vf_t;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
+ do_load = true;
+ break;
+ }
+
+ case NEON_LD4R:
+ case NEON_LD4R_post: {
+ vf = vf_t;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+ ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
+ do_load = true;
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ }
+
+ PrintRegisterFormat print_format =
+ GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
+ // Make sure that the print_format only includes a single lane.
+ print_format =
+ static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
+
+ int esize = LaneSizeInBytesFromFormat(vf);
+ int index_shift = LaneSizeInBytesLog2FromFormat(vf);
+ int lane = instr->NEONLSIndex(index_shift);
+ int scale = 0;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+ switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
+ case NEONLoadStoreSingle1:
+ scale = 1;
+ if (do_load) {
+ ld1(vf, vreg(rt), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ } else {
+ st1(vf, vreg(rt), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle2:
+ scale = 2;
+ if (do_load) {
+ ld2(vf, vreg(rt), vreg(rt2), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ } else {
+ st2(vf, vreg(rt), vreg(rt2), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle3:
+ scale = 3;
+ if (do_load) {
+ ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ } else {
+ st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle4:
+ scale = 4;
+ if (do_load) {
+ ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ LogVRead(addr + (3 * esize), rt4, print_format, lane);
+ } else {
+ st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ LogVWrite(addr + (3 * esize), rt4, print_format, lane);
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (do_load) {
+ local_monitor_.NotifyLoad();
+ } else {
+ local_monitor_.NotifyStore();
+ global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ }
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
+ }
+}
+
+void Simulator::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, Offset);
+}
+
+void Simulator::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, PostIndex);
+}
+
+void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ int cmode = instr->NEONCmode();
+ int cmode_3_1 = (cmode >> 1) & 7;
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op_bit = instr->NEONModImmOp();
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+
+ // Find the format and immediate value
+ uint64_t imm = 0;
+ VectorFormat vform = kFormatUndefined;
+ switch (cmode_3_1) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ imm = imm8 << (8 * cmode_3_1);
+ break;
+ case 0x4:
+ case 0x5:
+ vform = (q == 1) ? kFormat8H : kFormat4H;
+ imm = imm8 << (8 * cmode_1);
+ break;
+ case 0x6:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ if (cmode_0 == 0) {
+ imm = imm8 << 8 | 0x000000ff;
+ } else {
+ imm = imm8 << 16 | 0x0000ffff;
+ }
+ break;
+ case 0x7:
+ if (cmode_0 == 0 && op_bit == 0) {
+ vform = q ? kFormat16B : kFormat8B;
+ imm = imm8;
+ } else if (cmode_0 == 0 && op_bit == 1) {
+ vform = q ? kFormat2D : kFormat1D;
+ imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1 << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ } else { // cmode_0 == 1, cmode == 0xf.
+ if (op_bit == 0) {
+ vform = q ? kFormat4S : kFormat2S;
+ imm = bit_cast<uint32_t>(instr->ImmNEONFP32());
+ } else if (q == 1) {
+ vform = kFormat2D;
+ imm = bit_cast<uint64_t>(instr->ImmNEONFP64());
+ } else {
+ DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xf));
+ VisitUnallocated(instr);
+ }
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Find the operation.
+ NEONModifiedImmediateOp op;
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<3> == '1'
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<2> == '1'
+ if (cmode_1 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<1> == '1'
+ if (cmode_0 == 0) {
+ op = NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = NEONModifiedImmediate_MOVI;
+ }
+ }
+ }
+ }
+
+ // Call the logic function.
+ switch (op) {
+ case NEONModifiedImmediate_ORR:
+ orr(vform, rd, rd, imm);
+ break;
+ case NEONModifiedImmediate_BIC:
+ bic(vform, rd, rd, imm);
+ break;
+ case NEONModifiedImmediate_MOVI:
+ movi(vform, rd, imm);
+ break;
+ case NEONModifiedImmediate_MVNI:
+ mvni(vform, rd, imm);
+ break;
+ default:
+ VisitUnimplemented(instr);
+ }
+}
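// A small sketch of the 64-bit byte-mask expansion handled in the
// cmode_3_1 == 0x7, op == 1 branch above: each bit of imm8 selects one 0xff
// byte of the immediate (standalone helper for illustration):
#include <cstdint>

uint64_t ExpandByteMaskImmediate(uint64_t imm8) {
  uint64_t imm = 0;
  for (int i = 0; i < 8; ++i) {
    if (imm8 & (UINT64_C(1) << i)) {
      imm |= (UINT64_C(0xff) << (8 * i));
    }
  }
  return imm;
}
// Example: imm8 = 0x43 (bits 0, 1 and 6 set) expands to 0x00ff00000000ffff.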
+
+void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMEQ_zero_scalar:
+ cmp(vf, rd, rn, 0, eq);
+ break;
+ case NEON_CMGE_zero_scalar:
+ cmp(vf, rd, rn, 0, ge);
+ break;
+ case NEON_CMGT_zero_scalar:
+ cmp(vf, rd, rn, 0, gt);
+ break;
+ case NEON_CMLT_zero_scalar:
+ cmp(vf, rd, rn, 0, lt);
+ break;
+ case NEON_CMLE_zero_scalar:
+ cmp(vf, rd, rn, 0, le);
+ break;
+ case NEON_ABS_scalar:
+ abs(vf, rd, rn);
+ break;
+ case NEON_SQABS_scalar:
+ abs(vf, rd, rn).SignedSaturate(vf);
+ break;
+ case NEON_NEG_scalar:
+ neg(vf, rd, rn);
+ break;
+ case NEON_SQNEG_scalar:
+ neg(vf, rd, rn).SignedSaturate(vf);
+ break;
+ case NEON_SUQADD_scalar:
+ suqadd(vf, rd, rn);
+ break;
+ case NEON_USQADD_scalar:
+ usqadd(vf, rd, rn);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRECPE_scalar:
+ frecpe(fpf, rd, rn, fpcr_rounding);
+ break;
+ case NEON_FRECPX_scalar:
+ frecpx(fpf, rd, rn);
+ break;
+ case NEON_FRSQRTE_scalar:
+ frsqrte(fpf, rd, rn);
+ break;
+ case NEON_FCMGT_zero_scalar:
+ fcmp_zero(fpf, rd, rn, gt);
+ break;
+ case NEON_FCMGE_zero_scalar:
+ fcmp_zero(fpf, rd, rn, ge);
+ break;
+ case NEON_FCMEQ_zero_scalar:
+ fcmp_zero(fpf, rd, rn, eq);
+ break;
+ case NEON_FCMLE_zero_scalar:
+ fcmp_zero(fpf, rd, rn, le);
+ break;
+ case NEON_FCMLT_zero_scalar:
+ fcmp_zero(fpf, rd, rn, lt);
+ break;
+ case NEON_SCVTF_scalar:
+ scvtf(fpf, rd, rn, 0, fpcr_rounding);
+ break;
+ case NEON_UCVTF_scalar:
+ ucvtf(fpf, rd, rn, 0, fpcr_rounding);
+ break;
+ case NEON_FCVTNS_scalar:
+ fcvts(fpf, rd, rn, FPTieEven);
+ break;
+ case NEON_FCVTNU_scalar:
+ fcvtu(fpf, rd, rn, FPTieEven);
+ break;
+ case NEON_FCVTPS_scalar:
+ fcvts(fpf, rd, rn, FPPositiveInfinity);
+ break;
+ case NEON_FCVTPU_scalar:
+ fcvtu(fpf, rd, rn, FPPositiveInfinity);
+ break;
+ case NEON_FCVTMS_scalar:
+ fcvts(fpf, rd, rn, FPNegativeInfinity);
+ break;
+ case NEON_FCVTMU_scalar:
+ fcvtu(fpf, rd, rn, FPNegativeInfinity);
+ break;
+ case NEON_FCVTZS_scalar:
+ fcvts(fpf, rd, rn, FPZero);
+ break;
+ case NEON_FCVTZU_scalar:
+ fcvtu(fpf, rd, rn, FPZero);
+ break;
+ case NEON_FCVTAS_scalar:
+ fcvts(fpf, rd, rn, FPTieAway);
+ break;
+ case NEON_FCVTAU_scalar:
+ fcvtu(fpf, rd, rn, FPTieAway);
+ break;
+ case NEON_FCVTXN_scalar:
+ // Unlike all of the other FP instructions above, fcvtxn encodes dest
+ // size S as size<0>=1. There's only one case, so we ignore the form.
+ DCHECK_EQ(instr->Bit(22), 1);
+ fcvtxn(kFormatS, rd, rn);
+ break;
+ default:
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar:
+ sqxtn(vf, rd, rn);
+ break;
+ case NEON_UQXTN_scalar:
+ uqxtn(vf, rd, rn);
+ break;
+ case NEON_SQXTUN_scalar:
+ sqxtun(vf, rd, rn);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+ }
+}
+
+void Simulator::VisitNEONScalar3Diff(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar:
+ sqdmlal(vf, rd, rn, rm);
+ break;
+ case NEON_SQDMLSL_scalar:
+ sqdmlsl(vf, rd, rn, rm);
+ break;
+ case NEON_SQDMULL_scalar:
+ sqdmull(vf, rd, rn, rm);
+ break;
default:
UNIMPLEMENTED();
}
}
+void Simulator::VisitNEONScalar3Same(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FMULX_scalar:
+ fmulx(vf, rd, rn, rm);
+ break;
+ case NEON_FACGE_scalar:
+ fabscmp(vf, rd, rn, rm, ge);
+ break;
+ case NEON_FACGT_scalar:
+ fabscmp(vf, rd, rn, rm, gt);
+ break;
+ case NEON_FCMEQ_scalar:
+ fcmp(vf, rd, rn, rm, eq);
+ break;
+ case NEON_FCMGE_scalar:
+ fcmp(vf, rd, rn, rm, ge);
+ break;
+ case NEON_FCMGT_scalar:
+ fcmp(vf, rd, rn, rm, gt);
+ break;
+ case NEON_FRECPS_scalar:
+ frecps(vf, rd, rn, rm);
+ break;
+ case NEON_FRSQRTS_scalar:
+ frsqrts(vf, rd, rn, rm);
+ break;
+ case NEON_FABD_scalar:
+ fabd(vf, rd, rn, rm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar:
+ add(vf, rd, rn, rm);
+ break;
+ case NEON_SUB_scalar:
+ sub(vf, rd, rn, rm);
+ break;
+ case NEON_CMEQ_scalar:
+ cmp(vf, rd, rn, rm, eq);
+ break;
+ case NEON_CMGE_scalar:
+ cmp(vf, rd, rn, rm, ge);
+ break;
+ case NEON_CMGT_scalar:
+ cmp(vf, rd, rn, rm, gt);
+ break;
+ case NEON_CMHI_scalar:
+ cmp(vf, rd, rn, rm, hi);
+ break;
+ case NEON_CMHS_scalar:
+ cmp(vf, rd, rn, rm, hs);
+ break;
+ case NEON_CMTST_scalar:
+ cmptst(vf, rd, rn, rm);
+ break;
+ case NEON_USHL_scalar:
+ ushl(vf, rd, rn, rm);
+ break;
+ case NEON_SSHL_scalar:
+ sshl(vf, rd, rn, rm);
+ break;
+ case NEON_SQDMULH_scalar:
+ sqdmulh(vf, rd, rn, rm);
+ break;
+ case NEON_SQRDMULH_scalar:
+ sqrdmulh(vf, rd, rn, rm);
+ break;
+ case NEON_UQADD_scalar:
+ add(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQADD_scalar:
+ add(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSUB_scalar:
+ sub(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSUB_scalar:
+ sub(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSHL_scalar:
+ ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSHL_scalar:
+ sshl(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_URSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_SRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_UQRSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ ByElementOp Op = NULL;
+
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar:
+ Op = &Simulator::sqdmull;
+ break;
+ case NEON_SQDMLAL_byelement_scalar:
+ Op = &Simulator::sqdmlal;
+ break;
+ case NEON_SQDMLSL_byelement_scalar:
+ Op = &Simulator::sqdmlsl;
+ break;
+ case NEON_SQDMULH_byelement_scalar:
+ Op = &Simulator::sqdmulh;
+ vf = vf_r;
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ Op = &Simulator::sqrdmulh;
+ vf = vf_r;
+ break;
+ default:
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar:
+ Op = &Simulator::fmul;
+ break;
+ case NEON_FMLA_byelement_scalar:
+ Op = &Simulator::fmla;
+ break;
+ case NEON_FMLS_byelement_scalar:
+ Op = &Simulator::fmls;
+ break;
+ case NEON_FMULX_byelement_scalar:
+ Op = &Simulator::fmulx;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+void Simulator::VisitNEONScalarCopy(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ int imm5 = instr->ImmNEON5();
+ int lsb = LowestSetBitPosition(imm5);
+ int rn_index = imm5 >> lsb;
+ dup_element(vf, rd, rn, rn_index);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::VisitNEONScalarPairwise(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar:
+ addp(vf, rd, rn);
+ break;
+ case NEON_FADDP_scalar:
+ faddp(vf, rd, rn);
+ break;
+ case NEON_FMAXP_scalar:
+ fmaxp(vf, rd, rn);
+ break;
+ case NEON_FMAXNMP_scalar:
+ fmaxnmp(vf, rd, rn);
+ break;
+ case NEON_FMINP_scalar:
+ fminp(vf, rd, rn);
+ break;
+ case NEON_FMINNMP_scalar:
+ fminnmp(vf, rd, rn);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::VisitNEONScalarShiftImmediate(Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, NF_D, NF_D, NF_D,
+ NF_D, NF_D, NF_D, NF_D, NF_D}};
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_SHL_scalar:
+ shl(vf, rd, rn, left_shift);
+ break;
+ case NEON_SLI_scalar:
+ sli(vf, rd, rn, left_shift);
+ break;
+ case NEON_SQSHL_imm_scalar:
+ sqshl(vf, rd, rn, left_shift);
+ break;
+ case NEON_UQSHL_imm_scalar:
+ uqshl(vf, rd, rn, left_shift);
+ break;
+ case NEON_SQSHLU_scalar:
+ sqshlu(vf, rd, rn, left_shift);
+ break;
+ case NEON_SRI_scalar:
+ sri(vf, rd, rn, right_shift);
+ break;
+ case NEON_SSHR_scalar:
+ sshr(vf, rd, rn, right_shift);
+ break;
+ case NEON_USHR_scalar:
+ ushr(vf, rd, rn, right_shift);
+ break;
+ case NEON_SRSHR_scalar:
+ sshr(vf, rd, rn, right_shift).Round(vf);
+ break;
+ case NEON_URSHR_scalar:
+ ushr(vf, rd, rn, right_shift).Round(vf);
+ break;
+ case NEON_SSRA_scalar:
+ ssra(vf, rd, rn, right_shift);
+ break;
+ case NEON_USRA_scalar:
+ usra(vf, rd, rn, right_shift);
+ break;
+ case NEON_SRSRA_scalar:
+ srsra(vf, rd, rn, right_shift);
+ break;
+ case NEON_URSRA_scalar:
+ ursra(vf, rd, rn, right_shift);
+ break;
+ case NEON_UQSHRN_scalar:
+ uqshrn(vf, rd, rn, right_shift);
+ break;
+ case NEON_UQRSHRN_scalar:
+ uqrshrn(vf, rd, rn, right_shift);
+ break;
+ case NEON_SQSHRN_scalar:
+ sqshrn(vf, rd, rn, right_shift);
+ break;
+ case NEON_SQRSHRN_scalar:
+ sqrshrn(vf, rd, rn, right_shift);
+ break;
+ case NEON_SQSHRUN_scalar:
+ sqshrun(vf, rd, rn, right_shift);
+ break;
+ case NEON_SQRSHRUN_scalar:
+ sqrshrun(vf, rd, rn, right_shift);
+ break;
+ case NEON_FCVTZS_imm_scalar:
+ fcvts(vf, rd, rn, FPZero, right_shift);
+ break;
+ case NEON_FCVTZU_imm_scalar:
+ fcvtu(vf, rd, rn, FPZero, right_shift);
+ break;
+ case NEON_SCVTF_imm_scalar:
+ scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ case NEON_UCVTF_imm_scalar:
+ ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
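The right_shift/left_shift arithmetic above decodes the immh:immb immediate; a worked example (illustrative values, not taken from the patch) makes the two formulas concrete:

// With immh = 0b0100 and immb = 0b011:
//   highestSetBit = 2                    -> 32-bit lanes (8 << 2)
//   immhimmb      = 0b0100011 = 35
//   right_shift   = (16 << 2) - 35 = 29  -> e.g. USHR #29
//   left_shift    = 35 - (8 << 2)  = 3   -> e.g. SHL #3
// Each case in the switch uses whichever of the two shifts its opcode
// actually encodes; the other value is simply ignored.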
+void Simulator::VisitNEONShiftImmediate(Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}};
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_l = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
+ VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
+
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SHL:
+ shl(vf, rd, rn, left_shift);
+ break;
+ case NEON_SLI:
+ sli(vf, rd, rn, left_shift);
+ break;
+ case NEON_SQSHLU:
+ sqshlu(vf, rd, rn, left_shift);
+ break;
+ case NEON_SRI:
+ sri(vf, rd, rn, right_shift);
+ break;
+ case NEON_SSHR:
+ sshr(vf, rd, rn, right_shift);
+ break;
+ case NEON_USHR:
+ ushr(vf, rd, rn, right_shift);
+ break;
+ case NEON_SRSHR:
+ sshr(vf, rd, rn, right_shift).Round(vf);
+ break;
+ case NEON_URSHR:
+ ushr(vf, rd, rn, right_shift).Round(vf);
+ break;
+ case NEON_SSRA:
+ ssra(vf, rd, rn, right_shift);
+ break;
+ case NEON_USRA:
+ usra(vf, rd, rn, right_shift);
+ break;
+ case NEON_SRSRA:
+ srsra(vf, rd, rn, right_shift);
+ break;
+ case NEON_URSRA:
+ ursra(vf, rd, rn, right_shift);
+ break;
+ case NEON_SQSHL_imm:
+ sqshl(vf, rd, rn, left_shift);
+ break;
+ case NEON_UQSHL_imm:
+ uqshl(vf, rd, rn, left_shift);
+ break;
+ case NEON_SCVTF_imm:
+ scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ case NEON_UCVTF_imm:
+ ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ case NEON_FCVTZS_imm:
+ fcvts(vf, rd, rn, FPZero, right_shift);
+ break;
+ case NEON_FCVTZU_imm:
+ fcvtu(vf, rd, rn, FPZero, right_shift);
+ break;
+ case NEON_SSHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ sshll2(vf, rd, rn, left_shift);
+ } else {
+ sshll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_USHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ ushll2(vf, rd, rn, left_shift);
+ } else {
+ ushll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_SHRN:
+ if (instr->Mask(NEON_Q)) {
+ shrn2(vf, rd, rn, right_shift);
+ } else {
+ shrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_RSHRN:
+ if (instr->Mask(NEON_Q)) {
+ rshrn2(vf, rd, rn, right_shift);
+ } else {
+ rshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
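The NEONFormatMap tables above are indexed by the listed instruction bits. Assuming the decoder concatenates them with the first listed bit as the most significant, the mapping described in the comment falls out directly:

// For the vf map: bits {22, 21, 20, 19, 30} = 0, 0, 1, 0, 1
//   -> index 0b00101 = 5 -> NF_8H
// which matches the "001x1->8H" line above (immh = 0b0010 with Q = 1).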
+void Simulator::VisitNEONTable(Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
+ SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
+ SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v:
+ tbl(vf, rd, rn, rm);
+ break;
+ case NEON_TBL_2v:
+ tbl(vf, rd, rn, rn2, rm);
+ break;
+ case NEON_TBL_3v:
+ tbl(vf, rd, rn, rn2, rn3, rm);
+ break;
+ case NEON_TBL_4v:
+ tbl(vf, rd, rn, rn2, rn3, rn4, rm);
+ break;
+ case NEON_TBX_1v:
+ tbx(vf, rd, rn, rm);
+ break;
+ case NEON_TBX_2v:
+ tbx(vf, rd, rn, rn2, rm);
+ break;
+ case NEON_TBX_3v:
+ tbx(vf, rd, rn, rn2, rn3, rm);
+ break;
+ case NEON_TBX_4v:
+ tbx(vf, rd, rn, rn2, rn3, rn4, rm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
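The rn2/rn3/rn4 operands above are the registers following Rn, wrapping modulo kNumberOfVRegisters, because multi-register TBL/TBX name a consecutive register list. An illustrative (hypothetical) decode:

//   TBL v2.16B, {v30.16B, v31.16B, v0.16B}, v1.16B
// uses rn = v30, rn2 = v31 and rn3 = (30 + 2) % 32 = v0 as the table, while
// v1 supplies the byte indices.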
+void Simulator::VisitNEONPerm(Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1:
+ trn1(vf, rd, rn, rm);
+ break;
+ case NEON_TRN2:
+ trn2(vf, rd, rn, rm);
+ break;
+ case NEON_UZP1:
+ uzp1(vf, rd, rn, rm);
+ break;
+ case NEON_UZP2:
+ uzp2(vf, rd, rn, rm);
+ break;
+ case NEON_ZIP1:
+ zip1(vf, rd, rn, rm);
+ break;
+ case NEON_ZIP2:
+ zip2(vf, rd, rn, rm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
void Simulator::DoPrintf(Instruction* instr) {
DCHECK((instr->Mask(ExceptionMask) == HLT) &&
@@ -4035,7 +5875,7 @@ void Simulator::LocalMonitor::Clear() {
size_ = TransactionSize::None;
}
-void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) {
+void Simulator::LocalMonitor::NotifyLoad() {
if (access_state_ == MonitorAccess::Exclusive) {
// A non exclusive load could clear the local monitor. As a result, it's
// most strict to unconditionally clear the local monitor on load.
@@ -4050,7 +5890,7 @@ void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
size_ = size;
}
-void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) {
+void Simulator::LocalMonitor::NotifyStore() {
if (access_state_ == MonitorAccess::Exclusive) {
// A non exclusive store could clear the local monitor. As a result, it's
// most strict to unconditionally clear the local monitor on store.
@@ -4098,7 +5938,7 @@ void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
}
void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
- uintptr_t addr, bool is_requesting_processor) {
+ bool is_requesting_processor) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non exclusive store could clear the global monitor. As a result, it's
// most strict to unconditionally clear global monitors on store.
@@ -4144,12 +5984,11 @@ void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
PrependProcessor_Locked(processor);
}
-void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr,
- Processor* processor) {
+void Simulator::GlobalMonitor::NotifyStore_Locked(Processor* processor) {
// Notify each processor of the store operation.
for (Processor* iter = head_; iter; iter = iter->next_) {
bool is_requesting_processor = iter == processor;
- iter->NotifyStore_Locked(addr, is_requesting_processor);
+ iter->NotifyStore_Locked(is_requesting_processor);
}
}
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 48fc1c7bc6..c82bdd8c7a 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -67,6 +67,239 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ static_assert((sizeof(T) * 8) >= (1 + ebits + mbits),
+ "destination type T not large enough");
+ static_assert(sizeof(T) <= sizeof(uint64_t),
+ "maximum size of destination type T is 64 bits");
+ static_assert(std::is_unsigned<T>::value,
+ "destination type T must be unsigned");
+
+ DCHECK((sign == 0) || (sign == 1));
+
+ // Only FPTieEven and FPRoundOdd rounding modes are implemented.
+ DCHECK((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ // These bits fit into the result.
+ // |---------------------|
+ // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ // ||
+ // / |
+ // / halfbit
+ // onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ // onebit(frac)
+ // |
+ // | halfbit(frac) halfbit(adjusted)
+ // | / /
+ // | | |
+ // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
+ // 0b00.0... -> 0b00.0... -> 0b00
+ // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
+ // 0b00.1... -> 0b00.1... -> 0b01
+ // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
+ // 0b01.0... -> 0b01.0... -> 0b01
+ // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
+ // 0b01.1... -> 0b01.1... -> 0b10
+ // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
+ // 0b10.0... -> 0b10.0... -> 0b10
+ // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
+ // 0b10.1... -> 0b10.1... -> 0b11
+ // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
+ // ... / | / |
+ // / | / |
+ // / |
+ // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / |
+ //
+ // mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ const int mantissa_offset = 0;
+ const int exponent_offset = mantissa_offset + mbits;
+ const int sign_offset = exponent_offset + ebits;
+ DCHECK_EQ(sign_offset, static_cast<int>(sizeof(T) * 8 - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return static_cast<T>(sign << sign_offset);
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ const int infinite_exponent = (1 << ebits) - 1;
+ const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: the input is too large for the result type to represent.
+ if (round_mode == FPTieEven) {
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ } else {
+ DCHECK_EQ(round_mode, FPRoundOdd);
+ // FPRoundOdd rounding mode handles overflows using the largest magnitude
+ // normal number.
+ exponent = max_normal_exponent;
+ mantissa = (UINT64_C(1) << exponent_offset) - 1;
+ }
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset));
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ if (round_mode == FPTieEven) {
+ // The result will always be +/-0.0.
+ return static_cast<T>(sign << sign_offset);
+ } else {
+ DCHECK_EQ(round_mode, FPRoundOdd);
+ DCHECK_NE(mantissa, 0U);
+        // For FPRoundOdd, if the mantissa is too small to represent and
+        // non-zero, return the next "odd" value.
+ return static_cast<T>((sign << sign_offset) | 1);
+ }
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(UINT64_C(1) << highest_significant_bit);
+ }
+
+ if (shift > 0) {
+ if (round_mode == FPTieEven) {
+ // We have to shift the mantissa to the right. Some precision is lost, so
+ // we need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
+ uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
+ uint64_t adjusted = mantissa - adjustment;
+ T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
+
+ T result =
+ static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+
+ // A very large mantissa can overflow during rounding. If this happens,
+ // the exponent should be incremented and the mantissa set to 1.0
+ // (encoded as 0). Applying halfbit_adjusted after assembling the float
+ // has the nice side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ DCHECK_EQ(round_mode, FPRoundOdd);
+ // If any bits at position halfbit or below are set, onebit (ie. the
+ // bottom bit of the resulting mantissa) must be set.
+ uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
+ if (fractional_bits != 0) {
+ mantissa |= UINT64_C(1) << shift;
+ }
+
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+ }
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset));
+ }
+}
+
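The onebit/halfbit description above is easiest to see on a plain integer shift. Below is a minimal, self-contained sketch (not part of the patch) of the same adjustment trick FPRound applies in its shift > 0 branch; it assumes shift >= 1:

#include <cstdint>

// Round (mantissa >> shift) to nearest, ties to even, using the
// "adjusted = frac - (halfbit & ~onebit)" trick described above.
uint64_t ShiftRightTiesToEven(uint64_t mantissa, int shift) {
  uint64_t onebit = (mantissa >> shift) & 1;         // lowest surviving bit
  uint64_t halfbit = (mantissa >> (shift - 1)) & 1;  // highest discarded bit
  uint64_t adjusted = mantissa - (halfbit & ~onebit);
  uint64_t halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
  return (mantissa >> shift) + halfbit_adjusted;
}
// With shift = 2: 0b0110 (1.5) -> 2 and 0b0010 (0.5) -> 0, since exact ties
// go to the even result, while 0b0111 (1.75) -> 2 because it is not a tie.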
+// Representation of memory, with typed getters and setters for access.
+class SimMemory {
+ public:
+ template <typename T>
+ static T AddressUntag(T address) {
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ uint64_t bits = (uint64_t)address;
+ return (T)(bits & ~kAddressTagMask);
+ }
+
+ template <typename T, typename A>
+ static T Read(A address) {
+ T value;
+ address = AddressUntag(address);
+ DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(&value, reinterpret_cast<const char*>(address), sizeof(value));
+ return value;
+ }
+
+ template <typename T, typename A>
+ static void Write(A address, T value) {
+ address = AddressUntag(address);
+ DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(reinterpret_cast<char*>(address), &value, sizeof(value));
+ }
+};
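A short usage note for the accessors above (hypothetical address values): the tag bits are stripped first, and memcpy is used instead of a pointer cast so the access is well defined for any alignment and for all of the supported 1/2/4/8/16-byte element types.

//   uint32_t w = SimMemory::Read<uint32_t>(addr);   // 4-byte load
//   SimMemory::Write<double>(addr + 8, 1.0);        // 8-byte store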
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
@@ -122,29 +355,330 @@ class SimSystemRegister {
// Represent a register (r0-r31, v0-v31).
+template <int kSizeInBytes>
class SimRegisterBase {
public:
template<typename T>
void Set(T new_value) {
- value_ = 0;
+ static_assert(sizeof(new_value) <= kSizeInBytes,
+ "Size of new_value must be <= size of template type.");
+ if (sizeof(new_value) < kSizeInBytes) {
+ // All AArch64 registers are zero-extending.
+ memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value));
+ }
memcpy(&value_, &new_value, sizeof(T));
+ NotifyRegisterWrite();
}
- template<typename T>
- T Get() const {
+ // Insert a typed value into a register, leaving the rest of the register
+ // unchanged. The lane parameter indicates where in the register the value
+ // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where
+ // 0 represents the least significant bits.
+ template <typename T>
+ void Insert(int lane, T new_value) {
+ DCHECK_GE(lane, 0);
+ DCHECK_LE(sizeof(new_value) + (lane * sizeof(new_value)),
+ static_cast<unsigned>(kSizeInBytes));
+ memcpy(&value_[lane * sizeof(new_value)], &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ template <typename T>
+ T Get(int lane = 0) const {
T result;
- memcpy(&result, &value_, sizeof(T));
+ DCHECK_GE(lane, 0);
+ DCHECK_LE(sizeof(result) + (lane * sizeof(result)),
+ static_cast<unsigned>(kSizeInBytes));
+ memcpy(&result, &value_[lane * sizeof(result)], sizeof(result));
return result;
}
+ // TODO(all): Make this return a map of updated bytes, so that we can
+ // highlight updated lanes for load-and-insert. (That never happens for scalar
+ // code, but NEON has some instructions that can update individual lanes.)
+ bool WrittenSinceLastLog() const { return written_since_last_log_; }
+
+ void NotifyRegisterLogged() { written_since_last_log_ = false; }
+
protected:
- int64_t value_;
+ uint8_t value_[kSizeInBytes];
+
+ // Helpers to aid with register tracing.
+ bool written_since_last_log_;
+
+ void NotifyRegisterWrite() { written_since_last_log_ = true; }
};
+typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
+typedef SimRegisterBase<kQRegSize> SimVRegister; // v0-v31
+
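A minimal sketch of how the lane accessors behave, assuming only the declarations above:

static float LaneAccessSketch() {
  SimVRegister v;            // 16-byte Q register
  v.Set<uint64_t>(0);        // scalar write: bytes 0..7 written, 8..15 zeroed
  v.Insert<float>(2, 1.5f);  // update lane 2 (bytes 8..11) only
  return v.Get<float>(2);    // 1.5f; the remaining lanes are untouched
}

Set() always zero-extends to the full register width, matching AArch64 scalar writes, while Insert() updates one lane and leaves the rest of the register intact.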
+// Representation of a vector register, with typed getters and setters for lanes
+// and additional information to represent lane state.
+class LogicVRegister {
+ public:
+ inline LogicVRegister(SimVRegister& other) // NOLINT
+ : register_(other) {
+ for (unsigned i = 0; i < arraysize(saturated_); i++) {
+ saturated_[i] = kNotSaturated;
+ }
+ for (unsigned i = 0; i < arraysize(round_); i++) {
+ round_[i] = false;
+ }
+ }
+
+ int64_t Int(VectorFormat vform, int index) const {
+ int64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8:
+ element = register_.Get<int8_t>(index);
+ break;
+ case 16:
+ element = register_.Get<int16_t>(index);
+ break;
+ case 32:
+ element = register_.Get<int32_t>(index);
+ break;
+ case 64:
+ element = register_.Get<int64_t>(index);
+ break;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ return element;
+ }
+
+ uint64_t Uint(VectorFormat vform, int index) const {
+ uint64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8:
+ element = register_.Get<uint8_t>(index);
+ break;
+ case 16:
+ element = register_.Get<uint16_t>(index);
+ break;
+ case 32:
+ element = register_.Get<uint32_t>(index);
+ break;
+ case 64:
+ element = register_.Get<uint64_t>(index);
+ break;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ return element;
+ }
+
+ uint64_t UintLeftJustified(VectorFormat vform, int index) const {
+ return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ int64_t IntLeftJustified(VectorFormat vform, int index) const {
+ uint64_t value = UintLeftJustified(vform, index);
+ int64_t result;
+ memcpy(&result, &value, sizeof(result));
+ return result;
+ }
+
+ void SetInt(VectorFormat vform, int index, int64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8:
+ register_.Insert(index, static_cast<int8_t>(value));
+ break;
+ case 16:
+ register_.Insert(index, static_cast<int16_t>(value));
+ break;
+ case 32:
+ register_.Insert(index, static_cast<int32_t>(value));
+ break;
+ case 64:
+ register_.Insert(index, static_cast<int64_t>(value));
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ }
+
+ void SetIntArray(VectorFormat vform, const int64_t* src) const {
+ ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ SetInt(vform, i, src[i]);
+ }
+ }
+
+ void SetUint(VectorFormat vform, int index, uint64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8:
+ register_.Insert(index, static_cast<uint8_t>(value));
+ break;
+ case 16:
+ register_.Insert(index, static_cast<uint16_t>(value));
+ break;
+ case 32:
+ register_.Insert(index, static_cast<uint32_t>(value));
+ break;
+ case 64:
+ register_.Insert(index, static_cast<uint64_t>(value));
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ }
+
+ void SetUintArray(VectorFormat vform, const uint64_t* src) const {
+ ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ SetUint(vform, i, src[i]);
+ }
+ }
+
+ void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const;
+
+ void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const;
+
+ template <typename T>
+ T Float(int index) const {
+ return register_.Get<T>(index);
+ }
+
+ template <typename T>
+ void SetFloat(int index, T value) const {
+ register_.Insert(index, value);
+ }
+
+ // When setting a result in a register of size less than Q, the top bits of
+ // the Q register must be cleared.
+ void ClearForWrite(VectorFormat vform) const {
+ unsigned size = RegisterSizeInBytesFromFormat(vform);
+ for (unsigned i = size; i < kQRegSize; i++) {
+ SetUint(kFormat16B, i, 0);
+ }
+ }
-typedef SimRegisterBase SimRegister; // r0-r31
-typedef SimRegisterBase SimFPRegister; // v0-v31
+ // Saturation state for each lane of a vector.
+ enum Saturation {
+ kNotSaturated = 0,
+ kSignedSatPositive = 1 << 0,
+ kSignedSatNegative = 1 << 1,
+ kSignedSatMask = kSignedSatPositive | kSignedSatNegative,
+ kSignedSatUndefined = kSignedSatMask,
+ kUnsignedSatPositive = 1 << 2,
+ kUnsignedSatNegative = 1 << 3,
+ kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative,
+ kUnsignedSatUndefined = kUnsignedSatMask
+ };
+
+ // Getters for saturation state.
+ Saturation GetSignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kSignedSatMask);
+ }
+
+ Saturation GetUnsignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kUnsignedSatMask);
+ }
+
+ // Setters for saturation state.
+ void ClearSat(int index) { saturated_[index] = kNotSaturated; }
+
+ void SetSignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative);
+ }
+ void SetUnsignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative);
+ }
+
+ void SetSatFlag(int index, Saturation sat) {
+ saturated_[index] = static_cast<Saturation>(saturated_[index] | sat);
+ DCHECK_NE(sat & kUnsignedSatMask, kUnsignedSatUndefined);
+ DCHECK_NE(sat & kSignedSatMask, kSignedSatUndefined);
+ }
+
+ // Saturate lanes of a vector based on saturation state.
+ LogicVRegister& SignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetSignedSaturation(i);
+ if (sat == kSignedSatPositive) {
+ SetInt(vform, i, MaxIntFromFormat(vform));
+ } else if (sat == kSignedSatNegative) {
+ SetInt(vform, i, MinIntFromFormat(vform));
+ }
+ }
+ return *this;
+ }
+
+ LogicVRegister& UnsignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetUnsignedSaturation(i);
+ if (sat == kUnsignedSatPositive) {
+ SetUint(vform, i, MaxUintFromFormat(vform));
+ } else if (sat == kUnsignedSatNegative) {
+ SetUint(vform, i, 0);
+ }
+ }
+ return *this;
+ }
+
+ // Getter for rounding state.
+ bool GetRounding(int index) { return round_[index]; }
+
+ // Setter for rounding state.
+ void SetRounding(int index, bool round) { round_[index] = round; }
+
+ // Round lanes of a vector based on rounding state.
+ LogicVRegister& Round(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ SetUint(vform, i, Uint(vform, i) + (GetRounding(i) ? 1 : 0));
+ }
+ return *this;
+ }
+
+ // Unsigned halve lanes of a vector, and use the saturation state to set the
+ // top bit.
+ LogicVRegister& Uhalve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t val = Uint(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetUnsignedSaturation(i) != kNotSaturated) {
+ // If the operation causes unsigned saturation, the bit shifted into the
+ // most significant bit must be set.
+ val |= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+ // Signed halve lanes of a vector, and use the carry state to set the top bit.
+ LogicVRegister& Halve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t val = Int(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetSignedSaturation(i) != kNotSaturated) {
+ // If the operation causes signed saturation, the sign bit must be
+ // inverted.
+ val ^= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+ private:
+ SimVRegister& register_;
+
+ // Allocate one saturation state entry per lane; largest register is type Q,
+ // and lanes can be a minimum of one byte wide.
+ Saturation saturated_[kQRegSize];
+
+ // Allocate one rounding state entry per lane.
+ bool round_[kQRegSize];
+};
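The visitors earlier in this patch chain these helpers, e.g. ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf). A sketch of what such a chain does with the class above:

//   LogicVRegister result = ushl(vf, rd, rn, rm);  // the op writes rd and
//                                                  // records per-lane
//                                                  // rounding/saturation
//   result.Round(vf);              // URSHL-style rounding: add the bit that
//                                  // was shifted out of each lane
//   result.UnsignedSaturate(vf);   // UQRSHL-style clamp: lanes flagged as
//                                  // saturated become 0 or the lane maximum
// The flags live in the temporary LogicVRegister returned by the helper, so
// every instruction starts from a freshly cleared saturation/rounding state.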
class Simulator : public DecoderVisitor {
public:
@@ -311,6 +845,7 @@ class Simulator : public DecoderVisitor {
CheckBreakNext();
Decode(pc_);
increment_pc();
+ LogAllWrittenRegisters();
CheckBreakpoints();
}
@@ -329,7 +864,7 @@ class Simulator : public DecoderVisitor {
//
template<typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
- DCHECK(code < kNumberOfRegisters);
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
if (IsZeroRegister(code, r31mode)) {
return 0;
}
@@ -345,6 +880,8 @@ class Simulator : public DecoderVisitor {
return reg<int64_t>(code, r31mode);
}
+ enum RegLogMode { LogRegWrites, NoRegLog };
+
// Write 'value' into an integer register. The value is zero-extended. This
// behaviour matches AArch64 register writes.
template<typename T>
@@ -369,7 +906,7 @@ class Simulator : public DecoderVisitor {
template <typename T>
void set_reg_no_log(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
- DCHECK(code < kNumberOfRegisters);
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
if (!IsZeroRegister(code, r31mode)) {
registers_[code].Set(value);
}
@@ -388,16 +925,39 @@ class Simulator : public DecoderVisitor {
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
- DCHECK(sizeof(T) == kPointerSize);
+ DCHECK_EQ(sizeof(T), static_cast<unsigned>(kPointerSize));
set_reg(kLinkRegCode, value);
}
template<typename T>
void set_sp(T value) {
- DCHECK(sizeof(T) == kPointerSize);
+ DCHECK_EQ(sizeof(T), static_cast<unsigned>(kPointerSize));
set_reg(31, value, Reg31IsStackPointer);
}
+ // Vector register accessors.
+ // These are equivalent to the integer register accessors, but for vector
+ // registers.
+
+ // A structure for representing a 128-bit Q register.
+ struct qreg_t {
+ uint8_t val[kQRegSize];
+ };
+
+ // Basic accessor: read the register as the specified type.
+ template <typename T>
+ T vreg(unsigned code) const {
+ static_assert((sizeof(T) == kBRegSize) || (sizeof(T) == kHRegSize) ||
+ (sizeof(T) == kSRegSize) || (sizeof(T) == kDRegSize) ||
+ (sizeof(T) == kQRegSize),
+ "Template type must match size of register.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+
+ return vregisters_[code].Get<T>();
+ }
+
+ inline SimVRegister& vreg(unsigned code) { return vregisters_[code]; }
+
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
int64_t fp() {
@@ -407,87 +967,134 @@ class Simulator : public DecoderVisitor {
Address get_sp() const { return reg<Address>(31, Reg31IsStackPointer); }
- template<typename T>
- T fpreg(unsigned code) const {
- DCHECK(code < kNumberOfRegisters);
- return fpregisters_[code].Get<T>();
- }
+ // Common specialized accessors for the vreg() template.
+ uint8_t breg(unsigned code) const { return vreg<uint8_t>(code); }
- // Common specialized accessors for the fpreg() template.
- float sreg(unsigned code) const {
- return fpreg<float>(code);
- }
+ float hreg(unsigned code) const { return vreg<uint16_t>(code); }
- uint32_t sreg_bits(unsigned code) const {
- return fpreg<uint32_t>(code);
- }
+ float sreg(unsigned code) const { return vreg<float>(code); }
- double dreg(unsigned code) const {
- return fpreg<double>(code);
- }
+ uint32_t sreg_bits(unsigned code) const { return vreg<uint32_t>(code); }
- uint64_t dreg_bits(unsigned code) const {
- return fpreg<uint64_t>(code);
- }
+ double dreg(unsigned code) const { return vreg<double>(code); }
+
+ uint64_t dreg_bits(unsigned code) const { return vreg<uint64_t>(code); }
+
+ qreg_t qreg(unsigned code) const { return vreg<qreg_t>(code); }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template <typename T>
+ T vreg(unsigned size, unsigned code) const {
+ uint64_t raw = 0;
+ T result;
- double fpreg(unsigned size, unsigned code) const {
switch (size) {
- case kSRegSizeInBits: return sreg(code);
- case kDRegSizeInBits: return dreg(code);
+ case kSRegSize:
+ raw = vreg<uint32_t>(code);
+ break;
+ case kDRegSize:
+ raw = vreg<uint64_t>(code);
+ break;
default:
UNREACHABLE();
- return 0.0;
}
+
+ static_assert(sizeof(result) <= sizeof(raw),
+ "Template type must be <= 64 bits.");
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
}
// Write 'value' into a floating-point register. The value is zero-extended.
// This behaviour matches AArch64 register writes.
- template<typename T>
- void set_fpreg(unsigned code, T value) {
- set_fpreg_no_log(code, value);
-
- if (sizeof(value) <= kSRegSize) {
- LogFPRegister(code, kPrintSRegValue);
- } else {
- LogFPRegister(code, kPrintDRegValue);
+ template <typename T>
+ void set_vreg(unsigned code, T value, RegLogMode log_mode = LogRegWrites) {
+ static_assert(
+ (sizeof(value) == kBRegSize) || (sizeof(value) == kHRegSize) ||
+ (sizeof(value) == kSRegSize) || (sizeof(value) == kDRegSize) ||
+ (sizeof(value) == kQRegSize),
+ "Template type must match size of register.");
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ vregisters_[code].Set(value);
+
+ if (log_mode == LogRegWrites) {
+ LogVRegister(code, GetPrintRegisterFormat(value));
}
}
- // Common specialized accessors for the set_fpreg() template.
- void set_sreg(unsigned code, float value) {
- set_fpreg(code, value);
+ // Common specialized accessors for the set_vreg() template.
+ void set_breg(unsigned code, int8_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_hreg(unsigned code, int16_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg(unsigned code, float value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
}
- void set_sreg_bits(unsigned code, uint32_t value) {
- set_fpreg(code, value);
+ void set_dreg(unsigned code, double value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
}
- void set_dreg(unsigned code, double value) {
- set_fpreg(code, value);
+ void set_dreg_bits(unsigned code, uint64_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
}
- void set_dreg_bits(unsigned code, uint64_t value) {
- set_fpreg(code, value);
+ void set_qreg(unsigned code, qreg_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
}
// As above, but don't automatically log the register update.
template <typename T>
- void set_fpreg_no_log(unsigned code, T value) {
- DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
- DCHECK(code < kNumberOfFPRegisters);
- fpregisters_[code].Set(value);
+ void set_vreg_no_log(unsigned code, T value) {
+ STATIC_ASSERT((sizeof(value) == kBRegSize) ||
+ (sizeof(value) == kHRegSize) ||
+ (sizeof(value) == kSRegSize) ||
+ (sizeof(value) == kDRegSize) || (sizeof(value) == kQRegSize));
+ DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
+ vregisters_[code].Set(value);
+ }
+
+ void set_breg_no_log(unsigned code, uint8_t value) {
+ set_vreg_no_log(code, value);
+ }
+
+ void set_hreg_no_log(unsigned code, uint16_t value) {
+ set_vreg_no_log(code, value);
}
void set_sreg_no_log(unsigned code, float value) {
- set_fpreg_no_log(code, value);
+ set_vreg_no_log(code, value);
}
void set_dreg_no_log(unsigned code, double value) {
- set_fpreg_no_log(code, value);
+ set_vreg_no_log(code, value);
+ }
+
+ void set_qreg_no_log(unsigned code, qreg_t value) {
+ set_vreg_no_log(code, value);
}
SimSystemRegister& nzcv() { return nzcv_; }
SimSystemRegister& fpcr() { return fpcr_; }
+ FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
+ bool DN() { return fpcr_.DN() != 0; }
// Debug helpers
@@ -514,66 +1121,195 @@ class Simulator : public DecoderVisitor {
// Print all registers of the specified types.
void PrintRegisters();
- void PrintFPRegisters();
+ void PrintVRegisters();
void PrintSystemRegisters();
- // Like Print* (above), but respect log_parameters().
- void LogSystemRegisters() {
- if (log_parameters() & LOG_SYS_REGS) PrintSystemRegisters();
+ // As above, but only print the registers that have been updated.
+ void PrintWrittenRegisters();
+ void PrintWrittenVRegisters();
+
+ // As above, but respect LOG_REG and LOG_VREG.
+ void LogWrittenRegisters() {
+ if (log_parameters() & LOG_REGS) PrintWrittenRegisters();
+ }
+ void LogWrittenVRegisters() {
+ if (log_parameters() & LOG_VREGS) PrintWrittenVRegisters();
+ }
+ void LogAllWrittenRegisters() {
+ LogWrittenRegisters();
+ LogWrittenVRegisters();
+ }
+
+ // Specify relevant register formats for Print(V)Register and related helpers.
+ enum PrintRegisterFormat {
+ // The lane size.
+ kPrintRegLaneSizeB = 0 << 0,
+ kPrintRegLaneSizeH = 1 << 0,
+ kPrintRegLaneSizeS = 2 << 0,
+ kPrintRegLaneSizeW = kPrintRegLaneSizeS,
+ kPrintRegLaneSizeD = 3 << 0,
+ kPrintRegLaneSizeX = kPrintRegLaneSizeD,
+ kPrintRegLaneSizeQ = 4 << 0,
+
+ kPrintRegLaneSizeOffset = 0,
+ kPrintRegLaneSizeMask = 7 << 0,
+
+ // The lane count.
+ kPrintRegAsScalar = 0,
+ kPrintRegAsDVector = 1 << 3,
+ kPrintRegAsQVector = 2 << 3,
+
+ kPrintRegAsVectorMask = 3 << 3,
+
+ // Indicate floating-point format lanes. (This flag is only supported for S-
+ // and D-sized lanes.)
+ kPrintRegAsFP = 1 << 5,
+
+ // Supported combinations.
+
+ kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar,
+ kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar,
+ kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+
+ kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar,
+ kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector,
+ kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector,
+ kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar,
+ kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector,
+ kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector,
+ kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar,
+ kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector,
+ kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector,
+ kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP,
+ kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar,
+ kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector,
+ kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar
+ };
+
+ unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) {
+ return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset;
}
- void LogRegisters() {
- if (log_parameters() & LOG_REGS) PrintRegisters();
+
+ unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegLaneSizeInBytesLog2(format);
}
- void LogFPRegisters() {
- if (log_parameters() & LOG_FP_REGS) PrintFPRegisters();
+
+ unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) {
+ if (format & kPrintRegAsDVector) return kDRegSizeLog2;
+ if (format & kPrintRegAsQVector) return kQRegSizeLog2;
+
+ // Scalar types.
+ return GetPrintRegLaneSizeInBytesLog2(format);
}
- // Specify relevant register sizes, for PrintFPRegister.
- //
- // These values are bit masks; they can be combined in case multiple views of
- // a machine register are interesting.
- enum PrintFPRegisterSizes {
- kPrintDRegValue = 1 << kDRegSize,
- kPrintSRegValue = 1 << kSRegSize,
- kPrintAllFPRegValues = kPrintDRegValue | kPrintSRegValue
- };
+ unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegLaneCount(PrintRegisterFormat format) {
+ unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format);
+ unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format);
+ DCHECK_GE(reg_size_log2, lane_size_log2);
+ return 1 << (reg_size_log2 - lane_size_log2);
+ }
+
+ template <typename T>
+ PrintRegisterFormat GetPrintRegisterFormat(T value) {
+ return GetPrintRegisterFormatForSize(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(double value) {
+ static_assert(sizeof(value) == kDRegSize,
+ "D register must be size of double.");
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(float value) {
+ static_assert(sizeof(value) == kSRegSize,
+ "S register must be size of float.");
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform);
+ PrintRegisterFormat GetPrintRegisterFormatFP(VectorFormat vform);
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(size_t reg_size,
+ size_t lane_size);
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(size_t size) {
+ return GetPrintRegisterFormatForSize(size, size);
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatForSizeFP(size_t size) {
+ switch (size) {
+ default:
+ UNREACHABLE();
+ case kDRegSize:
+ return kPrintDReg;
+ case kSRegSize:
+ return kPrintSReg;
+ }
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) {
+ if ((GetPrintRegLaneSizeInBytes(format) == kSRegSize) ||
+ (GetPrintRegLaneSizeInBytes(format) == kDRegSize)) {
+ return static_cast<PrintRegisterFormat>(format | kPrintRegAsFP);
+ }
+ return format;
+ }
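A worked example of the packed PrintRegisterFormat values and the helpers above:

// kPrintReg4S == kPrintRegLaneSizeS | kPrintRegAsQVector, so
//   GetPrintRegLaneSizeInBytes(kPrintReg4S) == 1 << 2 == 4    (S lanes)
//   GetPrintRegSizeInBytes(kPrintReg4S)     == 16             (Q register)
//   GetPrintRegLaneCount(kPrintReg4S)       == 16 / 4 == 4    (".4s")
// Lane size lives in bits 2:0, the scalar/D/Q kind in bits 4:3, and
// kPrintRegAsFP (bit 5) only changes how lanes are printed, not their size.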
// Print individual register values (after update).
void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
- void PrintFPRegister(unsigned code,
- PrintFPRegisterSizes sizes = kPrintAllFPRegValues);
+ void PrintVRegister(unsigned code, PrintRegisterFormat sizes);
void PrintSystemRegister(SystemRegister id);
// Like Print* (above), but respect log_parameters().
void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
if (log_parameters() & LOG_REGS) PrintRegister(code, r31mode);
}
- void LogFPRegister(unsigned code,
- PrintFPRegisterSizes sizes = kPrintAllFPRegValues) {
- if (log_parameters() & LOG_FP_REGS) PrintFPRegister(code, sizes);
+ void LogVRegister(unsigned code, PrintRegisterFormat format) {
+ if (log_parameters() & LOG_VREGS) PrintVRegister(code, format);
}
void LogSystemRegister(SystemRegister id) {
if (log_parameters() & LOG_SYS_REGS) PrintSystemRegister(id);
}
// Print memory accesses.
- void PrintRead(uintptr_t address, size_t size, unsigned reg_code);
- void PrintReadFP(uintptr_t address, size_t size, unsigned reg_code);
- void PrintWrite(uintptr_t address, size_t size, unsigned reg_code);
- void PrintWriteFP(uintptr_t address, size_t size, unsigned reg_code);
+ void PrintRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+ void PrintVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
// Like Print* (above), but respect log_parameters().
- void LogRead(uintptr_t address, size_t size, unsigned reg_code) {
- if (log_parameters() & LOG_REGS) PrintRead(address, size, reg_code);
- }
- void LogReadFP(uintptr_t address, size_t size, unsigned reg_code) {
- if (log_parameters() & LOG_FP_REGS) PrintReadFP(address, size, reg_code);
- }
- void LogWrite(uintptr_t address, size_t size, unsigned reg_code) {
- if (log_parameters() & LOG_WRITE) PrintWrite(address, size, reg_code);
+ void LogRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (log_parameters() & LOG_REGS) PrintRead(address, reg_code, format);
+ }
+ void LogWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (log_parameters() & LOG_WRITE) PrintWrite(address, reg_code, format);
+ }
+ void LogVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (log_parameters() & LOG_VREGS) {
+ PrintVRead(address, reg_code, format, lane);
+ }
}
- void LogWriteFP(uintptr_t address, size_t size, unsigned reg_code) {
- if (log_parameters() & LOG_WRITE) PrintWriteFP(address, size, reg_code);
+ void LogVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (log_parameters() & LOG_WRITE) {
+ PrintVWrite(address, reg_code, format, lane);
+ }
}
int log_parameters() { return log_parameters_; }
@@ -592,6 +1328,14 @@ class Simulator : public DecoderVisitor {
}
}
+ // Helper functions for register tracing.
+ void PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes = kXRegSize);
+ void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSize,
+ int lsb = 0);
+ void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes,
+ int lane_count = 1, int rightmost_lane = 0);
+
static inline const char* WRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* XRegNameForCode(unsigned code,
@@ -639,7 +1383,6 @@ class Simulator : public DecoderVisitor {
return true;
default:
UNREACHABLE();
- return false;
}
}
@@ -666,6 +1409,10 @@ class Simulator : public DecoderVisitor {
void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
+ void NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+ void NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
void CheckMemoryAccess(uintptr_t address, uintptr_t stack);
// Memory read helpers.
@@ -673,7 +1420,8 @@ class Simulator : public DecoderVisitor {
T MemoryRead(A address) {
T value;
STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
- (sizeof(value) == 4) || (sizeof(value) == 8));
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
return value;
}
@@ -682,7 +1430,8 @@ class Simulator : public DecoderVisitor {
template <typename T, typename A>
void MemoryWrite(A address, T value) {
STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
- (sizeof(value) == 4) || (sizeof(value) == 8));
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
memcpy(reinterpret_cast<void*>(address), &value, sizeof(value));
}
@@ -700,14 +1449,652 @@ class Simulator : public DecoderVisitor {
void DataProcessing2Source(Instruction* instr);
template <typename T>
void BitfieldHelper(Instruction* instr);
+ uint16_t PolynomialMult(uint8_t op1, uint8_t op2);
+
+ void ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr);
+ void ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr);
+ void ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr);
+ void ld2(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ uint64_t addr);
+ void ld2(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ int index, uint64_t addr);
+ void ld2r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ uint64_t addr);
+ void ld3(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ LogicVRegister dst3, uint64_t addr);
+ void ld3(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ LogicVRegister dst3, int index, uint64_t addr);
+ void ld3r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ LogicVRegister dst3, uint64_t addr);
+ void ld4(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ LogicVRegister dst3, LogicVRegister dst4, uint64_t addr);
+ void ld4(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ LogicVRegister dst3, LogicVRegister dst4, int index, uint64_t addr);
+ void ld4r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2,
+ LogicVRegister dst3, LogicVRegister dst4, uint64_t addr);
+ void st1(VectorFormat vform, LogicVRegister src, uint64_t addr);
+ void st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr);
+ void st2(VectorFormat vform, LogicVRegister src, LogicVRegister src2,
+ uint64_t addr);
+ void st2(VectorFormat vform, LogicVRegister src, LogicVRegister src2,
+ int index, uint64_t addr);
+ void st3(VectorFormat vform, LogicVRegister src, LogicVRegister src2,
+ LogicVRegister src3, uint64_t addr);
+ void st3(VectorFormat vform, LogicVRegister src, LogicVRegister src2,
+ LogicVRegister src3, int index, uint64_t addr);
+ void st4(VectorFormat vform, LogicVRegister src, LogicVRegister src2,
+ LogicVRegister src3, LogicVRegister src4, uint64_t addr);
+ void st4(VectorFormat vform, LogicVRegister src, LogicVRegister src2,
+ LogicVRegister src3, LogicVRegister src4, int index, uint64_t addr);
+ LogicVRegister cmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister cmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, int imm, Condition cond);
+ LogicVRegister cmptst(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister add(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister mla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister mls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister mla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister mls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister pmul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+
+ typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmulx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index);
+ LogicVRegister sqdmlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index);
+ LogicVRegister sqdmlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index);
+ LogicVRegister sqdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqrdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index);
+ LogicVRegister sub(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister and_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister orr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister orn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister eor(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, uint64_t imm);
+ LogicVRegister bif(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister bit(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister bsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister cls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister clz(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister cnt(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister not_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rbit(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int revSize);
+ LogicVRegister rev16(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev32(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev64(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addlp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, bool is_signed,
+ bool do_accumulate);
+ LogicVRegister saddlp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sadalp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uadalp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ext(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ int index);
+ LogicVRegister ins_element(VectorFormat vform, LogicVRegister dst,
+ int dst_index, const LogicVRegister& src,
+ int src_index);
+ LogicVRegister ins_immediate(VectorFormat vform, LogicVRegister dst,
+ int dst_index, uint64_t imm);
+ LogicVRegister dup_element(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int src_index);
+ LogicVRegister dup_immediate(VectorFormat vform, LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister movi(VectorFormat vform, LogicVRegister dst, uint64_t imm);
+ LogicVRegister mvni(VectorFormat vform, LogicVRegister dst, uint64_t imm);
+ LogicVRegister orr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, uint64_t imm);
+ LogicVRegister sshl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister ushl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister SMinMax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ bool max);
+ LogicVRegister smax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister smin(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister SMinMaxP(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool max);
+ LogicVRegister smaxp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister sminp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister saddlv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister SMinMaxV(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, bool max);
+ LogicVRegister smaxv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister Table(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& ind, bool zero_out_of_bounds,
+ const LogicVRegister* tab1,
+ const LogicVRegister* tab2 = NULL,
+ const LogicVRegister* tab3 = NULL,
+ const LogicVRegister* tab4 = NULL);
+ LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& tab2,
+ const LogicVRegister& tab3, const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& tab2,
+ const LogicVRegister& tab3, const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& tab2,
+ const LogicVRegister& tab3, const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab, const LogicVRegister& tab2,
+ const LogicVRegister& tab3, const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister uaddl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uaddl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uaddw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uaddw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister saddl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister saddl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister saddw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister saddw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister usubl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister usubl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister usubw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister usubw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister ssubl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister ssubl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister ssubw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister ssubw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister UMinMax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ bool max);
+ LogicVRegister umax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister umin(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister UMinMaxP(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool max);
+ LogicVRegister umaxp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uminp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister UMinMaxV(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, bool max);
+ LogicVRegister umaxv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uminv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister trn1(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister trn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister zip1(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister zip2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uzp1(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uzp2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister shl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister scvtf(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister ucvtf(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister sshll(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sshll2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister shll(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister shll2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ushll(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister ushll2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sli(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sri(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sshr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister ushr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister ssra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister usra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister srsra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister ursra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister suqadd(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister usqadd(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqshl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister uqshl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqshlu(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister abs(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister neg(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ExtractNarrow(VectorFormat vform, LogicVRegister dst,
+ bool dstIsSigned, const LogicVRegister& src,
+ bool srcIsSigned);
+ LogicVRegister xtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uqxtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtun(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister AbsDiff(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ bool issigned);
+ LogicVRegister saba(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister uaba(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister shrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister shrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister rshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister rshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister uqshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister uqshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister uqrshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister uqrshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqrshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqrshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqshrun(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqshrun2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqrshrun(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqrshrun2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift);
+ LogicVRegister sqrdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool round = true);
+ LogicVRegister sqdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+#define NEON_3VREG_LOGIC_LIST(V) \
+ V(addhn) \
+ V(addhn2) \
+ V(raddhn) \
+ V(raddhn2) \
+ V(subhn) \
+ V(subhn2) \
+ V(rsubhn) \
+ V(rsubhn2) \
+ V(pmull) \
+ V(pmull2) \
+ V(sabal) \
+ V(sabal2) \
+ V(uabal) \
+ V(uabal2) \
+ V(sabdl) \
+ V(sabdl2) \
+ V(uabdl) \
+ V(uabdl2) \
+ V(smull) \
+ V(smull2) \
+ V(umull) \
+ V(umull2) \
+ V(smlal) \
+ V(smlal2) \
+ V(umlal) \
+ V(umlal2) \
+ V(smlsl) \
+ V(smlsl2) \
+ V(umlsl) \
+ V(umlsl2) \
+ V(sqdmlal) \
+ V(sqdmlal2) \
+ V(sqdmlsl) \
+ V(sqdmlsl2) \
+ V(sqdmull) \
+ V(sqdmull2)
+
+#define DEFINE_LOGIC_FUNC(FXN) \
+ LogicVRegister FXN(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC)
+#undef DEFINE_LOGIC_FUNC
+
+#define NEON_FP3SAME_LIST(V) \
+ V(fadd, FPAdd, false) \
+ V(fsub, FPSub, true) \
+ V(fmul, FPMul, true) \
+ V(fmulx, FPMulx, true) \
+ V(fdiv, FPDiv, true) \
+ V(fmax, FPMax, false) \
+ V(fmin, FPMin, false) \
+ V(fmaxnm, FPMaxNM, false) \
+ V(fminnm, FPMinNM, false)
+
+#define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+ template <typename T> \
+ LogicVRegister FN(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, const LogicVRegister& src2); \
+ LogicVRegister FN(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP)
+#undef DECLARE_NEON_FP_VECTOR_OP
+
+#define NEON_FPPAIRWISE_LIST(V) \
+ V(faddp, fadd, FPAdd) \
+ V(fmaxp, fmax, FPMax) \
+ V(fmaxnmp, fmaxnm, FPMaxNM) \
+ V(fminp, fmin, FPMin) \
+ V(fminnmp, fminnm, FPMinNM)
+
+#define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+ LogicVRegister FNP(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, const LogicVRegister& src2); \
+ LogicVRegister FNP(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src);
+ NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP)
+#undef DECLARE_NEON_FP_PAIR_OP
+
+ template <typename T>
+ LogicVRegister frecps(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister frecps(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister frsqrts(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frsqrts(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister fmla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister fmls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister fnmul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
template <typename T>
- T FPDefaultNaN() const;
+ LogicVRegister fcmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fabscmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp_zero(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, Condition cond);
+
+ template <typename T>
+ LogicVRegister fneg(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fneg(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister frecpx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister fabs_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabs_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabd(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, const LogicVRegister& src2);
+ LogicVRegister frint(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPRounding rounding_mode,
+ bool inexact_exception = false);
+ LogicVRegister fcvts(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtu(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fsqrt(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frsqrte(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpe(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPRounding rounding);
+ LogicVRegister ursqrte(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister urecpe(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+
+ typedef float (Simulator::*FPMinMaxOp)(float a, float b);
+
+ LogicVRegister FMinMaxV(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPMinMaxOp Op);
+
+ LogicVRegister fminv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fminnmv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxnmv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src);
+
+ template <typename T>
+ T FPRecipSqrtEstimate(T op);
+ template <typename T>
+ T FPRecipEstimate(T op, FPRounding rounding);
+ template <typename T, typename R>
+ R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding);
void FPCompare(double val0, double val1);
double FPRoundInt(double value, FPRounding round_mode);
double FPToDouble(float value);
float FPToFloat(double value, FPRounding round_mode);
+ float FPToFloat(float16 value);
+ float16 FPToFloat16(float value, FPRounding round_mode);
+ float16 FPToFloat16(double value, FPRounding round_mode);
+ double recip_sqrt_estimate(double a);
+ double recip_estimate(double a);
+ double FPRecipSqrtEstimate(double a);
+ double FPRecipEstimate(double a);
double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
@@ -739,6 +2126,9 @@ class Simulator : public DecoderVisitor {
T FPMul(T op1, T op2);
template <typename T>
+ T FPMulx(T op1, T op2);
+
+ template <typename T>
T FPMulAdd(T a, T op1, T op2);
template <typename T>
@@ -747,17 +2137,18 @@ class Simulator : public DecoderVisitor {
template <typename T>
T FPSub(T op1, T op2);
- // Standard NaN processing.
template <typename T>
- T FPProcessNaN(T op);
-
- bool FPProcessNaNs(Instruction* instr);
+ T FPRecipStepFused(T op1, T op2);
template <typename T>
- T FPProcessNaNs(T op1, T op2);
+ T FPRSqrtStepFused(T op1, T op2);
- template <typename T>
- T FPProcessNaNs3(T op1, T op2, T op3);
+ // This doesn't do anything at the moment. We'll need it if we want support
+ // for cumulative exception bits or floating-point exceptions.
+ void FPProcessException() {}
+
+ // Standard NaN processing.
+ bool FPProcessNaNs(Instruction* instr);
void CheckStackAlignment();
@@ -769,7 +2160,7 @@ class Simulator : public DecoderVisitor {
static const uint64_t kCallerSavedRegisterCorruptionValue =
0xca11edc0de000000UL;
// This value is a NaN in both 32-bit and 64-bit FP.
- static const uint64_t kCallerSavedFPRegisterCorruptionValue =
+ static const uint64_t kCallerSavedVRegisterCorruptionValue =
0x7ff000007f801000UL;
// This value is a mix of 32/64-bits NaN and "verbose" immediate.
static const uint64_t kDefaultCPURegisterCorruptionValue =
@@ -797,7 +2188,7 @@ class Simulator : public DecoderVisitor {
SimRegister registers_[kNumberOfRegisters];
// Floating point registers
- SimFPRegister fpregisters_[kNumberOfFPRegisters];
+ SimVRegister vregisters_[kNumberOfVRegisters];
// Processor state
// bits[31, 27]: Condition flags N, Z, C, and V.
@@ -889,9 +2280,9 @@ class Simulator : public DecoderVisitor {
// not actually perform loads and stores. NotifyStoreExcl only returns
// true if the exclusive store is allowed; the global monitor will still
// have to be checked to see whether the memory should be updated.
- void NotifyLoad(uintptr_t addr);
+ void NotifyLoad();
void NotifyLoadExcl(uintptr_t addr, TransactionSize size);
- void NotifyStore(uintptr_t addr);
+ void NotifyStore();
bool NotifyStoreExcl(uintptr_t addr, TransactionSize size);
private:
@@ -916,7 +2307,7 @@ class Simulator : public DecoderVisitor {
// not actually perform loads and stores.
void Clear_Locked();
void NotifyLoadExcl_Locked(uintptr_t addr);
- void NotifyStore_Locked(uintptr_t addr, bool is_requesting_processor);
+ void NotifyStore_Locked(bool is_requesting_processor);
bool NotifyStoreExcl_Locked(uintptr_t addr, bool is_requesting_processor);
MonitorAccess access_state_;
@@ -935,7 +2326,7 @@ class Simulator : public DecoderVisitor {
base::Mutex mutex;
void NotifyLoadExcl_Locked(uintptr_t addr, Processor* processor);
- void NotifyStore_Locked(uintptr_t addr, Processor* processor);
+ void NotifyStore_Locked(Processor* processor);
bool NotifyStoreExcl_Locked(uintptr_t addr, Processor* processor);
// Called when the simulator is destroyed.
@@ -955,10 +2346,67 @@ class Simulator : public DecoderVisitor {
private:
void Init(FILE* stream);
+ template <typename T>
+ static T FPDefaultNaN();
+
+ template <typename T>
+ T FPProcessNaN(T op) {
+ DCHECK(std::isnan(op));
+ return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+ }
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ DCHECK(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ DCHECK(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+ }
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ DCHECK(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ DCHECK(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ DCHECK(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+ }
+
int log_parameters_;
Isolate* isolate_;
};
+template <>
+inline double Simulator::FPDefaultNaN<double>() {
+ return kFP64DefaultNaN;
+}
+
+template <>
+inline float Simulator::FPDefaultNaN<float>() {
+ return kFP32DefaultNaN;
+}
// When running with the simulator transition into simulated execution at this
// point.
diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/arm64/simulator-logic-arm64.cc
new file mode 100644
index 0000000000..44a31c4097
--- /dev/null
+++ b/deps/v8/src/arm64/simulator-logic-arm64.cc
@@ -0,0 +1,4191 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include <cmath>
+#include "src/arm64/simulator-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+
+namespace {
+
+// See FPRound for a description of this function.
+inline double FPRoundToDouble(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ uint64_t bits = FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(
+ sign, exponent, mantissa, round_mode);
+ return bit_cast<double>(bits);
+}
+
+// See FPRound for a description of this function.
+inline float FPRoundToFloat(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ uint32_t bits = FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(
+ sign, exponent, mantissa, round_mode);
+ return bit_cast<float>(bits);
+}
+
+// See FPRound for a description of this function.
+inline float16 FPRoundToFloat16(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ return FPRound<float16, kFloat16ExponentBits, kFloat16MantissaBits>(
+ sign, exponent, mantissa, round_mode);
+}
+
+} // namespace
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else if (src == INT64_MIN) {
+ return -UFixedToDouble(src, fbits, round);
+ } else {
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
+
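UFixedToDouble places the highest set bit of the source at the top of the mantissa and compensates by choosing exponent = highest_significant_bit - fbits, so the result equals src * 2^-fbits up to rounding. A minimal standalone sketch of that relationship (illustration only, not part of the patch; standard library only):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Index of the highest set bit of a non-zero 64-bit value.
static int HighestSetBit(uint64_t v) {
  int bit = 0;
  while (v >>= 1) ++bit;
  return bit;
}

int main() {
  uint64_t src = 0x2800;  // Fixed-point with 12 fractional bits, i.e. 2.5.
  int fbits = 12;
  // Reference conversion: src * 2^-fbits, ignoring rounding-mode detail.
  double reference = std::ldexp(static_cast<double>(src), -fbits);
  // The exponent UFixedToDouble passes to FPRoundToDouble.
  int exponent = HighestSetBit(src) - fbits;  // 13 - 12 = 1, and 2.5 == 1.25 * 2^1.
  std::printf("value=%g exponent=%d\n", reference, exponent);  // value=2.5 exponent=1
  return 0;
}
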
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else if (src == INT64_MIN) {
+ return -UFixedToFloat(src, fbits, round);
+ } else {
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+double Simulator::FPToDouble(float value) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The mantissa is transferred entirely, except that the top bit is
+ // forced to '1', making the result a quiet NaN. The unused (low-order)
+ // mantissa bits are set to 0.
+ uint32_t raw = bit_cast<uint32_t>(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << kDoubleExponentBits) - 1;
+ uint64_t mantissa = unsigned_bitextract_64(21, 0, raw);
+
+ // Unused low-order bits remain zero.
+ mantissa <<= (kDoubleMantissaBits - kFloatMantissaBits);
+
+ // Force a quiet NaN.
+ mantissa |= (UINT64_C(1) << (kDoubleMantissaBits - 1));
+
+ return double_pack(sign, exponent, mantissa);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ UNREACHABLE();
+}
+
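The FP_NAN branch above widens a float NaN the way the hardware does: propagate the sign, saturate the exponent, left-justify the float payload in the double mantissa and force the quiet bit. A standalone sketch of the same bit manipulation (illustration only, assuming IEEE-754 binary32/binary64 layouts):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Widen a float NaN to double following FPToDouble's FP_NAN branch.
static double WidenNaN(float value) {
  uint32_t raw;
  std::memcpy(&raw, &value, sizeof(raw));
  uint64_t sign = raw >> 31;
  uint64_t exponent = 0x7FF;                  // All exponent bits set.
  uint64_t payload = raw & 0x003FFFFFu;       // Float mantissa bits 21..0.
  uint64_t mantissa = payload << (52 - 23);   // Left-justify in the double mantissa.
  mantissa |= uint64_t{1} << 51;              // Force a quiet NaN.
  uint64_t bits = (sign << 63) | (exponent << 52) | mantissa;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  uint32_t raw = 0x7F801234;  // A signalling float NaN with payload 0x1234.
  float f;
  std::memcpy(&f, &raw, sizeof(f));
  double d = WidenNaN(f);
  uint64_t out;
  std::memcpy(&out, &d, sizeof(out));
  // Prints 7ff8024680000000: quiet bit set, payload shifted up, sign kept.
  std::printf("%016llx\n", static_cast<unsigned long long>(out));
  return 0;
}
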
+float Simulator::FPToFloat(float16 value) {
+ uint32_t sign = value >> 15;
+ uint32_t exponent =
+ unsigned_bitextract_32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
+ kFloat16MantissaBits, value);
+ uint32_t mantissa =
+ unsigned_bitextract_32(kFloat16MantissaBits - 1, 0, value);
+
+ switch (float16classify(value)) {
+ case FP_ZERO:
+ return (sign == 0) ? 0.0f : -0.0f;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
+
+ case FP_SUBNORMAL: {
+ // Calculate shift required to put mantissa into the most-significant bits
+ // of the destination mantissa.
+ int shift = CountLeadingZeros(mantissa << (32 - 10), 32);
+
+ // Shift mantissa and discard implicit '1'.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
+ mantissa &= (1 << kFloatMantissaBits) - 1;
+
+ // Adjust the exponent for the shift applied, and rebias.
+ exponent = exponent - shift + (kFloatExponentBias - kFloat16ExponentBias);
+ break;
+ }
+
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The mantissa is transferred entirely, except that the top bit is
+ // forced to '1', making the result a quiet NaN. The unused (low-order)
+ // mantissa bits are set to 0.
+ exponent = (1 << kFloatExponentBits) - 1;
+
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+ mantissa |= 1 << (kFloatMantissaBits - 1); // Force a quiet NaN.
+ break;
+ }
+
+ case FP_NORMAL: {
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+
+ // Change exponent bias.
+ exponent += (kFloatExponentBias - kFloat16ExponentBias);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ return float_pack(sign, exponent, mantissa);
+}
+
+float16 Simulator::FPToFloat16(float value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ DCHECK_EQ(round_mode, FPTieEven);
+ USE(round_mode);
+
+ int64_t sign = float_sign(value);
+ int64_t exponent =
+ static_cast<int64_t>(float_exp(value)) - kFloatExponentBias;
+ uint32_t mantissa = float_mantissa(value);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The mantissa is transferred as much as possible, except that the top
+ // bit is forced to '1', making the result a quiet NaN.
+ float16 result =
+ (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+ result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
+    result |= (1 << (kFloat16MantissaBits - 1));  // Force a quiet NaN.
+ return result;
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? 0 : 0x8000;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert float-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (1 << kFloatMantissaBits);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+}
+
+float16 Simulator::FPToFloat16(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ DCHECK_EQ(round_mode, FPTieEven);
+ USE(round_mode);
+
+ int64_t sign = double_sign(value);
+ int64_t exponent =
+ static_cast<int64_t>(double_exp(value)) - kDoubleExponentBias;
+ uint64_t mantissa = double_mantissa(value);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The mantissa is transferred as much as possible, except that the top
+ // bit is forced to '1', making the result a quiet NaN.
+ float16 result =
+ (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+ result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
+    result |= (1 << (kFloat16MantissaBits - 1));  // Force a quiet NaN.
+ return result;
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? 0 : 0x8000;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (UINT64_C(1) << kDoubleMantissaBits);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+}
+
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+  // Only the FPTieEven and FPRoundOdd rounding modes are implemented.
+ DCHECK((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ FPProcessException();
+ }
+ if (DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The mantissa is transferred as much as possible, except that the
+ // top bit is forced to '1', making the result a quiet NaN.
+
+ uint64_t raw = bit_cast<uint64_t>(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t mantissa = static_cast<uint32_t>(unsigned_bitextract_64(
+ 50, kDoubleMantissaBits - kFloatMantissaBits, raw));
+ mantissa |= (1 << (kFloatMantissaBits - 1)); // Force a quiet NaN.
+
+ return float_pack(sign, exponent, mantissa);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint32_t sign = double_sign(value);
+ int64_t exponent =
+ static_cast<int64_t>(double_exp(value)) - kDoubleExponentBias;
+ uint64_t mantissa = double_mantissa(value);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ // For normal FP values, add the hidden bit.
+ mantissa |= (UINT64_C(1) << kDoubleMantissaBits);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+}
+
+void Simulator::ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+void Simulator::ld1(VectorFormat vform, LogicVRegister dst, int index,
+ uint64_t addr) {
+ dst.ReadUintFromMem(vform, index, addr);
+}
+
+void Simulator::ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ }
+}
+
+void Simulator::ld2(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ addr1 += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
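ld2 de-interleaves a structure load: the two destination registers take alternating elements, with both lane addresses advancing by 2 * esize per iteration. A standalone sketch of the access pattern (illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t memory[8] = {1, 2, 3, 4, 5, 6, 7, 8};  // Pairs: (1,2) (3,4) (5,6) (7,8).
  uint8_t dst1[4], dst2[4];
  const int esize = sizeof(uint8_t);             // LaneSizeInBytesFromFormat.
  int addr1 = 0, addr2 = esize;
  for (int i = 0; i < 4; ++i) {
    dst1[i] = memory[addr1];
    dst2[i] = memory[addr2];
    addr1 += 2 * esize;
    addr2 += 2 * esize;
  }
  // dst1 = {1, 3, 5, 7}, dst2 = {2, 4, 6, 8}.
  for (int i = 0; i < 4; ++i) std::printf("%d %d\n", dst1[i], dst2[i]);
  return 0;
}
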
+void Simulator::ld2(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, int index, uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+}
+
+void Simulator::ld2r(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, uint64_t addr) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ }
+}
+
+void Simulator::ld3(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, LogicVRegister dst3, uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ addr1 += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+void Simulator::ld3(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, LogicVRegister dst3, int index,
+ uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+}
+
+void Simulator::ld3r(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, LogicVRegister dst3, uint64_t addr) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ }
+}
+
+void Simulator::ld4(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, LogicVRegister dst3,
+ LogicVRegister dst4, uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ addr1 += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+void Simulator::ld4(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, LogicVRegister dst3,
+ LogicVRegister dst4, int index, uint64_t addr1) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+ dst4.ReadUintFromMem(vform, index, addr4);
+}
+
+void Simulator::ld4r(VectorFormat vform, LogicVRegister dst1,
+ LogicVRegister dst2, LogicVRegister dst3,
+ LogicVRegister dst4, uint64_t addr) {
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ }
+}
+
+void Simulator::st1(VectorFormat vform, LogicVRegister src, uint64_t addr) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ src.WriteUintToMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+void Simulator::st1(VectorFormat vform, LogicVRegister src, int index,
+ uint64_t addr) {
+ src.WriteUintToMem(vform, index, addr);
+}
+
+void Simulator::st2(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ addr += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+void Simulator::st2(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2,
+ int index, uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+}
+
+void Simulator::st3(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2,
+ LogicVRegister dst3, uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ addr += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+void Simulator::st3(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2,
+ LogicVRegister dst3, int index, uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+}
+
+void Simulator::st4(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2,
+ LogicVRegister dst3, LogicVRegister dst4, uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ dst4.WriteUintToMem(vform, i, addr4);
+ addr += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+void Simulator::st4(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2,
+ LogicVRegister dst3, LogicVRegister dst4, int index,
+ uint64_t addr) {
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+ dst4.WriteUintToMem(vform, index, addr + 3 * esize);
+}
+
+LogicVRegister Simulator::cmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = src1.Int(vform, i);
+ int64_t sb = src2.Int(vform, i);
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ bool result = false;
+ switch (cond) {
+ case eq:
+ result = (ua == ub);
+ break;
+ case ge:
+ result = (sa >= sb);
+ break;
+ case gt:
+ result = (sa > sb);
+ break;
+ case hi:
+ result = (ua > ub);
+ break;
+ case hs:
+ result = (ua >= ub);
+ break;
+ case lt:
+ result = (sa < sb);
+ break;
+ case le:
+ result = (sa <= sb);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
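cmp evaluates the condition per lane, reading the signed or unsigned view as the condition requires, and writes an all-ones lane for true and zero for false. A standalone sketch of the mask generation for 16-bit lanes (illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  int16_t a[4] = {-1, 5, 7, 0};
  int16_t b[4] = {3, 5, -2, 1};
  uint16_t dst[4];
  for (int i = 0; i < 4; ++i) {
    bool result = a[i] > b[i];            // "gt" uses the signed view.
    dst[i] = result ? UINT16_MAX : 0;     // MaxUintFromFormat for 16-bit lanes.
  }
  for (int i = 0; i < 4; ++i) std::printf("0x%04x ", dst[i]);
  std::printf("\n");  // Expected: 0x0000 0x0000 0xffff 0x0000
  return 0;
}
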
+LogicVRegister Simulator::cmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1, int imm,
+ Condition cond) {
+ SimVRegister temp;
+ LogicVRegister imm_reg = dup_immediate(vform, temp, imm);
+ return cmp(vform, dst, src1, imm_reg, cond);
+}
+
+LogicVRegister Simulator::cmptst(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::add(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ int lane_size = LaneSizeInBitsFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ uint64_t ua = src1.UintLeftJustified(vform, i);
+ uint64_t ub = src2.UintLeftJustified(vform, i);
+ uint64_t ur = ua + ub;
+ if (ur < ua) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ // Test for signed saturation.
+ bool pos_a = (ua >> 63) == 0;
+ bool pos_b = (ub >> 63) == 0;
+ bool pos_r = (ur >> 63) == 0;
+ // If the signs of the operands are the same, but different from the result,
+ // there was an overflow.
+ if ((pos_a == pos_b) && (pos_a != pos_r)) {
+ dst.SetSignedSat(i, pos_a);
+ }
+
+ dst.SetInt(vform, i, ur >> (64 - lane_size));
+ }
+ return dst;
+}
+
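add works on left-justified operands so one 64-bit addition covers every lane size: a wrap-around (carry out of bit 63) marks unsigned saturation, and identical operand signs with a differing result sign mark signed saturation. A standalone sketch of those two tests (illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t ua = uint64_t{1} << 63;   // Most negative value when read as signed.
  uint64_t ub = uint64_t{1} << 63;
  uint64_t ur = ua + ub;             // Wraps to 0.

  bool unsigned_saturated = ur < ua;                 // Carry out of bit 63.
  bool pos_a = (ua >> 63) == 0;
  bool pos_b = (ub >> 63) == 0;
  bool pos_r = (ur >> 63) == 0;
  // Same operand signs but a different result sign means signed overflow.
  bool signed_saturated = (pos_a == pos_b) && (pos_a != pos_r);

  std::printf("unsigned=%d signed=%d\n", unsigned_saturated, signed_saturated);  // 1 1
  return 0;
}
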
+LogicVRegister Simulator::addp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uzp1(vform, temp1, src1, src2);
+ uzp2(vform, temp2, src1, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::mla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::mls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ sub(vform, dst, dst, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::mul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::mul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mul(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
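The indexed forms here and below all follow one pattern: broadcast the selected lane of src2 into a temporary with dup_element, then reuse the plain vector-by-vector operation. A standalone sketch of that by-element pattern (illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t src1[4] = {1, 2, 3, 4};
  uint32_t src2[4] = {10, 20, 30, 40};
  int index = 2;

  uint32_t broadcast[4];                        // dup_element(..., src2, index)
  for (int i = 0; i < 4; ++i) broadcast[i] = src2[index];

  uint32_t dst[4];                              // mul(vform, dst, src1, broadcast)
  for (int i = 0; i < 4; ++i) dst[i] = src1[i] * broadcast[i];

  for (int i = 0; i < 4; ++i) std::printf("%u ", dst[i]);  // 30 60 90 120
  std::printf("\n");
  return 0;
}
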
+LogicVRegister Simulator::mla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mla(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::mls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mls(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::smull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::smull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::umull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::umull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::smlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::smlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::umlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::umlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::smlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::umlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) {
+ uint16_t result = 0;
+ uint16_t extended_op2 = op2;
+ for (int i = 0; i < 8; ++i) {
+ if ((op1 >> i) & 1) {
+ result = result ^ (extended_op2 << i);
+ }
+ }
+ return result;
+}
+
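PolynomialMult is a carry-less multiply over GF(2): each set bit of op1 contributes a shifted copy of op2, and the partial products are combined with XOR rather than addition. It backs pmul, pmull and pmull2 below. A standalone sketch (illustration only):

#include <cstdint>
#include <cstdio>

// Carry-less (GF(2)) 8x8 -> 16 bit multiply, mirroring PolynomialMult.
static uint16_t ClMul8(uint8_t op1, uint8_t op2) {
  uint16_t result = 0;
  for (int i = 0; i < 8; ++i) {
    if ((op1 >> i) & 1) result ^= static_cast<uint16_t>(op2) << i;
  }
  return result;
}

int main() {
  // (x + 1) * (x + 1) = x^2 + 1 over GF(2): 0b11 * 0b11 = 0b101.
  std::printf("0x%x\n", ClMul8(0x3, 0x3));  // Prints 0x5.
  return 0;
}
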
+LogicVRegister Simulator::pmul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i,
+ PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i)));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::pmull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidth(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(
+ vform, i,
+ PolynomialMult(src1.Uint(vform_src, i), src2.Uint(vform_src, i)));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::pmull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform);
+ dst.ClearForWrite(vform);
+ int lane_count = LaneCountFromFormat(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i,
+ PolynomialMult(src1.Uint(vform_src, lane_count + i),
+ src2.Uint(vform_src, lane_count + i)));
+ }
+ return dst;
+}
+
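+// Lane-wise subtraction that also records per-lane signed and unsigned
+// saturation state, so that saturating variants can apply it afterwards.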
+LogicVRegister Simulator::sub(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ int lane_size = LaneSizeInBitsFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ uint64_t ua = src1.UintLeftJustified(vform, i);
+ uint64_t ub = src2.UintLeftJustified(vform, i);
+ uint64_t ur = ua - ub;
+ if (ub > ua) {
+ dst.SetUnsignedSat(i, false);
+ }
+
+ // Test for signed saturation.
+ bool pos_a = (ua >> 63) == 0;
+ bool pos_b = (ub >> 63) == 0;
+ bool pos_r = (ur >> 63) == 0;
+ // If the signs of the operands are different, and the sign of the first
+ // operand doesn't match the result, there was an overflow.
+ if ((pos_a != pos_b) && (pos_a != pos_r)) {
+ dst.SetSignedSat(i, pos_a);
+ }
+
+ dst.SetInt(vform, i, ur >> (64 - lane_size));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::and_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::orr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::orn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::eor(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::bic(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::bic(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) & ~imm;
+ }
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
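+// BIF: bitwise insert if false. Copies bits from src1 into dst in the bit
+// positions where src2 is clear; other dst bits are preserved.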
+LogicVRegister Simulator::bif(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = ~src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
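+// BIT: bitwise insert if true. Copies bits from src1 into dst in the bit
+// positions where src2 is set; other dst bits are preserved.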
+LogicVRegister Simulator::bit(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
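+// BSL: bitwise select. For each bit, dst acts as the selector: where dst is
+// set the result takes the bit from src1, otherwise from src2.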
+LogicVRegister Simulator::bsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = src2.Uint(vform, i);
+ uint64_t operand2 = dst.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::SMinMax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src1_val = src1.Int(vform, i);
+ int64_t src2_val = src2.Int(vform, i);
+ int64_t dst_val;
+ if (max) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, i, dst_val);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::smax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return SMinMax(vform, dst, src1, src2, true);
+}
+
+LogicVRegister Simulator::smin(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return SMinMax(vform, dst, src1, src2, false);
+}
+
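+// Signed pairwise min/max: adjacent lane pairs of src1 are reduced into the
+// lower half of the result, and pairs of src2 into the upper half.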
+LogicVRegister Simulator::SMinMaxP(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool max) {
+ int lanes = LaneCountFromFormat(vform);
+ int64_t result[kMaxLanesPerVector];
+ const LogicVRegister* src = &src1;
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < lanes; i += 2) {
+ int64_t first_val = src->Int(vform, i);
+ int64_t second_val = src->Int(vform, i + 1);
+ int64_t dst_val;
+ if (max) {
+ dst_val = (first_val > second_val) ? first_val : second_val;
+ } else {
+ dst_val = (first_val < second_val) ? first_val : second_val;
+ }
+ DCHECK_LT((i >> 1) + (j * lanes / 2), kMaxLanesPerVector);
+ result[(i >> 1) + (j * lanes / 2)] = dst_val;
+ }
+ src = &src2;
+ }
+ dst.SetIntArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::smaxp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return SMinMaxP(vform, dst, src1, src2, true);
+}
+
+LogicVRegister Simulator::sminp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return SMinMaxP(vform, dst, src1, src2, false);
+}
+
+LogicVRegister Simulator::addp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ DCHECK_EQ(vform, kFormatD);
+
+ uint64_t dst_val = src.Uint(kFormat2D, 0) + src.Uint(kFormat2D, 1);
+ dst.ClearForWrite(vform);
+ dst.SetUint(vform, 0, dst_val);
+ return dst;
+}
+
+LogicVRegister Simulator::addv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst =
+ ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+LogicVRegister Simulator::saddlv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst =
+ ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+LogicVRegister Simulator::uaddlv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst =
+ ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ uint64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetUint(vform_dst, 0, dst_val);
+ return dst;
+}
+
+LogicVRegister Simulator::SMinMaxV(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, bool max) {
+ int64_t dst_val = max ? INT64_MIN : INT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src_val = src.Int(vform, i);
+ if (max) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ dst.ClearForWrite(ScalarFormatFromFormat(vform));
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+LogicVRegister Simulator::smaxv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ SMinMaxV(vform, dst, src, true);
+ return dst;
+}
+
+LogicVRegister Simulator::sminv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ SMinMaxV(vform, dst, src, false);
+ return dst;
+}
+
+LogicVRegister Simulator::UMinMax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src1_val = src1.Uint(vform, i);
+ uint64_t src2_val = src2.Uint(vform, i);
+ uint64_t dst_val;
+ if (max) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, i, dst_val);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::umax(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return UMinMax(vform, dst, src1, src2, true);
+}
+
+LogicVRegister Simulator::umin(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return UMinMax(vform, dst, src1, src2, false);
+}
+
+LogicVRegister Simulator::UMinMaxP(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool max) {
+ int lanes = LaneCountFromFormat(vform);
+ uint64_t result[kMaxLanesPerVector];
+ const LogicVRegister* src = &src1;
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ uint64_t first_val = src->Uint(vform, i);
+ uint64_t second_val = src->Uint(vform, i + 1);
+ uint64_t dst_val;
+ if (max) {
+ dst_val = (first_val > second_val) ? first_val : second_val;
+ } else {
+ dst_val = (first_val < second_val) ? first_val : second_val;
+ }
+ DCHECK_LT((i >> 1) + (j * lanes / 2), kMaxLanesPerVector);
+ result[(i >> 1) + (j * lanes / 2)] = dst_val;
+ }
+ src = &src2;
+ }
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::umaxp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return UMinMaxP(vform, dst, src1, src2, true);
+}
+
+LogicVRegister Simulator::uminp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return UMinMaxP(vform, dst, src1, src2, false);
+}
+
+LogicVRegister Simulator::UMinMaxV(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, bool max) {
+ uint64_t dst_val = max ? 0 : UINT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src_val = src.Uint(vform, i);
+ if (max) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ dst.ClearForWrite(ScalarFormatFromFormat(vform));
+ dst.SetUint(vform, 0, dst_val);
+ return dst;
+}
+
+LogicVRegister Simulator::umaxv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ UMinMaxV(vform, dst, src, true);
+ return dst;
+}
+
+LogicVRegister Simulator::uminv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ UMinMaxV(vform, dst, src, false);
+ return dst;
+}
+
+LogicVRegister Simulator::shl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+LogicVRegister Simulator::sshll(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+LogicVRegister Simulator::sshll2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl2(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+LogicVRegister Simulator::shll(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll(vform, dst, src, shift);
+}
+
+LogicVRegister Simulator::shll2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll2(vform, dst, src, shift);
+}
+
+LogicVRegister Simulator::ushll(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+LogicVRegister Simulator::ushll2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl2(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
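+// SLI: shift left and insert. Each lane of src is shifted left by 'shift' and
+// the shifted bits replace the corresponding bits of dst; the low 'shift'
+// bits of each dst lane are preserved.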
+LogicVRegister Simulator::sli(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted = src_lane << shift;
+ uint64_t mask = MaxUintFromFormat(vform) << shift;
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::sqshl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::uqshl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqshlu(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
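+// SRI: shift right and insert. Each lane of src is shifted right by 'shift'
+// and the shifted bits replace the corresponding low bits of dst; the top
+// 'shift' bits of each dst lane are preserved.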
+LogicVRegister Simulator::sri(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ DCHECK((shift > 0) &&
+ (shift <= static_cast<int>(LaneSizeInBitsFromFormat(vform))));
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted;
+ uint64_t mask;
+ if (shift == 64) {
+ shifted = 0;
+ mask = 0;
+ } else {
+ shifted = src_lane >> shift;
+ mask = MaxUintFromFormat(vform) >> shift;
+ }
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::ushr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+LogicVRegister Simulator::sshr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ DCHECK_GE(shift, 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return sshl(vform, dst, src, shiftreg);
+}
+
+LogicVRegister Simulator::ssra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+LogicVRegister Simulator::usra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+LogicVRegister Simulator::srsra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+LogicVRegister Simulator::ursra(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+LogicVRegister Simulator::cls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits);
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::clz(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits);
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::cnt(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t value = src.Uint(vform, i);
+ result[i] = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ result[i] += (value & 1);
+ value >>= 1;
+ }
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
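+// SSHL: signed shift by a per-lane signed shift count taken from src2.
+// Negative counts shift right. Saturation and rounding state are recorded so
+// that the saturating and rounding variants can be built on top of this.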
+LogicVRegister Simulator::sshl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ int64_t lj_src_val = src1.IntLeftJustified(vform, i);
+
+ // Set signed saturation state.
+ if ((shift_val > CountLeadingSignBits(lj_src_val, 64)) &&
+ (lj_src_val != 0)) {
+ dst.SetSignedSat(i, lj_src_val >= 0);
+ }
+
+ // Set unsigned saturation state.
+ if (lj_src_val < 0) {
+ dst.SetUnsignedSat(i, false);
+ } else if ((shift_val > CountLeadingZeros(lj_src_val, 64)) &&
+ (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ int64_t src_val = src1.Int(vform, i);
+ bool src_is_negative = src_val < 0;
+ if (shift_val > 63) {
+ dst.SetInt(vform, i, 0);
+ } else if (shift_val < -63) {
+ dst.SetRounding(i, src_is_negative);
+ dst.SetInt(vform, i, src_is_negative ? -1 : 0);
+ } else {
+      // Use unsigned types for the shifts, as left-shifting a negative signed
+      // lhs has undefined behaviour.
+ uint64_t usrc_val = static_cast<uint64_t>(src_val);
+
+ if (shift_val < 0) {
+ // Convert to right shift.
+ shift_val = -shift_val;
+
+ // Set rounding state by testing most-significant bit shifted out.
+ // Rounding only needed on right shifts.
+ if (((usrc_val >> (shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+
+ usrc_val >>= shift_val;
+
+ if (src_is_negative) {
+ // Simulate sign-extension.
+ usrc_val |= (~UINT64_C(0) << (64 - shift_val));
+ }
+ } else {
+ usrc_val <<= shift_val;
+ }
+ dst.SetUint(vform, i, usrc_val);
+ }
+ }
+ return dst;
+}
+
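+// USHL: unsigned shift by a per-lane signed shift count taken from src2.
+// Negative counts shift right. Saturation and rounding state are recorded for
+// the saturating and rounding variants.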
+LogicVRegister Simulator::ushl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ uint64_t lj_src_val = src1.UintLeftJustified(vform, i);
+
+ // Set saturation state.
+ if ((shift_val > CountLeadingZeros(lj_src_val, 64)) && (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ uint64_t src_val = src1.Uint(vform, i);
+ if ((shift_val > 63) || (shift_val < -64)) {
+ dst.SetUint(vform, i, 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+
+ if (shift_val == -64) {
+ src_val = 0;
+ } else {
+ src_val >>= -shift_val;
+ }
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetUint(vform, i, src_val);
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::neg(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa);
+ }
+ return dst;
+}
+
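+// SUQADD: signed saturating accumulate of an unsigned value. The unsigned src
+// is added to the signed accumulator in dst, saturating to the maximum signed
+// value on overflow.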
+LogicVRegister Simulator::suqadd(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = dst.IntLeftJustified(vform, i);
+ uint64_t ub = src.UintLeftJustified(vform, i);
+ uint64_t ur = sa + ub;
+
+ int64_t sr = bit_cast<int64_t>(ur);
+ if (sr < sa) { // Test for signed positive saturation.
+ dst.SetInt(vform, i, MaxIntFromFormat(vform));
+ } else {
+ dst.SetUint(vform, i, dst.Int(vform, i) + src.Uint(vform, i));
+ }
+ }
+ return dst;
+}
+
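+// USQADD: unsigned saturating accumulate of a signed value. The signed src is
+// added to the unsigned accumulator in dst, saturating to the maximum
+// unsigned value or to zero on overflow.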
+LogicVRegister Simulator::usqadd(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = dst.UintLeftJustified(vform, i);
+ int64_t sb = src.IntLeftJustified(vform, i);
+ uint64_t ur = ua + sb;
+
+ if ((sb > 0) && (ur <= ua)) {
+ dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation.
+ } else if ((sb < 0) && (ur >= ua)) {
+ dst.SetUint(vform, i, 0); // Negative saturation.
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::abs(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ if (sa < 0) {
+ dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa);
+ } else {
+ dst.SetInt(vform, i, sa);
+ }
+ }
+ return dst;
+}
+
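+// Common helper for the narrowing instructions (XTN, SQXTN, UQXTN, SQXTUN and
+// the shift-narrow forms). Each source lane is truncated to half its width
+// and per-lane saturation state is recorded; when the destination format
+// selects the "2" (upper-half) variant, the results are written to the upper
+// half of dst and the lower half is left untouched.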
+LogicVRegister Simulator::ExtractNarrow(VectorFormat dstform,
+ LogicVRegister dst, bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned) {
+ bool upperhalf = false;
+ VectorFormat srcform = kFormatUndefined;
+ int64_t ssrc[8];
+ uint64_t usrc[8];
+
+ switch (dstform) {
+ case kFormat8B:
+ upperhalf = false;
+ srcform = kFormat8H;
+ break;
+ case kFormat16B:
+ upperhalf = true;
+ srcform = kFormat8H;
+ break;
+ case kFormat4H:
+ upperhalf = false;
+ srcform = kFormat4S;
+ break;
+ case kFormat8H:
+ upperhalf = true;
+ srcform = kFormat4S;
+ break;
+ case kFormat2S:
+ upperhalf = false;
+ srcform = kFormat2D;
+ break;
+ case kFormat4S:
+ upperhalf = true;
+ srcform = kFormat2D;
+ break;
+ case kFormatB:
+ upperhalf = false;
+ srcform = kFormatH;
+ break;
+ case kFormatH:
+ upperhalf = false;
+ srcform = kFormatS;
+ break;
+ case kFormatS:
+ upperhalf = false;
+ srcform = kFormatD;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+ ssrc[i] = src.Int(srcform, i);
+ usrc[i] = src.Uint(srcform, i);
+ }
+
+ int offset;
+ if (upperhalf) {
+ offset = LaneCountFromFormat(dstform) / 2;
+ } else {
+ offset = 0;
+ dst.ClearForWrite(dstform);
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+    // Test for signed saturation.
+ if (ssrc[i] > MaxIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, true);
+ } else if (ssrc[i] < MinIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, false);
+ }
+
+    // Test for unsigned saturation.
+ if (srcIsSigned) {
+ if (ssrc[i] > static_cast<int64_t>(MaxUintFromFormat(dstform))) {
+ dst.SetUnsignedSat(offset + i, true);
+ } else if (ssrc[i] < 0) {
+ dst.SetUnsignedSat(offset + i, false);
+ }
+ } else {
+ if (usrc[i] > MaxUintFromFormat(dstform)) {
+ dst.SetUnsignedSat(offset + i, true);
+ }
+ }
+
+ int64_t result;
+ if (srcIsSigned) {
+ result = ssrc[i] & MaxUintFromFormat(dstform);
+ } else {
+ result = usrc[i] & MaxUintFromFormat(dstform);
+ }
+
+ if (dstIsSigned) {
+ dst.SetInt(dstform, offset + i, result);
+ } else {
+ dst.SetUint(dstform, offset + i, result);
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::xtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return ExtractNarrow(vform, dst, true, src, true);
+}
+
+LogicVRegister Simulator::sqxtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return ExtractNarrow(vform, dst, true, src, true).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqxtun(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return ExtractNarrow(vform, dst, false, src, true).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::uqxtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return ExtractNarrow(vform, dst, false, src, false).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::AbsDiff(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool issigned) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (issigned) {
+ int64_t sr = src1.Int(vform, i) - src2.Int(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetInt(vform, i, sr);
+ } else {
+ int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetUint(vform, i, sr);
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::saba(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ AbsDiff(vform, temp, src1, src2, true);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::uaba(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ AbsDiff(vform, temp, src1, src2, false);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::not_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, ~src.Uint(vform, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::rbit(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ uint64_t reversed_value;
+ uint64_t value;
+ for (int i = 0; i < laneCount; i++) {
+ value = src.Uint(vform, i);
+ reversed_value = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ reversed_value = (reversed_value << 1) | (value & 1);
+ value >>= 1;
+ }
+ result[i] = reversed_value;
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
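+// Reverses the order of the lanes within each group of 'revSize' bytes.
+// REV16, REV32 and REV64 below reverse within 2-, 4- and 8-byte groups
+// respectively.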
+LogicVRegister Simulator::rev(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int revSize) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSize = LaneSizeInBytesFromFormat(vform);
+ int lanesPerLoop = revSize / laneSize;
+ for (int i = 0; i < laneCount; i += lanesPerLoop) {
+ for (int j = 0; j < lanesPerLoop; j++) {
+ result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j);
+ }
+ }
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::rev16(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 2);
+}
+
+LogicVRegister Simulator::rev32(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 4);
+}
+
+LogicVRegister Simulator::rev64(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 8);
+}
+
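+// Adds adjacent pairs of source lanes into double-width destination lanes,
+// optionally sign-extending the sources and optionally accumulating into dst
+// (covers SADDLP/UADDLP and SADALP/UADALP).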
+LogicVRegister Simulator::addlp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, bool is_signed,
+ bool do_accumulate) {
+ VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform);
+ DCHECK_LE(LaneSizeInBitsFromFormat(vformsrc), 32U);
+ DCHECK_LE(LaneCountFromFormat(vform), 8);
+
+ uint64_t result[8];
+ int lane_count = LaneCountFromFormat(vform);
+ for (int i = 0; i < lane_count; i++) {
+ if (is_signed) {
+ result[i] = static_cast<uint64_t>(src.Int(vformsrc, 2 * i) +
+ src.Int(vformsrc, 2 * i + 1));
+ } else {
+ result[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1);
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; ++i) {
+ if (do_accumulate) {
+ result[i] += dst.Uint(vform, i);
+ }
+ dst.SetUint(vform, i, result[i]);
+ }
+
+ return dst;
+}
+
+LogicVRegister Simulator::saddlp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, false);
+}
+
+LogicVRegister Simulator::uaddlp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, false);
+}
+
+LogicVRegister Simulator::sadalp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, true);
+}
+
+LogicVRegister Simulator::uadalp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, true);
+}
+
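+// EXT: treats src2:src1 as a pair of concatenated vectors and extracts
+// LaneCountFromFormat(vform) lanes starting 'index' lanes into src1.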
+LogicVRegister Simulator::ext(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ uint8_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount - index; ++i) {
+ result[i] = src1.Uint(vform, i + index);
+ }
+ for (int i = 0; i < index; ++i) {
+ result[laneCount - index + i] = src2.Uint(vform, i);
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::dup_element(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = src.Uint(vform, src_index);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::dup_immediate(VectorFormat vform, LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::ins_element(VectorFormat vform, LogicVRegister dst,
+ int dst_index, const LogicVRegister& src,
+ int src_index) {
+ dst.SetUint(vform, dst_index, src.Uint(vform, src_index));
+ return dst;
+}
+
+LogicVRegister Simulator::ins_immediate(VectorFormat vform, LogicVRegister dst,
+ int dst_index, uint64_t imm) {
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.SetUint(vform, dst_index, value);
+ return dst;
+}
+
+LogicVRegister Simulator::movi(VectorFormat vform, LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, imm);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::mvni(VectorFormat vform, LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, ~imm);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::orr(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) | imm;
+ }
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::uxtl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::sxtl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::uxtl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::sxtl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::shrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vform_src = VectorFormatDoubleWidth(vform);
+ VectorFormat vform_dst = vform;
+ LogicVRegister shifted_src = ushr(vform_src, temp, src, shift);
+ return ExtractNarrow(vform_dst, dst, false, shifted_src, false);
+}
+
+LogicVRegister Simulator::shrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift);
+ return ExtractNarrow(vformdst, dst, false, shifted_src, false);
+}
+
+LogicVRegister Simulator::rshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return ExtractNarrow(vformdst, dst, false, shifted_src, false);
+}
+
+LogicVRegister Simulator::rshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return ExtractNarrow(vformdst, dst, false, shifted_src, false);
+}
+
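+// Common helper for TBL and TBX. Each index in 'ind' selects a byte from the
+// concatenation of up to four table registers; indices beyond the table
+// either zero the result byte (TBL, zero_out_of_bounds) or leave the existing
+// dst byte unchanged (TBX).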
+LogicVRegister Simulator::Table(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& ind,
+ bool zero_out_of_bounds,
+ const LogicVRegister* tab1,
+ const LogicVRegister* tab2,
+ const LogicVRegister* tab3,
+ const LogicVRegister* tab4) {
+ DCHECK_NOT_NULL(tab1);
+ const LogicVRegister* tab[4] = {tab1, tab2, tab3, tab4};
+ uint64_t result[kMaxLanesPerVector];
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ result[i] = zero_out_of_bounds ? 0 : dst.Uint(kFormat16B, i);
+ }
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ int tab_idx = static_cast<int>(j >> 4);
+ int j_idx = static_cast<int>(j & 15);
+ if ((tab_idx < 4) && (tab[tab_idx] != NULL)) {
+ result[i] = tab[tab_idx]->Uint(kFormat16B, j_idx);
+ }
+ }
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, true, &tab);
+}
+
+LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, true, &tab, &tab2);
+}
+
+LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, true, &tab, &tab2, &tab3);
+}
+
+LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, true, &tab, &tab2, &tab3, &tab4);
+}
+
+LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, false, &tab);
+}
+
+LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, false, &tab, &tab2);
+}
+
+LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, false, &tab, &tab2, &tab3);
+}
+
+LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ return Table(vform, dst, ind, false, &tab, &tab2, &tab3, &tab4);
+}
+
+LogicVRegister Simulator::uqshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ return shrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::uqshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ return shrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::uqrshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ return rshrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::uqrshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqrshrn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqrshrn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqshrun(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqshrun2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqrshrun(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::sqrshrun2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+LogicVRegister Simulator::uaddl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::uaddl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::uaddw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::uaddw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::saddl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::saddl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::saddw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::saddw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::usubl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::usubl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::usubw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::usubw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::ssubl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::ssubl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::ssubw(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::ssubw2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::uabal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::uabal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::sabal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::sabal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::uabdl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ AbsDiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+LogicVRegister Simulator::uabdl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ AbsDiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+LogicVRegister Simulator::sabdl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ AbsDiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+LogicVRegister Simulator::sabdl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ AbsDiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+LogicVRegister Simulator::umull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::umull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::smull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::smull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::umlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::smlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::umlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::umlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::smlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::smlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqdmull(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull2(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
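+// Signed saturating doubling multiply returning the high half of the doubled
+// product, with optional rounding; implements both SQDMULH (round == false)
+// and SQRDMULH (round == true).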
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, bool round) {
+  // 2 * INT32_MIN * INT32_MIN causes int64_t to overflow.
+  // To avoid this, we use (src1 * src2 + (1 << (esize - 2))) >> (esize - 1),
+  // which is the same as (2 * src1 * src2 + (1 << (esize - 1))) >> esize.
+
+ int esize = LaneSizeInBitsFromFormat(vform);
+ int round_const = round ? (1 << (esize - 2)) : 0;
+ int64_t product;
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ product = src1.Int(vform, i) * src2.Int(vform, i);
+ product += round_const;
+ product = product >> (esize - 1);
+
+ if (product > MaxIntFromFormat(vform)) {
+ product = MaxIntFromFormat(vform);
+ } else if (product < MinIntFromFormat(vform)) {
+ product = MinIntFromFormat(vform);
+ }
+ dst.SetInt(vform, i, product);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sqrdmulh(vform, dst, src1, src2, false);
+}
+
+LogicVRegister Simulator::addhn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::addhn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::raddhn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::raddhn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::subhn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::subhn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::rsubhn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+LogicVRegister Simulator::rsubhn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
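+// TRN1/TRN2, ZIP1/ZIP2 and UZP1/UZP2 below implement the standard NEON
+// transpose, interleave and de-interleave lane permutations.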
+LogicVRegister Simulator::trn1(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, 2 * i);
+ result[(2 * i) + 1] = src2.Uint(vform, 2 * i);
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::trn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, (2 * i) + 1);
+ result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1);
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::zip1(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
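+  // For 4S lanes this produces {src1[0], src2[0], src1[1], src2[1]}.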
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, i);
+ result[(2 * i) + 1] = src2.Uint(vform, i);
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::zip2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, pairs + i);
+ result[(2 * i) + 1] = src2.Uint(vform, pairs + i);
+ }
+
+ dst.SetUintArray(vform, result);
+ return dst;
+}
+
+LogicVRegister Simulator::uzp1(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
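+  // For 4S lanes this produces {src1[0], src1[2], src2[0], src2[2]}.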
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[2 * i]);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::uzp2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[(2 * i) + 1]);
+ }
+ return dst;
+}
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ T result = FPProcessNaNs(op1, op2);
+ if (std::isnan(result)) return result;
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+template <typename T>
+T Simulator::FPMulx(T op1, T op2) {
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns +/-2.0.
+ T two = 2.0;
+ return copysign(1.0, op1) * copysign(1.0, op2) * two;
+ }
+ return FPMul(op1, op2);
+}
+
+template <typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
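+  // For example, with a finite positive x, (+0.0 * x) + -0.0 yields +0.0,
+  // while (-0.0 * x) + -0.0 yields -0.0.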
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ DCHECK(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ DCHECK(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ if (op2 == 0.0) {
+ FPProcessException();
+ if (!std::isnan(op1)) {
+ double op1_sign = copysign(1.0, op1);
+ double op2_sign = copysign(1.0, op2);
+ return static_cast<T>(op1_sign * op2_sign * kFP64PositiveInfinity);
+ }
+ }
+
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return sqrt(op);
+ }
+}
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+template <typename T>
+T Simulator::FPRecipStepFused(T op1, T op2) {
+ const T two = 2.0;
+ if ((std::isinf(op1) && (op2 == 0.0)) ||
+ ((op1 == 0.0) && (std::isinf(op2)))) {
+ return two;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ return FusedMultiplyAdd(op1, op2, two);
+ }
+}
+
+template <typename T>
+T Simulator::FPRSqrtStepFused(T op1, T op2) {
+ const T one_point_five = 1.5;
+ const T two = 2.0;
+
+ if ((std::isinf(op1) && (op2 == 0.0)) ||
+ ((op1 == 0.0) && (std::isinf(op2)))) {
+ return one_point_five;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ // The multiply-add-halve operation must be fully fused, so avoid interim
+ // rounding by checking which operand can be losslessly divided by two
+ // before doing the multiply-add.
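+    // Either form computes op1 * op2 / 2.0 + 1.5 with only the final fused
+    // rounding, provided the halved operand stays normal.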
+ if (std::isnormal(op1 / two)) {
+ return FusedMultiplyAdd(op1 / two, op2, one_point_five);
+ } else if (std::isnormal(op2 / two)) {
+ return FusedMultiplyAdd(op1, op2 / two, one_point_five);
+ } else {
+ // Neither operand is normal after halving: the result is dominated by
+ // the addition term, so just return that.
+ return one_point_five;
+ }
+ }
+}
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = std::floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // Take care of correctly handling the range ]-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is positive, round up.
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
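+      // For example, 2.5 rounds to 2.0 (result already even), while 3.5
+      // rounds to 4.0.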
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+      // If value > 0 we take floor(value); otherwise, ceil(value).
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ case FPPositiveInfinity: {
+ // Take care of correctly handling the range ]-1.0, -0.0], which must
+ // yield -0.0.
+ if ((-1.0 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is non-zero, round up.
+ } else if (error > 0.0) {
+ int_result++;
+ }
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ }
+ return int_result;
+}
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+ template <typename T> \
+ LogicVRegister Simulator::FN(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ dst.ClearForWrite(vform); \
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) { \
+ T op1 = src1.Float<T>(i); \
+ T op2 = src2.Float<T>(i); \
+ T result; \
+ if (PROCNAN) { \
+ result = FPProcessNaNs(op1, op2); \
+ if (!std::isnan(result)) { \
+ result = OP(op1, op2); \
+ } \
+ } else { \
+ result = OP(op1, op2); \
+ } \
+ dst.SetFloat(i, result); \
+ } \
+ return dst; \
+ } \
+ \
+ LogicVRegister Simulator::FN(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { \
+ FN<float>(vform, dst, src1, src2); \
+ } else { \
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); \
+ FN<double>(vform, dst, src1, src2); \
+ } \
+ return dst; \
+ }
+NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP)
+#undef DEFINE_NEON_FP_VECTOR_OP
+
+LogicVRegister Simulator::fnmul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = fmul(vform, temp, src1, src2);
+ return fneg(vform, dst, product);
+}
+
+template <typename T>
+LogicVRegister Simulator::frecps(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRecipStepFused(op1, op2));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::frecps(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ frecps<float>(vform, dst, src1, src2);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ frecps<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRSqrtStepFused(op1, op2));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ frsqrts<float>(vform, dst, src1, src2);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ frsqrts<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::fcmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ bool result = false;
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T nan_result = FPProcessNaNs(op1, op2);
+ if (!std::isnan(nan_result)) {
+ switch (cond) {
+ case eq:
+ result = (op1 == op2);
+ break;
+ case ge:
+ result = (op1 >= op2);
+ break;
+ case gt:
+ result = (op1 > op2);
+ break;
+ case le:
+ result = (op1 <= op2);
+ break;
+ case lt:
+ result = (op1 < op2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, Condition cond) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ fcmp<float>(vform, dst, src1, src2, cond);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ fcmp<double>(vform, dst, src1, src2, cond);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcmp_zero(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, Condition cond) {
+ SimVRegister temp;
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ LogicVRegister zero_reg =
+ dup_immediate(vform, temp, bit_cast<uint32_t>(0.0f));
+ fcmp<float>(vform, dst, src, zero_reg, cond);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ LogicVRegister zero_reg =
+ dup_immediate(vform, temp, bit_cast<uint64_t>(0.0));
+ fcmp<double>(vform, dst, src, zero_reg, cond);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fabscmp(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, Condition cond) {
+ SimVRegister temp1, temp2;
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ LogicVRegister abs_src1 = fabs_<float>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<float>(vform, temp2, src2);
+ fcmp<float>(vform, dst, abs_src1, abs_src2, cond);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ LogicVRegister abs_src1 = fabs_<double>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<double>(vform, temp2, src2);
+ fcmp<double>(vform, dst, abs_src1, abs_src2, cond);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::fmla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fmla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ fmla<float>(vform, dst, src1, src2);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ fmla<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::fmls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fmls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ fmls<float>(vform, dst, src1, src2);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ fmls<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::fneg(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ op = -op;
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fneg(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ fneg<float>(vform, dst, src);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ fneg<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::fabs_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ if (copysign(1.0, op) < 0.0) {
+ op = -op;
+ }
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fabs_(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ fabs_<float>(vform, dst, src);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ fabs_<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fabd(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ fsub(vform, temp, src1, src2);
+ fabs_(vform, dst, temp);
+ return dst;
+}
+
+LogicVRegister Simulator::fsqrt(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float result = FPSqrt(src.Float<float>(i));
+ dst.SetFloat(i, result);
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double result = FPSqrt(src.Float<double>(i));
+ dst.SetFloat(i, result);
+ }
+ }
+ return dst;
+}
+
+#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+ LogicVRegister Simulator::FNP(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ SimVRegister temp1, temp2; \
+ uzp1(vform, temp1, src1, src2); \
+ uzp2(vform, temp2, src1, src2); \
+ FN(vform, dst, temp1, temp2); \
+ return dst; \
+ } \
+ \
+ LogicVRegister Simulator::FNP(VectorFormat vform, LogicVRegister dst, \
+ const LogicVRegister& src) { \
+ if (vform == kFormatS) { \
+ float result = OP(src.Float<float>(0), src.Float<float>(1)); \
+ dst.SetFloat(0, result); \
+ } else { \
+ DCHECK_EQ(vform, kFormatD); \
+ double result = OP(src.Float<double>(0), src.Float<double>(1)); \
+ dst.SetFloat(0, result); \
+ } \
+ dst.ClearForWrite(vform); \
+ return dst; \
+ }
+NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP)
+#undef DEFINE_NEON_FP_PAIR_OP
+
+LogicVRegister Simulator::FMinMaxV(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPMinMaxOp Op) {
+ DCHECK_EQ(vform, kFormat4S);
+ USE(vform);
+ float result1 = (this->*Op)(src.Float<float>(0), src.Float<float>(1));
+ float result2 = (this->*Op)(src.Float<float>(2), src.Float<float>(3));
+ float result = (this->*Op)(result1, result2);
+ dst.ClearForWrite(kFormatS);
+ dst.SetFloat<float>(0, result);
+ return dst;
+}
+
+LogicVRegister Simulator::fmaxv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return FMinMaxV(vform, dst, src, &Simulator::FPMax);
+}
+
+LogicVRegister Simulator::fminv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return FMinMaxV(vform, dst, src, &Simulator::FPMin);
+}
+
+LogicVRegister Simulator::fmaxnmv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return FMinMaxV(vform, dst, src, &Simulator::FPMaxNM);
+}
+
+LogicVRegister Simulator::fminnmv(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ return FMinMaxV(vform, dst, src, &Simulator::FPMinNM);
+}
+
+LogicVRegister Simulator::fmul(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmul<float>(vform, dst, src1, index_reg);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmul<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fmla(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmla<float>(vform, dst, src1, index_reg);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmla<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fmls(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmls<float>(vform, dst, src1, index_reg);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmls<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fmulx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2, int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmulx<float>(vform, dst, src1, index_reg);
+
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmulx<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::frint(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ float rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<float>(i, rounded);
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ double rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<double>(i, rounded);
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvts(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode, int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetInt(vform, i, FPToInt32(op, rounding_mode));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetInt(vform, i, FPToInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtu(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode, int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetUint(vform, i, FPToUInt32(op, rounding_mode));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetUint(vform, i, FPToUInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtl(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ dst.SetFloat(i, FPToFloat(src.Float<float16>(i)));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i)));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtl2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetFloat(i, FPToFloat(src.Float<float16>(i + lane_count)));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count)));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBytesFromFormat(vform) == kHRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat16(src.Float<float>(i), FPTieEven));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ if (LaneSizeInBytesFromFormat(vform) == kHRegSize) {
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count, FPToFloat16(src.Float<float>(i), FPTieEven));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize);
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPTieEven));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtxn(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPRoundOdd));
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::fcvtxn2(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize);
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPRoundOdd));
+ }
+ return dst;
+}
+
+// Based on reference C function recip_sqrt_estimate from ARM ARM.
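+// The estimate has roughly 8 bits of precision for inputs in [0.25, 1.0); for
+// example, a = 0.5 yields 361.0 / 256.0 = 1.41015625 (exact value ~1.41421).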
+double Simulator::recip_sqrt_estimate(double a) {
+ int q0, q1, s;
+ double r;
+ if (a < 0.5) {
+ q0 = static_cast<int>(a * 512.0);
+ r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
+ } else {
+ q1 = static_cast<int>(a * 256.0);
+ r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
+ }
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
+
+namespace {
+
+inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) {
+ return unsigned_bitextract_64(start_bit, end_bit, val);
+}
+
+} // anonymous namespace
+
+template <typename T>
+T Simulator::FPRecipSqrtEstimate(T op) {
+ static_assert(std::is_same<float, T>::value || std::is_same<double, T>::value,
+ "T must be a float or double");
+
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op == 0.0) {
+ if (copysign(1.0, op) < 0.0) {
+ return kFP64NegativeInfinity;
+ } else {
+ return kFP64PositiveInfinity;
+ }
+ } else if (copysign(1.0, op) < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else if (std::isinf(op)) {
+ return 0.0;
+ } else {
+ uint64_t fraction;
+ int32_t exp, result_exp;
+
+ if (sizeof(T) == sizeof(float)) {
+ exp = static_cast<int32_t>(float_exp(op));
+ fraction = float_mantissa(op);
+ fraction <<= 29;
+ } else {
+ exp = static_cast<int32_t>(double_exp(op));
+ fraction = double_mantissa(op);
+ }
+
+ if (exp == 0) {
+ while (Bits(fraction, 51, 51) == 0) {
+ fraction = Bits(fraction, 50, 0) << 1;
+ exp -= 1;
+ }
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+
+ double scaled;
+ if (Bits(exp, 0, 0) == 0) {
+ scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44);
+ } else {
+ scaled = double_pack(0, 1021, Bits(fraction, 51, 44) << 44);
+ }
+
+ if (sizeof(T) == sizeof(float)) {
+ result_exp = (380 - exp) / 2;
+ } else {
+ result_exp = (3068 - exp) / 2;
+ }
+
+ uint64_t estimate = bit_cast<uint64_t>(recip_sqrt_estimate(scaled));
+
+ if (sizeof(T) == sizeof(float)) {
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29));
+ return float_pack(0, exp_bits, est_bits);
+ } else {
+ return double_pack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
+ }
+ }
+}
+
+LogicVRegister Simulator::frsqrte(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<float>(input));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<double>(input));
+ }
+ }
+ return dst;
+}
+
+template <typename T>
+T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
+ static_assert(std::is_same<float, T>::value || std::is_same<double, T>::value,
+ "T must be a float or double");
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) {
+ sign = float_sign(op);
+ } else {
+ sign = double_sign(op);
+ }
+
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (std::isinf(op)) {
+ return (sign == 1) ? -0.0 : 0.0;
+ } else if (op == 0.0) {
+ FPProcessException(); // FPExc_DivideByZero exception.
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else if (((sizeof(T) == sizeof(float)) &&
+ (std::fabs(op) < std::pow(2.0, -128.0))) ||
+ ((sizeof(T) == sizeof(double)) &&
+ (std::fabs(op) < std::pow(2.0, -1024.0)))) {
+ bool overflow_to_inf = false;
+ switch (rounding) {
+ case FPTieEven:
+ overflow_to_inf = true;
+ break;
+ case FPPositiveInfinity:
+ overflow_to_inf = (sign == 0);
+ break;
+ case FPNegativeInfinity:
+ overflow_to_inf = (sign == 1);
+ break;
+ case FPZero:
+ overflow_to_inf = false;
+ break;
+ default:
+ break;
+ }
+ FPProcessException(); // FPExc_Overflow and FPExc_Inexact.
+ if (overflow_to_inf) {
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else {
+ // Return FPMaxNormal(sign).
+ if (sizeof(T) == sizeof(float)) {
+ return float_pack(sign, 0xfe, 0x07fffff);
+ } else {
+ return double_pack(sign, 0x7fe, 0x0fffffffffffffl);
+ }
+ }
+ } else {
+ uint64_t fraction;
+ int32_t exp, result_exp;
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) {
+ sign = float_sign(op);
+ exp = static_cast<int32_t>(float_exp(op));
+ fraction = float_mantissa(op);
+ fraction <<= 29;
+ } else {
+ sign = double_sign(op);
+ exp = static_cast<int32_t>(double_exp(op));
+ fraction = double_mantissa(op);
+ }
+
+ if (exp == 0) {
+ if (Bits(fraction, 51, 51) == 0) {
+ exp -= 1;
+ fraction = Bits(fraction, 49, 0) << 2;
+ } else {
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+ }
+
+ double scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44);
+
+ if (sizeof(T) == sizeof(float)) {
+ result_exp = 253 - exp;
+ } else {
+ result_exp = 2045 - exp;
+ }
+
+ double estimate = recip_estimate(scaled);
+
+ fraction = double_mantissa(estimate);
+ if (result_exp == 0) {
+ fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1);
+ } else if (result_exp == -1) {
+ fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2);
+ result_exp = 0;
+ }
+ if (sizeof(T) == sizeof(float)) {
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t frac_bits = static_cast<uint32_t>(Bits(fraction, 51, 29));
+ return float_pack(sign, exp_bits, frac_bits);
+ } else {
+ return double_pack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0));
+ }
+ }
+}
+
+LogicVRegister Simulator::frecpe(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, FPRounding round) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipEstimate<float>(input, round));
+ }
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipEstimate<double>(input, round));
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::ursqrte(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
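+    // Operands below 0.25 (after scaling by 2^-32) saturate to all ones.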
+ if (operand <= 0x3FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+// Based on reference C function recip_estimate from ARM ARM.
+double Simulator::recip_estimate(double a) {
+ int q, s;
+ double r;
+ q = static_cast<int>(a * 512.0);
+ r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0);
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
+
+LogicVRegister Simulator::urecpe(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
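+    // Operands below 0.5 (after scaling by 2^-32) saturate to all ones.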
+ if (operand <= 0x7FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::frecpx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ T result;
+ if (std::isnan(op)) {
+ result = FPProcessNaN(op);
+ } else {
+ int exp;
+ uint32_t sign;
+ if (sizeof(T) == sizeof(float)) {
+ sign = float_sign(op);
+ exp = static_cast<int>(float_exp(op));
+ exp = (exp == 0) ? (0xFF - 1) : static_cast<int>(Bits(~exp, 7, 0));
+ result = float_pack(sign, exp, 0);
+ } else {
+ sign = double_sign(op);
+ exp = static_cast<int>(double_exp(op));
+ exp = (exp == 0) ? (0x7FF - 1) : static_cast<int>(Bits(~exp, 10, 0));
+ result = double_pack(sign, exp, 0);
+ }
+ }
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::frecpx(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ frecpx<float>(vform, dst, src);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ frecpx<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::scvtf(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ float result = FixedToFloat(src.Int(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ double result = FixedToDouble(src.Int(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::ucvtf(VectorFormat vform, LogicVRegister dst,
+ const LogicVRegister& src, int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBytesFromFormat(vform) == kSRegSize) {
+ float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize);
+ double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+#endif // USE_SIMULATOR
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
index 1cd9785417..38ec8478fc 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -12,23 +12,78 @@ namespace internal {
#define __ assm->
+uint32_t float_sign(float val) {
+ uint32_t bits = bit_cast<uint32_t>(val);
+ return unsigned_bitextract_32(31, 31, bits);
+}
+
+uint32_t float_exp(float val) {
+ uint32_t bits = bit_cast<uint32_t>(val);
+ return unsigned_bitextract_32(30, 23, bits);
+}
+
+uint32_t float_mantissa(float val) {
+ uint32_t bits = bit_cast<uint32_t>(val);
+ return unsigned_bitextract_32(22, 0, bits);
+}
+
+uint32_t double_sign(double val) {
+ uint64_t bits = bit_cast<uint64_t>(val);
+ return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, bits));
+}
+
+uint32_t double_exp(double val) {
+ uint64_t bits = bit_cast<uint64_t>(val);
+ return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, bits));
+}
+
+uint64_t double_mantissa(double val) {
+ uint64_t bits = bit_cast<uint64_t>(val);
+ return unsigned_bitextract_64(51, 0, bits);
+}
+
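+// Reassemble an IEEE-754 value from its sign, exponent and mantissa fields;
+// e.g. float_pack(0, 127, 0) == 1.0f and double_pack(0, 1023, 0) == 1.0.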
+float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+ uint32_t bits = sign << kFloatExponentBits | exp;
+ return bit_cast<float>((bits << kFloatMantissaBits) | mantissa);
+}
+
+double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+ uint64_t bits = sign << kDoubleExponentBits | exp;
+ return bit_cast<double>((bits << kDoubleMantissaBits) | mantissa);
+}
+
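+// For example, float16classify(0x3C00) == FP_NORMAL (the half-precision
+// encoding of 1.0) and float16classify(0x7C00) == FP_INFINITE.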
+int float16classify(float16 value) {
+ const uint16_t exponent_max = (1 << kFloat16ExponentBits) - 1;
+ const uint16_t exponent_mask = exponent_max << kFloat16MantissaBits;
+ const uint16_t mantissa_mask = (1 << kFloat16MantissaBits) - 1;
+
+ const uint16_t exponent = (value & exponent_mask) >> kFloat16MantissaBits;
+ const uint16_t mantissa = value & mantissa_mask;
+ if (exponent == 0) {
+ if (mantissa == 0) {
+ return FP_ZERO;
+ }
+ return FP_SUBNORMAL;
+ } else if (exponent == exponent_max) {
+ if (mantissa == 0) {
+ return FP_INFINITE;
+ }
+ return FP_NAN;
+ }
+ return FP_NORMAL;
+}
int CountLeadingZeros(uint64_t value, int width) {
- // TODO(jbramley): Optimize this for ARM64 hosts.
- DCHECK((width == 32) || (width == 64));
- int count = 0;
- uint64_t bit_test = 1UL << (width - 1);
- while ((count < width) && ((bit_test & value) == 0)) {
- count++;
- bit_test >>= 1;
+ DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64));
+ if (value == 0) {
+ return width;
}
- return count;
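+  // Shifting the value to the top of the 64-bit range lets the 64-bit count
+  // handle any power-of-two width; e.g. CountLeadingZeros(1, 32) == 31.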
+ return base::bits::CountLeadingZeros64(value << (64 - width));
}
int CountLeadingSignBits(int64_t value, int width) {
- // TODO(jbramley): Optimize this for ARM64 hosts.
- DCHECK((width == 32) || (width == 64));
+ DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@@ -38,43 +93,32 @@ int CountLeadingSignBits(int64_t value, int width) {
int CountTrailingZeros(uint64_t value, int width) {
- // TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
- int count = 0;
- while ((count < width) && (((value >> count) & 1) == 0)) {
- count++;
+ if (width == 64) {
+ return static_cast<int>(base::bits::CountTrailingZeros64(value));
}
- return count;
+ return static_cast<int>(base::bits::CountTrailingZeros32(
+      static_cast<uint32_t>(value & 0xffffffff)));
}
int CountSetBits(uint64_t value, int width) {
- // TODO(jbramley): Would it be useful to allow other widths? The
- // implementation already supports them.
DCHECK((width == 32) || (width == 64));
+ if (width == 64) {
+ return static_cast<int>(base::bits::CountPopulation64(value));
+ }
+ return static_cast<int>(base::bits::CountPopulation32(
+      static_cast<uint32_t>(value & 0xffffffff)));
+}
- // Mask out unused bits to ensure that they are not counted.
- value &= (0xffffffffffffffffUL >> (64-width));
-
- // Add up the set bits.
- // The algorithm works by adding pairs of bit fields together iteratively,
- // where the size of each bit field doubles each time.
- // An example for an 8-bit value:
- // Bits: h g f e d c b a
- // \ | \ | \ | \ |
- // value = h+g f+e d+c b+a
- // \ | \ |
- // value = h+g+f+e d+c+b+a
- // \ |
- // value = h+g+f+e+d+c+b+a
- value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
- value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
- value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
- value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
- value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
- value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+int LowestSetBitPosition(uint64_t value) {
+ DCHECK_NE(value, 0U);
+ return CountTrailingZeros(value, 64) + 1;
+}
- return static_cast<int>(value);
+int HighestSetBitPosition(uint64_t value) {
+ DCHECK_NE(value, 0U);
+ return 63 - CountLeadingZeros(value, 64);
}
@@ -84,7 +128,7 @@ uint64_t LargestPowerOf2Divisor(uint64_t value) {
int MaskToBit(uint64_t mask) {
- DCHECK(CountSetBits(mask, 64) == 1);
+ DCHECK_EQ(CountSetBits(mask, 64), 1);
return CountTrailingZeros(mask, 64);
}
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index 35d9824837..920a84dbdf 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -8,6 +8,7 @@
#include <cmath>
#include "src/arm64/constants-arm64.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -16,40 +17,26 @@ namespace internal {
STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
-// Floating point representation.
-static inline uint32_t float_to_rawbits(float value) {
- uint32_t bits = 0;
- memcpy(&bits, &value, 4);
- return bits;
-}
-
-
-static inline uint64_t double_to_rawbits(double value) {
- uint64_t bits = 0;
- memcpy(&bits, &value, 8);
- return bits;
-}
-
-
-static inline float rawbits_to_float(uint32_t bits) {
- float value = 0.0;
- memcpy(&value, &bits, 4);
- return value;
-}
+uint32_t float_sign(float val);
+uint32_t float_exp(float val);
+uint32_t float_mantissa(float val);
+uint32_t double_sign(double val);
+uint32_t double_exp(double val);
+uint64_t double_mantissa(double val);
+float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
-static inline double rawbits_to_double(uint64_t bits) {
- double value = 0.0;
- memcpy(&value, &bits, 8);
- return value;
-}
-
+// An fpclassify() function for 16-bit half-precision floats.
+int float16classify(float16 value);
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
+int LowestSetBitPosition(uint64_t value);
+int HighestSetBitPosition(uint64_t value);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
@@ -86,7 +73,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
// NaN tests.
inline bool IsSignallingNaN(double num) {
- uint64_t raw = double_to_rawbits(num);
+ uint64_t raw = bit_cast<uint64_t>(num);
if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
return true;
}
@@ -95,13 +82,17 @@ inline bool IsSignallingNaN(double num) {
inline bool IsSignallingNaN(float num) {
- uint32_t raw = float_to_rawbits(num);
+ uint32_t raw = bit_cast<uint32_t>(num);
if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
return true;
}
return false;
}
+inline bool IsSignallingNaN(float16 num) {
+ const uint16_t kFP16QuietNaNMask = 0x0200;
+ return (float16classify(num) == FP_NAN) && ((num & kFP16QuietNaNMask) == 0);
+}
template <typename T>
inline bool IsQuietNaN(T num) {
@@ -112,13 +103,14 @@ inline bool IsQuietNaN(T num) {
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
DCHECK(std::isnan(num));
- return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
+ return bit_cast<double>(bit_cast<uint64_t>(num) | kDQuietNanMask);
}
inline float ToQuietNaN(float num) {
DCHECK(std::isnan(num));
- return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
+ return bit_cast<float>(bit_cast<uint32_t>(num) |
+ static_cast<uint32_t>(kSQuietNanMask));
}
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 4f54661aeb..e40f5b57f3 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -6,3 +6,5 @@ clemensh@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
titzer@chromium.org
+
+# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 516bce2543..fb257e316e 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -4,8 +4,6 @@
#include "src/asmjs/asm-js.h"
-#include "src/api-natives.h"
-#include "src/api.h"
#include "src/asmjs/asm-names.h"
#include "src/asmjs/asm-parser.h"
#include "src/assert-scope.h"
@@ -17,7 +15,8 @@
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/scanner.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
@@ -54,12 +53,12 @@ bool IsStdlibMemberValid(Isolate* isolate, Handle<JSReceiver> stdlib,
bool* is_typed_array) {
switch (member) {
case wasm::AsmJsParser::StandardMember::kInfinity: {
- Handle<Name> name = isolate->factory()->infinity_string();
+ Handle<Name> name = isolate->factory()->Infinity_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNumber() && std::isinf(value->Number());
}
case wasm::AsmJsParser::StandardMember::kNaN: {
- Handle<Name> name = isolate->factory()->nan_string();
+ Handle<Name> name = isolate->factory()->NaN_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNaN();
}
@@ -105,7 +104,6 @@ bool IsStdlibMemberValid(Isolate* isolate, Handle<JSReceiver> stdlib,
#undef STDLIB_ARRAY_TYPE
}
UNREACHABLE();
- return false;
}
void Report(Handle<Script> script, int position, Vector<const char> text,
@@ -193,9 +191,11 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
Zone* compile_zone = info->zone();
Zone translate_zone(info->isolate()->allocator(), ZONE_NAME);
- wasm::AsmJsParser parser(info->isolate(), &translate_zone, info->script(),
- info->literal()->start_position(),
- info->literal()->end_position());
+ std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
+ handle(String::cast(info->script()->source())),
+ info->literal()->start_position(), info->literal()->end_position()));
+ uintptr_t stack_limit = info->isolate()->stack_guard()->real_climit();
+ wasm::AsmJsParser parser(&translate_zone, stack_limit, std::move(stream));
if (!parser.Run()) {
DCHECK(!info->isolate()->has_pending_exception());
ReportCompilationFailure(info->script(), parser.failure_location(),
@@ -277,7 +277,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
ReportInstantiationFailure(script, position, "Requires standard library");
return MaybeHandle<Object>();
}
- int member_id = Smi::cast(stdlib_uses->get(i))->value();
+ int member_id = Smi::ToInt(stdlib_uses->get(i));
wasm::AsmJsParser::StandardMember member =
static_cast<wasm::AsmJsParser::StandardMember>(member_id);
if (!IsStdlibMemberValid(isolate, stdlib, member,
@@ -287,16 +287,6 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
}
}
- // Create the ffi object for foreign functions {"": foreign}.
- Handle<JSObject> ffi_object;
- if (!foreign.is_null()) {
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate->native_context()->object_function(), isolate);
- ffi_object = isolate->factory()->NewJSObject(object_function);
- JSObject::AddProperty(ffi_object, isolate->factory()->empty_string(),
- foreign, NONE);
- }
-
// Check that a valid heap buffer is provided if required.
if (stdlib_use_of_typed_array_present) {
if (memory.is_null()) {
@@ -314,8 +304,9 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
MaybeHandle<Object> maybe_module_object =
- wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
+ wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
if (maybe_module_object.is_null()) {
+ DCHECK(!isolate->has_pending_exception());
thrower.Reset(); // Ensure exceptions do not propagate.
ReportInstantiationFailure(script, position, "Internal wasm failure");
return MaybeHandle<Object>();
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 51b8f7bbc2..1e5f7d5dc4 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -11,9 +11,8 @@
#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-types.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/parsing/scanner-character-streams.h"
+#include "src/base/optional.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
#include "src/parsing/scanner.h"
#include "src/wasm/wasm-opcodes.h"
@@ -68,16 +67,16 @@ namespace wasm {
#define TOK(name) AsmJsScanner::kToken_##name
-AsmJsParser::AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
- int start, int end)
+AsmJsParser::AsmJsParser(Zone* zone, uintptr_t stack_limit,
+ std::unique_ptr<Utf16CharacterStream> stream)
: zone_(zone),
module_builder_(new (zone) WasmModuleBuilder(zone)),
return_type_(nullptr),
- stack_limit_(isolate->stack_guard()->real_climit()),
+ stack_limit_(stack_limit),
global_var_info_(zone),
local_var_info_(zone),
failed_(false),
- failure_location_(start),
+ failure_location_(kNoSourcePosition),
stdlib_name_(kTokenNone),
foreign_name_(kTokenNone),
heap_name_(kTokenNone),
@@ -89,9 +88,6 @@ AsmJsParser::AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
pending_label_(0),
global_imports_(zone) {
InitializeStdlibTypes();
- Handle<String> source(String::cast(script->source()), isolate);
- std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(source, start, end));
scanner_.SetStream(std::move(stream));
}
@@ -144,8 +140,8 @@ void AsmJsParser::InitializeStdlibTypes() {
stdlib_fround_ = AsmType::FroundType(zone());
}
-FunctionSig* AsmJsParser::ConvertSignature(
- AsmType* return_type, const std::vector<AsmType*>& params) {
+FunctionSig* AsmJsParser::ConvertSignature(AsmType* return_type,
+ const ZoneVector<AsmType*>& params) {
FunctionSig::Builder sig_builder(
zone(), !return_type->IsA(AsmType::Void()) ? 1 : 0, params.size());
for (auto param : params) {
@@ -215,7 +211,6 @@ wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo(
return &local_var_info_[index];
}
UNREACHABLE();
- return nullptr;
}
uint32_t AsmJsParser::VarIndex(VarInfo* info) {
@@ -348,9 +343,15 @@ void AsmJsParser::ValidateModule() {
if (info.kind == VarKind::kTable && !info.function_defined) {
FAIL("Undefined function table");
}
+ if (info.kind == VarKind::kImportedFunction && !info.function_defined) {
+ // For imported functions without a single call site, we insert a dummy
+ // import here to preserve the fact that there actually was an import.
+ FunctionSig* void_void_sig = FunctionSig::Builder(zone(), 0, 0).Build();
+ module_builder_->AddImport(info.import->function_name, void_void_sig);
+ }
}
- // Add start function to init things.
+ // Add start function to initialize things.
WasmFunctionBuilder* start = module_builder_->AddFunction();
module_builder_->MarkStartFunction(start);
for (auto& global_import : global_imports_) {
@@ -725,9 +726,9 @@ void AsmJsParser::ValidateFunction() {
int start_position = static_cast<int>(scanner_.Position());
current_function_builder_->SetAsmFunctionStartPosition(start_position);
- std::vector<AsmType*> params;
+ CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
- std::vector<ValueType> locals;
+ CachedVector<ValueType> locals(cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);
function_temp_locals_offset_ = static_cast<uint32_t>(
@@ -787,13 +788,14 @@ void AsmJsParser::ValidateFunction() {
}
// 6.4 ValidateFunction
-void AsmJsParser::ValidateFunctionParams(std::vector<AsmType*>* params) {
+void AsmJsParser::ValidateFunctionParams(ZoneVector<AsmType*>* params) {
// TODO(bradnelson): Do this differently so that the scanner doesn't need to
// have a state transition that needs knowledge of how the scanner works
// inside.
scanner_.EnterLocalScope();
EXPECT_TOKEN('(');
- std::vector<AsmJsScanner::token_t> function_parameters;
+ CachedVector<AsmJsScanner::token_t> function_parameters(
+ cached_token_t_vectors_);
while (!failed_ && !Peek(')')) {
if (!scanner_.IsLocal()) {
FAIL("Expected parameter name");
@@ -847,8 +849,8 @@ void AsmJsParser::ValidateFunctionParams(std::vector<AsmType*>* params) {
}
// 6.4 ValidateFunction - locals
-void AsmJsParser::ValidateFunctionLocals(
- size_t param_count, std::vector<ValueType>* locals) {
+void AsmJsParser::ValidateFunctionLocals(size_t param_count,
+ ZoneVector<ValueType>* locals) {
// Local Variables.
while (Peek(TOK(var))) {
scanner_.EnterLocalScope();
@@ -1262,7 +1264,7 @@ void AsmJsParser::SwitchStatement() {
Begin(pending_label_);
pending_label_ = 0;
// TODO(bradnelson): Make less weird.
- std::vector<int32_t> cases;
+ CachedVector<int32_t> cases(cached_int_vectors_);
GatherCases(&cases);
EXPECT_TOKEN('{');
size_t count = cases.size() + 1;
@@ -1398,7 +1400,6 @@ AsmType* AsmJsParser::Identifier() {
return info->type;
}
UNREACHABLE();
- return nullptr;
}
// 6.8.4 CallExpression
@@ -1677,7 +1678,7 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
}
} else if (Check('/')) {
AsmType* b;
- RECURSEn(b = MultiplicativeExpression());
+ RECURSEn(b = UnaryExpression());
if (a->IsA(AsmType::DoubleQ()) && b->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF64Div);
a = AsmType::Double();
@@ -1695,7 +1696,7 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
}
} else if (Check('%')) {
AsmType* b;
- RECURSEn(b = MultiplicativeExpression());
+ RECURSEn(b = UnaryExpression());
if (a->IsA(AsmType::DoubleQ()) && b->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF64Mod);
a = AsmType::Double();
@@ -2014,8 +2015,7 @@ AsmType* AsmJsParser::ValidateCall() {
// both cases we might be seeing the {function_name} for the first time and
// hence allocate a {VarInfo} here, all subsequent uses of the same name then
// need to match the information stored at this point.
- // TODO(mstarzinger): Consider using Chromiums base::Optional instead.
- std::unique_ptr<TemporaryVariableScope> tmp;
+ base::Optional<TemporaryVariableScope> tmp;
if (Check('[')) {
RECURSEn(EqualityExpression());
EXPECT_TOKENn('&');
@@ -2023,7 +2023,7 @@ AsmType* AsmJsParser::ValidateCall() {
if (!CheckForUnsigned(&mask)) {
FAILn("Expected mask literal");
}
- if (!base::bits::IsPowerOfTwo32(mask + 1)) {
+ if (!base::bits::IsPowerOfTwo(mask + 1)) {
FAILn("Expected power of 2 mask");
}
current_function_builder_->EmitI32Const(mask);
@@ -2050,8 +2050,8 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->EmitI32Const(function_info->index);
current_function_builder_->Emit(kExprI32Add);
// We have to use a temporary for the correct order of evaluation.
- tmp.reset(new TemporaryVariableScope(this));
- current_function_builder_->EmitSetLocal(tmp.get()->get());
+ tmp.emplace(this);
+ current_function_builder_->EmitSetLocal(tmp->get());
// The position of function table calls is after the table lookup.
call_pos = static_cast<int>(scanner_.Position());
} else {
@@ -2070,8 +2070,8 @@ AsmType* AsmJsParser::ValidateCall() {
}
// Parse argument list and gather types.
- std::vector<AsmType*> param_types;
- ZoneVector<AsmType*> param_specific_types(zone());
+ CachedVector<AsmType*> param_types(cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> param_specific_types(cached_asm_type_p_vectors_);
EXPECT_TOKENn('(');
while (!failed_ && !Peek(')')) {
AsmType* t;
@@ -2149,10 +2149,12 @@ AsmType* AsmJsParser::ValidateCall() {
auto it = function_info->import->cache.find(sig);
if (it != function_info->import->cache.end()) {
index = it->second;
+ DCHECK(function_info->function_defined);
} else {
index =
module_builder_->AddImport(function_info->import->function_name, sig);
function_info->import->cache[sig] = index;
+ function_info->function_defined = true;
}
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->EmitWithU32V(kExprCallFunction, index);
@@ -2283,7 +2285,7 @@ AsmType* AsmJsParser::ValidateCall() {
}
}
if (function_info->kind == VarKind::kTable) {
- current_function_builder_->EmitGetLocal(tmp.get()->get());
+ current_function_builder_->EmitGetLocal(tmp->get());
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitU32V(signature_index);
@@ -2420,7 +2422,7 @@ void AsmJsParser::ScanToClosingParenthesis() {
}
}
-void AsmJsParser::GatherCases(std::vector<int32_t>* cases) {
+void AsmJsParser::GatherCases(ZoneVector<int32_t>* cases) {
size_t start = scanner_.Position();
int depth = 0;
for (;;) {
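The IsPowerOfTwo(mask + 1) check in ValidateCall above accepts exactly the all-ones masks an asm.js function-table index expression may use, that is, masks of the form 2^n - 1 (0x0, 0x1, 0x3, 0x7, ..., 0xFF, ...). A self-contained sketch of that predicate, assuming nothing beyond <cstdint> (the patch itself relies on base::bits::IsPowerOfTwo):

#include <cstdint>

// True iff x is a nonzero power of two: x has exactly one bit set.
static bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// A function-table call is written "table[expr & mask](...)"; the mask must be
// all ones, i.e. mask + 1 must be a power of two.
static bool IsValidTableMask(uint32_t mask) { return IsPowerOfTwo(mask + 1); }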
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 2f20b4813d..4f880785ef 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -5,8 +5,8 @@
#ifndef V8_ASMJS_ASM_PARSER_H_
#define V8_ASMJS_ASM_PARSER_H_
+#include <memory>
#include <string>
-#include <vector>
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-types.h"
@@ -15,6 +15,9 @@
namespace v8 {
namespace internal {
+
+class Utf16CharacterStream;
+
namespace wasm {
// A custom parser + validator + wasm converter for asm.js:
@@ -46,8 +49,8 @@ class AsmJsParser {
typedef std::unordered_set<StandardMember, std::hash<int>> StdlibSet;
- explicit AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
- int start, int end);
+ explicit AsmJsParser(Zone* zone, uintptr_t stack_limit,
+ std::unique_ptr<Utf16CharacterStream> stream);
bool Run();
const char* failure_message() const { return failure_message_; }
int failure_location() const { return failure_location_; }
@@ -105,6 +108,41 @@ class AsmJsParser {
// Helper class to make {TempVariable} safe for nesting.
class TemporaryVariableScope;
+ template <typename T>
+ class CachedVectors {
+ public:
+ explicit CachedVectors(Zone* zone) : reusable_vectors_(zone) {}
+
+ Zone* zone() const { return reusable_vectors_.get_allocator().zone(); }
+
+ inline void fill(ZoneVector<T>* vec) {
+ if (reusable_vectors_.empty()) return;
+ reusable_vectors_.back().swap(*vec);
+ reusable_vectors_.pop_back();
+ vec->clear();
+ }
+
+ inline void reuse(ZoneVector<T>* vec) {
+ reusable_vectors_.emplace_back(std::move(*vec));
+ }
+
+ private:
+ ZoneVector<ZoneVector<T>> reusable_vectors_;
+ };
+
+ template <typename T>
+ class CachedVector final : public ZoneVector<T> {
+ public:
+ explicit CachedVector(CachedVectors<T>& cache)
+ : ZoneVector<T>(cache.zone()), cache_(&cache) {
+ cache.fill(this);
+ }
+ ~CachedVector() { cache_->reuse(this); }
+
+ private:
+ CachedVectors<T>* cache_;
+ };
+
Zone* zone_;
AsmJsScanner scanner_;
WasmModuleBuilder* module_builder_;
@@ -115,6 +153,11 @@ class AsmJsParser {
ZoneVector<VarInfo> global_var_info_;
ZoneVector<VarInfo> local_var_info_;
+ CachedVectors<ValueType> cached_valuetype_vectors_{zone_};
+ CachedVectors<AsmType*> cached_asm_type_p_vectors_{zone_};
+ CachedVectors<AsmJsScanner::token_t> cached_token_t_vectors_{zone_};
+ CachedVectors<int32_t> cached_int_vectors_{zone_};
+
int function_temp_locals_offset_;
int function_temp_locals_used_;
int function_temp_locals_depth_;
@@ -267,7 +310,7 @@ class AsmJsParser {
void InitializeStdlibTypes();
FunctionSig* ConvertSignature(AsmType* return_type,
- const std::vector<AsmType*>& params);
+ const ZoneVector<AsmType*>& params);
void ValidateModule(); // 6.1 ValidateModule
void ValidateModuleParameters(); // 6.1 ValidateModule - parameters
@@ -281,9 +324,9 @@ class AsmJsParser {
void ValidateExport(); // 6.2 ValidateExport
void ValidateFunctionTable(); // 6.3 ValidateFunctionTable
void ValidateFunction(); // 6.4 ValidateFunction
- void ValidateFunctionParams(std::vector<AsmType*>* params);
+ void ValidateFunctionParams(ZoneVector<AsmType*>* params);
void ValidateFunctionLocals(size_t param_count,
- std::vector<ValueType>* locals);
+ ZoneVector<ValueType>* locals);
void ValidateStatement(); // 6.5 ValidateStatement
void Block(); // 6.5.1 Block
void ExpressionStatement(); // 6.5.2 ExpressionStatement
@@ -331,7 +374,7 @@ class AsmJsParser {
// Used as part of {SwitchStatement}. Collects all case labels in the current
// switch-statement, then resets the scanner position. This is one piece that
// makes this parser not be a pure single-pass.
- void GatherCases(std::vector<int32_t>* cases);
+ void GatherCases(ZoneVector<int32_t>* cases);
};
} // namespace wasm
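The CachedVector / CachedVectors pair declared above is a small scope-based pool: a CachedVector grabs a previously released backing buffer from its pool on construction and hands the buffer back on destruction, so the per-function temporaries in ValidateFunction, ValidateCall and SwitchStatement stop reallocating for every function parsed. A minimal sketch of the same idea, using std::vector in place of the zone-allocated ZoneVector (the Zone allocator is V8-specific and omitted here):

#include <utility>
#include <vector>

template <typename T>
class CachedVectors {
 public:
  // Move a previously released buffer into *vec, if one is available.
  void fill(std::vector<T>* vec) {
    if (reusable_.empty()) return;
    vec->swap(reusable_.back());
    reusable_.pop_back();
    vec->clear();  // reuse the capacity, not the old contents
  }
  // Take ownership of a buffer that is no longer needed.
  void reuse(std::vector<T>* vec) { reusable_.emplace_back(std::move(*vec)); }

 private:
  std::vector<std::vector<T>> reusable_;
};

template <typename T>
class CachedVector : public std::vector<T> {
 public:
  explicit CachedVector(CachedVectors<T>& cache) : cache_(&cache) {
    cache.fill(this);
  }
  ~CachedVector() { cache_->reuse(this); }

 private:
  CachedVectors<T>* cache_;
};

// Usage: capacity acquired while parsing one function is reused by the next.
//   CachedVectors<int> pool;
//   { CachedVector<int> params(pool); params.push_back(1); }  // buffer returned
//   { CachedVector<int> params(pool); /* starts with reused capacity */ }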
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 14b07306fd..a1e2b05c9d 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -46,6 +46,10 @@ AsmJsScanner::AsmJsScanner()
#undef V
}
+// The destructor of unique_ptr<T> requires a complete declaration of T; we only
+// want to include the necessary declaration here instead of in the header file.
+AsmJsScanner::~AsmJsScanner() {}
+
void AsmJsScanner::SetStream(std::unique_ptr<Utf16CharacterStream> stream) {
stream_ = std::move(stream);
Next();
@@ -208,7 +212,6 @@ std::string AsmJsScanner::Name(token_t token) const {
break;
}
UNREACHABLE();
- return "{unreachable}";
}
#endif
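The explicitly defined, empty destructor above exists because ~std::unique_ptr<T> must see a complete type T when it is instantiated; moving AsmJsScanner's destructor into the .cc file keeps the Utf16CharacterStream header out of asm-scanner.h. A generic sketch of the idiom with hypothetical Widget and Impl names (both "files" shown inline for brevity):

// widget.h: only a forward declaration of Impl is needed here.
#include <memory>

class Impl;

class Widget {
 public:
  Widget();
  ~Widget();  // declared here, defined where Impl is a complete type

 private:
  std::unique_ptr<Impl> impl_;
};

// widget.cc: Impl is complete here, so unique_ptr's deleter can be instantiated.
class Impl {
 public:
  int value = 0;
};

Widget::Widget() : impl_(new Impl()) {}
Widget::~Widget() = default;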
diff --git a/deps/v8/src/asmjs/asm-scanner.h b/deps/v8/src/asmjs/asm-scanner.h
index d519862a83..13ffb21bc7 100644
--- a/deps/v8/src/asmjs/asm-scanner.h
+++ b/deps/v8/src/asmjs/asm-scanner.h
@@ -32,6 +32,8 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
typedef int32_t token_t;
AsmJsScanner();
+ ~AsmJsScanner();
+
// Pick the stream to parse (must be called before anything else).
void SetStream(std::unique_ptr<Utf16CharacterStream> stream);
diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc
index 79c43a370b..3deb588e4f 100644
--- a/deps/v8/src/asmjs/asm-types.cc
+++ b/deps/v8/src/asmjs/asm-types.cc
@@ -69,7 +69,6 @@ bool AsmType::IsA(AsmType* that) {
}
UNREACHABLE();
- return that == this;
}
int32_t AsmType::ElementSizeInBytes() {
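This hunk, like several earlier ones in the patch, drops the dead return statement that used to follow UNREACHABLE(); that only stays warning-free if the macro expands to a call the compiler knows cannot return. A minimal sketch of such a macro, assuming a plain Abort helper rather than V8's actual fatal-error machinery, with an illustrative ElementSize function:

#include <cstdio>
#include <cstdlib>

[[noreturn]] inline void Abort(const char* file, int line) {
  std::fprintf(stderr, "unreachable code reached at %s:%d\n", file, line);
  std::abort();
}

#define UNREACHABLE() Abort(__FILE__, __LINE__)

// With [[noreturn]] the compiler accepts a non-void function ending in
// UNREACHABLE() without a trailing return value:
int ElementSize(int kind) {
  switch (kind) {
    case 0: return 1;
    case 1: return 4;
  }
  UNREACHABLE();  // no "return 0;" needed after this
}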
diff --git a/deps/v8/src/assembler-inl.h b/deps/v8/src/assembler-inl.h
index 24d0377ce5..5cf4fae63a 100644
--- a/deps/v8/src/assembler-inl.h
+++ b/deps/v8/src/assembler-inl.h
@@ -23,8 +23,6 @@
#include "src/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87-inl.h"
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 20a7b6c51e..c561050ed6 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -55,6 +55,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
+#include "src/isolate.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -84,8 +85,6 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@@ -144,8 +143,7 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// Implementation of AssemblerBase
AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
- : serializer_enabled_(isolate->serializer_enabled()),
- max_old_generation_size_(isolate->heap()->MaxOldGenerationSize())
+ : serializer_enabled_(isolate->serializer_enabled())
#if V8_TARGET_ARCH_X64
,
code_range_start_(
@@ -267,16 +265,12 @@ unsigned CpuFeatures::dcache_line_size_ = 0;
// 01: code_target: [6-bit pc delta] 01
//
// 10: short_data_record: [6-bit pc delta] 10 followed by
-// [6-bit data delta] [2-bit data type tag]
+// [8-bit data delta]
//
// 11: long_record [6 bit reloc mode] 11
// followed by pc delta
// followed by optional data depending on type.
//
-// 1-bit data type tags, used in short_data_record and data_jump long_record:
-// code_target_with_id: 0
-// deopt_reason: 1
-//
// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
// 6 bits and a part that does not. The latter is encoded as a long record
// with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
@@ -292,8 +286,6 @@ unsigned CpuFeatures::dcache_line_size_ = 0;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kLongTagBits = 6;
-const int kShortDataTypeTagBits = 1;
-const int kShortDataBits = kBitsPerByte - kShortDataTypeTagBits;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
@@ -310,14 +302,10 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-const int kCodeWithIdTag = 0;
-const int kDeoptReasonTag = 1;
-
void RelocInfo::update_wasm_memory_reference(
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK_GE(wasm_memory_reference(), old_base);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
// The reference is not checked here but at runtime. Validity of references
// may change over time.
@@ -399,9 +387,8 @@ void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
*--pos_ = pc_delta << kTagBits | tag;
}
-
-void RelocInfoWriter::WriteShortTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = static_cast<byte>(data_delta << kShortDataTypeTagBits | tag);
+void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
+ *--pos_ = static_cast<byte>(data_delta);
}
@@ -453,24 +440,10 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
- } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- // Use signed delta-encoding for id.
- DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
- int id_delta = static_cast<int>(rinfo->data()) - last_id_;
- // Check if delta is small enough to fit in a tagged byte.
- if (is_intn(id_delta, kShortDataBits)) {
- WriteShortTaggedPC(pc_delta, kLocatableTag);
- WriteShortTaggedData(id_delta, kCodeWithIdTag);
- } else {
- // Otherwise, use costly encoding.
- WriteModeAndPC(pc_delta, rmode);
- WriteIntData(id_delta);
- }
- last_id_ = static_cast<int>(rinfo->data());
} else if (rmode == RelocInfo::DEOPT_REASON) {
- DCHECK(rinfo->data() < (1 << kShortDataBits));
+ DCHECK(rinfo->data() < (1 << kBitsPerByte));
WriteShortTaggedPC(pc_delta, kLocatableTag);
- WriteShortTaggedData(rinfo->data(), kDeoptReasonTag);
+ WriteShortData(rinfo->data());
} else {
WriteModeAndPC(pc_delta, rmode);
if (RelocInfo::IsComment(rmode)) {
@@ -511,16 +484,6 @@ inline void RelocIterator::AdvanceReadPC() {
}
-void RelocIterator::AdvanceReadId() {
- int x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
- }
- last_id_ += x;
- rinfo_.data_ = last_id_;
-}
-
-
void RelocIterator::AdvanceReadInt() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
@@ -554,23 +517,9 @@ void RelocIterator::AdvanceReadLongPCJump() {
rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
}
-
-inline int RelocIterator::GetShortDataTypeTag() {
- return *pos_ & ((1 << kShortDataTypeTagBits) - 1);
-}
-
-
-inline void RelocIterator::ReadShortTaggedId() {
- int8_t signed_b = *pos_;
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- last_id_ += signed_b >> kShortDataTypeTagBits;
- rinfo_.data_ = last_id_;
-}
-
-
-inline void RelocIterator::ReadShortTaggedData() {
+inline void RelocIterator::ReadShortData() {
uint8_t unsigned_b = *pos_;
- rinfo_.data_ = unsigned_b >> kShortDataTypeTagBits;
+ rinfo_.data_ = unsigned_b;
}
@@ -592,18 +541,9 @@ void RelocIterator::next() {
} else if (tag == kLocatableTag) {
ReadShortTaggedPC();
Advance();
- int data_type_tag = GetShortDataTypeTag();
- if (data_type_tag == kCodeWithIdTag) {
- if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
- ReadShortTaggedId();
- return;
- }
- } else {
- DCHECK(data_type_tag == kDeoptReasonTag);
- if (SetMode(RelocInfo::DEOPT_REASON)) {
- ReadShortTaggedData();
- return;
- }
+ if (SetMode(RelocInfo::DEOPT_REASON)) {
+ ReadShortData();
+ return;
}
} else {
DCHECK(tag == kDefaultTag);
@@ -612,13 +552,7 @@ void RelocIterator::next() {
AdvanceReadLongPCJump();
} else {
AdvanceReadPC();
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- if (SetMode(rmode)) {
- AdvanceReadId();
- return;
- }
- Advance(kIntSize);
- } else if (RelocInfo::IsComment(rmode)) {
+ if (RelocInfo::IsComment(rmode)) {
if (SetMode(rmode)) {
AdvanceReadData();
return;
@@ -661,7 +595,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
- last_id_ = 0;
byte* sequence = code->FindCodeAgeSequence();
// We get the isolate from the map, because at serialization time
// the code pointer has been cloned and isn't really in heap space.
@@ -683,7 +616,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
- last_id_ = 0;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
@@ -723,8 +655,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case CODE_TARGET:
return "code target";
- case CODE_TARGET_WITH_ID:
- return "code target with id";
case CELL:
return "property cell";
case RUNTIME_ENTRY:
@@ -772,7 +702,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
- return "number_of_modes";
}
return "unknown relocation type";
}
@@ -799,9 +728,6 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
<< static_cast<const void*>(target_address()) << ")";
- if (rmode_ == CODE_TARGET_WITH_ID) {
- os << " (id=" << static_cast<int>(data_) << ")";
- }
} else if (IsRuntimeEntry(rmode_) &&
isolate->deoptimizer_data() != NULL) {
      // Deoptimization bailouts are stored as runtime entries.
@@ -828,7 +754,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case CELL:
Object::VerifyPointer(target_cell());
break;
- case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
@@ -895,7 +820,6 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
return ExternalReference::BUILTIN_CALL_TRIPLE;
}
UNREACHABLE();
- return ExternalReference::BUILTIN_CALL;
}
@@ -951,10 +875,8 @@ ExternalReference ExternalReference::interpreter_dispatch_counters(
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
-
-ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
- : address_(isolate->get_address_from_id(id)) {}
-
+ExternalReference::ExternalReference(IsolateAddressId id, Isolate* isolate)
+ : address_(isolate->get_address_from_id(id)) {}
ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
@@ -1015,6 +937,13 @@ ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
+void ExternalReference::set_redirector(
+ Isolate* isolate, ExternalReferenceRedirector* redirector) {
+ // We can't stack them.
+ DCHECK(isolate->external_reference_redirector() == NULL);
+ isolate->set_external_reference_redirector(
+ reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
+}
ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
@@ -1393,8 +1322,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_S390
function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
-#elif V8_TARGET_ARCH_X87
- function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
UNREACHABLE();
#endif
@@ -1578,6 +1505,19 @@ ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
+ExternalReference ExternalReference::orderedhashmap_gethash_raw(
+ Isolate* isolate) {
+ auto f = OrderedHashMap::GetHash;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
+template <typename CollectionType, int entrysize>
+ExternalReference ExternalReference::orderedhashtable_has_raw(
+ Isolate* isolate) {
+ auto f = OrderedHashTable<CollectionType, entrysize>::HasKey;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
ExternalReference ExternalReference::try_internalize_string_function(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1608,6 +1548,11 @@ ExternalReference::search_string_raw<const uc16, const uint8_t>(Isolate*);
template ExternalReference
ExternalReference::search_string_raw<const uc16, const uc16>(Isolate*);
+template ExternalReference
+ExternalReference::orderedhashtable_has_raw<OrderedHashMap, 2>(Isolate*);
+template ExternalReference
+ExternalReference::orderedhashtable_has_raw<OrderedHashSet, 1>(Isolate*);
+
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
@@ -1624,11 +1569,6 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
-ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
- Isolate* isolate) {
- return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
-}
-
ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
Isolate* isolate) {
return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
@@ -1959,6 +1899,17 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
return !empty ? emitted_label_.pos() : 0;
}
+HeapObjectRequest::HeapObjectRequest(double heap_number, int offset)
+ : kind_(kHeapNumber), offset_(offset) {
+ value_.heap_number = heap_number;
+ DCHECK(!IsSmiDouble(value_.heap_number));
+}
+
+HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
+ : kind_(kCodeStub), offset_(offset) {
+ value_.code_stub = code_stub;
+ DCHECK_NOT_NULL(value_.code_stub);
+}
// Platform specific but identical code for all the platforms.
@@ -1988,10 +1939,16 @@ void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
void Assembler::DataAlign(int m) {
- DCHECK(m >= 2 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
db(0);
}
}
+
+void Assembler::RequestHeapObject(HeapObjectRequest request) {
+ request.set_offset(pc_offset());
+ heap_object_requests_.push_front(request);
+}
+
} // namespace internal
} // namespace v8
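With the separate data type tag gone, a short data record in the stream above is just two bytes: a PC byte carrying a 6-bit pc delta above the 2-bit kLocatableTag, followed by one raw data byte (which is why the DEOPT_REASON value must now fit in a single byte, per the DCHECK against 1 << kBitsPerByte). The real writer emits these bytes backwards into the buffer; the per-record layout is the same. A small forward-writing sketch using the constants from the hunks above, where kLocatableTag is assumed to be binary 10, i.e. 2, per the format comment:

#include <cstdint>

constexpr int kTagBits = 2;
constexpr int kTagMask = (1 << kTagBits) - 1;
constexpr int kLocatableTag = 2;  // the "10" short_data_record tag

// First byte: [6-bit pc delta][2-bit tag]. Second byte: raw 8-bit data.
inline void WriteShortDataRecord(uint8_t* out, uint32_t pc_delta, uint8_t data) {
  out[0] = static_cast<uint8_t>(pc_delta << kTagBits | kLocatableTag);
  out[1] = data;  // e.g. a deopt reason, which must fit in one byte
}

inline void ReadShortDataRecord(const uint8_t* in, uint32_t* pc_delta,
                                uint8_t* data) {
  *pc_delta = in[0] >> kTagBits;
  *data = in[1];
  // The record kind is selected by in[0] & kTagMask.
}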
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 65976676b4..f625dc5625 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -35,11 +35,13 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
+#include <forward_list>
+
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/deoptimize-reason.h"
+#include "src/double.h"
#include "src/globals.h"
-#include "src/isolate.h"
#include "src/label.h"
#include "src/log.h"
#include "src/register-configuration.h"
@@ -53,6 +55,7 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class Isolate;
class SourcePosition;
class StatsCounter;
@@ -69,7 +72,6 @@ class AssemblerBase: public Malloced {
IsolateData(const IsolateData&) = default;
bool serializer_enabled_;
- size_t max_old_generation_size_;
#if V8_TARGET_ARCH_X64
Address code_range_start_;
#endif
@@ -108,7 +110,6 @@ class AssemblerBase: public Malloced {
} else {
// Embedded constant pool not supported on this architecture.
UNREACHABLE();
- return false;
}
}
@@ -163,7 +164,6 @@ class AssemblerBase: public Malloced {
friend class ConstantPoolUnavailableScope;
};
-
// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
public:
@@ -324,9 +324,10 @@ class RelocInfo {
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
- CODE_TARGET_WITH_ID,
EMBEDDED_OBJECT,
- // To relocate pointers into the wasm memory embedded in wasm code
+ // Wasm entries are to relocate pointers into the wasm memory embedded in
+ // wasm code. Everything after WASM_MEMORY_REFERENCE (inclusive) is not
+ // GC'ed.
WASM_MEMORY_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_MEMORY_SIZE_REFERENCE,
@@ -334,7 +335,6 @@ class RelocInfo {
WASM_PROTECTED_INSTRUCTION_LANDING,
CELL,
- // Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
COMMENT,
@@ -373,8 +373,8 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
- LAST_CODE_ENUM = CODE_TARGET_WITH_ID,
- LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
+ LAST_CODE_ENUM = CODE_TARGET,
+ LAST_GCED_ENUM = EMBEDDED_OBJECT,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@@ -431,7 +431,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
- IsDebugBreakSlotAtCall(mode) || IsDebugBreakSlotAtTailCall(mode);
+ IsDebugBreakSlotAtCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@@ -442,9 +442,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
- static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
- return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
- }
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
@@ -619,7 +616,6 @@ class RelocInfo {
#endif
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
- static const int kDataMask = (1 << CODE_TARGET_WITH_ID) | (1 << COMMENT);
static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL;
@@ -647,8 +643,8 @@ class RelocInfo {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
- RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_id_(0) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc), last_id_(0) {}
+ RelocInfoWriter() : pos_(NULL), last_pc_(NULL) {}
+ RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@@ -673,7 +669,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
- inline void WriteShortTaggedData(intptr_t data_delta, int tag);
+ inline void WriteShortData(intptr_t data_delta);
inline void WriteMode(RelocInfo::Mode rmode);
inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
@@ -682,7 +678,6 @@ class RelocInfoWriter BASE_EMBEDDED {
byte* pos_;
byte* last_pc_;
- int last_id_;
RelocInfo::Mode last_mode_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
@@ -726,13 +721,10 @@ class RelocIterator: public Malloced {
void AdvanceReadLongPCJump();
- int GetShortDataTypeTag();
void ReadShortTaggedPC();
- void ReadShortTaggedId();
- void ReadShortTaggedData();
+ void ReadShortData();
void AdvanceReadPC();
- void AdvanceReadId();
void AdvanceReadInt();
void AdvanceReadData();
@@ -748,7 +740,6 @@ class RelocIterator: public Malloced {
RelocInfo rinfo_;
bool done_;
int mode_mask_;
- int last_id_;
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
@@ -836,7 +827,7 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(StatsCounter* counter);
- ExternalReference(Isolate::AddressId id, Isolate* isolate);
+ ExternalReference(IsolateAddressId id, Isolate* isolate);
explicit ExternalReference(const SCTableReference& table_ref);
@@ -1002,15 +993,17 @@ class ExternalReference BASE_EMBEDDED {
template <typename SubjectChar, typename PatternChar>
static ExternalReference search_string_raw(Isolate* isolate);
+ static ExternalReference orderedhashmap_gethash_raw(Isolate* isolate);
+
+ template <typename CollectionType, int entrysize>
+ static ExternalReference orderedhashtable_has_raw(Isolate* isolate);
+
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
static ExternalReference cpu_features();
- static ExternalReference is_tail_call_elimination_enabled_address(
- Isolate* isolate);
-
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_hook_on_function_call_address(
Isolate* isolate);
@@ -1057,12 +1050,7 @@ class ExternalReference BASE_EMBEDDED {
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
static void set_redirector(Isolate* isolate,
- ExternalReferenceRedirector* redirector) {
- // We can't stack them.
- DCHECK(isolate->external_reference_redirector() == NULL);
- isolate->set_external_reference_redirector(
- reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
- }
+ ExternalReferenceRedirector* redirector);
static ExternalReference stress_deopt_count(Isolate* isolate);
@@ -1156,8 +1144,10 @@ class ConstantPoolEntry {
: position_(position),
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
value_(value) {}
- ConstantPoolEntry(int position, double value)
- : position_(position), merged_index_(SHARING_ALLOWED), value64_(value) {}
+ ConstantPoolEntry(int position, Double value)
+ : position_(position),
+ merged_index_(SHARING_ALLOWED),
+ value64_(value.AsUint64()) {}
int position() const { return position_; }
bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
@@ -1167,6 +1157,7 @@ class ConstantPoolEntry {
return merged_index_;
}
void set_merged_index(int index) {
+ DCHECK(sharing_ok());
merged_index_ = index;
DCHECK(is_merged());
}
@@ -1179,7 +1170,7 @@ class ConstantPoolEntry {
merged_index_ = offset;
}
intptr_t value() const { return value_; }
- uint64_t value64() const { return bit_cast<uint64_t>(value64_); }
+ uint64_t value64() const { return value64_; }
enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
@@ -1194,7 +1185,7 @@ class ConstantPoolEntry {
int merged_index_;
union {
intptr_t value_;
- double value64_;
+ uint64_t value64_;
};
enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
@@ -1215,11 +1206,16 @@ class ConstantPoolBuilder BASE_EMBEDDED {
}
// Add double constant to the embedded constant pool
- ConstantPoolEntry::Access AddEntry(int position, double value) {
+ ConstantPoolEntry::Access AddEntry(int position, Double value) {
ConstantPoolEntry entry(position, value);
return AddEntry(entry, ConstantPoolEntry::DOUBLE);
}
+ // Add double constant to the embedded constant pool
+ ConstantPoolEntry::Access AddEntry(int position, double value) {
+ return AddEntry(position, Double(value));
+ }
+
// Previews the access type required for the next new entry to be added.
ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;
@@ -1265,6 +1261,46 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
+class HeapObjectRequest {
+ public:
+ explicit HeapObjectRequest(double heap_number, int offset = -1);
+ explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
+
+ enum Kind { kHeapNumber, kCodeStub };
+ Kind kind() const { return kind_; }
+
+ double heap_number() const {
+ DCHECK_EQ(kind(), kHeapNumber);
+ return value_.heap_number;
+ }
+
+ CodeStub* code_stub() const {
+ DCHECK_EQ(kind(), kCodeStub);
+ return value_.code_stub;
+ }
+
+ // The code buffer offset at the time of the request.
+ int offset() const {
+ DCHECK_GE(offset_, 0);
+ return offset_;
+ }
+ void set_offset(int offset) {
+ DCHECK_LT(offset_, 0);
+ offset_ = offset;
+ DCHECK_GE(offset_, 0);
+ }
+
+ private:
+ Kind kind_;
+
+ union {
+ double heap_number;
+ CodeStub* code_stub;
+ } value_;
+
+ int offset_;
+};
+
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_
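ConstantPoolEntry now stores its 64-bit payload as uint64_t, initialized from Double(value).AsUint64(), so the pool deals only in exact IEEE-754 bit patterns and never round-trips the value through a double member. A sketch of such a bit cast using std::memcpy in place of V8's Double helper; SameConstantPoolValue is a hypothetical name for illustration:

#include <cstdint>
#include <cstring>

// Reinterpret a double as its IEEE-754 bit pattern, without invoking
// floating-point comparison semantics.
inline uint64_t DoubleToBits(double value) {
  uint64_t bits;
  static_assert(sizeof(bits) == sizeof(value), "double must be 64-bit");
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

// Bitwise identity keeps 0.0 and -0.0 (and distinct NaN payloads) separate,
// which a value-based == comparison on doubles would not.
inline bool SameConstantPoolValue(double a, double b) {
  return DoubleToBits(a) == DoubleToBits(b);
}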
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 16e048accd..ece6b5048a 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -8,3 +8,5 @@ mstarzinger@chromium.org
neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Language
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index a3ee43204a..1b7f5ffca2 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -265,12 +265,20 @@ void AstExpressionRewriter::VisitAssignment(Assignment* node) {
AST_REWRITE_PROPERTY(Expression, node, value);
}
-void AstExpressionRewriter::VisitSuspend(Suspend* node) {
+void AstExpressionRewriter::VisitYield(Yield* node) {
REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, generator_object);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
+void AstExpressionRewriter::VisitYieldStar(YieldStar* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+void AstExpressionRewriter::VisitAwait(Await* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
void AstExpressionRewriter::VisitThrow(Throw* node) {
REWRITE_THIS(node);
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
index 26eef24c1d..c246fcd37d 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.h
+++ b/deps/v8/src/ast/ast-expression-rewriter.h
@@ -8,7 +8,6 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/type-info.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 202b61b17f..3ebb3df3a3 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -24,9 +24,9 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
- disable_crankshaft_reason_(kNoReason),
+ disable_fullcodegen_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
- catch_prediction_(HandlerTable::UNCAUGHT),
+ dont_self_optimize_(false),
collect_type_profile_(collect_type_profile) {
InitializeAstVisitor(stack_limit);
}
@@ -43,6 +43,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
+ void VisitSuspend(Suspend* node);
void VisitStatementsAndDeclarations(Block* node);
void VisitStatements(ZoneList<Statement*>* statements);
@@ -50,23 +51,20 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
- int ReserveIdRange(int n) {
+ int ReserveId() {
int tmp = next_id_;
- next_id_ += n;
+ next_id_ += 1;
return tmp;
}
void IncrementNodeCount() { properties_.add_node_count(1); }
- void DisableSelfOptimization() {
- properties_.flags() |= AstProperties::kDontSelfOptimize;
- }
+ void DisableSelfOptimization() { dont_self_optimize_ = true; }
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
- void DisableFullCodegenAndCrankshaft(BailoutReason reason) {
- disable_crankshaft_reason_ = reason;
- properties_.flags() |= AstProperties::kMustUseIgnitionTurbo;
+ void DisableFullCodegen(BailoutReason reason) {
+ disable_fullcodegen_reason_ = reason;
}
template <typename Node>
@@ -100,9 +98,9 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
LanguageMode language_mode_;
// The slot cache allows us to reuse certain feedback slots.
FeedbackSlotCache slot_cache_;
- BailoutReason disable_crankshaft_reason_;
+ BailoutReason disable_fullcodegen_reason_;
BailoutReason dont_optimize_reason_;
- HandlerTable::CatchPrediction catch_prediction_;
+ bool dont_self_optimize_;
bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -140,7 +138,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kDebuggerStatement);
+ DisableFullCodegen(kDebuggerStatement);
}
@@ -148,14 +146,12 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
IncrementNodeCount();
DisableOptimization(kNativeFunctionLiteral);
- node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
Visit(node->block());
Visit(node->result());
}
@@ -163,13 +159,11 @@ void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
void AstNumberingVisitor::VisitLiteral(Literal* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Literal::num_ids()));
}
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
ReserveFeedbackSlots(node);
}
@@ -178,16 +172,14 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
switch (node->var()->location()) {
case VariableLocation::LOOKUP:
- DisableFullCodegenAndCrankshaft(
- kReferenceToAVariableWhichRequiresDynamicLookup);
+ DisableFullCodegen(kReferenceToAVariableWhichRequiresDynamicLookup);
break;
case VariableLocation::MODULE:
- DisableFullCodegenAndCrankshaft(kReferenceToModuleVariable);
+ DisableFullCodegen(kReferenceToModuleVariable);
break;
default:
break;
}
- node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
@@ -203,15 +195,13 @@ void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(ThisFunction::num_ids()));
}
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kSuperReference);
- node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
+ DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->home_object());
}
@@ -219,8 +209,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kSuperReference);
- node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
+ DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
@@ -237,30 +226,33 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
IncrementNodeCount();
Visit(node->expression());
- DCHECK(!node->is_async_return() ||
- properties_.flags() & AstProperties::kMustUseIgnitionTurbo);
+ DCHECK(!node->is_async_return() || disable_fullcodegen_reason_ != kNoReason);
}
void AstNumberingVisitor::VisitSuspend(Suspend* node) {
node->set_suspend_id(suspend_count_);
suspend_count_++;
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Suspend::num_ids()));
- Visit(node->generator_object());
Visit(node->expression());
}
+void AstNumberingVisitor::VisitYield(Yield* node) { VisitSuspend(node); }
+
+void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
+ VisitSuspend(node);
+ ReserveFeedbackSlots(node);
+}
+
+void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
void AstNumberingVisitor::VisitThrow(Throw* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Throw::num_ids()));
Visit(node->exception());
}
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
VariableProxy* proxy = node->expression()->AsVariableProxy();
VisitVariableProxy(proxy, INSIDE_TYPEOF);
@@ -272,7 +264,6 @@ void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(CountOperation::num_ids()));
Visit(node->expression());
ReserveFeedbackSlots(node);
}
@@ -280,7 +271,6 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Block::num_ids()));
Scope* scope = node->scope();
if (scope != nullptr) {
LanguageModeScope language_mode_scope(this, scope->language_mode());
@@ -306,42 +296,13 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
- // To support catch prediction within async/await:
- //
- // The AstNumberingVisitor is when catch prediction currently occurs, and it
- // is the only common point that has access to this information. The parser
- // just doesn't know yet. Take the following two cases of catch prediction:
- //
- // try { await fn(); } catch (e) { }
- // try { await fn(); } finally { }
- //
- // When parsing the await that we want to mark as caught or uncaught, it's
- // not yet known whether it will be followed by a 'finally' or a 'catch.
- // The AstNumberingVisitor is what learns whether it is caught. To make
- // the information available later to the runtime, the AstNumberingVisitor
- // has to stash it somewhere. Changing the runtime function into another
- // one in ast-numbering seemed like a simple and straightforward solution to
- // that problem.
- if (node->is_jsruntime() && catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
- switch (node->context_index()) {
- case Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX:
- node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
- break;
- case Context::ASYNC_GENERATOR_AWAIT_CAUGHT:
- node->set_context_index(Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT);
- break;
- default:
- break;
- }
- }
}
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kWithStatement);
+ DisableFullCodegen(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
@@ -350,7 +311,7 @@ void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
- node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
+ node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
Visit(node->cond());
@@ -361,7 +322,7 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
- node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
+ node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
Visit(node->body());
@@ -372,29 +333,15 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kTryCatchStatement);
- {
- const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
- // This node uses its own prediction, unless it's "uncaught", in which case
- // we adopt the prediction of the outer try-block.
- HandlerTable::CatchPrediction catch_prediction = node->catch_prediction();
- if (catch_prediction != HandlerTable::UNCAUGHT) {
- catch_prediction_ = catch_prediction;
- }
- node->set_catch_prediction(catch_prediction_);
- Visit(node->try_block());
- catch_prediction_ = old_prediction;
- }
+ DisableFullCodegen(kTryCatchStatement);
+ Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kTryFinallyStatement);
- // We can't know whether the finally block will override ("catch") an
- // exception thrown in the try block, so we just adopt the outer prediction.
- node->set_catch_prediction(catch_prediction_);
+ DisableFullCodegen(kTryFinallyStatement);
Visit(node->try_block());
Visit(node->finally_block());
}
@@ -402,7 +349,6 @@ void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
void AstNumberingVisitor::VisitPropertyReference(Property* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Property::num_ids()));
Visit(node->key());
Visit(node->obj());
}
@@ -426,7 +372,6 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Assignment::num_ids()));
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
VisitReference(node->target());
@@ -437,7 +382,6 @@ void AstNumberingVisitor::VisitAssignment(Assignment* node) {
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(BinaryOperation::num_ids()));
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
@@ -446,7 +390,6 @@ void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(CompareOperation::num_ids()));
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
@@ -455,8 +398,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
// We can only get here from spread calls currently.
- DisableFullCodegenAndCrankshaft(kSpreadCall);
- node->set_base_id(ReserveIdRange(Spread::num_ids()));
+ DisableFullCodegen(kSpreadCall);
Visit(node->expression());
}
@@ -466,8 +408,7 @@ void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kGetIterator);
- node->set_base_id(ReserveIdRange(GetIterator::num_ids()));
+ DisableFullCodegen(kGetIterator);
Visit(node->iterable());
ReserveFeedbackSlots(node);
}
@@ -475,14 +416,14 @@ void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kDynamicImport);
+ DisableFullCodegen(kDynamicImport);
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
- node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
+ node->set_osr_id(ReserveId());
Visit(node->enumerable()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->each());
@@ -494,8 +435,8 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kForOfStatement);
- node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
+ DisableFullCodegen(kForOfStatement);
+ node->set_osr_id(ReserveId());
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
@@ -508,7 +449,6 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
void AstNumberingVisitor::VisitConditional(Conditional* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(Conditional::num_ids()));
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
@@ -517,7 +457,6 @@ void AstNumberingVisitor::VisitConditional(Conditional* node) {
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(IfStatement::num_ids()));
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
@@ -528,7 +467,6 @@ void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(SwitchStatement::num_ids()));
Visit(node->tag());
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++) {
@@ -539,7 +477,6 @@ void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(CaseClause::num_ids()));
if (!node->is_default()) Visit(node->label());
VisitStatements(node->statements());
ReserveFeedbackSlots(node);
@@ -549,7 +486,7 @@ void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
- node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
+ node->set_osr_id(ReserveId());
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
if (node->cond() != NULL) Visit(node->cond());
@@ -561,8 +498,7 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
- DisableFullCodegenAndCrankshaft(kClassLiteral);
- node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
+ DisableFullCodegen(kClassLiteral);
LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
@@ -578,7 +514,6 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
@@ -591,15 +526,13 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
- if (node->is_computed_name())
- DisableFullCodegenAndCrankshaft(kComputedPropertyName);
+ if (node->is_computed_name()) DisableFullCodegen(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
@@ -610,11 +543,10 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
if (node->is_possibly_eval()) {
- DisableFullCodegenAndCrankshaft(kFunctionCallsEval);
+ DisableFullCodegen(kFunctionCallsEval);
}
IncrementNodeCount();
ReserveFeedbackSlots(node);
- node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
}
@@ -623,7 +555,6 @@ void AstNumberingVisitor::VisitCall(Call* node) {
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
- node->set_base_id(ReserveIdRange(CallNew::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
}
@@ -651,7 +582,6 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
eager_literals_->Add(new (zone())
@@ -672,7 +602,6 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
void AstNumberingVisitor::VisitRewritableExpression(
RewritableExpression* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(RewritableExpression::num_ids()));
Visit(node->expression());
}
@@ -683,24 +612,24 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
- DisableFullCodegenAndCrankshaft(kSuperReference);
+ DisableFullCodegen(kSuperReference);
}
if (scope->arguments() != nullptr &&
!scope->arguments()->IsStackAllocated()) {
- DisableFullCodegenAndCrankshaft(kContextAllocatedArguments);
+ DisableFullCodegen(kContextAllocatedArguments);
}
if (scope->rest_parameter() != nullptr) {
- DisableFullCodegenAndCrankshaft(kRestParameter);
+ DisableFullCodegen(kRestParameter);
}
if (IsResumableFunction(node->kind())) {
- DisableFullCodegenAndCrankshaft(kGenerator);
+ DisableFullCodegen(kGenerator);
}
if (IsClassConstructor(node->kind())) {
- DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
+ DisableFullCodegen(kClassConstructorFunction);
}
LanguageModeScope language_mode_scope(this, node->language_mode());
@@ -716,8 +645,12 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
- if (FLAG_trace_opt && !FLAG_turbo) {
- if (disable_crankshaft_reason_ != kNoReason) {
+ if (dont_self_optimize_) {
+ node->set_dont_self_optimize();
+ }
+ if (disable_fullcodegen_reason_ != kNoReason) {
+ node->set_must_use_ignition();
+ if (FLAG_trace_opt && FLAG_stress_fullcodegen) {
// TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
// the function to be accessed in the below print. This DCHECK will fail
// if we move ast numbering off the main thread, but that won't be before
@@ -725,9 +658,9 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
AllowHandleDereference allow_deref;
DCHECK(!node->debug_name().is_null());
- PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
+ PrintF("[enforcing Ignition for %s because: %s\n",
node->debug_name()->ToCString().get(),
- GetBailoutReason(disable_crankshaft_reason_));
+ GetBailoutReason(disable_fullcodegen_reason_));
}
}
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
new file mode 100644
index 0000000000..17ebc09e76
--- /dev/null
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -0,0 +1,236 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_SOURCE_RANGES_H_
+#define V8_AST_AST_SOURCE_RANGES_H_
+
+#include "src/ast/ast.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+// Specifies a range within the source code. {start} is 0-based and inclusive,
+// {end} is 0-based and exclusive.
+struct SourceRange {
+ SourceRange() : SourceRange(kNoSourcePosition, kNoSourcePosition) {}
+ SourceRange(int start, int end) : start(start), end(end) {}
+ bool IsEmpty() const { return start == kNoSourcePosition; }
+ static SourceRange Empty() { return SourceRange(); }
+ static SourceRange OpenEnded(int32_t start) {
+ return SourceRange(start, kNoSourcePosition);
+ }
+ static SourceRange ContinuationOf(const SourceRange& that) {
+ return that.IsEmpty() ? Empty() : OpenEnded(that.end);
+ }
+ int32_t start, end;
+};
+
+// The list of ast node kinds that have associated source ranges.
+#define AST_SOURCE_RANGE_LIST(V) \
+ V(Block) \
+ V(CaseClause) \
+ V(Conditional) \
+ V(IfStatement) \
+ V(IterationStatement) \
+ V(JumpStatement) \
+ V(SwitchStatement) \
+ V(Throw) \
+ V(TryCatchStatement) \
+ V(TryFinallyStatement)
+
+enum class SourceRangeKind {
+ kBody,
+ kCatch,
+ kContinuation,
+ kElse,
+ kFinally,
+ kThen,
+};
+
+class AstNodeSourceRanges : public ZoneObject {
+ public:
+ virtual ~AstNodeSourceRanges() {}
+ virtual SourceRange GetRange(SourceRangeKind kind) = 0;
+};
+
+class ContinuationSourceRanges : public AstNodeSourceRanges {
+ public:
+ explicit ContinuationSourceRanges(int32_t continuation_position)
+ : continuation_position_(continuation_position) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ DCHECK(kind == SourceRangeKind::kContinuation);
+ return SourceRange::OpenEnded(continuation_position_);
+ }
+
+ private:
+ int32_t continuation_position_;
+};
+
+class BlockSourceRanges final : public ContinuationSourceRanges {
+ public:
+ explicit BlockSourceRanges(int32_t continuation_position)
+ : ContinuationSourceRanges(continuation_position) {}
+};
+
+class CaseClauseSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit CaseClauseSourceRanges(const SourceRange& body_range)
+ : body_range_(body_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ DCHECK(kind == SourceRangeKind::kBody);
+ return body_range_;
+ }
+
+ private:
+ SourceRange body_range_;
+};
+
+class ConditionalSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit ConditionalSourceRanges(const SourceRange& then_range,
+ const SourceRange& else_range)
+ : then_range_(then_range), else_range_(else_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ switch (kind) {
+ case SourceRangeKind::kThen:
+ return then_range_;
+ case SourceRangeKind::kElse:
+ return else_range_;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ private:
+ SourceRange then_range_;
+ SourceRange else_range_;
+};
+
+class IfStatementSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit IfStatementSourceRanges(const SourceRange& then_range,
+ const SourceRange& else_range)
+ : then_range_(then_range), else_range_(else_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ switch (kind) {
+ case SourceRangeKind::kElse:
+ return else_range_;
+ case SourceRangeKind::kThen:
+ return then_range_;
+ case SourceRangeKind::kContinuation: {
+ const SourceRange& trailing_range =
+ else_range_.IsEmpty() ? then_range_ : else_range_;
+ return SourceRange::ContinuationOf(trailing_range);
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ private:
+ SourceRange then_range_;
+ SourceRange else_range_;
+};
+
+class IterationStatementSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit IterationStatementSourceRanges(const SourceRange& body_range)
+ : body_range_(body_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ switch (kind) {
+ case SourceRangeKind::kBody:
+ return body_range_;
+ case SourceRangeKind::kContinuation:
+ return SourceRange::ContinuationOf(body_range_);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ private:
+ SourceRange body_range_;
+};
+
+class JumpStatementSourceRanges final : public ContinuationSourceRanges {
+ public:
+ explicit JumpStatementSourceRanges(int32_t continuation_position)
+ : ContinuationSourceRanges(continuation_position) {}
+};
+
+class SwitchStatementSourceRanges final : public ContinuationSourceRanges {
+ public:
+ explicit SwitchStatementSourceRanges(int32_t continuation_position)
+ : ContinuationSourceRanges(continuation_position) {}
+};
+
+class ThrowSourceRanges final : public ContinuationSourceRanges {
+ public:
+ explicit ThrowSourceRanges(int32_t continuation_position)
+ : ContinuationSourceRanges(continuation_position) {}
+};
+
+class TryCatchStatementSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit TryCatchStatementSourceRanges(const SourceRange& catch_range)
+ : catch_range_(catch_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ DCHECK(kind == SourceRangeKind::kCatch);
+ return catch_range_;
+ }
+
+ private:
+ SourceRange catch_range_;
+};
+
+class TryFinallyStatementSourceRanges final : public AstNodeSourceRanges {
+ public:
+ explicit TryFinallyStatementSourceRanges(const SourceRange& finally_range)
+ : finally_range_(finally_range) {}
+
+ SourceRange GetRange(SourceRangeKind kind) {
+ DCHECK(kind == SourceRangeKind::kFinally);
+ return finally_range_;
+ }
+
+ private:
+ SourceRange finally_range_;
+};
+
+// Maps ast node pointers to associated source ranges. The parser creates these
+// mappings and the bytecode generator consumes them.
+class SourceRangeMap final : public ZoneObject {
+ public:
+ explicit SourceRangeMap(Zone* zone) : map_(zone) {}
+
+ AstNodeSourceRanges* Find(AstNode* node) {
+ auto it = map_.find(node);
+ if (it == map_.end()) return nullptr;
+ return it->second;
+ }
+
+// Type-checked insertion.
+#define DEFINE_MAP_INSERT(type) \
+ void Insert(type* node, type##SourceRanges* ranges) { \
+ map_.emplace(node, ranges); \
+ }
+ AST_SOURCE_RANGE_LIST(DEFINE_MAP_INSERT)
+#undef DEFINE_MAP_INSERT
+
+ private:
+ ZoneMap<AstNode*, AstNodeSourceRanges*> map_;
+};
+
+#undef AST_SOURCE_RANGE_LIST
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_SOURCE_RANGES_H_
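A minimal sketch of how the two halves of this new header fit together: the parser would record a ranges object in the SourceRangeMap, and the bytecode generator would later look it up by node and kind. This snippet is illustrative only and not part of the patch; the zone, statement, and range values are assumed to come from the parser.

#include "src/ast/ast-source-ranges.h"

using namespace v8::internal;

// Producer side (parser): associate then/else ranges with an IfStatement.
void RecordIfRanges(SourceRangeMap* map, Zone* zone, IfStatement* stmt,
                    const SourceRange& then_range,
                    const SourceRange& else_range) {
  map->Insert(stmt,
              new (zone) IfStatementSourceRanges(then_range, else_range));
}

// Consumer side (bytecode generator): query the range for a given kind.
SourceRange ContinuationAfterIf(SourceRangeMap* map, IfStatement* stmt) {
  AstNodeSourceRanges* ranges = map->Find(stmt);
  if (ranges == nullptr) return SourceRange::Empty();
  // kContinuation is open-ended and starts where the trailing branch ends.
  return ranges->GetRange(SourceRangeKind::kContinuation);
}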
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 390ba6db3a..5eee300cc3 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -136,6 +136,9 @@ void AstTraversalVisitor<Subclass>::VisitFunctionDeclaration(
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitBlock(Block* stmt) {
PROCESS_NODE(stmt);
+ if (stmt->scope() != nullptr) {
+ RECURSE_EXPRESSION(VisitDeclarations(stmt->scope()->declarations()));
+ }
RECURSE(VisitStatements(stmt->statements()));
}
@@ -357,9 +360,20 @@ void AstTraversalVisitor<Subclass>::VisitAssignment(Assignment* expr) {
}
template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitSuspend(Suspend* expr) {
+void AstTraversalVisitor<Subclass>::VisitYield(Yield* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitYieldStar(YieldStar* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitAwait(Await* expr) {
PROCESS_EXPRESSION(expr);
- RECURSE_EXPRESSION(Visit(expr->generator_object()));
RECURSE_EXPRESSION(Visit(expr->expression()));
}
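With Suspend split into Yield, YieldStar, and Await, a traversal subclass can now hook each suspend flavour separately instead of overriding a single VisitSuspend. A minimal sketch, assuming the usual CRTP subclass pattern for AstTraversalVisitor; the AwaitCounter class below is hypothetical and not part of the patch.

// Counts Await expressions while still walking the rest of the tree.
class AwaitCounter final : public AstTraversalVisitor<AwaitCounter> {
 public:
  using AstTraversalVisitor<AwaitCounter>::AstTraversalVisitor;

  void VisitAwait(Await* expr) {
    ++count_;
    // Delegate to the base visitor so the awaited expression is traversed.
    AstTraversalVisitor<AwaitCounter>::VisitAwait(expr);
  }

  int count() const { return count_; }

 private:
  int count_ = 0;
};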
diff --git a/deps/v8/src/ast/ast-type-bounds.h b/deps/v8/src/ast/ast-type-bounds.h
deleted file mode 100644
index 0d1a3c8498..0000000000
--- a/deps/v8/src/ast/ast-type-bounds.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// A container to associate type bounds with AST Expression nodes.
-
-#ifndef V8_AST_AST_TYPE_BOUNDS_H_
-#define V8_AST_AST_TYPE_BOUNDS_H_
-
-#include "src/ast/ast-types.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class Expression;
-
-class AstTypeBounds {
- public:
- explicit AstTypeBounds(Zone* zone) : bounds_map_(zone) {}
- ~AstTypeBounds() {}
-
- AstBounds get(Expression* expression) const {
- ZoneMap<Expression*, AstBounds>::const_iterator i =
- bounds_map_.find(expression);
- return (i != bounds_map_.end()) ? i->second : AstBounds::Unbounded();
- }
-
- void set(Expression* expression, AstBounds bounds) {
- bounds_map_[expression] = bounds;
- }
-
- private:
- ZoneMap<Expression*, AstBounds> bounds_map_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_TYPE_BOUNDS_H_
diff --git a/deps/v8/src/ast/ast-types.cc b/deps/v8/src/ast/ast-types.cc
deleted file mode 100644
index 8ff1d88351..0000000000
--- a/deps/v8/src/ast/ast-types.cc
+++ /dev/null
@@ -1,1308 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <iomanip>
-
-#include "src/ast/ast-types.h"
-
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-
-// NOTE: If code is marked as being a "shortcut", this means that removing
-// the code won't affect the semantics of the surrounding function definition.
-
-// static
-bool AstType::IsInteger(i::Object* x) {
- return x->IsNumber() && AstType::IsInteger(x->Number());
-}
-
-// -----------------------------------------------------------------------------
-// Range-related helper functions.
-
-bool AstRangeType::Limits::IsEmpty() { return this->min > this->max; }
-
-AstRangeType::Limits AstRangeType::Limits::Intersect(Limits lhs, Limits rhs) {
- DisallowHeapAllocation no_allocation;
- Limits result(lhs);
- if (lhs.min < rhs.min) result.min = rhs.min;
- if (lhs.max > rhs.max) result.max = rhs.max;
- return result;
-}
-
-AstRangeType::Limits AstRangeType::Limits::Union(Limits lhs, Limits rhs) {
- DisallowHeapAllocation no_allocation;
- if (lhs.IsEmpty()) return rhs;
- if (rhs.IsEmpty()) return lhs;
- Limits result(lhs);
- if (lhs.min > rhs.min) result.min = rhs.min;
- if (lhs.max < rhs.max) result.max = rhs.max;
- return result;
-}
-
-bool AstType::Overlap(AstRangeType* lhs, AstRangeType* rhs) {
- DisallowHeapAllocation no_allocation;
- return !AstRangeType::Limits::Intersect(AstRangeType::Limits(lhs),
- AstRangeType::Limits(rhs))
- .IsEmpty();
-}
-
-bool AstType::Contains(AstRangeType* lhs, AstRangeType* rhs) {
- DisallowHeapAllocation no_allocation;
- return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
-}
-
-bool AstType::Contains(AstRangeType* lhs, AstConstantType* rhs) {
- DisallowHeapAllocation no_allocation;
- return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
- rhs->Value()->Number() <= lhs->Max();
-}
-
-bool AstType::Contains(AstRangeType* range, i::Object* val) {
- DisallowHeapAllocation no_allocation;
- return IsInteger(val) && range->Min() <= val->Number() &&
- val->Number() <= range->Max();
-}
-
-// -----------------------------------------------------------------------------
-// Min and Max computation.
-
-double AstType::Min() {
- DCHECK(this->SemanticIs(Number()));
- if (this->IsBitset()) return AstBitsetType::Min(this->AsBitset());
- if (this->IsUnion()) {
- double min = +V8_INFINITY;
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- min = std::min(min, this->AsUnion()->Get(i)->Min());
- }
- return min;
- }
- if (this->IsRange()) return this->AsRange()->Min();
- if (this->IsConstant()) return this->AsConstant()->Value()->Number();
- UNREACHABLE();
- return 0;
-}
-
-double AstType::Max() {
- DCHECK(this->SemanticIs(Number()));
- if (this->IsBitset()) return AstBitsetType::Max(this->AsBitset());
- if (this->IsUnion()) {
- double max = -V8_INFINITY;
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- max = std::max(max, this->AsUnion()->Get(i)->Max());
- }
- return max;
- }
- if (this->IsRange()) return this->AsRange()->Max();
- if (this->IsConstant()) return this->AsConstant()->Value()->Number();
- UNREACHABLE();
- return 0;
-}
-
-// -----------------------------------------------------------------------------
-// Glb and lub computation.
-
-// The largest bitset subsumed by this type.
-AstType::bitset AstBitsetType::Glb(AstType* type) {
- DisallowHeapAllocation no_allocation;
- // Fast case.
- if (IsBitset(type)) {
- return type->AsBitset();
- } else if (type->IsUnion()) {
- SLOW_DCHECK(type->AsUnion()->Wellformed());
- return type->AsUnion()->Get(0)->BitsetGlb() |
- AST_SEMANTIC(type->AsUnion()->Get(1)->BitsetGlb()); // Shortcut.
- } else if (type->IsRange()) {
- bitset glb = AST_SEMANTIC(
- AstBitsetType::Glb(type->AsRange()->Min(), type->AsRange()->Max()));
- return glb | AST_REPRESENTATION(type->BitsetLub());
- } else {
- return type->Representation();
- }
-}
-
-// The smallest bitset subsuming this type, possibly not a proper one.
-AstType::bitset AstBitsetType::Lub(AstType* type) {
- DisallowHeapAllocation no_allocation;
- if (IsBitset(type)) return type->AsBitset();
- if (type->IsUnion()) {
- // Take the representation from the first element, which is always
- // a bitset.
- int bitset = type->AsUnion()->Get(0)->BitsetLub();
- for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
- // Other elements only contribute their semantic part.
- bitset |= AST_SEMANTIC(type->AsUnion()->Get(i)->BitsetLub());
- }
- return bitset;
- }
- if (type->IsClass()) return type->AsClass()->Lub();
- if (type->IsConstant()) return type->AsConstant()->Lub();
- if (type->IsRange()) return type->AsRange()->Lub();
- if (type->IsContext()) return kOtherInternal & kTaggedPointer;
- if (type->IsArray()) return kOtherObject;
- if (type->IsFunction()) return kFunction;
- if (type->IsTuple()) return kOtherInternal;
- UNREACHABLE();
- return kNone;
-}
-
-AstType::bitset AstBitsetType::Lub(i::Map* map) {
- DisallowHeapAllocation no_allocation;
- switch (map->instance_type()) {
- case STRING_TYPE:
- case ONE_BYTE_STRING_TYPE:
- case CONS_STRING_TYPE:
- case CONS_ONE_BYTE_STRING_TYPE:
- case THIN_STRING_TYPE:
- case THIN_ONE_BYTE_STRING_TYPE:
- case SLICED_STRING_TYPE:
- case SLICED_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ONE_BYTE_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kOtherString;
- case INTERNALIZED_STRING_TYPE:
- case ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kInternalizedString;
- case SYMBOL_TYPE:
- return kSymbol;
- case ODDBALL_TYPE: {
- Heap* heap = map->GetHeap();
- if (map == heap->undefined_map()) return kUndefined;
- if (map == heap->null_map()) return kNull;
- if (map == heap->boolean_map()) return kBoolean;
- if (map == heap->the_hole_map()) return kHole;
- DCHECK(map == heap->uninitialized_map() ||
- map == heap->termination_exception_map() ||
- map == heap->arguments_marker_map() ||
- map == heap->optimized_out_map() ||
- map == heap->stale_register_map());
- return kOtherInternal & kTaggedPointer;
- }
- case HEAP_NUMBER_TYPE:
- return kNumber & kTaggedPointer;
- case JS_OBJECT_TYPE:
- case JS_ARGUMENTS_TYPE:
- case JS_ERROR_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- if (map->is_undetectable()) return kOtherUndetectable;
- return kOtherObject;
- case JS_VALUE_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_DATE_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_NAMESPACE_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_ARRAY_TYPE:
- case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
- case JS_STRING_ITERATOR_TYPE:
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
-
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
-
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case JS_PROMISE_CAPABILITY_TYPE:
- case JS_PROMISE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
- DCHECK(!map->is_undetectable());
- return kOtherObject;
- case JS_FUNCTION_TYPE:
- DCHECK(!map->is_undetectable());
- return kFunction;
- case JS_PROXY_TYPE:
- DCHECK(!map->is_undetectable());
- return kProxy;
- case MAP_TYPE:
- case ALLOCATION_SITE_TYPE:
- case ACCESSOR_INFO_TYPE:
- case SHARED_FUNCTION_INFO_TYPE:
- case ACCESSOR_PAIR_TYPE:
- case FIXED_ARRAY_TYPE:
- case FIXED_DOUBLE_ARRAY_TYPE:
- case BYTE_ARRAY_TYPE:
- case BYTECODE_ARRAY_TYPE:
- case TRANSITION_ARRAY_TYPE:
- case FOREIGN_TYPE:
- case SCRIPT_TYPE:
- case CODE_TYPE:
- case PROPERTY_CELL_TYPE:
- case MODULE_TYPE:
- case MODULE_INFO_ENTRY_TYPE:
- case ASYNC_GENERATOR_REQUEST_TYPE:
- return kOtherInternal & kTaggedPointer;
-
- // Remaining instance types are unsupported for now. If any of them do
- // require bit set types, they should get kOtherInternal & kTaggedPointer.
- case MUTABLE_HEAP_NUMBER_TYPE:
- case FREE_SPACE_TYPE:
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE:
-
- TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
-#undef FIXED_TYPED_ARRAY_CASE
- case FILLER_TYPE:
- case ACCESS_CHECK_INFO_TYPE:
- case INTERCEPTOR_INFO_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
- case PROMISE_REACTION_JOB_INFO_TYPE:
- case FUNCTION_TEMPLATE_INFO_TYPE:
- case OBJECT_TEMPLATE_INFO_TYPE:
- case ALLOCATION_MEMENTO_TYPE:
- case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case DEBUG_INFO_TYPE:
- case STACK_FRAME_INFO_TYPE:
- case CELL_TYPE:
- case WEAK_CELL_TYPE:
- case PROTOTYPE_INFO_TYPE:
- case TUPLE2_TYPE:
- case TUPLE3_TYPE:
- case CONTEXT_EXTENSION_TYPE:
- case PADDING_TYPE_1:
- case PADDING_TYPE_2:
- case PADDING_TYPE_3:
- case PADDING_TYPE_4:
- UNREACHABLE();
- return kNone;
- }
- UNREACHABLE();
- return kNone;
-}
-
-AstType::bitset AstBitsetType::Lub(i::Object* value) {
- DisallowHeapAllocation no_allocation;
- if (value->IsNumber()) {
- return Lub(value->Number()) &
- (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
- }
- return Lub(i::HeapObject::cast(value)->map());
-}
-
-AstType::bitset AstBitsetType::Lub(double value) {
- DisallowHeapAllocation no_allocation;
- if (i::IsMinusZero(value)) return kMinusZero;
- if (std::isnan(value)) return kNaN;
- if (IsUint32Double(value) || IsInt32Double(value)) return Lub(value, value);
- return kOtherNumber;
-}
-
-// Minimum values of plain numeric bitsets.
-const AstBitsetType::Boundary AstBitsetType::BoundariesArray[] = {
- {kOtherNumber, kPlainNumber, -V8_INFINITY},
- {kOtherSigned32, kNegative32, kMinInt},
- {kNegative31, kNegative31, -0x40000000},
- {kUnsigned30, kUnsigned30, 0},
- {kOtherUnsigned31, kUnsigned31, 0x40000000},
- {kOtherUnsigned32, kUnsigned32, 0x80000000},
- {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
-
-const AstBitsetType::Boundary* AstBitsetType::Boundaries() {
- return BoundariesArray;
-}
-
-size_t AstBitsetType::BoundariesSize() {
- // Windows doesn't like arraysize here.
- // return arraysize(BoundariesArray);
- return 7;
-}
-
-AstType::bitset AstBitsetType::ExpandInternals(AstType::bitset bits) {
- DisallowHeapAllocation no_allocation;
- if (!(bits & AST_SEMANTIC(kPlainNumber))) return bits; // Shortcut.
- const Boundary* boundaries = Boundaries();
- for (size_t i = 0; i < BoundariesSize(); ++i) {
- DCHECK(AstBitsetType::Is(boundaries[i].internal, boundaries[i].external));
- if (bits & AST_SEMANTIC(boundaries[i].internal))
- bits |= AST_SEMANTIC(boundaries[i].external);
- }
- return bits;
-}
-
-AstType::bitset AstBitsetType::Lub(double min, double max) {
- DisallowHeapAllocation no_allocation;
- int lub = kNone;
- const Boundary* mins = Boundaries();
-
- for (size_t i = 1; i < BoundariesSize(); ++i) {
- if (min < mins[i].min) {
- lub |= mins[i - 1].internal;
- if (max < mins[i].min) return lub;
- }
- }
- return lub | mins[BoundariesSize() - 1].internal;
-}
-
-AstType::bitset AstBitsetType::NumberBits(bitset bits) {
- return AST_SEMANTIC(bits & kPlainNumber);
-}
-
-AstType::bitset AstBitsetType::Glb(double min, double max) {
- DisallowHeapAllocation no_allocation;
- int glb = kNone;
- const Boundary* mins = Boundaries();
-
- // If the range does not touch 0, the bound is empty.
- if (max < -1 || min > 0) return glb;
-
- for (size_t i = 1; i + 1 < BoundariesSize(); ++i) {
- if (min <= mins[i].min) {
- if (max + 1 < mins[i + 1].min) break;
- glb |= mins[i].external;
- }
- }
- // OtherNumber also contains float numbers, so it can never be
- // in the greatest lower bound.
- return glb & ~(AST_SEMANTIC(kOtherNumber));
-}
-
-double AstBitsetType::Min(bitset bits) {
- DisallowHeapAllocation no_allocation;
- DCHECK(Is(AST_SEMANTIC(bits), kNumber));
- const Boundary* mins = Boundaries();
- bool mz = AST_SEMANTIC(bits & kMinusZero);
- for (size_t i = 0; i < BoundariesSize(); ++i) {
- if (Is(AST_SEMANTIC(mins[i].internal), bits)) {
- return mz ? std::min(0.0, mins[i].min) : mins[i].min;
- }
- }
- if (mz) return 0;
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-double AstBitsetType::Max(bitset bits) {
- DisallowHeapAllocation no_allocation;
- DCHECK(Is(AST_SEMANTIC(bits), kNumber));
- const Boundary* mins = Boundaries();
- bool mz = AST_SEMANTIC(bits & kMinusZero);
- if (AstBitsetType::Is(AST_SEMANTIC(mins[BoundariesSize() - 1].internal),
- bits)) {
- return +V8_INFINITY;
- }
- for (size_t i = BoundariesSize() - 1; i-- > 0;) {
- if (Is(AST_SEMANTIC(mins[i].internal), bits)) {
- return mz ? std::max(0.0, mins[i + 1].min - 1) : mins[i + 1].min - 1;
- }
- }
- if (mz) return 0;
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// -----------------------------------------------------------------------------
-// Predicates.
-
-bool AstType::SimplyEquals(AstType* that) {
- DisallowHeapAllocation no_allocation;
- if (this->IsClass()) {
- return that->IsClass() &&
- *this->AsClass()->Map() == *that->AsClass()->Map();
- }
- if (this->IsConstant()) {
- return that->IsConstant() &&
- *this->AsConstant()->Value() == *that->AsConstant()->Value();
- }
- if (this->IsContext()) {
- return that->IsContext() &&
- this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
- }
- if (this->IsArray()) {
- return that->IsArray() &&
- this->AsArray()->Element()->Equals(that->AsArray()->Element());
- }
- if (this->IsFunction()) {
- if (!that->IsFunction()) return false;
- AstFunctionType* this_fun = this->AsFunction();
- AstFunctionType* that_fun = that->AsFunction();
- if (this_fun->Arity() != that_fun->Arity() ||
- !this_fun->Result()->Equals(that_fun->Result()) ||
- !this_fun->Receiver()->Equals(that_fun->Receiver())) {
- return false;
- }
- for (int i = 0, n = this_fun->Arity(); i < n; ++i) {
- if (!this_fun->Parameter(i)->Equals(that_fun->Parameter(i))) return false;
- }
- return true;
- }
- if (this->IsTuple()) {
- if (!that->IsTuple()) return false;
- AstTupleType* this_tuple = this->AsTuple();
- AstTupleType* that_tuple = that->AsTuple();
- if (this_tuple->Arity() != that_tuple->Arity()) {
- return false;
- }
- for (int i = 0, n = this_tuple->Arity(); i < n; ++i) {
- if (!this_tuple->Element(i)->Equals(that_tuple->Element(i))) return false;
- }
- return true;
- }
- UNREACHABLE();
- return false;
-}
-
-AstType::bitset AstType::Representation() {
- return AST_REPRESENTATION(this->BitsetLub());
-}
-
-// Check if [this] <= [that].
-bool AstType::SlowIs(AstType* that) {
- DisallowHeapAllocation no_allocation;
-
- // Fast bitset cases
- if (that->IsBitset()) {
- return AstBitsetType::Is(this->BitsetLub(), that->AsBitset());
- }
-
- if (this->IsBitset()) {
- return AstBitsetType::Is(this->AsBitset(), that->BitsetGlb());
- }
-
- // Check the representations.
- if (!AstBitsetType::Is(Representation(), that->Representation())) {
- return false;
- }
-
- // Check the semantic part.
- return SemanticIs(that);
-}
-
-// Check if AST_SEMANTIC([this]) <= AST_SEMANTIC([that]). The result of the
-// method should be independent of the representation axis of the types.
-bool AstType::SemanticIs(AstType* that) {
- DisallowHeapAllocation no_allocation;
-
- if (this == that) return true;
-
- if (that->IsBitset()) {
- return AstBitsetType::Is(AST_SEMANTIC(this->BitsetLub()), that->AsBitset());
- }
- if (this->IsBitset()) {
- return AstBitsetType::Is(AST_SEMANTIC(this->AsBitset()), that->BitsetGlb());
- }
-
- // (T1 \/ ... \/ Tn) <= T if (T1 <= T) /\ ... /\ (Tn <= T)
- if (this->IsUnion()) {
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (!this->AsUnion()->Get(i)->SemanticIs(that)) return false;
- }
- return true;
- }
-
- // T <= (T1 \/ ... \/ Tn) if (T <= T1) \/ ... \/ (T <= Tn)
- if (that->IsUnion()) {
- for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
- if (this->SemanticIs(that->AsUnion()->Get(i))) return true;
- if (i > 1 && this->IsRange()) return false; // Shortcut.
- }
- return false;
- }
-
- if (that->IsRange()) {
- return (this->IsRange() && Contains(that->AsRange(), this->AsRange())) ||
- (this->IsConstant() &&
- Contains(that->AsRange(), this->AsConstant()));
- }
- if (this->IsRange()) return false;
-
- return this->SimplyEquals(that);
-}
-
-// Most precise _current_ type of a value (usually its class).
-AstType* AstType::NowOf(i::Object* value, Zone* zone) {
- if (value->IsSmi() ||
- i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
- return Of(value, zone);
- }
- return Class(i::handle(i::HeapObject::cast(value)->map()), zone);
-}
-
-bool AstType::NowContains(i::Object* value) {
- DisallowHeapAllocation no_allocation;
- if (this->IsAny()) return true;
- if (value->IsHeapObject()) {
- i::Map* map = i::HeapObject::cast(value)->map();
- for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
- if (*it.Current() == map) return true;
- }
- }
- return this->Contains(value);
-}
-
-bool AstType::NowIs(AstType* that) {
- DisallowHeapAllocation no_allocation;
-
- // TODO(rossberg): this is incorrect for
- // Union(Constant(V), T)->NowIs(Class(M))
- // but fuzzing does not cover that!
- if (this->IsConstant()) {
- i::Object* object = *this->AsConstant()->Value();
- if (object->IsHeapObject()) {
- i::Map* map = i::HeapObject::cast(object)->map();
- for (Iterator<i::Map> it = that->Classes(); !it.Done(); it.Advance()) {
- if (*it.Current() == map) return true;
- }
- }
- }
- return this->Is(that);
-}
-
-// Check if [this] contains only (currently) stable classes.
-bool AstType::NowStable() {
- DisallowHeapAllocation no_allocation;
- return !this->IsClass() || this->AsClass()->Map()->is_stable();
-}
-
-// Check if [this] and [that] overlap.
-bool AstType::Maybe(AstType* that) {
- DisallowHeapAllocation no_allocation;
-
- // Take care of the representation part (and also approximate
- // the semantic part).
- if (!AstBitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
- return false;
-
- return SemanticMaybe(that);
-}
-
-bool AstType::SemanticMaybe(AstType* that) {
- DisallowHeapAllocation no_allocation;
-
- // (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
- if (this->IsUnion()) {
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (this->AsUnion()->Get(i)->SemanticMaybe(that)) return true;
- }
- return false;
- }
-
- // T overlaps (T1 \/ ... \/ Tn) if (T overlaps T1) \/ ... \/ (T overlaps Tn)
- if (that->IsUnion()) {
- for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
- if (this->SemanticMaybe(that->AsUnion()->Get(i))) return true;
- }
- return false;
- }
-
- if (!AstBitsetType::SemanticIsInhabited(this->BitsetLub() &
- that->BitsetLub()))
- return false;
-
- if (this->IsBitset() && that->IsBitset()) return true;
-
- if (this->IsClass() != that->IsClass()) return true;
-
- if (this->IsRange()) {
- if (that->IsConstant()) {
- return Contains(this->AsRange(), that->AsConstant());
- }
- if (that->IsRange()) {
- return Overlap(this->AsRange(), that->AsRange());
- }
- if (that->IsBitset()) {
- bitset number_bits = AstBitsetType::NumberBits(that->AsBitset());
- if (number_bits == AstBitsetType::kNone) {
- return false;
- }
- double min = std::max(AstBitsetType::Min(number_bits), this->Min());
- double max = std::min(AstBitsetType::Max(number_bits), this->Max());
- return min <= max;
- }
- }
- if (that->IsRange()) {
- return that->SemanticMaybe(this); // This case is handled above.
- }
-
- if (this->IsBitset() || that->IsBitset()) return true;
-
- return this->SimplyEquals(that);
-}
-
-// Return the range in [this], or [NULL].
-AstType* AstType::GetRange() {
- DisallowHeapAllocation no_allocation;
- if (this->IsRange()) return this;
- if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
- return this->AsUnion()->Get(1);
- }
- return NULL;
-}
-
-bool AstType::Contains(i::Object* value) {
- DisallowHeapAllocation no_allocation;
- for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
- if (*it.Current() == value) return true;
- }
- if (IsInteger(value)) {
- AstType* range = this->GetRange();
- if (range != NULL && Contains(range->AsRange(), value)) return true;
- }
- return AstBitsetType::New(AstBitsetType::Lub(value))->Is(this);
-}
-
-bool AstUnionType::Wellformed() {
- DisallowHeapAllocation no_allocation;
- // This checks the invariants of the union representation:
- // 1. There are at least two elements.
- // 2. The first element is a bitset, no other element is a bitset.
- // 3. At most one element is a range, and it must be the second one.
- // 4. No element is itself a union.
- // 5. No element (except the bitset) is a subtype of any other.
- // 6. If there is a range, then the bitset type does not contain
- // plain number bits.
- DCHECK(this->Length() >= 2); // (1)
- DCHECK(this->Get(0)->IsBitset()); // (2a)
-
- for (int i = 0; i < this->Length(); ++i) {
- if (i != 0) DCHECK(!this->Get(i)->IsBitset()); // (2b)
- if (i != 1) DCHECK(!this->Get(i)->IsRange()); // (3)
- DCHECK(!this->Get(i)->IsUnion()); // (4)
- for (int j = 0; j < this->Length(); ++j) {
- if (i != j && i != 0)
- DCHECK(!this->Get(i)->SemanticIs(this->Get(j))); // (5)
- }
- }
- DCHECK(!this->Get(1)->IsRange() ||
- (AstBitsetType::NumberBits(this->Get(0)->AsBitset()) ==
- AstBitsetType::kNone)); // (6)
- return true;
-}
-
-// -----------------------------------------------------------------------------
-// Union and intersection
-
-static bool AddIsSafe(int x, int y) {
- return x >= 0 ? y <= std::numeric_limits<int>::max() - x
- : y >= std::numeric_limits<int>::min() - x;
-}
-
-AstType* AstType::Intersect(AstType* type1, AstType* type2, Zone* zone) {
- // Fast case: bit sets.
- if (type1->IsBitset() && type2->IsBitset()) {
- return AstBitsetType::New(type1->AsBitset() & type2->AsBitset());
- }
-
- // Fast case: top or bottom types.
- if (type1->IsNone() || type2->IsAny()) return type1; // Shortcut.
- if (type2->IsNone() || type1->IsAny()) return type2; // Shortcut.
-
- // Semi-fast case.
- if (type1->Is(type2)) return type1;
- if (type2->Is(type1)) return type2;
-
- // Slow case: create union.
-
- // Figure out the representation of the result first.
- // The rest of the method should not change this representation and
- // it should not make any decisions based on representations (i.e.,
- // it should only use the semantic part of types).
- const bitset representation =
- type1->Representation() & type2->Representation();
-
- // Semantic subtyping check - this is needed for consistency with the
- // semi-fast case above - we should behave the same way regardless of
- // representations. Intersection with a universal bitset should only update
- // the representations.
- if (type1->SemanticIs(type2)) {
- type2 = Any();
- } else if (type2->SemanticIs(type1)) {
- type1 = Any();
- }
-
- bitset bits =
- AST_SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
- int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
- int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any();
- int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any();
- size += 2;
- AstType* result_type = AstUnionType::New(size, zone);
- AstUnionType* result = result_type->AsUnion();
- size = 0;
-
- // Deal with bitsets.
- result->Set(size++, AstBitsetType::New(bits));
-
- AstRangeType::Limits lims = AstRangeType::Limits::Empty();
- size = IntersectAux(type1, type2, result, size, &lims, zone);
-
- // If the range is not empty, then insert it into the union and
- // remove the number bits from the bitset.
- if (!lims.IsEmpty()) {
- size = UpdateRange(AstRangeType::New(lims, representation, zone), result,
- size, zone);
-
- // Remove the number bits.
- bitset number_bits = AstBitsetType::NumberBits(bits);
- bits &= ~number_bits;
- result->Set(0, AstBitsetType::New(bits));
- }
- return NormalizeUnion(result_type, size, zone);
-}
-
-int AstType::UpdateRange(AstType* range, AstUnionType* result, int size,
- Zone* zone) {
- if (size == 1) {
- result->Set(size++, range);
- } else {
- // Make space for the range.
- result->Set(size++, result->Get(1));
- result->Set(1, range);
- }
-
- // Remove any components that just got subsumed.
- for (int i = 2; i < size;) {
- if (result->Get(i)->SemanticIs(range)) {
- result->Set(i, result->Get(--size));
- } else {
- ++i;
- }
- }
- return size;
-}
-
-AstRangeType::Limits AstType::ToLimits(bitset bits, Zone* zone) {
- bitset number_bits = AstBitsetType::NumberBits(bits);
-
- if (number_bits == AstBitsetType::kNone) {
- return AstRangeType::Limits::Empty();
- }
-
- return AstRangeType::Limits(AstBitsetType::Min(number_bits),
- AstBitsetType::Max(number_bits));
-}
-
-AstRangeType::Limits AstType::IntersectRangeAndBitset(AstType* range,
- AstType* bitset,
- Zone* zone) {
- AstRangeType::Limits range_lims(range->AsRange());
- AstRangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
- return AstRangeType::Limits::Intersect(range_lims, bitset_lims);
-}
-
-int AstType::IntersectAux(AstType* lhs, AstType* rhs, AstUnionType* result,
- int size, AstRangeType::Limits* lims, Zone* zone) {
- if (lhs->IsUnion()) {
- for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
- size =
- IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, zone);
- }
- return size;
- }
- if (rhs->IsUnion()) {
- for (int i = 0, n = rhs->AsUnion()->Length(); i < n; ++i) {
- size =
- IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, zone);
- }
- return size;
- }
-
- if (!AstBitsetType::SemanticIsInhabited(lhs->BitsetLub() &
- rhs->BitsetLub())) {
- return size;
- }
-
- if (lhs->IsRange()) {
- if (rhs->IsBitset()) {
- AstRangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
-
- if (!lim.IsEmpty()) {
- *lims = AstRangeType::Limits::Union(lim, *lims);
- }
- return size;
- }
- if (rhs->IsClass()) {
- *lims = AstRangeType::Limits::Union(AstRangeType::Limits(lhs->AsRange()),
- *lims);
- }
- if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
- return AddToUnion(rhs, result, size, zone);
- }
- if (rhs->IsRange()) {
- AstRangeType::Limits lim =
- AstRangeType::Limits::Intersect(AstRangeType::Limits(lhs->AsRange()),
- AstRangeType::Limits(rhs->AsRange()));
- if (!lim.IsEmpty()) {
- *lims = AstRangeType::Limits::Union(lim, *lims);
- }
- }
- return size;
- }
- if (rhs->IsRange()) {
- // This case is handled symmetrically above.
- return IntersectAux(rhs, lhs, result, size, lims, zone);
- }
- if (lhs->IsBitset() || rhs->IsBitset()) {
- return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, zone);
- }
- if (lhs->IsClass() != rhs->IsClass()) {
- return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, zone);
- }
- if (lhs->SimplyEquals(rhs)) {
- return AddToUnion(lhs, result, size, zone);
- }
- return size;
-}
-
-// Make sure that we produce a well-formed range and bitset:
-// If the range is non-empty, the number bits in the bitset should be
-// clear. Moreover, if we have a canonical range (such as Signed32),
-// we want to produce a bitset rather than a range.
-AstType* AstType::NormalizeRangeAndBitset(AstType* range, bitset* bits,
- Zone* zone) {
- // Fast path: If the bitset does not mention numbers, we can just keep the
- // range.
- bitset number_bits = AstBitsetType::NumberBits(*bits);
- if (number_bits == 0) {
- return range;
- }
-
- // If the range is semantically contained within the bitset, return None and
- // leave the bitset untouched.
- bitset range_lub = AST_SEMANTIC(range->BitsetLub());
- if (AstBitsetType::Is(range_lub, *bits)) {
- return None();
- }
-
- // Slow path: reconcile the bitset range and the range.
- double bitset_min = AstBitsetType::Min(number_bits);
- double bitset_max = AstBitsetType::Max(number_bits);
-
- double range_min = range->Min();
- double range_max = range->Max();
-
- // Remove the number bits from the bitset, they would just confuse us now.
- // NOTE: bits contains OtherNumber iff bits contains PlainNumber, in which
- // case we already returned after the subtype check above.
- *bits &= ~number_bits;
-
- if (range_min <= bitset_min && range_max >= bitset_max) {
- // Bitset is contained within the range, just return the range.
- return range;
- }
-
- if (bitset_min < range_min) {
- range_min = bitset_min;
- }
- if (bitset_max > range_max) {
- range_max = bitset_max;
- }
- return AstRangeType::New(range_min, range_max, AstBitsetType::kNone, zone);
-}
-
-AstType* AstType::Union(AstType* type1, AstType* type2, Zone* zone) {
- // Fast case: bit sets.
- if (type1->IsBitset() && type2->IsBitset()) {
- return AstBitsetType::New(type1->AsBitset() | type2->AsBitset());
- }
-
- // Fast case: top or bottom types.
- if (type1->IsAny() || type2->IsNone()) return type1;
- if (type2->IsAny() || type1->IsNone()) return type2;
-
- // Semi-fast case.
- if (type1->Is(type2)) return type2;
- if (type2->Is(type1)) return type1;
-
- // Figure out the representation of the result.
- // The rest of the method should not change this representation and
- // it should not make any decisions based on representations (i.e.,
- // it should only use the semantic part of types).
- const bitset representation =
- type1->Representation() | type2->Representation();
-
- // Slow case: create union.
- int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
- int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any();
- int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any();
- size += 2;
- AstType* result_type = AstUnionType::New(size, zone);
- AstUnionType* result = result_type->AsUnion();
- size = 0;
-
- // Compute the new bitset.
- bitset new_bitset = AST_SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
-
- // Deal with ranges.
- AstType* range = None();
- AstType* range1 = type1->GetRange();
- AstType* range2 = type2->GetRange();
- if (range1 != NULL && range2 != NULL) {
- AstRangeType::Limits lims =
- AstRangeType::Limits::Union(AstRangeType::Limits(range1->AsRange()),
- AstRangeType::Limits(range2->AsRange()));
- AstType* union_range = AstRangeType::New(lims, representation, zone);
- range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
- } else if (range1 != NULL) {
- range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
- } else if (range2 != NULL) {
- range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
- }
- new_bitset = AST_SEMANTIC(new_bitset) | representation;
- AstType* bits = AstBitsetType::New(new_bitset);
- result->Set(size++, bits);
- if (!range->IsNone()) result->Set(size++, range);
-
- size = AddToUnion(type1, result, size, zone);
- size = AddToUnion(type2, result, size, zone);
- return NormalizeUnion(result_type, size, zone);
-}
-
-// Add [type] to [result] unless [type] is bitset, range, or already subsumed.
-// Return new size of [result].
-int AstType::AddToUnion(AstType* type, AstUnionType* result, int size,
- Zone* zone) {
- if (type->IsBitset() || type->IsRange()) return size;
- if (type->IsUnion()) {
- for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
- size = AddToUnion(type->AsUnion()->Get(i), result, size, zone);
- }
- return size;
- }
- for (int i = 0; i < size; ++i) {
- if (type->SemanticIs(result->Get(i))) return size;
- }
- result->Set(size++, type);
- return size;
-}
-
-AstType* AstType::NormalizeUnion(AstType* union_type, int size, Zone* zone) {
- AstUnionType* unioned = union_type->AsUnion();
- DCHECK(size >= 1);
- DCHECK(unioned->Get(0)->IsBitset());
- // If the union has just one element, return it.
- if (size == 1) {
- return unioned->Get(0);
- }
- bitset bits = unioned->Get(0)->AsBitset();
- // If the union only consists of a range, we can get rid of the union.
- if (size == 2 && AST_SEMANTIC(bits) == AstBitsetType::kNone) {
- bitset representation = AST_REPRESENTATION(bits);
- if (representation == unioned->Get(1)->Representation()) {
- return unioned->Get(1);
- }
- if (unioned->Get(1)->IsRange()) {
- return AstRangeType::New(unioned->Get(1)->AsRange()->Min(),
- unioned->Get(1)->AsRange()->Max(),
- unioned->Get(0)->AsBitset(), zone);
- }
- }
- unioned->Shrink(size);
- SLOW_DCHECK(unioned->Wellformed());
- return union_type;
-}
-
-// -----------------------------------------------------------------------------
-// Component extraction
-
-// static
-AstType* AstType::Representation(AstType* t, Zone* zone) {
- return AstBitsetType::New(t->Representation());
-}
-
-// static
-AstType* AstType::Semantic(AstType* t, Zone* zone) {
- return Intersect(t, AstBitsetType::New(AstBitsetType::kSemantic), zone);
-}
-
-// -----------------------------------------------------------------------------
-// Iteration.
-
-int AstType::NumClasses() {
- DisallowHeapAllocation no_allocation;
- if (this->IsClass()) {
- return 1;
- } else if (this->IsUnion()) {
- int result = 0;
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (this->AsUnion()->Get(i)->IsClass()) ++result;
- }
- return result;
- } else {
- return 0;
- }
-}
-
-int AstType::NumConstants() {
- DisallowHeapAllocation no_allocation;
- if (this->IsConstant()) {
- return 1;
- } else if (this->IsUnion()) {
- int result = 0;
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (this->AsUnion()->Get(i)->IsConstant()) ++result;
- }
- return result;
- } else {
- return 0;
- }
-}
-
-template <class T>
-AstType* AstType::Iterator<T>::get_type() {
- DCHECK(!Done());
- return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
-}
-
-// C++ cannot specialise nested templates, so we have to go through this
-// contortion with an auxiliary template to simulate it.
-template <class T>
-struct TypeImplIteratorAux {
- static bool matches(AstType* type);
- static i::Handle<T> current(AstType* type);
-};
-
-template <>
-struct TypeImplIteratorAux<i::Map> {
- static bool matches(AstType* type) { return type->IsClass(); }
- static i::Handle<i::Map> current(AstType* type) {
- return type->AsClass()->Map();
- }
-};
-
-template <>
-struct TypeImplIteratorAux<i::Object> {
- static bool matches(AstType* type) { return type->IsConstant(); }
- static i::Handle<i::Object> current(AstType* type) {
- return type->AsConstant()->Value();
- }
-};
-
-template <class T>
-bool AstType::Iterator<T>::matches(AstType* type) {
- return TypeImplIteratorAux<T>::matches(type);
-}
-
-template <class T>
-i::Handle<T> AstType::Iterator<T>::Current() {
- return TypeImplIteratorAux<T>::current(get_type());
-}
-
-template <class T>
-void AstType::Iterator<T>::Advance() {
- DisallowHeapAllocation no_allocation;
- ++index_;
- if (type_->IsUnion()) {
- for (int n = type_->AsUnion()->Length(); index_ < n; ++index_) {
- if (matches(type_->AsUnion()->Get(index_))) return;
- }
- } else if (index_ == 0 && matches(type_)) {
- return;
- }
- index_ = -1;
-}
-
-// -----------------------------------------------------------------------------
-// Printing.
-
-const char* AstBitsetType::Name(bitset bits) {
- switch (bits) {
- case AST_REPRESENTATION(kAny):
- return "Any";
-#define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
- case AST_REPRESENTATION(k##type): \
- return #type;
- AST_REPRESENTATION_BITSET_TYPE_LIST(RETURN_NAMED_REPRESENTATION_TYPE)
-#undef RETURN_NAMED_REPRESENTATION_TYPE
-
-#define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
- case AST_SEMANTIC(k##type): \
- return #type;
- AST_SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
- AST_INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
-#undef RETURN_NAMED_SEMANTIC_TYPE
-
- default:
- return NULL;
- }
-}
-
-void AstBitsetType::Print(std::ostream& os, // NOLINT
- bitset bits) {
- DisallowHeapAllocation no_allocation;
- const char* name = Name(bits);
- if (name != NULL) {
- os << name;
- return;
- }
-
- // clang-format off
- static const bitset named_bitsets[] = {
-#define BITSET_CONSTANT(type, value) AST_REPRESENTATION(k##type),
- AST_REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
-#undef BITSET_CONSTANT
-
-#define BITSET_CONSTANT(type, value) AST_SEMANTIC(k##type),
- AST_INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
- AST_SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
-#undef BITSET_CONSTANT
- };
- // clang-format on
-
- bool is_first = true;
- os << "(";
- for (int i(arraysize(named_bitsets) - 1); bits != 0 && i >= 0; --i) {
- bitset subset = named_bitsets[i];
- if ((bits & subset) == subset) {
- if (!is_first) os << " | ";
- is_first = false;
- os << Name(subset);
- bits -= subset;
- }
- }
- DCHECK(bits == 0);
- os << ")";
-}
-
-void AstType::PrintTo(std::ostream& os, PrintDimension dim) {
- DisallowHeapAllocation no_allocation;
- if (dim != REPRESENTATION_DIM) {
- if (this->IsBitset()) {
- AstBitsetType::Print(os, AST_SEMANTIC(this->AsBitset()));
- } else if (this->IsClass()) {
- os << "Class(" << static_cast<void*>(*this->AsClass()->Map()) << " < ";
- AstBitsetType::New(AstBitsetType::Lub(this))->PrintTo(os, dim);
- os << ")";
- } else if (this->IsConstant()) {
- os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
- } else if (this->IsRange()) {
- std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
- std::streamsize saved_precision = os.precision(0);
- os << "Range(" << this->AsRange()->Min() << ", " << this->AsRange()->Max()
- << ")";
- os.flags(saved_flags);
- os.precision(saved_precision);
- } else if (this->IsContext()) {
- os << "Context(";
- this->AsContext()->Outer()->PrintTo(os, dim);
- os << ")";
- } else if (this->IsUnion()) {
- os << "(";
- for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- AstType* type_i = this->AsUnion()->Get(i);
- if (i > 0) os << " | ";
- type_i->PrintTo(os, dim);
- }
- os << ")";
- } else if (this->IsArray()) {
- os << "Array(";
- AsArray()->Element()->PrintTo(os, dim);
- os << ")";
- } else if (this->IsFunction()) {
- if (!this->AsFunction()->Receiver()->IsAny()) {
- this->AsFunction()->Receiver()->PrintTo(os, dim);
- os << ".";
- }
- os << "(";
- for (int i = 0; i < this->AsFunction()->Arity(); ++i) {
- if (i > 0) os << ", ";
- this->AsFunction()->Parameter(i)->PrintTo(os, dim);
- }
- os << ")->";
- this->AsFunction()->Result()->PrintTo(os, dim);
- } else if (this->IsTuple()) {
- os << "<";
- for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
- AstType* type_i = this->AsTuple()->Element(i);
- if (i > 0) os << ", ";
- type_i->PrintTo(os, dim);
- }
- os << ">";
- } else {
- UNREACHABLE();
- }
- }
- if (dim == BOTH_DIMS) os << "/";
- if (dim != SEMANTIC_DIM) {
- AstBitsetType::Print(os, AST_REPRESENTATION(this->BitsetLub()));
- }
-}
-
-#ifdef DEBUG
-void AstType::Print() {
- OFStream os(stdout);
- PrintTo(os);
- os << std::endl;
-}
-void AstBitsetType::Print(bitset bits) {
- OFStream os(stdout);
- Print(os, bits);
- os << std::endl;
-}
-#endif
-
-AstBitsetType::bitset AstBitsetType::SignedSmall() {
- return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
-}
-
-AstBitsetType::bitset AstBitsetType::UnsignedSmall() {
- return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
-}
-
-// -----------------------------------------------------------------------------
-// Instantiations.
-
-template class AstType::Iterator<i::Map>;
-template class AstType::Iterator<i::Object>;
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/ast-types.h b/deps/v8/src/ast/ast-types.h
deleted file mode 100644
index ea0be65eb6..0000000000
--- a/deps/v8/src/ast/ast-types.h
+++ /dev/null
@@ -1,1017 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_TYPES_H_
-#define V8_AST_AST_TYPES_H_
-
-#include "src/conversions.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-
-// SUMMARY
-//
-// A simple type system for compiler-internal use. It is based entirely on
-// union types, and all subtyping hence amounts to set inclusion. Besides the
-// obvious primitive types and some predefined unions, the type language also
-// can express class types (a.k.a. specific maps) and singleton types (i.e.,
-// concrete constants).
-//
-// Types consist of two dimensions: semantic (value range) and representation.
-// Both are related through subtyping.
-//
-//
-// SEMANTIC DIMENSION
-//
-// The following equations and inequations hold for the semantic axis:
-//
-// None <= T
-// T <= Any
-//
-// Number = Signed32 \/ Unsigned32 \/ Double
-// Smi <= Signed32
-// Name = String \/ Symbol
-// UniqueName = InternalizedString \/ Symbol
-// InternalizedString < String
-//
-// Receiver = Object \/ Proxy
-// Array < Object
-// Function < Object
-// RegExp < Object
-// OtherUndetectable < Object
-// DetectableReceiver = Receiver - OtherUndetectable
-//
-// Class(map) < T iff instance_type(map) < T
-// Constant(x) < T iff instance_type(map(x)) < T
-// Array(T) < Array
-// Function(R, S, T0, T1, ...) < Function
-// Context(T) < Internal
-//
-// Both structural Array and Function types are invariant in all parameters;
-// relaxing this would make Union and Intersect operations more involved.
-// There is no subtyping relation between Array, Function, or Context types
-// and respective Constant types, since these types cannot be reconstructed
-// for arbitrary heap values.
-// Note also that Constant(x) < Class(map(x)) does _not_ hold, since x's map can
-// change! (Its instance type cannot, however.)
-// TODO(rossberg): the latter is not currently true for proxies, because of fix,
-// but will hold once we implement direct proxies.
-// However, we also define a 'temporal' variant of the subtyping relation that
-// considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
-//
-//
-// REPRESENTATIONAL DIMENSION
-//
-// For the representation axis, the following holds:
-//
-// None <= R
-// R <= Any
-//
-// UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/
-// UntaggedInt16 \/ UntaggedInt32
-// UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64
-// UntaggedNumber = UntaggedInt \/ UntaggedFloat
-// Untagged = UntaggedNumber \/ UntaggedPtr
-// Tagged = TaggedInt \/ TaggedPtr
-//
-// Subtyping relates the two dimensions, for example:
-//
-// Number <= Tagged \/ UntaggedNumber
-// Object <= TaggedPtr \/ UntaggedPtr
-//
-// That holds because the semantic type constructors defined by the API create
-// types that allow for all possible representations, and dually, the ones for
-// representation types initially include all semantic ranges. Representations
-// can then e.g. be narrowed for a given semantic type using intersection:
-//
-// SignedSmall /\ TaggedInt (a 'smi')
-// Number /\ TaggedPtr (a heap number)
-//
-//
-// RANGE TYPES
-//
-// A range type represents a continuous integer interval by its minimum and
-// maximum value. Either value may be an infinity, in which case that infinity
-// itself is also included in the range. A range never contains NaN or -0.
-//
-// If a value v happens to be an integer n, then Constant(v) is considered a
-// subtype of Range(n, n) (and therefore also a subtype of any larger range).
-// In order to avoid large unions, however, it is usually a good idea to use
-// Range rather than Constant.
-//
-//
-// PREDICATES
-//
-// There are two main functions for testing types:
-//
-// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
-// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
-//
-// Typically, the former is to be used to select representations (e.g., via
-// T->Is(SignedSmall())), and the latter to check whether a specific case needs
-// handling (e.g., via T->Maybe(Number())).
-//
-// There is no functionality to discover whether a type is a leaf in the
-// lattice. That is intentional. It should always be possible to refine the
-// lattice (e.g., splitting up number types further) without invalidating any
-// existing assumptions or tests.
-// Consequently, do not normally use Equals for type tests, always use Is!
-//
-// The NowIs operator implements state-sensitive subtyping, as described above.
-// Any compilation decision based on such temporary properties requires runtime
-// guarding!
-//
-//
-// PROPERTIES
-//
-// Various formal properties hold for constructors, operators, and predicates
-// over types. For example, constructors are injective and subtyping is a
-// complete partial order.
-//
-// See test/cctest/test-types.cc for a comprehensive executable specification,
-// especially with respect to the properties of the more exotic 'temporal'
-// constructors and predicates (those prefixed 'Now').
-//
-//
-// IMPLEMENTATION
-//
-// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
-// respective map. Only structured types require allocation.
-// Note that the bitset representation is closed under both Union and Intersect.
-
-// -----------------------------------------------------------------------------
-// Values for bitset types
-
-// clang-format off
-
-#define AST_MASK_BITSET_TYPE_LIST(V) \
- V(Representation, 0xffc00000u) \
- V(Semantic, 0x003ffffeu)
-
-#define AST_REPRESENTATION(k) ((k) & AstBitsetType::kRepresentation)
-#define AST_SEMANTIC(k) ((k) & AstBitsetType::kSemantic)
-
-// Bits 21-22 are available.
-#define AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
- V(None, 0) \
- V(UntaggedBit, 1u << 23 | kSemantic) \
- V(UntaggedIntegral8, 1u << 24 | kSemantic) \
- V(UntaggedIntegral16, 1u << 25 | kSemantic) \
- V(UntaggedIntegral32, 1u << 26 | kSemantic) \
- V(UntaggedFloat32, 1u << 27 | kSemantic) \
- V(UntaggedFloat64, 1u << 28 | kSemantic) \
- V(UntaggedPointer, 1u << 29 | kSemantic) \
- V(TaggedSigned, 1u << 30 | kSemantic) \
- V(TaggedPointer, 1u << 31 | kSemantic) \
- \
- V(UntaggedIntegral, kUntaggedBit | kUntaggedIntegral8 | \
- kUntaggedIntegral16 | kUntaggedIntegral32) \
- V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
- V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
- V(Untagged, kUntaggedNumber | kUntaggedPointer) \
- V(Tagged, kTaggedSigned | kTaggedPointer)
-
-#define AST_INTERNAL_BITSET_TYPE_LIST(V) \
- V(OtherUnsigned31, 1u << 1 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherUnsigned32, 1u << 2 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherSigned32, 1u << 3 | AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(OtherNumber, 1u << 4 | AST_REPRESENTATION(kTagged | kUntaggedNumber))
-
-#define AST_SEMANTIC_BITSET_TYPE_LIST(V) \
- V(Negative31, 1u << 5 | \
- AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(Null, 1u << 6 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Undefined, 1u << 7 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Boolean, 1u << 8 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Unsigned30, 1u << 9 | \
- AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(MinusZero, 1u << 10 | \
- AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(NaN, 1u << 11 | \
- AST_REPRESENTATION(kTagged | kUntaggedNumber)) \
- V(Symbol, 1u << 12 | AST_REPRESENTATION(kTaggedPointer)) \
- V(InternalizedString, 1u << 13 | AST_REPRESENTATION(kTaggedPointer)) \
- V(OtherString, 1u << 14 | AST_REPRESENTATION(kTaggedPointer)) \
- V(OtherObject, 1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
- V(OtherUndetectable, 1u << 16 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Proxy, 1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Function, 1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
- V(Hole, 1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
- V(OtherInternal, 1u << 20 | \
- AST_REPRESENTATION(kTagged | kUntagged)) \
- \
- V(Signed31, kUnsigned30 | kNegative31) \
- V(Signed32, kSigned31 | kOtherUnsigned31 | \
- kOtherSigned32) \
- V(Signed32OrMinusZero, kSigned32 | kMinusZero) \
- V(Signed32OrMinusZeroOrNaN, kSigned32 | kMinusZero | kNaN) \
- V(Negative32, kNegative31 | kOtherSigned32) \
- V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
- V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
- kOtherUnsigned32) \
- V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
- V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
- V(Integral32, kSigned32 | kUnsigned32) \
- V(PlainNumber, kIntegral32 | kOtherNumber) \
- V(OrderedNumber, kPlainNumber | kMinusZero) \
- V(MinusZeroOrNaN, kMinusZero | kNaN) \
- V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(BooleanOrNumber, kBoolean | kNumber) \
- V(BooleanOrNullOrNumber, kBooleanOrNumber | kNull) \
- V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
- V(NullOrNumber, kNull | kNumber) \
- V(NullOrUndefined, kNull | kUndefined) \
- V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
- V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | kHole) \
- V(NumberOrString, kNumber | kString) \
- V(NumberOrUndefined, kNumber | kUndefined) \
- V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kPlainPrimitive) \
- V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
- V(Object, kFunction | kOtherObject | kOtherUndetectable) \
- V(Receiver, kObject | kProxy) \
- V(StringOrReceiver, kString | kReceiver) \
- V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
- kReceiver) \
- V(Internal, kHole | kOtherInternal) \
- V(NonInternal, kPrimitive | kReceiver) \
- V(NonNumber, kUnique | kString | kInternal) \
- V(Any, 0xfffffffeu)
-
-// clang-format on
-
-/*
- * The following diagrams show how integers (in the mathematical sense) are
- * divided among the different atomic numerical types.
- *
- * ON OS32 N31 U30 OU31 OU32 ON
- * ______[_______[_______[_______[_______[_______[_______
- * -2^31 -2^30 0 2^30 2^31 2^32
- *
- * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
- *
- * Some of the atomic numerical bitsets are internal only (see
- * INTERNAL_BITSET_TYPE_LIST). To a types user, they should only occur in
- * union with certain other bitsets. For instance, OtherNumber should only
- * occur as part of PlainNumber.
- */
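The partition above can be made concrete with a small classifier. The sketch below is a standalone illustration only (not V8 code); it follows the half-open intervals that the '[' markers in the diagram suggest, and all names are local to the snippet.

    #include <cmath>

    enum class NumAtom {
      kOtherNumber,      // non-integral, or outside [-2^31, 2^32)
      kOtherSigned32,    // [-2^31, -2^30)
      kNegative31,       // [-2^30, 0)
      kUnsigned30,       // [0, 2^30)
      kOtherUnsigned31,  // [2^30, 2^31)
      kOtherUnsigned32,  // [2^31, 2^32)
      kMinusZero,
      kNaN
    };

    NumAtom ClassifyDouble(double v) {
      if (std::isnan(v)) return NumAtom::kNaN;
      if (v == 0 && std::signbit(v)) return NumAtom::kMinusZero;
      if (std::nearbyint(v) != v) return NumAtom::kOtherNumber;
      if (v < -2147483648.0 || v >= 4294967296.0) return NumAtom::kOtherNumber;
      if (v < -1073741824.0) return NumAtom::kOtherSigned32;
      if (v < 0) return NumAtom::kNegative31;
      if (v < 1073741824.0) return NumAtom::kUnsigned30;
      if (v < 2147483648.0) return NumAtom::kOtherUnsigned31;
      return NumAtom::kOtherUnsigned32;
    }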
-
-#define AST_PROPER_BITSET_TYPE_LIST(V) \
- AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
- AST_SEMANTIC_BITSET_TYPE_LIST(V)
-
-#define AST_BITSET_TYPE_LIST(V) \
- AST_MASK_BITSET_TYPE_LIST(V) \
- AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
- AST_INTERNAL_BITSET_TYPE_LIST(V) \
- AST_SEMANTIC_BITSET_TYPE_LIST(V)
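Every bitset value in the lists above splits into a representation half (high bits, mask 0xffc00000) and a semantic half (bits 1-21, mask 0x003ffffe), with bit 0 reserved for pointer tagging. Note that the real representation atoms also OR in the full kSemantic mask so that pure representation types stay semantically inhabited; the standalone sketch below strips that detail and only demonstrates the mask split, with purely local constants.

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kRepresentationMask = 0xffc00000u;
    constexpr uint32_t kSemanticMask = 0x003ffffeu;
    constexpr uint32_t kTaggedPointerRepBit = 1u << 31;  // representation bit only
    constexpr uint32_t kBooleanSemanticBit = 1u << 8;
    constexpr uint32_t kBooleanBits = kBooleanSemanticBit | kTaggedPointerRepBit;

    int main() {
      assert((kBooleanBits & kRepresentationMask) == kTaggedPointerRepBit);
      assert((kBooleanBits & kSemanticMask) == kBooleanSemanticBit);
      // A type is inhabited only if both halves are non-empty (cf. IsInhabited).
      assert((kBooleanBits & kRepresentationMask) != 0 &&
             (kBooleanBits & kSemanticMask) != 0);
      return 0;
    }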
-
-class AstType;
-
-// -----------------------------------------------------------------------------
-// Bitset types (internal).
-
-class AstBitsetType {
- public:
- typedef uint32_t bitset; // Internal
-
- enum : uint32_t {
-#define DECLARE_TYPE(type, value) k##type = (value),
- AST_BITSET_TYPE_LIST(DECLARE_TYPE)
-#undef DECLARE_TYPE
- kUnusedEOL = 0
- };
-
- static bitset SignedSmall();
- static bitset UnsignedSmall();
-
- bitset Bitset() {
- return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
- }
-
- static bool IsInhabited(bitset bits) {
- return AST_SEMANTIC(bits) != kNone && AST_REPRESENTATION(bits) != kNone;
- }
-
- static bool SemanticIsInhabited(bitset bits) {
- return AST_SEMANTIC(bits) != kNone;
- }
-
- static bool Is(bitset bits1, bitset bits2) {
- return (bits1 | bits2) == bits2;
- }
-
- static double Min(bitset);
- static double Max(bitset);
-
- static bitset Glb(AstType* type); // greatest lower bound that's a bitset
- static bitset Glb(double min, double max);
- static bitset Lub(AstType* type); // least upper bound that's a bitset
- static bitset Lub(i::Map* map);
- static bitset Lub(i::Object* value);
- static bitset Lub(double value);
- static bitset Lub(double min, double max);
- static bitset ExpandInternals(bitset bits);
-
- static const char* Name(bitset);
- static void Print(std::ostream& os, bitset); // NOLINT
-#ifdef DEBUG
- static void Print(bitset);
-#endif
-
- static bitset NumberBits(bitset bits);
-
- static bool IsBitset(AstType* type) {
- return reinterpret_cast<uintptr_t>(type) & 1;
- }
-
- static AstType* NewForTesting(bitset bits) { return New(bits); }
-
- private:
- friend class AstType;
-
- static AstType* New(bitset bits) {
- return reinterpret_cast<AstType*>(static_cast<uintptr_t>(bits | 1u));
- }
-
- struct Boundary {
- bitset internal;
- bitset external;
- double min;
- };
- static const Boundary BoundariesArray[];
- static inline const Boundary* Boundaries();
- static inline size_t BoundariesSize();
-};
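The New/IsBitset/Bitset trio above encodes a whole bitset type directly in the pointer value, so bitset types never need Zone allocation. A standalone sketch of the same tagging idiom, with names local to the snippet:

    #include <cassert>
    #include <cstdint>

    struct FakeType;  // stand-in for AstType; never dereferenced

    FakeType* EncodeBitset(uint32_t bits) {
      // Bit 0 is never used by the bitset constants, so it can serve as the tag.
      return reinterpret_cast<FakeType*>(static_cast<uintptr_t>(bits | 1u));
    }

    bool IsBitsetPointer(const FakeType* t) {
      return (reinterpret_cast<uintptr_t>(t) & 1) != 0;
    }

    uint32_t DecodeBitset(const FakeType* t) {
      return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t) ^ 1u);
    }

    int main() {
      FakeType* t = EncodeBitset(0x00000200u);  // any bitset with bit 0 clear
      assert(IsBitsetPointer(t));
      assert(DecodeBitset(t) == 0x00000200u);
      return 0;
    }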
-
-// -----------------------------------------------------------------------------
-// Superclass for non-bitset types (internal).
-class AstTypeBase {
- protected:
- friend class AstType;
-
- enum Kind {
- kClass,
- kConstant,
- kContext,
- kArray,
- kFunction,
- kTuple,
- kUnion,
- kRange
- };
-
- Kind kind() const { return kind_; }
- explicit AstTypeBase(Kind kind) : kind_(kind) {}
-
- static bool IsKind(AstType* type, Kind kind) {
- if (AstBitsetType::IsBitset(type)) return false;
- AstTypeBase* base = reinterpret_cast<AstTypeBase*>(type);
- return base->kind() == kind;
- }
-
- // The hacky conversion to/from AstType*.
- static AstType* AsType(AstTypeBase* type) {
- return reinterpret_cast<AstType*>(type);
- }
- static AstTypeBase* FromType(AstType* type) {
- return reinterpret_cast<AstTypeBase*>(type);
- }
-
- private:
- Kind kind_;
-};
-
-// -----------------------------------------------------------------------------
-// Class types.
-
-class AstClassType : public AstTypeBase {
- public:
- i::Handle<i::Map> Map() { return map_; }
-
- private:
- friend class AstType;
- friend class AstBitsetType;
-
- static AstType* New(i::Handle<i::Map> map, Zone* zone) {
- return AsType(new (zone->New(sizeof(AstClassType)))
- AstClassType(AstBitsetType::Lub(*map), map));
- }
-
- static AstClassType* cast(AstType* type) {
- DCHECK(IsKind(type, kClass));
- return static_cast<AstClassType*>(FromType(type));
- }
-
- AstClassType(AstBitsetType::bitset bitset, i::Handle<i::Map> map)
- : AstTypeBase(kClass), bitset_(bitset), map_(map) {}
-
- AstBitsetType::bitset Lub() { return bitset_; }
-
- AstBitsetType::bitset bitset_;
- Handle<i::Map> map_;
-};
-
-// -----------------------------------------------------------------------------
-// Constant types.
-
-class AstConstantType : public AstTypeBase {
- public:
- i::Handle<i::Object> Value() { return object_; }
-
- private:
- friend class AstType;
- friend class AstBitsetType;
-
- static AstType* New(i::Handle<i::Object> value, Zone* zone) {
- AstBitsetType::bitset bitset = AstBitsetType::Lub(*value);
- return AsType(new (zone->New(sizeof(AstConstantType)))
- AstConstantType(bitset, value));
- }
-
- static AstConstantType* cast(AstType* type) {
- DCHECK(IsKind(type, kConstant));
- return static_cast<AstConstantType*>(FromType(type));
- }
-
- AstConstantType(AstBitsetType::bitset bitset, i::Handle<i::Object> object)
- : AstTypeBase(kConstant), bitset_(bitset), object_(object) {}
-
- AstBitsetType::bitset Lub() { return bitset_; }
-
- AstBitsetType::bitset bitset_;
- Handle<i::Object> object_;
-};
-// TODO(neis): Also cache value if numerical.
-// TODO(neis): Allow restricting the representation.
-
-// -----------------------------------------------------------------------------
-// Range types.
-
-class AstRangeType : public AstTypeBase {
- public:
- struct Limits {
- double min;
- double max;
- Limits(double min, double max) : min(min), max(max) {}
- explicit Limits(AstRangeType* range)
- : min(range->Min()), max(range->Max()) {}
- bool IsEmpty();
- static Limits Empty() { return Limits(1, 0); }
- static Limits Intersect(Limits lhs, Limits rhs);
- static Limits Union(Limits lhs, Limits rhs);
- };
-
- double Min() { return limits_.min; }
- double Max() { return limits_.max; }
-
- private:
- friend class AstType;
- friend class AstBitsetType;
- friend class AstUnionType;
-
- static AstType* New(double min, double max,
- AstBitsetType::bitset representation, Zone* zone) {
- return New(Limits(min, max), representation, zone);
- }
-
- static bool IsInteger(double x) {
- return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
- }
-
- static AstType* New(Limits lim, AstBitsetType::bitset representation,
- Zone* zone) {
- DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
- DCHECK(lim.min <= lim.max);
- DCHECK(AST_REPRESENTATION(representation) == representation);
- AstBitsetType::bitset bits =
- AST_SEMANTIC(AstBitsetType::Lub(lim.min, lim.max)) | representation;
-
- return AsType(new (zone->New(sizeof(AstRangeType)))
- AstRangeType(bits, lim));
- }
-
- static AstRangeType* cast(AstType* type) {
- DCHECK(IsKind(type, kRange));
- return static_cast<AstRangeType*>(FromType(type));
- }
-
- AstRangeType(AstBitsetType::bitset bitset, Limits limits)
- : AstTypeBase(kRange), bitset_(bitset), limits_(limits) {}
-
- AstBitsetType::bitset Lub() { return bitset_; }
-
- AstBitsetType::bitset bitset_;
- Limits limits_;
-};
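The Limits helpers above are only declared here; their definitions live in the corresponding .cc file. A standalone sketch of the interval arithmetic they stand for, assuming the usual lattice reading (Empty() = Limits(1, 0) already hints that "min > max" means empty):

    #include <algorithm>

    struct ToyLimits {
      double min;
      double max;
    };

    bool IsEmpty(const ToyLimits& l) { return l.min > l.max; }

    ToyLimits IntersectLimits(ToyLimits a, ToyLimits b) {
      // Tighten both ends; the result may come out empty (min > max).
      return {std::max(a.min, b.min), std::min(a.max, b.max)};
    }

    ToyLimits UnionLimits(ToyLimits a, ToyLimits b) {
      if (IsEmpty(a)) return b;
      if (IsEmpty(b)) return a;
      return {std::min(a.min, b.min), std::max(a.max, b.max)};
    }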
-
-// -----------------------------------------------------------------------------
-// Context types.
-
-class AstContextType : public AstTypeBase {
- public:
- AstType* Outer() { return outer_; }
-
- private:
- friend class AstType;
-
- static AstType* New(AstType* outer, Zone* zone) {
- return AsType(new (zone->New(sizeof(AstContextType)))
- AstContextType(outer)); // NOLINT
- }
-
- static AstContextType* cast(AstType* type) {
- DCHECK(IsKind(type, kContext));
- return static_cast<AstContextType*>(FromType(type));
- }
-
- explicit AstContextType(AstType* outer)
- : AstTypeBase(kContext), outer_(outer) {}
-
- AstType* outer_;
-};
-
-// -----------------------------------------------------------------------------
-// Array types.
-
-class AstArrayType : public AstTypeBase {
- public:
- AstType* Element() { return element_; }
-
- private:
- friend class AstType;
-
- explicit AstArrayType(AstType* element)
- : AstTypeBase(kArray), element_(element) {}
-
- static AstType* New(AstType* element, Zone* zone) {
- return AsType(new (zone->New(sizeof(AstArrayType))) AstArrayType(element));
- }
-
- static AstArrayType* cast(AstType* type) {
- DCHECK(IsKind(type, kArray));
- return static_cast<AstArrayType*>(FromType(type));
- }
-
- AstType* element_;
-};
-
-// -----------------------------------------------------------------------------
-// Superclass for types with variable number of type fields.
-class AstStructuralType : public AstTypeBase {
- public:
- int LengthForTesting() { return Length(); }
-
- protected:
- friend class AstType;
-
- int Length() { return length_; }
-
- AstType* Get(int i) {
- DCHECK(0 <= i && i < this->Length());
- return elements_[i];
- }
-
- void Set(int i, AstType* type) {
- DCHECK(0 <= i && i < this->Length());
- elements_[i] = type;
- }
-
- void Shrink(int length) {
- DCHECK(2 <= length && length <= this->Length());
- length_ = length;
- }
-
- AstStructuralType(Kind kind, int length, i::Zone* zone)
- : AstTypeBase(kind), length_(length) {
- elements_ =
- reinterpret_cast<AstType**>(zone->New(sizeof(AstType*) * length));
- }
-
- private:
- int length_;
- AstType** elements_;
-};
-
-// -----------------------------------------------------------------------------
-// Function types.
-
-class AstFunctionType : public AstStructuralType {
- public:
- int Arity() { return this->Length() - 2; }
- AstType* Result() { return this->Get(0); }
- AstType* Receiver() { return this->Get(1); }
- AstType* Parameter(int i) { return this->Get(2 + i); }
-
- void InitParameter(int i, AstType* type) { this->Set(2 + i, type); }
-
- private:
- friend class AstType;
-
- AstFunctionType(AstType* result, AstType* receiver, int arity, Zone* zone)
- : AstStructuralType(kFunction, 2 + arity, zone) {
- Set(0, result);
- Set(1, receiver);
- }
-
- static AstType* New(AstType* result, AstType* receiver, int arity,
- Zone* zone) {
- return AsType(new (zone->New(sizeof(AstFunctionType)))
- AstFunctionType(result, receiver, arity, zone));
- }
-
- static AstFunctionType* cast(AstType* type) {
- DCHECK(IsKind(type, kFunction));
- return static_cast<AstFunctionType*>(FromType(type));
- }
-};
-
-// -----------------------------------------------------------------------------
-// Tuple types.
-
-class AstTupleType : public AstStructuralType {
- public:
- int Arity() { return this->Length(); }
- AstType* Element(int i) { return this->Get(i); }
-
- void InitElement(int i, AstType* type) { this->Set(i, type); }
-
- private:
- friend class AstType;
-
- AstTupleType(int length, Zone* zone)
- : AstStructuralType(kTuple, length, zone) {}
-
- static AstType* New(int length, Zone* zone) {
- return AsType(new (zone->New(sizeof(AstTupleType)))
- AstTupleType(length, zone));
- }
-
- static AstTupleType* cast(AstType* type) {
- DCHECK(IsKind(type, kTuple));
- return static_cast<AstTupleType*>(FromType(type));
- }
-};
-
-// -----------------------------------------------------------------------------
-// Union types (internal).
-// A union is a structured type with the following invariants:
-// - its length is at least 2
-// - at most one field is a bitset, and it must go into index 0
-// - no field is a union
-// - no field is a subtype of any other field
-class AstUnionType : public AstStructuralType {
- private:
- friend AstType;
- friend AstBitsetType;
-
- AstUnionType(int length, Zone* zone)
- : AstStructuralType(kUnion, length, zone) {}
-
- static AstType* New(int length, Zone* zone) {
- return AsType(new (zone->New(sizeof(AstUnionType)))
- AstUnionType(length, zone));
- }
-
- static AstUnionType* cast(AstType* type) {
- DCHECK(IsKind(type, kUnion));
- return static_cast<AstUnionType*>(FromType(type));
- }
-
- bool Wellformed();
-};
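The four invariants listed above are what Wellformed() is declared to check. The sketch below restates them over a toy model in which a "type" is simply a set of atoms and subtyping is set inclusion; it is an illustration of the invariants, not the real implementation.

    #include <algorithm>
    #include <cassert>
    #include <set>
    #include <vector>

    using ToyType = std::set<int>;  // set of atomic type bits

    bool IsSubtype(const ToyType& a, const ToyType& b) {  // a <= b
      return std::includes(b.begin(), b.end(), a.begin(), a.end());
    }

    bool WellformedUnion(const std::vector<ToyType>& fields,
                         const std::vector<bool>& is_bitset) {
      assert(fields.size() == is_bitset.size());
      if (fields.size() < 2) return false;            // length is at least 2
      for (size_t i = 1; i < fields.size(); ++i) {
        if (is_bitset[i]) return false;               // a bitset only at index 0
      }
      for (size_t i = 0; i < fields.size(); ++i) {
        for (size_t j = 0; j < fields.size(); ++j) {
          if (i != j && IsSubtype(fields[i], fields[j])) return false;
        }
      }
      return true;  // "no field is a union" is implicit: ToyType cannot nest
    }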
-
-class AstType {
- public:
- typedef AstBitsetType::bitset bitset; // Internal
-
-// Constructors.
-#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
- static AstType* type() { return AstBitsetType::New(AstBitsetType::k##type); }
- AST_PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
-#undef DEFINE_TYPE_CONSTRUCTOR
-
- static AstType* SignedSmall() {
- return AstBitsetType::New(AstBitsetType::SignedSmall());
- }
- static AstType* UnsignedSmall() {
- return AstBitsetType::New(AstBitsetType::UnsignedSmall());
- }
-
- static AstType* Class(i::Handle<i::Map> map, Zone* zone) {
- return AstClassType::New(map, zone);
- }
- static AstType* Constant(i::Handle<i::Object> value, Zone* zone) {
- return AstConstantType::New(value, zone);
- }
- static AstType* Range(double min, double max, Zone* zone) {
- return AstRangeType::New(min, max,
- AST_REPRESENTATION(AstBitsetType::kTagged |
- AstBitsetType::kUntaggedNumber),
- zone);
- }
- static AstType* Context(AstType* outer, Zone* zone) {
- return AstContextType::New(outer, zone);
- }
- static AstType* Array(AstType* element, Zone* zone) {
- return AstArrayType::New(element, zone);
- }
- static AstType* Function(AstType* result, AstType* receiver, int arity,
- Zone* zone) {
- return AstFunctionType::New(result, receiver, arity, zone);
- }
- static AstType* Function(AstType* result, Zone* zone) {
- return Function(result, Any(), 0, zone);
- }
- static AstType* Function(AstType* result, AstType* param0, Zone* zone) {
- AstType* function = Function(result, Any(), 1, zone);
- function->AsFunction()->InitParameter(0, param0);
- return function;
- }
- static AstType* Function(AstType* result, AstType* param0, AstType* param1,
- Zone* zone) {
- AstType* function = Function(result, Any(), 2, zone);
- function->AsFunction()->InitParameter(0, param0);
- function->AsFunction()->InitParameter(1, param1);
- return function;
- }
- static AstType* Function(AstType* result, AstType* param0, AstType* param1,
- AstType* param2, Zone* zone) {
- AstType* function = Function(result, Any(), 3, zone);
- function->AsFunction()->InitParameter(0, param0);
- function->AsFunction()->InitParameter(1, param1);
- function->AsFunction()->InitParameter(2, param2);
- return function;
- }
- static AstType* Function(AstType* result, int arity, AstType** params,
- Zone* zone) {
- AstType* function = Function(result, Any(), arity, zone);
- for (int i = 0; i < arity; ++i) {
- function->AsFunction()->InitParameter(i, params[i]);
- }
- return function;
- }
- static AstType* Tuple(AstType* first, AstType* second, AstType* third,
- Zone* zone) {
- AstType* tuple = AstTupleType::New(3, zone);
- tuple->AsTuple()->InitElement(0, first);
- tuple->AsTuple()->InitElement(1, second);
- tuple->AsTuple()->InitElement(2, third);
- return tuple;
- }
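A minimal usage sketch of the constructor API above, written against the header as it stood before this commit removed it, so it only makes sense inside an older V8 tree; the include paths and the Zone plumbing are assumed rather than shown.

    #include "src/ast/ast-types.h"
    #include "src/zone/zone.h"

    void BuildSomeTypes(v8::internal::Zone* zone) {
      using v8::internal::AstType;
      // A function type: Number result and two parameters (Number, String);
      // the receiver defaults to Any() in this overload.
      AstType* sig = AstType::Function(AstType::Number(), AstType::Number(),
                                       AstType::String(), zone);
      // An integer range [0, 255]; ranges always get the Tagged|UntaggedNumber
      // representation (see Range() above).
      AstType* byte_range = AstType::Range(0.0, 255.0, zone);
      // This should hold: every integer range is semantically a subtype of
      // Number, and both carry the same representation bits.
      bool in_number = byte_range->Is(AstType::Number());
      (void)sig;
      (void)in_number;
    }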
-
- static AstType* Union(AstType* type1, AstType* type2, Zone* zone);
- static AstType* Intersect(AstType* type1, AstType* type2, Zone* zone);
-
- static AstType* Of(double value, Zone* zone) {
- return AstBitsetType::New(
- AstBitsetType::ExpandInternals(AstBitsetType::Lub(value)));
- }
- static AstType* Of(i::Object* value, Zone* zone) {
- return AstBitsetType::New(
- AstBitsetType::ExpandInternals(AstBitsetType::Lub(value)));
- }
- static AstType* Of(i::Handle<i::Object> value, Zone* zone) {
- return Of(*value, zone);
- }
-
- static AstType* For(i::Map* map) {
- return AstBitsetType::New(
- AstBitsetType::ExpandInternals(AstBitsetType::Lub(map)));
- }
- static AstType* For(i::Handle<i::Map> map) { return For(*map); }
-
- // Extraction of components.
- static AstType* Representation(AstType* t, Zone* zone);
- static AstType* Semantic(AstType* t, Zone* zone);
-
- // Predicates.
- bool IsInhabited() { return AstBitsetType::IsInhabited(this->BitsetLub()); }
-
- bool Is(AstType* that) { return this == that || this->SlowIs(that); }
- bool Maybe(AstType* that);
- bool Equals(AstType* that) { return this->Is(that) && that->Is(this); }
-
- // Equivalent to Constant(val)->Is(this), but avoiding allocation.
- bool Contains(i::Object* val);
- bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
-
- // State-dependent versions of the above that consider subtyping between
- // a constant and its map class.
- static AstType* NowOf(i::Object* value, Zone* zone);
- static AstType* NowOf(i::Handle<i::Object> value, Zone* zone) {
- return NowOf(*value, zone);
- }
- bool NowIs(AstType* that);
- bool NowContains(i::Object* val);
- bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
-
- bool NowStable();
-
- // Inspection.
- bool IsRange() { return IsKind(AstTypeBase::kRange); }
- bool IsClass() { return IsKind(AstTypeBase::kClass); }
- bool IsConstant() { return IsKind(AstTypeBase::kConstant); }
- bool IsContext() { return IsKind(AstTypeBase::kContext); }
- bool IsArray() { return IsKind(AstTypeBase::kArray); }
- bool IsFunction() { return IsKind(AstTypeBase::kFunction); }
- bool IsTuple() { return IsKind(AstTypeBase::kTuple); }
-
- AstClassType* AsClass() { return AstClassType::cast(this); }
- AstConstantType* AsConstant() { return AstConstantType::cast(this); }
- AstRangeType* AsRange() { return AstRangeType::cast(this); }
- AstContextType* AsContext() { return AstContextType::cast(this); }
- AstArrayType* AsArray() { return AstArrayType::cast(this); }
- AstFunctionType* AsFunction() { return AstFunctionType::cast(this); }
- AstTupleType* AsTuple() { return AstTupleType::cast(this); }
-
- // Minimum and maximum of a numeric type.
- // These functions do not distinguish between -0 and +0. If the type equals
- // kNaN, they return NaN; otherwise kNaN is ignored. Only call these
- // functions on subtypes of Number.
- double Min();
- double Max();
-
- // Extracts a range from the type: if the type is a range or a union
- // containing a range, that range is returned; otherwise, NULL is returned.
- AstType* GetRange();
-
- static bool IsInteger(i::Object* x);
- static bool IsInteger(double x) {
- return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
- }
-
- int NumClasses();
- int NumConstants();
-
- template <class T>
- class Iterator {
- public:
- bool Done() const { return index_ < 0; }
- i::Handle<T> Current();
- void Advance();
-
- private:
- friend class AstType;
-
- Iterator() : index_(-1) {}
- explicit Iterator(AstType* type) : type_(type), index_(-1) { Advance(); }
-
- inline bool matches(AstType* type);
- inline AstType* get_type();
-
- AstType* type_;
- int index_;
- };
-
- Iterator<i::Map> Classes() {
- if (this->IsBitset()) return Iterator<i::Map>();
- return Iterator<i::Map>(this);
- }
- Iterator<i::Object> Constants() {
- if (this->IsBitset()) return Iterator<i::Object>();
- return Iterator<i::Object>(this);
- }
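A small usage sketch for the class iterator above, again assuming a V8-internal build with the removed header available; Classes() returns an empty iterator for plain bitset types, so the loop is safe for any type.

    #include "src/ast/ast-types.h"

    void CountClasses(v8::internal::AstType* type) {
      using v8::internal::AstType;
      int count = 0;
      for (AstType::Iterator<v8::internal::Map> it = type->Classes();
           !it.Done(); it.Advance()) {
        v8::internal::Handle<v8::internal::Map> map = it.Current();
        (void)map;  // inspect the map here
        ++count;
      }
      (void)count;
    }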
-
- // Printing.
-
- enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
-
- void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS); // NOLINT
-
-#ifdef DEBUG
- void Print();
-#endif
-
- // Helpers for testing.
- bool IsBitsetForTesting() { return IsBitset(); }
- bool IsUnionForTesting() { return IsUnion(); }
- bitset AsBitsetForTesting() { return AsBitset(); }
- AstUnionType* AsUnionForTesting() { return AsUnion(); }
-
- private:
- // Friends.
- template <class>
- friend class Iterator;
- friend AstBitsetType;
- friend AstUnionType;
-
- // Internal inspection.
- bool IsKind(AstTypeBase::Kind kind) {
- return AstTypeBase::IsKind(this, kind);
- }
-
- bool IsNone() { return this == None(); }
- bool IsAny() { return this == Any(); }
- bool IsBitset() { return AstBitsetType::IsBitset(this); }
- bool IsUnion() { return IsKind(AstTypeBase::kUnion); }
-
- bitset AsBitset() {
- DCHECK(this->IsBitset());
- return reinterpret_cast<AstBitsetType*>(this)->Bitset();
- }
- AstUnionType* AsUnion() { return AstUnionType::cast(this); }
-
- bitset Representation();
-
- // Auxiliary functions.
- bool SemanticMaybe(AstType* that);
-
- bitset BitsetGlb() { return AstBitsetType::Glb(this); }
- bitset BitsetLub() { return AstBitsetType::Lub(this); }
-
- bool SlowIs(AstType* that);
- bool SemanticIs(AstType* that);
-
- static bool Overlap(AstRangeType* lhs, AstRangeType* rhs);
- static bool Contains(AstRangeType* lhs, AstRangeType* rhs);
- static bool Contains(AstRangeType* range, AstConstantType* constant);
- static bool Contains(AstRangeType* range, i::Object* val);
-
- static int UpdateRange(AstType* type, AstUnionType* result, int size,
- Zone* zone);
-
- static AstRangeType::Limits IntersectRangeAndBitset(AstType* range,
- AstType* bits,
- Zone* zone);
- static AstRangeType::Limits ToLimits(bitset bits, Zone* zone);
-
- bool SimplyEquals(AstType* that);
-
- static int AddToUnion(AstType* type, AstUnionType* result, int size,
- Zone* zone);
- static int IntersectAux(AstType* type, AstType* other, AstUnionType* result,
- int size, AstRangeType::Limits* limits, Zone* zone);
- static AstType* NormalizeUnion(AstType* unioned, int size, Zone* zone);
- static AstType* NormalizeRangeAndBitset(AstType* range, bitset* bits,
- Zone* zone);
-};
-
-// -----------------------------------------------------------------------------
-// Type bounds. A simple struct to represent a pair of lower/upper types.
-
-struct AstBounds {
- AstType* lower;
- AstType* upper;
-
- AstBounds()
- : // Make sure accessing uninitialized bounds crashes big-time.
- lower(nullptr),
- upper(nullptr) {}
- explicit AstBounds(AstType* t) : lower(t), upper(t) {}
- AstBounds(AstType* l, AstType* u) : lower(l), upper(u) {
- DCHECK(lower->Is(upper));
- }
-
- // Unrestricted bounds.
- static AstBounds Unbounded() {
- return AstBounds(AstType::None(), AstType::Any());
- }
-
- // Meet: both b1 and b2 are known to hold.
- static AstBounds Both(AstBounds b1, AstBounds b2, Zone* zone) {
- AstType* lower = AstType::Union(b1.lower, b2.lower, zone);
- AstType* upper = AstType::Intersect(b1.upper, b2.upper, zone);
- // Lower bounds are considered approximate, correct as necessary.
- if (!lower->Is(upper)) lower = upper;
- return AstBounds(lower, upper);
- }
-
- // Join: either b1 or b2 is known to hold.
- static AstBounds Either(AstBounds b1, AstBounds b2, Zone* zone) {
- AstType* lower = AstType::Intersect(b1.lower, b2.lower, zone);
- AstType* upper = AstType::Union(b1.upper, b2.upper, zone);
- return AstBounds(lower, upper);
- }
-
- static AstBounds NarrowLower(AstBounds b, AstType* t, Zone* zone) {
- AstType* lower = AstType::Union(b.lower, t, zone);
- // Lower bounds are considered approximate, correct as necessary.
- if (!lower->Is(b.upper)) lower = b.upper;
- return AstBounds(lower, b.upper);
- }
- static AstBounds NarrowUpper(AstBounds b, AstType* t, Zone* zone) {
- AstType* lower = b.lower;
- AstType* upper = AstType::Intersect(b.upper, t, zone);
- // Lower bounds are considered approximate, correct as necessary.
- if (!lower->Is(upper)) lower = upper;
- return AstBounds(lower, upper);
- }
-
- bool Narrows(AstBounds that) {
- return that.lower->Is(this->lower) && this->upper->Is(that.upper);
- }
-};
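A sketch of how the Meet/Join helpers above combine bounds, assuming V8-internal compilation as before: Both tightens (meet, both bounds hold), Either loosens (join, only one is known to hold).

    #include "src/ast/ast-types.h"
    #include "src/zone/zone.h"

    void CombineBounds(v8::internal::Zone* zone) {
      using v8::internal::AstBounds;
      using v8::internal::AstType;
      AstBounds a(AstType::Signed32(), AstType::Number());        // lower, upper
      AstBounds b(AstType::Unsigned32(), AstType::PlainNumber());
      // Both: the value satisfies a AND b, so the lower bound is the Union of
      // the lowers and the upper bound is the Intersect of the uppers.
      AstBounds meet = AstBounds::Both(a, b, zone);
      // Either: only one of a, b is known, so the guarantees weaken the other
      // way around (Intersect of lowers, Union of uppers).
      AstBounds join = AstBounds::Either(a, b, zone);
      (void)meet;
      (void)join;
    }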
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_TYPES_H_
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index a304aa0e00..c9c89d7745 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -32,7 +32,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/string-hasher.h"
-#include "src/utils.h"
+#include "src/utils-inl.h"
namespace v8 {
namespace internal {
@@ -55,10 +55,10 @@ class OneByteStringStream {
} // namespace
-class AstRawStringInternalizationKey : public HashTableKey {
+class AstRawStringInternalizationKey : public StringTableKey {
public:
explicit AstRawStringInternalizationKey(const AstRawString* string)
- : string_(string) {}
+ : StringTableKey(string->hash_field()), string_(string) {}
bool IsMatch(Object* other) override {
if (string_->is_one_byte())
@@ -67,18 +67,13 @@ class AstRawStringInternalizationKey : public HashTableKey {
Vector<const uint16_t>::cast(string_->literal_bytes_));
}
- uint32_t Hash() override { return string_->hash() >> Name::kHashShift; }
-
- uint32_t HashForObject(Object* key) override {
- return String::cast(key)->Hash();
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override {
+ Handle<String> AsHandle(Isolate* isolate) override {
if (string_->is_one_byte())
return isolate->factory()->NewOneByteInternalizedString(
- string_->literal_bytes_, string_->hash());
+ string_->literal_bytes_, string_->hash_field());
return isolate->factory()->NewTwoByteInternalizedString(
- Vector<const uint16_t>::cast(string_->literal_bytes_), string_->hash());
+ Vector<const uint16_t>::cast(string_->literal_bytes_),
+ string_->hash_field());
}
private:
@@ -98,9 +93,9 @@ void AstRawString::Internalize(Isolate* isolate) {
bool AstRawString::AsArrayIndex(uint32_t* index) const {
// The StringHasher will set up the hash in such a way that we can use it to
// figure out whether the string is convertible to an array index.
- if ((hash_ & Name::kIsNotArrayIndexMask) != 0) return false;
+ if ((hash_field_ & Name::kIsNotArrayIndexMask) != 0) return false;
if (length() <= Name::kMaxCachedArrayIndexLength) {
- *index = Name::ArrayIndexValueBits::decode(hash_);
+ *index = Name::ArrayIndexValueBits::decode(hash_field_);
} else {
OneByteStringStream stream(literal_bytes_);
CHECK(StringToArrayIndex(&stream, index));
@@ -127,7 +122,7 @@ uint16_t AstRawString::FirstCharacter() const {
bool AstRawString::Compare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
- DCHECK_EQ(lhs->hash(), rhs->hash());
+ DCHECK_EQ(lhs->Hash(), rhs->Hash());
if (lhs->length() != rhs->length()) return false;
const unsigned char* l = lhs->raw_data();
@@ -205,7 +200,6 @@ bool AstValue::BooleanValue() const {
return false;
}
UNREACHABLE();
- return false;
}
@@ -253,23 +247,23 @@ AstRawString* AstValueFactory::GetOneByteStringInternal(
if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
int key = literal[0] - 'a';
if (one_character_strings_[key] == nullptr) {
- uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
+ uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
- one_character_strings_[key] = GetString(hash, true, literal);
+ one_character_strings_[key] = GetString(hash_field, true, literal);
}
return one_character_strings_[key];
}
- uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
+ uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
- return GetString(hash, true, literal);
+ return GetString(hash_field, true, literal);
}
AstRawString* AstValueFactory::GetTwoByteStringInternal(
Vector<const uint16_t> literal) {
- uint32_t hash = StringHasher::HashSequentialString<uint16_t>(
+ uint32_t hash_field = StringHasher::HashSequentialString<uint16_t>(
literal.start(), literal.length(), hash_seed_);
- return GetString(hash, false, Vector<const byte>::cast(literal));
+ return GetString(hash_field, false, Vector<const byte>::cast(literal));
}
@@ -385,21 +379,21 @@ const AstValue* AstValueFactory::NewTheHole() {
#undef GENERATE_VALUE_GETTER
-AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
+AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
Vector<const byte> literal_bytes) {
// literal_bytes here points to whatever the user passed, and this is OK
// because we use vector_compare (which checks the contents) to compare
// against the AstRawStrings which are in the string_table_. We should not
// return this AstRawString.
- AstRawString key(is_one_byte, literal_bytes, hash);
- base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
+ AstRawString key(is_one_byte, literal_bytes, hash_field);
+ base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, key.Hash());
if (entry->value == nullptr) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
byte* new_literal_bytes = zone_->NewArray<byte>(length);
memcpy(new_literal_bytes, literal_bytes.start(), length);
AstRawString* new_string = new (zone_) AstRawString(
- is_one_byte, Vector<const byte>(new_literal_bytes, length), hash);
+ is_one_byte, Vector<const byte>(new_literal_bytes, length), hash_field);
CHECK_NOT_NULL(new_string);
AddString(new_string);
entry->key = new_string;
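The rename from hash to hash_field in this file reflects that the stored value is the full Name hash field, which keeps flag bits (such as the array-index marker) in its low bits; the usable hash is what remains after shifting them out, which is exactly what the new AstRawString::Hash() does with Name::kHashShift. A standalone sketch with illustrative constants only:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kIllustrativeHashShift = 2;         // stands in for Name::kHashShift
    constexpr uint32_t kIllustrativeNotArrayIndexBit = 1;  // stands in for a flag bit

    uint32_t HashFromField(uint32_t hash_field) {
      return hash_field >> kIllustrativeHashShift;  // drop the flag bits
    }

    int main() {
      uint32_t field =
          (0x1234u << kIllustrativeHashShift) | kIllustrativeNotArrayIndexBit;
      assert(HashFromField(field) == 0x1234u);
      return 0;
    }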
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 34e8b9e1c1..b72e34a36c 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -64,9 +64,8 @@ class AstRawString final : public ZoneObject {
}
// For storing AstRawStrings in a hash map.
- uint32_t hash() const {
- return hash_;
- }
+ uint32_t hash_field() const { return hash_field_; }
+ uint32_t Hash() const { return hash_field_ >> Name::kHashShift; }
// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
@@ -83,10 +82,10 @@ class AstRawString final : public ZoneObject {
// Members accessed only by the AstValueFactory & related classes:
static bool Compare(void* a, void* b);
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
- uint32_t hash)
+ uint32_t hash_field)
: next_(nullptr),
literal_bytes_(literal_bytes),
- hash_(hash),
+ hash_field_(hash_field),
is_one_byte_(is_one_byte) {}
AstRawString* next() {
DCHECK(!has_string_);
@@ -114,7 +113,7 @@ class AstRawString final : public ZoneObject {
};
Vector<const byte> literal_bytes_; // Memory owned by Zone.
- uint32_t hash_;
+ uint32_t hash_field_;
bool is_one_byte_;
#ifdef DEBUG
// (Debug-only:) Verify the object life-cylce: Some functions may only be
  // (Debug-only:) Verify the object life-cycle: Some functions may only be
@@ -203,7 +202,6 @@ class AstValue : public ZoneObject {
if (IsHeapNumber()) return number_;
if (IsSmi()) return smi_;
UNREACHABLE();
- return 0;
}
Smi* AsSmi() const {
@@ -368,21 +366,21 @@ class AstStringConstants final {
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
-#define F(name, str) \
- { \
- const char* data = str; \
- Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
- static_cast<int>(strlen(data))); \
- uint32_t hash = StringHasher::HashSequentialString<uint8_t>( \
- literal.start(), literal.length(), hash_seed_); \
- name##_string_ = new (&zone_) AstRawString(true, literal, hash); \
- /* The Handle returned by the factory is located on the roots */ \
- /* array, not on the temporary HandleScope, so this is safe. */ \
- name##_string_->set_string(isolate->factory()->name##_string()); \
- base::HashMap::Entry* entry = \
- string_table_.InsertNew(name##_string_, name##_string_->hash()); \
- DCHECK(entry->value == nullptr); \
- entry->value = reinterpret_cast<void*>(1); \
+#define F(name, str) \
+ { \
+ const char* data = str; \
+ Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
+ static_cast<int>(strlen(data))); \
+ uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
+ literal.start(), literal.length(), hash_seed_); \
+ name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
+ /* The Handle returned by the factory is located on the roots */ \
+ /* array, not on the temporary HandleScope, so this is safe. */ \
+ name##_string_->set_string(isolate->factory()->name##_string()); \
+ base::HashMap::Entry* entry = \
+ string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
+ DCHECK_NULL(entry->value); \
+ entry->value = reinterpret_cast<void*>(1); \
}
STRING_CONSTANTS(F)
#undef F
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index b367df7dae..40c93c8963 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -23,7 +23,6 @@
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
-#include "src/type-info.h"
namespace v8 {
namespace internal {
@@ -152,14 +151,12 @@ bool Expression::IsAnonymousFunctionDefinition() const {
AsClassLiteral()->IsAnonymousFunctionDefinition());
}
-void Expression::MarkTail() {
- if (IsConditional()) {
- AsConditional()->MarkTail();
- } else if (IsCall()) {
- AsCall()->MarkTail();
- } else if (IsBinaryOperation()) {
- AsBinaryOperation()->MarkTail();
- }
+bool Expression::IsConciseMethodDefinition() const {
+ return IsFunctionLiteral() && IsConciseMethod(AsFunctionLiteral()->kind());
+}
+
+bool Expression::IsAccessorFunctionDefinition() const {
+ return IsFunctionLiteral() && IsAccessorFunction(AsFunctionLiteral()->kind());
}
bool Statement::IsJump() const {
@@ -193,17 +190,6 @@ VariableProxy::VariableProxy(Variable* var, int start_position)
BindTo(var);
}
-VariableProxy::VariableProxy(const AstRawString* name,
- VariableKind variable_kind, int start_position)
- : Expression(start_position, kVariableProxy),
- raw_name_(name),
- next_unresolved_(nullptr) {
- bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
- IsAssignedField::encode(false) |
- IsResolvedField::encode(false) |
- HoleCheckModeField::encode(HoleCheckMode::kElided);
-}
-
VariableProxy::VariableProxy(const VariableProxy* copy_from)
: Expression(copy_from->position(), kVariableProxy),
next_unresolved_(nullptr) {
@@ -396,10 +382,9 @@ void LiteralProperty::SetStoreDataPropertySlot(FeedbackSlot slot) {
}
bool LiteralProperty::NeedsSetFunctionName() const {
- return is_computed_name_ &&
- (value_->IsAnonymousFunctionDefinition() ||
- (value_->IsFunctionLiteral() &&
- IsConciseMethod(value_->AsFunctionLiteral()->kind())));
+ return is_computed_name_ && (value_->IsAnonymousFunctionDefinition() ||
+ value_->IsConciseMethodDefinition() ||
+ value_->IsAccessorFunctionDefinition());
}
ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
@@ -554,10 +539,11 @@ void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) {
}
}
-void ObjectLiteral::InitDepthAndFlags() {
- if (is_initialized()) return;
+int ObjectLiteral::InitDepthAndFlags() {
+ if (is_initialized()) return depth();
bool is_simple = true;
bool has_seen_prototype = false;
+ bool needs_initial_allocation_site = false;
int depth_acc = 1;
uint32_t nof_properties = 0;
uint32_t elements = 0;
@@ -584,26 +570,17 @@ void ObjectLiteral::InitDepthAndFlags() {
}
DCHECK(!property->is_computed_name());
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL) {
- m_literal->InitDepthAndFlags();
- if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
+ MaterializedLiteral* literal = property->value()->AsMaterializedLiteral();
+ if (literal != nullptr) {
+ int subliteral_depth = literal->InitDepthAndFlags() + 1;
+ if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
+ needs_initial_allocation_site |= literal->NeedsInitialAllocationSite();
}
const AstValue* key = property->key()->AsLiteral()->raw_value();
Expression* value = property->value();
bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
-
- // Ensure objects that may, at any point in time, contain fields with double
- // representation are always treated as nested objects. This is true for
- // computed fields, and smi and double literals.
- // TODO(verwaest): Remove once we can store them inline.
- if (FLAG_track_double_fields &&
- (value->IsNumberLiteral() || !is_compile_time_value)) {
- set_may_store_doubles(true);
- }
-
is_simple = is_simple && is_compile_time_value;
// Keep track of the number of elements in the object literal and
@@ -622,11 +599,13 @@ void ObjectLiteral::InitDepthAndFlags() {
nof_properties++;
}
+ set_depth(depth_acc);
+ set_is_simple(is_simple);
+ set_needs_initial_allocation_site(needs_initial_allocation_site);
+ set_has_elements(elements > 0);
set_fast_elements((max_element_index <= 32) ||
((2 * elements) >= max_element_index));
- set_has_elements(elements > 0);
- set_is_simple(is_simple);
- set_depth(depth_acc);
+ return depth_acc;
}
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
@@ -699,19 +678,14 @@ bool ObjectLiteral::IsFastCloningSupported() const {
// The FastCloneShallowObject builtin doesn't copy elements, and object
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
- return fast_elements() && has_shallow_properties() &&
+ return fast_elements() && is_shallow() &&
properties_count() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
-ElementsKind ArrayLiteral::constant_elements_kind() const {
- return static_cast<ElementsKind>(constant_elements()->elements_kind());
-}
-
-void ArrayLiteral::InitDepthAndFlags() {
+int ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
-
- if (is_initialized()) return;
+ if (is_initialized()) return depth();
int constants_length = values()->length();
@@ -722,12 +696,10 @@ void ArrayLiteral::InitDepthAndFlags() {
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
DCHECK(!element->IsSpread());
- MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
- if (m_literal != NULL) {
- m_literal->InitDepthAndFlags();
- if (m_literal->depth() + 1 > depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
+ MaterializedLiteral* literal = element->AsMaterializedLiteral();
+ if (literal != NULL) {
+ int subliteral_depth = literal->InitDepthAndFlags() + 1;
+ if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
if (!CompileTimeValue::IsCompileTimeValue(element)) {
@@ -735,8 +707,12 @@ void ArrayLiteral::InitDepthAndFlags() {
}
}
- set_is_simple(is_simple);
set_depth(depth_acc);
+ set_is_simple(is_simple);
+ // Array literals always need an initial allocation site to properly track
+ // elements transitions.
+ set_needs_initial_allocation_site(true);
+ return depth_acc;
}
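The rewritten InitDepthAndFlags above now returns the computed depth, so the recursion (and its callers) no longer needs a separate depth() read after each recursive call. A toy standalone sketch of the same depth computation, with local names:

    #include <algorithm>
    #include <vector>

    struct ToyLiteral {
      std::vector<ToyLiteral*> nested;  // nested object/array literals only
      int depth = 0;                    // 0 means "not yet initialized"

      int InitDepthAndFlags() {
        if (depth != 0) return depth;
        int depth_acc = 1;  // a literal with no nested literals has depth 1
        for (ToyLiteral* sub : nested) {
          int subliteral_depth = sub->InitDepthAndFlags() + 1;
          depth_acc = std::max(depth_acc, subliteral_depth);
        }
        return depth = depth_acc;
      }
    };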
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
@@ -782,12 +758,12 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple() && depth() == 1 && array_index > 0 &&
- IsFastSmiOrObjectElementsKind(kind)) {
+ IsSmiOrObjectElementsKind(kind)) {
fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
}
Handle<FixedArrayBase> elements = fixed_array;
- if (IsFastDoubleElementsKind(kind)) {
+ if (IsDoubleElementsKind(kind)) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
elements = isolate->factory()->NewFixedDoubleArray(constants_length);
// We are copying from non-fast-double to fast-double.
@@ -832,6 +808,12 @@ void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
}
}
+bool MaterializedLiteral::IsSimple() const {
+ if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
+ if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
+ DCHECK(IsRegExpLiteral());
+ return false;
+}
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
Isolate* isolate) {
@@ -844,15 +826,22 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
return isolate->factory()->uninitialized_value();
}
-void MaterializedLiteral::InitDepthAndFlags() {
+int MaterializedLiteral::InitDepthAndFlags() {
+ if (IsArrayLiteral()) return AsArrayLiteral()->InitDepthAndFlags();
+ if (IsObjectLiteral()) return AsObjectLiteral()->InitDepthAndFlags();
+ DCHECK(IsRegExpLiteral());
+ return 1;
+}
+
+bool MaterializedLiteral::NeedsInitialAllocationSite() {
if (IsArrayLiteral()) {
- return AsArrayLiteral()->InitDepthAndFlags();
+ return AsArrayLiteral()->needs_initial_allocation_site();
}
if (IsObjectLiteral()) {
- return AsObjectLiteral()->InitDepthAndFlags();
+ return AsObjectLiteral()->needs_initial_allocation_site();
}
DCHECK(IsRegExpLiteral());
- DCHECK_LE(1, depth()); // Depth should be initialized.
+ return false;
}
void MaterializedLiteral::BuildConstants(Isolate* isolate) {
@@ -865,26 +854,6 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
DCHECK(IsRegExpLiteral());
}
-
-void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
- // TODO(olivf) If this Operation is used in a test context, then the
- // expression has a ToBoolean stub and we want to collect the type
- // information. However the GraphBuilder expects it to be on the instruction
- // corresponding to the TestContext, therefore we have to store it here and
- // not on the operand.
- set_to_boolean_types(oracle->ToBooleanTypes(expression()->test_id()));
-}
-
-
-void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
- // TODO(olivf) If this Operation is used in a test context, then the right
- // hand side has a ToBoolean stub and we want to collect the type information.
- // However the GraphBuilder expects it to be on the instruction corresponding
- // to the TestContext, therefore we have to store it here and not on the
- // right hand operand.
- set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
-}
-
void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
@@ -1018,35 +987,6 @@ bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
// ----------------------------------------------------------------------------
// Recording of type feedback
-// TODO(rossberg): all RecordTypeFeedback functions should disappear
-// once we use the common type field in the AST consistently.
-
-void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
- if (IsUnaryOperation()) {
- AsUnaryOperation()->RecordToBooleanTypeFeedback(oracle);
- } else if (IsBinaryOperation()) {
- AsBinaryOperation()->RecordToBooleanTypeFeedback(oracle);
- } else {
- set_to_boolean_types(oracle->ToBooleanTypes(test_id()));
- }
-}
-
-void SmallMapList::AddMapIfMissing(Handle<Map> map, Zone* zone) {
- if (!Map::TryUpdate(map).ToHandle(&map)) return;
- for (int i = 0; i < length(); ++i) {
- if (at(i).is_identical_to(map)) return;
- }
- Add(map, zone);
-}
-
-void SmallMapList::FilterForPossibleTransitions(Map* root_map) {
- for (int i = list_.length() - 1; i >= 0; i--) {
- if (at(i)->FindRootMap() != root_map) {
- list_.RemoveElement(list_.at(i));
- }
- }
-}
-
Handle<Map> SmallMapList::at(int i) const { return Handle<Map>(list_.at(i)); }
SmallMapList* Expression::GetReceiverTypes() {
@@ -1062,7 +1002,6 @@ SmallMapList* Expression::GetReceiverTypes() {
#undef GENERATE_CASE
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -1075,7 +1014,6 @@ KeyedAccessStoreMode Expression::GetStoreMode() const {
#undef GENERATE_CASE
default:
UNREACHABLE();
- return STANDARD_STORE;
}
}
@@ -1088,7 +1026,6 @@ IcCheckType Expression::GetKeyType() const {
#undef GENERATE_CASE
default:
UNREACHABLE();
- return PROPERTY;
}
}
@@ -1102,7 +1039,6 @@ bool Expression::IsMonomorphic() const {
#undef GENERATE_CASE
default:
UNREACHABLE();
- return false;
}
}
@@ -1141,10 +1077,7 @@ Call::CallType Call::GetCallType() const {
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
int pos)
- : Expression(pos, kCaseClause),
- label_(label),
- statements_(statements),
- compare_type_(AstType::None()) {}
+ : Expression(pos, kCaseClause), label_(label), statements_(statements) {}
void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
@@ -1154,7 +1087,7 @@ void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
uint32_t Literal::Hash() {
return raw_value()->IsString()
- ? raw_value()->AsString()->hash()
+ ? raw_value()->AsString()->Hash()
: ComputeLongHash(double_to_uint64(raw_value()->AsNumber()));
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 0fc9af621c..6bce32fdfc 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -5,12 +5,10 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
-#include "src/ast/ast-types.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
#include "src/ast/variables.h"
#include "src/bailout-reason.h"
-#include "src/base/flags.h"
#include "src/factory.h"
#include "src/globals.h"
#include "src/isolate.h"
@@ -91,7 +89,9 @@ namespace internal {
V(Conditional) \
V(VariableProxy) \
V(Literal) \
- V(Suspend) \
+ V(Yield) \
+ V(YieldStar) \
+ V(Await) \
V(Throw) \
V(CallRuntime) \
V(UnaryOperation) \
@@ -121,8 +121,8 @@ class BreakableStatement;
class Expression;
class IterationStatement;
class MaterializedLiteral;
+class ProducedPreParsedScopeData;
class Statement;
-class TypeFeedbackOracle;
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -156,18 +156,8 @@ class FeedbackSlotCache {
class AstProperties final BASE_EMBEDDED {
public:
- enum Flag {
- kNoFlags = 0,
- kDontSelfOptimize = 1 << 0,
- kMustUseIgnitionTurbo = 1 << 1
- };
-
- typedef base::Flags<Flag> Flags;
-
explicit AstProperties(Zone* zone) : node_count_(0), spec_(zone) {}
- Flags& flags() { return flags_; }
- Flags flags() const { return flags_; }
int node_count() { return node_count_; }
void add_node_count(int count) { node_count_ += count; }
@@ -175,13 +165,10 @@ class AstProperties final BASE_EMBEDDED {
FeedbackVectorSpec* get_spec() { return &spec_; }
private:
- Flags flags_;
int node_count_;
FeedbackVectorSpec spec_;
};
-DEFINE_OPERATORS_FOR_FLAGS(AstProperties::Flags)
-
class AstNode: public ZoneObject {
public:
@@ -252,10 +239,6 @@ class SmallMapList final {
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
- void AddMapIfMissing(Handle<Map> map, Zone* zone);
-
- void FilterForPossibleTransitions(Map* root_map);
-
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
@@ -287,9 +270,6 @@ class Expression : public AstNode {
kTest
};
- // Mark this expression as being in tail position.
- void MarkTail();
-
// True iff the expression is a valid reference expression.
bool IsValidReferenceExpression() const;
@@ -306,6 +286,12 @@ class Expression : public AstNode {
// a syntactic name.
bool IsAnonymousFunctionDefinition() const;
+ // True iff the expression is a concise method definition.
+ bool IsConciseMethodDefinition() const;
+
+ // True iff the expression is an accessor function definition.
+ bool IsAccessorFunctionDefinition() const;
+
// True iff the expression is a literal represented as a smi.
bool IsSmiLiteral() const;
@@ -325,45 +311,15 @@ class Expression : public AstNode {
// True iff the expression is a valid target for an assignment.
bool IsValidReferenceExpressionOrThis() const;
- // TODO(rossberg): this should move to its own AST node eventually.
- void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
- uint16_t to_boolean_types() const {
- return ToBooleanTypesField::decode(bit_field_);
- }
-
SmallMapList* GetReceiverTypes();
KeyedAccessStoreMode GetStoreMode() const;
IcCheckType GetKeyType() const;
bool IsMonomorphic() const;
- void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId id() const { return BailoutId(local_id(0)); }
- TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
-
- private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
- int base_id_;
- class ToBooleanTypesField
- : public BitField<uint16_t, AstNode::kNextBitFieldIndex, 9> {};
-
protected:
- Expression(int pos, NodeType type)
- : AstNode(pos, type), base_id_(BailoutId::None().ToInt()) {
- bit_field_ = ToBooleanTypesField::update(bit_field_, 0);
- }
+ Expression(int pos, NodeType type) : AstNode(pos, type) {}
- static int parent_num_ids() { return 0; }
- void set_to_boolean_types(uint16_t types) {
- bit_field_ = ToBooleanTypesField::update(bit_field_, types);
- }
- int base_id() const {
- DCHECK(!BailoutId(base_id_).IsNone());
- return base_id_;
- }
-
- static const uint8_t kNextBitFieldIndex = ToBooleanTypesField::kNext;
+ static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
};
@@ -378,28 +334,16 @@ class BreakableStatement : public Statement {
// if it is != NULL, guaranteed to contain at least one entry.
ZoneList<const AstRawString*>* labels() const { return labels_; }
- // Code generation
- Label* break_target() { return &break_target_; }
-
// Testers.
bool is_target_for_anonymous() const {
return BreakableTypeField::decode(bit_field_) == TARGET_FOR_ANONYMOUS;
}
- void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId EntryId() const { return BailoutId(local_id(0)); }
- BailoutId ExitId() const { return BailoutId(local_id(1)); }
-
private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
BreakableType breakableType() const {
return BreakableTypeField::decode(bit_field_);
}
- int base_id_;
- Label break_target_;
ZoneList<const AstRawString*>* labels_;
class BreakableTypeField
@@ -409,17 +353,10 @@ class BreakableStatement : public Statement {
BreakableStatement(ZoneList<const AstRawString*>* labels,
BreakableType breakable_type, int position, NodeType type)
: Statement(position, type),
- base_id_(BailoutId::None().ToInt()),
labels_(labels) {
DCHECK(labels == NULL || labels->length() > 0);
bit_field_ |= BreakableTypeField::encode(breakable_type);
}
- static int parent_num_ids() { return 0; }
-
- int base_id() const {
- DCHECK(!BailoutId(base_id_).IsNone());
- return base_id_;
- }
static const uint8_t kNextBitFieldIndex = BreakableTypeField::kNext;
};
@@ -432,9 +369,6 @@ class Block final : public BreakableStatement {
return IgnoreCompletionField::decode(bit_field_);
}
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId DeclsId() const { return BailoutId(local_id(0)); }
-
bool IsJump() const {
return !statements_.is_empty() && statements_.last()->IsJump()
&& labels() == NULL; // Good enough as an approximation...
@@ -453,8 +387,6 @@ class Block final : public BreakableStatement {
scope_(NULL) {
bit_field_ |= IgnoreCompletionField::encode(ignore_completion_value);
}
- static int parent_num_ids() { return BreakableStatement::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
ZoneList<Statement*> statements_;
Scope* scope_;
@@ -479,8 +411,6 @@ class DoExpression final : public Expression {
DCHECK_NOT_NULL(block_);
DCHECK_NOT_NULL(result_);
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Block* block_;
VariableProxy* result_;
@@ -548,30 +478,28 @@ class IterationStatement : public BreakableStatement {
first_suspend_id_ = first_suspend_id;
}
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId OsrEntryId() const { return BailoutId(local_id(0)); }
-
- // Code generation
- Label* continue_target() { return &continue_target_; }
+ void set_osr_id(int id) { osr_id_ = BailoutId(id); }
+ BailoutId OsrEntryId() const {
+ DCHECK(!osr_id_.IsNone());
+ return osr_id_;
+ }
protected:
IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
NodeType type)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS, pos, type),
+ osr_id_(BailoutId::None()),
body_(NULL),
suspend_count_(0),
first_suspend_id_(0) {}
- static int parent_num_ids() { return BreakableStatement::num_ids(); }
void Initialize(Statement* body) { body_ = body; }
static const uint8_t kNextBitFieldIndex =
BreakableStatement::kNextBitFieldIndex;
private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
+ BailoutId osr_id_;
Statement* body_;
- Label continue_target_;
int suspend_count_;
int first_suspend_id_;
};
@@ -587,18 +515,11 @@ class DoWhileStatement final : public IterationStatement {
Expression* cond() const { return cond_; }
void set_cond(Expression* e) { cond_ = e; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ContinueId() const { return BailoutId(local_id(0)); }
- BailoutId StackCheckId() const { return BackEdgeId(); }
- BailoutId BackEdgeId() const { return BailoutId(local_id(1)); }
-
private:
friend class AstNodeFactory;
DoWhileStatement(ZoneList<const AstRawString*>* labels, int pos)
: IterationStatement(labels, pos, kDoWhileStatement), cond_(NULL) {}
- static int parent_num_ids() { return IterationStatement::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* cond_;
};
@@ -614,18 +535,11 @@ class WhileStatement final : public IterationStatement {
Expression* cond() const { return cond_; }
void set_cond(Expression* e) { cond_ = e; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId ContinueId() const { return EntryId(); }
- BailoutId StackCheckId() const { return BodyId(); }
- BailoutId BodyId() const { return BailoutId(local_id(0)); }
-
private:
friend class AstNodeFactory;
WhileStatement(ZoneList<const AstRawString*>* labels, int pos)
: IterationStatement(labels, pos, kWhileStatement), cond_(NULL) {}
- static int parent_num_ids() { return IterationStatement::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* cond_;
};
@@ -633,9 +547,7 @@ class WhileStatement final : public IterationStatement {
class ForStatement final : public IterationStatement {
public:
- void Initialize(Statement* init,
- Expression* cond,
- Statement* next,
+ void Initialize(Statement* init, Expression* cond, Statement* next,
Statement* body) {
IterationStatement::Initialize(body);
init_ = init;
@@ -651,11 +563,6 @@ class ForStatement final : public IterationStatement {
void set_cond(Expression* e) { cond_ = e; }
void set_next(Statement* s) { next_ = s; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ContinueId() const { return BailoutId(local_id(0)); }
- BailoutId StackCheckId() const { return BodyId(); }
- BailoutId BodyId() const { return BailoutId(local_id(1)); }
-
private:
friend class AstNodeFactory;
@@ -664,8 +571,6 @@ class ForStatement final : public IterationStatement {
init_(NULL),
cond_(NULL),
next_(NULL) {}
- static int parent_num_ids() { return IterationStatement::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Statement* init_;
Expression* cond_;
@@ -726,16 +631,6 @@ class ForInStatement final : public ForEachStatement {
bit_field_ = ForInTypeField::update(bit_field_, type);
}
- static int num_ids() { return parent_num_ids() + 7; }
- BailoutId BodyId() const { return BailoutId(local_id(0)); }
- BailoutId EnumId() const { return BailoutId(local_id(1)); }
- BailoutId ToObjectId() const { return BailoutId(local_id(2)); }
- BailoutId PrepareId() const { return BailoutId(local_id(3)); }
- BailoutId FilterId() const { return BailoutId(local_id(4)); }
- BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
- BailoutId IncrementId() const { return BailoutId(local_id(6)); }
- BailoutId StackCheckId() const { return BodyId(); }
-
private:
friend class AstNodeFactory;
@@ -746,9 +641,6 @@ class ForInStatement final : public ForEachStatement {
bit_field_ = ForInTypeField::update(bit_field_, SLOW_FOR_IN);
}
- static int parent_num_ids() { return ForEachStatement::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
Expression* each_;
Expression* subject_;
FeedbackSlot each_slot_;
@@ -882,15 +774,20 @@ class ReturnStatement final : public JumpStatement {
Type type() const { return TypeField::decode(bit_field_); }
bool is_async_return() const { return type() == kAsyncReturn; }
+ int end_position() const { return end_position_; }
+
private:
friend class AstNodeFactory;
- ReturnStatement(Expression* expression, Type type, int pos)
- : JumpStatement(pos, kReturnStatement), expression_(expression) {
+ ReturnStatement(Expression* expression, Type type, int pos, int end_position)
+ : JumpStatement(pos, kReturnStatement),
+ expression_(expression),
+ end_position_(end_position) {
bit_field_ |= TypeField::encode(type);
}
Expression* expression_;
+ int end_position_;
class TypeField
: public BitField<Type, JumpStatement::kNextBitFieldIndex, 1> {};
@@ -932,16 +829,6 @@ class CaseClause final : public Expression {
Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId EntryId() const { return BailoutId(local_id(0)); }
- TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
-
- AstType* compare_type() { return compare_type_; }
- void set_compare_type(AstType* type) { compare_type_ = type; }
-
- // CaseClause will have both a slot in the feedback vector and the
- // TypeFeedbackId to record the type information. TypeFeedbackId is used by
- // full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
@@ -950,15 +837,12 @@ class CaseClause final : public Expression {
private:
friend class AstNodeFactory;
- static int parent_num_ids() { return Expression::num_ids(); }
CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
FeedbackSlot feedback_slot_;
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- AstType* compare_type_;
};
@@ -1010,31 +894,16 @@ class IfStatement final : public Statement {
&& HasElseStatement() && else_statement()->IsJump();
}
- void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 3; }
- BailoutId IfId() const { return BailoutId(local_id(0)); }
- BailoutId ThenId() const { return BailoutId(local_id(1)); }
- BailoutId ElseId() const { return BailoutId(local_id(2)); }
-
private:
friend class AstNodeFactory;
IfStatement(Expression* condition, Statement* then_statement,
Statement* else_statement, int pos)
: Statement(pos, kIfStatement),
- base_id_(BailoutId::None().ToInt()),
condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement) {}
- static int parent_num_ids() { return 0; }
- int base_id() const {
- DCHECK(!BailoutId(base_id_).IsNone());
- return base_id_;
- }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
- int base_id_;
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
@@ -1046,30 +915,9 @@ class TryStatement : public Statement {
Block* try_block() const { return try_block_; }
void set_try_block(Block* b) { try_block_ = b; }
- // Prediction of whether exceptions thrown into the handler for this try block
- // will be caught.
- //
- // This is set in ast-numbering and later compiled into the code's handler
- // table. The runtime uses this information to implement a feature that
- // notifies the debugger when an uncaught exception is thrown, _before_ the
- // exception propagates to the top.
- //
- // Since it's generally undecidable whether an exception will be caught, our
- // prediction is only an approximation.
- HandlerTable::CatchPrediction catch_prediction() const {
- return catch_prediction_;
- }
- void set_catch_prediction(HandlerTable::CatchPrediction prediction) {
- catch_prediction_ = prediction;
- }
-
protected:
TryStatement(Block* try_block, int pos, NodeType type)
- : Statement(pos, type),
- catch_prediction_(HandlerTable::UNCAUGHT),
- try_block_(try_block) {}
-
- HandlerTable::CatchPrediction catch_prediction_;
+ : Statement(pos, type), try_block_(try_block) {}
private:
Block* try_block_;
@@ -1082,18 +930,52 @@ class TryCatchStatement final : public TryStatement {
Block* catch_block() const { return catch_block_; }
void set_catch_block(Block* b) { catch_block_ = b; }
- // The clear_pending_message flag indicates whether or not to clear the
- // isolate's pending exception message before executing the catch_block. In
- // the normal use case, this flag is always on because the message object
- // is not needed anymore when entering the catch block and should not be kept
- // alive.
- // The use case where the flag is off is when the catch block is guaranteed to
- // rethrow the caught exception (using %ReThrow), which reuses the pending
- // message instead of generating a new one.
+ // Prediction of whether exceptions thrown into the handler for this try block
+ // will be caught.
+ //
+ // BytecodeGenerator tracks the state of catch prediction, which can change
+ // with each TryCatchStatement encountered. The tracked catch prediction is
+ // later compiled into the code's handler table. The runtime uses this
+ // information to implement a feature that notifies the debugger when an
+ // uncaught exception is thrown, _before_ the exception propagates to the top.
+ //
+ // If this try/catch statement is meant to rethrow (HandlerTable::UNCAUGHT),
+ // the catch prediction value is set to the same value as the surrounding
+ // catch prediction.
+ //
+ // Since it's generally undecidable whether an exception will be caught, our
+ // prediction is only an approximation.
+ // ---------------------------------------------------------------------------
+ inline HandlerTable::CatchPrediction GetCatchPrediction(
+ HandlerTable::CatchPrediction outer_catch_prediction) const {
+ if (catch_prediction_ == HandlerTable::UNCAUGHT) {
+ return outer_catch_prediction;
+ }
+ return catch_prediction_;
+ }
+
+ // Indicates whether or not code should be generated to clear the pending
+ // exception. The pending exception is cleared for cases where the exception
+ // is not guaranteed to be rethrown, indicated by the value
+ // HandlerTable::UNCAUGHT. If both the current and surrounding catch handlers
+ // are predicted uncaught, the exception is not cleared.
+ //
+ // If this handler is not going to simply rethrow the exception, this method
+ // indicates that the isolate's pending exception message should be cleared
+ // before executing the catch_block.
+ // In the normal use case, this is true because the message object
+ // is not needed anymore when entering the catch block and should not be
+ // kept alive.
+ // It is false when the catch block is guaranteed
+ // to rethrow the caught exception (using %ReThrow), which reuses the
+ // pending message instead of generating a new one.
// (When the catch block doesn't rethrow but is guaranteed to perform an
- // ordinary throw, not clearing the old message is safe but not very useful.)
- bool clear_pending_message() const {
- return catch_prediction_ != HandlerTable::UNCAUGHT;
+ // ordinary throw, not clearing the old message is safe but not very
+ // useful.)
+ inline bool ShouldClearPendingException(
+ HandlerTable::CatchPrediction outer_catch_prediction) const {
+ return catch_prediction_ != HandlerTable::UNCAUGHT ||
+ outer_catch_prediction != HandlerTable::UNCAUGHT;
}
private:
@@ -1103,12 +985,12 @@ class TryCatchStatement final : public TryStatement {
HandlerTable::CatchPrediction catch_prediction, int pos)
: TryStatement(try_block, pos, kTryCatchStatement),
scope_(scope),
- catch_block_(catch_block) {
- catch_prediction_ = catch_prediction;
- }
+ catch_block_(catch_block),
+ catch_prediction_(catch_prediction) {}
Scope* scope_;
Block* catch_block_;
+ HandlerTable::CatchPrediction catch_prediction_;
};
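
The two predicates above interact: a handler whose own prediction is UNCAUGHT (i.e. it is expected to rethrow) defers to the surrounding handler's prediction, and the pending message is kept only when both this handler and the surrounding one are predicted uncaught. A minimal standalone sketch of that decision logic, using a stand-in enum rather than V8's HandlerTable type:

// Standalone sketch (not V8 code): mirrors GetCatchPrediction /
// ShouldClearPendingException with a stand-in enum.
#include <cassert>

enum class CatchPrediction { UNCAUGHT, CAUGHT };

CatchPrediction EffectivePrediction(CatchPrediction own,
                                    CatchPrediction outer) {
  // An UNCAUGHT (rethrowing) handler inherits the surrounding prediction.
  return own == CatchPrediction::UNCAUGHT ? outer : own;
}

bool ShouldClearPendingException(CatchPrediction own,
                                 CatchPrediction outer) {
  // Keep the pending message only when both this handler and the
  // surrounding one are predicted to rethrow.
  return own != CatchPrediction::UNCAUGHT ||
         outer != CatchPrediction::UNCAUGHT;
}

int main() {
  assert(EffectivePrediction(CatchPrediction::UNCAUGHT,
                             CatchPrediction::CAUGHT) ==
         CatchPrediction::CAUGHT);
  assert(!ShouldClearPendingException(CatchPrediction::UNCAUGHT,
                                      CatchPrediction::UNCAUGHT));
  assert(ShouldClearPendingException(CatchPrediction::CAUGHT,
                                     CatchPrediction::UNCAUGHT));
}
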
@@ -1195,32 +1077,18 @@ class Literal final : public Expression {
uint32_t Hash();
static bool Match(void* literal1, void* literal2);
- static int num_ids() { return parent_num_ids() + 1; }
- TypeFeedbackId LiteralFeedbackId() const {
- return TypeFeedbackId(local_id(0));
- }
-
private:
friend class AstNodeFactory;
Literal(const AstValue* value, int position)
: Expression(position, kLiteral), value_(value) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
const AstValue* value_;
};
// Base class for literals that need space in the type feedback vector.
class MaterializedLiteral : public Expression {
public:
- bool is_initialized() const { return 0 < depth_; }
- int depth() const {
- DCHECK(is_initialized());
- return depth_;
- }
-
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
literal_slot_ = spec->AddLiteralSlot();
@@ -1228,40 +1096,27 @@ class MaterializedLiteral : public Expression {
FeedbackSlot literal_slot() const { return literal_slot_; }
+ // A MaterializedLiteral is simple if the values consist of only
+ // constants and simple object and array literals.
+ bool IsSimple() const;
+
private:
- int depth_ : 31;
FeedbackSlot literal_slot_;
- class IsSimpleField
- : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
-
protected:
- MaterializedLiteral(int pos, NodeType type)
- : Expression(pos, type), depth_(0) {
- bit_field_ |= IsSimpleField::encode(false);
- }
-
- // A materialized literal is simple if the values consist of only
- // constants and simple object and array literals.
- bool is_simple() const { return IsSimpleField::decode(bit_field_); }
- void set_is_simple(bool is_simple) {
- bit_field_ = IsSimpleField::update(bit_field_, is_simple);
- }
+ MaterializedLiteral(int pos, NodeType type) : Expression(pos, type) {}
friend class CompileTimeValue;
+ friend class ArrayLiteral;
+ friend class ObjectLiteral;
- void set_depth(int depth) {
- DCHECK(!is_initialized());
- depth_ = depth;
- }
+ // Populate the depth field and any flags the literal has; returns the depth.
+ int InitDepthAndFlags();
- // Populate the depth field and any flags the literal has.
- void InitDepthAndFlags();
+ bool NeedsInitialAllocationSite();
// Populate the constant properties/elements fixed array.
void BuildConstants(Isolate* isolate);
- friend class ArrayLiteral;
- friend class ObjectLiteral;
// If the expression is a literal, return the literal value;
// if the expression is a materialized literal and is simple return a
@@ -1269,6 +1124,88 @@ class MaterializedLiteral : public Expression {
// Otherwise, return undefined literal as the placeholder
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
+};
+
+// Node for capturing a regexp literal.
+class RegExpLiteral final : public MaterializedLiteral {
+ public:
+ Handle<String> pattern() const { return pattern_->string(); }
+ const AstRawString* raw_pattern() const { return pattern_; }
+ int flags() const { return flags_; }
+
+ private:
+ friend class AstNodeFactory;
+
+ RegExpLiteral(const AstRawString* pattern, int flags, int pos)
+ : MaterializedLiteral(pos, kRegExpLiteral),
+ flags_(flags),
+ pattern_(pattern) {}
+
+ int const flags_;
+ const AstRawString* const pattern_;
+};
+
+// Base class for Array and Object literals, providing common code for handling
+// nested subliterals.
+class AggregateLiteral : public MaterializedLiteral {
+ public:
+ enum Flags {
+ kNoFlags = 0,
+ kIsShallow = 1,
+ kDisableMementos = 1 << 1,
+ kNeedsInitialAllocationSite = 1 << 2,
+ };
+
+ bool is_initialized() const { return 0 < depth_; }
+ int depth() const {
+ DCHECK(is_initialized());
+ return depth_;
+ }
+
+ bool is_shallow() const { return depth() == 1; }
+ bool needs_initial_allocation_site() const {
+ return NeedsInitialAllocationSiteField::decode(bit_field_);
+ }
+
+ int ComputeFlags(bool disable_mementos = false) const {
+ int flags = kNoFlags;
+ if (is_shallow()) flags |= kIsShallow;
+ if (disable_mementos) flags |= kDisableMementos;
+ if (needs_initial_allocation_site()) flags |= kNeedsInitialAllocationSite;
+ return flags;
+ }
+
+ // An AggregateLiteral is simple if the values consist of only
+ // constants and simple object and array literals.
+ bool is_simple() const { return IsSimpleField::decode(bit_field_); }
+
+ private:
+ int depth_ : 31;
+ class NeedsInitialAllocationSiteField
+ : public BitField<bool, MaterializedLiteral::kNextBitFieldIndex, 1> {};
+ class IsSimpleField
+ : public BitField<bool, NeedsInitialAllocationSiteField::kNext, 1> {};
+
+ protected:
+ friend class AstNodeFactory;
+ AggregateLiteral(int pos, NodeType type)
+ : MaterializedLiteral(pos, type), depth_(0) {
+ bit_field_ |= NeedsInitialAllocationSiteField::encode(false) |
+ IsSimpleField::encode(false);
+ }
+
+ void set_is_simple(bool is_simple) {
+ bit_field_ = IsSimpleField::update(bit_field_, is_simple);
+ }
+
+ void set_depth(int depth) {
+ DCHECK(!is_initialized());
+ depth_ = depth;
+ }
+
+ void set_needs_initial_allocation_site(bool required) {
+ bit_field_ = NeedsInitialAllocationSiteField::update(bit_field_, required);
+ }
static const uint8_t kNextBitFieldIndex = IsSimpleField::kNext;
};
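
AggregateLiteral now owns the depth/shallowness bookkeeping and exposes it as a flag word for the literal-creation helpers. The sketch below mirrors ComputeFlags with plain ints and the same three base flag values; everything outside the enum values is a stand-in, not V8 code:

// Standalone sketch (not V8 code): flag composition as in
// AggregateLiteral::ComputeFlags.
#include <cstdio>

enum Flags {
  kNoFlags = 0,
  kIsShallow = 1,
  kDisableMementos = 1 << 1,
  kNeedsInitialAllocationSite = 1 << 2,
};

int ComputeFlags(int depth, bool needs_site, bool disable_mementos) {
  int flags = kNoFlags;
  if (depth == 1) flags |= kIsShallow;  // no nested sub-literals
  if (disable_mementos) flags |= kDisableMementos;
  if (needs_site) flags |= kNeedsInitialAllocationSite;
  return flags;
}

int main() {
  // A flat literal that requests an initial allocation site:
  std::printf("%d\n", ComputeFlags(1, true, false));  // prints 5
  // A nested literal with mementos disabled:
  std::printf("%d\n", ComputeFlags(2, false, true));   // prints 2
}
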
@@ -1358,7 +1295,7 @@ class ObjectLiteralProperty final : public LiteralProperty {
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
-class ObjectLiteral final : public MaterializedLiteral {
+class ObjectLiteral final : public AggregateLiteral {
public:
typedef ObjectLiteralProperty Property;
@@ -1368,23 +1305,17 @@ class ObjectLiteral final : public MaterializedLiteral {
}
int properties_count() const { return boilerplate_properties_; }
ZoneList<Property*>* properties() const { return properties_; }
- bool fast_elements() const { return FastElementsField::decode(bit_field_); }
- bool may_store_doubles() const {
- return MayStoreDoublesField::decode(bit_field_);
- }
bool has_elements() const { return HasElementsField::decode(bit_field_); }
- bool has_shallow_properties() const {
- return depth() == 1 && !has_elements() && !may_store_doubles();
- }
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
}
+ bool fast_elements() const { return FastElementsField::decode(bit_field_); }
bool has_null_prototype() const {
return HasNullPrototypeField::decode(bit_field_);
}
- // Populate the depth field and flags.
- void InitDepthAndFlags();
+ // Populate the depth field and flags; returns the depth.
+ int InitDepthAndFlags();
// Get the constant properties fixed array, populating it if necessary.
Handle<BoilerplateDescription> GetOrBuildConstantProperties(
@@ -1408,44 +1339,33 @@ class ObjectLiteral final : public MaterializedLiteral {
// Assemble bitfield of flags for the CreateObjectLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
- int flags = fast_elements() ? kFastElements : kNoFlags;
- if (has_shallow_properties()) flags |= kShallowProperties;
- if (disable_mementos) flags |= kDisableMementos;
+ int flags = AggregateLiteral::ComputeFlags(disable_mementos);
+ if (fast_elements()) flags |= kFastElements;
if (has_null_prototype()) flags |= kHasNullPrototype;
return flags;
}
int EncodeLiteralType() {
- int flags = fast_elements() ? kFastElements : kNoFlags;
- if (has_shallow_properties()) flags |= kShallowProperties;
+ int flags = kNoFlags;
+ if (fast_elements()) flags |= kFastElements;
if (has_null_prototype()) flags |= kHasNullPrototype;
return flags;
}
enum Flags {
- kNoFlags = 0,
- kFastElements = 1,
- kShallowProperties = 1 << 1,
- kDisableMementos = 1 << 2,
- kHasNullPrototype = 1 << 3,
+ kFastElements = 1 << 3,
+ kHasNullPrototype = 1 << 4,
};
+ STATIC_ASSERT(
+ static_cast<int>(AggregateLiteral::kNeedsInitialAllocationSite) <
+ static_cast<int>(kFastElements));
struct Accessors: public ZoneObject {
- Accessors() : getter(NULL), setter(NULL), bailout_id(BailoutId::None()) {}
+ Accessors() : getter(NULL), setter(NULL) {}
ObjectLiteralProperty* getter;
ObjectLiteralProperty* setter;
- BailoutId bailout_id;
};
- BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
-
- // Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForPropertySet(int i) { return BailoutId(local_id(i + 1)); }
-
- // Unlike other AST nodes, this number of bailout IDs allocated for an
- // ObjectLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
-
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
@@ -1457,30 +1377,23 @@ class ObjectLiteral final : public MaterializedLiteral {
ObjectLiteral(ZoneList<Property*>* properties,
uint32_t boilerplate_properties, int pos,
bool has_rest_property)
- : MaterializedLiteral(pos, kObjectLiteral),
+ : AggregateLiteral(pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
properties_(properties) {
- bit_field_ |= FastElementsField::encode(false) |
- HasElementsField::encode(false) |
- MayStoreDoublesField::encode(false) |
+ bit_field_ |= HasElementsField::encode(false) |
HasRestPropertyField::encode(has_rest_property) |
+ FastElementsField::encode(false) |
HasNullPrototypeField::encode(false);
}
- static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
void InitFlagsForPendingNullPrototype(int i);
- void set_may_store_doubles(bool may_store_doubles) {
- bit_field_ = MayStoreDoublesField::update(bit_field_, may_store_doubles);
+ void set_has_elements(bool has_elements) {
+ bit_field_ = HasElementsField::update(bit_field_, has_elements);
}
void set_fast_elements(bool fast_elements) {
bit_field_ = FastElementsField::update(bit_field_, fast_elements);
}
- void set_has_elements(bool has_elements) {
- bit_field_ = HasElementsField::update(bit_field_, has_elements);
- }
void set_has_null_protoype(bool has_null_prototype) {
bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype);
}
@@ -1489,16 +1402,14 @@ class ObjectLiteral final : public MaterializedLiteral {
Handle<BoilerplateDescription> constant_properties_;
ZoneList<Property*>* properties_;
- class FastElementsField
- : public BitField<bool, MaterializedLiteral::kNextBitFieldIndex, 1> {};
- class HasElementsField : public BitField<bool, FastElementsField::kNext, 1> {
- };
- class MayStoreDoublesField
- : public BitField<bool, HasElementsField::kNext, 1> {};
+ class HasElementsField
+ : public BitField<bool, AggregateLiteral::kNextBitFieldIndex, 1> {};
class HasRestPropertyField
- : public BitField<bool, MayStoreDoublesField::kNext, 1> {};
- class HasNullPrototypeField
+ : public BitField<bool, HasElementsField::kNext, 1> {};
+ class FastElementsField
: public BitField<bool, HasRestPropertyField::kNext, 1> {};
+ class HasNullPrototypeField
+ : public BitField<bool, FastElementsField::kNext, 1> {};
};
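
Because ObjectLiteral's boolean fields chain off AggregateLiteral::kNextBitFieldIndex, the Flags enum restarts at 1 << 3 and the STATIC_ASSERT above guards that kFastElements stays clear of the base-class flags. Below is a simplified, self-contained stand-in for that chaining, mirroring the encode/decode/update/kNext operations used above; it is not V8's BitField implementation:

// Standalone sketch (not V8 code): a simplified BitField chain packing
// four booleans the way ObjectLiteral's fields chain off a base index.
#include <cstdint>
#include <cstdio>

template <typename T, int kShift, int kSize>
struct BitField {
  static const int kNext = kShift + kSize;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word >> kShift) & ((1u << kSize) - 1));
  }
  static uint32_t update(uint32_t word, T value) {
    uint32_t mask = ((1u << kSize) - 1) << kShift;
    return (word & ~mask) | encode(value);
  }
};

// Pretend the base class already used bits [0, 3).
static const int kNextBitFieldIndex = 3;
using HasElements      = BitField<bool, kNextBitFieldIndex, 1>;
using HasRestProperty  = BitField<bool, HasElements::kNext, 1>;
using FastElements     = BitField<bool, HasRestProperty::kNext, 1>;
using HasNullPrototype = BitField<bool, FastElements::kNext, 1>;

int main() {
  uint32_t bits = HasElements::encode(false) | FastElements::encode(true);
  bits = HasRestProperty::update(bits, true);
  std::printf("fast=%d rest=%d null_proto=%d\n",
              FastElements::decode(bits), HasRestProperty::decode(bits),
              HasNullPrototype::decode(bits));
}
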
@@ -1525,50 +1436,18 @@ class AccessorTable
};
-// Node for capturing a regexp literal.
-class RegExpLiteral final : public MaterializedLiteral {
- public:
- Handle<String> pattern() const { return pattern_->string(); }
- const AstRawString* raw_pattern() const { return pattern_; }
- int flags() const { return flags_; }
-
- private:
- friend class AstNodeFactory;
-
- RegExpLiteral(const AstRawString* pattern, int flags, int pos)
- : MaterializedLiteral(pos, kRegExpLiteral),
- flags_(flags),
- pattern_(pattern) {
- set_depth(1);
- }
-
- int const flags_;
- const AstRawString* const pattern_;
-};
-
-
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
-class ArrayLiteral final : public MaterializedLiteral {
+class ArrayLiteral final : public AggregateLiteral {
public:
Handle<ConstantElementsPair> constant_elements() const {
return constant_elements_;
}
- ElementsKind constant_elements_kind() const;
ZoneList<Expression*>* values() const { return values_; }
- BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
-
- // Return an AST id for an element that is used in simulate instructions.
- BailoutId GetIdForElement(int i) { return BailoutId(local_id(i + 1)); }
-
- // Unlike other AST nodes, this number of bailout IDs allocated for an
- // ArrayLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 1 + values()->length(); }
-
- // Populate the depth field and flags.
- void InitDepthAndFlags();
+ // Populate the depth field and flags; returns the depth.
+ int InitDepthAndFlags();
// Get the constant elements fixed array, populating it if necessary.
Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
@@ -1586,9 +1465,7 @@ class ArrayLiteral final : public MaterializedLiteral {
// Assemble bitfield of flags for the CreateArrayLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
- int flags = depth() == 1 ? kShallowElements : kNoFlags;
- if (disable_mementos) flags |= kDisableMementos;
- return flags;
+ return AggregateLiteral::ComputeFlags(disable_mementos);
}
// Provide a mechanism for iterating through values to rewrite spreads.
@@ -1601,12 +1478,6 @@ class ArrayLiteral final : public MaterializedLiteral {
// Rewind an array literal omitting everything from the first spread on.
void RewindSpreads();
- enum Flags {
- kNoFlags = 0,
- kShallowElements = 1,
- kDisableMementos = 1 << 1
- };
-
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot LiteralFeedbackSlot() const { return literal_slot_; }
@@ -1615,13 +1486,10 @@ class ArrayLiteral final : public MaterializedLiteral {
friend class AstNodeFactory;
ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
- : MaterializedLiteral(pos, kArrayLiteral),
+ : AggregateLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
values_(values) {}
- static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
int first_spread_index_;
FeedbackSlot literal_slot_;
Handle<ConstantElementsPair> constant_elements_;
@@ -1694,8 +1562,6 @@ class VariableProxy final : public Expression {
FeedbackSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId BeforeId() const { return BailoutId(local_id(0)); }
void set_next_unresolved(VariableProxy* next) { next_unresolved_ = next; }
VariableProxy* next_unresolved() { return next_unresolved_; }
@@ -1703,12 +1569,19 @@ class VariableProxy final : public Expression {
friend class AstNodeFactory;
VariableProxy(Variable* var, int start_position);
+
VariableProxy(const AstRawString* name, VariableKind variable_kind,
- int start_position);
- explicit VariableProxy(const VariableProxy* copy_from);
+ int start_position)
+ : Expression(start_position, kVariableProxy),
+ raw_name_(name),
+ next_unresolved_(nullptr) {
+ bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
+ IsAssignedField::encode(false) |
+ IsResolvedField::encode(false) |
+ HoleCheckModeField::encode(HoleCheckMode::kElided);
+ }
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ explicit VariableProxy(const VariableProxy* copy_from);
class IsThisField : public BitField<bool, Expression::kNextBitFieldIndex, 1> {
};
@@ -1748,9 +1621,6 @@ class Property final : public Expression {
void set_obj(Expression* e) { obj_ = e; }
void set_key(Expression* e) { key_ = e; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId LoadId() const { return BailoutId(local_id(0)); }
-
bool IsStringAccess() const {
return IsStringAccessField::decode(bit_field_);
}
@@ -1815,9 +1685,6 @@ class Property final : public Expression {
InlineCacheStateField::encode(UNINITIALIZED);
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
class IsForCallField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class IsStringAccessField : public BitField<bool, IsForCallField::kNext, 1> {
@@ -1863,20 +1730,11 @@ class Call final : public Expression {
Handle<JSFunction> target() { return target_; }
- Handle<AllocationSite> allocation_site() { return allocation_site_; }
-
void SetKnownGlobalTarget(Handle<JSFunction> target) {
target_ = target;
set_is_uninitialized(false);
}
void set_target(Handle<JSFunction> target) { target_ = target; }
- void set_allocation_site(Handle<AllocationSite> site) {
- allocation_site_ = site;
- }
-
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ReturnId() const { return BailoutId(local_id(0)); }
- BailoutId CallId() const { return BailoutId(local_id(1)); }
bool is_uninitialized() const {
return IsUninitializedField::decode(bit_field_);
@@ -1889,12 +1747,6 @@ class Call final : public Expression {
return IsPossiblyEvalField::decode(bit_field_);
}
- TailCallMode tail_call_mode() const {
- return IsTailField::decode(bit_field_) ? TailCallMode::kAllow
- : TailCallMode::kDisallow;
- }
- void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
-
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
}
@@ -1918,11 +1770,6 @@ class Call final : public Expression {
// Helpers to determine how to handle the call.
CallType GetCallType() const;
-#ifdef DEBUG
- // Used to assert that the FullCodeGenerator records the return site.
- bool return_is_recorded_;
-#endif
-
private:
friend class AstNodeFactory;
@@ -1940,19 +1787,15 @@ class Call final : public Expression {
}
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
class IsUninitializedField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
- class IsTailField : public BitField<bool, IsUninitializedField::kNext, 1> {};
- class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
+ class IsPossiblyEvalField
+ : public BitField<bool, IsUninitializedField::kNext, 1> {};
FeedbackSlot ic_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
- Handle<AllocationSite> allocation_site_;
};
@@ -1978,17 +1821,7 @@ class CallNew final : public Expression {
bool IsMonomorphic() const { return IsMonomorphicField::decode(bit_field_); }
Handle<JSFunction> target() const { return target_; }
- Handle<AllocationSite> allocation_site() const {
- return allocation_site_;
- }
-
- static int num_ids() { return parent_num_ids() + 1; }
- static int feedback_slots() { return 1; }
- BailoutId ReturnId() const { return BailoutId(local_id(0)); }
- void set_allocation_site(Handle<AllocationSite> site) {
- allocation_site_ = site;
- }
void set_is_monomorphic(bool monomorphic) {
bit_field_ = IsMonomorphicField::update(bit_field_, monomorphic);
}
@@ -2012,14 +1845,10 @@ class CallNew final : public Expression {
bit_field_ |= IsMonomorphicField::encode(false);
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
FeedbackSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
- Handle<AllocationSite> allocation_site_;
class IsMonomorphicField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
@@ -2048,8 +1877,6 @@ class CallRuntime final : public Expression {
return function_;
}
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId CallId() { return BailoutId(local_id(0)); }
const char* debug_name();
private:
@@ -2066,9 +1893,6 @@ class CallRuntime final : public Expression {
function_(NULL),
arguments_(arguments) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
int context_index_;
const Runtime::Function* function_;
ZoneList<Expression*>* arguments_;
@@ -2081,14 +1905,6 @@ class UnaryOperation final : public Expression {
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
- // For unary not (Token::NOT), the AST ids where true and false will
- // actually be materialized, respectively.
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId MaterializeTrueId() const { return BailoutId(local_id(0)); }
- BailoutId MaterializeFalseId() const { return BailoutId(local_id(1)); }
-
- void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
-
private:
friend class AstNodeFactory;
@@ -2098,9 +1914,6 @@ class UnaryOperation final : public Expression {
DCHECK(Token::IsUnaryOp(op));
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
Expression* expression_;
class OperatorField
@@ -2115,77 +1928,28 @@ class BinaryOperation final : public Expression {
void set_left(Expression* e) { left_ = e; }
Expression* right() const { return right_; }
void set_right(Expression* e) { right_ = e; }
- Handle<AllocationSite> allocation_site() const { return allocation_site_; }
- void set_allocation_site(Handle<AllocationSite> allocation_site) {
- allocation_site_ = allocation_site;
- }
-
- void MarkTail() {
- switch (op()) {
- case Token::COMMA:
- case Token::AND:
- case Token::OR:
- right_->MarkTail();
- default:
- break;
- }
- }
- // The short-circuit logical operations need an AST ID for their
- // right-hand subexpression.
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId RightId() const { return BailoutId(local_id(0)); }
-
- // BinaryOperation will have both a slot in the feedback vector and the
- // TypeFeedbackId to record the type information. TypeFeedbackId is used
- // by full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot BinaryOperationFeedbackSlot() const { return feedback_slot_; }
- TypeFeedbackId BinaryOperationFeedbackId() const {
- return TypeFeedbackId(local_id(1));
- }
-
// Returns true if one side is a Smi literal, returning the other side's
// sub-expression in |subexpr| and the literal Smi in |literal|.
bool IsSmiLiteralOperation(Expression** subexpr, Smi** literal);
- Maybe<int> fixed_right_arg() const {
- return has_fixed_right_arg_ ? Just(fixed_right_arg_value_) : Nothing<int>();
- }
- void set_fixed_right_arg(Maybe<int> arg) {
- has_fixed_right_arg_ = arg.IsJust();
- if (arg.IsJust()) fixed_right_arg_value_ = arg.FromJust();
- }
-
- void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
-
private:
friend class AstNodeFactory;
BinaryOperation(Token::Value op, Expression* left, Expression* right, int pos)
- : Expression(pos, kBinaryOperation),
- left_(left),
- right_(right),
- has_fixed_right_arg_(false),
- fixed_right_arg_value_(0) {
+ : Expression(pos, kBinaryOperation), left_(left), right_(right) {
bit_field_ |= OperatorField::encode(op);
DCHECK(Token::IsBinaryOp(op));
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
FeedbackSlot feedback_slot_;
Expression* left_;
Expression* right_;
- Handle<AllocationSite> allocation_site_;
- // TODO(rossberg): the fixed arg should probably be represented as a Constant
- // type for the RHS. Currenty it's actually a Maybe<int>
- bool has_fixed_right_arg_;
- int fixed_right_arg_value_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
@@ -2211,24 +1975,12 @@ class CountOperation final : public Expression {
KeyedAccessStoreMode GetStoreMode() const {
return StoreModeField::decode(bit_field_);
}
- AstType* type() const { return type_; }
void set_key_type(IcCheckType type) {
bit_field_ = KeyTypeField::update(bit_field_, type);
}
void set_store_mode(KeyedAccessStoreMode mode) {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
- void set_type(AstType* type) { type_ = type; }
-
- static int num_ids() { return parent_num_ids() + 4; }
- BailoutId AssignmentId() const { return BailoutId(local_id(0)); }
- BailoutId ToNumberId() const { return BailoutId(local_id(1)); }
- TypeFeedbackId CountBinOpFeedbackId() const {
- return TypeFeedbackId(local_id(2));
- }
- TypeFeedbackId CountStoreFeedbackId() const {
- return TypeFeedbackId(local_id(3));
- }
// Feedback slot for binary operation is only used by ignition.
FeedbackSlot CountBinaryOpFeedbackSlot() const {
@@ -2243,15 +1995,12 @@ class CountOperation final : public Expression {
friend class AstNodeFactory;
CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
- : Expression(pos, kCountOperation), type_(NULL), expression_(expr) {
+ : Expression(pos, kCountOperation), expression_(expr) {
bit_field_ |=
IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
class IsPrefixField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class KeyTypeField : public BitField<IcCheckType, IsPrefixField::kNext, 1> {};
@@ -2261,7 +2010,6 @@ class CountOperation final : public Expression {
FeedbackSlot slot_;
FeedbackSlot binary_operation_slot_;
- AstType* type_;
Expression* expression_;
SmallMapList receiver_types_;
};
@@ -2276,17 +2024,6 @@ class CompareOperation final : public Expression {
void set_left(Expression* e) { left_ = e; }
void set_right(Expression* e) { right_ = e; }
- // Type feedback information.
- static int num_ids() { return parent_num_ids() + 1; }
- TypeFeedbackId CompareOperationFeedbackId() const {
- return TypeFeedbackId(local_id(0));
- }
- AstType* combined_type() const { return combined_type_; }
- void set_combined_type(AstType* type) { combined_type_ = type; }
-
- // CompareOperation will have both a slot in the feedback vector and the
- // TypeFeedbackId to record the type information. TypeFeedbackId is used
- // by full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
@@ -2302,21 +2039,14 @@ class CompareOperation final : public Expression {
CompareOperation(Token::Value op, Expression* left, Expression* right,
int pos)
- : Expression(pos, kCompareOperation),
- left_(left),
- right_(right),
- combined_type_(AstType::None()) {
+ : Expression(pos, kCompareOperation), left_(left), right_(right) {
bit_field_ |= OperatorField::encode(op);
DCHECK(Token::IsCompareOp(op));
}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
FeedbackSlot feedback_slot_;
Expression* left_;
Expression* right_;
- AstType* combined_type_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
@@ -2330,8 +2060,6 @@ class Spread final : public Expression {
int expression_position() const { return expr_pos_; }
- static int num_ids() { return parent_num_ids(); }
-
private:
friend class AstNodeFactory;
@@ -2340,9 +2068,6 @@ class Spread final : public Expression {
expr_pos_(expr_pos),
expression_(expression) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
int expr_pos_;
Expression* expression_;
};
@@ -2358,15 +2083,6 @@ class Conditional final : public Expression {
void set_then_expression(Expression* e) { then_expression_ = e; }
void set_else_expression(Expression* e) { else_expression_ = e; }
- void MarkTail() {
- then_expression_->MarkTail();
- else_expression_->MarkTail();
- }
-
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ThenId() const { return BailoutId(local_id(0)); }
- BailoutId ElseId() const { return BailoutId(local_id(1)); }
-
private:
friend class AstNodeFactory;
@@ -2377,9 +2093,6 @@ class Conditional final : public Expression {
then_expression_(then_expression),
else_expression_(else_expression) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
Expression* condition_;
Expression* then_expression_;
Expression* else_expression_;
@@ -2404,11 +2117,7 @@ class Assignment final : public Expression {
// This check relies on the definition order of token in token.h.
bool is_compound() const { return op() > Token::ASSIGN; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId AssignmentId() const { return BailoutId(local_id(0)); }
-
// Type feedback information.
- TypeFeedbackId AssignmentFeedbackId() { return TypeFeedbackId(local_id(1)); }
bool IsUninitialized() const {
return IsUninitializedField::decode(bit_field_);
}
@@ -2431,6 +2140,18 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
+ // The assignment was generated as part of block-scoped sloppy-mode
+ // function hoisting, see
+ // ES#sec-block-level-function-declarations-web-legacy-compatibility-semantics
+ LookupHoistingMode lookup_hoisting_mode() const {
+ return static_cast<LookupHoistingMode>(
+ LookupHoistingModeField::decode(bit_field_));
+ }
+ void set_lookup_hoisting_mode(LookupHoistingMode mode) {
+ bit_field_ =
+ LookupHoistingModeField::update(bit_field_, static_cast<bool>(mode));
+ }
+
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot AssignmentSlot() const { return slot_; }
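
The lookup_hoisting_mode accessors added above store the mode in a single bit, which only works if LookupHoistingMode is a two-valued enum whose values are 0 and 1; that is what the static_cast<bool> round trip assumes. A standalone sketch of that round trip, with placeholder enumerator names rather than the real ones:

// Standalone sketch (not V8 code): round-tripping a two-valued enum
// through a one-bit field, as the accessors above do.
#include <cassert>

enum class HoistingMode { kNormal = 0, kLegacySloppy = 1 };  // placeholder names

int main() {
  unsigned bit_field = 0;
  HoistingMode mode = HoistingMode::kLegacySloppy;
  // Encode: the enum collapses to one bit.
  bit_field = (bit_field & ~1u) |
              static_cast<unsigned>(static_cast<bool>(mode));
  // Decode: the bit widens back to the enum.
  HoistingMode decoded = static_cast<HoistingMode>(bit_field & 1u);
  assert(decoded == mode);
}
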
@@ -2440,9 +2161,6 @@ class Assignment final : public Expression {
Assignment(Token::Value op, Expression* target, Expression* value, int pos);
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
class IsUninitializedField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class KeyTypeField
@@ -2450,6 +2168,8 @@ class Assignment final : public Expression {
class StoreModeField
: public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
+ class LookupHoistingModeField : public BitField<bool, TokenField::kNext, 1> {
+ };
FeedbackSlot slot_;
Expression* target_;
@@ -2487,8 +2207,6 @@ class RewritableExpression final : public Expression {
bit_field_ = IsRewrittenField::update(bit_field_, true);
}
- static int num_ids() { return parent_num_ids(); }
-
private:
friend class AstNodeFactory;
@@ -2499,8 +2217,6 @@ class RewritableExpression final : public Expression {
DCHECK(!expression->IsRewritableExpression());
}
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
Expression* expr_;
class IsRewrittenField
@@ -2516,73 +2232,130 @@ class RewritableExpression final : public Expression {
// Our Yield is different from the JS yield in that it "returns" its argument as
// is, without wrapping it in an iterator result object. Such wrapping, if
// desired, must be done beforehand (see the parser).
-class Suspend final : public Expression {
+class Suspend : public Expression {
public:
- enum OnException { kOnExceptionThrow, kOnExceptionRethrow };
+ // With {kNoControl}, the {Suspend} behaves like yield, except that it never
+ // throws and never causes the current generator to return. This is used to
+ // desugar yield*.
+ enum OnAbruptResume { kOnExceptionThrow, kOnExceptionRethrow, kNoControl };
- Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
- OnException on_exception() const {
- return OnExceptionField::decode(bit_field_);
+ OnAbruptResume on_abrupt_resume() const {
+ return OnAbruptResumeField::decode(bit_field_);
}
bool rethrow_on_exception() const {
- return on_exception() == kOnExceptionRethrow;
+ return on_abrupt_resume() == kOnExceptionRethrow;
}
int suspend_id() const { return suspend_id_; }
- SuspendFlags flags() const { return FlagsField::decode(bit_field_); }
- SuspendFlags suspend_type() const {
- return flags() & SuspendFlags::kSuspendTypeMask;
- }
- SuspendFlags generator_type() const {
- return flags() & SuspendFlags::kGeneratorTypeMask;
- }
- bool is_yield() const { return suspend_type() == SuspendFlags::kYield; }
- bool is_yield_star() const {
- return suspend_type() == SuspendFlags::kYieldStar;
- }
- bool is_await() const { return suspend_type() == SuspendFlags::kAwait; }
- bool is_async_generator() const {
- return generator_type() == SuspendFlags::kAsyncGenerator;
- }
- inline bool IsNonInitialAsyncGeneratorYield() const {
- // Return true if is_async_generator() && !is_await() && yield_id() > 0
- return suspend_id() > 0 && (flags() & SuspendFlags::kAsyncGeneratorAwait) ==
- SuspendFlags::kAsyncGenerator;
- }
- void set_generator_object(Expression* e) { generator_object_ = e; }
void set_expression(Expression* e) { expression_ = e; }
void set_suspend_id(int id) { suspend_id_ = id; }
- void set_suspend_type(SuspendFlags type) {
- DCHECK_EQ(0, static_cast<int>(type & ~SuspendFlags::kSuspendTypeMask));
- bit_field_ = FlagsField::update(bit_field_, type);
- }
+
+ inline bool IsInitialYield() const { return suspend_id_ == 0 && IsYield(); }
private:
friend class AstNodeFactory;
+ friend class Yield;
+ friend class YieldStar;
+ friend class Await;
- Suspend(Expression* generator_object, Expression* expression, int pos,
- OnException on_exception, SuspendFlags flags)
- : Expression(pos, kSuspend),
- suspend_id_(-1),
- generator_object_(generator_object),
- expression_(expression) {
- bit_field_ |=
- OnExceptionField::encode(on_exception) | FlagsField::encode(flags);
+ Suspend(NodeType node_type, Expression* expression, int pos,
+ OnAbruptResume on_abrupt_resume)
+ : Expression(pos, node_type), suspend_id_(-1), expression_(expression) {
+ bit_field_ |= OnAbruptResumeField::encode(on_abrupt_resume);
}
int suspend_id_;
- Expression* generator_object_;
Expression* expression_;
- class OnExceptionField
- : public BitField<OnException, Expression::kNextBitFieldIndex, 1> {};
- class FlagsField
- : public BitField<SuspendFlags, OnExceptionField::kNext,
- static_cast<int>(SuspendFlags::kBitWidth)> {};
+ class OnAbruptResumeField
+ : public BitField<OnAbruptResume, Expression::kNextBitFieldIndex, 2> {};
};
+class Yield final : public Suspend {
+ private:
+ friend class AstNodeFactory;
+ Yield(Expression* expression, int pos, OnAbruptResume on_abrupt_resume)
+ : Suspend(kYield, expression, pos, on_abrupt_resume) {}
+};
+
+class YieldStar final : public Suspend {
+ public:
+ void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+ FeedbackSlotCache* cache) {
+ load_iterable_iterator_slot_ = spec->AddLoadICSlot();
+ load_iterator_return_slot_ = spec->AddLoadICSlot();
+ load_iterator_next_slot_ = spec->AddLoadICSlot();
+ load_iterator_throw_slot_ = spec->AddLoadICSlot();
+ load_output_done_slot_ = spec->AddLoadICSlot();
+ load_output_value_slot_ = spec->AddLoadICSlot();
+ call_iterable_iterator_slot_ = spec->AddCallICSlot();
+ call_iterator_return_slot1_ = spec->AddCallICSlot();
+ call_iterator_return_slot2_ = spec->AddCallICSlot();
+ call_iterator_next_slot_ = spec->AddCallICSlot();
+ call_iterator_throw_slot_ = spec->AddCallICSlot();
+ }
+
+ FeedbackSlot load_iterable_iterator_slot() const {
+ return load_iterable_iterator_slot_;
+ }
+ FeedbackSlot load_iterator_return_slot() const {
+ return load_iterator_return_slot_;
+ }
+ FeedbackSlot load_iterator_next_slot() const {
+ return load_iterator_next_slot_;
+ }
+ FeedbackSlot load_iterator_throw_slot() const {
+ return load_iterator_throw_slot_;
+ }
+ FeedbackSlot load_output_done_slot() const { return load_output_done_slot_; }
+ FeedbackSlot load_output_value_slot() const {
+ return load_output_value_slot_;
+ }
+ FeedbackSlot call_iterable_iterator_slot() const {
+ return call_iterable_iterator_slot_;
+ }
+ FeedbackSlot call_iterator_return_slot1() const {
+ return call_iterator_return_slot1_;
+ }
+ FeedbackSlot call_iterator_return_slot2() const {
+ return call_iterator_return_slot2_;
+ }
+ FeedbackSlot call_iterator_next_slot() const {
+ return call_iterator_next_slot_;
+ }
+ FeedbackSlot call_iterator_throw_slot() const {
+ return call_iterator_throw_slot_;
+ }
+
+ private:
+ friend class AstNodeFactory;
+
+ YieldStar(Expression* expression, int pos)
+ : Suspend(kYieldStar, expression, pos,
+ Suspend::OnAbruptResume::kNoControl) {}
+
+ FeedbackSlot load_iterable_iterator_slot_;
+ FeedbackSlot load_iterator_return_slot_;
+ FeedbackSlot load_iterator_next_slot_;
+ FeedbackSlot load_iterator_throw_slot_;
+ FeedbackSlot load_output_done_slot_;
+ FeedbackSlot load_output_value_slot_;
+ FeedbackSlot call_iterable_iterator_slot_;
+ FeedbackSlot call_iterator_return_slot1_;
+ FeedbackSlot call_iterator_return_slot2_;
+ FeedbackSlot call_iterator_next_slot_;
+ FeedbackSlot call_iterator_throw_slot_;
+};
+
+class Await final : public Suspend {
+ private:
+ friend class AstNodeFactory;
+
+ Await(Expression* expression, int pos)
+ : Suspend(kAwait, expression, pos, Suspend::kOnExceptionRethrow) {}
+};
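
Splitting Suspend into Yield, YieldStar and Await subclasses moves the yield/yield*/await distinction into the node type; the only per-node state left on Suspend is the abrupt-resume mode. A rough standalone sketch of that shape with stand-in types (not the real AST classes):

// Standalone sketch (not V8 code): the Suspend/Yield/YieldStar/Await
// shape, with the abrupt-resume mode as the only per-node state.
#include <cstdio>

enum class OnAbruptResume { kOnExceptionThrow, kOnExceptionRethrow, kNoControl };

struct Suspend {
  OnAbruptResume on_abrupt_resume;
  explicit Suspend(OnAbruptResume mode) : on_abrupt_resume(mode) {}
  virtual const char* Name() const = 0;
  virtual ~Suspend() = default;
};

struct Yield : Suspend {
  explicit Yield(OnAbruptResume mode) : Suspend(mode) {}
  const char* Name() const override { return "Yield"; }
};

struct YieldStar : Suspend {
  // yield* never throws into or returns from the generator on resume.
  YieldStar() : Suspend(OnAbruptResume::kNoControl) {}
  const char* Name() const override { return "YieldStar"; }
};

struct Await : Suspend {
  // Await is constructed with the rethrow mode, as in the diff above.
  Await() : Suspend(OnAbruptResume::kOnExceptionRethrow) {}
  const char* Name() const override { return "Await"; }
};

int main() {
  Yield y(OnAbruptResume::kOnExceptionThrow);
  YieldStar ys;
  Await a;
  std::printf("%s %s %s\n", y.Name(), ys.Name(), a.Name());
}
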
class Throw final : public Expression {
public:
@@ -2614,7 +2387,15 @@ class FunctionLiteral final : public Expression {
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
- Handle<String> name() const { return raw_name_->string(); }
+ // Empty handle means that the function does not have a shared name (i.e.
+ // the name will be set dynamically after creation of the function closure).
+ MaybeHandle<String> name() const {
+ return raw_name_ ? raw_name_->string() : MaybeHandle<String>();
+ }
+ Handle<String> name(Isolate* isolate) const {
+ return raw_name_ ? raw_name_->string() : isolate->factory()->empty_string();
+ }
+ bool has_shared_name() const { return raw_name_ != nullptr; }
const AstConsString* raw_name() const { return raw_name_; }
void set_raw_name(const AstConsString* name) { raw_name_ = name; }
DeclarationScope* scope() const { return scope_; }
@@ -2623,7 +2404,6 @@ class FunctionLiteral final : public Expression {
int function_token_position() const { return function_token_position_; }
int start_position() const;
int end_position() const;
- int SourceSize() const { return end_position() - start_position(); }
bool is_declaration() const { return function_type() == kDeclaration; }
bool is_named_expression() const {
return function_type() == kNamedExpression;
@@ -2676,7 +2456,6 @@ class FunctionLiteral final : public Expression {
return raw_inferred_name_->string();
}
UNREACHABLE();
- return Handle<String>();
}
// Only one of {set_inferred_name, set_raw_inferred_name} should be called.
@@ -2725,14 +2504,24 @@ class FunctionLiteral final : public Expression {
}
FunctionKind kind() const;
- int ast_node_count() { return ast_properties_.node_count(); }
- AstProperties::Flags flags() const { return ast_properties_.flags(); }
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
+ int ast_node_count() { return ast_properties_.node_count(); }
const FeedbackVectorSpec* feedback_vector_spec() const {
return ast_properties_.get_spec();
}
+
+ bool must_use_ignition() { return MustUseIgnitionField::decode(bit_field_); }
+ void set_must_use_ignition() {
+ bit_field_ = MustUseIgnitionField::update(bit_field_, true);
+ }
+
+ bool dont_self_optimize() { return DontSelfOptimize::decode(bit_field_); }
+ void set_dont_self_optimize() {
+ bit_field_ = DontSelfOptimize::update(bit_field_, true);
+ }
+
bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
BailoutReason dont_optimize_reason() {
return DontOptimizeReasonField::decode(bit_field_);
@@ -2757,19 +2546,23 @@ class FunctionLiteral final : public Expression {
function_literal_id_ = function_literal_id;
}
+ ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
+ return produced_preparsed_scope_data_;
+ }
+
void ReplaceBodyAndScope(FunctionLiteral* other);
private:
friend class AstNodeFactory;
- FunctionLiteral(Zone* zone, const AstRawString* name,
- AstValueFactory* ast_value_factory, DeclarationScope* scope,
- ZoneList<Statement*>* body, int expected_property_count,
- int parameter_count, int function_length,
- FunctionType function_type,
- ParameterFlag has_duplicate_parameters,
- EagerCompileHint eager_compile_hint, int position,
- bool has_braces, int function_literal_id)
+ FunctionLiteral(
+ Zone* zone, const AstRawString* name, AstValueFactory* ast_value_factory,
+ DeclarationScope* scope, ZoneList<Statement*>* body,
+ int expected_property_count, int parameter_count, int function_length,
+ FunctionType function_type, ParameterFlag has_duplicate_parameters,
+ EagerCompileHint eager_compile_hint, int position, bool has_braces,
+ int function_literal_id,
+ ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr)
: Expression(position, kFunctionLiteral),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
@@ -2777,12 +2570,13 @@ class FunctionLiteral final : public Expression {
function_token_position_(kNoSourcePosition),
suspend_count_(0),
has_braces_(has_braces),
- raw_name_(ast_value_factory->NewConsString(name)),
+ raw_name_(name ? ast_value_factory->NewConsString(name) : nullptr),
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_cons_string()),
ast_properties_(zone),
- function_literal_id_(function_literal_id) {
+ function_literal_id_(function_literal_id),
+ produced_preparsed_scope_data_(produced_preparsed_scope_data) {
bit_field_ |= FunctionTypeBits::encode(function_type) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
@@ -2799,9 +2593,13 @@ class FunctionLiteral final : public Expression {
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class ShouldNotBeUsedOnceHintField
: public BitField<bool, HasDuplicateParameters::kNext, 1> {};
+ class MustUseIgnitionField
+ : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
+ // TODO(6409): Remove when Full-Codegen dies.
+ class DontSelfOptimize
+ : public BitField<bool, MustUseIgnitionField::kNext, 1> {};
class DontOptimizeReasonField
- : public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
- };
+ : public BitField<BailoutReason, DontSelfOptimize::kNext, 8> {};
int expected_property_count_;
int parameter_count_;
@@ -2818,6 +2616,7 @@ class FunctionLiteral final : public Expression {
AstProperties ast_properties_;
int function_literal_id_;
FeedbackSlot literal_feedback_slot_;
+ ProducedPreParsedScopeData* produced_preparsed_scope_data_;
};
// Property is used for passing information
@@ -3042,8 +2841,6 @@ class GetIterator final : public Expression {
Expression* iterable() const { return iterable_; }
void set_iterable(Expression* iterable) { iterable_ = iterable; }
- static int num_ids() { return parent_num_ids(); }
-
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
iterator_property_feedback_slot_ = spec->AddLoadICSlot();
@@ -3070,14 +2867,35 @@ class GetIterator final : public Expression {
return async_iterator_call_feedback_slot_;
}
+ Expression* iterable_for_call_printer() const {
+ return destructured_iterable_ != nullptr ? destructured_iterable_
+ : iterable_;
+ }
+
private:
friend class AstNodeFactory;
- explicit GetIterator(Expression* iterable, IteratorType hint, int pos)
- : Expression(pos, kGetIterator), hint_(hint), iterable_(iterable) {}
+ GetIterator(Expression* iterable, Expression* destructured_iterable,
+ IteratorType hint, int pos)
+ : Expression(pos, kGetIterator),
+ hint_(hint),
+ iterable_(iterable),
+ destructured_iterable_(destructured_iterable) {}
+
+ GetIterator(Expression* iterable, IteratorType hint, int pos)
+ : Expression(pos, kGetIterator),
+ hint_(hint),
+ iterable_(iterable),
+ destructured_iterable_(nullptr) {}
IteratorType hint_;
Expression* iterable_;
+
+ // iterable_ is the variable proxy, while destructured_iterable_ points to
+ // the raw value stored in the variable proxy. This is only used for
+ // pretty printing error messages.
+ Expression* destructured_iterable_;
+
FeedbackSlot iterator_property_feedback_slot_;
FeedbackSlot iterator_call_feedback_slot_;
FeedbackSlot async_iterator_property_feedback_slot_;
@@ -3289,7 +3107,6 @@ class AstNodeFactory final BASE_EMBEDDED {
}
}
UNREACHABLE();
- return NULL;
}
ForOfStatement* NewForOfStatement(ZoneList<const AstRawString*>* labels,
@@ -3309,14 +3126,16 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) BreakStatement(target, pos);
}
- ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
- return new (zone_)
- ReturnStatement(expression, ReturnStatement::kNormal, pos);
+ ReturnStatement* NewReturnStatement(Expression* expression, int pos,
+ int end_position = kNoSourcePosition) {
+ return new (zone_) ReturnStatement(expression, ReturnStatement::kNormal,
+ pos, end_position);
}
- ReturnStatement* NewAsyncReturnStatement(Expression* expression, int pos) {
- return new (zone_)
- ReturnStatement(expression, ReturnStatement::kAsyncReturn, pos);
+ ReturnStatement* NewAsyncReturnStatement(
+ Expression* expression, int pos, int end_position = kNoSourcePosition) {
+ return new (zone_) ReturnStatement(
+ expression, ReturnStatement::kAsyncReturn, pos, end_position);
}
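
The factory methods above thread an optional end position through to ReturnStatement, defaulting it so existing two-argument callers are unchanged. A trivial standalone sketch of that defaulted-sentinel pattern (the sentinel value and names here are stand-ins):

// Standalone sketch (not V8 code): a defaulted "no position" sentinel,
// as NewReturnStatement / NewAsyncReturnStatement use above.
#include <cstdio>

static const int kNoSourcePosition = -1;  // stand-in sentinel value

struct ReturnStmt {
  int pos;
  int end_position;
};

ReturnStmt NewReturn(int pos, int end_position = kNoSourcePosition) {
  return ReturnStmt{pos, end_position};
}

int main() {
  ReturnStmt a = NewReturn(10);      // end position unknown
  ReturnStmt b = NewReturn(10, 42);  // explicit end position
  std::printf("%d %d\n", a.end_position, b.end_position);
}
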
WithStatement* NewWithStatement(Scope* scope,
@@ -3326,10 +3145,8 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) WithStatement(scope, expression, statement, pos);
}
- IfStatement* NewIfStatement(Expression* condition,
- Statement* then_statement,
- Statement* else_statement,
- int pos) {
+ IfStatement* NewIfStatement(Expression* condition, Statement* then_statement,
+ Statement* else_statement, int pos) {
return new (zone_)
IfStatement(condition, then_statement, else_statement, pos);
}
@@ -3382,8 +3199,8 @@ class AstNodeFactory final BASE_EMBEDDED {
SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
}
- CaseClause* NewCaseClause(
- Expression* label, ZoneList<Statement*>* statements, int pos) {
+ CaseClause* NewCaseClause(Expression* label, ZoneList<Statement*>* statements,
+ int pos) {
return new (zone_) CaseClause(label, statements, pos);
}
@@ -3473,6 +3290,10 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) VariableProxy(proxy);
}
+ Variable* CopyVariable(Variable* variable) {
+ return new (zone_) Variable(variable);
+ }
+
Property* NewProperty(Expression* obj, Expression* key, int pos) {
return new (zone_) Property(obj, key, pos);
}
@@ -3565,12 +3386,20 @@ class AstNodeFactory final BASE_EMBEDDED {
return assign;
}
- Suspend* NewSuspend(Expression* generator_object, Expression* expression,
- int pos, Suspend::OnException on_exception,
- SuspendFlags flags) {
+ Suspend* NewYield(Expression* expression, int pos,
+ Suspend::OnAbruptResume on_abrupt_resume) {
if (!expression) expression = NewUndefinedLiteral(pos);
- return new (zone_)
- Suspend(generator_object, expression, pos, on_exception, flags);
+ return new (zone_) Yield(expression, pos, on_abrupt_resume);
+ }
+
+ YieldStar* NewYieldStar(Expression* expression, int pos) {
+ DCHECK_NOT_NULL(expression);
+ return new (zone_) YieldStar(expression, pos);
+ }
+
+ Await* NewAwait(Expression* expression, int pos) {
+ if (!expression) expression = NewUndefinedLiteral(pos);
+ return new (zone_) Await(expression, pos);
}
Throw* NewThrow(Expression* exception, int pos) {
@@ -3584,12 +3413,13 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
- bool has_braces, int function_literal_id) {
+ bool has_braces, int function_literal_id,
+ ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr) {
return new (zone_) FunctionLiteral(
zone_, name, ast_value_factory_, scope, body, expected_property_count,
parameter_count, function_length, function_type,
has_duplicate_parameters, eager_compile_hint, position, has_braces,
- function_literal_id);
+ function_literal_id, produced_preparsed_scope_data);
}
// Creates a FunctionLiteral representing a top-level script, the
@@ -3662,6 +3492,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyParentheses(pos);
}
+ GetIterator* NewGetIterator(Expression* iterable,
+ Expression* destructured_iterable,
+ IteratorType hint, int pos) {
+ return new (zone_) GetIterator(iterable, destructured_iterable, hint, pos);
+ }
+
GetIterator* NewGetIterator(Expression* iterable, IteratorType hint,
int pos) {
return new (zone_) GetIterator(iterable, hint, pos);
@@ -3723,7 +3559,6 @@ class AstNodeFactory final BASE_EMBEDDED {
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/compile-time-value.cc b/deps/v8/src/ast/compile-time-value.cc
index b86343d059..4345e774ee 100644
--- a/deps/v8/src/ast/compile-time-value.cc
+++ b/deps/v8/src/ast/compile-time-value.cc
@@ -15,8 +15,9 @@ namespace internal {
bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
if (expression->IsLiteral()) return true;
- MaterializedLiteral* lit = expression->AsMaterializedLiteral();
- return lit != NULL && lit->is_simple();
+ MaterializedLiteral* literal = expression->AsMaterializedLiteral();
+ if (literal == nullptr) return false;
+ return literal->IsSimple();
}
Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
@@ -33,7 +34,7 @@ Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
- DCHECK(array_literal != NULL && array_literal->is_simple());
+ DCHECK(array_literal->is_simple());
result->set(kLiteralTypeSlot, Smi::FromInt(kArrayLiteralFlag));
result->set(kElementsSlot, *array_literal->constant_elements());
}
@@ -41,7 +42,7 @@ Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
}
int CompileTimeValue::GetLiteralTypeFlags(Handle<FixedArray> value) {
- return Smi::cast(value->get(kLiteralTypeSlot))->value();
+ return Smi::ToInt(value->get(kLiteralTypeSlot));
}
Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
diff --git a/deps/v8/src/ast/context-slot-cache.cc b/deps/v8/src/ast/context-slot-cache.cc
index 45482181ab..b523330502 100644
--- a/deps/v8/src/ast/context-slot-cache.cc
+++ b/deps/v8/src/ast/context-slot-cache.cc
@@ -30,8 +30,9 @@ int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
int index = Hash(data, name);
+ DCHECK(name->IsInternalizedString());
Key& key = keys_[index];
- if ((key.data == data) && key.name->Equals(name)) {
+ if (key.data == data && key.name == name) {
Value result(values_[index]);
if (mode != nullptr) *mode = result.mode();
if (init_flag != nullptr) *init_flag = result.initialization_flag();
@@ -46,23 +47,18 @@ void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
VariableMode mode, InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
- DisallowHeapAllocation no_gc;
- Handle<String> internalized_name;
- DCHECK(slot_index > kNotFound);
- if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name)
- .ToHandle(&internalized_name)) {
- int index = Hash(*data, *internalized_name);
- Key& key = keys_[index];
- key.data = *data;
- key.name = *internalized_name;
- // Please note value only takes a uint as index.
- values_[index] =
- Value(mode, init_flag, maybe_assigned_flag, slot_index - kNotFound)
- .raw();
+ DCHECK(name->IsInternalizedString());
+ DCHECK_LT(kNotFound, slot_index);
+ int index = Hash(*data, *name);
+ Key& key = keys_[index];
+ key.data = *data;
+ key.name = *name;
+ // Please note value only takes a uint as index.
+ values_[index] =
+ Value(mode, init_flag, maybe_assigned_flag, slot_index - kNotFound).raw();
#ifdef DEBUG
- ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
+ ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
#endif
- }
}
void ContextSlotCache::Clear() {
@@ -76,20 +72,16 @@ void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
- DisallowHeapAllocation no_gc;
- Handle<String> internalized_name;
- if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name)
- .ToHandle(&internalized_name)) {
- int index = Hash(*data, *name);
- Key& key = keys_[index];
- DCHECK(key.data == *data);
- DCHECK(key.name->Equals(*name));
- Value result(values_[index]);
- DCHECK(result.mode() == mode);
- DCHECK(result.initialization_flag() == init_flag);
- DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
- DCHECK(result.index() + kNotFound == slot_index);
- }
+ DCHECK(name->IsInternalizedString());
+ int index = Hash(*data, *name);
+ Key& key = keys_[index];
+ DCHECK_EQ(key.data, *data);
+ DCHECK_EQ(key.name, *name);
+ Value result(values_[index]);
+ DCHECK_EQ(result.mode(), mode);
+ DCHECK_EQ(result.initialization_flag(), init_flag);
+ DCHECK_EQ(result.maybe_assigned_flag(), maybe_assigned_flag);
+ DCHECK_EQ(result.index() + kNotFound, slot_index);
}
#endif // DEBUG
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 9d3a23535e..6b9932b191 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -12,28 +12,33 @@
namespace v8 {
namespace internal {
-void ModuleDescriptor::AddImport(
- const AstRawString* import_name, const AstRawString* local_name,
- const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+void ModuleDescriptor::AddImport(const AstRawString* import_name,
+ const AstRawString* local_name,
+ const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc,
+ Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->import_name = import_name;
- entry->module_request = AddModuleRequest(module_request);
+ entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddRegularImport(entry);
}
-
-void ModuleDescriptor::AddStarImport(
- const AstRawString* local_name, const AstRawString* module_request,
- Scanner::Location loc, Zone* zone) {
+void ModuleDescriptor::AddStarImport(const AstRawString* local_name,
+ const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc,
+ Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
- entry->module_request = AddModuleRequest(module_request);
+ entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddNamespaceImport(entry, zone);
}
-void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request) {
- AddModuleRequest(module_request);
+void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request,
+ const Scanner::Location specifier_loc) {
+ AddModuleRequest(module_request, specifier_loc);
}
@@ -46,24 +51,27 @@ void ModuleDescriptor::AddExport(
AddRegularExport(entry);
}
-
-void ModuleDescriptor::AddExport(
- const AstRawString* import_name, const AstRawString* export_name,
- const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+void ModuleDescriptor::AddExport(const AstRawString* import_name,
+ const AstRawString* export_name,
+ const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc,
+ Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
Entry* entry = new (zone) Entry(loc);
entry->export_name = export_name;
entry->import_name = import_name;
- entry->module_request = AddModuleRequest(module_request);
+ entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddSpecialExport(entry, zone);
}
-
-void ModuleDescriptor::AddStarExport(
- const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+void ModuleDescriptor::AddStarExport(const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc,
+ Zone* zone) {
Entry* entry = new (zone) Entry(loc);
- entry->module_request = AddModuleRequest(module_request);
+ entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddSpecialExport(entry, zone);
}
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 1eb6f44796..d44bb46c75 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -32,20 +32,23 @@ class ModuleDescriptor : public ZoneObject {
// import x from "foo.js";
// import {x} from "foo.js";
// import {x as y} from "foo.js";
- void AddImport(
- const AstRawString* import_name, const AstRawString* local_name,
- const AstRawString* module_request, const Scanner::Location loc,
- Zone* zone);
+ void AddImport(const AstRawString* import_name,
+ const AstRawString* local_name,
+ const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone);
// import * as x from "foo.js";
- void AddStarImport(
- const AstRawString* local_name, const AstRawString* module_request,
- const Scanner::Location loc, Zone* zone);
+ void AddStarImport(const AstRawString* local_name,
+ const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone);
// import "foo.js";
// import {} from "foo.js";
// export {} from "foo.js"; (sic!)
- void AddEmptyImport(const AstRawString* module_request);
+ void AddEmptyImport(const AstRawString* module_request,
+ const Scanner::Location specifier_loc);
// export {x};
// export {x as y};
@@ -58,15 +61,16 @@ class ModuleDescriptor : public ZoneObject {
// export {x} from "foo.js";
// export {x as y} from "foo.js";
- void AddExport(
- const AstRawString* export_name, const AstRawString* import_name,
- const AstRawString* module_request, const Scanner::Location loc,
- Zone* zone);
+ void AddExport(const AstRawString* export_name,
+ const AstRawString* import_name,
+ const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone);
// export * from "foo.js";
- void AddStarExport(
- const AstRawString* module_request, const Scanner::Location loc,
- Zone* zone);
+ void AddStarExport(const AstRawString* module_request,
+ const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone);
// Check if module is well-formed and report error if not.
// Also canonicalize indirect exports.
@@ -114,8 +118,14 @@ class ModuleDescriptor : public ZoneObject {
enum CellIndexKind { kInvalid, kExport, kImport };
static CellIndexKind GetCellIndexKind(int cell_index);
+ struct ModuleRequest {
+ int index;
+ int position;
+ ModuleRequest(int index, int position) : index(index), position(position) {}
+ };
+
// Module requests.
- const ZoneMap<const AstRawString*, int>& module_requests() const {
+ const ZoneMap<const AstRawString*, ModuleRequest>& module_requests() const {
return module_requests_;
}
@@ -179,7 +189,7 @@ class ModuleDescriptor : public ZoneObject {
private:
// TODO(neis): Use STL datastructure instead of ZoneList?
- ZoneMap<const AstRawString*, int> module_requests_;
+ ZoneMap<const AstRawString*, ModuleRequest> module_requests_;
ZoneList<const Entry*> special_exports_;
ZoneList<const Entry*> namespace_imports_;
ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
@@ -212,13 +222,16 @@ class ModuleDescriptor : public ZoneObject {
// Assign a cell_index of 0 to anything else.
void AssignCellIndices();
- int AddModuleRequest(const AstRawString* specifier) {
+ int AddModuleRequest(const AstRawString* specifier,
+ Scanner::Location specifier_loc) {
DCHECK_NOT_NULL(specifier);
int module_requests_count = static_cast<int>(module_requests_.size());
auto it = module_requests_
- .insert(std::make_pair(specifier, module_requests_count))
+ .insert(std::make_pair(specifier,
+ ModuleRequest(module_requests_count,
+ specifier_loc.beg_pos)))
.first;
- return it->second;
+ return it->second.index;
}
};
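
The modules.h hunk above replaces the plain specifier-to-index map with a ModuleRequest that records both the request index and the source position of the import specifier; AddModuleRequest deduplicates repeated specifiers, so later imports of the same module reuse the first index and position. A minimal standalone sketch of that behaviour (plain STL types; RequestMap and its names are invented for illustration and are not V8 code):

```cpp
// Illustrative sketch only: the deduplicating map behind AddModuleRequest.
// Each distinct specifier gets one index; the recorded position is that of
// its first occurrence in the source.
#include <cstdio>
#include <map>
#include <string>

struct ModuleRequest {
  int index;
  int position;  // source position of the specifier's first occurrence
};

class RequestMap {
 public:
  // Returns the request index for |specifier|, inserting a new entry only if
  // the specifier has not been seen before.
  int Add(const std::string& specifier, int position) {
    int next_index = static_cast<int>(requests_.size());
    auto it = requests_
                  .insert(std::make_pair(specifier,
                                         ModuleRequest{next_index, position}))
                  .first;
    return it->second.index;
  }

 private:
  std::map<std::string, ModuleRequest> requests_;
};

int main() {
  RequestMap requests;
  std::printf("%d\n", requests.Add("foo.js", 10));  // 0
  std::printf("%d\n", requests.Add("bar.js", 25));  // 1
  std::printf("%d\n", requests.Add("foo.js", 40));  // 0 again: deduplicated,
                                                    // position 10 is kept
  return 0;
}
```
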
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 21ce932a08..78d92b9936 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -22,10 +22,15 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
num_prints_ = 0;
found_ = false;
done_ = false;
+ iterator_hint_ = IteratorHint::kNone;
is_user_js_ = is_user_js;
InitializeAstVisitor(isolate);
}
+CallPrinter::IteratorHint CallPrinter::GetIteratorHint() const {
+ return iterator_hint_;
+}
+
Handle<String> CallPrinter::Print(FunctionLiteral* program, int position) {
num_prints_ = 0;
position_ = position;
@@ -223,9 +228,11 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
void CallPrinter::VisitObjectLiteral(ObjectLiteral* node) {
+ Print("{");
for (int i = 0; i < node->properties()->length(); i++) {
Find(node->properties()->at(i)->value());
}
+ Print("}");
}
@@ -254,7 +261,11 @@ void CallPrinter::VisitAssignment(Assignment* node) {
Find(node->value());
}
-void CallPrinter::VisitSuspend(Suspend* node) { Find(node->expression()); }
+void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
+
+void CallPrinter::VisitYieldStar(YieldStar* node) { Find(node->expression()); }
+
+void CallPrinter::VisitAwait(Await* node) { Find(node->expression()); }
void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); }
@@ -370,17 +381,14 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
}
void CallPrinter::VisitGetIterator(GetIterator* node) {
- // Because CallPrinter is used by RenderCallSite() in runtime-internal.cc,
- // and the GetIterator node results in a Call, either to a [@@iterator] or
- // [@@asyncIterator]. It's unknown which call this error refers to, so we
- // assume it's the first call.
bool was_found = !found_ && node->position() == position_;
if (was_found) {
found_ = true;
+ iterator_hint_ = node->hint() == IteratorType::kNormal
+ ? IteratorHint::kNormal
+ : IteratorHint::kAsync;
}
- Find(node->iterable(), true);
- Print(node->hint() == IteratorType::kNormal ? "[Symbol.iterator]"
- : "[Symbol.asyncIterator]");
+ Find(node->iterable_for_call_printer(), true);
if (was_found) done_ = true;
}
@@ -515,7 +523,12 @@ void AstPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
}
}
-void AstPrinter::PrintLiteral(Handle<Object> value, bool quote) {
+void AstPrinter::PrintLiteral(MaybeHandle<Object> maybe_value, bool quote) {
+ Handle<Object> value;
+ if (!maybe_value.ToHandle(&value)) {
+ Print("<nil>");
+ return;
+ }
Object* object = *value;
if (object->IsString()) {
String* string = String::cast(object);
@@ -539,8 +552,7 @@ void AstPrinter::PrintLiteral(Handle<Object> value, bool quote) {
if (object->IsJSFunction()) {
Print("JS-Function");
} else if (object->IsJSArray()) {
- Print("JS-array[%u]",
- Smi::cast(JSArray::cast(object)->length())->value());
+ Print("JS-array[%u]", Smi::ToInt(JSArray::cast(object)->length()));
} else if (object->IsJSObject()) {
Print("JS-Object");
} else {
@@ -617,13 +629,12 @@ void AstPrinter::PrintIndented(const char* txt) {
Print("%s", txt);
}
-
void AstPrinter::PrintLiteralIndented(const char* info,
- Handle<Object> value,
+ MaybeHandle<Object> maybe_value,
bool quote) {
PrintIndented(info);
Print(" ");
- PrintLiteral(value, quote);
+ PrintLiteral(maybe_value, quote);
Print("\n");
}
@@ -869,24 +880,10 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH", node->position());
- PrintTryStatement(node);
- PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
- node->scope()->catch_variable()->name());
- PrintIndentedVisit("CATCH", node->catch_block());
-}
-
-
-void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- IndentedScope indent(this, "TRY FINALLY", node->position());
- PrintTryStatement(node);
- PrintIndentedVisit("FINALLY", node->finally_block());
-}
-
-void AstPrinter::PrintTryStatement(TryStatement* node) {
PrintIndentedVisit("TRY", node->try_block());
PrintIndented("CATCH PREDICTION");
const char* prediction = "";
- switch (node->catch_prediction()) {
+ switch (node->GetCatchPrediction(HandlerTable::UNCAUGHT)) {
case HandlerTable::UNCAUGHT:
prediction = "UNCAUGHT";
break;
@@ -905,6 +902,15 @@ void AstPrinter::PrintTryStatement(TryStatement* node) {
UNREACHABLE();
}
Print(" %s\n", prediction);
+ PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
+ node->scope()->catch_variable()->name());
+ PrintIndentedVisit("CATCH", node->catch_block());
+}
+
+void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ IndentedScope indent(this, "TRY FINALLY", node->position());
+ PrintIndentedVisit("TRY", node->try_block());
+ PrintIndentedVisit("FINALLY", node->finally_block());
}
void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
@@ -1108,13 +1114,26 @@ void AstPrinter::VisitAssignment(Assignment* node) {
Visit(node->value());
}
-void AstPrinter::VisitSuspend(Suspend* node) {
+void AstPrinter::VisitYield(Yield* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "SUSPEND id %d", node->suspend_id());
+ SNPrintF(buf, "YIELD id %d", node->suspend_id());
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
+void AstPrinter::VisitYieldStar(YieldStar* node) {
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "YIELD_STAR id %d", node->suspend_id());
+ IndentedScope indent(this, buf.start(), node->position());
+ Visit(node->expression());
+}
+
+void AstPrinter::VisitAwait(Await* node) {
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "AWAIT id %d", node->suspend_id());
+ IndentedScope indent(this, buf.start(), node->position());
+ Visit(node->expression());
+}
void AstPrinter::VisitThrow(Throw* node) {
IndentedScope indent(this, "THROW", node->position());
@@ -1139,9 +1158,7 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
- const char* name =
- node->tail_call_mode() == TailCallMode::kAllow ? "TAIL CALL" : "CALL";
- FormatSlotNode(&buf, node, name, node->CallFeedbackICSlot());
+ FormatSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
IndentedScope indent(this, buf.start());
Visit(node->expression());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index fdc079ca07..298e083251 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -20,6 +20,8 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
// The following routine prints the node with position |position| into a
// string.
Handle<String> Print(FunctionLiteral* program, int position);
+ enum IteratorHint { kNone, kNormal, kAsync };
+ IteratorHint GetIteratorHint() const;
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node);
@@ -39,6 +41,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
bool found_;
bool done_;
bool is_user_js_;
+ IteratorHint iterator_hint_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -79,7 +82,7 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void PrintLabels(ZoneList<const AstRawString*>* labels);
void PrintLiteral(const AstRawString* value, bool quote);
- void PrintLiteral(Handle<Object> value, bool quote);
+ void PrintLiteral(MaybeHandle<Object> maybe_value, bool quote);
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, AstNode* node);
@@ -88,14 +91,14 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void PrintParameters(DeclarationScope* scope);
void PrintArguments(ZoneList<Expression*>* arguments);
void PrintCaseClause(CaseClause* clause);
- void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
+ void PrintLiteralIndented(const char* info, MaybeHandle<Object> maybe_value,
+ bool quote);
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value);
void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
void PrintObjectProperties(ZoneList<ObjectLiteral::Property*>* properties);
void PrintClassProperties(ZoneList<ClassLiteral::Property*>* properties);
- void PrintTryStatement(TryStatement* try_statement);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index f4c21d7513..2fdb599c35 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -54,7 +54,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
// by the same AstRawString*.
// FIXME(marja): fix the type of Lookup.
Entry* p =
- ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
ZoneAllocationPolicy(zone));
if (added) *added = p->value == nullptr;
if (p->value == nullptr) {
@@ -69,7 +69,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
VariableMode mode) {
Entry* p =
- ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
ZoneAllocationPolicy(zone));
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
@@ -82,13 +82,13 @@ Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
void VariableMap::Remove(Variable* var) {
const AstRawString* name = var->raw_name();
- ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
+ ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->Hash());
}
void VariableMap::Add(Zone* zone, Variable* var) {
const AstRawString* name = var->raw_name();
Entry* p =
- ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
ZoneAllocationPolicy(zone));
DCHECK_NULL(p->value);
DCHECK_EQ(name, p->key);
@@ -96,7 +96,7 @@ void VariableMap::Add(Zone* zone, Variable* var) {
}
Variable* VariableMap::Lookup(const AstRawString* name) {
- Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash());
+ Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->Hash());
if (p != NULL) {
DCHECK(reinterpret_cast<const AstRawString*>(p->key) == name);
DCHECK(p->value != NULL);
@@ -121,7 +121,7 @@ void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
Entry* p =
- ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->Hash(),
ZoneAllocationPolicy(zone));
delegate->set_next(static_cast<SloppyBlockFunctionMap::Delegate*>(p->value));
p->value = delegate;
@@ -314,6 +314,7 @@ void DeclarationScope::SetDefaults() {
should_eager_compile_ = false;
was_lazily_parsed_ = false;
is_skipped_function_ = false;
+ produced_preparsed_scope_data_ = nullptr;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@@ -348,6 +349,7 @@ void Scope::SetDefaults() {
inner_scope_calls_eval_ = false;
force_context_allocation_ = false;
+ force_context_allocation_for_parameters_ = false;
is_declaration_scope_ = false;
@@ -580,9 +582,10 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
if (factory) {
DCHECK(!is_being_lazily_parsed_);
- Expression* assignment = factory->NewAssignment(
+ Assignment* assignment = factory->NewAssignment(
Token::ASSIGN, NewUnresolved(factory, name),
delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+ assignment->set_lookup_hoisting_mode(LookupHoistingMode::kLegacySloppy);
Statement* statement =
factory->NewExpressionStatement(assignment, kNoSourcePosition);
delegate->set_statement(statement);
@@ -663,9 +666,8 @@ void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
if (scope->must_use_preparsed_scope_data_) {
DCHECK(FLAG_experimental_preparser_scope_analysis);
- DCHECK_NOT_NULL(info->preparsed_scope_data());
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
- info->preparsed_scope_data()->RestoreData(scope);
+ info->consumed_preparsed_scope_data()->RestoreScopeAllocationData(scope);
}
scope->AllocateVariables(info, isolate, mode);
@@ -767,16 +769,6 @@ Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
return result;
}
-Variable* DeclarationScope::DeclareAsyncGeneratorAwaitVar(
- const AstRawString* name) {
- DCHECK(is_function_scope());
- DCHECK_NULL(async_generator_await_var());
- Variable* result = EnsureRareData()->promise = NewTemporary(name);
- DCHECK_NULL(promise_var()); // promise is alias for generator await var
- result->set_is_used();
- return result;
-}
-
bool Scope::HasBeenRemoved() const {
if (sibling() == this) {
DCHECK_NULL(inner_scope_);
@@ -1022,7 +1014,9 @@ Variable* DeclarationScope::DeclareParameter(
DCHECK_EQ(mode, VAR);
var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
- *is_duplicate = IsDeclaredParameter(name);
+ if (is_duplicate != nullptr) {
+ *is_duplicate = *is_duplicate || IsDeclaredParameter(name);
+ }
}
has_rest_ = is_rest;
var->set_initializer_position(position);
@@ -1034,8 +1028,8 @@ Variable* DeclarationScope::DeclareParameter(
}
Variable* DeclarationScope::DeclareParameterName(
- const AstRawString* name, bool is_rest,
- AstValueFactory* ast_value_factory) {
+ const AstRawString* name, bool is_rest, AstValueFactory* ast_value_factory,
+ bool declare_as_local, bool add_parameter) {
DCHECK(!already_resolved_);
DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_ || is_rest);
@@ -1045,8 +1039,16 @@ Variable* DeclarationScope::DeclareParameterName(
has_arguments_parameter_ = true;
}
if (FLAG_experimental_preparser_scope_analysis) {
- Variable* var = Declare(zone(), name, VAR);
- params_.Add(var, zone());
+ Variable* var;
+ if (declare_as_local) {
+ var = Declare(zone(), name, VAR);
+ } else {
+ var = new (zone())
+ Variable(this, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
+ }
+ if (add_parameter) {
+ params_.Add(var, zone());
+ }
return var;
}
DeclareVariableName(name, VAR);
@@ -1138,7 +1140,7 @@ Variable* Scope::DeclareVariable(
GetDeclarationScope()->sloppy_block_function_map();
duplicate_allowed = map != nullptr &&
map->Lookup(const_cast<AstRawString*>(name),
- name->hash()) != nullptr &&
+ name->Hash()) != nullptr &&
!IsAsyncFunction(function_kind) &&
!(allow_harmony_restrictive_generators &&
IsGeneratorFunction(function_kind));
@@ -1222,20 +1224,6 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
}
}
-VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
- const AstRawString* name,
- int start_position, VariableKind kind) {
- // Note that we must not share the unresolved variables with
- // the same name because they may be removed selectively via
- // RemoveUnresolved().
- DCHECK(!already_resolved_);
- DCHECK_EQ(factory->zone(), zone());
- VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
- proxy->set_next_unresolved(unresolved_);
- unresolved_ = proxy;
- return proxy;
-}
-
void Scope::AddUnresolved(VariableProxy* proxy) {
DCHECK(!already_resolved_);
DCHECK(!proxy->is_resolved());
@@ -1517,13 +1505,30 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
was_lazily_parsed_ = !aborted;
}
-void DeclarationScope::AnalyzePartially(
- AstNodeFactory* ast_node_factory,
- PreParsedScopeData* preparsed_scope_data) {
+void Scope::SavePreParsedScopeData() {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ if (ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(this)) {
+ AsDeclarationScope()->SavePreParsedScopeDataForDeclarationScope();
+ }
+
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->SavePreParsedScopeData();
+ }
+}
+
+void DeclarationScope::SavePreParsedScopeDataForDeclarationScope() {
+ if (produced_preparsed_scope_data_ != nullptr) {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ produced_preparsed_scope_data_->SaveScopeAllocationData(this);
+ }
+}
+
+void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
VariableProxy* unresolved = nullptr;
- if (!outer_scope_->is_script_scope()) {
+ if (!outer_scope_->is_script_scope() ||
+ FLAG_experimental_preparser_scope_analysis) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
@@ -1541,13 +1546,16 @@ void DeclarationScope::AnalyzePartially(
arguments_ = nullptr;
}
- if (FLAG_experimental_preparser_scope_analysis &&
- preparsed_scope_data->Producing()) {
- // Store the information needed for allocating the locals of this scope
- // and its inner scopes.
- preparsed_scope_data->SaveData(this);
+ // Migrate function_ to the right Zone.
+ if (function_ != nullptr) {
+ function_ = ast_node_factory->CopyVariable(function_);
+ }
+
+ if (FLAG_experimental_preparser_scope_analysis) {
+ SavePreParsedScopeData();
}
}
+
#ifdef DEBUG
if (FLAG_print_scopes) {
PrintF("Inner function scope:\n");
@@ -1580,7 +1588,6 @@ const char* Header(ScopeType scope_type, FunctionKind function_kind,
case WITH_SCOPE: return "with";
}
UNREACHABLE();
- return NULL;
}
void Indent(int n, const char* str) { PrintF("%*s%s", n, "", str); }
@@ -2182,7 +2189,8 @@ void DeclarationScope::AllocateParameterLocals() {
void DeclarationScope::AllocateParameter(Variable* var, int index) {
if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
+ if (has_forced_context_allocation_for_parameters() ||
+ MustAllocateInContext(var)) {
DCHECK(var->IsUnallocated() || var->IsContextSlot());
if (var->IsUnallocated()) {
AllocateHeapSlot(var);
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 35c0bb0b2d..eea966fe26 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -5,6 +5,7 @@
#ifndef V8_AST_SCOPES_H_
#define V8_AST_SCOPES_H_
+#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
#include "src/base/hashmap.h"
#include "src/globals.h"
@@ -20,6 +21,7 @@ class AstRawString;
class Declaration;
class ParseInfo;
class PreParsedScopeData;
+class ProducedPreParsedScopeData;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
@@ -208,8 +210,18 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
- int start_position = kNoSourcePosition,
- VariableKind kind = NORMAL_VARIABLE);
+ int start_pos = kNoSourcePosition,
+ VariableKind kind = NORMAL_VARIABLE) {
+ // Note that we must not share the unresolved variables with
+ // the same name because they may be removed selectively via
+ // RemoveUnresolved().
+ DCHECK(!already_resolved_);
+ DCHECK_EQ(factory->zone(), zone());
+ VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_pos);
+ proxy->set_next_unresolved(unresolved_);
+ unresolved_ = proxy;
+ return proxy;
+ }
void AddUnresolved(VariableProxy* proxy);
@@ -252,9 +264,16 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// eval call.
void RecordEvalCall() {
scope_calls_eval_ = true;
+ RecordInnerScopeEvalCall();
+ }
+
+ void RecordInnerScopeEvalCall() {
inner_scope_calls_eval_ = true;
for (Scope* scope = outer_scope(); scope != nullptr;
scope = scope->outer_scope()) {
+ if (scope->inner_scope_calls_eval_) {
+ return;
+ }
scope->inner_scope_calls_eval_ = true;
}
}
@@ -323,6 +342,13 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool has_forced_context_allocation() const {
return force_context_allocation_;
}
+ void ForceContextAllocationForParameters() {
+ DCHECK(!already_resolved_);
+ force_context_allocation_for_parameters_ = true;
+ }
+ bool has_forced_context_allocation_for_parameters() const {
+ return force_context_allocation_for_parameters_;
+ }
// ---------------------------------------------------------------------------
// Predicates.
@@ -428,17 +454,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return scope_info_;
}
- // ---------------------------------------------------------------------------
- // Strict mode support.
- bool IsDeclared(const AstRawString* name) {
- // During formal parameter list parsing the scope only contains
- // two variables inserted at initialization: "this" and "arguments".
- // "this" is an invalid parameter name and "arguments" is invalid parameter
- // name in strict mode. Therefore looking up with the map which includes
- // "this" and "arguments" in addition to all formal parameters is safe.
- return variables_.Lookup(name) != NULL;
- }
-
int num_var() const { return variables_.occupancy(); }
// ---------------------------------------------------------------------------
@@ -502,6 +517,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Variable* NewTemporary(const AstRawString* name,
MaybeAssignedFlag maybe_assigned);
+
+ // Walk the scope chain to find DeclarationScopes; call
+ // SavePreParsedScopeDataForDeclarationScope for each.
+ void SavePreParsedScopeData();
+
Zone* zone_;
// Scope tree.
@@ -570,6 +590,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// True if one of the inner scopes or the scope itself calls eval.
bool inner_scope_calls_eval_ : 1;
bool force_context_allocation_ : 1;
+ bool force_context_allocation_for_parameters_ : 1;
// True if it holds 'var' declarations.
bool is_declaration_scope_ : 1;
@@ -713,7 +734,6 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Ignition without ScopeInfo.
Variable* DeclareGeneratorObjectVar(const AstRawString* name);
Variable* DeclarePromiseVar(const AstRawString* name);
- Variable* DeclareAsyncGeneratorAwaitVar(const AstRawString* name);
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
@@ -725,7 +745,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Declares that a parameter with the name exists. Creates a Variable and
// returns it if FLAG_preparser_scope_analysis is on.
Variable* DeclareParameterName(const AstRawString* name, bool is_rest,
- AstValueFactory* ast_value_factory);
+ AstValueFactory* ast_value_factory,
+ bool declare_local, bool add_parameter);
// Declare an implicit global variable in this scope which must be a
// script scope. The variable was introduced (possibly from an inner
@@ -770,12 +791,6 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return GetRareVariable(RareVariable::kPromise);
}
- Variable* async_generator_await_var() const {
- DCHECK(is_function_scope());
- DCHECK(IsAsyncGeneratorFunction(function_kind_));
- return GetRareVariable(RareVariable::kAsyncGeneratorAwaitResult);
- }
-
// Parameters. The left-most parameter has index 0.
// Only valid for function and module scopes.
Variable* parameter(int index) const {
@@ -849,12 +864,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
static void Analyze(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);
// To be called during parsing. Do just enough scope analysis that we can
- // discard the Scope for lazily compiled functions. In particular, this
- // records variables which cannot be resolved inside the Scope (we don't yet
- // know what they will resolve to since the outer Scopes are incomplete) and
- // migrates them into migrate_to.
- void AnalyzePartially(AstNodeFactory* ast_node_factory,
- PreParsedScopeData* preparsed_scope_data);
+ // discard the Scope contents for lazily compiled functions. In particular,
+ // this records variables which cannot be resolved inside the Scope (we don't
+ // yet know what they will resolve to since the outer Scopes are incomplete)
+ // and recreates them with the correct Zone with ast_node_factory.
+ // and recreates them in the correct Zone using ast_node_factory.
+ void AnalyzePartially(AstNodeFactory* ast_node_factory);
Handle<StringSet> CollectNonLocals(ParseInfo* info,
Handle<StringSet> non_locals);
@@ -888,6 +902,20 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
is_skipped_function_ = is_skipped_function;
}
+ // Save data describing the context allocation of the variables in this scope
+ // and its subscopes (except scopes at the laziness boundary). The data is
+ // saved in produced_preparsed_scope_data_.
+ void SavePreParsedScopeDataForDeclarationScope();
+
+ void set_produced_preparsed_scope_data(
+ ProducedPreParsedScopeData* produced_preparsed_scope_data) {
+ produced_preparsed_scope_data_ = produced_preparsed_scope_data;
+ }
+
+ ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
+ return produced_preparsed_scope_data_;
+ }
+
private:
void AllocateParameter(Variable* var, int index);
@@ -939,6 +967,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Convenience variable; function scopes only.
Variable* arguments_;
+ // For producing the scope allocation data during preparsing.
+ ProducedPreParsedScopeData* produced_preparsed_scope_data_;
+
struct RareData : public ZoneObject {
// Convenience variable; Subclass constructor only
Variable* this_function = nullptr;
@@ -953,8 +984,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
enum class RareVariable {
kThisFunction = offsetof(RareData, this_function),
kGeneratorObject = offsetof(RareData, generator_object),
- kPromise = offsetof(RareData, promise),
- kAsyncGeneratorAwaitResult = kPromise
+ kPromise = offsetof(RareData, promise)
};
V8_INLINE RareData* EnsureRareData() {
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index c6611bd0d9..bce552c2c1 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -14,26 +14,14 @@ namespace internal {
// ----------------------------------------------------------------------------
// Implementation Variable.
-Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
- VariableKind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag)
- : scope_(scope),
- name_(name),
+Variable::Variable(Variable* other)
+ : scope_(other->scope_),
+ name_(other->name_),
local_if_not_shadowed_(nullptr),
next_(nullptr),
- index_(-1),
- initializer_position_(kNoSourcePosition),
- bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
- InitializationFlagField::encode(initialization_flag) |
- VariableModeField::encode(mode) | IsUsedField::encode(false) |
- ForceContextAllocationField::encode(false) |
- ForceHoleInitializationField::encode(false) |
- LocationField::encode(VariableLocation::UNALLOCATED) |
- VariableKindField::encode(kind)) {
- // Var declared variables never need initialization.
- DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
-}
-
+ index_(other->index_),
+ initializer_position_(other->initializer_position_),
+ bit_field_(other->bit_field_) {}
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index c01db36274..09df57ad54 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -20,7 +20,26 @@ class Variable final : public ZoneObject {
public:
Variable(Scope* scope, const AstRawString* name, VariableMode mode,
VariableKind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned)
+ : scope_(scope),
+ name_(name),
+ local_if_not_shadowed_(nullptr),
+ next_(nullptr),
+ index_(-1),
+ initializer_position_(kNoSourcePosition),
+ bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
+ InitializationFlagField::encode(initialization_flag) |
+ VariableModeField::encode(mode) |
+ IsUsedField::encode(false) |
+ ForceContextAllocationField::encode(false) |
+ ForceHoleInitializationField::encode(false) |
+ LocationField::encode(VariableLocation::UNALLOCATED) |
+ VariableKindField::encode(kind)) {
+ // Var declared variables never need initialization.
+ DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
+ }
+
+ explicit Variable(Variable* other);
// The source code for an eval() call may refer to a variable that is
// in an outer scope about which we don't know anything (it may not
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index b49b6eef5d..43a0945473 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -67,6 +67,7 @@ namespace internal {
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
+ V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedFixedDoubleArrayMap, \
"Expected a fixed double array map in fast shallow clone array literal") \
V(kExpectedFunctionObject, "Expected function object in register") \
@@ -75,6 +76,8 @@ namespace internal {
V(kExpectedNativeContext, "Expected native context") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedOptimizationSentinel, \
+ "Expected optimized code cell or optimization sentinel") \
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
@@ -142,6 +145,7 @@ namespace internal {
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
+ V(kOperandIsASmiAndNotAFixedArray, "Operand is a smi and not a fixed array") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
@@ -150,6 +154,7 @@ namespace internal {
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
+ V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotAReceiver, "Operand is not a receiver") \
diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS
index d691287b2d..5d24bda820 100644
--- a/deps/v8/src/base/OWNERS
+++ b/deps/v8/src/base/OWNERS
@@ -1 +1,3 @@
-jochen@chromium.org
+mlippautz@chromium.org
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index f4f43fc817..a7f5b3ca8e 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -6,6 +6,7 @@
#define V8_ATOMIC_UTILS_H_
#include <limits.h>
+#include <type_traits>
#include "src/base/atomicops.h"
#include "src/base/macros.h"
@@ -68,18 +69,18 @@ class NoBarrierAtomicValue {
}
V8_INLINE bool TrySetValue(T old_value, T new_value) {
- return base::NoBarrier_CompareAndSwap(
+ return base::Relaxed_CompareAndSwap(
&value_, cast_helper<T>::to_storage_type(old_value),
cast_helper<T>::to_storage_type(new_value)) ==
cast_helper<T>::to_storage_type(old_value);
}
V8_INLINE T Value() const {
- return cast_helper<T>::to_return_type(base::NoBarrier_Load(&value_));
+ return cast_helper<T>::to_return_type(base::Relaxed_Load(&value_));
}
V8_INLINE void SetValue(T new_value) {
- base::NoBarrier_Store(&value_, cast_helper<T>::to_storage_type(new_value));
+ base::Relaxed_Store(&value_, cast_helper<T>::to_storage_type(new_value));
}
private:
@@ -250,6 +251,172 @@ class AtomicEnumSet {
base::AtomicWord bits_;
};
+class AsAtomic32 {
+ public:
+ template <typename T>
+ static T Acquire_Load(T* addr) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
+ return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
+ }
+
+ template <typename T>
+ static T Relaxed_Load(T* addr) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
+ return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
+ }
+
+ template <typename T>
+ static void Release_Store(T* addr,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
+ base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
+ }
+
+ template <typename T>
+ static void Relaxed_Store(T* addr,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
+ base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
+ }
+
+ template <typename T>
+ static T Release_CompareAndSwap(
+ T* addr, typename std::remove_reference<T>::type old_value,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
+ return to_return_type<T>(base::Release_CompareAndSwap(
+ to_storage_addr(addr), to_storage_type(old_value),
+ to_storage_type(new_value)));
+ }
+
+ // Atomically sets bits selected by the mask to the given value.
+ // Returns false if the bits are already set as needed.
+ template <typename T>
+ static bool SetBits(T* addr, T bits, T mask) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
+ DCHECK_EQ(bits & ~mask, static_cast<T>(0));
+ T old_value;
+ T new_value;
+ do {
+ old_value = Relaxed_Load(addr);
+ if ((old_value & mask) == bits) return false;
+ new_value = (old_value & ~mask) | bits;
+ } while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
+ return true;
+ }
+
+ private:
+ template <typename T>
+ static base::Atomic32 to_storage_type(T value) {
+ return static_cast<base::Atomic32>(value);
+ }
+ template <typename T>
+ static T to_return_type(base::Atomic32 value) {
+ return static_cast<T>(value);
+ }
+ template <typename T>
+ static base::Atomic32* to_storage_addr(T* value) {
+ return reinterpret_cast<base::Atomic32*>(value);
+ }
+ template <typename T>
+ static const base::Atomic32* to_storage_addr(const T* value) {
+ return reinterpret_cast<const base::Atomic32*>(value);
+ }
+};
+
+class AsAtomicWord {
+ public:
+ template <typename T>
+ static T Acquire_Load(T* addr) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+ return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
+ }
+
+ template <typename T>
+ static T Relaxed_Load(T* addr) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+ return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
+ }
+
+ template <typename T>
+ static void Release_Store(T* addr,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+ base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
+ }
+
+ template <typename T>
+ static void Relaxed_Store(T* addr,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+ base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
+ }
+
+ template <typename T>
+ static T Release_CompareAndSwap(
+ T* addr, typename std::remove_reference<T>::type old_value,
+ typename std::remove_reference<T>::type new_value) {
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+ return to_return_type<T>(base::Release_CompareAndSwap(
+ to_storage_addr(addr), to_storage_type(old_value),
+ to_storage_type(new_value)));
+ }
+
+ private:
+ template <typename T>
+ static base::AtomicWord to_storage_type(T value) {
+ return reinterpret_cast<base::AtomicWord>(value);
+ }
+ template <typename T>
+ static T to_return_type(base::AtomicWord value) {
+ return reinterpret_cast<T>(value);
+ }
+ template <typename T>
+ static base::AtomicWord* to_storage_addr(T* value) {
+ return reinterpret_cast<base::AtomicWord*>(value);
+ }
+ template <typename T>
+ static const base::AtomicWord* to_storage_addr(const T* value) {
+ return reinterpret_cast<const base::AtomicWord*>(value);
+ }
+};
+
+// This class is intended to be used as a wrapper for elements of an array
+// that is passed in to STL functions such as std::sort. It ensures that
+// element accesses are atomic.
+// Usage example:
+// Object** given_array;
+// AtomicElement<Object*>* wrapped =
+// reinterpret_cast<AtomicElement<Object*>*>(given_array);
+// std::sort(wrapped, wrapped + given_length, cmp);
+// where the cmp function uses the value() accessor to compare the elements.
+template <typename T>
+class AtomicElement {
+ public:
+ AtomicElement(const AtomicElement<T>& other) {
+ AsAtomicWord::Relaxed_Store(&value_,
+ AsAtomicWord::Relaxed_Load(&other.value_));
+ }
+
+ void operator=(const AtomicElement<T>& other) {
+ AsAtomicWord::Relaxed_Store(&value_,
+ AsAtomicWord::Relaxed_Load(&other.value_));
+ }
+
+ T value() const { return AsAtomicWord::Relaxed_Load(&value_); }
+
+ bool operator<(const AtomicElement<T>& other) const {
+ return value() < other.value();
+ }
+
+ bool operator==(const AtomicElement<T>& other) const {
+ return value() == other.value();
+ }
+
+ private:
+ T value_;
+};
+
} // namespace base
} // namespace v8
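
The AtomicElement wrapper introduced above lets STL algorithms such as std::sort shuffle an array whose elements may be read concurrently, by routing every element read and write through a relaxed atomic operation. A minimal standalone sketch of the same idea, written with std::atomic because V8's base:: helpers are internal to the tree (the size/layout assumption behind the reinterpret_cast is spelled out with a static_assert):

```cpp
// Illustrative sketch only: mirrors the AtomicElement pattern with std::atomic
// and relaxed ordering, so std::sort never performs a non-atomic element access.
#include <algorithm>
#include <atomic>
#include <cstdio>

template <typename T>
class AtomicElement {
 public:
  AtomicElement(const AtomicElement& other)
      : value_(other.value_.load(std::memory_order_relaxed)) {}

  AtomicElement& operator=(const AtomicElement& other) {
    value_.store(other.value_.load(std::memory_order_relaxed),
                 std::memory_order_relaxed);
    return *this;
  }

  T value() const { return value_.load(std::memory_order_relaxed); }

  bool operator<(const AtomicElement& other) const {
    return value() < other.value();
  }

 private:
  std::atomic<T> value_;
};

int main() {
  int storage[] = {3, 1, 2};
  // Assumption (checked here): the wrapper adds no size overhead, which is
  // what makes reinterpreting the plain array legitimate in practice.
  static_assert(sizeof(AtomicElement<int>) == sizeof(int), "no overhead");
  AtomicElement<int>* wrapped = reinterpret_cast<AtomicElement<int>*>(storage);
  std::sort(wrapped, wrapped + 3);  // comparisons go through value()
  std::printf("%d %d %d\n", storage[0], storage[1], storage[2]);  // 1 2 3
  return 0;
}
```
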
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 927ebbee11..0cd1369d3e 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -14,10 +14,10 @@
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
-// You should use one of the Load or Store routines. The NoBarrier
-// versions are provided when no barriers are needed:
-// NoBarrier_Store()
-// NoBarrier_Load()
+// You should use one of the Load or Store routines. The Relaxed versions
+// are provided when no fences are needed:
+// Relaxed_Store()
+// Relaxed_Load()
// Although there are currently no compiler enforcement, you are encouraged
// to use these.
//
@@ -36,15 +36,6 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
-#if defined(V8_OS_WIN) && defined(V8_HOST_ARCH_64_BIT)
-// windows.h #defines this (only on x64). This causes problems because the
-// public API also uses MemoryBarrier at the public name for this fence. So, on
-// X64, undef it, and call its documented
-// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
-// implementation directly.
-#undef MemoryBarrier
-#endif
-
namespace v8 {
namespace base {
@@ -74,17 +65,16 @@ typedef intptr_t AtomicWord;
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
-Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value);
+Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
+ Atomic32 new_value);
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
-Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
-Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment);
@@ -95,9 +85,8 @@ Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
+// after the operation. "Fence" operations have both "Acquire" and "Release"
+// semantics. A MemoryFence() has "Fence" semantics, but does no memory access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
@@ -105,22 +94,21 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
-void MemoryBarrier();
-void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
-void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void MemoryFence();
+void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
+void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
-Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
-Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
+Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
@@ -129,9 +117,9 @@ Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
-void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
-Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
diff --git a/deps/v8/src/base/atomicops_internals_atomicword_compat.h b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
index 5071f442b4..09f75bdfd2 100644
--- a/deps/v8/src/base/atomicops_internals_atomicword_compat.h
+++ b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -23,23 +23,23 @@
namespace v8 {
namespace base {
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return NoBarrier_CompareAndSwap(
- reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+inline AtomicWord Relaxed_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return Relaxed_CompareAndSwap(reinterpret_cast<volatile Atomic32*>(ptr),
+ old_value, new_value);
}
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
- AtomicWord new_value) {
- return NoBarrier_AtomicExchange(
- reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+inline AtomicWord Relaxed_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return Relaxed_AtomicExchange(reinterpret_cast<volatile Atomic32*>(ptr),
+ new_value);
}
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile Atomic32*>(ptr), increment);
+inline AtomicWord Relaxed_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Relaxed_AtomicIncrement(reinterpret_cast<volatile Atomic32*>(ptr),
+ increment);
}
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
@@ -62,9 +62,8 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
}
-inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
- NoBarrier_Store(
- reinterpret_cast<volatile Atomic32*>(ptr), value);
+inline void Relaxed_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ Relaxed_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
@@ -72,9 +71,8 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
- return NoBarrier_Load(
- reinterpret_cast<volatile const Atomic32*>(ptr));
+inline AtomicWord Relaxed_Load(volatile const AtomicWord* ptr) {
+ return Relaxed_Load(reinterpret_cast<volatile const Atomic32*>(ptr));
}
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
index 72c1d9a328..ad1e5954dc 100644
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -39,7 +39,7 @@ namespace base {
// This implementation is transitional and maintains the original API for
// atomicops.h.
-inline void MemoryBarrier() {
+inline void MemoryFence() {
#if defined(__GLIBCXX__)
// Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
// not defined, leading to the linker complaining about undefined references.
@@ -49,21 +49,20 @@ inline void MemoryBarrier() {
#endif
}
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
+inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
+inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
+inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
@@ -86,11 +85,11 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
return old_value;
}
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
@@ -98,11 +97,11 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
@@ -112,21 +111,20 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
#if defined(V8_HOST_ARCH_64_BIT)
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
+inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
__atomic_compare_exchange_n(ptr, &old_value, new_value, false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return old_value;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
+inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
+inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
@@ -149,7 +147,7 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
return old_value;
}
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}
@@ -157,7 +155,7 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
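
The Relaxed_* names above map one-to-one onto __ATOMIC_RELAXED, i.e. std::memory_order_relaxed. A standalone sketch of the ordering contract (not part of this diff; it uses only <atomic> and no V8 API), pairing a relaxed data write with a release/acquire flag as the Relaxed_Store / Release_Store / Acquire_Load helpers do:

// Standalone sketch of the orderings the renamed helpers wrap:
// Relaxed_Store/Relaxed_Load correspond to memory_order_relaxed,
// Release_Store/Acquire_Load to release/acquire.
#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> payload{0};
std::atomic<bool> ready{false};

void Producer() {
  payload.store(42, std::memory_order_relaxed);  // like Relaxed_Store
  ready.store(true, std::memory_order_release);  // like Release_Store
}

void Consumer() {
  while (!ready.load(std::memory_order_acquire)) {  // like Acquire_Load
  }
  // The release/acquire pair orders the relaxed payload write before this read.
  assert(payload.load(std::memory_order_relaxed) == 42);
}

int main() {
  std::thread t1(Producer), t2(Consumer);
  t1.join();
  t2.join();
  return 0;
}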
diff --git a/deps/v8/src/base/atomicops_internals_x86_msvc.h b/deps/v8/src/base/atomicops_internals_x86_msvc.h
index 0d2068e9f0..89a458e4f0 100644
--- a/deps/v8/src/base/atomicops_internals_x86_msvc.h
+++ b/deps/v8/src/base/atomicops_internals_x86_msvc.h
@@ -10,29 +10,19 @@
#include "src/base/macros.h"
#include "src/base/win32-headers.h"
-#if defined(V8_HOST_ARCH_64_BIT)
-// windows.h #defines this (only on x64). This causes problems because the
-// public API also uses MemoryBarrier at the public name for this fence. So, on
-// X64, undef it, and call its documented
-// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
-// implementation directly.
-#undef MemoryBarrier
-#endif
-
namespace v8 {
namespace base {
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
+inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
+inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
@@ -45,38 +35,30 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
increment;
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
+inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
-inline void MemoryBarrier() {
-#if defined(V8_HOST_ARCH_64_BIT)
- // See #undef and note at the top of this file.
- __faststorefence();
-#else
- // We use MemoryBarrier from WinNT.h
- ::MemoryBarrier();
-#endif
-}
+inline void MemoryFence() { MemoryBarrier(); }
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
*ptr = value;
}
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
@@ -85,13 +67,9 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
// See comments in Atomic64 version of Release_Store() below.
}
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
+inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { return *ptr; }
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
+inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
@@ -104,17 +82,16 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
+inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
PVOID result = InterlockedCompareExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
+inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
PVOID result = InterlockedExchangePointer(
reinterpret_cast<volatile PVOID*>(ptr),
reinterpret_cast<PVOID>(new_value));
@@ -128,12 +105,12 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
static_cast<LONGLONG>(increment)) + increment;
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
+inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
return Barrier_AtomicIncrement(ptr, increment);
}
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
@@ -148,9 +125,7 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
// http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
+inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { return *ptr; }
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
@@ -160,13 +135,13 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ return Relaxed_CompareAndSwap(ptr, old_value, new_value);
}
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 362940fcbe..496860fadd 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -6,6 +6,7 @@
#define V8_BASE_BITS_H_
#include <stdint.h>
+#include <type_traits>
#include "src/base/base-export.h"
#include "src/base/macros.h"
@@ -26,6 +27,33 @@ class CheckedNumeric;
namespace bits {
+// Define overloaded |Name| for |Name32| and |Name64|, depending on the size of
+// the given value.
+//
+// The overloads are only defined for input types of size 4 and 8, respectively,
+// using enable_if and SFINAE to disable them otherwise. enable_if<bool,
+// typename> only has a "type" member if the first parameter is true, in which
+// case "type" is a typedef to the second member (here, set to "unsigned").
+// Otherwise, enable_if::type doesn't exist, making the function signature
+// invalid, and so the entire function is thrown away (without an error) due to
+// SFINAE.
+//
+// Note that we cannot simply check sizeof(T) using an if statement, as we need
+// both branches of the if to be syntactically valid even if one of the branches
+// is dead.
+#define DEFINE_32_64_OVERLOADS(Name) \
+ template <typename T> \
+ inline typename std::enable_if<sizeof(T) == 4, unsigned>::type Name( \
+ T value) { \
+ return Name##32(value); \
+ } \
+ \
+ template <typename T> \
+ inline typename std::enable_if<sizeof(T) == 8, unsigned>::type Name( \
+ T value) { \
+ return Name##64(value); \
+ }
+
// CountPopulation32(value) returns the number of bits set in |value|.
inline unsigned CountPopulation32(uint32_t value) {
#if V8_HAS_BUILTIN_POPCOUNT
@@ -51,17 +79,7 @@ inline unsigned CountPopulation64(uint64_t value) {
#endif
}
-
-// Overloaded versions of CountPopulation32/64.
-inline unsigned CountPopulation(uint32_t value) {
- return CountPopulation32(value);
-}
-
-
-inline unsigned CountPopulation(uint64_t value) {
- return CountPopulation64(value);
-}
-
+DEFINE_32_64_OVERLOADS(CountPopulation)
// CountLeadingZeros32(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns 32.
@@ -148,24 +166,13 @@ inline unsigned CountTrailingZeros64(uint64_t value) {
#endif
}
-// Overloaded versions of CountTrailingZeros32/64.
-inline unsigned CountTrailingZeros(uint32_t value) {
- return CountTrailingZeros32(value);
-}
-
-inline unsigned CountTrailingZeros(uint64_t value) {
- return CountTrailingZeros64(value);
-}
-
-// Returns true iff |value| is a power of 2.
-constexpr inline bool IsPowerOfTwo32(uint32_t value) {
- return value && !(value & (value - 1));
-}
-
+DEFINE_32_64_OVERLOADS(CountTrailingZeros)
// Returns true iff |value| is a power of 2.
-constexpr inline bool IsPowerOfTwo64(uint64_t value) {
- return value && !(value & (value - 1));
+template <typename T,
+ typename = typename std::enable_if<std::is_integral<T>::value>::type>
+constexpr inline bool IsPowerOfTwo(T value) {
+ return value > 0 && (value & (value - 1)) == 0;
}
// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
@@ -330,6 +337,8 @@ V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
// checks and returns the result.
V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
+#undef DEFINE_32_64_OVERLOADS
+
} // namespace bits
} // namespace base
} // namespace v8
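
The DEFINE_32_64_OVERLOADS macro above dispatches on sizeof(T) via enable_if/SFINAE: exactly one of the two overloads has a valid signature for a given argument width, and the other is silently discarded. A standalone sketch of the same pattern (not part of this diff; the population-count bodies are simple stand-ins, not V8's implementations):

#include <cstdint>
#include <iostream>
#include <type_traits>

// Width-specific stand-ins (portable bit-clearing loop, not V8's versions).
inline unsigned CountPopulation32(uint32_t value) {
  unsigned n = 0;
  for (; value; value &= value - 1) ++n;  // clear the lowest set bit
  return n;
}
inline unsigned CountPopulation64(uint64_t value) {
  unsigned n = 0;
  for (; value; value &= value - 1) ++n;
  return n;
}

// Same dispatch as DEFINE_32_64_OVERLOADS: only the overload whose sizeof(T)
// condition holds has a valid return type; the other is removed from the
// overload set by SFINAE.
template <typename T>
inline typename std::enable_if<sizeof(T) == 4, unsigned>::type CountPopulation(
    T value) {
  return CountPopulation32(value);
}
template <typename T>
inline typename std::enable_if<sizeof(T) == 8, unsigned>::type CountPopulation(
    T value) {
  return CountPopulation64(value);
}

int main() {
  std::cout << CountPopulation(uint32_t{0xFF}) << "\n";             // 8
  std::cout << CountPopulation(uint64_t{0xFFFF0000FFULL}) << "\n";  // 24
  return 0;
}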
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 0374f0fc25..73488de5bd 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -35,7 +35,7 @@
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__PPC__) || defined(_ARCH_PPC)
#define V8_HOST_ARCH_PPC 1
-#if defined(__PPC64__) || defined(_ARCH_PPC64) || defined(_ARCH_PPCGR)
+#if defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_64_BIT 1
#else
#define V8_HOST_ARCH_32_BIT 1
@@ -76,9 +76,9 @@
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
+ !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
+ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -129,8 +129,6 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
-#elif V8_TARGET_ARCH_X87
-#define V8_TARGET_ARCH_32_BIT 1
#else
#error Unknown target architecture pointer size
#endif
@@ -181,8 +179,6 @@
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
-#elif V8_TARGET_ARCH_X87
-#define V8_TARGET_LITTLE_ENDIAN 1
#elif __BIG_ENDIAN__ // FOR PPCGR on AIX
#define V8_TARGET_BIG_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE
@@ -199,8 +195,7 @@
#error Unknown target architecture endianness
#endif
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) || \
- defined(V8_TARGET_ARCH_X87)
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
#else
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
diff --git a/deps/v8/src/base/debug/stack_trace_fuchsia.cc b/deps/v8/src/base/debug/stack_trace_fuchsia.cc
new file mode 100644
index 0000000000..4ad594b9e8
--- /dev/null
+++ b/deps/v8/src/base/debug/stack_trace_fuchsia.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/debug/stack_trace.h"
+
+#include <iomanip>
+#include <ostream>
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+namespace debug {
+
+bool EnableInProcessStackDumping() {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return false;
+}
+
+void DisableSignalStackDump() {}
+
+StackTrace::StackTrace() {}
+
+void StackTrace::Print() const {
+ std::string backtrace = ToString();
+ OS::Print("%s\n", backtrace.c_str());
+}
+
+void StackTrace::OutputToStream(std::ostream* os) const {
+ for (size_t i = 0; i < count_; ++i) {
+ *os << "#" << std::setw(2) << i << trace_[i] << "\n";
+ }
+}
+
+} // namespace debug
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/functional.cc b/deps/v8/src/base/functional.cc
index d212912efa..80a7585bcc 100644
--- a/deps/v8/src/base/functional.cc
+++ b/deps/v8/src/base/functional.cc
@@ -61,7 +61,6 @@ V8_INLINE size_t hash_value_unsigned(T v) {
}
}
UNREACHABLE();
- return static_cast<size_t>(v);
}
} // namespace
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index 4436a2d949..d2b5810c1c 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -297,7 +297,7 @@ template <typename Key, typename Value, typename MatchFun,
typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
const Key& key, uint32_t hash) const {
- DCHECK(base::bits::IsPowerOfTwo32(capacity_));
+ DCHECK(base::bits::IsPowerOfTwo(capacity_));
size_t i = hash & (capacity_ - 1);
DCHECK(i < capacity_);
@@ -333,7 +333,7 @@ template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Initialize(
uint32_t capacity, AllocationPolicy allocator) {
- DCHECK(base::bits::IsPowerOfTwo32(capacity));
+ DCHECK(base::bits::IsPowerOfTwo(capacity));
map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
if (map_ == nullptr) {
FATAL("Out of memory: HashMap::Initialize");
diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h
index 7d96be209f..59d9fda6f1 100644
--- a/deps/v8/src/base/iterator.h
+++ b/deps/v8/src/base/iterator.h
@@ -10,6 +10,16 @@
namespace v8 {
namespace base {
+template <class Category, class Type, class Diff = std::ptrdiff_t,
+ class Pointer = Type*, class Reference = Type&>
+struct iterator {
+ typedef Category iterator_category;
+ typedef Type value_type;
+ typedef Diff difference_type;
+ typedef Pointer pointer;
+ typedef Reference reference;
+};
+
// The intention of the base::iterator_range class is to encapsulate two
// iterators so that the range defined by the iterators can be used like
// a regular STL container (actually only a subset of the full container
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index a33b65ba4d..740f1fa987 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -4,6 +4,7 @@
#include "src/base/logging.h"
+#include <cctype>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
@@ -18,22 +19,73 @@ namespace {
void (*g_print_stack_trace)() = nullptr;
+void PrettyPrintChar(std::ostream& os, int ch) {
+ switch (ch) {
+#define CHAR_PRINT_CASE(ch) \
+ case ch: \
+ os << #ch; \
+ break;
+
+ CHAR_PRINT_CASE('\0')
+ CHAR_PRINT_CASE('\'')
+ CHAR_PRINT_CASE('\\')
+ CHAR_PRINT_CASE('\a')
+ CHAR_PRINT_CASE('\b')
+ CHAR_PRINT_CASE('\f')
+ CHAR_PRINT_CASE('\n')
+ CHAR_PRINT_CASE('\r')
+ CHAR_PRINT_CASE('\t')
+ CHAR_PRINT_CASE('\v')
+#undef CHAR_PRINT_CASE
+ default:
+ if (std::isprint(ch)) {
+ os << '\'' << ch << '\'';
+ } else {
+ auto flags = os.flags(std::ios_base::hex);
+ os << "\\x" << static_cast<unsigned int>(ch);
+ os.flags(flags);
+ }
+ }
+}
+
} // namespace
void SetPrintStackTrace(void (*print_stack_trace)()) {
g_print_stack_trace = print_stack_trace;
}
+// Define specializations to pretty-print characters (escaping non-printable
+// characters) and to print C strings as pointers instead of strings.
+#define DEFINE_PRINT_CHECK_OPERAND_CHAR(type) \
+ template <> \
+ void PrintCheckOperand<type>(std::ostream & os, type ch) { \
+ PrettyPrintChar(os, ch); \
+ } \
+ template <> \
+ void PrintCheckOperand<type*>(std::ostream & os, type * cstr) { \
+ os << static_cast<void*>(cstr); \
+ } \
+ template <> \
+ void PrintCheckOperand<const type*>(std::ostream & os, const type* cstr) { \
+ os << static_cast<const void*>(cstr); \
+ }
+
+DEFINE_PRINT_CHECK_OPERAND_CHAR(char)
+DEFINE_PRINT_CHECK_OPERAND_CHAR(signed char)
+DEFINE_PRINT_CHECK_OPERAND_CHAR(unsigned char)
+#undef DEFINE_PRINT_CHECK_OPERAND_CHAR
+
// Explicit instantiations for commonly used comparisons.
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \
- template std::string* MakeCheckOpString<type, type>(type, type, char const*);
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+ template std::string* MakeCheckOpString<type, type>(type, type, \
+ char const*); \
+ template void PrintCheckOperand<type>(std::ostream&, type);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(unsigned int)
DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int)
-DEFINE_MAKE_CHECK_OP_STRING(char const*)
DEFINE_MAKE_CHECK_OP_STRING(void const*)
#undef DEFINE_MAKE_CHECK_OP_STRING
@@ -57,7 +109,7 @@ DEFINE_CHECK_OP_IMPL(GT)
// Contains protection against recursive calls (faults while handling faults).
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stdout);
fflush(stderr);
v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file,
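
PrintCheckOperand/PrettyPrintChar change what a failing CHECK prints for character operands: named escapes for common control characters, quoted output for printable ones, hex for the rest, and raw pointers for char* so arbitrary string contents are not dumped. A standalone sketch of the escaping logic (not part of this diff; only a few escape cases are shown):

#include <cctype>
#include <iostream>
#include <sstream>

// Standalone version of the escaping logic: named escapes for a few control
// characters, quoted output for printable characters, hex for everything else.
void PrettyPrintChar(std::ostream& os, int ch) {
  switch (ch) {
    case '\0': os << "'\\0'"; break;
    case '\n': os << "'\\n'"; break;
    case '\t': os << "'\\t'"; break;
    default:
      if (std::isprint(ch)) {
        os << '\'' << static_cast<char>(ch) << '\'';
      } else {
        auto flags = os.flags(std::ios_base::hex);  // print the code in hex
        os << "\\x" << static_cast<unsigned int>(ch);
        os.flags(flags);  // restore the previous stream flags
      }
  }
}

int main() {
  std::ostringstream ss;
  PrettyPrintChar(ss, 'a');   // 'a'
  ss << ' ';
  PrettyPrintChar(ss, '\n');  // '\n'
  ss << ' ';
  PrettyPrintChar(ss, 7);     // \x7 (BEL is not printable)
  std::cout << ss.str() << "\n";
  return 0;
}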
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 6e54508d43..1e6e7a3091 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -13,7 +13,7 @@
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
-extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN V8_BASE_EXPORT
+[[noreturn]] PRINTF_FORMAT(3, 4) V8_BASE_EXPORT
void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -56,6 +56,14 @@ V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
#ifdef DEBUG
+#define DCHECK_WITH_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ V8_Fatal(__FILE__, __LINE__, "Debug check failed: %s.", message); \
+ } \
+ } while (0)
+#define DCHECK(condition) DCHECK_WITH_MSG(condition, #condition)
+
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
#define CHECK_OP(name, op, lhs, rhs) \
@@ -68,6 +76,16 @@ V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
} \
} while (0)
+#define DCHECK_OP(name, op, lhs, rhs) \
+ do { \
+ if (std::string* _msg = \
+ ::v8::base::Check##name##Impl<decltype(lhs), decltype(rhs)>( \
+ (lhs), (rhs), #lhs " " #op " " #rhs)) { \
+ V8_Fatal(__FILE__, __LINE__, "Debug check failed: %s.", _msg->c_str()); \
+ delete _msg; \
+ } \
+ } while (0)
+
#else
// Make all CHECK functions discard their log strings to reduce code
@@ -91,6 +109,27 @@ struct PassType : public std::conditional<
std::is_scalar<typename std::decay<T>::type>::value,
typename std::decay<T>::type, T const&> {};
+template <typename Op>
+void PrintCheckOperand(std::ostream& os, Op op) {
+ os << op;
+}
+
+// Define specializations for character types, defined in logging.cc.
+#define DEFINE_PRINT_CHECK_OPERAND_CHAR(type) \
+ template <> \
+ V8_BASE_EXPORT void PrintCheckOperand<type>(std::ostream & os, type ch); \
+ template <> \
+ V8_BASE_EXPORT void PrintCheckOperand<type*>(std::ostream & os, \
+ type * cstr); \
+ template <> \
+ V8_BASE_EXPORT void PrintCheckOperand<const type*>(std::ostream & os, \
+ const type* cstr);
+
+DEFINE_PRINT_CHECK_OPERAND_CHAR(char)
+DEFINE_PRINT_CHECK_OPERAND_CHAR(signed char)
+DEFINE_PRINT_CHECK_OPERAND_CHAR(unsigned char)
+#undef DEFINE_PRINT_CHECK_OPERAND_CHAR
+
// Build the error message string. This is separate from the "Impl"
// function template because it is not performance critical and so can
// be out of line, while the "Impl" code should be inline. Caller
@@ -100,35 +139,55 @@ std::string* MakeCheckOpString(typename PassType<Lhs>::type lhs,
typename PassType<Rhs>::type rhs,
char const* msg) {
std::ostringstream ss;
- ss << msg << " (" << lhs << " vs. " << rhs << ")";
+ ss << msg << " (";
+ PrintCheckOperand(ss, lhs);
+ ss << " vs. ";
+ PrintCheckOperand(ss, rhs);
+ ss << ")";
return new std::string(ss.str());
}
// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
// in logging.cc.
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+#define EXPLICIT_CHECK_OP_INSTANTIATION(type) \
extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
- type, type, char const*);
-DEFINE_MAKE_CHECK_OP_STRING(int)
-DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
-DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
-DEFINE_MAKE_CHECK_OP_STRING(unsigned int)
-DEFINE_MAKE_CHECK_OP_STRING(unsigned long) // NOLINT(runtime/int)
-DEFINE_MAKE_CHECK_OP_STRING(unsigned long long) // NOLINT(runtime/int)
-DEFINE_MAKE_CHECK_OP_STRING(char const*)
-DEFINE_MAKE_CHECK_OP_STRING(void const*)
-#undef DEFINE_MAKE_CHECK_OP_STRING
+ type, type, char const*); \
+ extern template V8_BASE_EXPORT void PrintCheckOperand<type>(std::ostream&, \
+ type);
+
+EXPLICIT_CHECK_OP_INSTANTIATION(int)
+EXPLICIT_CHECK_OP_INSTANTIATION(long) // NOLINT(runtime/int)
+EXPLICIT_CHECK_OP_INSTANTIATION(long long) // NOLINT(runtime/int)
+EXPLICIT_CHECK_OP_INSTANTIATION(unsigned int)
+EXPLICIT_CHECK_OP_INSTANTIATION(unsigned long) // NOLINT(runtime/int)
+EXPLICIT_CHECK_OP_INSTANTIATION(unsigned long long) // NOLINT(runtime/int)
+EXPLICIT_CHECK_OP_INSTANTIATION(void const*)
+#undef EXPLICIT_CHECK_OP_INSTANTIATION
+
+// comparison_underlying_type provides the underlying integral type of an enum,
+// or std::decay<T>::type if T is not an enum.
+template <typename T>
+struct comparison_underlying_type {
+ // std::underlying_type must only be used with enum types, thus use this
+ // {Dummy} type if the given type is not an enum.
+ enum Dummy {};
+ using decay = typename std::decay<T>::type;
+ static constexpr bool is_enum = std::is_enum<decay>::value;
+ using underlying = typename std::underlying_type<
+ typename std::conditional<is_enum, decay, Dummy>::type>::type;
+ using type = typename std::conditional<is_enum, underlying, decay>::type;
+};
// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
// signed, and Rhs is unsigned. False in all other cases.
template <typename Lhs, typename Rhs>
struct is_signed_vs_unsigned {
- enum : bool {
- value = std::is_integral<typename std::decay<Lhs>::type>::value &&
- std::is_integral<typename std::decay<Rhs>::type>::value &&
- std::is_signed<typename std::decay<Lhs>::type>::value &&
- std::is_unsigned<typename std::decay<Rhs>::type>::value
- };
+ using lhs_underlying = typename comparison_underlying_type<Lhs>::type;
+ using rhs_underlying = typename comparison_underlying_type<Rhs>::type;
+ static constexpr bool value = std::is_integral<lhs_underlying>::value &&
+ std::is_integral<rhs_underlying>::value &&
+ std::is_signed<lhs_underlying>::value &&
+ std::is_unsigned<rhs_underlying>::value;
};
// Same thing, other way around: Lhs is unsigned, Rhs signed.
template <typename Lhs, typename Rhs>
@@ -137,14 +196,13 @@ struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
// Specialize the compare functions for signed vs. unsigned comparisons.
// std::enable_if ensures that this template is only instantiable if both Lhs
// and Rhs are integral types, and their signedness does not match.
-#define MAKE_UNSIGNED(Type, value) \
- static_cast< \
- typename std::make_unsigned<typename std::decay<Type>::type>::type>( \
- value)
+#define MAKE_UNSIGNED(Type, value) \
+ static_cast<typename std::make_unsigned< \
+ typename comparison_underlying_type<Type>::type>::type>(value)
#define DEFINE_SIGNED_MISMATCH_COMP(CHECK, NAME, IMPL) \
template <typename Lhs, typename Rhs> \
V8_INLINE typename std::enable_if<CHECK<Lhs, Rhs>::value, bool>::type \
- Cmp##NAME##Impl(Lhs const& lhs, Rhs const& rhs) { \
+ Cmp##NAME##Impl(Lhs lhs, Rhs rhs) { \
return IMPL; \
}
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
@@ -221,16 +279,16 @@ DEFINE_CHECK_OP_IMPL(GT, > )
// The DCHECK macro is equivalent to CHECK except that it only
// generates code in debug builds.
#ifdef DEBUG
-#define DCHECK(condition) CHECK(condition)
-#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
-#define DCHECK_GT(v1, v2) CHECK_GT(v1, v2)
-#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
-#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
-#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
-#define DCHECK_NULL(val) CHECK_NULL(val)
-#define DCHECK_NOT_NULL(val) CHECK_NOT_NULL(val)
-#define DCHECK_IMPLIES(v1, v2) CHECK_IMPLIES(v1, v2)
+#define DCHECK_EQ(lhs, rhs) DCHECK_OP(EQ, ==, lhs, rhs)
+#define DCHECK_NE(lhs, rhs) DCHECK_OP(NE, !=, lhs, rhs)
+#define DCHECK_GT(lhs, rhs) DCHECK_OP(GT, >, lhs, rhs)
+#define DCHECK_GE(lhs, rhs) DCHECK_OP(GE, >=, lhs, rhs)
+#define DCHECK_LT(lhs, rhs) DCHECK_OP(LT, <, lhs, rhs)
+#define DCHECK_LE(lhs, rhs) DCHECK_OP(LE, <=, lhs, rhs)
+#define DCHECK_NULL(val) DCHECK((val) == nullptr)
+#define DCHECK_NOT_NULL(val) DCHECK((val) != nullptr)
+#define DCHECK_IMPLIES(lhs, rhs) \
+ DCHECK_WITH_MSG(!(lhs) || (rhs), #lhs " implies " #rhs)
#else
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(v1, v2) ((void) 0)
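
The new DCHECK_OP path reuses the Cmp##NAME##Impl helpers, and comparison_underlying_type lets those helpers treat enum operands as their underlying integral type, so the signed-vs-unsigned handling also covers enums. A standalone sketch of that type mapping (not part of this diff):

#include <cstdint>
#include <iostream>
#include <type_traits>

// Standalone copy of comparison_underlying_type: the underlying integral type
// for enums, std::decay<T>::type otherwise. The Dummy enum only exists so that
// std::underlying_type is never instantiated with a non-enum type.
template <typename T>
struct comparison_underlying_type {
  enum Dummy {};
  using decay = typename std::decay<T>::type;
  static constexpr bool is_enum = std::is_enum<decay>::value;
  using underlying = typename std::underlying_type<
      typename std::conditional<is_enum, decay, Dummy>::type>::type;
  using type = typename std::conditional<is_enum, underlying, decay>::type;
};

enum class Color : uint8_t { kRed, kGreen };

int main() {
  static_assert(
      std::is_same<comparison_underlying_type<Color>::type, uint8_t>::value,
      "an enum maps to its underlying integral type");
  static_assert(
      std::is_same<comparison_underlying_type<const int&>::type, int>::value,
      "a non-enum maps to its decayed type");
  std::cout << "ok\n";
  return 0;
}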
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 33a0ef0f4f..fef1d78457 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -124,6 +124,17 @@ V8_INLINE Dest bit_cast(Source const& source) {
TypeName() = delete; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
+// A macro to disallow dynamic allocation.
+// This should be used in the private: declarations for a class.
+// Declaring operator new and delete as deleted is not spec compliant.
+// Extract from 3.2.2 of C++11 spec:
+// [...] A non-placement deallocation function for a class is
+// odr-used by the definition of the destructor of that class, [...]
+#define DISALLOW_NEW_AND_DELETE() \
+ void* operator new(size_t) { base::OS::Abort(); } \
+ void* operator new[](size_t) { base::OS::Abort(); }; \
+ void operator delete(void*, size_t) { base::OS::Abort(); } \
+ void operator delete[](void*, size_t) { base::OS::Abort(); }
// Newly written code should use V8_INLINE and V8_NOINLINE directly.
#define INLINE(declarator) V8_INLINE declarator
@@ -170,15 +181,25 @@ V8_INLINE Dest bit_cast(Source const& source) {
// TODO(all) Replace all uses of this macro with static_assert, remove macro.
#define STATIC_ASSERT(test) static_assert(test, #test)
+// TODO(rongjie) Remove this workaround once we require gcc >= 5.0
+#if __GNUG__ && __GNUC__ < 5
+#define IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
+#else
+#define IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value
+#endif
-// The USE(x) template is used to silence C++ compiler warnings
+// The USE(x, ...) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
-template <typename T>
-inline void USE(T) { }
-
-
-#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
-
+// The arguments are guaranteed to be evaluated from left to right.
+struct Use {
+ template <typename T>
+ Use(T&&) {} // NOLINT(runtime/explicit)
+};
+#define USE(...) \
+ do { \
+ ::Use unused_tmp_array_for_use_macro[]{__VA_ARGS__}; \
+ (void)unused_tmp_array_for_use_macro; \
+ } while (false)
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
@@ -271,7 +292,8 @@ inline T AddressFrom(intptr_t x) {
// Return the largest multiple of m which is <= x.
template <typename T>
inline T RoundDown(T x, intptr_t m) {
- DCHECK(IS_POWER_OF_TWO(m));
+ // m must be a power of two.
+ DCHECK(m != 0 && ((m & (m - 1)) == 0));
return AddressFrom<T>(OffsetFrom(x) & -m);
}
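
The rewritten USE(...) macro consumes any number of arguments by brace-initializing an array of Use objects, which also guarantees left-to-right evaluation. A standalone copy of the pattern with a small demonstration (not part of this diff):

#include <cstdio>

// Standalone copy of the variadic USE pattern: every argument is consumed by a
// Use temporary, and brace-initialization of the array guarantees
// left-to-right evaluation of the arguments.
struct Use {
  template <typename T>
  Use(T&&) {}  // NOLINT(runtime/explicit)
};
#define USE(...)                                         \
  do {                                                   \
    ::Use unused_tmp_array_for_use_macro[]{__VA_ARGS__}; \
    (void)unused_tmp_array_for_use_macro;                \
  } while (false)

int Step(int n) {
  std::printf("step %d\n", n);
  return n;
}

int main() {
  int unused_result = 0;
  // Silences unused-variable warnings; prints "step 1" before "step 2".
  USE(unused_result, Step(1), Step(2));
  return 0;
}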
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
new file mode 100644
index 0000000000..a229745f84
--- /dev/null
+++ b/deps/v8/src/base/optional.h
@@ -0,0 +1,493 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a clone of "base/optional.h" in chromium.
+// Keep in sync, especially when fixing bugs.
+// Copyright 2017 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_OPTIONAL_H_
+#define V8_BASE_OPTIONAL_H_
+
+#include <type_traits>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+struct in_place_t {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+struct nullopt_t {
+ constexpr explicit nullopt_t(int) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place
+constexpr in_place_t in_place = {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+constexpr nullopt_t nullopt(0);
+
+namespace internal {
+
+template <typename T, bool = std::is_trivially_destructible<T>::value>
+struct OptionalStorage {
+ // Initializing |empty_| here instead of using default member initializing
+ // to avoid errors in g++ 4.8.
+ constexpr OptionalStorage() : empty_('\0') {}
+
+ constexpr explicit OptionalStorage(const T& value)
+ : is_null_(false), value_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit OptionalStorage(T&& value)
+ : is_null_(false), value_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit OptionalStorage(base::in_place_t, Args&&... args)
+ : is_null_(false), value_(std::forward<Args>(args)...) {}
+
+ // When T is not trivially destructible we must call its
+ // destructor before deallocating its memory.
+ ~OptionalStorage() {
+ if (!is_null_) value_.~T();
+ }
+
+ bool is_null_ = true;
+ union {
+ // |empty_| exists so that the union will always be initialized, even when
+ // it doesn't contain a value. Union members must be initialized for the
+ // constructor to be 'constexpr'.
+ char empty_;
+ T value_;
+ };
+};
+
+template <typename T>
+struct OptionalStorage<T, true> {
+ // Initializing |empty_| here instead of using default member initializing
+ // to avoid errors in g++ 4.8.
+ constexpr OptionalStorage() : empty_('\0') {}
+
+ constexpr explicit OptionalStorage(const T& value)
+ : is_null_(false), value_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit OptionalStorage(T&& value)
+ : is_null_(false), value_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit OptionalStorage(base::in_place_t, Args&&... args)
+ : is_null_(false), value_(std::forward<Args>(args)...) {}
+
+ // When T is trivially destructible (i.e. its destructor does nothing) there
+ // is no need to call it. Explicitly defaulting the destructor means it's not
+ // user-provided. Those two together make this destructor trivial.
+ ~OptionalStorage() = default;
+
+ bool is_null_ = true;
+ union {
+ // |empty_| exists so that the union will always be initialized, even when
+ // it doesn't contain a value. Union members must be initialized for the
+ // constructor to be 'constexpr'.
+ char empty_;
+ T value_;
+ };
+};
+
+} // namespace internal
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - The constructor and emplace method using initializer_list are not
+// implemented because 'initializer_list' is banned from Chromium.
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+template <typename T>
+class Optional {
+ public:
+ using value_type = T;
+
+ constexpr Optional() {}
+
+ explicit constexpr Optional(base::nullopt_t) {}
+
+ Optional(const Optional& other) {
+ if (!other.storage_.is_null_) Init(other.value());
+ }
+
+ Optional(Optional&& other) {
+ if (!other.storage_.is_null_) Init(std::move(other.value()));
+ }
+
+ explicit constexpr Optional(const T& value) : storage_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit Optional(T&& value) : storage_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit Optional(base::in_place_t, Args&&... args)
+ : storage_(base::in_place, std::forward<Args>(args)...) {}
+
+ ~Optional() = default;
+
+ Optional& operator=(base::nullopt_t) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ Optional& operator=(const Optional& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(other.value());
+ return *this;
+ }
+
+ Optional& operator=(Optional&& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(std::move(other.value()));
+ return *this;
+ }
+
+ template <class U>
+ typename std::enable_if<std::is_same<std::decay<U>, T>::value,
+ Optional&>::type
+ operator=(U&& value) {
+ InitOrAssign(std::forward<U>(value));
+ return *this;
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T* operator->() const {
+ DCHECK(!storage_.is_null_);
+ return &value();
+ }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T* operator->() {
+ DCHECK(!storage_.is_null_);
+ return &value();
+ }
+
+ constexpr const T& operator*() const & { return value(); }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T& operator*() & { return value(); }
+
+ constexpr const T&& operator*() const && { return std::move(value()); }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T&& operator*() && { return std::move(value()); }
+
+ constexpr explicit operator bool() const { return !storage_.is_null_; }
+
+ constexpr bool has_value() const { return !storage_.is_null_; }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T& value() & {
+ DCHECK(!storage_.is_null_);
+ return storage_.value_;
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T& value() const & {
+ DCHECK(!storage_.is_null_);
+ return storage_.value_;
+ }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T&& value() && {
+ DCHECK(!storage_.is_null_);
+ return std::move(storage_.value_);
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T&& value() const && {
+ DCHECK(!storage_.is_null_);
+ return std::move(storage_.value_);
+ }
+
+ template <class U>
+ constexpr T value_or(U&& default_value) const & {
+ // TODO(mlamouri): add the following assert when possible:
+ // static_assert(std::is_copy_constructible<T>::value,
+ // "T must be copy constructible");
+ static_assert(std::is_convertible<U, T>::value,
+ "U must be convertible to T");
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : value();
+ }
+
+ template <class U>
+ T value_or(U&& default_value) && {
+ // TODO(mlamouri): add the following assert when possible:
+ // static_assert(std::is_move_constructible<T>::value,
+ // "T must be move constructible");
+ static_assert(std::is_convertible<U, T>::value,
+ "U must be convertible to T");
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : std::move(value());
+ }
+
+ void swap(Optional& other) {
+ if (storage_.is_null_ && other.storage_.is_null_) return;
+
+ if (storage_.is_null_ != other.storage_.is_null_) {
+ if (storage_.is_null_) {
+ Init(std::move(other.storage_.value_));
+ other.FreeIfNeeded();
+ } else {
+ other.Init(std::move(storage_.value_));
+ FreeIfNeeded();
+ }
+ return;
+ }
+
+ DCHECK(!storage_.is_null_ && !other.storage_.is_null_);
+ using std::swap;
+ swap(**this, *other);
+ }
+
+ void reset() { FreeIfNeeded(); }
+
+ template <class... Args>
+ void emplace(Args&&... args) {
+ FreeIfNeeded();
+ Init(std::forward<Args>(args)...);
+ }
+
+ private:
+ void Init(const T& value) {
+ DCHECK(storage_.is_null_);
+ new (&storage_.value_) T(value);
+ storage_.is_null_ = false;
+ }
+
+ void Init(T&& value) {
+ DCHECK(storage_.is_null_);
+ new (&storage_.value_) T(std::move(value));
+ storage_.is_null_ = false;
+ }
+
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(storage_.is_null_);
+ new (&storage_.value_) T(std::forward<Args>(args)...);
+ storage_.is_null_ = false;
+ }
+
+ void InitOrAssign(const T& value) {
+ if (storage_.is_null_)
+ Init(value);
+ else
+ storage_.value_ = value;
+ }
+
+ void InitOrAssign(T&& value) {
+ if (storage_.is_null_)
+ Init(std::move(value));
+ else
+ storage_.value_ = std::move(value);
+ }
+
+ void FreeIfNeeded() {
+ if (storage_.is_null_) return;
+ storage_.value_.~T();
+ storage_.is_null_ = true;
+ }
+
+ internal::OptionalStorage<T> storage_;
+};
+
+template <class T>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !!lhs != !!rhs ? false : lhs == nullopt || (*lhs == *rhs);
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return rhs == nullopt ? false : (lhs == nullopt ? true : *lhs < *rhs);
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(rhs < lhs);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return rhs < lhs;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(lhs < rhs);
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, base::nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator==(base::nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, base::nullopt_t) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator!=(base::nullopt_t, const Optional<T>& opt) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, base::nullopt_t) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator<(base::nullopt_t, const Optional<T>& opt) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, base::nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(base::nullopt_t, const Optional<T>& opt) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, base::nullopt_t) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator>(base::nullopt_t, const Optional<T>& opt) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, base::nullopt_t) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>=(base::nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, const T& value) {
+ return opt != nullopt ? *opt == value : false;
+}
+
+template <class T>
+constexpr bool operator==(const T& value, const Optional<T>& opt) {
+ return opt == value;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, const T& value) {
+ return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator!=(const T& value, const Optional<T>& opt) {
+ return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, const T& value) {
+ return opt != nullopt ? *opt < value : true;
+}
+
+template <class T>
+constexpr bool operator<(const T& value, const Optional<T>& opt) {
+ return opt != nullopt ? value < *opt : false;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, const T& value) {
+ return !(opt > value);
+}
+
+template <class T>
+constexpr bool operator<=(const T& value, const Optional<T>& opt) {
+ return !(value > opt);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, const T& value) {
+ return value < opt;
+}
+
+template <class T>
+constexpr bool operator>(const T& value, const Optional<T>& opt) {
+ return opt < value;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, const T& value) {
+ return !(opt < value);
+}
+
+template <class T>
+constexpr bool operator>=(const T& value, const Optional<T>& opt) {
+ return !(value < opt);
+}
+
+template <class T>
+constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
+ return Optional<typename std::decay<T>::type>(std::forward<T>(value));
+}
+
+template <class T>
+void swap(Optional<T>& lhs, Optional<T>& rhs) {
+ lhs.swap(rhs);
+}
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_OPTIONAL_H_
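
A minimal usage sketch for the new base::Optional (not part of this diff; it assumes V8's include paths and uses only members defined above — note that the value and nullopt_t constructors are explicit, so the empty case is returned with {}):

#include <string>

#include "src/base/optional.h"

// Returns a value for known ids, an empty Optional otherwise.
v8::base::Optional<std::string> FindName(int id) {
  if (id == 1) return v8::base::Optional<std::string>("one");
  return {};  // empty; the nullopt_t constructor is explicit
}

void Example() {
  v8::base::Optional<std::string> name = FindName(1);
  if (name.has_value()) {
    // operator* and operator-> DCHECK that a value is actually present.
    const std::string& s = *name;
    (void)s;
  }
  // value_or() supplies a fallback when the Optional is empty.
  std::string fallback = FindName(2).value_or("unknown");
  (void)fallback;
}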
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 19c33f8b1f..6df8599def 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -118,210 +118,45 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
#elif V8_OS_WIN
-struct ConditionVariable::Event {
- Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
- DCHECK(handle_ != NULL);
- }
-
- ~Event() {
- BOOL ok = ::CloseHandle(handle_);
- DCHECK(ok);
- USE(ok);
- }
-
- bool WaitFor(DWORD timeout_ms) {
- DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
- if (result == WAIT_OBJECT_0) {
- return true;
- }
- DCHECK(result == WAIT_TIMEOUT);
- return false;
- }
-
- HANDLE handle_;
- Event* next_;
- HANDLE thread_;
- volatile bool notified_;
-};
-
-
-ConditionVariable::NativeHandle::~NativeHandle() {
- DCHECK(waitlist_ == NULL);
-
- while (freelist_ != NULL) {
- Event* event = freelist_;
- freelist_ = event->next_;
- delete event;
- }
-}
-
-
-ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
- LockGuard<Mutex> lock_guard(&mutex_);
-
- // Grab an event from the free list or create a new one.
- Event* event = freelist_;
- if (event != NULL) {
- freelist_ = event->next_;
- } else {
- event = new Event;
- }
- event->thread_ = GetCurrentThread();
- event->notified_ = false;
-
-#ifdef DEBUG
- // The event must not be on the wait list.
- for (Event* we = waitlist_; we != NULL; we = we->next_) {
- DCHECK_NE(event, we);
- }
-#endif
-
- // Prepend the event to the wait list.
- event->next_ = waitlist_;
- waitlist_ = event;
-
- return event;
-}
-
-
-void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
- LockGuard<Mutex> lock_guard(&mutex_);
-
- // Remove the event from the wait list.
- for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
- DCHECK(*wep);
- if (*wep == event) {
- *wep = event->next_;
- break;
- }
- }
-
-#ifdef DEBUG
- // The event must not be on the free list.
- for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
- DCHECK_NE(event, fe);
- }
-#endif
-
- // Reset the event.
- BOOL ok = ::ResetEvent(event->handle_);
- DCHECK(ok);
- USE(ok);
-
- // Insert the event into the free list.
- event->next_ = freelist_;
- freelist_ = event;
-
- // Forward signals delivered after the timeout to the next waiting event.
- if (!result && event->notified_ && waitlist_ != NULL) {
- ok = ::SetEvent(waitlist_->handle_);
- DCHECK(ok);
- USE(ok);
- waitlist_->notified_ = true;
- }
+ConditionVariable::ConditionVariable() {
+ InitializeConditionVariable(&native_handle_);
}
-ConditionVariable::ConditionVariable() {}
-
-
ConditionVariable::~ConditionVariable() {}
-
-void ConditionVariable::NotifyOne() {
- // Notify the thread with the highest priority in the waitlist
- // that was not already signalled.
- LockGuard<Mutex> lock_guard(native_handle_.mutex());
- Event* highest_event = NULL;
- int highest_priority = std::numeric_limits<int>::min();
- for (Event* event = native_handle().waitlist();
- event != NULL;
- event = event->next_) {
- if (event->notified_) {
- continue;
- }
- int priority = GetThreadPriority(event->thread_);
- DCHECK_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
- if (priority >= highest_priority) {
- highest_priority = priority;
- highest_event = event;
- }
- }
- if (highest_event != NULL) {
- DCHECK(!highest_event->notified_);
- ::SetEvent(highest_event->handle_);
- highest_event->notified_ = true;
- }
-}
-
+void ConditionVariable::NotifyOne() { WakeConditionVariable(&native_handle_); }
void ConditionVariable::NotifyAll() {
- // Notify all threads on the waitlist.
- LockGuard<Mutex> lock_guard(native_handle_.mutex());
- for (Event* event = native_handle().waitlist();
- event != NULL;
- event = event->next_) {
- if (!event->notified_) {
- ::SetEvent(event->handle_);
- event->notified_ = true;
- }
- }
+ WakeAllConditionVariable(&native_handle_);
}
void ConditionVariable::Wait(Mutex* mutex) {
- // Create and setup the wait event.
- Event* event = native_handle_.Pre();
-
- // Release the user mutex.
- mutex->Unlock();
-
- // Wait on the wait event.
- while (!event->WaitFor(INFINITE)) {
- }
-
- // Reaquire the user mutex.
- mutex->Lock();
-
- // Release the wait event (we must have been notified).
- DCHECK(event->notified_);
- native_handle_.Post(event, true);
+ mutex->AssertHeldAndUnmark();
+ SleepConditionVariableSRW(&native_handle_, &mutex->native_handle(), INFINITE,
+ 0);
+ mutex->AssertUnheldAndMark();
}
bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
- // Create and setup the wait event.
- Event* event = native_handle_.Pre();
-
- // Release the user mutex.
- mutex->Unlock();
-
- // Wait on the wait event.
- TimeTicks now = TimeTicks::Now();
- TimeTicks end = now + rel_time;
- bool result = false;
- while (true) {
- int64_t msec = (end - now).InMilliseconds();
- if (msec >= static_cast<int64_t>(INFINITE)) {
- result = event->WaitFor(INFINITE - 1);
- if (result) {
- break;
- }
- now = TimeTicks::Now();
- } else {
- result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
- break;
- }
+ int64_t msec = rel_time.InMilliseconds();
+ mutex->AssertHeldAndUnmark();
+ BOOL result = SleepConditionVariableSRW(
+ &native_handle_, &mutex->native_handle(), static_cast<DWORD>(msec), 0);
+#ifdef DEBUG
+ if (!result) {
+ // On failure, we only expect the CV to timeout. Any other error value means
+ // that we've unexpectedly woken up.
+ // Note that WAIT_TIMEOUT != ERROR_TIMEOUT. WAIT_TIMEOUT is used with the
+ // WaitFor* family of functions as a direct return value. ERROR_TIMEOUT is
+ // used with GetLastError().
+ DCHECK_EQ(static_cast<DWORD>(ERROR_TIMEOUT), GetLastError());
}
-
- // Reaquire the user mutex.
- mutex->Lock();
-
- // Release the wait event.
- DCHECK(!result || event->notified_);
- native_handle_.Post(event, result);
-
- return result;
+#endif
+ mutex->AssertUnheldAndMark();
+ return result != 0;
}
#endif // V8_OS_POSIX
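
On Windows the condition variable is now a plain CONDITION_VARIABLE driven by SleepConditionVariableSRW, but the portable API is unchanged. A usage sketch against that API (not part of this diff; LockGuard<Mutex> is assumed available, as used in the removed code above):

#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"

namespace {

v8::base::Mutex mutex;
v8::base::ConditionVariable cv;
bool ready = false;

void Producer() {
  v8::base::LockGuard<v8::base::Mutex> guard(&mutex);
  ready = true;
  cv.NotifyOne();  // wake one waiter; NotifyAll() would wake all of them
}

void Consumer() {
  v8::base::LockGuard<v8::base::Mutex> guard(&mutex);
  while (!ready) {
    cv.Wait(&mutex);  // atomically releases |mutex|, blocks, then re-locks it
  }
}

}  // namespace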
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 48e7c369ca..30c19612aa 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -63,25 +63,7 @@ class V8_BASE_EXPORT ConditionVariable final {
#if V8_OS_POSIX
typedef pthread_cond_t NativeHandle;
#elif V8_OS_WIN
- struct Event;
- class NativeHandle final {
- public:
- NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
- ~NativeHandle();
-
- Event* Pre() WARN_UNUSED_RESULT;
- void Post(Event* event, bool result);
-
- Mutex* mutex() { return &mutex_; }
- Event* waitlist() { return waitlist_; }
-
- private:
- Event* waitlist_;
- Event* freelist_;
- Mutex mutex_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeHandle);
- };
+ typedef CONDITION_VARIABLE NativeHandle;
#endif
NativeHandle& native_handle() {
diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc
index cc459a4b56..191f07ffb1 100644
--- a/deps/v8/src/base/platform/mutex.cc
+++ b/deps/v8/src/base/platform/mutex.cc
@@ -76,42 +76,88 @@ static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
return true;
}
-#elif V8_OS_WIN
-static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
- InitializeCriticalSection(cs);
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
}
-static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
- InitializeCriticalSection(cs);
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ DCHECK_EQ(0, level_);
}
-static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
- DeleteCriticalSection(cs);
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+ AssertUnheldAndMark();
}
-static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
- EnterCriticalSection(cs);
+void Mutex::Unlock() {
+ AssertHeldAndUnmark();
+ UnlockNativeHandle(&native_handle_);
}
-static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
- LeaveCriticalSection(cs);
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+ AssertUnheldAndMark();
+ return true;
}
-static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
- return TryEnterCriticalSection(cs) != FALSE;
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
}
-#endif // V8_OS_POSIX
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ DCHECK_EQ(0, level_);
+}
-Mutex::Mutex() {
- InitializeNativeHandle(&native_handle_);
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ DCHECK_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ DCHECK_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ DCHECK_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+#elif V8_OS_WIN
+
+Mutex::Mutex() : native_handle_(SRWLOCK_INIT) {
#ifdef DEBUG
level_ = 0;
#endif
@@ -119,25 +165,24 @@ Mutex::Mutex() {
Mutex::~Mutex() {
- DestroyNativeHandle(&native_handle_);
DCHECK_EQ(0, level_);
}
void Mutex::Lock() {
- LockNativeHandle(&native_handle_);
+ AcquireSRWLockExclusive(&native_handle_);
AssertUnheldAndMark();
}
void Mutex::Unlock() {
AssertHeldAndUnmark();
- UnlockNativeHandle(&native_handle_);
+ ReleaseSRWLockExclusive(&native_handle_);
}
bool Mutex::TryLock() {
- if (!TryLockNativeHandle(&native_handle_)) {
+ if (!TryAcquireSRWLockExclusive(&native_handle_)) {
return false;
}
AssertUnheldAndMark();
@@ -146,7 +191,7 @@ bool Mutex::TryLock() {
RecursiveMutex::RecursiveMutex() {
- InitializeRecursiveNativeHandle(&native_handle_);
+ InitializeCriticalSection(&native_handle_);
#ifdef DEBUG
level_ = 0;
#endif
@@ -154,13 +199,13 @@ RecursiveMutex::RecursiveMutex() {
RecursiveMutex::~RecursiveMutex() {
- DestroyNativeHandle(&native_handle_);
+ DeleteCriticalSection(&native_handle_);
DCHECK_EQ(0, level_);
}
void RecursiveMutex::Lock() {
- LockNativeHandle(&native_handle_);
+ EnterCriticalSection(&native_handle_);
#ifdef DEBUG
DCHECK_LE(0, level_);
level_++;
@@ -173,12 +218,12 @@ void RecursiveMutex::Unlock() {
DCHECK_LT(0, level_);
level_--;
#endif
- UnlockNativeHandle(&native_handle_);
+ LeaveCriticalSection(&native_handle_);
}
bool RecursiveMutex::TryLock() {
- if (!TryLockNativeHandle(&native_handle_)) {
+ if (!TryEnterCriticalSection(&native_handle_)) {
return false;
}
#ifdef DEBUG
@@ -188,5 +233,7 @@ bool RecursiveMutex::TryLock() {
return true;
}
+#endif // V8_OS_POSIX
+
} // namespace base
} // namespace v8
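
Mutex is now backed by an SRWLOCK on Windows (RecursiveMutex keeps a CRITICAL_SECTION), but the public Lock/Unlock/TryLock contract is unchanged. A usage sketch against that contract (not part of this diff):

#include "src/base/platform/mutex.h"

static v8::base::Mutex counter_mutex;
static int counter = 0;

void IncrementIfUncontended() {
  if (counter_mutex.TryLock()) {  // non-blocking; false if already held
    counter++;
    counter_mutex.Unlock();
  }
}

void IncrementBlocking() {
  counter_mutex.Lock();
  counter++;
  counter_mutex.Unlock();
}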
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index e7231bdd9e..25f85b907e 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -57,7 +57,7 @@ class V8_BASE_EXPORT Mutex final {
#if V8_OS_POSIX
typedef pthread_mutex_t NativeHandle;
#elif V8_OS_WIN
- typedef CRITICAL_SECTION NativeHandle;
+ typedef SRWLOCK NativeHandle;
#endif
NativeHandle& native_handle() {
@@ -153,7 +153,11 @@ class V8_BASE_EXPORT RecursiveMutex final {
bool TryLock() WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
- typedef Mutex::NativeHandle NativeHandle;
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
NativeHandle& native_handle() {
return native_handle_;
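On the Windows side, the hunks above move the plain Mutex onto SRWLOCK (a lighter, non-recursive lock that needs no explicit destruction) while RecursiveMutex keeps CRITICAL_SECTION, which lets the owning thread re-enter. A minimal standalone sketch of the two Win32 primitives, independent of V8's wrappers:

    // Standalone Win32 sketch of the two primitives used above (not V8 code).
    #include <windows.h>

    SRWLOCK fast_lock = SRWLOCK_INIT;  // non-recursive; no destroy call needed
    CRITICAL_SECTION recursive_lock;   // recursive; must be initialized/deleted

    void Example() {
      InitializeCriticalSection(&recursive_lock);

      AcquireSRWLockExclusive(&fast_lock);    // re-acquiring here would deadlock
      ReleaseSRWLockExclusive(&fast_lock);

      EnterCriticalSection(&recursive_lock);  // the same thread may enter again
      EnterCriticalSection(&recursive_lock);
      LeaveCriticalSection(&recursive_lock);
      LeaveCriticalSection(&recursive_lock);

      DeleteCriticalSection(&recursive_lock);
    }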
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 7d406996cb..49b6e936a2 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -36,12 +36,6 @@ namespace v8 {
namespace base {
-static inline void* mmapHelper(size_t len, int prot, int flags, int fildes,
- off_t off) {
- void* addr = OS::GetRandomMmapAddr();
- return mmap(addr, len, prot, flags, fildes, off);
-}
-
class AIXTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
@@ -72,10 +66,10 @@ double AIXTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmapHelper(msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
@@ -138,19 +132,16 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) {}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation =
- mmapHelper(request_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd,
- kMmapFdOffset);
+ void* reservation = mmap(hint, request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -188,10 +179,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() { return address_ != NULL; }
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -213,10 +200,9 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmapHelper(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
- kMmapFd, kMmapFdOffset);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+ kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
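The recurring change across the POSIX ports is that OS::Allocate and VirtualMemory no longer fetch a randomized address themselves; the caller passes it in as a hint. A minimal POSIX sketch of a hinted reservation (illustrative only; mmap treats the address as a hint, so callers must use the returned pointer rather than the hint):

    // Illustrative POSIX sketch of reserving memory at a caller-supplied hint.
    #include <stddef.h>
    #include <sys/mman.h>

    void* ReserveWithHint(size_t size, void* hint) {
      // Without MAP_FIXED the kernel is free to ignore the hint and place the
      // mapping elsewhere, which is why the return value is what matters.
      void* result = mmap(hint, size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return result == MAP_FAILED ? NULL : result;
    }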
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 0205021d69..fb09179d1b 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -56,7 +56,7 @@ double CygwinTimezoneCache::LocalTimeOffset() {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -138,14 +138,13 @@ void OS::SignalCodeMovingGC() {
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
+ void* hint) {
LPVOID base = NULL;
if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
// For executable pages try and randomize the allocation address
- for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
- base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
- }
+ base = VirtualAlloc(hint, size, action, protection);
}
// After three attempts give up and let the OS find an address to use.
@@ -157,17 +156,15 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size);
+ void* address = ReserveRegion(request_size, hint);
if (address == NULL) return;
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
@@ -180,7 +177,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size);
+ address = ReserveRegion(request_size, hint);
if (address == NULL) return;
}
address_ = address;
@@ -196,12 +193,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -218,9 +209,8 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
return UncommitRegion(address, size);
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 9ab1601e7a..4f07d571d2 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -41,10 +41,10 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
@@ -111,22 +111,16 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON,
- kMmapFd,
- kMmapFdOffset);
+ void* reservation = mmap(hint, request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -164,12 +158,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -191,13 +179,8 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON,
- kMmapFd,
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ void* result = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, kMmapFd,
kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
new file mode 100644
index 0000000000..dc703162e3
--- /dev/null
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -0,0 +1,98 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sys/mman.h>
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform-posix-time.h"
+#include "src/base/platform/platform-posix.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
+}
+
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access, void* hint) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = GetProtectionFromMemoryPermission(access);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) return NULL;
+ *allocated = msize;
+ return mbase;
+}
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return std::vector<SharedLibraryAddress>();
+}
+
+void OS::SignalCodeMovingGC() {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+}
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
+
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+}
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
+ : address_(NULL), size_(0) {}
+
+VirtualMemory::~VirtualMemory() {}
+
+void VirtualMemory::Reset() {}
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return false;
+}
+
+bool VirtualMemory::Uncommit(void* address, size_t size) { return false; }
+
+bool VirtualMemory::Guard(void* address) { return false; }
+
+// static
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return NULL;
+}
+
+// static
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return false;
+}
+
+// static
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return false;
+}
+
+// static
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return false;
+}
+
+// static
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return false;
+}
+
+// static
+bool VirtualMemory::HasLazyCommits() {
+ CHECK(false); // TODO(fuchsia): Port, https://crbug.com/731217.
+ return false;
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index ba161b26c7..8ebb961520 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -98,11 +98,10 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
@@ -197,16 +196,16 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) {}
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation =
- mmap(OS::GetRandomMmapAddr(), request_size, PROT_NONE,
+ mmap(hint, request_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
@@ -247,8 +246,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-bool VirtualMemory::IsReserved() { return address_ != NULL; }
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -270,10 +267,10 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-void* VirtualMemory::ReserveRegion(size_t size) {
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
void* result =
- mmap(OS::GetRandomMmapAddr(), size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 50ac55d880..1a08d83f43 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -52,15 +52,11 @@ static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(OS::GetRandomMmapAddr(),
- msize,
- prot,
- MAP_PRIVATE | MAP_ANON,
- kMmapFd,
- kMmapFdOffset);
+ void* mbase =
+ mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
@@ -103,22 +99,17 @@ TimezoneCache* OS::CreateTimezoneCache() {
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+ void* reservation =
+ mmap(hint, request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -156,12 +147,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -183,14 +168,10 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index 0056ad56d4..3c413d7ac2 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -39,11 +39,10 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
@@ -142,22 +141,17 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+ void* reservation =
+ mmap(hint, request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -195,12 +189,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -222,14 +210,10 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 621abbe6f1..10833facae 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -57,7 +57,7 @@
#include <sys/prctl.h> // NOLINT, for prctl
#endif
-#ifndef _AIX
+#if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
#include <sys/syscall.h>
#endif
@@ -102,24 +102,11 @@ intptr_t OS::CommitPageSize() {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable) {
+ bool is_executable, void* hint) {
return OS::Allocate(requested, allocated,
is_executable ? OS::MemoryPermission::kReadWriteExecute
- : OS::MemoryPermission::kReadWrite);
-}
-
-void* OS::AllocateGuarded(const size_t requested) {
- size_t allocated = 0;
- void* mbase =
- OS::Allocate(requested, &allocated, OS::MemoryPermission::kNoAccess);
- if (allocated != requested) {
- OS::Free(mbase, allocated);
- return nullptr;
- }
- if (mbase == nullptr) {
- return nullptr;
- }
- return mbase;
+ : OS::MemoryPermission::kReadWrite,
+ hint);
}
void OS::Free(void* address, const size_t size) {
@@ -362,6 +349,8 @@ int OS::GetCurrentThreadId() {
return static_cast<int>(gettid());
#elif V8_OS_AIX
return static_cast<int>(thread_self());
+#elif V8_OS_FUCHSIA
+ return static_cast<int>(pthread_self());
#elif V8_OS_SOLARIS
return static_cast<int>(pthread_self());
#else
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index f151bba8bb..0325ca4849 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -90,11 +90,10 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = GetProtectionFromMemoryPermission(access);
- void* addr = OS::GetRandomMmapAddr();
- void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
return mbase;
@@ -168,22 +167,17 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset);
+ void* reservation =
+ mmap(hint, request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -221,12 +215,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -248,14 +236,10 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
- kMmapFd,
- kMmapFdOffset);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+ kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index 64498eaf1f..88851d104c 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -59,10 +59,10 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = GetProtectionFromMemoryPermission(access);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase = mmap(hint, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
@@ -86,22 +86,17 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+ void* reservation =
+ mmap(hint, request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -139,12 +134,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -166,14 +155,10 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ void* result =
+ mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 7b7ff99d20..61a1ab9ab3 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -37,7 +37,7 @@
#define _TRUNCATE 0
#define STRUNCATE 80
-inline void MemoryBarrier() {
+inline void MemoryFence() {
int barrier = 0;
__asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
}
@@ -737,8 +737,8 @@ void* OS::GetRandomMmapAddr() {
return reinterpret_cast<void *>(address);
}
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection,
+ void* hint) {
LPVOID base = NULL;
static BOOL use_aslr = -1;
#ifdef V8_HOST_ARCH_32_BIT
@@ -753,9 +753,7 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
if (use_aslr &&
(protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
// For executable pages try and randomize the allocation address
- for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
- base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
- }
+ base = VirtualAlloc(hint, size, action, protection);
}
// After three attempts give up and let the OS find an address to use.
@@ -765,14 +763,15 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable) {
+ bool is_executable, void* hint) {
return OS::Allocate(requested, allocated,
is_executable ? OS::MemoryPermission::kReadWriteExecute
- : OS::MemoryPermission::kReadWrite);
+ : OS::MemoryPermission::kReadWrite,
+ hint);
}
void* OS::Allocate(const size_t requested, size_t* allocated,
- OS::MemoryPermission access) {
+ OS::MemoryPermission access, void* hint) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
@@ -793,9 +792,8 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
}
}
- LPVOID mbase = RandomizedVirtualAlloc(msize,
- MEM_COMMIT | MEM_RESERVE,
- prot);
+ LPVOID mbase =
+ RandomizedVirtualAlloc(msize, MEM_COMMIT | MEM_RESERVE, prot, hint);
if (mbase == NULL) return NULL;
@@ -805,10 +803,6 @@ void* OS::Allocate(const size_t requested, size_t* allocated,
return mbase;
}
-void* OS::AllocateGuarded(const size_t requested) {
- return VirtualAlloc(nullptr, requested, MEM_RESERVE, PAGE_NOACCESS);
-}
-
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
VirtualFree(address, 0, MEM_RELEASE);
@@ -1214,17 +1208,15 @@ int OS::ActivationFrameAlignment() {
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+VirtualMemory::VirtualMemory(size_t size, void* hint)
+ : address_(ReserveRegion(size, hint)), size_(size) {}
-VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
- void* address = ReserveRegion(request_size);
+ void* address = ReserveRegion(request_size, hint);
if (address == NULL) return;
uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
@@ -1237,7 +1229,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
DCHECK(base == static_cast<uint8_t*>(address));
} else {
// Resizing failed, just go with a bigger area.
- address = ReserveRegion(request_size);
+ address = ReserveRegion(request_size, hint);
if (address == NULL) return;
}
address_ = address;
@@ -1253,12 +1245,6 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
@@ -1286,9 +1272,8 @@ bool VirtualMemory::Guard(void* address) {
return true;
}
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+void* VirtualMemory::ReserveRegion(size_t size, void* hint) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS, hint);
}
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 55cff6bf64..487968f5d7 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -165,13 +165,12 @@ class V8_BASE_EXPORT OS {
// Allocate/Free memory used by JS heap. Permissions are set according to the
// is_* flags. Returns the address of allocated memory, or NULL if failed.
static void* Allocate(const size_t requested, size_t* allocated,
- MemoryPermission access);
+ MemoryPermission access, void* hint = nullptr);
// Allocate/Free memory used by JS heap. Pages are readable/writable, but
// they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or NULL if failed.
- static void* Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable);
+ static void* Allocate(const size_t requested, size_t* allocated,
+ bool is_executable, void* hint = nullptr);
static void Free(void* address, const size_t size);
// Allocates a region of memory that is inaccessible. On Windows this reserves
@@ -202,7 +201,7 @@ class V8_BASE_EXPORT OS {
static void Sleep(TimeDelta interval);
// Abort the current process.
- V8_NORETURN static void Abort();
+ [[noreturn]] static void Abort();
// Debug break.
static void DebugBreak();
@@ -297,12 +296,12 @@ class V8_BASE_EXPORT VirtualMemory {
VirtualMemory();
// Reserves virtual memory with size.
- explicit VirtualMemory(size_t size);
+ explicit VirtualMemory(size_t size, void* hint);
// Reserves virtual memory containing an area of the given size that
// is aligned per alignment. This may not be at the position returned
// by address().
- VirtualMemory(size_t size, size_t alignment);
+ VirtualMemory(size_t size, size_t alignment, void* hint);
// Construct a virtual memory by assigning it some already mapped address
// and size.
@@ -313,7 +312,7 @@ class V8_BASE_EXPORT VirtualMemory {
~VirtualMemory();
// Returns whether the memory has been reserved.
- bool IsReserved();
+ bool IsReserved() const { return address_ != nullptr; }
// Initializes or resets an embedded VirtualMemory object.
void Reset();
@@ -322,16 +321,22 @@ class V8_BASE_EXPORT VirtualMemory {
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
- void* address() {
+ void* address() const {
DCHECK(IsReserved());
return address_;
}
+ void* end() const {
+ DCHECK(IsReserved());
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
+ size_);
+ }
+
// Returns the size of the reserved memory. The returned value is only
// meaningful when IsReserved() returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
- size_t size() { return size_; }
+ size_t size() const { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool is_executable);
@@ -342,21 +347,22 @@ class V8_BASE_EXPORT VirtualMemory {
// Creates a single guard page at the given address.
bool Guard(void* address);
- // Releases the memory after |free_start|.
- void ReleasePartial(void* free_start) {
+ // Releases the memory after |free_start|. Returns the bytes released.
+ size_t ReleasePartial(void* free_start) {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
- size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
- reinterpret_cast<size_t>(address_));
+ const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
+ reinterpret_cast<size_t>(address_));
CHECK(InVM(free_start, size));
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
- bool result = ReleasePartialRegion(address_, size_, free_start, size);
+ const bool result = ReleasePartialRegion(address_, size_, free_start, size);
USE(result);
DCHECK(result);
size_ -= size;
+ return size;
}
void Release() {
@@ -381,7 +387,7 @@ class V8_BASE_EXPORT VirtualMemory {
from->Reset();
}
- static void* ReserveRegion(size_t size);
+ static void* ReserveRegion(size_t size, void* hint);
static bool CommitRegion(void* base, size_t size, bool is_executable);
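Putting the header changes together: the constructors take the hint explicitly, end() exposes address() + size(), and ReleasePartial() now reports how many bytes it gave back. A caller-side sketch assuming only the declarations above (the sizes and the use of GetRandomMmapAddr() as the hint source are illustrative):

    // Hypothetical caller of the updated VirtualMemory interface.
    #include "src/base/platform/platform.h"

    void Example() {
      using v8::base::OS;
      using v8::base::VirtualMemory;

      void* hint = OS::GetRandomMmapAddr();  // caller supplies the hint now
      VirtualMemory reservation(1 * 1024 * 1024, hint);
      if (!reservation.IsReserved()) return;

      void* limit = reservation.end();       // new accessor: address() + size()
      (void)limit;

      // Give back the upper half; the return value is the number of bytes freed.
      void* free_start =
          static_cast<char*>(reservation.address()) + reservation.size() / 2;
      size_t released = reservation.ReleasePartial(free_start);
      (void)released;
    }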
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 6b483382f0..09e3fd02dd 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -63,14 +63,12 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) {
if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
if (thread_cputime(-1, &tc) != 0) {
UNREACHABLE();
- return 0;
}
}
#endif
struct timespec ts;
if (clock_gettime(clk_id, &ts) != 0) {
UNREACHABLE();
- return 0;
}
v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
result *= v8::base::Time::kMicrosecondsPerSecond;
@@ -661,7 +659,6 @@ ThreadTicks ThreadTicks::Now() {
return ThreadTicks::GetForThread(::GetCurrentThread());
#else
UNREACHABLE();
- return ThreadTicks();
#endif
}
diff --git a/deps/v8/src/base/safe_conversions.h b/deps/v8/src/base/safe_conversions.h
index 0a1bd69646..c16fa36682 100644
--- a/deps/v8/src/base/safe_conversions.h
+++ b/deps/v8/src/base/safe_conversions.h
@@ -58,7 +58,6 @@ inline Dst saturated_cast(Src value) {
}
UNREACHABLE();
- return static_cast<Dst>(value);
}
} // namespace base
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
new file mode 100644
index 0000000000..4bb6a325a2
--- /dev/null
+++ b/deps/v8/src/base/template-utils.h
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <array>
+#include <memory>
+
+namespace v8 {
+namespace base {
+
+namespace detail {
+
+// make_array_helper statically iteratively creates the index list 0 .. Size-1.
+// A specialization for the base case (first index is 0) finally constructs the
+// array.
+// TODO(clemensh): Use std::index_sequence once we have C++14 support.
+template <class Function, std::size_t... Indexes>
+struct make_array_helper;
+
+template <class Function, std::size_t... Indexes>
+struct make_array_helper<Function, 0, Indexes...> {
+ constexpr static auto make_array(Function f)
+ -> std::array<decltype(f(std::size_t{0})), sizeof...(Indexes) + 1> {
+ return {{f(0), f(Indexes)...}};
+ }
+};
+
+template <class Function, std::size_t FirstIndex, std::size_t... Indexes>
+struct make_array_helper<Function, FirstIndex, Indexes...>
+ : make_array_helper<Function, FirstIndex - 1, FirstIndex, Indexes...> {};
+
+} // namespace detail
+
+// base::make_array: Create an array of fixed length, initialized by a function.
+// The content of the array is created by calling the function with 0 .. Size-1.
+// Example usage to create the array {0, 2, 4}:
+// std::array<int, 3> arr = base::make_array<3>(
+// [](std::size_t i) { return static_cast<int>(2 * i); });
+// The resulting array will be constexpr if the passed function is constexpr.
+template <std::size_t Size, class Function>
+constexpr auto make_array(Function f)
+ -> std::array<decltype(f(std::size_t{0})), Size> {
+ static_assert(Size > 0, "Can only create non-empty arrays");
+ return detail::make_array_helper<Function, Size - 1>::make_array(f);
+}
+
+// base::make_unique<T>: Construct an object of type T and wrap it in a
+// std::unique_ptr.
+// Replacement for C++14's std::make_unique.
+template <typename T, typename... Args>
+std::unique_ptr<T> make_unique(Args&&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+} // namespace base
+} // namespace v8
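A compilable usage sketch of the two new helpers (the include path is assumed from the file location above):

    // Usage sketch for base::make_array and base::make_unique.
    #include <array>
    #include <cstddef>
    #include <memory>
    #include "src/base/template-utils.h"

    void Example() {
      // Produces {0, 2, 4}: the lambda is invoked with indices 0 .. Size-1.
      std::array<int, 3> evens = v8::base::make_array<3>(
          [](std::size_t i) { return static_cast<int>(2 * i); });

      // C++11-compatible replacement for std::make_unique.
      std::unique_ptr<int> answer = v8::base::make_unique<int>(42);

      (void)evens;
      (void)answer;
    }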
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 3a6f2c63cf..842b36a1a0 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -9,6 +9,7 @@
#include <new>
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
@@ -82,7 +83,7 @@ int RandomNumberGenerator::NextInt(int max) {
DCHECK_LT(0, max);
// Fast path if max is a power of 2.
- if (IS_POWER_OF_TWO(max)) {
+ if (bits::IsPowerOfTwo(max)) {
return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
}
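For the fast path above: when max is a power of two, Next(31) yields a uniform 31-bit value, and (max * value) >> 31 scales it into [0, max) without modulo bias; for example, max = 8 with a draw of 2^30 gives (8 * 2^30) >> 31 = 4. A sketch with the power-of-two test spelled out (helper names are illustrative, not V8's):

    // Illustrative sketch of the power-of-two fast path.
    #include <cstdint>

    bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

    // |bits31| is assumed uniform in [0, 2^31); |max| must be a power of two.
    int FastNextInt(int max, int64_t bits31) {
      // max * bits31 < max * 2^31, so shifting right by 31 lands in [0, max).
      return static_cast<int>((max * bits31) >> 31);
    }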
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index e2a9c4e557..9a4af3f497 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -105,7 +105,6 @@ static int HexCharValue(char c) {
if ('a' <= c && c <= 'f') return 10 + c - 'a';
if ('A' <= c && c <= 'F') return 10 + c - 'A';
UNREACHABLE();
- return 0; // To make compiler happy.
}
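The trailing return after UNREACHABLE() can be dropped because the failure path is now annotated as non-returning (see the [[noreturn]] change to OS::Abort above), so the compiler no longer needs a dummy value to be convinced that every path returns. A small self-contained illustration of the pattern, with a stand-in for UNREACHABLE():

    // Sketch: once the failure path is noreturn, the dead "return 0;" can go.
    #include <cstdlib>

    [[noreturn]] void Unreachable() { std::abort(); }  // stand-in, not V8's macro

    int HexCharValue(char c) {
      if ('0' <= c && c <= '9') return c - '0';
      if ('a' <= c && c <= 'f') return 10 + c - 'a';
      if ('A' <= c && c <= 'F') return 10 + c - 'A';
      Unreachable();  // control never falls out of here, so no return is needed
    }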
diff --git a/deps/v8/src/bit-vector.cc b/deps/v8/src/bit-vector.cc
index e6aec7efb1..1da110b342 100644
--- a/deps/v8/src/bit-vector.cc
+++ b/deps/v8/src/bit-vector.cc
@@ -32,7 +32,8 @@ void BitVector::Iterator::Advance() {
while (val == 0) {
current_index_++;
if (Done()) return;
- val = target_->data_[current_index_];
+ DCHECK(!target_->is_inline());
+ val = target_->data_.ptr_[current_index_];
current_ = current_index_ << kDataBitShift;
}
val = SkipZeroBytes(val);
@@ -42,16 +43,15 @@ void BitVector::Iterator::Advance() {
int BitVector::Count() const {
- int count = 0;
- for (int i = 0; i < data_length_; i++) {
- uintptr_t data = data_[i];
- if (sizeof(data) == 8) {
- count += base::bits::CountPopulation64(data);
- } else {
- count += base::bits::CountPopulation32(static_cast<uint32_t>(data));
+ if (data_length_ == 0) {
+ return base::bits::CountPopulation(data_.inline_);
+ } else {
+ int count = 0;
+ for (int i = 0; i < data_length_; i++) {
+ count += base::bits::CountPopulation(data_.ptr_[i]);
}
+ return count;
}
- return count;
}
} // namespace internal
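Count() now just sums the population count of each storage word (a single word in the inline case). A portable fallback for the word-wise counting, in case the intrinsic-based base::bits::CountPopulation is unfamiliar:

    #include <cstdint>

    // Portable popcount fallback; V8's base::bits::CountPopulation uses
    // compiler intrinsics where available.
    int CountPopulation(uintptr_t value) {
      int count = 0;
      while (value != 0) {
        value &= value - 1;  // clears the lowest set bit
        count++;
      }
      return count;
    }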
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index fd61489c2a..71d69b20c2 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -13,15 +13,22 @@ namespace internal {
class BitVector : public ZoneObject {
public:
+ union DataStorage {
+ uintptr_t* ptr_; // valid if data_length_ > 1
+ uintptr_t inline_; // valid if data_length_ == 1
+
+ DataStorage(uintptr_t value) : inline_(value) {}
+ };
+
// Iterator for the elements of this BitVector.
class Iterator BASE_EMBEDDED {
public:
explicit Iterator(BitVector* target)
: target_(target),
current_index_(0),
- current_value_(target->data_[0]),
+ current_value_(target->is_inline() ? target->data_.inline_
+ : target->data_.ptr_[0]),
current_(-1) {
- DCHECK(target->data_length_ > 0);
Advance();
}
~Iterator() {}
@@ -58,119 +65,205 @@ class BitVector : public ZoneObject {
friend class BitVector;
};
+ static const int kDataLengthForInline = 1;
static const int kDataBits = kPointerSize * 8;
static const int kDataBitShift = kPointerSize == 8 ? 6 : 5;
static const uintptr_t kOne = 1; // This saves some static_casts.
+ BitVector() : length_(0), data_length_(kDataLengthForInline), data_(0) {}
+
BitVector(int length, Zone* zone)
- : length_(length),
- data_length_(SizeFor(length)),
- data_(zone->NewArray<uintptr_t>(data_length_)) {
+ : length_(length), data_length_(SizeFor(length)), data_(0) {
DCHECK_LE(0, length);
- Clear();
+ if (!is_inline()) {
+ data_.ptr_ = zone->NewArray<uintptr_t>(data_length_);
+ Clear();
+ }
+ // Otherwise, clearing is implicit
}
BitVector(const BitVector& other, Zone* zone)
- : length_(other.length()),
- data_length_(SizeFor(length_)),
- data_(zone->NewArray<uintptr_t>(data_length_)) {
- CopyFrom(other);
+ : length_(other.length_),
+ data_length_(other.data_length_),
+ data_(other.data_.inline_) {
+ if (!is_inline()) {
+ data_.ptr_ = zone->NewArray<uintptr_t>(data_length_);
+ for (int i = 0; i < other.data_length_; i++) {
+ data_.ptr_[i] = other.data_.ptr_[i];
+ }
+ }
}
static int SizeFor(int length) {
- if (length == 0) return 1;
- return 1 + ((length - 1) / kDataBits);
+ if (length <= kDataBits) {
+ return kDataLengthForInline;
+ }
+
+ int data_length = 1 + ((length - 1) / kDataBits);
+ DCHECK_GT(data_length, kDataLengthForInline);
+ return data_length;
}
void CopyFrom(const BitVector& other) {
- DCHECK(other.length() <= length());
- for (int i = 0; i < other.data_length_; i++) {
- data_[i] = other.data_[i];
- }
- for (int i = other.data_length_; i < data_length_; i++) {
- data_[i] = 0;
+ DCHECK_LE(other.length(), length());
+ CopyFrom(other.data_, other.data_length_);
+ }
+
+ void Resize(int new_length, Zone* zone) {
+ DCHECK_GT(new_length, length());
+ int new_data_length = SizeFor(new_length);
+ if (new_data_length > data_length_) {
+ DataStorage old_data = data_;
+ int old_data_length = data_length_;
+
+ // Make sure the new data length is large enough to need allocation.
+ DCHECK_GT(new_data_length, kDataLengthForInline);
+ data_.ptr_ = zone->NewArray<uintptr_t>(new_data_length);
+ data_length_ = new_data_length;
+ CopyFrom(old_data, old_data_length);
}
+ length_ = new_length;
}
bool Contains(int i) const {
DCHECK(i >= 0 && i < length());
- uintptr_t block = data_[i / kDataBits];
+ uintptr_t block = is_inline() ? data_.inline_ : data_.ptr_[i / kDataBits];
return (block & (kOne << (i % kDataBits))) != 0;
}
void Add(int i) {
DCHECK(i >= 0 && i < length());
- data_[i / kDataBits] |= (kOne << (i % kDataBits));
+ if (is_inline()) {
+ data_.inline_ |= (kOne << i);
+ } else {
+ data_.ptr_[i / kDataBits] |= (kOne << (i % kDataBits));
+ }
}
- void AddAll() { memset(data_, -1, sizeof(uintptr_t) * data_length_); }
+ void AddAll() {
+ // TODO(leszeks): This sets bits outside of the length of this bit-vector,
+ // which is observable if we resize it or copy from it. If this is a
+ // problem, we should clear the high bits either on add, or on resize/copy.
+ if (is_inline()) {
+ data_.inline_ = -1;
+ } else {
+ memset(data_.ptr_, -1, sizeof(uintptr_t) * data_length_);
+ }
+ }
void Remove(int i) {
DCHECK(i >= 0 && i < length());
- data_[i / kDataBits] &= ~(kOne << (i % kDataBits));
+ if (is_inline()) {
+ data_.inline_ &= ~(kOne << i);
+ } else {
+ data_.ptr_[i / kDataBits] &= ~(kOne << (i % kDataBits));
+ }
}
void Union(const BitVector& other) {
DCHECK(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] |= other.data_[i];
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ data_.inline_ |= other.data_.inline_;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ data_.ptr_[i] |= other.data_.ptr_[i];
+ }
}
}
bool UnionIsChanged(const BitVector& other) {
DCHECK(other.length() == length());
- bool changed = false;
- for (int i = 0; i < data_length_; i++) {
- uintptr_t old_data = data_[i];
- data_[i] |= other.data_[i];
- if (data_[i] != old_data) changed = true;
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ uintptr_t old_data = data_.inline_;
+ data_.inline_ |= other.data_.inline_;
+ return data_.inline_ != old_data;
+ } else {
+ bool changed = false;
+ for (int i = 0; i < data_length_; i++) {
+ uintptr_t old_data = data_.ptr_[i];
+ data_.ptr_[i] |= other.data_.ptr_[i];
+ if (data_.ptr_[i] != old_data) changed = true;
+ }
+ return changed;
}
- return changed;
}
void Intersect(const BitVector& other) {
DCHECK(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] &= other.data_[i];
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ data_.inline_ &= other.data_.inline_;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ data_.ptr_[i] &= other.data_.ptr_[i];
+ }
}
}
bool IntersectIsChanged(const BitVector& other) {
DCHECK(other.length() == length());
- bool changed = false;
- for (int i = 0; i < data_length_; i++) {
- uintptr_t old_data = data_[i];
- data_[i] &= other.data_[i];
- if (data_[i] != old_data) changed = true;
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ uintptr_t old_data = data_.inline_;
+ data_.inline_ &= other.data_.inline_;
+ return data_.inline_ != old_data;
+ } else {
+ bool changed = false;
+ for (int i = 0; i < data_length_; i++) {
+ uintptr_t old_data = data_.ptr_[i];
+ data_.ptr_[i] &= other.data_.ptr_[i];
+ if (data_.ptr_[i] != old_data) changed = true;
+ }
+ return changed;
}
- return changed;
}
void Subtract(const BitVector& other) {
DCHECK(other.length() == length());
- for (int i = 0; i < data_length_; i++) {
- data_[i] &= ~other.data_[i];
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ data_.inline_ &= ~other.data_.inline_;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ data_.ptr_[i] &= ~other.data_.ptr_[i];
+ }
}
}
void Clear() {
- for (int i = 0; i < data_length_; i++) {
- data_[i] = 0;
+ if (is_inline()) {
+ data_.inline_ = 0;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ data_.ptr_[i] = 0;
+ }
}
}
bool IsEmpty() const {
- for (int i = 0; i < data_length_; i++) {
- if (data_[i] != 0) return false;
+ if (is_inline()) {
+ return data_.inline_ == 0;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ if (data_.ptr_[i] != 0) return false;
+ }
+ return true;
}
- return true;
}
bool Equals(const BitVector& other) const {
- for (int i = 0; i < data_length_; i++) {
- if (data_[i] != other.data_[i]) return false;
+ DCHECK(other.length() == length());
+ if (is_inline()) {
+ DCHECK(other.is_inline());
+ return data_.inline_ == other.data_.inline_;
+ } else {
+ for (int i = 0; i < data_length_; i++) {
+ if (data_.ptr_[i] != other.data_.ptr_[i]) return false;
+ }
+ return true;
}
- return true;
}
int Count() const;
@@ -182,9 +275,32 @@ class BitVector : public ZoneObject {
#endif
private:
- const int length_;
- const int data_length_;
- uintptr_t* const data_;
+ int length_;
+ int data_length_;
+ DataStorage data_;
+
+ bool is_inline() const { return data_length_ == kDataLengthForInline; }
+
+ void CopyFrom(DataStorage other_data, int other_data_length) {
+ DCHECK_LE(other_data_length, data_length_);
+
+ if (is_inline()) {
+ DCHECK_EQ(other_data_length, kDataLengthForInline);
+ data_.inline_ = other_data.inline_;
+ } else if (other_data_length == kDataLengthForInline) {
+ data_.ptr_[0] = other_data.inline_;
+ for (int i = 1; i < data_length_; i++) {
+ data_.ptr_[i] = 0;
+ }
+ } else {
+ for (int i = 0; i < other_data_length; i++) {
+ data_.ptr_[i] = other_data.ptr_[i];
+ }
+ for (int i = other_data_length; i < data_length_; i++) {
+ data_.ptr_[i] = 0;
+ }
+ }
+ }
DISALLOW_COPY_AND_ASSIGN(BitVector);
};
@@ -195,8 +311,8 @@ class GrowableBitVector BASE_EMBEDDED {
class Iterator BASE_EMBEDDED {
public:
Iterator(const GrowableBitVector* target, Zone* zone)
- : it_(target->bits_ == NULL ? new (zone) BitVector(1, zone)
- : target->bits_) {}
+ : it_(target->bits_ == nullptr ? new (zone) BitVector(1, zone)
+ : target->bits_) {}
bool Done() const { return it_.Done(); }
void Advance() { it_.Advance(); }
int Current() const { return it_.Current(); }
@@ -205,7 +321,7 @@ class GrowableBitVector BASE_EMBEDDED {
BitVector::Iterator it_;
};
- GrowableBitVector() : bits_(NULL) {}
+ GrowableBitVector() : bits_(nullptr) {}
GrowableBitVector(int length, Zone* zone)
: bits_(new (zone) BitVector(length, zone)) {}
@@ -226,23 +342,26 @@ class GrowableBitVector BASE_EMBEDDED {
}
void Clear() {
- if (bits_ != NULL) bits_->Clear();
+ if (bits_ != nullptr) bits_->Clear();
}
private:
static const int kInitialLength = 1024;
bool InBitsRange(int value) const {
- return bits_ != NULL && bits_->length() > value;
+ return bits_ != nullptr && bits_->length() > value;
}
void EnsureCapacity(int value, Zone* zone) {
if (InBitsRange(value)) return;
- int new_length = bits_ == NULL ? kInitialLength : bits_->length();
+ int new_length = bits_ == nullptr ? kInitialLength : bits_->length();
while (new_length <= value) new_length *= 2;
- BitVector* new_bits = new (zone) BitVector(new_length, zone);
- if (bits_ != NULL) new_bits->CopyFrom(*bits_);
- bits_ = new_bits;
+
+ if (bits_ == nullptr) {
+ bits_ = new (zone) BitVector(new_length, zone);
+ } else {
+ bits_->Resize(new_length, zone);
+ }
}
BitVector* bits_;
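The BitVector rewrite above is a small-size optimization: when the vector fits in a single word (length <= kDataBits) the bits live directly in the union's inline_ member and no zone allocation happens; longer vectors keep using a zone-allocated array, and Resize() only reallocates when the word count grows. A stripped-down standalone sketch of the same storage idea (not V8's class; copy/assignment omitted):

    // Standalone sketch of inline-vs-heap bit storage (illustrative only).
    #include <cstdint>

    class SmallBitSet {
     public:
      explicit SmallBitSet(int length)
          : words_(length <= kBits ? 1 : (length + kBits - 1) / kBits) {
        if (is_inline()) {
          data_.inline_word = 0;                // bits stored directly in the union
        } else {
          data_.ptr = new uintptr_t[words_]();  // larger sets spill to the heap
        }
      }
      ~SmallBitSet() {
        if (!is_inline()) delete[] data_.ptr;
      }

      void Add(int i) { word(i) |= uintptr_t{1} << (i % kBits); }
      bool Contains(int i) const { return (word(i) >> (i % kBits)) & 1; }

     private:
      static const int kBits = static_cast<int>(sizeof(uintptr_t)) * 8;

      bool is_inline() const { return words_ == 1; }
      uintptr_t& word(int i) {
        return is_inline() ? data_.inline_word : data_.ptr[i / kBits];
      }
      const uintptr_t& word(int i) const {
        return is_inline() ? data_.inline_word : data_.ptr[i / kBits];
      }

      int words_;
      union {
        uintptr_t inline_word;  // valid when words_ == 1
        uintptr_t* ptr;         // valid when words_ > 1
      } data_;
    };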
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 2652ab028e..a864789a03 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -112,6 +112,7 @@ class Genesis BASE_EMBEDDED {
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate_->factory(); }
+ Builtins* builtins() const { return isolate_->builtins(); }
Heap* heap() const { return isolate_->heap(); }
Handle<Context> result() { return result_; }
@@ -125,12 +126,13 @@ class Genesis BASE_EMBEDDED {
void CreateRoots();
// Creates the empty function. Used for creating a context from scratch.
Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
- // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
- Handle<JSFunction> GetRestrictedFunctionPropertiesThrower();
- Handle<JSFunction> GetStrictArgumentsPoisonFunction();
- Handle<JSFunction> GetThrowTypeErrorIntrinsic(Builtins::Name builtin_name);
+ // Returns the %ThrowTypeError% intrinsic function.
+ // See ES#sec-%throwtypeerror% for details.
+ Handle<JSFunction> GetThrowTypeErrorIntrinsic();
+ void CreateSloppyModeFunctionMaps(Handle<JSFunction> empty);
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
+ void CreateObjectFunction(Handle<JSFunction> empty);
void CreateIteratorMaps(Handle<JSFunction> empty);
void CreateAsyncIteratorMaps(Handle<JSFunction> empty);
void CreateAsyncFunctionMaps(Handle<JSFunction> empty);
@@ -176,14 +178,10 @@ class Genesis BASE_EMBEDDED {
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
- void InstallOneBuiltinFunction(Handle<Object> prototype, const char* method,
- Builtins::Name name);
-
- Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
- const char* name,
- Builtins::Name call_byteLength,
- BuiltinFunctionId byteLength_id,
- Builtins::Name call_slice);
+ Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
+ Builtins::Name call_byteLength,
+ BuiltinFunctionId byteLength_id,
+ Builtins::Name call_slice);
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
@@ -241,11 +239,6 @@ class Genesis BASE_EMBEDDED {
void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
- void MakeFunctionInstancePrototypeWritable();
-
- void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode);
-
static bool CallUtilsFunction(Isolate* isolate, const char* name);
static bool CompileExtension(Isolate* isolate, v8::Extension* extension);
@@ -255,15 +248,12 @@ class Genesis BASE_EMBEDDED {
Handle<Context> native_context_;
Handle<JSGlobalProxy> global_proxy_;
- // Function maps. Function maps are created initially with a read only
- // prototype for the processing of JS builtins. Later the function maps are
- // replaced in order to make prototype writable. These are the final, writable
- // prototype, maps.
- Handle<Map> sloppy_function_map_writable_prototype_;
- Handle<Map> strict_function_map_writable_prototype_;
- Handle<Map> class_function_map_;
- Handle<JSFunction> strict_poison_function_;
- Handle<JSFunction> restricted_function_properties_thrower_;
+ // Temporary function maps needed only during bootstrapping.
+ Handle<Map> strict_function_with_home_object_map_;
+ Handle<Map> strict_function_with_name_and_home_object_map_;
+
+ // %ThrowTypeError%. See ES#sec-%throwtypeerror% for details.
+ Handle<JSFunction> restricted_properties_thrower_;
BootstrapperActive active_;
friend class Bootstrapper;
@@ -336,46 +326,41 @@ void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
InstanceType type, int instance_size,
- MaybeHandle<JSObject> maybe_prototype,
- Builtins::Name call,
- bool strict_function_map = false) {
+ MaybeHandle<Object> maybe_prototype,
+ Builtins::Name call) {
Factory* factory = isolate->factory();
Handle<Code> call_code(isolate->builtins()->builtin(call));
- Handle<JSObject> prototype;
+ Handle<Object> prototype;
Handle<JSFunction> result =
maybe_prototype.ToHandle(&prototype)
? factory->NewFunction(name, call_code, prototype, type,
- instance_size, strict_function_map)
- : factory->NewFunctionWithoutPrototype(name, call_code,
- strict_function_map);
+ instance_size, STRICT, IMMUTABLE)
+ : factory->NewFunctionWithoutPrototype(name, call_code, STRICT);
result->shared()->set_native(true);
return result;
}
Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
InstanceType type, int instance_size,
- MaybeHandle<JSObject> maybe_prototype,
+ MaybeHandle<Object> maybe_prototype,
Builtins::Name call,
- PropertyAttributes attributes,
- bool strict_function_map = false) {
+ PropertyAttributes attributes) {
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
Handle<JSFunction> function =
CreateFunction(target->GetIsolate(), name_string, type, instance_size,
- maybe_prototype, call, strict_function_map);
+ maybe_prototype, call);
InstallFunction(target, name, function, name_string, attributes);
return function;
}
Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
InstanceType type, int instance_size,
- MaybeHandle<JSObject> maybe_prototype,
- Builtins::Name call,
- bool strict_function_map = false) {
+ MaybeHandle<Object> maybe_prototype,
+ Builtins::Name call) {
Factory* const factory = target->GetIsolate()->factory();
PropertyAttributes attributes = DONT_ENUM;
return InstallFunction(target, factory->InternalizeUtf8String(name), type,
- instance_size, maybe_prototype, call, attributes,
- strict_function_map);
+ instance_size, maybe_prototype, call, attributes);
}
Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
@@ -393,51 +378,55 @@ Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
return fun;
}
-Handle<JSFunction> InstallArrayBuiltinFunction(Handle<JSObject> base,
- const char* name,
- Builtins::Name call) {
- Isolate* isolate = base->GetIsolate();
- Handle<String> str_name = isolate->factory()->InternalizeUtf8String(name);
+Handle<JSFunction> SimpleInstallFunction(
+ Handle<JSObject> base, Handle<Name> property_name,
+ Handle<String> function_name, Builtins::Name call, int len, bool adapt,
+ PropertyAttributes attrs = DONT_ENUM,
+ BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
Handle<JSFunction> fun =
- CreateFunction(isolate, str_name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), call, true);
- fun->shared()->set_internal_formal_parameter_count(
- Builtins::GetBuiltinParameterCount(call));
-
- // Set the length to 1 to satisfy ECMA-262.
- fun->shared()->set_length(1);
- fun->shared()->set_language_mode(STRICT);
- InstallFunction(base, fun, str_name);
+ SimpleCreateFunction(base->GetIsolate(), function_name, call, len, adapt);
+ if (id != kInvalidBuiltinFunctionId) {
+ fun->shared()->set_builtin_function_id(id);
+ }
+ InstallFunction(base, fun, property_name, attrs);
return fun;
}
-Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
- Handle<String> name,
- Builtins::Name call, int len,
- bool adapt,
- PropertyAttributes attrs = DONT_ENUM) {
- Handle<JSFunction> fun =
- SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
- InstallFunction(base, fun, name, attrs);
- return fun;
+Handle<JSFunction> SimpleInstallFunction(
+ Handle<JSObject> base, Handle<String> name, Builtins::Name call, int len,
+ bool adapt, PropertyAttributes attrs = DONT_ENUM,
+ BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
+ return SimpleInstallFunction(base, name, name, call, len, adapt, attrs, id);
}
-Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
- const char* name, Builtins::Name call,
- int len, bool adapt,
- PropertyAttributes attrs = DONT_ENUM) {
+Handle<JSFunction> SimpleInstallFunction(
+ Handle<JSObject> base, Handle<Name> property_name,
+ const char* function_name, Builtins::Name call, int len, bool adapt,
+ PropertyAttributes attrs = DONT_ENUM,
+ BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
Factory* const factory = base->GetIsolate()->factory();
+ // Function name does not have to be internalized.
+ return SimpleInstallFunction(
+ base, property_name, factory->NewStringFromAsciiChecked(function_name),
+ call, len, adapt, attrs, id);
+}
+
+Handle<JSFunction> SimpleInstallFunction(
+ Handle<JSObject> base, const char* name, Builtins::Name call, int len,
+ bool adapt, PropertyAttributes attrs = DONT_ENUM,
+ BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
+ Factory* const factory = base->GetIsolate()->factory();
+  // Although the function name does not have to be internalized, the property
+  // name will be internalized during property addition anyway, so do it here.
return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
- len, adapt, attrs);
+ len, adapt, attrs, id);
}
Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
const char* name, Builtins::Name call,
int len, bool adapt,
BuiltinFunctionId id) {
- Handle<JSFunction> fun = SimpleInstallFunction(base, name, call, len, adapt);
- fun->shared()->set_builtin_function_id(id);
- return fun;
+ return SimpleInstallFunction(base, name, call, len, adapt, DONT_ENUM, id);
}
void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
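Note (illustrative only, not part of this change): the reworked SimpleInstallFunction
overloads let the property key, often a well-known symbol, differ from the printable
function name such as "[Symbol.iterator]". A minimal TypeScript sketch of the
JS-observable result for builtins installed this way further down in this file:

    // The property key is a symbol, but the function still carries a descriptive
    // string name; DONT_ENUM keeps the property out of enumeration.
    console.log(String.prototype[Symbol.iterator].name); // "[Symbol.iterator]"
    console.log(Array.prototype[Symbol.iterator].name);  // "values"
    const desc = Object.getOwnPropertyDescriptor(String.prototype, Symbol.iterator)!;
    console.log(desc.enumerable); // false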
@@ -514,89 +503,20 @@ void InstallSpeciesGetter(Handle<JSFunction> constructor) {
} // namespace
Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
- // Allocate the map for function instances. Maps are allocated first and their
- // prototypes patched later, once empty function is created.
-
- // Functions with this map will not have a 'prototype' property, and
- // can not be used as constructors.
- Handle<Map> function_without_prototype_map =
- factory()->CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
- native_context()->set_sloppy_function_without_prototype_map(
- *function_without_prototype_map);
-
- // Allocate the function map. This map is temporary, used only for processing
- // of builtins.
- // Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> function_map =
- factory()->CreateSloppyFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
- native_context()->set_sloppy_function_map(*function_map);
- native_context()->set_sloppy_function_with_readonly_prototype_map(
- *function_map);
-
- // The final map for functions. Writeable prototype.
- // This map is installed in MakeFunctionInstancePrototypeWritable.
- sloppy_function_map_writable_prototype_ =
- factory()->CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
- Handle<String> object_name = factory->Object_string();
-
- Handle<JSObject> object_function_prototype;
-
- { // --- O b j e c t ---
- Handle<JSFunction> object_fun = factory->NewFunction(object_name);
- int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
- int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
- Handle<Map> object_function_map =
- factory->NewMap(JS_OBJECT_TYPE, instance_size);
- object_function_map->SetInObjectProperties(unused);
- JSFunction::SetInitialMap(object_fun, object_function_map,
- isolate->factory()->null_value());
- object_function_map->set_unused_property_fields(unused);
-
- native_context()->set_object_function(*object_fun);
-
- // Allocate a new prototype for the object function.
- object_function_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- Handle<Map> map = Map::Copy(handle(object_function_prototype->map()),
- "EmptyObjectPrototype");
- map->set_is_prototype_map(true);
- // Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
- map->set_immutable_proto(true);
- object_function_prototype->set_map(*map);
-
- native_context()->set_initial_object_prototype(*object_function_prototype);
- JSFunction::SetPrototype(object_fun, object_function_prototype);
-
- {
- // Set up slow map for Object.create(null) instances without in-object
- // properties.
- Handle<Map> map(object_fun->initial_map(), isolate);
- map = Map::CopyInitialMapNormalized(map);
- Map::SetPrototype(map, isolate->factory()->null_value());
- native_context()->set_slow_object_with_null_prototype_map(*map);
-
- // Set up slow map for literals with too many properties.
- map = Map::Copy(map, "slow_object_with_object_prototype_map");
- Map::SetPrototype(map, object_function_prototype);
- native_context()->set_slow_object_with_object_prototype_map(*map);
- }
- }
+ // Allocate the function map first and then patch the prototype later.
+ Handle<Map> empty_function_map = factory->CreateSloppyFunctionMap(
+ FUNCTION_WITHOUT_PROTOTYPE, MaybeHandle<JSFunction>());
+ empty_function_map->set_is_prototype_map(true);
+ DCHECK(!empty_function_map->is_dictionary_map());
- // Allocate the empty function as the prototype for function - ES6 19.2.3
+ // Allocate the empty function as the prototype for function according to
+ // ES#sec-properties-of-the-function-prototype-object
Handle<Code> code(isolate->builtins()->EmptyFunction());
Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(factory->empty_string(), code);
-
- // Allocate the function map first and then patch the prototype later
- Handle<Map> empty_function_map =
- factory->CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
- DCHECK(!empty_function_map->is_dictionary_map());
- Map::SetPrototype(empty_function_map, object_function_prototype);
- empty_function_map->set_is_prototype_map(true);
-
- empty_function->set_map(*empty_function_map);
+ factory->NewFunction(empty_function_map, factory->empty_string(), code);
+ empty_function->shared()->set_language_mode(STRICT);
// --- E m p t y ---
Handle<String> source = factory->NewStringFromStaticChars("() {}");
@@ -610,27 +530,46 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
SharedFunctionInfo::SetScript(handle(empty_function->shared()), script);
- // Set prototypes for the function maps.
- Handle<Map> sloppy_function_map(native_context()->sloppy_function_map(),
- isolate);
- Handle<Map> sloppy_function_without_prototype_map(
- native_context()->sloppy_function_without_prototype_map(), isolate);
- Map::SetPrototype(sloppy_function_map, empty_function);
- Map::SetPrototype(sloppy_function_without_prototype_map, empty_function);
- Map::SetPrototype(sloppy_function_map_writable_prototype_, empty_function);
-
return empty_function;
}
+void Genesis::CreateSloppyModeFunctionMaps(Handle<JSFunction> empty) {
+ Factory* factory = isolate_->factory();
+ Handle<Map> map;
-// Creates the %ThrowTypeError% function.
-Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
- Builtins::Name builtin_name) {
- Handle<String> name =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("ThrowTypeError"));
- Handle<Code> code(isolate()->builtins()->builtin(builtin_name));
+ //
+ // Allocate maps for sloppy functions without prototype.
+ //
+ map = factory->CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty);
+ native_context()->set_sloppy_function_without_prototype_map(*map);
+
+ //
+ // Allocate maps for sloppy functions with readonly prototype.
+ //
+ map =
+ factory->CreateSloppyFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE, empty);
+ native_context()->set_sloppy_function_with_readonly_prototype_map(*map);
+
+ //
+ // Allocate maps for sloppy functions with writable prototype.
+ //
+ map = factory->CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE,
+ empty);
+ native_context()->set_sloppy_function_map(*map);
+
+ map = factory->CreateSloppyFunctionMap(
+ FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE, empty);
+ native_context()->set_sloppy_function_with_name_map(*map);
+}
+
+Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
+ if (!restricted_properties_thrower_.is_null()) {
+ return restricted_properties_thrower_;
+ }
+ Handle<String> name(factory()->empty_string());
+ Handle<Code> code(builtins()->StrictPoisonPillThrower());
Handle<JSFunction> function =
- factory()->NewFunctionWithoutPrototype(name, code, true);
+ factory()->NewFunctionWithoutPrototype(name, code, STRICT);
function->shared()->DontAdaptArguments();
// %ThrowTypeError% must not have a name property.
@@ -654,68 +593,141 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
JSObject::MigrateSlowToFast(function, 0, "Bootstrapping");
+ restricted_properties_thrower_ = function;
return function;
}
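Note (illustrative only, identifiers made up for the example): GetThrowTypeErrorIntrinsic
now caches a single %ThrowTypeError% function, which AddRestrictedFunctionProperties
below installs as both getter and setter of the restricted "caller" and "arguments"
accessors on %FunctionPrototype%. A minimal TypeScript sketch of the spec-mandated
behaviour this produces:

    // Both accessor slots point at the same poison-pill function, and reading
    // "caller" on a strict function throws through it.
    const callerDesc = Object.getOwnPropertyDescriptor(Function.prototype, "caller")!;
    console.log(callerDesc.get === callerDesc.set); // true: one shared %ThrowTypeError%
    function strictFn() { "use strict"; }
    try {
      void (strictFn as any).caller;
    } catch (e) {
      console.log(e instanceof TypeError); // true
    }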
+void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
+ Factory* factory = isolate_->factory();
+ Handle<Map> map;
-// ECMAScript 5th Edition, 13.2.3
-Handle<JSFunction> Genesis::GetRestrictedFunctionPropertiesThrower() {
- if (restricted_function_properties_thrower_.is_null()) {
- restricted_function_properties_thrower_ = GetThrowTypeErrorIntrinsic(
- Builtins::kRestrictedFunctionPropertiesThrower);
- }
- return restricted_function_properties_thrower_;
-}
+ //
+ // Allocate maps for strict functions without prototype.
+ //
+ map = factory->CreateStrictFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty);
+ native_context()->set_strict_function_without_prototype_map(*map);
+ map = factory->CreateStrictFunctionMap(METHOD_WITH_NAME, empty);
+ native_context()->set_method_with_name_map(*map);
-Handle<JSFunction> Genesis::GetStrictArgumentsPoisonFunction() {
- if (strict_poison_function_.is_null()) {
- strict_poison_function_ = GetThrowTypeErrorIntrinsic(
- Builtins::kRestrictedStrictArgumentsPropertiesThrower);
- }
- return strict_poison_function_;
-}
+ map = factory->CreateStrictFunctionMap(METHOD_WITH_HOME_OBJECT, empty);
+ native_context()->set_method_with_home_object_map(*map);
+ map =
+ factory->CreateStrictFunctionMap(METHOD_WITH_NAME_AND_HOME_OBJECT, empty);
+ native_context()->set_method_with_name_and_home_object_map(*map);
-void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
- // Allocate map for the prototype-less strict mode instances.
- Handle<Map> strict_function_without_prototype_map =
- factory()->CreateStrictFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty);
- native_context()->set_strict_function_without_prototype_map(
- *strict_function_without_prototype_map);
-
- // Allocate map for the strict mode functions. This map is temporary, used
- // only for processing of builtins.
- // Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_function_map = factory()->CreateStrictFunctionMap(
- FUNCTION_WITH_READONLY_PROTOTYPE, empty);
- native_context()->set_strict_function_map(*strict_function_map);
-
- // The final map for the strict mode functions. Writeable prototype.
- // This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_function_map_writable_prototype_ = factory()->CreateStrictFunctionMap(
- FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
-
- // Allocate map for classes
- class_function_map_ = factory()->CreateClassFunctionMap(empty);
- native_context()->set_class_function_map(*class_function_map_);
+ //
+ // Allocate maps for strict functions with writable prototype.
+ //
+ map = factory->CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE,
+ empty);
+ native_context()->set_strict_function_map(*map);
+
+ map = factory->CreateStrictFunctionMap(
+ FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE, empty);
+ native_context()->set_strict_function_with_name_map(*map);
+
+ strict_function_with_home_object_map_ = factory->CreateStrictFunctionMap(
+ FUNCTION_WITH_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE, empty);
+ strict_function_with_name_and_home_object_map_ =
+ factory->CreateStrictFunctionMap(
+ FUNCTION_WITH_NAME_AND_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE, empty);
+
+ //
+ // Allocate maps for strict functions with readonly prototype.
+ //
+ map =
+ factory->CreateStrictFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE, empty);
+ native_context()->set_strict_function_with_readonly_prototype_map(*map);
+
+ //
+ // Allocate map for class functions.
+ //
+ map = factory->CreateClassFunctionMap(empty);
+ native_context()->set_class_function_map(*map);
// Now that the strict mode function map is available, set up the
// restricted "arguments" and "caller" getters.
AddRestrictedFunctionProperties(empty);
}
+void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
+ Factory* factory = isolate_->factory();
+
+ // --- O b j e c t ---
+ int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
+ int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
+
+ Handle<JSFunction> object_fun =
+ CreateFunction(isolate_, factory->Object_string(), JS_OBJECT_TYPE,
+ instance_size, factory->null_value(), Builtins::kIllegal);
+ native_context()->set_object_function(*object_fun);
+
+ {
+ // Finish setting up Object function's initial map.
+ Map* initial_map = object_fun->initial_map();
+ initial_map->SetInObjectProperties(unused);
+ initial_map->set_unused_property_fields(unused);
+ initial_map->set_elements_kind(HOLEY_ELEMENTS);
+ }
+
+ // Allocate a new prototype for the object function.
+ Handle<JSObject> object_function_prototype =
+ factory->NewFunctionPrototype(object_fun);
+
+ Handle<Map> map = Map::Copy(handle(object_function_prototype->map()),
+ "EmptyObjectPrototype");
+ map->set_is_prototype_map(true);
+ // Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
+ map->set_immutable_proto(true);
+ object_function_prototype->set_map(*map);
+
+ // Complete setting up empty function.
+ {
+ Handle<Map> empty_function_map(empty_function->map(), isolate_);
+ Map::SetPrototype(empty_function_map, object_function_prototype);
+ }
+
+ native_context()->set_initial_object_prototype(*object_function_prototype);
+ JSFunction::SetPrototype(object_fun, object_function_prototype);
+
+ {
+ // Set up slow map for Object.create(null) instances without in-object
+ // properties.
+ Handle<Map> map(object_fun->initial_map(), isolate_);
+ map = Map::CopyInitialMapNormalized(map);
+ Map::SetPrototype(map, factory->null_value());
+ native_context()->set_slow_object_with_null_prototype_map(*map);
+
+ // Set up slow map for literals with too many properties.
+ map = Map::Copy(map, "slow_object_with_object_prototype_map");
+ Map::SetPrototype(map, object_function_prototype);
+ native_context()->set_slow_object_with_object_prototype_map(*map);
+ }
+}
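Note (illustrative only): the slow_object_with_null_prototype_map set up above backs
dictionary-mode objects whose prototype is null, i.e. what Object.create(null)
returns. A small TypeScript sketch of the observable shape:

    // With no Object.prototype behind it, "__proto__" is an ordinary key and
    // nothing is inherited.
    const bag = Object.create(null) as Record<string, unknown>;
    console.log(Object.getPrototypeOf(bag)); // null
    bag["__proto__"] = "just data";          // plain own property, no setter magic
    console.log(Object.keys(bag));           // ["__proto__"]
    console.log("toString" in bag);          // false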
+
+namespace {
+
+Handle<Map> CreateNonConstructorMap(Handle<Map> source_map,
+ Handle<JSObject> prototype,
+ const char* reason) {
+ Handle<Map> map = Map::Copy(source_map, reason);
+ map->set_is_constructor(false);
+ Map::SetPrototype(map, prototype);
+ return map;
+}
+
+} // namespace
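Note (illustrative only, identifiers made up for the example): CreateNonConstructorMap
copies a strict-function map, clears its is_constructor bit and re-points the
prototype. The generator, async-generator and async-function maps created below are
all built this way, which is why [[Construct]] fails on such functions. TypeScript
sketch:

    // Functions backed by a non-constructor map reject `new`.
    function* gen() {}
    async function asyncFn() {}
    for (const f of [gen, asyncFn]) {
      try {
        new (f as any)();
      } catch (e) {
        console.log(`${f.name} throws TypeError: ${e instanceof TypeError}`);
      }
    }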
+
void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
// Create iterator-related meta-objects.
Handle<JSObject> iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
- isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
- Builtins::kReturnReceiver, 0, true);
-
- JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
- iterator_prototype_iterator, DONT_ENUM);
+ SimpleInstallFunction(iterator_prototype, factory()->iterator_symbol(),
+ "[Symbol.iterator]", Builtins::kReturnReceiver, 0,
+ true);
native_context()->set_initial_iterator_prototype(*iterator_prototype);
Handle<JSObject> generator_object_prototype =
@@ -745,17 +757,17 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
factory()->NewStringFromAsciiChecked("Generator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(generator_object_prototype, "next",
- Builtins::kGeneratorPrototypeNext, 1, true);
+ Builtins::kGeneratorPrototypeNext, 1, false);
SimpleInstallFunction(generator_object_prototype, "return",
- Builtins::kGeneratorPrototypeReturn, 1, true);
+ Builtins::kGeneratorPrototypeReturn, 1, false);
SimpleInstallFunction(generator_object_prototype, "throw",
- Builtins::kGeneratorPrototypeThrow, 1, true);
+ Builtins::kGeneratorPrototypeThrow, 1, false);
// Internal version of generator_prototype_next, flagged as non-native such
// that it doesn't show up in Error traces.
Handle<JSFunction> generator_next_internal =
SimpleCreateFunction(isolate(), factory()->next_string(),
- Builtins::kGeneratorPrototypeNext, 1, true);
+ Builtins::kGeneratorPrototypeNext, 1, false);
generator_next_internal->shared()->set_native(false);
native_context()->set_generator_next_internal(*generator_next_internal);
@@ -763,13 +775,27 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
// maps in the native context. The "prototype" property descriptor is
// writable, non-enumerable, and non-configurable (as per ES6 draft
// 04-14-15, section 25.2.4.3).
- Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
// Generator functions do not have "caller" or "arguments" accessors.
- Handle<Map> generator_function_map =
- Map::Copy(strict_function_map, "GeneratorFunction");
- generator_function_map->set_is_constructor(false);
- Map::SetPrototype(generator_function_map, generator_function_prototype);
- native_context()->set_generator_function_map(*generator_function_map);
+ Handle<Map> map;
+ map = CreateNonConstructorMap(isolate()->strict_function_map(),
+ generator_function_prototype,
+ "GeneratorFunction");
+ native_context()->set_generator_function_map(*map);
+
+ map = CreateNonConstructorMap(isolate()->strict_function_with_name_map(),
+ generator_function_prototype,
+ "GeneratorFunction with name");
+ native_context()->set_generator_function_with_name_map(*map);
+
+ map = CreateNonConstructorMap(strict_function_with_home_object_map_,
+ generator_function_prototype,
+ "GeneratorFunction with home object");
+ native_context()->set_generator_function_with_home_object_map(*map);
+
+ map = CreateNonConstructorMap(strict_function_with_name_and_home_object_map_,
+ generator_function_prototype,
+ "GeneratorFunction with name and home object");
+ native_context()->set_generator_function_with_name_and_home_object_map(*map);
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
@@ -784,13 +810,9 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
Handle<JSObject> async_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSFunction> async_iterator_prototype_iterator = SimpleCreateFunction(
- isolate(), factory()->NewStringFromAsciiChecked("[Symbol.asyncIterator]"),
- Builtins::kReturnReceiver, 0, true);
-
- JSObject::AddProperty(async_iterator_prototype,
- factory()->async_iterator_symbol(),
- async_iterator_prototype_iterator, DONT_ENUM);
+ SimpleInstallFunction(
+ async_iterator_prototype, factory()->async_iterator_symbol(),
+ "[Symbol.asyncIterator]", Builtins::kReturnReceiver, 0, true);
// %AsyncFromSyncIteratorPrototype%
// proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object
@@ -855,25 +877,39 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
factory()->NewStringFromAsciiChecked("AsyncGenerator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(async_generator_object_prototype, "next",
- Builtins::kAsyncGeneratorPrototypeNext, 1, true);
+ Builtins::kAsyncGeneratorPrototypeNext, 1, false);
SimpleInstallFunction(async_generator_object_prototype, "return",
- Builtins::kAsyncGeneratorPrototypeReturn, 1, true);
+ Builtins::kAsyncGeneratorPrototypeReturn, 1, false);
SimpleInstallFunction(async_generator_object_prototype, "throw",
- Builtins::kAsyncGeneratorPrototypeThrow, 1, true);
+ Builtins::kAsyncGeneratorPrototypeThrow, 1, false);
// Create maps for generator functions and their prototypes. Store those
// maps in the native context. The "prototype" property descriptor is
// writable, non-enumerable, and non-configurable (as per ES6 draft
// 04-14-15, section 25.2.4.3).
- Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
// Async Generator functions do not have "caller" or "arguments" accessors.
- Handle<Map> async_generator_function_map =
- Map::Copy(strict_function_map, "AsyncGeneratorFunction");
- async_generator_function_map->set_is_constructor(false);
- Map::SetPrototype(async_generator_function_map,
- async_generator_function_prototype);
- native_context()->set_async_generator_function_map(
- *async_generator_function_map);
+ Handle<Map> map;
+ map = CreateNonConstructorMap(isolate()->strict_function_map(),
+ async_generator_function_prototype,
+ "AsyncGeneratorFunction");
+ native_context()->set_async_generator_function_map(*map);
+
+ map = CreateNonConstructorMap(isolate()->strict_function_with_name_map(),
+ async_generator_function_prototype,
+ "AsyncGeneratorFunction with name");
+ native_context()->set_async_generator_function_with_name_map(*map);
+
+ map = CreateNonConstructorMap(strict_function_with_home_object_map_,
+ async_generator_function_prototype,
+ "AsyncGeneratorFunction with home object");
+ native_context()->set_async_generator_function_with_home_object_map(*map);
+
+ map = CreateNonConstructorMap(
+ strict_function_with_name_and_home_object_map_,
+ async_generator_function_prototype,
+ "AsyncGeneratorFunction with name and home object");
+ native_context()->set_async_generator_function_with_name_and_home_object_map(
+ *map);
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> async_generator_object_prototype_map = Map::Create(isolate(), 0);
@@ -894,26 +930,34 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
factory()->NewStringFromAsciiChecked("AsyncFunction"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<Map> strict_function_map(
- native_context()->strict_function_without_prototype_map());
- Handle<Map> async_function_map =
- Map::Copy(strict_function_map, "AsyncFunction");
- async_function_map->set_is_constructor(false);
- Map::SetPrototype(async_function_map, async_function_prototype);
- native_context()->set_async_function_map(*async_function_map);
+ Handle<Map> map;
+ map = CreateNonConstructorMap(
+ isolate()->strict_function_without_prototype_map(),
+ async_function_prototype, "AsyncFunction");
+ native_context()->set_async_function_map(*map);
+
+ map = CreateNonConstructorMap(isolate()->method_with_name_map(),
+ async_function_prototype,
+ "AsyncFunction with name");
+ native_context()->set_async_function_with_name_map(*map);
+
+ map = CreateNonConstructorMap(isolate()->method_with_home_object_map(),
+ async_function_prototype,
+ "AsyncFunction with home object");
+ native_context()->set_async_function_with_home_object_map(*map);
+
+ map = CreateNonConstructorMap(
+ isolate()->method_with_name_and_home_object_map(),
+ async_function_prototype, "AsyncFunction with name and home object");
+ native_context()->set_async_function_with_name_and_home_object_map(*map);
}
void Genesis::CreateJSProxyMaps() {
- // Allocate the different maps for all Proxy types.
+ // Allocate maps for all Proxy types.
// Next to the default proxy, we need maps indicating callable and
// constructable proxies.
- Handle<Map> proxy_function_map =
- Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
- proxy_function_map->set_is_constructor(true);
- native_context()->set_proxy_function_map(*proxy_function_map);
-
Handle<Map> proxy_map =
- factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
+ factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, PACKED_ELEMENTS);
proxy_map->set_dictionary_map(true);
native_context()->set_proxy_map(*proxy_map);
@@ -928,19 +972,20 @@ void Genesis::CreateJSProxyMaps() {
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
}
-static void ReplaceAccessors(Handle<Map> map,
- Handle<String> name,
- PropertyAttributes attributes,
- Handle<AccessorPair> accessor_pair) {
+namespace {
+void ReplaceAccessors(Handle<Map> map, Handle<String> name,
+ PropertyAttributes attributes,
+ Handle<AccessorPair> accessor_pair) {
DescriptorArray* descriptors = map->instance_descriptors();
int idx = descriptors->SearchWithCache(map->GetIsolate(), *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors->Replace(idx, &d);
}
+} // namespace
void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
PropertyAttributes rw_attribs = static_cast<PropertyAttributes>(DONT_ENUM);
- Handle<JSFunction> thrower = GetRestrictedFunctionPropertiesThrower();
+ Handle<JSFunction> thrower = GetThrowTypeErrorIntrinsic();
Handle<AccessorPair> accessors = factory()->NewAccessorPair();
accessors->set_getter(*thrower);
accessors->set_setter(*thrower);
@@ -1042,12 +1087,13 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
}
if (js_global_object_template.is_null()) {
- Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = isolate()->builtins()->Illegal();
+ Handle<String> name(factory()->empty_string());
+ Handle<Code> code(builtins()->Illegal());
Handle<JSObject> prototype =
factory()->NewFunctionPrototype(isolate()->object_function());
- js_global_object_function = factory()->NewFunction(
- name, code, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
+ js_global_object_function =
+ factory()->NewFunction(name, code, prototype, JS_GLOBAL_OBJECT_TYPE,
+ JSGlobalObject::kSize, STRICT);
#ifdef DEBUG
LookupIterator it(prototype, factory()->constructor_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -1071,8 +1117,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
// Step 2: (re)initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_proxy_template.IsEmpty()) {
- Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = isolate()->builtins()->Illegal();
+ Handle<String> name(factory()->empty_string());
+ Handle<Code> code(builtins()->Illegal());
global_proxy_function =
factory()->NewFunction(name, code, JS_GLOBAL_PROXY_TYPE,
JSGlobalProxy::SizeWithEmbedderFields(0));
@@ -1149,10 +1195,9 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<String> name, int context_index) {
Factory* factory = isolate->factory();
- Handle<JSFunction> error_fun =
- InstallFunction(global, name, JS_ERROR_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(),
- Builtins::kErrorConstructor, DONT_ENUM);
+ Handle<JSFunction> error_fun = InstallFunction(
+ global, name, JS_ERROR_TYPE, JSObject::kHeaderSize,
+ factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
error_fun->shared()->set_instance_class_name(*factory->Error_string());
error_fun->shared()->DontAdaptArguments();
error_fun->shared()->set_construct_stub(
@@ -1167,20 +1212,19 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
InstallWithIntrinsicDefaultProto(isolate, error_fun, context_index);
{
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
+ // Setup %XXXErrorPrototype%.
+ Handle<JSObject> prototype(JSObject::cast(error_fun->instance_prototype()));
JSObject::AddProperty(prototype, factory->name_string(), name, DONT_ENUM);
JSObject::AddProperty(prototype, factory->message_string(),
factory->empty_string(), DONT_ENUM);
- JSObject::AddProperty(prototype, factory->constructor_string(), error_fun,
- DONT_ENUM);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
Handle<JSFunction> to_string_fun =
SimpleInstallFunction(prototype, factory->toString_string(),
Builtins::kErrorPrototypeToString, 0, true);
isolate->native_context()->set_error_to_string(*to_string_fun);
+ isolate->native_context()->set_initial_error_prototype(*prototype);
} else {
DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
@@ -1196,8 +1240,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
false, Object::THROW_ON_ERROR)
.FromMaybe(false));
}
-
- JSFunction::SetPrototype(error_fun, prototype);
}
Handle<Map> initial_map(error_fun->initial_map());
@@ -1213,8 +1255,9 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
}
}
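Note (illustrative only): InstallError now takes %XXXErrorPrototype% directly from the
constructor's instance_prototype() and seeds it with non-enumerable "name" and
"message" defaults, while only %ErrorPrototype% receives its own toString. TypeScript
sketch of the resulting, spec-visible shape:

    console.log(RangeError.prototype.name);                // "RangeError"
    console.log(RangeError.prototype.message);             // ""
    console.log(Object.keys(RangeError.prototype).length); // 0 (all DONT_ENUM)
    // Derived error prototypes inherit toString from %ErrorPrototype%.
    console.log(Object.prototype.hasOwnProperty.call(
        RangeError.prototype, "toString"));                               // false
    console.log(RangeError.prototype.toString === Error.prototype.toString); // true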
-static void InstallMakeError(Isolate* isolate, Handle<Code> code,
- int context_index) {
+namespace {
+
+void InstallMakeError(Isolate* isolate, Handle<Code> code, int context_index) {
Handle<JSFunction> function =
isolate->factory()->NewFunction(isolate->factory()->empty_string(), code,
JS_OBJECT_TYPE, JSObject::kHeaderSize);
@@ -1222,6 +1265,8 @@ static void InstallMakeError(Isolate* isolate, Handle<Code> code,
isolate->native_context()->set(context_index, *function);
}
+} // namespace
+
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
@@ -1240,8 +1285,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Isolate* isolate = global_object->GetIsolate();
Factory* factory = isolate->factory();
-
- native_context()->set_osr_code_table(*factory->empty_fixed_array());
+ Builtins* builtins = isolate->builtins();
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
@@ -1274,7 +1318,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> object_create =
SimpleInstallFunction(object_function, factory->create_string(),
- Builtins::kObjectCreate, 2, true);
+ Builtins::kObjectCreate, 2, false);
native_context()->set_object_create(*object_create);
Handle<JSFunction> object_define_properties = SimpleInstallFunction(
@@ -1333,6 +1377,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate->initial_object_prototype(),
"__lookupSetter__", Builtins::kObjectLookupSetter, 1,
true);
+ SimpleInstallFunction(isolate->initial_object_prototype(), "isPrototypeOf",
+ Builtins::kObjectPrototypeIsPrototypeOf, 1, true);
SimpleInstallFunction(
isolate->initial_object_prototype(), "propertyIsEnumerable",
Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
@@ -1358,16 +1404,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> function_fun =
InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
prototype, Builtins::kFunctionConstructor);
- function_fun->set_prototype_or_initial_map(
- *sloppy_function_map_writable_prototype_);
+ // Function instances are sloppy by default.
+ function_fun->set_prototype_or_initial_map(*isolate->sloppy_function_map());
function_fun->shared()->DontAdaptArguments();
- function_fun->shared()->SetConstructStub(
- *isolate->builtins()->FunctionConstructor());
+ function_fun->shared()->SetConstructStub(*builtins->FunctionConstructor());
function_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, function_fun,
Context::FUNCTION_FUNCTION_INDEX);
// Setup the methods on the %FunctionPrototype%.
+ JSObject::AddProperty(prototype, factory->constructor_string(),
+ function_fun, DONT_ENUM);
SimpleInstallFunction(prototype, factory->apply_string(),
Builtins::kFunctionPrototypeApply, 2, false);
SimpleInstallFunction(prototype, factory->bind_string(),
@@ -1378,34 +1425,34 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kFunctionPrototypeToString, 0, false);
// Install the @@hasInstance function.
- Handle<JSFunction> has_instance = InstallFunction(
- prototype, factory->has_instance_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, MaybeHandle<JSObject>(),
- Builtins::kFunctionPrototypeHasInstance,
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
- has_instance->shared()->set_builtin_function_id(kFunctionHasInstance);
+ Handle<JSFunction> has_instance = SimpleInstallFunction(
+ prototype, factory->has_instance_symbol(), "[Symbol.hasInstance]",
+ Builtins::kFunctionPrototypeHasInstance, 1, true,
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY),
+ kFunctionHasInstance);
native_context()->set_function_has_instance(*has_instance);
- // Set the expected parameters for @@hasInstance to 1; required by builtin.
- has_instance->shared()->set_internal_formal_parameter_count(1);
-
- // Set the length for the function to satisfy ECMA-262.
- has_instance->shared()->set_length(1);
-
- // Install the "constructor" property on the %FunctionPrototype%.
- JSObject::AddProperty(prototype, factory->constructor_string(),
- function_fun, DONT_ENUM);
-
- sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
- strict_function_map_writable_prototype_->SetConstructor(*function_fun);
- class_function_map_->SetConstructor(*function_fun);
-
- JSObject::MigrateSlowToFast(function_fun, 0, "Bootstrapping");
+ // Complete setting up function maps.
+ {
+ isolate->sloppy_function_map()->SetConstructor(*function_fun);
+ isolate->sloppy_function_with_name_map()->SetConstructor(*function_fun);
+ isolate->sloppy_function_with_readonly_prototype_map()->SetConstructor(
+ *function_fun);
+
+ isolate->strict_function_map()->SetConstructor(*function_fun);
+ isolate->strict_function_with_name_map()->SetConstructor(*function_fun);
+ strict_function_with_home_object_map_->SetConstructor(*function_fun);
+ strict_function_with_name_and_home_object_map_->SetConstructor(
+ *function_fun);
+ isolate->strict_function_with_readonly_prototype_map()->SetConstructor(
+ *function_fun);
+
+ isolate->class_function_map()->SetConstructor(*function_fun);
+ }
}
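Note (illustrative only, the Even class is made up for the example): the @@hasInstance
function installed above is read-only and non-configurable on %FunctionPrototype% and
has length 1; instanceof funnels through it, so objects and classes can override the
check. TypeScript sketch:

    const hasInstance = (Function.prototype as any)[Symbol.hasInstance];
    console.log(hasInstance.name, hasInstance.length); // "[Symbol.hasInstance]" 1

    class Even {
      static [Symbol.hasInstance](value: unknown): boolean {
        return typeof value === "number" && value % 2 === 0;
      }
    }
    console.log((4 as any) instanceof Even); // true: instanceof consults @@hasInstance
    console.log((5 as any) instanceof Even); // false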
- {
- // --- A s y n c F r o m S y n c I t e r a t o r
- Handle<Code> code = isolate->builtins()->AsyncIteratorValueUnwrap();
+ { // --- A s y n c F r o m S y n c I t e r a t o r
+ Handle<Code> code(builtins->AsyncIteratorValueUnwrap());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
@@ -1416,27 +1463,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- A s y n c G e n e r a t o r ---
Handle<JSFunction> await_caught =
SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitCaught, 2, false);
- InstallWithIntrinsicDefaultProto(isolate, await_caught,
- Context::ASYNC_GENERATOR_AWAIT_CAUGHT);
+ Builtins::kAsyncGeneratorAwaitCaught, 1, false);
+ native_context()->set_async_generator_await_caught(*await_caught);
Handle<JSFunction> await_uncaught =
SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitUncaught, 2, false);
- InstallWithIntrinsicDefaultProto(isolate, await_uncaught,
- Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT);
+ Builtins::kAsyncGeneratorAwaitUncaught, 1, false);
+ native_context()->set_async_generator_await_uncaught(*await_uncaught);
- Handle<Code> code =
- isolate->builtins()->AsyncGeneratorAwaitResolveClosure();
+ Handle<Code> code(builtins->AsyncGeneratorAwaitResolveClosure());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
native_context()->set_async_generator_await_resolve_shared_fun(*info);
- code = handle(isolate->builtins()->builtin(
- Builtins::kAsyncGeneratorAwaitRejectClosure),
- isolate);
+ code = builtins->AsyncGeneratorAwaitRejectClosure();
info = factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
@@ -1485,16 +1527,44 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
array_function->shared()->SetConstructStub(*code);
// Set up %ArrayPrototype%.
- Handle<JSArray> array_prototype =
+ Handle<JSArray> proto =
Handle<JSArray>::cast(factory->NewJSObject(array_function, TENURED));
- JSArray::Initialize(array_prototype, 0);
- JSFunction::SetPrototype(array_function, array_prototype);
- native_context()->set_initial_array_prototype(*array_prototype);
+ JSArray::Initialize(proto, 0);
+ JSFunction::SetPrototype(array_function, proto);
+ native_context()->set_initial_array_prototype(*proto);
Handle<JSFunction> is_arraylike = SimpleInstallFunction(
- array_function, isolate->factory()->InternalizeUtf8String("isArray"),
- Builtins::kArrayIsArray, 1, true);
+ array_function, "isArray", Builtins::kArrayIsArray, 1, true);
native_context()->set_is_arraylike(*is_arraylike);
+
+ JSObject::AddProperty(proto, factory->constructor_string(), array_function,
+ DONT_ENUM);
+
+ SimpleInstallFunction(proto, "concat", Builtins::kArrayConcat, 1, false);
+ SimpleInstallFunction(proto, "pop", Builtins::kFastArrayPop, 0, false);
+ SimpleInstallFunction(proto, "push", Builtins::kFastArrayPush, 1, false);
+ SimpleInstallFunction(proto, "shift", Builtins::kFastArrayShift, 0, false);
+ SimpleInstallFunction(proto, "unshift", Builtins::kArrayUnshift, 1, false);
+ SimpleInstallFunction(proto, "slice", Builtins::kArraySlice, 2, false);
+ SimpleInstallFunction(proto, "splice", Builtins::kArraySplice, 2, false);
+ SimpleInstallFunction(proto, "includes", Builtins::kArrayIncludes, 1,
+ false);
+ SimpleInstallFunction(proto, "indexOf", Builtins::kArrayIndexOf, 1, false);
+ SimpleInstallFunction(proto, "keys", Builtins::kArrayPrototypeKeys, 0, true,
+ kArrayKeys);
+ SimpleInstallFunction(proto, "entries", Builtins::kArrayPrototypeEntries, 0,
+ true, kArrayEntries);
+ SimpleInstallFunction(proto, factory->iterator_symbol(), "values",
+ Builtins::kArrayPrototypeValues, 0, true, DONT_ENUM,
+ kArrayValues);
+ SimpleInstallFunction(proto, "forEach", Builtins::kArrayForEach, 1, false);
+ SimpleInstallFunction(proto, "filter", Builtins::kArrayFilter, 1, false);
+ SimpleInstallFunction(proto, "map", Builtins::kArrayMap, 1, false);
+ SimpleInstallFunction(proto, "every", Builtins::kArrayEvery, 1, false);
+ SimpleInstallFunction(proto, "some", Builtins::kArraySome, 1, false);
+ SimpleInstallFunction(proto, "reduce", Builtins::kArrayReduce, 1, false);
+ SimpleInstallFunction(proto, "reduceRight", Builtins::kArrayReduceRight, 1,
+ false);
}
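Note (illustrative only): with the Array.prototype methods above installed straight
from the bootstrapper, the "values" function doubles as the @@iterator, and
keys/entries/values drive array iteration. TypeScript sketch:

    console.log(Array.prototype[Symbol.iterator] === Array.prototype.values); // true
    const letters = ["a", "b"];
    console.log([...letters.entries()]); // [[0, "a"], [1, "b"]]
    console.log([...letters.keys()]);    // [0, 1]
    console.log(letters.reduce((acc, x) => acc + x, "")); // "ab"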
{ // --- A r r a y I t e r a t o r ---
@@ -1510,17 +1580,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->ArrayIterator_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> next = InstallFunction(
- array_iterator_prototype, "next", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kArrayIteratorPrototypeNext);
- next->shared()->set_builtin_function_id(kArrayIteratorNext);
-
- // Set the expected parameters for %ArrayIteratorPrototype%.next to 0 (not
- // including the receiver), as required by the builtin.
- next->shared()->set_internal_formal_parameter_count(0);
-
- // Set the length for the function to satisfy ECMA-262.
- next->shared()->set_length(0);
+ SimpleInstallFunction(array_iterator_prototype, "next",
+ Builtins::kArrayIteratorPrototypeNext, 0, true,
+ kArrayIteratorNext);
Handle<JSFunction> array_iterator_function = CreateFunction(
isolate, factory->ArrayIterator_string(),
@@ -1595,12 +1657,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(), Builtins::kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
number_fun->shared()->SetConstructStub(
- *isolate->builtins()->NumberConstructor_ConstructStub());
+ *builtins->NumberConstructor_ConstructStub());
number_fun->shared()->set_length(1);
- // https://tc39.github.io/ecma262/#sec-built-in-function-objects says
- // that "Built-in functions that are ECMAScript function objects must
- // be strict functions".
- number_fun->shared()->set_language_mode(STRICT);
InstallWithIntrinsicDefaultProto(isolate, number_fun,
Context::NUMBER_FUNCTION_INDEX);
@@ -1716,7 +1774,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kBooleanConstructor);
boolean_fun->shared()->DontAdaptArguments();
boolean_fun->shared()->SetConstructStub(
- *isolate->builtins()->BooleanConstructor_ConstructStub());
+ *builtins->BooleanConstructor_ConstructStub());
boolean_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
@@ -1743,13 +1801,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
string_fun->shared()->SetConstructStub(
- *isolate->builtins()->StringConstructor_ConstructStub());
+ *builtins->StringConstructor_ConstructStub());
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
- // https://tc39.github.io/ecma262/#sec-built-in-function-objects says
- // that "Built-in functions that are ECMAScript function objects must
- // be strict functions".
- string_fun->shared()->set_language_mode(STRICT);
InstallWithIntrinsicDefaultProto(isolate, string_fun,
Context::STRING_FUNCTION_INDEX);
@@ -1816,11 +1870,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "slice", Builtins::kStringPrototypeSlice,
2, false);
SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
- 2, true);
+ 2, false);
SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
- 2, true);
+ 2, false);
SimpleInstallFunction(prototype, "substring",
- Builtins::kStringPrototypeSubstring, 2, true);
+ Builtins::kStringPrototypeSubstring, 2, false);
SimpleInstallFunction(prototype, "startsWith",
Builtins::kStringPrototypeStartsWith, 1, false);
SimpleInstallFunction(prototype, "toString",
@@ -1831,6 +1885,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeTrimLeft, 0, false);
SimpleInstallFunction(prototype, "trimRight",
Builtins::kStringPrototypeTrimRight, 0, false);
+#ifdef V8_INTL_SUPPORT
+ SimpleInstallFunction(prototype, "toLowerCase",
+ Builtins::kStringPrototypeToLowerCaseIntl, 0, true);
+ SimpleInstallFunction(prototype, "toUpperCase",
+ Builtins::kStringPrototypeToUpperCaseIntl, 0, false);
+#else
SimpleInstallFunction(prototype, "toLocaleLowerCase",
Builtins::kStringPrototypeToLocaleLowerCase, 0,
false);
@@ -1841,15 +1901,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeToLowerCase, 0, false);
SimpleInstallFunction(prototype, "toUpperCase",
Builtins::kStringPrototypeToUpperCase, 0, false);
+#endif
SimpleInstallFunction(prototype, "valueOf",
Builtins::kStringPrototypeValueOf, 0, true);
- Handle<JSFunction> iterator = SimpleCreateFunction(
- isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
- Builtins::kStringPrototypeIterator, 0, true);
- iterator->shared()->set_builtin_function_id(kStringIterator);
- JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
- static_cast<PropertyAttributes>(DONT_ENUM));
+ SimpleInstallFunction(prototype, factory->iterator_symbol(),
+ "[Symbol.iterator]",
+ Builtins::kStringPrototypeIterator, 0, true,
+ DONT_ENUM, kStringIterator);
}
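Note (illustrative only): with V8_INTL_SUPPORT the default toLowerCase/toUpperCase
builtins installed above are backed by ICU, while the locale-sensitive toLocale*
variants are provided separately; the spec-visible difference between the two
families is unchanged. TypeScript sketch:

    const dottedI = "\u0130"; // LATIN CAPITAL LETTER I WITH DOT ABOVE
    console.log(dottedI.toLowerCase());           // "i\u0307" (default case mapping)
    console.log(dottedI.toLocaleLowerCase("tr")); // "i" (Turkish tailoring)
    console.log("STRASSE".toLowerCase());         // "strasse"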
{ // --- S t r i n g I t e r a t o r ---
@@ -1865,18 +1924,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewStringFromAsciiChecked("String Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> next =
- InstallFunction(string_iterator_prototype, "next", JS_OBJECT_TYPE,
- JSObject::kHeaderSize, MaybeHandle<JSObject>(),
- Builtins::kStringIteratorPrototypeNext);
- next->shared()->set_builtin_function_id(kStringIteratorNext);
-
- // Set the expected parameters for %StringIteratorPrototype%.next to 0 (not
- // including the receiver), as required by the builtin.
- next->shared()->set_internal_formal_parameter_count(0);
-
- // Set the length for the function to satisfy ECMA-262.
- next->shared()->set_length(0);
+ SimpleInstallFunction(string_iterator_prototype, "next",
+ Builtins::kStringIteratorPrototypeNext, 0, true,
+ kStringIteratorNext);
Handle<JSFunction> string_iterator_function = CreateFunction(
isolate, factory->NewStringFromAsciiChecked("StringIterator"),
@@ -1887,15 +1937,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
string_iterator_function->initial_map());
}
- {
- // --- S y m b o l ---
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- Handle<JSFunction> symbol_fun =
- InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
- prototype, Builtins::kSymbolConstructor);
+ { // --- S y m b o l ---
+ Handle<JSFunction> symbol_fun = InstallFunction(
+ global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+ factory->the_hole_value(), Builtins::kSymbolConstructor);
symbol_fun->shared()->SetConstructStub(
- *isolate->builtins()->SymbolConstructor_ConstructStub());
+ *builtins->SymbolConstructor_ConstructStub());
symbol_fun->shared()->set_length(0);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@@ -1924,16 +1971,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallConstant(isolate, symbol_fun, "unscopables",
factory->unscopables_symbol());
+ // Setup %SymbolPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(symbol_fun->instance_prototype()));
+
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("Symbol"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), symbol_fun,
- DONT_ENUM);
-
// Install the Symbol.prototype methods.
SimpleInstallFunction(prototype, "toString",
Builtins::kSymbolPrototypeToString, 0, true);
@@ -1955,16 +2002,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- D a t e ---
- // Builtin functions for Date.prototype.
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
Handle<JSFunction> date_fun =
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, prototype,
- Builtins::kDateConstructor);
+ InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
+ factory->the_hole_value(), Builtins::kDateConstructor);
InstallWithIntrinsicDefaultProto(isolate, date_fun,
Context::DATE_FUNCTION_INDEX);
date_fun->shared()->SetConstructStub(
- *isolate->builtins()->DateConstructor_ConstructStub());
+ *builtins->DateConstructor_ConstructStub());
date_fun->shared()->set_length(7);
date_fun->shared()->DontAdaptArguments();
@@ -1973,9 +2017,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(date_fun, "parse", Builtins::kDateParse, 1, false);
SimpleInstallFunction(date_fun, "UTC", Builtins::kDateUTC, 7, false);
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), date_fun,
- DONT_ENUM);
+ // Setup %DatePrototype%.
+ Handle<JSObject> prototype(JSObject::cast(date_fun->instance_prototype()));
// Install the Date.prototype methods.
SimpleInstallFunction(prototype, "toString",
@@ -2089,10 +2132,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{
- Handle<Code> code = isolate->builtins()->PromiseGetCapabilitiesExecutor();
+ Handle<Code> code(builtins->PromiseGetCapabilitiesExecutor());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, true);
- info->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+ info->SetConstructStub(*builtins->JSBuiltinsConstructStub());
info->set_instance_class_name(isolate->heap()->Object_string());
info->set_internal_formal_parameter_count(2);
info->set_length(2);
@@ -2102,28 +2145,37 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> new_promise_capability =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kNewPromiseCapability, 2, false);
- InstallWithIntrinsicDefaultProto(isolate, new_promise_capability,
- Context::NEW_PROMISE_CAPABILITY_INDEX);
+ native_context()->set_new_promise_capability(*new_promise_capability);
}
{ // -- P r o m i s e
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
Handle<JSFunction> promise_fun = InstallFunction(
global, "Promise", JS_PROMISE_TYPE, JSPromise::kSizeWithEmbedderFields,
- prototype, Builtins::kPromiseConstructor);
+ factory->the_hole_value(), Builtins::kPromiseConstructor);
InstallWithIntrinsicDefaultProto(isolate, promise_fun,
Context::PROMISE_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
- shared->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+ shared->SetConstructStub(*builtins->JSBuiltinsConstructStub());
shared->set_instance_class_name(isolate->heap()->Object_string());
shared->set_internal_formal_parameter_count(1);
shared->set_length(1);
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), promise_fun,
- DONT_ENUM);
+ InstallSpeciesGetter(promise_fun);
+
+ SimpleInstallFunction(promise_fun, "all", Builtins::kPromiseAll, 1, true);
+
+ SimpleInstallFunction(promise_fun, "race", Builtins::kPromiseRace, 1, true);
+
+ SimpleInstallFunction(promise_fun, "resolve", Builtins::kPromiseResolve, 1,
+ true);
+
+ SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
+ true);
+
+ // Setup %PromisePrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(promise_fun->instance_prototype()));
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
@@ -2133,21 +2185,21 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> promise_then =
SimpleInstallFunction(prototype, isolate->factory()->then_string(),
Builtins::kPromiseThen, 2, true);
- InstallWithIntrinsicDefaultProto(isolate, promise_then,
- Context::PROMISE_THEN_INDEX);
+ native_context()->set_promise_then(*promise_then);
Handle<JSFunction> promise_catch = SimpleInstallFunction(
- prototype, "catch", Builtins::kPromiseCatch, 1, true, DONT_ENUM);
- InstallWithIntrinsicDefaultProto(isolate, promise_catch,
- Context::PROMISE_CATCH_INDEX);
-
- InstallSpeciesGetter(promise_fun);
+ prototype, "catch", Builtins::kPromiseCatch, 1, true);
+ native_context()->set_promise_catch(*promise_catch);
- SimpleInstallFunction(promise_fun, "resolve", Builtins::kPromiseResolve, 1,
- true, DONT_ENUM);
-
- SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
- true, DONT_ENUM);
+ // Force the Promise constructor to fast properties, so that we can use the
+ // fast paths for various things like
+ //
+ // x instanceof Promise
+ //
+ // etc. We should probably come up with a more principled approach once
+ // the JavaScript builtins are gone.
+ JSObject::MigrateSlowToFast(Handle<JSObject>::cast(promise_fun), 0,
+ "Bootstrapping");
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
@@ -2162,15 +2214,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kPromiseInternalConstructor, 1, true);
function->shared()->set_native(false);
- InstallWithIntrinsicDefaultProto(
- isolate, function, Context::PROMISE_INTERNAL_CONSTRUCTOR_INDEX);
+ native_context()->set_promise_internal_constructor(*function);
}
{ // Internal: IsPromise
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kIsPromise, 1, false);
- InstallWithIntrinsicDefaultProto(isolate, function,
- Context::IS_PROMISE_INDEX);
+ native_context()->set_is_promise(*function);
}
{ // Internal: ResolvePromise
@@ -2178,23 +2228,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
function->shared()->set_native(false);
- InstallWithIntrinsicDefaultProto(isolate, function,
- Context::PROMISE_RESOLVE_INDEX);
+ native_context()->set_promise_resolve(*function);
}
{ // Internal: PromiseHandle
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kPromiseHandle, 5, false);
- InstallWithIntrinsicDefaultProto(isolate, function,
- Context::PROMISE_HANDLE_INDEX);
+ native_context()->set_promise_handle(*function);
}
{ // Internal: PromiseHandleReject
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kPromiseHandleReject, 3, false);
- InstallWithIntrinsicDefaultProto(isolate, function,
- Context::PROMISE_HANDLE_REJECT_INDEX);
+ native_context()->set_promise_handle_reject(*function);
}
{ // Internal: InternalPromiseReject
@@ -2202,23 +2249,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kInternalPromiseReject, 3, true);
function->shared()->set_native(false);
- InstallWithIntrinsicDefaultProto(isolate, function,
- Context::PROMISE_INTERNAL_REJECT_INDEX);
+ native_context()->set_promise_internal_reject(*function);
}
{
- Handle<Code> code =
- handle(isolate->builtins()->builtin(Builtins::kPromiseResolveClosure),
- isolate);
+ Handle<Code> code(builtins->PromiseResolveClosure());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
native_context()->set_promise_resolve_shared_fun(*info);
- code =
- handle(isolate->builtins()->builtin(Builtins::kPromiseRejectClosure),
- isolate);
+ code = builtins->PromiseRejectClosure();
info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
@@ -2226,31 +2268,43 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_promise_reject_shared_fun(*info);
}
+ {
+ Handle<Code> code(builtins->PromiseAllResolveElementClosure());
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context()->set_promise_all_resolve_element_shared_fun(*info);
+ }
+
+ // Force the Promise constructor to fast properties, so that we can use the
+ // fast paths for various things like
+ //
+ // x instanceof Promise
+ //
+ // etc. We should probably come up with a more principled approach once
+ // the JavaScript builtins are gone.
JSObject::MigrateSlowToFast(promise_fun, 0, "Bootstrapping");
}
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- Handle<JSFunction> regexp_fun =
- InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- prototype, Builtins::kRegExpConstructor);
+ Handle<JSFunction> regexp_fun = InstallFunction(
+ global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
+ factory->the_hole_value(), Builtins::kRegExpConstructor);
InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
- shared->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+ shared->SetConstructStub(*builtins->JSBuiltinsConstructStub());
shared->set_instance_class_name(isolate->heap()->RegExp_string());
shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
{
- // RegExp.prototype setup.
-
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(),
- regexp_fun, DONT_ENUM);
+ // Setup %RegExpPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(regexp_fun->instance_prototype()));
{
Handle<JSFunction> fun = SimpleInstallFunction(
@@ -2283,33 +2337,21 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "test", Builtins::kRegExpPrototypeTest,
1, true, DONT_ENUM);
- {
- Handle<JSFunction> fun = SimpleCreateFunction(
- isolate, factory->InternalizeUtf8String("[Symbol.match]"),
- Builtins::kRegExpPrototypeMatch, 1, true);
- InstallFunction(prototype, fun, factory->match_symbol(), DONT_ENUM);
- }
+ SimpleInstallFunction(prototype, factory->match_symbol(),
+ "[Symbol.match]", Builtins::kRegExpPrototypeMatch,
+ 1, true);
- {
- Handle<JSFunction> fun = SimpleCreateFunction(
- isolate, factory->InternalizeUtf8String("[Symbol.replace]"),
- Builtins::kRegExpPrototypeReplace, 2, false);
- InstallFunction(prototype, fun, factory->replace_symbol(), DONT_ENUM);
- }
+ SimpleInstallFunction(prototype, factory->replace_symbol(),
+ "[Symbol.replace]",
+ Builtins::kRegExpPrototypeReplace, 2, false);
- {
- Handle<JSFunction> fun = SimpleCreateFunction(
- isolate, factory->InternalizeUtf8String("[Symbol.search]"),
- Builtins::kRegExpPrototypeSearch, 1, true);
- InstallFunction(prototype, fun, factory->search_symbol(), DONT_ENUM);
- }
+ SimpleInstallFunction(prototype, factory->search_symbol(),
+ "[Symbol.search]", Builtins::kRegExpPrototypeSearch,
+ 1, true);
- {
- Handle<JSFunction> fun = SimpleCreateFunction(
- isolate, factory->InternalizeUtf8String("[Symbol.split]"),
- Builtins::kRegExpPrototypeSplit, 2, false);
- InstallFunction(prototype, fun, factory->split_symbol(), DONT_ENUM);
- }
+ SimpleInstallFunction(prototype, factory->split_symbol(),
+ "[Symbol.split]", Builtins::kRegExpPrototypeSplit,
+ 2, false);
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
@@ -2405,12 +2447,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // Internal: RegExpInternalMatch
Handle<JSFunction> function =
- factory->NewFunction(isolate->factory()->empty_string(),
- isolate->builtins()->RegExpInternalMatch(),
- JS_OBJECT_TYPE, JSObject::kHeaderSize);
- function->shared()->set_internal_formal_parameter_count(2);
- function->shared()->set_length(2);
- function->shared()->set_native(true);
+ SimpleCreateFunction(isolate, isolate->factory()->empty_string(),
+ Builtins::kRegExpInternalMatch, 2, true);
native_context()->set(Context::REGEXP_INTERNAL_MATCH, *function);
}
@@ -2434,8 +2472,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- E r r o r
InstallError(isolate, global, factory->Error_string(),
Context::ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, isolate->builtins()->MakeError(),
- Context::MAKE_ERROR_INDEX);
+ InstallMakeError(isolate, builtins->MakeError(), Context::MAKE_ERROR_INDEX);
}
{ // -- E v a l E r r o r
@@ -2446,7 +2483,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R a n g e E r r o r
InstallError(isolate, global, factory->RangeError_string(),
Context::RANGE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, isolate->builtins()->MakeRangeError(),
+ InstallMakeError(isolate, builtins->MakeRangeError(),
Context::MAKE_RANGE_ERROR_INDEX);
}
@@ -2458,21 +2495,21 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- S y n t a x E r r o r
InstallError(isolate, global, factory->SyntaxError_string(),
Context::SYNTAX_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, isolate->builtins()->MakeSyntaxError(),
+ InstallMakeError(isolate, builtins->MakeSyntaxError(),
Context::MAKE_SYNTAX_ERROR_INDEX);
}
{ // -- T y p e E r r o r
InstallError(isolate, global, factory->TypeError_string(),
Context::TYPE_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, isolate->builtins()->MakeTypeError(),
+ InstallMakeError(isolate, builtins->MakeTypeError(),
Context::MAKE_TYPE_ERROR_INDEX);
}
{ // -- U R I E r r o r
InstallError(isolate, global, factory->URIError_string(),
Context::URI_ERROR_FUNCTION_INDEX);
- InstallMakeError(isolate, isolate->builtins()->MakeURIError(),
+ InstallMakeError(isolate, builtins->MakeURIError(),
Context::MAKE_URI_ERROR_INDEX);
}
@@ -2496,10 +2533,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
- DCHECK(json_object->IsJSObject());
+ Handle<JSObject> json_object =
+ factory->NewJSObject(isolate->object_function(), TENURED);
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
SimpleInstallFunction(json_object, "parse", Builtins::kJsonParse, 2, false);
SimpleInstallFunction(json_object, "stringify", Builtins::kJsonStringify, 3,
@@ -2512,10 +2547,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- M a t h
Handle<String> name = factory->InternalizeUtf8String("Math");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
- DCHECK(math->IsJSObject());
+ Handle<JSObject> math =
+ factory->NewJSObject(isolate->object_function(), TENURED);
JSObject::AddProperty(global, name, math, DONT_ENUM);
SimpleInstallFunction(math, "abs", Builtins::kMathAbs, 1, true);
SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
@@ -2581,7 +2614,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- C o n s o l e
Handle<String> name = factory->InternalizeUtf8String("console");
- Handle<JSFunction> cons = factory->NewFunction(name);
+ Handle<JSFunction> cons = factory->NewFunction(
+ isolate->strict_function_map(), name, MaybeHandle<Code>());
Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
JSFunction::SetPrototype(cons, empty);
Handle<JSObject> console = factory->NewJSObject(cons, TENURED);
@@ -2633,6 +2667,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
false, NONE);
SimpleInstallFunction(console, "timeStamp", Builtins::kConsoleTimeStamp, 1,
false, NONE);
+ SimpleInstallFunction(console, "context", Builtins::kConsoleContext, 1,
+ true, NONE);
JSObject::AddProperty(
console, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("Object"),
@@ -2642,85 +2678,82 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
#ifdef V8_INTL_SUPPORT
{ // -- I n t l
Handle<String> name = factory->InternalizeUtf8String("Intl");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- Handle<JSObject> intl = factory->NewJSObject(cons, TENURED);
- DCHECK(intl->IsJSObject());
+ Handle<JSObject> intl =
+ factory->NewJSObject(isolate->object_function(), TENURED);
JSObject::AddProperty(global, name, intl, DONT_ENUM);
- Handle<JSObject> date_time_format_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- date_time_format_prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> date_time_format_constructor = InstallFunction(
- intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
- date_time_format_prototype, Builtins::kIllegal);
- JSObject::AddProperty(date_time_format_prototype,
- factory->constructor_string(),
- date_time_format_constructor, DONT_ENUM);
- InstallWithIntrinsicDefaultProto(
- isolate, date_time_format_constructor,
- Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX);
+ {
+ Handle<JSFunction> date_time_format_constructor = InstallFunction(
+ intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
+ native_context()->set_intl_date_time_format_function(
+ *date_time_format_constructor);
+
+ Handle<JSObject> prototype(
+ JSObject::cast(date_time_format_constructor->prototype()), isolate);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
- Handle<JSObject> number_format_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- number_format_prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> number_format_constructor = InstallFunction(
- intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
- number_format_prototype, Builtins::kIllegal);
- JSObject::AddProperty(number_format_prototype,
- factory->constructor_string(),
- number_format_constructor, DONT_ENUM);
- InstallWithIntrinsicDefaultProto(
- isolate, number_format_constructor,
- Context::INTL_NUMBER_FORMAT_FUNCTION_INDEX);
+ {
+ Handle<JSFunction> number_format_constructor = InstallFunction(
+ intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
+ native_context()->set_intl_number_format_function(
+ *number_format_constructor);
+
+ Handle<JSObject> prototype(
+ JSObject::cast(number_format_constructor->prototype()), isolate);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
- Handle<JSObject> collator_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- collator_prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> collator_constructor =
- InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize,
- collator_prototype, Builtins::kIllegal);
- JSObject::AddProperty(collator_prototype, factory->constructor_string(),
- collator_constructor, DONT_ENUM);
- InstallWithIntrinsicDefaultProto(isolate, collator_constructor,
- Context::INTL_COLLATOR_FUNCTION_INDEX);
-
- Handle<JSObject> v8_break_iterator_prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- // Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(
- v8_break_iterator_prototype, factory->to_string_tag_symbol(),
- factory->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
- intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize,
- v8_break_iterator_prototype, Builtins::kIllegal);
- JSObject::AddProperty(v8_break_iterator_prototype,
- factory->constructor_string(),
- v8_break_iterator_constructor, DONT_ENUM);
- InstallWithIntrinsicDefaultProto(
- isolate, v8_break_iterator_constructor,
- Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX);
+ {
+ Handle<JSFunction> collator_constructor =
+ InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
+ native_context()->set_intl_collator_function(*collator_constructor);
+
+ Handle<JSObject> prototype(
+ JSObject::cast(collator_constructor->prototype()), isolate);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
+
+ {
+ Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
+ intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
+ native_context()->set_intl_v8_break_iterator_function(
+ *v8_break_iterator_constructor);
+
+ Handle<JSObject> prototype(
+ JSObject::cast(v8_break_iterator_constructor->prototype()), isolate);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
}
#endif // V8_INTL_SUPPORT
{ // -- A r r a y B u f f e r
- Handle<JSFunction> array_buffer_fun = InstallArrayBuffer(
- global, "ArrayBuffer", Builtins::kArrayBufferPrototypeGetByteLength,
- BuiltinFunctionId::kArrayBufferByteLength,
- Builtins::kArrayBufferPrototypeSlice);
+ Handle<String> name = factory->InternalizeUtf8String("ArrayBuffer");
+ Handle<JSFunction> array_buffer_fun =
+ CreateArrayBuffer(name, Builtins::kArrayBufferPrototypeGetByteLength,
+ BuiltinFunctionId::kArrayBufferByteLength,
+ Builtins::kArrayBufferPrototypeSlice);
+ JSObject::AddProperty(global, name, array_buffer_fun, DONT_ENUM);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
Context::ARRAY_BUFFER_FUN_INDEX);
InstallSpeciesGetter(array_buffer_fun);
@@ -2733,23 +2766,61 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_array_buffer_noinit_fun(*array_buffer_noinit_fun);
}
- { // -- T y p e d A r r a y
- Handle<JSObject> prototype =
+ { // -- S h a r e d A r r a y B u f f e r
+ Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
+ Handle<JSFunction> shared_array_buffer_fun = CreateArrayBuffer(
+ name, Builtins::kSharedArrayBufferPrototypeGetByteLength,
+ BuiltinFunctionId::kSharedArrayBufferByteLength,
+ Builtins::kSharedArrayBufferPrototypeSlice);
+ InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
+ Context::SHARED_ARRAY_BUFFER_FUN_INDEX);
+ InstallSpeciesGetter(shared_array_buffer_fun);
+ }
+
+ { // -- A t o m i c s
+ Handle<JSObject> atomics_object =
factory->NewJSObject(isolate->object_function(), TENURED);
- native_context()->set_typed_array_prototype(*prototype);
+ native_context()->set_atomics_object(*atomics_object);
+
+ SimpleInstallFunction(atomics_object, "load", Builtins::kAtomicsLoad, 2,
+ true);
+ SimpleInstallFunction(atomics_object, "store", Builtins::kAtomicsStore, 3,
+ true);
+ SimpleInstallFunction(atomics_object, "add", Builtins::kAtomicsAdd, 3,
+ true);
+ SimpleInstallFunction(atomics_object, "sub", Builtins::kAtomicsSub, 3,
+ true);
+ SimpleInstallFunction(atomics_object, "and", Builtins::kAtomicsAnd, 3,
+ true);
+ SimpleInstallFunction(atomics_object, "or", Builtins::kAtomicsOr, 3, true);
+ SimpleInstallFunction(atomics_object, "xor", Builtins::kAtomicsXor, 3,
+ true);
+ SimpleInstallFunction(atomics_object, "exchange",
+ Builtins::kAtomicsExchange, 3, true);
+ SimpleInstallFunction(atomics_object, "compareExchange",
+ Builtins::kAtomicsCompareExchange, 4, true);
+ SimpleInstallFunction(atomics_object, "isLockFree",
+ Builtins::kAtomicsIsLockFree, 1, true);
+ SimpleInstallFunction(atomics_object, "wait", Builtins::kAtomicsWait, 4,
+ true);
+ SimpleInstallFunction(atomics_object, "wake", Builtins::kAtomicsWake, 3,
+ true);
+ }
+ { // -- T y p e d A r r a y
Handle<JSFunction> typed_array_fun =
CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
- JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
- Builtins::kIllegal);
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
typed_array_fun->shared()->set_native(false);
InstallSpeciesGetter(typed_array_fun);
-
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(),
- typed_array_fun, DONT_ENUM);
native_context()->set_typed_array_function(*typed_array_fun);
+ // Setup %TypedArrayPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(typed_array_fun->instance_prototype()));
+ native_context()->set_typed_array_prototype(*prototype);
+
// Install the "buffer", "byteOffset", "byteLength" and "length"
// getters on the {prototype}.
SimpleInstallGetter(prototype, factory->buffer_string(),
@@ -2765,20 +2836,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
kTypedArrayLength);
// Install "keys", "values" and "entries" methods on the {prototype}.
- Handle<JSFunction> entries =
- SimpleInstallFunction(prototype, factory->entries_string(),
- Builtins::kTypedArrayPrototypeEntries, 0, true);
- entries->shared()->set_builtin_function_id(kTypedArrayEntries);
-
- Handle<JSFunction> keys =
- SimpleInstallFunction(prototype, factory->keys_string(),
- Builtins::kTypedArrayPrototypeKeys, 0, true);
- keys->shared()->set_builtin_function_id(kTypedArrayKeys);
-
- Handle<JSFunction> values =
- SimpleInstallFunction(prototype, factory->values_string(),
- Builtins::kTypedArrayPrototypeValues, 0, true);
- values->shared()->set_builtin_function_id(kTypedArrayValues);
+ SimpleInstallFunction(prototype, "entries",
+ Builtins::kTypedArrayPrototypeEntries, 0, true,
+ kTypedArrayEntries);
+
+ SimpleInstallFunction(prototype, "keys", Builtins::kTypedArrayPrototypeKeys,
+ 0, true, kTypedArrayKeys);
+
+ Handle<JSFunction> values = SimpleInstallFunction(
+ prototype, "values", Builtins::kTypedArrayPrototypeValues, 0, true,
+ kTypedArrayValues);
JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
DONT_ENUM);
@@ -2789,6 +2856,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeEvery, 1, false);
SimpleInstallFunction(prototype, "fill",
Builtins::kTypedArrayPrototypeFill, 1, false);
+ SimpleInstallFunction(prototype, "forEach",
+ Builtins::kTypedArrayPrototypeForEach, 1, false);
SimpleInstallFunction(prototype, "includes",
Builtins::kTypedArrayPrototypeIncludes, 1, false);
SimpleInstallFunction(prototype, "indexOf",
@@ -2845,29 +2914,27 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- D a t a V i e w
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- Handle<JSFunction> data_view_fun =
- InstallFunction(global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSizeWithEmbedderFields, prototype,
- Builtins::kDataViewConstructor);
+ Handle<JSFunction> data_view_fun = InstallFunction(
+ global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSizeWithEmbedderFields, factory->the_hole_value(),
+ Builtins::kDataViewConstructor);
InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
data_view_fun->shared()->SetConstructStub(
- *isolate->builtins()->DataViewConstructor_ConstructStub());
+ *builtins->DataViewConstructor_ConstructStub());
data_view_fun->shared()->set_length(3);
data_view_fun->shared()->DontAdaptArguments();
+ // Setup %DataViewPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(data_view_fun->instance_prototype()));
+
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("DataView"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(),
- data_view_fun, DONT_ENUM);
-
// Install the "buffer", "byteOffset" and "byteLength" getters
// on the {prototype}.
SimpleInstallGetter(prototype, factory->buffer_string(),
@@ -2915,20 +2982,101 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- M a p
- Handle<JSFunction> js_map_fun = InstallFunction(
- global, "Map", JS_MAP_TYPE, JSMap::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ {
+ Handle<String> index_string = isolate->factory()->zero_string();
+ uint32_t field =
+ StringHasher::MakeArrayIndexHash(0, index_string->length());
+ index_string->set_hash_field(field);
+
+ index_string = isolate->factory()->one_string();
+ field = StringHasher::MakeArrayIndexHash(1, index_string->length());
+ index_string->set_hash_field(field);
+ }
+
+ Handle<JSFunction> js_map_fun =
+ InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+ factory->the_hole_value(), Builtins::kMapConstructor);
InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
Context::JS_MAP_FUN_INDEX);
+
+ Handle<SharedFunctionInfo> shared(js_map_fun->shared(), isolate);
+ shared->SetConstructStub(*builtins->JSBuiltinsConstructStub());
+ shared->set_instance_class_name(isolate->heap()->Map_string());
+ shared->DontAdaptArguments();
+ shared->set_length(0);
+
+ // Setup %MapPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(js_map_fun->instance_prototype()));
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Map_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<JSFunction> map_get =
+ SimpleInstallFunction(prototype, "get", Builtins::kMapGet, 1, true);
+ native_context()->set_map_get(*map_get);
+
+ Handle<JSFunction> map_has =
+ SimpleInstallFunction(prototype, "has", Builtins::kMapHas, 1, true);
+ native_context()->set_map_has(*map_has);
+
+ SimpleInstallFunction(prototype, "clear", Builtins::kMapClear, 0, true);
+ Handle<JSFunction> entries = SimpleInstallFunction(
+ prototype, "entries", Builtins::kMapPrototypeEntries, 0, true);
+ JSObject::AddProperty(prototype, factory->iterator_symbol(), entries,
+ DONT_ENUM);
+ SimpleInstallFunction(prototype, "forEach", Builtins::kMapPrototypeForEach,
+ 1, false);
+ SimpleInstallFunction(prototype, "keys", Builtins::kMapPrototypeKeys, 0,
+ true);
+ SimpleInstallGetter(prototype, factory->InternalizeUtf8String("size"),
+ Builtins::kMapPrototypeGetSize, true,
+ BuiltinFunctionId::kMapSize);
+ SimpleInstallFunction(prototype, "values", Builtins::kMapPrototypeValues, 0,
+ true);
InstallSpeciesGetter(js_map_fun);
}
{ // -- S e t
- Handle<JSFunction> js_set_fun = InstallFunction(
- global, "Set", JS_SET_TYPE, JSSet::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSFunction> js_set_fun =
+ InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+ factory->the_hole_value(), Builtins::kSetConstructor);
InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
Context::JS_SET_FUN_INDEX);
+
+ Handle<SharedFunctionInfo> shared(js_set_fun->shared(), isolate);
+ shared->SetConstructStub(*builtins->JSBuiltinsConstructStub());
+ shared->set_instance_class_name(isolate->heap()->Set_string());
+ shared->DontAdaptArguments();
+ shared->set_length(0);
+
+ // Setup %SetPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(js_set_fun->instance_prototype()));
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Set_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<JSFunction> set_has =
+ SimpleInstallFunction(prototype, "has", Builtins::kSetHas, 1, true);
+ native_context()->set_set_has(*set_has);
+ SimpleInstallFunction(prototype, "clear", Builtins::kSetClear, 0, true);
+ SimpleInstallFunction(prototype, "entries", Builtins::kSetPrototypeEntries,
+ 0, true);
+ SimpleInstallFunction(prototype, "forEach", Builtins::kSetPrototypeForEach,
+ 1, false);
+ SimpleInstallGetter(prototype, factory->InternalizeUtf8String("size"),
+ Builtins::kSetPrototypeGetSize, true,
+ BuiltinFunctionId::kSetSize);
+ Handle<JSFunction> values = SimpleInstallFunction(
+ prototype, "values", Builtins::kSetPrototypeValues, 0, true);
+ JSObject::AddProperty(prototype, factory->keys_string(), values, DONT_ENUM);
+ JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
+ DONT_ENUM);
InstallSpeciesGetter(js_set_fun);
}
@@ -2978,37 +3126,57 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- W e a k M a p
- Handle<JSFunction> js_weak_map_fun = InstallFunction(
- global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, js_weak_map_fun,
+ Handle<JSFunction> cons =
+ InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, cons,
Context::JS_WEAK_MAP_FUN_INDEX);
+ // Setup %WeakMapPrototype%.
+ Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()));
+
+ SimpleInstallFunction(prototype, "get", Builtins::kWeakMapGet, 1, true);
+ SimpleInstallFunction(prototype, "has", Builtins::kWeakMapHas, 1, true);
+
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("WeakMap"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
{ // -- W e a k S e t
- Handle<JSFunction> js_weak_set_fun = InstallFunction(
- global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, js_weak_set_fun,
+ Handle<JSFunction> cons =
+ InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ factory->the_hole_value(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, cons,
Context::JS_WEAK_SET_FUN_INDEX);
+ // Setup %WeakSetPrototype%.
+ Handle<JSObject> prototype(JSObject::cast(cons->instance_prototype()));
+
+ SimpleInstallFunction(prototype, "has", Builtins::kWeakSetHas, 1, true);
+
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("WeakSet"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
{ // -- P r o x y
CreateJSProxyMaps();
- Handle<String> name = factory->Proxy_string();
- Handle<Code> code(isolate->builtins()->ProxyConstructor());
+ Handle<Map> proxy_function_map =
+ Map::Copy(isolate->sloppy_function_without_prototype_map(), "Proxy");
+ proxy_function_map->set_is_constructor(true);
+ Handle<String> name = factory->Proxy_string();
+ Handle<Code> code(builtins->ProxyConstructor());
Handle<JSFunction> proxy_function =
- factory->NewFunction(isolate->proxy_function_map(),
- factory->Proxy_string(), MaybeHandle<Code>(code));
+ factory->NewFunction(proxy_function_map, name, code);
- JSFunction::SetInitialMap(
- proxy_function, Handle<Map>(native_context()->proxy_map(), isolate),
- factory->null_value());
+ JSFunction::SetInitialMap(proxy_function, isolate->proxy_map(),
+ factory->null_value());
proxy_function->shared()->SetConstructStub(
- *isolate->builtins()->ProxyConstructor_ConstructStub());
+ *builtins->ProxyConstructor_ConstructStub());
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
@@ -3098,13 +3266,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
Handle<String> arguments_string = factory->Arguments_string();
- Handle<Code> code = isolate->builtins()->Illegal();
- Handle<JSFunction> function = factory->NewFunctionWithoutPrototype(
- arguments_string, code);
+ Handle<Code> code(builtins->Illegal());
+ Handle<JSFunction> function =
+ factory->NewFunctionWithoutPrototype(arguments_string, code, STRICT);
function->shared()->set_instance_class_name(*arguments_string);
Handle<Map> map = factory->NewMap(
- JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
+ JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, PACKED_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -3130,7 +3298,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype());
DCHECK(!map->is_dictionary_map());
- DCHECK(IsFastObjectElementsKind(map->elements_kind()));
+ DCHECK(IsObjectElementsKind(map->elements_kind()));
}
{ // --- fast and slow aliased arguments map
@@ -3153,7 +3321,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Create the ThrowTypeError function.
Handle<AccessorPair> callee = factory->NewAccessorPair();
- Handle<JSFunction> poison = GetStrictArgumentsPoisonFunction();
+ Handle<JSFunction> poison = GetThrowTypeErrorIntrinsic();
// Install the ThrowTypeError function.
callee->set_getter(*poison);
@@ -3161,7 +3329,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(
- JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
+ JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, PACKED_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -3190,39 +3358,33 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_strict_arguments_map(*map);
DCHECK(!map->is_dictionary_map());
- DCHECK(IsFastObjectElementsKind(map->elements_kind()));
+ DCHECK(IsObjectElementsKind(map->elements_kind()));
}
{ // --- context extension
// Create a function for the context extension objects.
- Handle<Code> code = isolate->builtins()->Illegal();
- Handle<JSFunction> context_extension_fun = factory->NewFunction(
- factory->empty_string(), code, JS_CONTEXT_EXTENSION_OBJECT_TYPE,
- JSObject::kHeaderSize);
-
- Handle<String> name = factory->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("context_extension"));
+ Handle<JSFunction> context_extension_fun = CreateFunction(
+ isolate, factory->empty_string(), JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JSObject::kHeaderSize, factory->the_hole_value(), Builtins::kIllegal);
+ Handle<String> name = factory->InternalizeUtf8String("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
}
-
{
// Set up the call-as-function delegate.
- Handle<Code> code = isolate->builtins()->HandleApiCallAsFunction();
- Handle<JSFunction> delegate = factory->NewFunction(
- factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<JSFunction> delegate =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kHandleApiCallAsFunction, 0, false);
native_context()->set_call_as_function_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
}
{
// Set up the call-as-constructor delegate.
- Handle<Code> code = isolate->builtins()->HandleApiCallAsConstructor();
- Handle<JSFunction> delegate = factory->NewFunction(
- factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<JSFunction> delegate =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kHandleApiCallAsConstructor, 0, false);
native_context()->set_call_as_constructor_delegate(*delegate);
- delegate->shared()->DontAdaptArguments();
}
} // NOLINT(readability/fn_size)
@@ -3316,7 +3478,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(4 * KB)) {
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
isolate->StackOverflow();
return false;
}
@@ -3468,13 +3630,13 @@ void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
// The utils object can be removed for cases that reach this point.
native_context()->set_natives_utils_object(heap()->undefined_value());
native_context()->set_extras_utils_object(heap()->undefined_value());
- native_context()->set_exports_container(heap()->undefined_value());
}
void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> container) {
Factory* factory = isolate->factory();
+ Builtins* builtins = isolate->builtins();
HandleScope scope(isolate);
Handle<Context> native_context = isolate->native_context();
#define EXPORT_PRIVATE_SYMBOL(NAME) \
@@ -3505,16 +3667,14 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
container, factory->InternalizeUtf8String("GeneratorFunctionPrototype"),
generator_function_prototype, NONE);
- static const bool kUseStrictFunctionMap = true;
Handle<JSFunction> generator_function_function = InstallFunction(
container, "GeneratorFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
- generator_function_prototype, Builtins::kGeneratorFunctionConstructor,
- kUseStrictFunctionMap);
+ generator_function_prototype, Builtins::kGeneratorFunctionConstructor);
generator_function_function->set_prototype_or_initial_map(
native_context->generator_function_map());
generator_function_function->shared()->DontAdaptArguments();
generator_function_function->shared()->SetConstructStub(
- *isolate->builtins()->GeneratorFunctionConstructor());
+ *builtins->GeneratorFunctionConstructor());
generator_function_function->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, generator_function_function,
@@ -3536,16 +3696,15 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> async_generator_function_prototype(
iter.GetCurrent<JSObject>());
- static const bool kUseStrictFunctionMap = true;
- Handle<JSFunction> async_generator_function_function = InstallFunction(
- container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
- JSFunction::kSize, async_generator_function_prototype,
- Builtins::kAsyncGeneratorFunctionConstructor, kUseStrictFunctionMap);
+ Handle<JSFunction> async_generator_function_function =
+ InstallFunction(container, "AsyncGeneratorFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSize, async_generator_function_prototype,
+ Builtins::kAsyncGeneratorFunctionConstructor);
async_generator_function_function->set_prototype_or_initial_map(
native_context->async_generator_function_map());
async_generator_function_function->shared()->DontAdaptArguments();
async_generator_function_function->shared()->SetConstructStub(
- *isolate->builtins()->AsyncGeneratorFunctionConstructor());
+ *builtins->AsyncGeneratorFunctionConstructor());
async_generator_function_function->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, async_generator_function_function,
@@ -3564,33 +3723,88 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- S e t I t e r a t o r
- Handle<JSObject> set_iterator_prototype =
- isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
- JSObject::ForceSetPrototype(set_iterator_prototype, iterator_prototype);
- Handle<JSFunction> set_iterator_function = InstallFunction(
- container, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
- set_iterator_prototype, Builtins::kIllegal);
- native_context->set_set_iterator_map(set_iterator_function->initial_map());
+ Handle<String> name = factory->InternalizeUtf8String("Set Iterator");
+
+ // Setup %SetIteratorPrototype%.
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::ForceSetPrototype(prototype, iterator_prototype);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), name,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Install the next function on the {prototype}.
+ SimpleInstallFunction(prototype, "next",
+ Builtins::kSetIteratorPrototypeNext, 0, true,
+ kSetIteratorNext);
+
+ // Setup SetIterator constructor.
+ Handle<JSFunction> set_iterator_function =
+ InstallFunction(container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
+ JSSetIterator::kSize, prototype, Builtins::kIllegal);
+ set_iterator_function->shared()->set_native(false);
+ set_iterator_function->shared()->set_instance_class_name(*name);
+
+ Handle<Map> set_value_iterator_map(set_iterator_function->initial_map(),
+ isolate);
+ native_context->set_set_value_iterator_map(*set_value_iterator_map);
+
+ Handle<Map> set_key_value_iterator_map =
+ Map::Copy(set_value_iterator_map, "JS_SET_KEY_VALUE_ITERATOR_TYPE");
+ set_key_value_iterator_map->set_instance_type(
+ JS_SET_KEY_VALUE_ITERATOR_TYPE);
+ native_context->set_set_key_value_iterator_map(*set_key_value_iterator_map);
}
{ // -- M a p I t e r a t o r
- Handle<JSObject> map_iterator_prototype =
- isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
- JSObject::ForceSetPrototype(map_iterator_prototype, iterator_prototype);
- Handle<JSFunction> map_iterator_function = InstallFunction(
- container, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
- map_iterator_prototype, Builtins::kIllegal);
- native_context->set_map_iterator_map(map_iterator_function->initial_map());
+ Handle<String> name = factory->InternalizeUtf8String("Map Iterator");
+
+ // Setup %MapIteratorPrototype%.
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::ForceSetPrototype(prototype, iterator_prototype);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), name,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Install the next function on the {prototype}.
+ SimpleInstallFunction(prototype, "next",
+ Builtins::kMapIteratorPrototypeNext, 0, true,
+ kMapIteratorNext);
+
+ // Setup MapIterator constructor.
+ Handle<JSFunction> map_iterator_function =
+ InstallFunction(container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
+ JSMapIterator::kSize, prototype, Builtins::kIllegal);
+ map_iterator_function->shared()->set_native(false);
+ map_iterator_function->shared()->set_instance_class_name(*name);
+
+ Handle<Map> map_key_iterator_map(map_iterator_function->initial_map(),
+ isolate);
+ native_context->set_map_key_iterator_map(*map_key_iterator_map);
+
+ Handle<Map> map_key_value_iterator_map =
+ Map::Copy(map_key_iterator_map, "JS_MAP_KEY_VALUE_ITERATOR_TYPE");
+ map_key_value_iterator_map->set_instance_type(
+ JS_MAP_KEY_VALUE_ITERATOR_TYPE);
+ native_context->set_map_key_value_iterator_map(*map_key_value_iterator_map);
+
+ Handle<Map> map_value_iterator_map =
+ Map::Copy(map_key_iterator_map, "JS_MAP_VALUE_ITERATOR_TYPE");
+ map_value_iterator_map->set_instance_type(JS_MAP_VALUE_ITERATOR_TYPE);
+ native_context->set_map_value_iterator_map(*map_value_iterator_map);
}
{ // -- S c r i p t
- // Builtin functions for Script.
+ Handle<String> name = factory->InternalizeUtf8String("Script");
Handle<JSFunction> script_fun = InstallFunction(
- container, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
- Handle<JSObject> prototype =
- factory->NewJSObject(isolate->object_function(), TENURED);
- JSFunction::SetPrototype(script_fun, prototype);
+ container, name, JS_VALUE_TYPE, JSValue::kSize,
+ factory->the_hole_value(), Builtins::kUnsupportedThrower, DONT_ENUM);
+ script_fun->shared()->set_instance_class_name(*name);
native_context->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -3718,17 +3932,16 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
PrototypeIterator iter(native_context->async_function_map());
Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
- static const bool kUseStrictFunctionMap = true;
Handle<JSFunction> async_function_constructor = InstallFunction(
container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
- async_function_prototype, Builtins::kAsyncFunctionConstructor,
- kUseStrictFunctionMap);
+ async_function_prototype, Builtins::kAsyncFunctionConstructor);
+ async_function_constructor->set_prototype_or_initial_map(
+ native_context->async_function_map());
async_function_constructor->shared()->DontAdaptArguments();
async_function_constructor->shared()->SetConstructStub(
- *isolate->builtins()->AsyncFunctionConstructor());
+ *builtins->AsyncFunctionConstructor());
async_function_constructor->shared()->set_length(1);
- InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
- Context::ASYNC_FUNCTION_FUNCTION_INDEX);
+ native_context->set_async_function_constructor(*async_function_constructor);
JSObject::ForceSetPrototype(async_function_constructor,
isolate->function_function());
@@ -3743,22 +3956,19 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitCaught, 3, false);
- InstallWithIntrinsicDefaultProto(
- isolate, function, Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX);
+ Builtins::kAsyncFunctionAwaitCaught, 2, false);
+ native_context->set_async_function_await_caught(*function);
}
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitUncaught, 3, false);
- InstallWithIntrinsicDefaultProto(
- isolate, function, Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+ Builtins::kAsyncFunctionAwaitUncaught, 2, false);
+ native_context->set_async_function_await_uncaught(*function);
}
{
- Handle<Code> code =
- isolate->builtins()->AsyncFunctionAwaitRejectClosure();
+ Handle<Code> code(builtins->AsyncFunctionAwaitRejectClosure());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
@@ -3767,8 +3977,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{
- Handle<Code> code =
- isolate->builtins()->AsyncFunctionAwaitResolveClosure();
+ Handle<Code> code(builtins->AsyncFunctionAwaitResolveClosure());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
@@ -3780,16 +3989,14 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kAsyncFunctionPromiseCreate, 0, false);
- InstallWithIntrinsicDefaultProto(
- isolate, function, Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX);
+ native_context->set_async_function_promise_create(*function);
}
{
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(),
Builtins::kAsyncFunctionPromiseRelease, 1, false);
- InstallWithIntrinsicDefaultProto(
- isolate, function, Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX);
+ native_context->set_async_function_promise_release(*function);
}
}
@@ -3802,15 +4009,14 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> callsite_fun = InstallFunction(
container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
+ factory->the_hole_value(), Builtins::kUnsupportedThrower);
callsite_fun->shared()->DontAdaptArguments();
isolate->native_context()->set_callsite_function(*callsite_fun);
{
- Handle<JSObject> proto =
- factory->NewJSObject(isolate->object_function(), TENURED);
- JSObject::AddProperty(proto, factory->constructor_string(), callsite_fun,
- DONT_ENUM);
+ // Setup CallSite.prototype.
+ Handle<JSObject> prototype(
+ JSObject::cast(callsite_fun->instance_prototype()));
struct FunctionInfo {
const char* name;
@@ -3841,13 +4047,10 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> fun;
for (const FunctionInfo& info : infos) {
- SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
+ SimpleInstallFunction(prototype, info.name, info.id, 0, true, attrs);
}
-
- JSFunction::SetPrototype(callsite_fun, proto);
}
}
- isolate->native_context()->set_exports_container(*container);
}
@@ -3859,9 +4062,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
@@ -3883,21 +4084,6 @@ void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
JSObject::AddProperty(symbol, name_string, value, attributes);
}
-void Genesis::InstallOneBuiltinFunction(Handle<Object> prototype,
- const char* method_name,
- Builtins::Name builtin_name) {
- LookupIterator it(
- prototype, isolate()->factory()->NewStringFromAsciiChecked(method_name),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- Handle<Object> function = Object::GetProperty(&it).ToHandleChecked();
- Handle<JSFunction>::cast(function)->set_code(
- isolate()->builtins()->builtin(builtin_name));
- SharedFunctionInfo* info = Handle<JSFunction>::cast(function)->shared();
- info->set_code(isolate()->builtins()->builtin(builtin_name));
- info->set_internal_formal_parameter_count(
- Builtins::GetBuiltinParameterCount(builtin_name));
-}
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -3905,51 +4091,19 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
Isolate* isolate = global->GetIsolate();
Factory* factory = isolate->factory();
- Handle<JSFunction> shared_array_buffer_fun =
- InstallArrayBuffer(global, "SharedArrayBuffer",
- Builtins::kSharedArrayBufferPrototypeGetByteLength,
- BuiltinFunctionId::kSharedArrayBufferByteLength,
- Builtins::kSharedArrayBufferPrototypeSlice);
- InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
- Context::SHARED_ARRAY_BUFFER_FUN_INDEX);
- InstallSpeciesGetter(shared_array_buffer_fun);
-
- Handle<String> name = factory->InternalizeUtf8String("Atomics");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- Handle<JSObject> atomics_object = factory->NewJSObject(cons, TENURED);
- DCHECK(atomics_object->IsJSObject());
- JSObject::AddProperty(global, name, atomics_object, DONT_ENUM);
- JSObject::AddProperty(atomics_object, factory->to_string_tag_symbol(), name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ {
+ Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
+ JSObject::AddProperty(global, name, isolate->shared_array_buffer_fun(),
+ DONT_ENUM);
+ }
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("load"),
- Builtins::kAtomicsLoad, 2, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("store"),
- Builtins::kAtomicsStore, 3, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("add"),
- Builtins::kAtomicsAdd, 3, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("sub"),
- Builtins::kAtomicsSub, 3, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("and"),
- Builtins::kAtomicsAnd, 3, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("or"),
- Builtins::kAtomicsOr, 3, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("xor"),
- Builtins::kAtomicsXor, 3, true);
- SimpleInstallFunction(atomics_object,
- factory->InternalizeUtf8String("exchange"),
- Builtins::kAtomicsExchange, 3, true);
- SimpleInstallFunction(atomics_object,
- factory->InternalizeUtf8String("compareExchange"),
- Builtins::kAtomicsCompareExchange, 4, true);
- SimpleInstallFunction(atomics_object,
- factory->InternalizeUtf8String("isLockFree"),
- Builtins::kAtomicsIsLockFree, 1, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("wait"),
- Builtins::kAtomicsWait, 4, true);
- SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("wake"),
- Builtins::kAtomicsWake, 3, true);
+ {
+ Handle<String> name = factory->InternalizeUtf8String("Atomics");
+ JSObject::AddProperty(global, name, isolate->atomics_object(), DONT_ENUM);
+ JSObject::AddProperty(
+ isolate->atomics_object(), factory->to_string_tag_symbol(), name,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
}
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
@@ -3995,9 +4149,7 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
native_context()->set_promise_prototype_map(*prototype_map);
{
- Handle<Code> code =
- handle(isolate()->builtins()->builtin(Builtins::kPromiseThenFinally),
- isolate());
+ Handle<Code> code(builtins()->PromiseThenFinally());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
@@ -4007,9 +4159,7 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
}
{
- Handle<Code> code =
- handle(isolate()->builtins()->builtin(Builtins::kPromiseCatchFinally),
- isolate());
+ Handle<Code> code(builtins()->PromiseCatchFinally());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
@@ -4019,9 +4169,7 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
}
{
- Handle<Code> code = handle(
- isolate()->builtins()->builtin(Builtins::kPromiseValueThunkFinally),
- isolate());
+ Handle<Code> code(builtins()->PromiseValueThunkFinally());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(0);
@@ -4030,9 +4178,7 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
}
{
- Handle<Code> code =
- handle(isolate()->builtins()->builtin(Builtins::kPromiseThrowerFinally),
- isolate());
+ Handle<Code> code(builtins()->PromiseThrowerFinally());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(0);
@@ -4058,82 +4204,42 @@ void Genesis::InitializeGlobal_harmony_regexp_dotall() {
}
#ifdef V8_INTL_SUPPORT
-namespace {
-void SetFunction(Handle<JSObject> target, Handle<JSFunction> function,
- Handle<Name> name, PropertyAttributes attributes = DONT_ENUM) {
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, function, attributes)
- .ToHandleChecked();
+void Genesis::InitializeGlobal_harmony_number_format_to_parts() {
+ if (!FLAG_harmony_number_format_to_parts) return;
+ Handle<JSObject> number_format_prototype(JSObject::cast(
+ native_context()->intl_number_format_function()->prototype()));
+ Handle<String> name = factory()->InternalizeUtf8String("formatToParts");
+ InstallFunction(number_format_prototype,
+ SimpleCreateFunction(
+ isolate(), name,
+ Builtins::kNumberFormatPrototypeFormatToParts, 0, false),
+ name);
}
-} // namespace
-
-void Genesis::InitializeGlobal_icu_case_mapping() {
- if (!FLAG_icu_case_mapping) return;
-
- Handle<JSReceiver> exports_container(
- JSReceiver::cast(native_context()->exports_container()));
-
- Handle<JSObject> string_prototype(
- JSObject::cast(native_context()->string_function()->prototype()));
-
- {
- Handle<String> name = factory()->InternalizeUtf8String("toLowerCase");
- SetFunction(string_prototype,
- SimpleCreateFunction(isolate(), name,
- Builtins::kStringPrototypeToLowerCaseIntl,
- 0, true),
- name);
- }
- {
- Handle<String> name = factory()->InternalizeUtf8String("toUpperCase");
- SetFunction(string_prototype,
- SimpleCreateFunction(isolate(), name,
- Builtins::kStringPrototypeToUpperCaseIntl,
- 0, false),
- name);
- }
-
- Handle<JSFunction> to_locale_lower_case = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(
- exports_container,
- factory()->InternalizeUtf8String("ToLocaleLowerCaseIntl"))
- .ToHandleChecked());
- SetFunction(string_prototype, to_locale_lower_case,
- factory()->InternalizeUtf8String("toLocaleLowerCase"));
-
- Handle<JSFunction> to_locale_upper_case = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(
- exports_container,
- factory()->InternalizeUtf8String("ToLocaleUpperCaseIntl"))
- .ToHandleChecked());
- SetFunction(string_prototype, to_locale_upper_case,
- factory()->InternalizeUtf8String("toLocaleUpperCase"));
-}
-#endif
+#endif // V8_INTL_SUPPORT
-Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
- const char* name,
- Builtins::Name call_byteLength,
- BuiltinFunctionId byteLength_id,
- Builtins::Name call_slice) {
+Handle<JSFunction> Genesis::CreateArrayBuffer(Handle<String> name,
+ Builtins::Name call_byteLength,
+ BuiltinFunctionId byteLength_id,
+ Builtins::Name call_slice) {
// Create the %ArrayBufferPrototype%
// Setup the {prototype} with the given {name} for @@toStringTag.
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- JSObject::AddProperty(prototype, factory()->to_string_tag_symbol(),
- factory()->NewStringFromAsciiChecked(name),
+ JSObject::AddProperty(prototype, factory()->to_string_tag_symbol(), name,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
// Allocate the constructor with the given {prototype}.
Handle<JSFunction> array_buffer_fun =
- InstallFunction(target, name, JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithEmbedderFields, prototype,
- Builtins::kArrayBufferConstructor);
+ CreateFunction(isolate(), name, JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithEmbedderFields, prototype,
+ Builtins::kArrayBufferConstructor);
array_buffer_fun->shared()->SetConstructStub(
- *isolate()->builtins()->ArrayBufferConstructor_ConstructStub());
+ *builtins()->ArrayBufferConstructor_ConstructStub());
array_buffer_fun->shared()->DontAdaptArguments();
array_buffer_fun->shared()->set_length(1);
+ array_buffer_fun->shared()->set_instance_class_name(*name);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory()->constructor_string(),
@@ -4209,7 +4315,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
factory()->NewJSObject(isolate()->object_function());
native_context()->set_extras_utils_object(*extras_utils);
- InstallInternalArray(extras_utils, "InternalPackedArray", FAST_ELEMENTS);
+ InstallInternalArray(extras_utils, "InternalPackedArray", PACKED_ELEMENTS);
InstallFunction(extras_utils, isolate()->promise_internal_constructor(),
factory()->NewStringFromAsciiChecked("createPromise"));
@@ -4227,12 +4333,11 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Builtin function for OpaqueReference -- a JSValue-based object,
// that keeps its field isolated from JavaScript code. It may store
// objects, that JavaScript code may not access.
- Handle<JSFunction> opaque_reference_fun = factory()->NewFunction(
- factory()->empty_string(), isolate()->builtins()->Illegal(),
- isolate()->initial_object_prototype(), JS_VALUE_TYPE, JSValue::kSize);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- JSFunction::SetPrototype(opaque_reference_fun, prototype);
+ Handle<JSFunction> opaque_reference_fun =
+ CreateFunction(isolate(), factory()->empty_string(), JS_VALUE_TYPE,
+ JSValue::kSize, prototype, Builtins::kIllegal);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -4246,9 +4351,9 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Handle<JSObject> utils =
Handle<JSObject>::cast(isolate()->natives_utils_object());
Handle<JSFunction> array_function =
- InstallInternalArray(utils, "InternalArray", FAST_HOLEY_ELEMENTS);
+ InstallInternalArray(utils, "InternalArray", HOLEY_ELEMENTS);
native_context()->set_internal_array_function(*array_function);
- InstallInternalArray(utils, "InternalPackedArray", FAST_ELEMENTS);
+ InstallInternalArray(utils, "InternalPackedArray", PACKED_ELEMENTS);
}
// Run the rest of the native scripts.
@@ -4339,49 +4444,11 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Verification of important array prototype properties.
Object* length = proto->length();
CHECK(length->IsSmi());
- CHECK(Smi::cast(length)->value() == 0);
- CHECK(proto->HasFastSmiOrObjectElements());
+ CHECK(Smi::ToInt(length) == 0);
+ CHECK(proto->HasSmiOrObjectElements());
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
proto->set_elements(heap()->empty_fixed_array());
-
- // Install Array.prototype.concat
- Handle<JSFunction> concat =
- InstallFunction(proto, "concat", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kArrayConcat);
-
- // Make sure that Array.prototype.concat appears to be compiled.
- // The code will never be called, but inline caching for call will
- // only work if it appears to be compiled.
- concat->shared()->DontAdaptArguments();
- DCHECK(concat->is_compiled());
- // Set the lengths for the functions to satisfy ECMA-262.
- concat->shared()->set_length(1);
-
- // Install Array.prototype.forEach
- Handle<JSFunction> forEach =
- InstallArrayBuiltinFunction(proto, "forEach", Builtins::kArrayForEach);
- // Add forEach to the context.
- native_context()->set_array_for_each_iterator(*forEach);
-
- // Install Array.prototype.filter
- InstallArrayBuiltinFunction(proto, "filter", Builtins::kArrayFilter);
-
- // Install Array.prototype.map
- InstallArrayBuiltinFunction(proto, "map", Builtins::kArrayMap);
-
- // Install Array.prototype.every
- InstallArrayBuiltinFunction(proto, "every", Builtins::kArrayEvery);
-
- // Install Array.prototype.some
- InstallArrayBuiltinFunction(proto, "some", Builtins::kArraySome);
-
- // Install Array.prototype.reduce
- InstallArrayBuiltinFunction(proto, "reduce", Builtins::kArrayReduce);
-
- // Install Array.prototype.reduceRight
- InstallArrayBuiltinFunction(proto, "reduceRight",
- Builtins::kArrayReduceRight);
}
// Install InternalArray.prototype.concat
@@ -4389,17 +4456,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Handle<JSFunction> array_constructor(
native_context()->internal_array_function());
Handle<JSObject> proto(JSObject::cast(array_constructor->prototype()));
- Handle<JSFunction> concat =
- InstallFunction(proto, "concat", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kArrayConcat);
-
- // Make sure that InternalArray.prototype.concat appears to be compiled.
- // The code will never be called, but inline caching for call will
- // only work if it appears to be compiled.
- concat->shared()->DontAdaptArguments();
- DCHECK(concat->is_compiled());
- // Set the lengths for the functions to satisfy ECMA-262.
- concat->shared()->set_length(1);
+ SimpleInstallFunction(proto, "concat", Builtins::kArrayConcat, 1, false);
}
InstallBuiltinFunctionIds();
@@ -4854,10 +4911,22 @@ bool Genesis::ConfigureGlobalObjects(
native_context()->set_array_buffer_map(
native_context()->array_buffer_fun()->initial_map());
- native_context()->set_js_map_map(
- native_context()->js_map_fun()->initial_map());
- native_context()->set_js_set_map(
- native_context()->js_set_fun()->initial_map());
+
+ Handle<JSFunction> js_map_fun(native_context()->js_map_fun());
+ Handle<JSFunction> js_set_fun(native_context()->js_set_fun());
+ // Force the Map/Set constructor to fast properties, so that we can use the
+ // fast paths for various things like
+ //
+ // x instanceof Map
+ // x instanceof Set
+ //
+ // etc. We should probably come up with a more principled approach once
+ // the JavaScript builtins are gone.
+ JSObject::MigrateSlowToFast(js_map_fun, 0, "Bootstrapping");
+ JSObject::MigrateSlowToFast(js_set_fun, 0, "Bootstrapping");
+
+ native_context()->set_js_map_map(js_map_fun->initial_map());
+ native_context()->set_js_set_map(js_set_fun->initial_map());
return true;
}
@@ -4927,7 +4996,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!to->HasFastProperties());
// Add to dictionary.
Handle<Object> value(descs->GetValue(i), isolate());
- PropertyDetails d(kAccessor, details.attributes(), i + 1,
+ PropertyDetails d(kAccessor, details.attributes(),
PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(to, key, value, d);
}
@@ -4935,24 +5004,18 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
} else if (from->IsJSGlobalObject()) {
// Copy all keys and values in enumeration order.
- Handle<GlobalDictionary> properties =
- Handle<GlobalDictionary>(from->global_dictionary());
- Handle<FixedArray> key_indices =
- GlobalDictionary::IterationIndices(properties);
- for (int i = 0; i < key_indices->length(); i++) {
- int key_index = Smi::cast(key_indices->get(i))->value();
- Object* raw_key = properties->KeyAt(key_index);
- DCHECK(properties->IsKey(isolate(), raw_key));
- DCHECK(raw_key->IsName());
+ Handle<GlobalDictionary> properties(
+ JSGlobalObject::cast(*from)->global_dictionary());
+ Handle<FixedArray> indices = GlobalDictionary::IterationIndices(properties);
+ for (int i = 0; i < indices->length(); i++) {
+ int index = Smi::ToInt(indices->get(i));
// If the property is already there we skip it.
- Handle<Name> key(Name::cast(raw_key), isolate());
+ Handle<PropertyCell> cell(properties->CellAt(index));
+ Handle<Name> key(cell->name(), isolate());
LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
if (it.IsFound()) continue;
// Set the property.
- DCHECK(properties->ValueAt(key_index)->IsPropertyCell());
- Handle<PropertyCell> cell(
- PropertyCell::cast(properties->ValueAt(key_index)), isolate());
Handle<Object> value(cell->value(), isolate());
if (value->IsTheHole(isolate())) continue;
PropertyDetails details = cell->property_details();
@@ -4966,7 +5029,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<FixedArray> key_indices =
NameDictionary::IterationIndices(properties);
for (int i = 0; i < key_indices->length(); i++) {
- int key_index = Smi::cast(key_indices->get(i))->value();
+ int key_index = Smi::ToInt(key_indices->get(i));
Object* raw_key = properties->KeyAt(key_index);
DCHECK(properties->IsKey(isolate(), raw_key));
DCHECK(raw_key->IsName());
@@ -5013,21 +5076,6 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
}
-void Genesis::MakeFunctionInstancePrototypeWritable() {
- // The maps with writable prototype are created in CreateEmptyFunction
- // and CreateStrictModeFunctionMaps respectively. Initially the maps are
- // created with read-only prototype for JS builtins processing.
- DCHECK(!sloppy_function_map_writable_prototype_.is_null());
- DCHECK(!strict_function_map_writable_prototype_.is_null());
-
- // Replace function instance maps to make prototype writable.
- native_context()->set_sloppy_function_map(
- *sloppy_function_map_writable_prototype_);
- native_context()->set_strict_function_map(
- *strict_function_map_writable_prototype_);
-}
-
-
Genesis::Genesis(
Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
@@ -5063,7 +5111,7 @@ Genesis::Genesis(
// proxy of the correct size.
Object* size = isolate->heap()->serialized_global_proxy_sizes()->get(
static_cast<int>(context_snapshot_index) - 1);
- instance_size = Smi::cast(size)->value();
+ instance_size = Smi::ToInt(size);
} else {
instance_size = JSGlobalProxy::SizeWithEmbedderFields(
global_proxy_template.IsEmpty()
@@ -5111,11 +5159,15 @@ Genesis::Genesis(
}
DCHECK(!global_proxy->IsDetachedFrom(native_context()->global_object()));
} else {
+ base::ElapsedTimer timer;
+ if (FLAG_profile_deserialization) timer.Start();
DCHECK_EQ(0u, context_snapshot_index);
// We get here if there was no context snapshot.
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
+ CreateSloppyModeFunctionMaps(empty_function);
CreateStrictModeFunctionMaps(empty_function);
+ CreateObjectFunction(empty_function);
CreateIteratorMaps(empty_function);
CreateAsyncIteratorMaps(empty_function);
CreateAsyncFunctionMaps(empty_function);
@@ -5125,13 +5177,15 @@ Genesis::Genesis(
InitializeNormalizedMapCaches();
if (!InstallNatives(context_type)) return;
-
- MakeFunctionInstancePrototypeWritable();
-
if (!InstallExtraNatives()) return;
if (!ConfigureGlobalObjects(global_proxy_template)) return;
isolate->counters()->contexts_created_from_scratch()->Increment();
+
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ i::PrintF("[Initializing context from scratch took %0.3f ms]\n", ms);
+ }
}
// Install experimental natives. Do not include them into the
@@ -5214,7 +5268,7 @@ Genesis::Genesis(Isolate* isolate,
DCHECK_EQ(global_proxy_data->embedder_field_count(),
global_proxy_template->InternalFieldCount());
Handle<Map> global_proxy_map = isolate->factory()->NewMap(
- JS_GLOBAL_PROXY_TYPE, proxy_size, FAST_HOLEY_SMI_ELEMENTS);
+ JS_GLOBAL_PROXY_TYPE, proxy_size, HOLEY_SMI_ELEMENTS);
global_proxy_map->set_is_access_check_needed(true);
global_proxy_map->set_has_hidden_prototype(true);
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 286ec1ad54..05eb74f091 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -6,6 +6,7 @@
#define V8_BOOTSTRAPPER_H_
#include "src/factory.h"
+#include "src/objects/shared-function-info.h"
#include "src/snapshot/natives.h"
#include "src/visitors.h"
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 286df2eea7..b739170eb5 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -226,7 +226,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
@@ -374,7 +374,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
@@ -427,23 +427,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(r2);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -456,6 +439,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Register scratch = r2;
+
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -486,8 +471,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
__ b(&entry);
__ bind(&loop);
- __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(scratch);
__ bind(&entry);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
@@ -503,13 +488,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
}
@@ -543,15 +528,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldrb(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ b(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r4, r5);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -610,9 +594,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -- sp[4*kPointerSize]: context
// -----------------------------------
__ b(&entry);
+
__ bind(&loop);
- __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(ip);
+ __ ldr(r6, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(r6);
__ bind(&entry);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
@@ -657,18 +642,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &leave_frame);
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldrb(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ b(eq, &use_receiver);
-
} else {
+ __ b(ne, &use_receiver);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ b(&use_receiver);
}
@@ -715,33 +702,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r0 : the value to pass to the generator
// -- r1 : the JSGeneratorObject to resume
// -- r2 : the resume mode (tagged)
- // -- r3 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r3);
- __ AssertGeneratorObject(r1, r3);
+ __ AssertGeneratorObject(r1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(r3, r3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmp(r3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ b(eq, &async_await);
-
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&async_await);
- __ str(r0, FieldMemOperand(
- r1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(r1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r0, r3, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&done_store_input);
- // `r3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
@@ -750,28 +718,31 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
__ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
+ Register scratch = r5;
+
+ // Flood function if we are stepping.
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ mov(ip, Operand(debug_hook));
- __ ldrsb(ip, MemOperand(ip));
- __ cmp(ip, Operand(0));
+ __ mov(scratch, Operand(debug_hook));
+ __ ldrsb(scratch, MemOperand(scratch));
+ __ cmp(scratch, Operand(0));
__ b(ne, &prepare_step_in_if_stepping);
- // Flood function if we need to continue stepping in the suspended generator.
+ // Flood function if we need to continue stepping in the suspended
+ // generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ mov(ip, Operand(debug_suspended_generator));
- __ ldr(ip, MemOperand(ip));
- __ cmp(ip, Operand(r1));
+ __ mov(scratch, Operand(debug_suspended_generator));
+ __ ldr(scratch, MemOperand(scratch));
+ __ cmp(scratch, Operand(r1));
__ b(eq, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
// Push receiver.
- __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ Push(ip);
+ __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
@@ -792,7 +763,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label done_loop, loop;
__ bind(&loop);
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+ __ sub(r3, r3, Operand(1), SetCC);
__ b(mi, &done_loop);
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ b(&loop);
@@ -812,14 +783,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
r0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(r0);
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
- __ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Jump(r5);
+ __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Jump(scratch);
}
__ bind(&prepare_step_in_if_stepping);
@@ -893,7 +863,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ ldr(cp, MemOperand(cp));
@@ -1011,6 +981,118 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count, LeaveCC);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
+ __ b(ne, &no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee if needed, and caller)
+ // -- r3 : new target (preserved for callee if needed, and caller)
+ // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ cmp(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ b(eq, &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ cmp(
+ optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+ // Checking whether the queued function is ready for install is
+ // optional, since we come across interrupts and stack checks elsewhere.
+ // However, not checking may delay installing ready functions, and
+ // always checking would be quite expensive. A good compromise is to
+ // first check against the stack limit as a cue for an interrupt signal.
+ __ LoadRoot(scratch2, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(scratch2));
+ __ b(hs, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ ldr(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ b(ne, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1029,38 +1111,33 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r1;
+ Register feedback_vector = r2;
+
+ // Load the feedback vector from the closure.
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r1);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = r4;
- __ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
- __ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
- __ ldr(
- optimized_code_entry,
- FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- Register debug_info = kInterpreterBytecodeArrayRegister;
- DCHECK(!debug_info.is(r0));
- __ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
- __ SmiTst(debug_info);
- // Load original bytecode array or the debug copy.
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
- __ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex), ne);
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ __ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
+ __ SmiTst(r4);
+ __ b(ne, &maybe_load_debug_bytecode_array);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1071,15 +1148,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
- __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
- __ ldr(r9, FieldMemOperand(
- r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ ldr(r9,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ add(r9, r9, Operand(Smi::FromInt(1)));
- __ str(r9, FieldMemOperand(
- r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ str(r9,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1141,50 +1218,37 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Dispatch to the first bytecode handler for the function.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ __ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
- __ Call(ip);
+ __ Call(r4);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
+ __ SmiUntag(r9);
+ __ tst(r9, Operand(DebugInfo::kHasBreakInfo));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(r1, r4, r5);
+ __ str(r4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, r4, r5);
__ Jump(r4);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ ldr(r5, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
-
- __ b(ne, &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
- r2);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1223,7 +1287,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1246,17 +1310,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. r2, r4, r5 will be modified.
Generate_InterpreterPushArgs(masm, r3, r2, r4, r5);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r2); // Pass the spread in a register
+ __ sub(r0, r0, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1281,15 +1349,21 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Push a slot for the receiver to be constructed.
- __ mov(ip, Operand::Zero());
- __ push(ip);
+ __ mov(r5, Operand::Zero());
+ __ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
// Push the arguments. r5, r4, r6 will be modified.
Generate_InterpreterPushArgs(masm, r0, r4, r5, r6);
- __ AssertUndefinedOrAllocationSite(r2, r5);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r2); // Pass the spread in a register
+ __ sub(r0, r0, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r2, r5);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r1);
@@ -1329,8 +1403,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructArray(
Label stack_overflow;
// Push a slot for the receiver to be constructed.
- __ mov(ip, Operand::Zero());
- __ push(ip);
+ __ mov(r5, Operand::Zero());
+ __ push(r5);
Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
@@ -1387,9 +1461,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ mov(pc, ip);
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ kPointerSizeLog2));
+ __ Jump(scratch);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1415,6 +1491,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -- r1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r1;
+
+ // Get the feedback vector.
+ Register feedback_vector = r2;
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1423,43 +1526,24 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r1;
- Register index = r2;
+ Register feedback_vector = r2;
// Do we have a valid feedback vector?
- __ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r4;
- __ ldr(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ ldr(r5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ b(ne, &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r4;
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ ldrb(r5, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ b(ne, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1477,15 +1561,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1568,7 +1643,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(2, 0, r2);
+ __ PrepareCallCFunction(2, 0);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
@@ -1596,7 +1671,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(2, 0, r2);
+ __ PrepareCallCFunction(2, 0);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
@@ -1619,30 +1694,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ push(r0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r0);
}
__ add(sp, sp, Operand(kPointerSize)); // Ignore state
- __ mov(pc, lr); // Jump to miss handler
+ __ mov(pc, lr); // Jump to ContinueToBuiltin stub
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ str(r0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ ldr(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ add(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(lr);
+ __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1761,45 +1876,39 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r1, argArray into r0 (if present), remove all
+ // 1. Load receiver into r1, argArray into r2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ mov(r3, r2);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, r5);
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
__ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
+ __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
__ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(sp, 0));
- __ mov(r0, r3);
+ __ str(r5, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r0 : argArray
+ // -- r2 : argArray
// -- r1 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r1, &receiver_not_callable);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
- __ b(eq, &receiver_not_callable);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r0, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1808,13 +1917,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ mov(r0, Operand(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1840,13 +1942,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r0: actual number of arguments
// r1: callable
{
+ Register scratch = r3;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ bind(&loop);
- __ ldr(ip, MemOperand(r2, -kPointerSize));
- __ str(ip, MemOperand(r2));
+ __ ldr(scratch, MemOperand(r2, -kPointerSize));
+ __ str(scratch, MemOperand(r2));
__ sub(r2, r2, Operand(kPointerSize));
__ cmp(r2, sp);
__ b(ne, &loop);
@@ -1869,49 +1972,36 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // 1. Load target into r1 (if present), argumentsList into r2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ mov(r5, r1);
__ mov(r2, r1);
- __ mov(r3, r1);
__ sub(r4, r0, Operand(1), SetCC);
__ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
+ __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
__ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
__ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(sp, 0));
- __ mov(r0, r3);
+ __ str(r5, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r0 : argumentsList
+ // -- r2 : argumentsList
// -- r1 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r1, &target_not_callable);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
- __ b(eq, &target_not_callable);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1923,7 +2013,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // 1. Load target into r1 (if present), argumentsList into r2 (if present),
// new.target into r3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1939,48 +2029,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ sub(r4, r4, Operand(1), SetCC, ge);
__ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // new.target
__ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ mov(r0, r2);
}
// ----------- S t a t e -------------
- // -- r0 : argumentsList
+ // -- r2 : argumentsList
// -- r3 : new.target
// -- r1 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r1, &target_not_constructor);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsConstructor));
- __ b(eq, &target_not_constructor);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r3, &new_target_not_constructor);
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsConstructor));
- __ b(eq, &new_target_not_constructor);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ str(r1, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ str(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2007,154 +2075,61 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r0 : argumentsList
- // -- r1 : target
- // -- r3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r1 : target
+ // -- r0 : number of parameters on the stack (not including the receiver)
+ // -- r2 : arguments list (a FixedArray)
+ // -- r4 : len (number of elements to push from args)
+ // -- r3 : new.target (for [[Construct]])
// -----------------------------------
+ __ AssertFixedArray(r2);
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r0, &create_runtime);
-
- // Load the map of argumentsList into r2.
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Load native context into r4.
- __ ldr(r4, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ ldr(ip, ContextMemOperand(r4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r2);
- __ b(eq, &create_arguments);
- __ ldr(ip, ContextMemOperand(r4, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r2);
- __ b(eq, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r2, ip, JS_ARRAY_TYPE);
- __ b(eq, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r3, r0);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r1, r3);
- __ ldr(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ SmiUntag(r2);
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ ldr(r2, FieldMemOperand(r0, JSArgumentsObject::kLengthOffset));
- __ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ cmp(r2, ip);
- __ b(ne, &create_runtime);
- __ SmiUntag(r2);
- __ mov(r0, r4);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ ldr(r4, ContextMemOperand(r4, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(r2, r4);
- __ b(ne, &create_runtime);
- __ LoadRoot(r4, Heap::kArrayProtectorRootIndex);
- __ ldr(r2, FieldMemOperand(r4, PropertyCell::kValueOffset));
- __ cmp(r2, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ b(ne, &create_runtime);
- __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
- __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
- __ SmiUntag(r2);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r2 and r4 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ ldr(r5, FieldMemOperand(r2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r5);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmp(r5, Operand(FAST_HOLEY_ELEMENTS));
- __ b(hi, &create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ tst(r5, Operand(1));
- __ b(ne, &create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
- __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
- __ SmiUntag(r2);
-
- __ bind(&done_create);
- }
+ Register scratch = r8;
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
- __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
- // Make ip the space we have left. The stack might already be overflowed
- // here which will cause ip to become negative.
- __ sub(ip, sp, ip);
+ __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+ // The stack might already be overflowed here which will cause 'scratch' to
+ // become negative.
+ __ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
- __ cmp(ip, Operand(r2, LSL, kPointerSizeLog2));
+ __ cmp(scratch, Operand(r4, LSL, kPointerSizeLog2));
__ b(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r1 : target
- // -- r0 : args (a FixedArray built from argumentsList)
- // -- r2 : len (number of elements to push from args)
- // -- r3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ mov(r4, Operand(0));
+ __ mov(r6, Operand(0));
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
Label done, loop;
__ bind(&loop);
- __ cmp(r4, r2);
+ __ cmp(r6, r4);
__ b(eq, &done);
- __ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
- __ cmp(r5, ip);
- __ mov(ip, r6, LeaveCC, eq);
- __ Push(ip);
- __ add(r4, r4, Operand(1));
+ __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ cmp(scratch, r5);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
+ __ Push(scratch);
+ __ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
- __ Move(r0, r4);
+ __ add(r0, r0, r6);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r3 : the new.target (for [[Construct]] calls)
@@ -2162,11 +2137,15 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
// -- r2 : start index (to support rest parameters)
// -----------------------------------
+ Register scratch = r6;
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ldr(scratch,
+ MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ cmp(scratch,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2180,11 +2159,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(r5);
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
- __ SmiUntag(r5);
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
@@ -2198,8 +2177,8 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ add(r0, r0, r5);
__ bind(&loop);
{
- __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(scratch);
__ sub(r5, r5, Operand(1), SetCC);
__ b(ne, &loop);
}
@@ -2214,103 +2193,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
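Editor's note, not part of the patch: as a rough model of the forwarding logic above (illustrative C++ only, not V8 code), the caller's argument count comes either from its formal parameter count or from the arguments adaptor frame, the rest-parameter start index is subtracted, and only a positive remainder is copied. The `CallerFrame` struct is a hypothetical stand-in for the fp-relative slots the assembly reads, and it assumes `args.size() == count`.

#include <cstddef>
#include <vector>

// Hypothetical model of the caller's frame; the real code reads these values
// from fp-relative slots and from the SharedFunctionInfo.
struct CallerFrame {
  bool has_arguments_adaptor;
  std::size_t adaptor_length;          // untagged adaptor frame length
  std::size_t formal_parameter_count;  // from the SharedFunctionInfo
  std::vector<int> args;               // caller's arguments, receiver excluded
};

std::size_t ForwardVarargs(const CallerFrame& caller, std::size_t start_index,
                           std::vector<int>& callee_stack) {
  std::size_t count = caller.has_arguments_adaptor
                          ? caller.adaptor_length
                          : caller.formal_parameter_count;
  if (count <= start_index) return 0;  // nothing to forward ("stack_done")
  std::size_t to_copy = count - start_index;
  // Copy the trailing to_copy arguments onto the callee's stack.
  for (std::size_t i = count - to_copy; i < count; ++i) {
    callee_stack.push_back(caller.args[i]);
  }
  return to_copy;
}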
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ ldrb(scratch1, MemOperand(scratch1));
- __ cmp(scratch1, Operand(0));
- __ b(eq, &done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ ldr(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ b(ne, &no_interpreter_frame);
- __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ ldr(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
@@ -2321,21 +2206,19 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorMask));
__ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kNativeByteOffset));
- __ tst(r3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ b(ne, &done_convert);
{
// ----------- S t a t e -------------
@@ -2398,13 +2281,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r0, r3, r4, r5);
- }
-
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(r2);
ParameterCount actual(r0);
ParameterCount expected(r2);
__ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2463,6 +2341,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+ Register scratch = r6;
+
// Relocate arguments down the stack.
{
Label loop, done_loop;
@@ -2470,8 +2350,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(r5, r0);
__ b(gt, &done_loop);
- __ ldr(ip, MemOperand(sp, r4, LSL, kPointerSizeLog2));
- __ str(ip, MemOperand(sp, r5, LSL, kPointerSizeLog2));
+ __ ldr(scratch, MemOperand(sp, r4, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, r5, LSL, kPointerSizeLog2));
__ add(r4, r4, Operand(1));
__ add(r5, r5, Operand(1));
__ b(&loop);
@@ -2486,8 +2366,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ sub(r4, r4, Operand(1), SetCC);
- __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2));
- __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(scratch, MemOperand(r2, r4, LSL, kPointerSizeLog2));
+ __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ add(r0, r0, Operand(1));
__ b(gt, &loop);
}
@@ -2498,36 +2378,31 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r0, r3, r4, r5);
- }
-
// Patch the receiver to [[BoundThis]].
- __ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
- __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
+ __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+
+ __ mov(r3, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
- __ ldr(ip, MemOperand(ip));
- __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3));
+ __ add(pc, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
}
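Editor's note, not part of the patch: the closing sequence above replaces the ip-based jump with an ordinary register, but keeps the same pattern -- load the target Code object through an external reference, then branch to its first instruction, which sits kHeaderSize bytes past the tagged pointer. The arithmetic-only sketch below illustrates that offset; kCodeHeaderSize is a placeholder, not V8's actual header size.

#include <cstdint>

// Worked arithmetic only; a tagged Code pointer is the object's address plus
// a small heap-object tag, and the first instruction starts kHeaderSize bytes
// into the object, hence the add of (kHeaderSize - kHeapObjectTag).
constexpr std::uintptr_t kHeapObjectTag = 1;
constexpr std::uintptr_t kCodeHeaderSize = 96;  // placeholder value

std::uintptr_t CodeEntryFromTaggedPointer(std::uintptr_t tagged_code) {
  // Matches "__ add(pc, r3, Operand(Code::kHeaderSize - kHeapObjectTag))".
  return tagged_code + (kCodeHeaderSize - kHeapObjectTag);
}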
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the target to call (can be any Object).
@@ -2537,10 +2412,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2548,22 +2423,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ tst(r4, Operand(1 << Map::kIsCallable));
__ b(eq, &non_callable);
+  // Check if the target is a Proxy and call the CallProxy external builtin.
__ cmp(r5, Operand(JS_PROXY_TYPE));
__ b(ne, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r0, r3, r4, r5);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ add(r0, r0, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(r5, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ ldr(r5, MemOperand(r5));
+ __ add(pc, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2573,7 +2439,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2585,161 +2451,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
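Editor's note, not part of the patch: after tail-call support is dropped, Generate_Call reduces to a type dispatch. The hedged C++ sketch below summarizes that control flow; the enum, the empty stub functions, and GenerateCall are stand-ins for the instance-type checks and builtins referenced above, not V8 API.

#include <stdexcept>

enum class TargetKind {
  kJSFunction, kBoundFunction, kProxy, kOtherCallable, kNotCallable
};

void CallFunction() {}            // JSFunction fast path
void CallBoundFunction() {}       // unwraps [[BoundTargetFunction]]
void CallProxy() {}               // external CallProxy builtin
void CallAsFunctionDelegate() {}  // wraps other callables

void GenerateCall(TargetKind kind) {
  switch (kind) {
    case TargetKind::kJSFunction:    return CallFunction();
    case TargetKind::kBoundFunction: return CallBoundFunction();
    case TargetKind::kProxy:         return CallProxy();
    case TargetKind::kOtherCallable: return CallAsFunctionDelegate();
    case TargetKind::kNotCallable:
      throw std::runtime_error("ThrowCalledNonCallable");
  }
}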
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r0;
- Register constructor = r1;
- Register new_target = r3;
-
- Register scratch = r2;
- Register scratch2 = r6;
-
- Register spread = r4;
- Register spread_map = r5;
-
- Register spread_len = r5;
-
- Label runtime_call, push_args;
- __ ldr(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ b(ne, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ ldr(scratch2, NativeContextMemOperand());
- __ ldr(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(scratch, scratch2);
- __ b(ne, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ b(ne, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ ldr(scratch2, NativeContextMemOperand());
- __ ldr(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ldr(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch, scratch2);
- __ b(ne, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmp(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ b(hi, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmp(scratch, Operand(FAST_SMI_ELEMENTS));
- __ b(eq, &no_protector_check);
- __ cmp(scratch, Operand(FAST_ELEMENTS));
- __ b(eq, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ b(ne, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ ldr(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor);
- __ Push(new_target);
- __ Push(argc);
- __ Push(spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, r0);
- __ Pop(argc);
- __ Pop(new_target);
- __ Pop(constructor);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ ldr(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ add(argc, argc, spread_len);
- __ sub(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
- __ b(gt, &done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ mov(scratch, Operand(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ b(eq, &done);
- __ add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
- __ ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ add(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
- // -- r1 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r3 to save it.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2779,9 +2490,10 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
- __ ldr(ip, MemOperand(ip));
- __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ __ mov(r2, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ ldr(r2, MemOperand(r2));
+ __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// static
@@ -2855,19 +2567,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
- // -- r1 : the constructor to call (can be any Object)
- // -- r3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
@@ -2919,10 +2618,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
+ Register scratch = r5;
+
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
@@ -2943,8 +2644,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
- __ ldr(ip, MemOperand(r0, 0));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r0, 0));
+ __ push(scratch);
__ cmp(r0, r4); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -2955,7 +2656,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
@@ -2971,9 +2672,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: new target (passed through to callee)
Label copy;
__ bind(&copy);
+
// Adjust load for return address and receiver.
- __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
- __ push(ip);
+ __ ldr(scratch, MemOperand(r0, 2 * kPointerSize));
+ __ push(scratch);
+
__ cmp(r0, fp); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -2982,7 +2685,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
__ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
@@ -2990,7 +2693,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label fill;
__ bind(&fill);
- __ push(ip);
+ __ push(scratch);
__ cmp(sp, r4);
__ b(ne, &fill);
}
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 7e96dc4fb3..619c5de97b 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -227,7 +227,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
@@ -379,7 +379,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
@@ -428,22 +428,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Br(x2);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However, not
- // checking may delay installing ready functions, and always checking would be
- // quite expensive. A good compromise is to first check against stack limit as
- // a cue for an interrupt signal.
- Label ok;
- __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ Bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -555,15 +539,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrb(x4,
- FieldMemOperand(x4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(x4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(w4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ B(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
x4, x5);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ B(&post_instantiation_deopt_entry);
@@ -677,18 +660,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
- __ Bind(&other_result);
// The result is now neither undefined nor an object.
+ __ Bind(&other_result);
+ __ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(w4, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrb(x4,
- FieldMemOperand(x4, SharedFunctionInfo::kFunctionKindByteOffset));
- __ tst(x4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ B(eq, &use_receiver);
-
} else {
+ __ B(ne, &use_receiver);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
}
@@ -741,32 +726,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- x0 : the value to pass to the generator
// -- x1 : the JSGeneratorObject to resume
// -- x2 : the resume mode (tagged)
- // -- x3 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(x3);
- __ AssertGeneratorObject(x1, x3);
+ __ AssertGeneratorObject(x1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(x3, x3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ Cmp(x3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ B(eq, &async_await);
-
__ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ Bind(&async_await);
- __ Str(x0, FieldMemOperand(
- x1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(x1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- x0, x3, kLRHasNotBeenSaved, kDontSaveFPRegs);
-
- __ Bind(&done_store_input);
- // `x3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
@@ -915,7 +882,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- __ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ __ Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
masm->isolate())));
__ Ldr(cp, MemOperand(scratch));
@@ -1033,6 +1000,117 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Drop(args_count, 1);
}
+// Tail-call |function_id| if |smi_entry| == |marker|.
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee if needed, and caller)
+ // -- x3 : new target (preserved for callee if needed, and caller)
+ // -- x1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = x1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ Ldr(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, we interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CompareAndBranch(optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
+ &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ Cmp(
+ optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ Ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
+ &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
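Editor's note, not part of the patch: the decision tree in MaybeTailCallOptimizedCodeSlot is easier to read in plain C++. In the hedged sketch below the OptimizationMarker values mirror the ones tested above; OptimizedCodeSlot, Action, and Decide are illustrative stand-ins for the Smi/WeakCell contents of the feedback vector slot.

enum class OptimizationMarker {
  kNone, kCompileOptimized, kCompileOptimizedConcurrent, kInOptimizationQueue
};

struct OptimizedCodeSlot {
  bool is_marker;                  // Smi marker vs. WeakCell
  OptimizationMarker marker;
  bool cell_is_cleared;            // the WeakCell value has been cleared
  bool marked_for_deoptimization;  // bit in Code::kKindSpecificFlags1Offset
};

enum class Action {
  kFallThrough, kCompileNotConcurrent, kCompileConcurrent, kTryInstall,
  kEvictAndReenter, kTailCallOptimizedCode
};

Action Decide(const OptimizedCodeSlot& slot) {
  if (slot.is_marker) {
    if (slot.marker == OptimizationMarker::kNone) return Action::kFallThrough;
    if (slot.marker == OptimizationMarker::kCompileOptimized)
      return Action::kCompileNotConcurrent;
    if (slot.marker == OptimizationMarker::kCompileOptimizedConcurrent)
      return Action::kCompileConcurrent;
    // Otherwise the marker is kInOptimizationQueue; the real code only tries
    // to install after using the stack limit as an interrupt cue.
    return Action::kTryInstall;
  }
  if (slot.cell_is_cleared) return Action::kFallThrough;
  if (slot.marked_for_deoptimization) return Action::kEvictAndReenter;
  return Action::kTailCallOptimizedCode;
}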
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1051,37 +1129,32 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = x1;
+ Register feedback_vector = x2;
+
+ // Load the feedback vector from the closure.
+ __ Ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp, cp, x1);
+ __ Push(lr, fp, cp, closure);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = x7;
- __ Ldr(x0, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
- __ Ldr(x0, FieldMemOperand(x0, Cell::kValueOffset));
- __ Ldr(
- optimized_code_entry,
- FieldMemOperand(x0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
-
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- Register debug_info = kInterpreterBytecodeArrayRegister;
- Label load_debug_bytecode_array, bytecode_array_loaded;
- DCHECK(!debug_info.is(x0));
- __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
+ __ JumpIfNotSmi(x11, &maybe_load_debug_bytecode_array);
__ Bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -1093,7 +1166,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
@@ -1170,10 +1243,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, x2);
__ Ret();
- // Load debug copy of the bytecode array.
- __ Bind(&load_debug_bytecode_array);
+  // Load the debug copy of the bytecode array if it exists.
+  // kInterpreterBytecodeArrayRegister has already been loaded from
+  // SharedFunctionInfo::kFunctionDataOffset.
+ __ Bind(&maybe_load_debug_bytecode_array);
+ __ Ldr(x10, FieldMemOperand(x11, DebugInfo::kFlagsOffset));
+ __ SmiUntag(x10);
+ __ TestAndBranchIfAllClear(x10, DebugInfo::kHasBreakInfo,
+ &bytecode_array_loaded);
__ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
__ B(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1181,35 +1260,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
__ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(x1, x7, x5);
+ __ Str(x7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, x7, x5);
__ Jump(x7);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ Ldr(w8, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
- &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, x1, x4, x5,
- x13);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1255,7 +1311,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@@ -1280,17 +1336,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. x2, x4, x5, x6 will be modified.
Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(x2); // Pass the spread in a register
+ __ Sub(x0, x0, 1); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
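Editor's note, not part of the patch: the new kWithFinalSpread handling earlier in this hunk pops the spread back off the stack and drops it from the argument count, so CallWithSpread receives it in a register. The tiny C++ sketch below models just those two instructions; the names are illustrative and the vector is assumed non-empty.

#include <vector>

int PopSpread(std::vector<int>& pushed_args, int argc, int* spread_out) {
  *spread_out = pushed_args.back();  // __ Pop(x2): take the spread off the stack
  pushed_args.pop_back();
  return argc - 1;                   // __ Sub(x0, x0, 1): drop it from argc
}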
@@ -1322,7 +1382,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. x5, x4, x6, x7 will be modified.
Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7);
- __ AssertUndefinedOrAllocationSite(x2, x6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(x2); // Pass the spread in a register
+ __ Sub(x0, x0, 1); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(x2, x6);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(x1);
@@ -1446,6 +1512,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee)
+ // -- x3 : new target (preserved for callee)
+ // -- x1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = x1;
+
+ // Get the feedback vector.
+ Register feedback_vector = x2;
+ __ Ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1454,50 +1547,29 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = x1;
- Register index = x2;
+ Register feedback_vector = x2;
// Do we have a valid feedback vector?
- __ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ Ldr(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = x7;
- __ Ldr(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ Ldr(w8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
- &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, x4, x5, x13);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// We found no optimized code.
- Register temp = x5;
- __ Bind(&try_shared);
+ Register entry = x7;
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ Ldrb(temp, FieldMemOperand(
- entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestAndBranchIfAnySet(
- temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
- &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ Move(temp, masm->CodeObject());
- __ Cmp(entry, temp);
+ __ Move(x5, masm->CodeObject());
+ __ Cmp(entry, x5);
__ B(eq, &gotta_call_runtime);
// Install the SFI's code entry.
@@ -1510,15 +1582,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
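Editor's note, not part of the patch: the reworked Generate_CompileLazy now tries, in order, the feedback vector, the shared optimized-code-slot helper, and the SFI's own code before falling back to the runtime. The hedged sketch below states that dispatch order; the booleans stand in for the checks the assembly performs on the closure.

enum class LazyAction {
  kCallRuntimeCompileLazy, kUseOptimizedCodeSlot, kInstallSharedCode
};

LazyAction CompileLazyDispatch(bool has_feedback_vector,
                               bool optimized_slot_has_work,
                               bool sfi_code_is_compile_lazy) {
  if (!has_feedback_vector) return LazyAction::kCallRuntimeCompileLazy;
  if (optimized_slot_has_work) return LazyAction::kUseOptimizedCodeSlot;
  if (!sfi_code_is_compile_lazy) return LazyAction::kInstallSharedCode;
  return LazyAction::kCallRuntimeCompileLazy;
}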
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1657,37 +1720,73 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- // TODO(jbramley): Is it correct (and appropriate) to use safepoint
- // registers here? According to the comment above, we should only need to
- // preserve the registers with parameters.
- __ PushXRegList(kSafepointSavedRegisters);
+ // Preserve possible return result from lazy deopt.
+ __ Push(x0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ PopXRegList(kSafepointSavedRegisters);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ Pop(x0);
}
// Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
__ Drop(1);
- // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
- // into lr before it jumps here.
+ // Jump to the ContinueToBuiltin stub. Deoptimizer::EntryGenerator::Generate
+ // loads this into lr before it jumps here.
__ Br(lr);
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ Str(x0, MemOperand(
+ jssp,
+ config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ ldr(fp,
+ MemOperand(jssp,
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip0);
+ __ Add(jssp, jssp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(lr);
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Br(ip0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
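Editor's note, not part of the patch: in the WithResult variants above, the deoptimizer has pushed every allocatable register plus a fixed continuation frame, leaving a hole that must be overwritten with the lazily-deoptimized call's return value. The arithmetic-only sketch below shows where that slot sits relative to jssp; both constants are placeholders, not V8's actual values.

#include <cstddef>

constexpr std::size_t kPointerSize = 8;
constexpr std::size_t kFixedFrameSize = 4 * kPointerSize;  // placeholder

std::size_t ResultSlotOffset(std::size_t allocatable_register_count) {
  // Matches the Str(x0, MemOperand(jssp, ...)) offset above: skip the saved
  // allocatable registers plus the fixed continuation frame.
  return allocatable_register_count * kPointerSize + kFixedFrameSize;
}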
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1807,16 +1906,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
Register argc = x0;
- Register arg_array = x0;
+ Register arg_array = x2;
Register receiver = x1;
- Register this_arg = x2;
+ Register this_arg = x0;
Register undefined_value = x3;
Register null_value = x4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- // 1. Load receiver into x1, argArray into x0 (if present), remove all
+ // 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1841,19 +1940,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0 : argArray
+ // -- x2 : argArray
// -- x1 : receiver
- // -- x3 : undefined root value
// -- jssp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(receiver, &receiver_not_callable);
- __ Ldr(x10, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(w10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable,
- &receiver_not_callable);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
@@ -1861,10 +1955,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Ccmp(arg_array, undefined_value, ZFlag, ne);
__ B(eq, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target in x3).
- DCHECK(undefined_value.Is(x3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1874,13 +1967,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
DCHECK(receiver.Is(x1));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ Bind(&receiver_not_callable);
- {
- __ Poke(receiver, 0);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1940,14 +2026,14 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ReflectApply");
Register argc = x0;
- Register arguments_list = x0;
+ Register arguments_list = x2;
Register target = x1;
- Register this_argument = x2;
+ Register this_argument = x4;
Register undefined_value = x3;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1974,29 +2060,18 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0 : argumentsList
+ // -- x2 : argumentsList
// -- x1 : target
// -- jssp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(target, &target_not_callable);
- __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Ldr(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable, &target_not_callable);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target in x3).
- DCHECK(undefined_value.Is(x3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ Bind(&target_not_callable);
- {
- __ Poke(target, 0);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -2010,14 +2085,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
Register argc = x0;
- Register arguments_list = x0;
+ Register arguments_list = x2;
Register target = x1;
Register new_target = x3;
Register undefined_value = x4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // 1. Load target into x1 (if present), argumentsList into x2 (if present),
// new.target into x3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -2045,53 +2120,33 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- x0 : argumentsList
+ // -- x2 : argumentsList
// -- x1 : target
// -- x3 : new.target
// -- jssp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(target, &target_not_constructor);
- __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
- &target_not_constructor);
-
- // 3. Make sure the new.target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(new_target, &new_target_not_constructor);
- __ Ldr(x10, FieldMemOperand(new_target, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
- &new_target_not_constructor);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ Bind(&target_not_constructor);
- {
- __ Poke(target, 0);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ Bind(&new_target_not_constructor);
- {
- __ Poke(new_target, 0);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ SmiTag(x10, x0);
- __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(lr, fp);
- __ Push(x11, x1, x10);
- __ Add(fp, jssp,
- StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Push(x11, x1); // x1: function
+ // We do not yet push the number of arguments, to maintain a 16-byte aligned
+ // stack pointer. This is done in step (3) in
+ // Generate_ArgumentsAdaptorTrampoline.
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
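Editor's note, not part of the patch: the alignment comment above is simple arithmetic -- each pushed slot is 8 bytes and jssp must stay 16-byte aligned, so the adaptor frame pushes an even number of slots here and defers the argument-count push. The illustrative check below makes that explicit.

#include <cstddef>

constexpr std::size_t kSlotSize = 8;

bool KeepsStackAligned(std::size_t slots_pushed) {
  return (slots_pushed * kSlotSize) % 16 == 0;  // true only for even counts
}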
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2104,118 +2159,30 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
kPointerSize)));
__ Mov(jssp, fp);
__ Pop(fp, lr);
+
+ // Drop actual parameters and receiver.
+ // TODO(all): This will need to be rounded up to a multiple of two when using
+ // the CSP, as we will have claimed an even number of slots in total for the
+ // parameters.
__ DropBySMI(x10, kXRegSize);
__ Drop(1);
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- x0 : argumentsList
- // -- x1 : target
- // -- x3 : new.target (checked to be constructor or undefined)
- // -- jssp[0] : thisArgument
+ // -- x1 : target
+ // -- x0 : number of parameters on the stack (not including the receiver)
+ // -- x2 : arguments list (a FixedArray)
+ // -- x4 : len (number of elements to push from args)
+ // -- x3 : new.target (for [[Construct]])
// -----------------------------------
+ __ AssertFixedArray(x2);
- Register arguments_list = x0;
- Register target = x1;
- Register new_target = x3;
-
- Register args = x0;
- Register len = x2;
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(arguments_list, &create_runtime);
-
- // Load native context.
- Register native_context = x4;
- __ Ldr(native_context, NativeContextMemOperand());
-
- // Load the map of argumentsList.
- Register arguments_list_map = x2;
- __ Ldr(arguments_list_map,
- FieldMemOperand(arguments_list, HeapObject::kMapOffset));
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ Ldr(x10, ContextMemOperand(native_context,
- Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(x11, ContextMemOperand(native_context,
- Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Cmp(arguments_list_map, x10);
- __ Ccmp(arguments_list_map, x11, ZFlag, ne);
- __ B(eq, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(arguments_list_map, x10, JS_ARRAY_TYPE);
- __ B(eq, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ Bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(target, new_target, arguments_list);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(new_target, target);
- __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
- FixedArray::kLengthOffset));
- }
- __ B(&done_create);
-
- // Try to create the list from an arguments object.
- __ Bind(&create_arguments);
- __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
- JSArgumentsObject::kLengthOffset));
- __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
- __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
- __ CompareAndBranch(len, x11, ne, &create_runtime);
- __ Mov(args, x10);
- __ B(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ Bind(&create_holey_array);
- // -- x2 : arguments_list_map
- // -- x4 : native_context
- Register arguments_list_prototype = x2;
- __ Ldr(arguments_list_prototype,
- FieldMemOperand(arguments_list_map, Map::kPrototypeOffset));
- __ Ldr(x10, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Cmp(arguments_list_prototype, x10);
- __ B(ne, &create_runtime);
- __ LoadRoot(x10, Heap::kArrayProtectorRootIndex);
- __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, PropertyCell::kValueOffset));
- __ Cmp(x11, Isolate::kProtectorValid);
- __ B(ne, &create_runtime);
- __ Ldrsw(len,
- UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
- __ B(&done_create);
-
- // Try to create the list from a JSArray object.
- __ Bind(&create_array);
- __ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(x10);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- // Check if it is a holey array, the order of the cmp is important as
- // anything higher than FAST_HOLEY_ELEMENTS will fall back to runtime.
- __ Cmp(x10, FAST_HOLEY_ELEMENTS);
- __ B(hi, &create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ Tbnz(x10, 0, &create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ Ldrsw(len,
- UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
-
- __ Bind(&done_create);
- }
+ Register arguments_list = x2;
+ Register argc = x0;
+ Register len = x4;
// Check for stack overflow.
{
@@ -2233,21 +2200,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Bind(&done);
}
- // ----------- S t a t e -------------
- // -- x0 : args (a FixedArray built from argumentsList)
- // -- x1 : target
- // -- x2 : len (number of elements to push from args)
- // -- x3 : new.target (checked to be constructor or undefined)
- // -- jssp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
Label done, push, loop;
- Register src = x4;
+ Register src = x5;
- __ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Mov(x0, len); // The 'len' argument for Call() or Construct().
+ __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(argc, argc, len); // The 'len' argument for Call() or Construct().
__ Cbz(len, &done);
Register the_hole_value = x11;
Register undefined_value = x12;
@@ -2266,28 +2225,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Bind(&done);
}
- // ----------- S t a t e -------------
- // -- x0 : argument count (len)
- // -- x1 : target
- // -- x3 : new.target (checked to be constructor or undefined)
- // -- jssp[0] : args[len-1]
- // -- jssp[8] : args[len-2]
- // ... : ...
- // -- jssp[8*(len-2)] : args[1]
- // -- jssp[8*(len-1)] : args[0]
- // -----------------------------------
-
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(new_target, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
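The push loop above reads elements out of the FixedArray and substitutes undefined for the-hole before pushing them. A stand-alone model of that substitution (the sentinel values are illustrative, not V8's actual representations):

#include <cassert>
#include <vector>

// Stand-ins for V8's the-hole and undefined sentinels (hypothetical values).
constexpr int kTheHole = -1;
constexpr int kUndefined = 0;

// Push every element of the backing store onto the "stack", turning holes
// into undefined, the way the varargs push loop does.
std::vector<int> PushArguments(const std::vector<int>& fixed_array) {
  std::vector<int> stack;
  for (int value : fixed_array) {
    stack.push_back(value == kTheHole ? kUndefined : value);
  }
  return stack;
}

int main() {
  std::vector<int> out = PushArguments({1, kTheHole, 3});
  assert(out.size() == 3);
  assert(out[1] == kUndefined);
  return 0;
}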
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x3 : the new.target (for [[Construct]] calls)
@@ -2346,100 +2290,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ Ldrb(scratch1, MemOperand(scratch1));
- __ Cmp(scratch1, Operand(0));
- __ B(eq, &done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ Ldr(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ B(ne, &no_interpreter_frame);
- __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ Ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ B(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
@@ -2452,8 +2305,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w3, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
+ __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::kClassConstructorMask,
&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2463,8 +2315,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ TestAndBranchIfAnySet(w3,
- (1 << SharedFunctionInfo::kNative) |
- (1 << SharedFunctionInfo::kStrictModeFunction),
+ SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask,
&done_convert);
{
// ----------- S t a t e -------------
@@ -2527,10 +2379,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, x0, x3, x4, x5);
- }
-
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(x0);
@@ -2625,18 +2473,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(x1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, x0, x3, x4, x5);
- }
-
// Patch the receiver to [[BoundThis]].
__ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
@@ -2654,8 +2497,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the target to call (can be any Object).
@@ -2665,32 +2507,24 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
__ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
__ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+  // Check if the target is a proxy and, if so, tail-call the CallProxy builtin.
__ Cmp(x5, JS_PROXY_TYPE);
__ B(ne, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, x0, x3, x4, x5);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(x1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ Add(x0, x0, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ Mov(x5, ExternalReference(Builtins::kCallProxy, masm->isolate()));
+ __ Ldr(x5, MemOperand(x5));
+ __ Add(x6, x5, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x6);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2700,7 +2534,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2712,155 +2546,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
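To summarize the dispatch order implemented above, here is a conceptual sketch (simplified types, not V8's): JSFunction and JSBoundFunction go to their dedicated builtins, callable proxies go to the new CallProxy builtin, any other callable object goes through the call-as-function delegate, and everything else throws.

#include <cassert>
#include <stdexcept>
#include <string>

enum class InstanceType { kJSFunction, kJSBoundFunction, kJSProxy, kOtherObject };

struct Target {
  InstanceType type;
  bool callable;  // models the Map::kIsCallable bit
};

// Mirrors the dispatch order in Builtins::Generate_Call (conceptual only).
std::string Dispatch(const Target& t) {
  switch (t.type) {
    case InstanceType::kJSFunction:      return "CallFunction";
    case InstanceType::kJSBoundFunction: return "CallBoundFunction";
    case InstanceType::kJSProxy:
      if (t.callable) return "CallProxy";
      break;
    default:
      break;
  }
  if (t.callable) return "CallFunctionDelegate";  // call_as_function_delegate
  throw std::runtime_error("TypeError: called non-callable");
}

int main() {
  assert(Dispatch({InstanceType::kJSFunction, true}) == "CallFunction");
  assert(Dispatch({InstanceType::kJSProxy, true}) == "CallProxy");
  assert(Dispatch({InstanceType::kOtherObject, true}) == "CallFunctionDelegate");
  return 0;
}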
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = x0;
- Register constructor = x1;
- Register new_target = x3;
-
- Register scratch = x2;
- Register scratch2 = x6;
-
- Register spread = x4;
- Register spread_map = x5;
-
- Register spread_len = x5;
-
- Label runtime_call, push_args;
- __ Peek(spread, 0);
- __ JumpIfSmi(spread, &runtime_call);
- __ Ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ B(ne, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ Ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ Ldr(scratch2, NativeContextMemOperand());
- __ Ldr(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Cmp(scratch, scratch2);
- __ B(ne, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
- __ B(ne, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ Ldr(scratch2, NativeContextMemOperand());
- __ Ldr(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ Ldr(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ Cmp(scratch, scratch2);
- __ B(ne, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ Ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ Cmp(scratch, FAST_HOLEY_ELEMENTS);
- __ B(hi, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ Cmp(scratch, FAST_SMI_ELEMENTS);
- __ B(eq, &no_protector_check);
- __ Cmp(scratch, FAST_ELEMENTS);
- __ B(eq, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
- __ B(ne, &runtime_call);
-
- __ Bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ Ldrsw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
- __ Ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ B(&push_args);
-
- __ Bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ Mov(spread, x0);
- __ Pop(argc, new_target, constructor);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ Ldrsw(spread_len,
- UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
-
- __ Bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ Add(argc, argc, spread_len);
- __ Sub(argc, argc, 1);
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ Sub(scratch, masm->StackPointer(), scratch);
- // Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
- __ B(gt, &done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ Bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ Mov(scratch, 0);
- Label done, push, loop;
- __ Bind(&loop);
- __ Cmp(scratch, spread_len);
- __ B(eq, &done);
- __ Add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
- __ Ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ Add(scratch, scratch, Operand(1));
- __ B(&loop);
- __ Bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
- // -- x1 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r3 to save it.
- __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
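The removed helper only took its fast path when iterating the spread could not observe user code (fast JSArray, original Array.prototype, intact iterator and array protectors); everything else fell back to the runtime. A boolean sketch of that decision, with field names chosen purely for illustration:

#include <cassert>

struct SpreadInfo {
  bool is_js_array;
  bool has_initial_array_prototype;
  bool array_iterator_protector_valid;
  bool initial_iterator_map_unchanged;
  bool fast_elements_kind;  // at most HOLEY_ELEMENTS
  bool holey;               // holey kinds additionally need the ArrayProtector
  bool array_protector_valid;
};

// True when iterating the spread cannot run user code, so its backing store
// can be pushed directly instead of going through the runtime.
bool CanUseFastSpreadPath(const SpreadInfo& s) {
  if (!s.is_js_array) return false;
  if (!s.has_initial_array_prototype) return false;
  if (!s.array_iterator_protector_valid) return false;
  if (!s.initial_iterator_map_unchanged) return false;
  if (!s.fast_elements_kind) return false;
  if (s.holey && !s.array_protector_valid) return false;
  return true;
}

int main() {
  SpreadInfo packed{true, true, true, true, true, false, false};
  assert(CanUseFastSpreadPath(packed));
  SpreadInfo holey_no_protector{true, true, true, true, true, true, false};
  assert(!CanUseFastSpreadPath(holey_no_protector));
  return 0;
}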
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2982,19 +2667,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
- // -- x1 : the constructor to call (can be any Object)
- // -- x3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
@@ -3043,115 +2715,152 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- x3 : new target (passed through to callee)
// -----------------------------------
+ // The frame we are about to construct will look like:
+ //
+ // slot Adaptor frame
+ // +-----------------+--------------------------------
+ // -n-1 | receiver | ^
+ // | (parameter 0) | |
+ // |- - - - - - - - -| |
+ // -n | | Caller
+ // ... | ... | frame slots --> actual args
+ // -2 | parameter n-1 | |
+ // |- - - - - - - - -| |
+ // -1 | parameter n | v
+ // -----+-----------------+--------------------------------
+ // 0 | return addr | ^
+ // |- - - - - - - - -| |
+ // 1 | saved frame ptr | <-- frame ptr |
+ // |- - - - - - - - -| |
+ // 2 |Frame Type Marker| |
+ // |- - - - - - - - -| |
+ // 3 | function | Callee
+ // |- - - - - - - - -| frame slots
+ // 4 | num of | |
+ // | actual args | |
+ // |- - - - - - - - -| |
+ // [5] | [padding] | |
+ // |-----------------+---- |
+ // 5+pad | receiver | ^ |
+ // | (parameter 0) | | |
+ // |- - - - - - - - -| | |
+ // 6+pad | parameter 1 | | |
+ // |- - - - - - - - -| Frame slots ----> expected args
+ // 7+pad | parameter 2 | | |
+ // |- - - - - - - - -| | |
+ // | | | |
+ // ... | ... | | |
+ // | parameter m | | |
+ // |- - - - - - - - -| | |
+ // | [undefined] | | |
+ // |- - - - - - - - -| | |
+ // | | | |
+ // | ... | | |
+ // | [undefined] | v <-- stack ptr v
+ // -----+-----------------+---------------------------------
+ //
+ // There is an optional slot of padding to ensure stack alignment.
+ // If the number of expected arguments is larger than the number of actual
+ // arguments, the remaining expected slots will be filled with undefined.
+
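The claim below reserves slots for the expected arguments plus two more (argument count and receiver), rounded up to an even count so the stack pointer stays 16-byte aligned; the Add/Bic pair implements the round-up. A small check of that arithmetic:

#include <cassert>

// Round-up used by the adaptor: slots = expected + 2, then make it even.
int ClaimedSlots(int argc_expected) {
  int slots = argc_expected + 2;  // + argc slot + receiver slot
  return (slots + 1) & ~1;        // Add(scratch1, scratch1, 1); Bic(scratch1, scratch1, 1)
}

int main() {
  assert(ClaimedSlots(0) == 2);  // already even
  assert(ClaimedSlots(1) == 4);  // rounded up, one padding slot
  assert(ClaimedSlots(2) == 4);
  assert(ClaimedSlots(3) == 6);
  return 0;
}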
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
Register code_entry = x10;
- Label invoke, dont_adapt_arguments, stack_overflow;
+ Label dont_adapt_arguments, stack_overflow;
- Label enough, too_few;
- __ Cmp(argc_actual, argc_expected);
- __ B(lt, &too_few);
+ Label enough_arguments;
__ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
- { // Enough parameters: actual >= expected
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
-
- Register copy_start = x10;
- Register copy_end = x11;
- Register copy_to = x12;
- Register scratch1 = x13, scratch2 = x14;
-
- __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
-
- // Adjust for fp, lr, and the receiver.
- __ Add(copy_start, fp, 3 * kPointerSize);
- __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
- __ Sub(copy_end, copy_start, scratch2);
- __ Sub(copy_end, copy_end, kPointerSize);
- __ Mov(copy_to, jssp);
-
- // Claim space for the arguments, the receiver, and one extra slot.
- // The extra slot ensures we do not write under jssp. It will be popped
- // later.
- __ Add(scratch1, scratch2, 2 * kPointerSize);
- __ Claim(scratch1, 1);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_start, -2 * kPointerSize, PreIndex));
- __ Stp(scratch1, scratch2,
- MemOperand(copy_to, -2 * kPointerSize, PreIndex));
- __ Cmp(copy_start, copy_end);
- __ B(hi, &copy_2_by_2);
-
- // Correct the space allocated for the extra slot.
- __ Drop(1);
-
- __ B(&invoke);
- }
-
- { // Too few parameters: Actual < expected
- __ Bind(&too_few);
-
- Register copy_from = x10;
- Register copy_end = x11;
- Register copy_to = x12;
- Register scratch1 = x13, scratch2 = x14;
-
- EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, x2, x10, &stack_overflow);
-
- __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
- __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
-
- // Adjust for fp, lr, and the receiver.
- __ Add(copy_from, fp, 3 * kPointerSize);
- __ Add(copy_from, copy_from, argc_actual);
- __ Mov(copy_to, jssp);
- __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
- __ Sub(copy_end, copy_end, argc_actual);
-
- // Claim space for the arguments, the receiver, and one extra slot.
- // The extra slot ensures we do not write under jssp. It will be popped
- // later.
- __ Add(scratch1, scratch2, 2 * kPointerSize);
- __ Claim(scratch1, 1);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_from, -2 * kPointerSize, PreIndex));
- __ Stp(scratch1, scratch2,
- MemOperand(copy_to, -2 * kPointerSize, PreIndex));
- __ Cmp(copy_to, copy_end);
- __ B(hi, &copy_2_by_2);
-
- __ Mov(copy_to, copy_end);
-
- // Fill the remaining expected arguments with undefined.
- __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
- __ Add(copy_end, jssp, kPointerSize);
-
- Label fill;
- __ Bind(&fill);
- __ Stp(scratch1, scratch1,
- MemOperand(copy_to, -2 * kPointerSize, PreIndex));
- __ Cmp(copy_to, copy_end);
- __ B(hi, &fill);
-
- // Correct the space allocated for the extra slot.
- __ Drop(1);
- }
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register argc_to_copy = x13;
+ Register argc_unused_actual = x14;
+ Register scratch1 = x15, scratch2 = x16;
+
+ // We need slots for the expected arguments, with two extra slots for the
+ // number of actual arguments and the receiver.
+ __ RecordComment("-- Stack check --");
+ __ Add(scratch1, argc_expected, 2);
+ Generate_StackOverflowCheck(masm, scratch1, scratch2, &stack_overflow);
+
+ // Round up number of slots to be even, to maintain stack alignment.
+ __ RecordComment("-- Allocate callee frame slots --");
+ __ Add(scratch1, scratch1, 1);
+ __ Bic(scratch1, scratch1, 1);
+ __ Claim(scratch1, kPointerSize);
+
+ __ Mov(copy_to, jssp);
+
+ // Preparing the expected arguments is done in four steps, the order of
+ // which is chosen so we can use LDP/STP and avoid conditional branches as
+ // much as possible.
+
+ // (1) If we don't have enough arguments, fill the remaining expected
+ // arguments with undefined, otherwise skip this step.
+ __ Subs(scratch1, argc_actual, argc_expected);
+ __ Csel(argc_unused_actual, xzr, scratch1, lt);
+ __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
+ __ B(ge, &enough_arguments);
+
+ // Fill the remaining expected arguments with undefined.
+ __ RecordComment("-- Fill slots with undefined --");
+ __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
+ // We might write one slot extra, but that is ok because we'll overwrite it
+ // below.
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &fill);
+
+ // Correct copy_to, for the case where we wrote one additional slot.
+ __ Mov(copy_to, copy_end);
+
+ __ Bind(&enough_arguments);
+ // (2) Copy all of the actual arguments, or as many as we need.
+ __ RecordComment("-- Copy actual arguments --");
+ __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
+ __ Add(copy_from, fp, 2 * kPointerSize);
+ // Adjust for difference between actual and expected arguments.
+ __ Add(copy_from, copy_from,
+ Operand(argc_unused_actual, LSL, kPointerSizeLog2));
+
+ // Copy arguments. We use load/store pair instructions, so we might overshoot
+ // by one slot, but since we copy the arguments starting from the last one, if
+ // we do overshoot, the extra slot will be overwritten later by the receiver.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, 2 * kPointerSize, PostIndex));
+ __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
+ __ Cmp(copy_end, copy_to);
+ __ B(hi, &copy_2_by_2);
+
+ // (3) Store number of actual arguments and padding. The padding might be
+ // unnecessary, in which case it will be overwritten by the receiver.
+ __ RecordComment("-- Store number of args and padding --");
+ __ SmiTag(scratch1, argc_actual);
+ __ Stp(xzr, scratch1, MemOperand(fp, -4 * kPointerSize));
+
+ // (4) Store receiver. Calculate target address from jssp to avoid checking
+ // for padding. Storing the receiver will overwrite either the extra slot
+ // we copied with the actual arguments, if we did copy one, or the padding we
+ // stored above.
+ __ RecordComment("-- Store receiver --");
+ __ Add(copy_from, fp, 2 * kPointerSize);
+ __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
+ __ Str(scratch1, MemOperand(jssp, argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
- __ Bind(&invoke);
+ __ RecordComment("-- Call entry point --");
__ Mov(argc_actual, argc_expected);
// x0 : expected number of arguments
// x1 : function (passed through to callee)
@@ -3167,11 +2876,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Ret();
// Call the entry point without adapting the arguments.
+ __ RecordComment("-- Call without adapting args --");
__ Bind(&dont_adapt_arguments);
__ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Jump(code_entry);
__ Bind(&stack_overflow);
+ __ RecordComment("-- Stack overflow --");
{
FrameScope frame(masm, StackFrame::MANUAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 1875958d64..571d562422 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -39,8 +39,12 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
MachineType::Pointer());
Node* shared =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
- Node* formal_parameter_count = LoadSharedFunctionInfoSpecialField(
- shared, SharedFunctionInfo::kFormalParameterCountOffset, mode);
+ CSA_SLOW_ASSERT(this, HasInstanceType(shared, SHARED_FUNCTION_INFO_TYPE));
+ Node* formal_parameter_count =
+ LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
+ MachineType::Int32());
+ formal_parameter_count = Word32ToParameter(formal_parameter_count, mode);
+
argument_count.Bind(formal_parameter_count);
Node* marker_or_function = LoadBufferObject(
frame_ptr_above, CommonFrameConstants::kContextOrFrameTypeOffset);
@@ -77,13 +81,13 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
Node* size =
empty ? IntPtrConstant(base_size)
- : ElementOffsetFromIndex(element_count, FAST_ELEMENTS, mode,
+ : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode,
base_size + FixedArray::kHeaderSize);
Node* result = Allocate(size);
Comment("Initialize arguments object");
StoreMapNoWriteBarrier(result, map);
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
- StoreObjectField(result, JSArray::kPropertiesOffset, empty_fixed_array);
+ StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array);
Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
smi_arguments_count);
@@ -98,7 +102,7 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
Node* parameter_map = nullptr;
if (parameter_map_count != nullptr) {
Node* parameter_map_offset = ElementOffsetFromIndex(
- arguments_count, FAST_ELEMENTS, mode, FixedArray::kHeaderSize);
+ arguments_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize);
parameter_map = InnerAllocate(arguments, parameter_map_offset);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
@@ -165,7 +169,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
Node* rest_count =
IntPtrOrSmiSub(argument_count, formal_parameter_count, mode);
Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* const array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
&no_rest_parameters);
@@ -314,10 +319,10 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Comment("Fill in non-mapped parameters");
Node* argument_offset =
- ElementOffsetFromIndex(argument_count, FAST_ELEMENTS, mode,
+ ElementOffsetFromIndex(argument_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
Node* mapped_offset =
- ElementOffsetFromIndex(mapped_count, FAST_ELEMENTS, mode,
+ ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
CodeStubArguments arguments(this, argument_count, frame_ptr, mode);
VARIABLE(current_argument, MachineType::PointerRepresentation());
@@ -355,7 +360,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
BitcastTaggedToWord(map_array),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
Node* zero_offset = ElementOffsetFromIndex(
- zero, FAST_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+ zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(var_list2, mapped_offset, zero_offset,
[this, the_hole, elements, adjusted_map_array, &context_index,
mode](Node* offset) {
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 8c95007622..f1a07ceff0 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -84,9 +84,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
void ReducePostLoopAction() {
Label ok(this);
GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
- CallRuntime(Runtime::kThrowTypeError, context(),
- SmiConstant(MessageTemplate::kReduceNoInitial));
- Unreachable();
+ ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
BIND(&ok);
}
@@ -117,29 +115,30 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast);
{
+ GotoIf(SmiNotEqual(LoadJSArrayLength(a()), to_.value()), &runtime);
kind = EnsureArrayPushable(a(), &runtime);
- GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
+ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- BuildAppendJSArray(FAST_SMI_ELEMENTS, a(), k_value, &runtime);
+ BuildAppendJSArray(HOLEY_SMI_ELEMENTS, a(), k_value, &runtime);
Goto(&after_work);
}
BIND(&object_push_pre);
{
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
- &double_push, &object_push);
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
+ &object_push);
}
BIND(&object_push);
{
- BuildAppendJSArray(FAST_ELEMENTS, a(), k_value, &runtime);
+ BuildAppendJSArray(HOLEY_ELEMENTS, a(), k_value, &runtime);
Goto(&after_work);
}
BIND(&double_push);
{
- BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, a(), k_value, &runtime);
+ BuildAppendJSArray(HOLEY_DOUBLE_ELEMENTS, a(), k_value, &runtime);
Goto(&after_work);
}
@@ -168,11 +167,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
Node* a = TypedArraySpeciesCreateByLength(context(), o(), len_);
// In the Spec and our current implementation, the length check is already
- // performed in TypedArraySpeciesCreate. Repeating the check here to
- // keep this invariant local.
- // TODO(tebbi): Change this to a release mode check.
- CSA_ASSERT(
- this, WordEqual(len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
+ // performed in TypedArraySpeciesCreate.
+ CSA_ASSERT(this,
+ SmiLessThanOrEqual(
+ len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
fast_typed_array_target_ = Word32Equal(LoadInstanceType(LoadElements(o_)),
LoadInstanceType(LoadElements(a)));
a_.Bind(a);
@@ -181,70 +179,101 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// SpecCompliantMapProcessor.
- // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
- Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
+ // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
+ Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mapped_value);
return a();
}
Node* FastMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// FastMapProcessor.
- // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
- Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
-
- Label finished(this);
- Node* kind = nullptr;
- Node* elements = nullptr;
+ // ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
+ Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
- // If a() is a JSArray, we can have a fast path.
// mode is SMI_PARAMETERS because k has tagged representation.
ParameterMode mode = SMI_PARAMETERS;
- Label fast(this);
- Label runtime(this);
- Label object_push_pre(this), object_push(this), double_push(this);
- BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
- &fast, &runtime);
+ Label runtime(this), finished(this);
+ Label transition_pre(this), transition_smi_fast(this),
+ transition_smi_double(this);
+ Label array_not_smi(this), array_fast(this), array_double(this);
+
+ Node* kind = LoadMapElementsKind(LoadMap(a()));
+ Node* elements = LoadElements(a());
+ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS), &array_not_smi);
+ TryStoreArrayElement(HOLEY_SMI_ELEMENTS, mode, &transition_pre, elements, k,
+ mapped_value);
+ Goto(&finished);
+
+ BIND(&transition_pre);
+ {
+ // array is smi. Value is either tagged or a heap number.
+ CSA_ASSERT(this, TaggedIsNotSmi(mapped_value));
+ GotoIf(IsHeapNumberMap(LoadMap(mapped_value)), &transition_smi_double);
+ Goto(&transition_smi_fast);
+ }
- BIND(&fast);
+ BIND(&array_not_smi);
{
- kind = EnsureArrayPushable(a(), &runtime);
- elements = LoadElements(a());
- GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
- &object_push_pre);
- TryStoreArrayElement(FAST_SMI_ELEMENTS, mode, &runtime, elements, k,
- mappedValue);
- Goto(&finished);
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &array_double,
+ &array_fast);
}
- BIND(&object_push_pre);
+ BIND(&transition_smi_fast);
{
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
- &object_push);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ Node* const native_context = LoadNativeContext(context());
+ Node* const fast_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX);
+
+      // Since this transition is only a map change, just do it right here.
+      // Because a() doesn't have an allocation site, it's safe to store the
+      // map directly; otherwise we would call TransitionElementsKind().
+ StoreMap(a(), fast_map);
+ Goto(&array_fast);
}
- BIND(&object_push);
+ BIND(&array_fast);
{
- TryStoreArrayElement(FAST_ELEMENTS, mode, &runtime, elements, k,
- mappedValue);
+ TryStoreArrayElement(HOLEY_ELEMENTS, mode, &runtime, elements, k,
+ mapped_value);
Goto(&finished);
}
- BIND(&double_push);
+ BIND(&transition_smi_double);
{
- TryStoreArrayElement(FAST_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
- mappedValue);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ Node* const native_context = LoadNativeContext(context());
+ Node* const double_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX);
+ CallStub(CodeFactory::TransitionElementsKind(
+ isolate(), HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, true),
+ context(), a(), double_map);
+ Goto(&array_double);
+ }
+
+ BIND(&array_double);
+ {
+ // TODO(mvstanton): If we use a variable for elements and bind it
+ // appropriately, we can avoid an extra load of elements by binding the
+ // value only after a transition from smi to double.
+ elements = LoadElements(a());
+ // If the mapped_value isn't a number, this will bail out to the runtime
+ // to make the transition.
+ TryStoreArrayElement(HOLEY_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
+ mapped_value);
Goto(&finished);
}
BIND(&runtime);
{
- // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mapped_value).
+ CallRuntime(Runtime::kCreateDataProperty, context(), a(), k,
+ mapped_value);
Goto(&finished);
}
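The rewritten FastMapProcessor stores each mapped value at the output array's current elements kind and transitions only when forced to: a Smi-kind array takes a heap number by moving to HOLEY_DOUBLE_ELEMENTS, takes any other heap object by a plain map change to HOLEY_ELEMENTS, and a non-number landing in a double-kind array goes through the runtime fallback. A conceptual sketch of that lattice (simplified enums, not V8's):

#include <cassert>

enum class ElementsKind { kHoleySmi, kHoleyDouble, kHoley };
enum class ValueKind { kSmi, kHeapNumber, kOtherHeapObject };

// Which elements kind the output array must have before the store of
// mapped_value can succeed without bailing out (conceptual lattice only).
ElementsKind KindAfterStore(ElementsKind current, ValueKind value) {
  if (current == ElementsKind::kHoleySmi && value == ValueKind::kHeapNumber)
    return ElementsKind::kHoleyDouble;  // transition_smi_double path
  if (current == ElementsKind::kHoleySmi && value == ValueKind::kOtherHeapObject)
    return ElementsKind::kHoley;        // transition_smi_fast: map change only
  if (current == ElementsKind::kHoleyDouble && value == ValueKind::kOtherHeapObject)
    return ElementsKind::kHoley;        // handled via the runtime fallback
  return current;                       // store fits the existing kind
}

int main() {
  assert(KindAfterStore(ElementsKind::kHoleySmi, ValueKind::kSmi) ==
         ElementsKind::kHoleySmi);
  assert(KindAfterStore(ElementsKind::kHoleySmi, ValueKind::kHeapNumber) ==
         ElementsKind::kHoleyDouble);
  assert(KindAfterStore(ElementsKind::kHoley, ValueKind::kHeapNumber) ==
         ElementsKind::kHoley);
  return 0;
}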
@@ -254,12 +283,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
Node* TypedArrayMapProcessor(Node* k_value, Node* k) {
- // 8. c. Let mappedValue be ? Call(callbackfn, T, Ā« kValue, k, O Ā»).
- Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
- callbackfn(), this_arg(), k_value, k, o());
+ // 8. c. Let mapped_value be ? Call(callbackfn, T, Ā« kValue, k, O Ā»).
+ Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
- // 8. d. Perform ? Set(A, Pk, mappedValue, true).
+ // 8. d. Perform ? Set(A, Pk, mapped_value, true).
// Since we know that A is a TypedArray, this always ends up in
// #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
// tc39.github.io/ecma262/#sec-integerindexedelementset .
@@ -267,28 +296,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&fast);
// #sec-integerindexedelementset 3. Let numValue be ? ToNumber(value).
- Node* num_value = ToNumber(context(), mappedValue);
+ Node* num_value = ToNumber(context(), mapped_value);
      // The only way this can bail out is because of a detached buffer.
- EmitElementStore(
- a(), k, num_value, false, source_elements_kind_,
- KeyedAccessStoreMode::STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
- &detached);
+ EmitElementStore(a(), k, num_value, false, source_elements_kind_,
+ KeyedAccessStoreMode::STANDARD_STORE, &detached);
Goto(&done);
BIND(&slow);
- CallRuntime(Runtime::kSetProperty, context(), a(), k, mappedValue,
+ CallRuntime(Runtime::kSetProperty, context(), a(), k, mapped_value,
SmiConstant(STRICT));
Goto(&done);
BIND(&detached);
- {
- // tc39.github.io/ecma262/#sec-integerindexedelementset
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kDetachedOperation),
- name_string_);
- Unreachable();
- }
+ // tc39.github.io/ecma262/#sec-integerindexedelementset
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&done);
return a();
@@ -348,7 +370,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O)
- o_ = CallStub(CodeFactory::ToObject(isolate()), context(), receiver());
+ o_ = CallBuiltin(Builtins::kToObject, context(), receiver());
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -372,21 +394,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
BIND(&throw_null_undefined_exception);
- {
- CallRuntime(
- Runtime::kThrowTypeError, context(),
- SmiConstant(MessageTemplate::kCalledOnNullOrUndefined),
- HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name)));
- Unreachable();
- }
+ ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined, name);
BIND(&type_exception);
- {
- CallRuntime(Runtime::kThrowTypeError, context(),
- SmiConstant(MessageTemplate::kCalledNonCallable),
- callbackfn());
- Unreachable();
- }
+ ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
+ callbackfn());
BIND(&done);
@@ -432,8 +444,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
ForEachDirection direction = ForEachDirection::kForward) {
- name_string_ =
- HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name));
+ name_ = name;
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -457,27 +468,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
&throw_not_callable);
BIND(&throw_not_typed_array);
- {
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kNotTypedArray));
- Unreachable();
- }
+ ThrowTypeError(context_, MessageTemplate::kNotTypedArray);
BIND(&throw_detached);
- {
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kDetachedOperation),
- name_string_);
- Unreachable();
- }
+ ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&throw_not_callable);
- {
- CallRuntime(Runtime::kThrowTypeError, context_,
- SmiConstant(MessageTemplate::kCalledNonCallable),
- callbackfn_);
- Unreachable();
- }
+ ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);
Label unexpected_instance_type(this);
BIND(&unexpected_instance_type);
@@ -592,7 +589,6 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
default:
UNREACHABLE();
- return static_cast<ElementsKind>(-1);
}
}
@@ -664,13 +660,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Fast case: load the element directly from the elements FixedArray
// and call the callback if the element is not the hole.
- DCHECK(kind == FAST_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS);
- int base_size = kind == FAST_ELEMENTS
+ DCHECK(kind == PACKED_ELEMENTS || kind == PACKED_DOUBLE_ELEMENTS);
+ int base_size = kind == PACKED_ELEMENTS
? FixedArray::kHeaderSize
: (FixedArray::kHeaderSize - kHeapObjectTag);
Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
Node* value = nullptr;
- if (kind == FAST_ELEMENTS) {
+ if (kind == PACKED_ELEMENTS) {
value = LoadObjectField(elements, offset);
GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
} else {
@@ -712,13 +708,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* o_map = LoadMap(o());
Node* bit_field2 = LoadMapBitField2(o_map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS),
&maybe_double_elements, &fast_elements);
ParameterMode mode = OptimalParameterMode();
BIND(&fast_elements);
{
- VisitAllFastElementsOneKind(FAST_ELEMENTS, processor, slow, mode,
+ VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
direction);
action(this);
@@ -728,12 +724,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
BIND(&maybe_double_elements);
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_DOUBLE_ELEMENTS), slow,
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_DOUBLE_ELEMENTS), slow,
&fast_double_elements);
BIND(&fast_double_elements);
{
- VisitAllFastElementsOneKind(FAST_DOUBLE_ELEMENTS, processor, slow, mode,
+ VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
direction);
action(this);
@@ -759,7 +755,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
GotoIf(WordNotEqual(proto, initial_array_prototype), &runtime);
Node* species_protector = SpeciesProtectorConstant();
- Node* value = LoadObjectField(species_protector, Cell::kValueOffset);
+ Node* value =
+ LoadObjectField(species_protector, PropertyCell::kValueOffset);
Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
GotoIf(WordEqual(value, protector_invalid), &runtime);
@@ -767,10 +764,13 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
GotoIf(SmiAbove(len, SmiConstant(JSArray::kInitialMaxFastElementArray)),
&runtime);
+ // We need to be conservative and start with holey because the builtins
+  // that create output arrays aren't guaranteed to be called for every
+ // element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- a_.Bind(AllocateJSArray(FAST_SMI_ELEMENTS, array_map, len, len, nullptr,
+ a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
Goto(&done);
@@ -797,7 +797,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* new_target_ = nullptr;
Node* argc_ = nullptr;
Node* fast_typed_array_target_ = nullptr;
- Node* name_string_ = nullptr;
+ const char* name_ = nullptr;
Variable k_;
Variable a_;
Variable to_;
@@ -868,8 +868,8 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
&return_undefined);
int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(
- new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
+ Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size);
if (Is64()) {
Node* double_hole = Int64Constant(kHoleNanInt64);
StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
@@ -935,10 +935,10 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
{
arg_index.Bind(IntPtrConstant(0));
kind = EnsureArrayPushable(receiver, &runtime);
- GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
+ GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- Node* new_length = BuildAppendJSArray(FAST_SMI_ELEMENTS, receiver, args,
+ Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, receiver, args,
arg_index, &smi_transition);
args.PopAndReturn(new_length);
}
@@ -971,21 +971,21 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
BIND(&object_push_pre);
{
- Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
+ Branch(IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS), &double_push,
&object_push);
}
BIND(&object_push);
{
- Node* new_length = BuildAppendJSArray(FAST_ELEMENTS, receiver, args,
+ Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, receiver, args,
arg_index, &default_label);
args.PopAndReturn(new_length);
}
BIND(&double_push);
{
- Node* new_length = BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, receiver, args,
- arg_index, &double_transition);
+ Node* new_length = BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, receiver,
+ args, arg_index, &double_transition);
args.PopAndReturn(new_length);
}
@@ -1065,7 +1065,7 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
LoadObjectField(receiver, JSArray::kLengthOffset)));
Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
- fast_elements_untagged(this);
+ fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
@@ -1098,43 +1098,55 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
SmiTag(new_length));
Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
- GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
- &fast_elements_untagged);
- GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
+ GotoIf(
+ Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
+ &fast_elements_smi);
+ GotoIf(Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
&fast_elements_tagged);
- Node* value = LoadFixedDoubleArrayElement(
- elements, IntPtrConstant(0), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &return_undefined);
- int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
- Node* memmove =
- ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
- Node* start = IntPtrAdd(
- BitcastTaggedToWord(elements),
- ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_DOUBLE_ELEMENTS,
- INTPTR_PARAMETERS, header_size));
- CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::Pointer(), MachineType::UintPtr(), memmove,
- start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
- IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
- Node* offset = ElementOffsetFromIndex(
- new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
- if (Is64()) {
- Node* double_hole = Int64Constant(kHoleNanInt64);
- StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
- double_hole);
- } else {
- STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
- Node* double_hole = Int32Constant(kHoleNanLower32);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
- double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
- double_hole);
+ // Fast double elements kind:
+ {
+ CSA_ASSERT(this,
+ Int32LessThanOrEqual(elements_kind,
+ Int32Constant(HOLEY_DOUBLE_ELEMENTS)));
+
+ VARIABLE(result, MachineRepresentation::kTagged, UndefinedConstant());
+
+ Label move_elements(this);
+ result.Bind(AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
+ elements, IntPtrConstant(0), MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, &move_elements)));
+ Goto(&move_elements);
+ BIND(&move_elements);
+
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
+ Node* memmove =
+ ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
+ Node* start = IntPtrAdd(
+ BitcastTaggedToWord(elements),
+ ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size));
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::Pointer(), MachineType::UintPtr(), memmove,
+ start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
+ IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
+ Node* offset = ElementOffsetFromIndex(new_length, HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size);
+ if (Is64()) {
+ Node* double_hole = Int64Constant(kHoleNanInt64);
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+ double_hole);
+ } else {
+ STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
+ Node* double_hole = Int32Constant(kHoleNanLower32);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
+ args.PopAndReturn(result.value());
}
- args.PopAndReturn(AllocateHeapNumberWithValue(value));
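For the double-elements case above, shift boxes element 0 into a HeapNumber first (so the hole-at-index-0 bailout happens before any mutation), then slides the remaining doubles down with memmove and marks the freed last slot as a hole. A self-contained model of that sequence (the hole-at-index-0 bailout is elided and the hole value is a stand-in):

#include <cassert>
#include <cmath>
#include <cstring>
#include <vector>

// Stand-in for V8's hole NaN in a double-backed store.
const double kHole = std::nan("1");

// Shift on a double elements store: return the first element, slide the rest
// down with memmove, and mark the freed last slot as a hole.
double ShiftDoubleElements(std::vector<double>& elements, size_t& length) {
  assert(length > 0);
  double result = elements[0];
  std::memmove(elements.data(), elements.data() + 1,
               (length - 1) * sizeof(double));
  elements[length - 1] = kHole;
  --length;
  return result;
}

int main() {
  std::vector<double> store = {1.5, 2.5, 3.5};
  size_t length = store.size();
  double first = ShiftDoubleElements(store, length);
  assert(first == 1.5);
  assert(length == 2);
  assert(store[0] == 2.5 && store[1] == 3.5);
  return 0;
}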
BIND(&fast_elements_tagged);
{
@@ -1153,14 +1165,15 @@ TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
args.PopAndReturn(value);
}
- BIND(&fast_elements_untagged);
+ BIND(&fast_elements_smi);
{
Node* value = LoadFixedArrayElement(elements, 0);
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
Node* memmove =
ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
Node* start = IntPtrAdd(
BitcastTaggedToWord(elements),
- ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_SMI_ELEMENTS,
+ ElementOffsetFromIndex(IntPtrConstant(0), HOLEY_SMI_ELEMENTS,
INTPTR_PARAMETERS, header_size));
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memmove,
@@ -1204,6 +1217,38 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(isolate(),
+ Builtins::kArrayForEachLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg,
+ UndefinedConstant(), receiver, initial_k, len,
+ UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
+ ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(isolate(),
+ Builtins::kArrayForEachLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg,
+ UndefinedConstant(), receiver, initial_k, len,
+ UndefinedConstant()));
+}
+
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
@@ -1211,8 +1256,8 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1226,6 +1271,26 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
Builtins::kArrayForEachLoopContinuation));
}
+TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingTypedArrayBuiltinBody(
+ "%TypedArray%.prototype.forEach",
+ &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+}
+
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1253,8 +1318,8 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1274,8 +1339,8 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1314,8 +1379,8 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1335,8 +1400,8 @@ TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1375,7 +1440,7 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1396,7 +1461,7 @@ TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1437,7 +1502,7 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1460,7 +1525,7 @@ TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
@@ -1501,8 +1566,8 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1535,6 +1600,47 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
+TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
+ receiver, initial_k, len, UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* this_arg = Parameter(Descriptor::kThisArg);
+ Node* array = Parameter(Descriptor::kArray);
+ Node* initial_k = Parameter(Descriptor::kInitialK);
+ Node* len = Parameter(Descriptor::kLength);
+ Node* result = Parameter(Descriptor::kResult);
+
+ // This custom lazy deopt point is right after the callback. map() needs
+ // to pick up at the next step, which is setting the callback result in
+ // the output array. After incrementing k, we can glide into the loop
+ // continuation builtin.
+
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ CallRuntime(Runtime::kCreateDataProperty, context, array, initial_k, result);
+ // Then we have to increment k before going on.
+ initial_k = NumberInc(initial_k);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+ Return(CallStub(stub, context, receiver, callbackfn, this_arg, array,
+ receiver, initial_k, len, UndefinedConstant()));
+}
+
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
@@ -1542,8 +1648,8 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1562,8 +1668,8 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
Node* receiver = args.GetReceiver();
- Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
- Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ Node* callbackfn = args.GetOptionalArgumentValue(0);
+ Node* this_arg = args.GetOptionalArgumentValue(1);
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
new_target, argc);
@@ -1620,8 +1726,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
CodeStubArguments args(this, argc);
Node* array = args.GetReceiver();
- Node* search_element =
- args.GetOptionalArgumentValue(kSearchElementArg, UndefinedConstant());
+ Node* search_element = args.GetOptionalArgumentValue(kSearchElementArg);
Node* context = Parameter(BuiltinDescriptor::kContext);
Node* intptr_zero = IntPtrConstant(0);
@@ -1684,16 +1789,15 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Node* elements_kind = LoadMapElementsKind(LoadMap(array));
Node* elements = LoadElements(array);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- GotoIf(
- Uint32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
- &if_smiorobjects);
- GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_DOUBLE_ELEMENTS)),
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ GotoIf(Uint32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
+ &if_smiorobjects);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
&if_packed_doubles);
- GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
+ GotoIf(Word32Equal(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
&if_holey_doubles);
Goto(&return_not_found);
@@ -1956,8 +2060,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
BIND(&call_runtime);
{
- Node* start_from =
- args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
+ Node* start_from = args.GetOptionalArgumentValue(kFromIndexArg);
Runtime::FunctionId function = variant == kIncludes
? Runtime::kArrayIncludes_Slow
: Runtime::kArrayIndexOf;
@@ -1996,8 +2099,7 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
BIND(&if_isnotobject);
{
- Callable callable = CodeFactory::ToObject(isolate());
- Node* result = CallStub(callable, context, receiver);
+ Node* result = CallBuiltin(Builtins::kToObject, context, receiver);
var_array.Bind(result);
var_map.Bind(LoadMap(result));
var_type.Bind(LoadMapInstanceType(var_map.value()));
@@ -2090,7 +2192,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
GotoIfNot(SmiBelow(index, length), &set_done);
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiAdd(index, one));
@@ -2142,7 +2244,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&holey_object_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
- Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(cell_value, invalid), &generic_values);
@@ -2157,7 +2259,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&holey_double_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
- Node* invalid = SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(cell_value, invalid), &generic_values);
@@ -2215,8 +2317,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
&done);
- Node* invalid =
- SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, invalid);
Goto(&done);
@@ -2397,20 +2498,20 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
&allocate_iterator_result);
- Node* elements = AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ Node* elements = AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(elements, 1, var_value.value(), SKIP_WRITE_BARRIER);
Node* entry = Allocate(JSArray::kSize);
Node* map = LoadContextElement(LoadNativeContext(context),
- Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+ Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
StoreMapNoWriteBarrier(entry, map);
- StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+ StoreObjectFieldRoot(entry, JSArray::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset, elements);
StoreObjectFieldNoWriteBarrier(entry, JSArray::kLengthOffset,
- SmiConstant(Smi::FromInt(2)));
+ SmiConstant(2));
var_value.Bind(entry);
Goto(&allocate_iterator_result);
@@ -2422,7 +2523,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Node* map = LoadContextElement(LoadNativeContext(context),
Context::ITERATOR_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -2442,12 +2543,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
}
BIND(&if_isdetached);
- {
- Node* message = SmiConstant(MessageTemplate::kDetachedOperation);
- CallRuntime(Runtime::kThrowTypeError, context, message,
- HeapConstant(operation));
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ HeapConstant(operation));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index da1602b963..73c9c7ef89 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -24,7 +24,7 @@ inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
// This is an extended version of ECMA-262 7.1.11 handling signed values
// Try to convert object to a number and clamp values to [kMinInt, kMaxInt]
if (object->IsSmi()) {
- *out = Smi::cast(object)->value();
+ *out = Smi::ToInt(object);
return true;
} else if (object->IsHeapNumber()) {
double value = HeapNumber::cast(object)->value();
@@ -60,7 +60,7 @@ inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
if (!len_obj->IsSmi()) return false;
- *out = Max(0, Smi::cast(len_obj)->value());
+ *out = Max(0, Smi::ToInt(len_obj));
FixedArray* parameters = FixedArray::cast(object->elements());
if (object->HasSloppyArgumentsElements()) {
@@ -124,7 +124,7 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
int args_length = args->length();
if (first_added_arg >= args_length) return true;
- if (IsFastObjectElementsKind(origin_kind)) return true;
+ if (IsObjectElementsKind(origin_kind)) return true;
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
@@ -132,9 +132,9 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
Object* arg = (*args)[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ target_kind = PACKED_DOUBLE_ELEMENTS;
} else {
- target_kind = FAST_ELEMENTS;
+ target_kind = PACKED_ELEMENTS;
break;
}
}
@@ -173,11 +173,11 @@ BUILTIN(ArrayPush) {
// Fast Elements Path
int to_add = args.length() - 1;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int len = Smi::cast(array->length())->value();
+ int len = Smi::ToInt(array->length());
if (to_add == 0) return Smi::FromInt(len);
// Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_push(), args);
@@ -197,7 +197,7 @@ BUILTIN(ArrayPop) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
+ uint32_t len = static_cast<uint32_t>(Smi::ToInt(array->length()));
if (len == 0) return isolate->heap()->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
@@ -228,7 +228,7 @@ BUILTIN(ArrayShift) {
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int len = Smi::cast(array->length())->value();
+ int len = Smi::ToInt(array->length());
if (len == 0) return heap->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
@@ -250,7 +250,7 @@ BUILTIN(ArrayUnshift) {
if (to_add == 0) return array->length();
// Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::ToInt(array->length()));
if (JSArray::HasReadOnlyLength(array)) {
return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
@@ -279,7 +279,7 @@ BUILTIN(ArraySlice) {
AllowHeapAllocation allow_allocation;
return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
- len = Smi::cast(array->length())->value();
+ len = Smi::ToInt(array->length());
} else if (receiver->IsJSObject() &&
GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
&len)) {
@@ -352,7 +352,7 @@ BUILTIN(ArraySplice) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
}
- int len = Smi::cast(array->length())->value();
+ int len = Smi::ToInt(array->length());
// clip relative start to [0, len]
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
: Min(relative_start, len);
@@ -461,8 +461,8 @@ class ArrayConcatVisitor {
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
- Handle<SeededNumberDictionary> result = SeededNumberDictionary::AtNumberPut(
- dict, index, elm, not_a_prototype_holder);
+ Handle<SeededNumberDictionary> result =
+ SeededNumberDictionary::Set(dict, index, elm, not_a_prototype_holder);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -497,10 +497,10 @@ class ArrayConcatVisitor {
Handle<Object> length =
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map = JSObject::GetElementsTransitionMap(
- array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
- array->set_map(*map);
+ array, fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_length(*length);
array->set_elements(*storage_fixed_array());
+ array->synchronized_set_map(*map);
return array;
}
@@ -535,8 +535,8 @@ class ArrayConcatVisitor {
// it cannot yet be used as a prototype.
Handle<JSObject> not_a_prototype_holder;
Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- not_a_prototype_holder);
+ SeededNumberDictionary::Set(slow_storage, i, element,
+ not_a_prototype_holder);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -582,10 +582,10 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
uint32_t length = static_cast<uint32_t>(array->length()->Number());
int element_count = 0;
switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
@@ -597,8 +597,8 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
@@ -639,7 +639,6 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
UNREACHABLE();
- return 0;
}
// As an estimate, we assume that the prototype doesn't contain any
// inherited elements.
@@ -658,10 +657,10 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
Isolate* isolate = object->GetIsolate();
ElementsKind kind = object->GetElementsKind();
switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS: {
DisallowHeapAllocation no_gc;
FixedArray* elements = FixedArray::cast(object->elements());
uint32_t length = static_cast<uint32_t>(elements->length());
@@ -673,8 +672,8 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
if (object->elements()->IsFixedArray()) {
DCHECK(object->elements()->length() == 0);
break;
@@ -823,10 +822,10 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<JSObject> array = Handle<JSObject>::cast(receiver);
switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(array->elements()));
@@ -851,8 +850,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
});
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
// Run through the elements FixedArray and use HasElement and GetElement
@@ -964,10 +963,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
- ElementsKind kind = FAST_SMI_ELEMENTS;
+ ElementsKind kind = PACKED_SMI_ELEMENTS;
uint32_t estimate_result_length = 0;
- uint32_t estimate_nof_elements = 0;
+ uint32_t estimate_nof = 0;
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
Handle<Object> obj((*args)[i], isolate);
uint32_t length_estimate;
@@ -984,7 +983,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
} else {
if (obj->IsHeapObject()) {
kind = GetMoreGeneralElementsKind(
- kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
+ kind, obj->IsNumber() ? PACKED_DOUBLE_ELEMENTS : PACKED_ELEMENTS);
}
length_estimate = 1;
element_estimate = 1;
@@ -995,10 +994,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
} else {
estimate_result_length += length_estimate;
}
- if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
+ if (JSObject::kMaxElementCount - estimate_nof < element_estimate) {
+ estimate_nof = JSObject::kMaxElementCount;
} else {
- estimate_nof_elements += element_estimate;
+ estimate_nof += element_estimate;
}
});
@@ -1006,10 +1005,10 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// fixed array (fast case) is more time and space-efficient than a
// dictionary.
bool fast_case = is_array_species &&
- (estimate_nof_elements * 2) >= estimate_result_length &&
+ (estimate_nof * 2) >= estimate_result_length &&
isolate->IsIsConcatSpreadableLookupChainIntact();
- if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
+ if (fast_case && kind == PACKED_DOUBLE_ELEMENTS) {
Handle<FixedArrayBase> storage =
isolate->factory()->NewFixedDoubleArray(estimate_result_length);
int j = 0;
@@ -1020,7 +1019,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj((*args)[i], isolate);
if (obj->IsSmi()) {
- double_storage->set(j, Smi::cast(*obj)->value());
+ double_storage->set(j, Smi::ToInt(*obj));
j++;
} else if (obj->IsNumber()) {
double_storage->set(j, obj->Number());
@@ -1030,8 +1029,8 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
JSArray* array = JSArray::cast(*obj);
uint32_t length = static_cast<uint32_t>(array->length()->Number());
switch (array->GetElementsKind()) {
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
FixedDoubleArray* elements =
@@ -1052,8 +1051,8 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
}
break;
}
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS: {
Object* the_hole = isolate->heap()->the_hole_value();
FixedArray* elements(FixedArray::cast(array->elements()));
for (uint32_t i = 0; i < length; i++) {
@@ -1062,14 +1061,14 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
failure = true;
break;
}
- int32_t int_value = Smi::cast(element)->value();
+ int32_t int_value = Smi::ToInt(element);
double_storage->set(j, int_value);
j++;
}
break;
}
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NO_ELEMENTS:
DCHECK_EQ(0u, length);
@@ -1094,10 +1093,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
storage =
isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
} else if (is_array_species) {
- // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for =
- estimate_nof_elements + (estimate_nof_elements >> 2);
- storage = SeededNumberDictionary::New(isolate, at_least_space_for);
+ storage = SeededNumberDictionary::New(isolate, estimate_nof);
} else {
DCHECK(species->IsConstructor());
Handle<Object> length(Smi::kZero, isolate);
@@ -1184,7 +1180,7 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
}
// The Array length is guaranteed to be <= kHalfOfMaxInt, thus we won't
// overflow.
- result_len += Smi::cast(array->length())->value();
+ result_len += Smi::ToInt(array->length());
DCHECK(result_len >= 0);
// Throw an Error if we overflow the FixedArray limits
if (FixedDoubleArray::kMaxLength < result_len ||
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index f661f7e82e..5cff179c63 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -61,8 +61,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode),
- SmiConstant(static_cast<int>(SuspendFlags::kGeneratorAwait)));
+ CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode));
// The resulting Promise is a throwaway, so it doesn't matter what it
// resolves to. What is important is that we don't end up keeping the
@@ -104,12 +103,9 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
- NodeGenerator1 create_closure_context = [&](Node* native_context) -> Node* {
- Node* const context =
- CreatePromiseContext(native_context, AwaitContext::kLength);
+ ContextInitializer init_closure_context = [&](Node* context) {
StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
generator);
- return context;
};
// TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
@@ -119,19 +115,21 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// TODO(jgruber): Use a faster specialized version of
// InternalPerformPromiseThen.
- Node* const result = Await(
- context, generator, awaited, outer_promise, create_closure_context,
- Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
+ Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
+ init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
+ Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
+ is_predicted_as_caught);
- Return(result);
+  // Return outer promise to avoid adding a load of the outer promise before
+ // suspending in BytecodeGenerator.
+ Return(outer_promise);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
+ Node* const generator = Parameter(Descriptor::kReceiver);
Node* const awaited = Parameter(Descriptor::kAwaited);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
@@ -145,8 +143,8 @@ TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
- Node* const generator = Parameter(Descriptor::kGenerator);
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
+ Node* const generator = Parameter(Descriptor::kReceiver);
Node* const awaited = Parameter(Descriptor::kAwaited);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index f8974acd98..95192de3eb 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -21,42 +21,116 @@ class ValueUnwrapContext {
Node* AsyncBuiltinsAssembler::Await(
Node* context, Node* generator, Node* value, Node* outer_promise,
- const NodeGenerator1& create_closure_context, int on_resolve_context_index,
- int on_reject_context_index, bool is_predicted_as_caught) {
+ int context_length, const ContextInitializer& init_closure_context,
+ int on_resolve_context_index, int on_reject_context_index,
+ bool is_predicted_as_caught) {
+ DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const native_context = LoadNativeContext(context);
+
+#ifdef DEBUG
+ {
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const instance_size = LoadMapInstanceSize(map);
+    // Assert that the strict function map has an instance size of
+    // JSFunction::kSize.
+ CSA_ASSERT(this, WordEqual(instance_size, IntPtrConstant(JSFunction::kSize /
+ kPointerSize)));
+ }
+#endif
+
+#ifdef DEBUG
+ {
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const instance_size = LoadMapInstanceSize(map);
+    // Assert that the JSPromise map has an instance size of
+    // JSPromise::kSizeWithEmbedderFields.
+ CSA_ASSERT(this,
+ WordEqual(instance_size,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
+ kPointerSize)));
+ }
+#endif
+
+ static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
+ static const int kThrowawayPromiseOffset =
+ kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ static const int kResolveClosureOffset =
+ kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
+ static const int kRejectClosureOffset =
+ kResolveClosureOffset + JSFunction::kSize;
+ static const int kTotalSize = kRejectClosureOffset + JSFunction::kSize;
+
+ Node* const base = AllocateInNewSpace(kTotalSize);
+ Node* const closure_context = base;
+ {
+ // Initialize closure context
+ InitializeFunctionContext(native_context, closure_context, context_length);
+ init_closure_context(closure_context);
+ }
+
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
- Node* const wrapped_value = AllocateAndInitJSPromise(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const promise_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
+ {
+ // Initialize Promise
+ StoreMapNoWriteBarrier(wrapped_value, promise_map);
+ InitializeJSObjectFromMap(
+ wrapped_value, promise_map,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields),
+ EmptyFixedArrayConstant(), EmptyFixedArrayConstant());
+ PromiseInit(wrapped_value);
+ }
-  // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
+ Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
+ {
+ // Initialize throwawayPromise
+ StoreMapNoWriteBarrier(throwaway, promise_map);
+ InitializeJSObjectFromMap(
+ throwaway, promise_map,
+ IntPtrConstant(JSPromise::kSizeWithEmbedderFields),
+ EmptyFixedArrayConstant(), EmptyFixedArrayConstant());
+ PromiseInit(throwaway);
+ }
- Node* const native_context = LoadNativeContext(context);
+ Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
+ {
+ // Initialize resolve handler
+ InitializeNativeClosure(closure_context, native_context, on_resolve,
+ on_resolve_context_index);
+ }
- Node* const closure_context = create_closure_context(native_context);
- Node* const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
+ {
+ // Initialize reject handler
+ InitializeNativeClosure(closure_context, native_context, on_reject,
+ on_reject_context_index);
+ }
+
+ {
+ // Add PromiseHooks if needed
+ Label next(this);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &next);
+ CallRuntime(Runtime::kPromiseHookInit, context, wrapped_value,
+ outer_promise);
+ CallRuntime(Runtime::kPromiseHookInit, context, throwaway, wrapped_value);
+ Goto(&next);
+ BIND(&next);
+ }
- // Load and allocate on_resolve closure
- Node* const on_resolve_shared_fun =
- LoadContextElement(native_context, on_resolve_context_index);
- CSA_SLOW_ASSERT(
- this, HasInstanceType(on_resolve_shared_fun, SHARED_FUNCTION_INFO_TYPE));
- Node* const on_resolve = AllocateFunctionWithMapAndContext(
- map, on_resolve_shared_fun, closure_context);
-
- // Load and allocate on_reject closure
- Node* const on_reject_shared_fun =
- LoadContextElement(native_context, on_reject_context_index);
- CSA_SLOW_ASSERT(
- this, HasInstanceType(on_reject_shared_fun, SHARED_FUNCTION_INFO_TYPE));
- Node* const on_reject = AllocateFunctionWithMapAndContext(
- map, on_reject_shared_fun, closure_context);
-
- Node* const throwaway_promise =
- AllocateAndInitJSPromise(context, wrapped_value);
+  // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
+ CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
// The Promise will be thrown away and not handled, but it shouldn't trigger
// unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway_promise);
+ PromiseSetHasHandler(throwaway);
Label do_perform_promise_then(this);
GotoIfNot(IsDebugActive(), &do_perform_promise_then);
@@ -82,18 +156,52 @@ Node* AsyncBuiltinsAssembler::Await(
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway_promise, key,
- outer_promise, SmiConstant(STRICT));
+ CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
+ SmiConstant(STRICT));
}
Goto(&do_perform_promise_then);
BIND(&do_perform_promise_then);
+
CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapped_value,
- on_resolve, on_reject, throwaway_promise);
+ on_resolve, on_reject, throwaway);
return wrapped_value;
}
+void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
+ Node* native_context,
+ Node* function,
+ int context_index) {
+ Node* const function_map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ StoreMapNoWriteBarrier(function, function_map);
+ StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(function, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(function, JSFunction::kFeedbackVectorOffset,
+ Heap::kUndefinedCellRootIndex);
+ StoreObjectFieldRoot(function, JSFunction::kPrototypeOrInitialMapOffset,
+ Heap::kTheHoleValueRootIndex);
+
+ Node* shared_info = LoadContextElement(native_context, context_index);
+ CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
+ StoreObjectFieldNoWriteBarrier(
+ function, JSFunction::kSharedFunctionInfoOffset, shared_info);
+ StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
+
+ Node* const code = BitcastTaggedToWord(
+ LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset));
+ Node* const code_entry =
+ IntPtrAdd(code, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeEntryOffset,
+ code_entry,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldRoot(function, JSFunction::kNextFunctionLinkOffset,
+ Heap::kUndefinedValueRootIndex);
+}
+
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
@@ -127,8 +235,8 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
CSA_ASSERT(this, IsBoolean(done));
- Node* const unwrapped_value = CallStub(
- CodeFactory::CreateIterResultObject(isolate()), context, value, done);
+ Node* const unwrapped_value =
+ CallBuiltin(Builtins::kCreateIterResultObject, context, value, done);
Return(unwrapped_value);
}
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 26bc3988ed..caba5ebd36 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -16,7 +16,7 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
: PromiseBuiltinsAssembler(state) {}
protected:
- typedef std::function<Node*(Node*)> NodeGenerator1;
+ typedef std::function<void(Node*)> ContextInitializer;
// Perform steps to resume generator after `value` is resolved.
// `on_reject_context_index` is an index into the Native Context, which should
@@ -24,7 +24,8 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
// value following the reject index should be a similar value for the resolve
// closure. Returns the Promise-wrapped `value`.
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- const NodeGenerator1& create_closure_context,
+ int context_length,
+ const ContextInitializer& init_closure_context,
int on_resolve_context_index, int on_reject_context_index,
bool is_predicted_as_caught);
@@ -33,6 +34,8 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
private:
+ void InitializeNativeClosure(Node* context, Node* native_context,
+ Node* function, int context_index);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
};
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index b3cb3d8ebd..72a6a496b7 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -113,7 +113,8 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
return SmiNotEqual(resume_type, SmiConstant(JSGeneratorObject::kNext));
}
- void AsyncGeneratorEnqueue(Node* context, Node* generator, Node* value,
+ void AsyncGeneratorEnqueue(CodeStubArguments* args, Node* context,
+ Node* generator, Node* value,
JSAsyncGeneratorObject::ResumeMode resume_mode,
const char* method_name);
@@ -138,7 +139,7 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
// Shared implementation for the 3 Async Iterator protocol methods of Async
// Generators.
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
- Node* context, Node* generator, Node* value,
+ CodeStubArguments* args, Node* context, Node* generator, Node* value,
JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name) {
// AsyncGeneratorEnqueue produces a new Promise, and appends it to the list
// of async generator requests to be executed. If the generator is not
@@ -175,18 +176,18 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
Goto(&done);
BIND(&done);
- Return(promise);
+ args->PopAndReturn(promise);
}
BIND(&if_receiverisincompatible);
{
Node* const error =
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
- CStringConstant(method_name), generator);
+ StringConstant(method_name), generator);
CallBuiltin(Builtins::kRejectNativePromise, context, promise, error,
TrueConstant());
- Return(promise);
+ args->PopAndReturn(promise);
}
}
@@ -231,18 +232,16 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator,
- SmiConstant(resume_mode),
- SmiConstant(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
+ SmiConstant(resume_mode));
- TailCallStub(CodeFactory::AsyncGeneratorResumeNext(isolate()), context,
- generator);
+ TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(1);
- Node* value = Parameter(2);
- Node* context = Parameter(5);
+ Node* generator = Parameter(Descriptor::kReceiver);
+ Node* value = Parameter(Descriptor::kAwaited);
+ Node* context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this,
HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
@@ -250,12 +249,9 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
CSA_ASSERT(this, WordNotEqual(request, UndefinedConstant()));
- NodeGenerator1 closure_context = [&](Node* native_context) -> Node* {
- Node* const context =
- CreatePromiseContext(native_context, AwaitContext::kLength);
+ ContextInitializer init_closure_context = [&](Node* context) {
StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
generator);
- return context;
};
Node* outer_promise =
@@ -265,8 +261,8 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
Node* promise =
- Await(context, generator, value, outer_promise, closure_context,
- resolve_index, reject_index, is_catchable);
+ Await(context, generator, value, outer_promise, AwaitContext::kLength,
+ init_closure_context, resolve_index, reject_index, is_catchable);
CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
@@ -330,10 +326,17 @@ Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue(
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-asyncgenerator-prototype-next
TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
- Node* const generator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
- AsyncGeneratorEnqueue(context, generator, value,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* generator = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kNext,
"[AsyncGenerator].prototype.next");
}
@@ -341,10 +344,17 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-asyncgenerator-prototype-return
TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
- Node* generator = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorEnqueue(context, generator, value,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* generator = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kReturn,
"[AsyncGenerator].prototype.return");
}
@@ -352,10 +362,17 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-asyncgenerator-prototype-throw
TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
- Node* generator = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorEnqueue(context, generator, value,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* generator = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kThrow,
"[AsyncGenerator].prototype.throw");
}
@@ -461,8 +478,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
BIND(&resume_generator);
{
CallStub(CodeFactory::ResumeGenerator(isolate()), context,
- LoadValueFromAsyncGeneratorRequest(next), generator, resume_type,
- SmiConstant(static_cast<int>(SuspendFlags::kAsyncGeneratorYield)));
+ LoadValueFromAsyncGeneratorRequest(next), generator, resume_type);
var_state.Bind(LoadGeneratorState(generator));
var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator));
Goto(&start);
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 2caa3c9edb..f232b32700 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -66,7 +66,7 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
// Let badIteratorError be a new TypeError exception.
Node* const error =
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
- CStringConstant(method_name), object);
+ StringConstant(method_name), object);
// Perform ! Call(promiseCapability.[[Reject]], undefined,
// « badIteratorError »).
@@ -203,7 +203,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&to_boolean);
{
Node* const result =
- CallStub(CodeFactory::ToBoolean(isolate()), context, var_done.value());
+ CallBuiltin(Builtins::kToBoolean, context, var_done.value());
var_done.Bind(result);
Goto(&done);
}
@@ -237,9 +237,8 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
Node* const promise, Label* if_exception) {
// If return is undefined, then
// Let iterResult be ! CreateIterResultObject(value, true)
- Node* const iter_result =
- CallStub(CodeFactory::CreateIterResultObject(isolate()), context, value,
- TrueConstant());
+ Node* const iter_result = CallBuiltin(Builtins::kCreateIterResultObject,
+ context, value, TrueConstant());
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index bd70865399..4f4839b5f6 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-call-gen.h"
+
+#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/globals.h"
#include "src/isolate.h"
@@ -12,82 +15,383 @@ namespace internal {
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kDisallow);
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny,
- TailCallMode::kDisallow);
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny);
}
-void Builtins::Generate_TailCallFunction_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kAllow);
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ Generate_CallBoundFunctionImpl(masm);
}
-void Builtins::Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kAllow);
+void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
}
-void Builtins::Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
- Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
+void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny);
}
-void Builtins::Generate_TailCallBoundFunction(MacroAssembler* masm) {
- Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
+void Builtins::Generate_CallVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructVarargs(masm, masm->isolate()->builtins()->Call());
}
-void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kDisallow);
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructForwardVarargs(masm,
+ masm->isolate()->builtins()->Call());
}
-void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
+void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructForwardVarargs(
+ masm, masm->isolate()->builtins()->CallFunction());
}
-void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
-}
+void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
+ Node* target, Node* new_target, Node* arguments_list, Node* context) {
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ VARIABLE(var_length, MachineRepresentation::kWord32);
+ Label if_done(this), if_arguments(this), if_array(this),
+ if_holey_array(this, Label::kDeferred),
+ if_runtime(this, Label::kDeferred);
-void Builtins::Generate_TailCall_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kAllow);
+ // Perform appropriate checks on {target} (and {new_target} first).
+ if (new_target == nullptr) {
+ // Check that {target} is Callable.
+ Label if_target_callable(this),
+ if_target_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(target), &if_target_not_callable);
+ Branch(IsCallable(target), &if_target_callable, &if_target_not_callable);
+ BIND(&if_target_not_callable);
+ {
+ CallRuntime(Runtime::kThrowApplyNonFunction, context, target);
+ Unreachable();
+ }
+ BIND(&if_target_callable);
+ } else {
+ // Check that {target} is a Constructor.
+ Label if_target_constructor(this),
+ if_target_not_constructor(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(target), &if_target_not_constructor);
+ Branch(IsConstructor(target), &if_target_constructor,
+ &if_target_not_constructor);
+ BIND(&if_target_not_constructor);
+ {
+ CallRuntime(Runtime::kThrowNotConstructor, context, target);
+ Unreachable();
+ }
+ BIND(&if_target_constructor);
+
+ // Check that {new_target} is a Constructor.
+ Label if_new_target_constructor(this),
+ if_new_target_not_constructor(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(new_target), &if_new_target_not_constructor);
+ Branch(IsConstructor(new_target), &if_new_target_constructor,
+ &if_new_target_not_constructor);
+ BIND(&if_new_target_not_constructor);
+ {
+ CallRuntime(Runtime::kThrowNotConstructor, context, new_target);
+ Unreachable();
+ }
+ BIND(&if_new_target_constructor);
+ }
+
+ GotoIf(TaggedIsSmi(arguments_list), &if_runtime);
+ Node* arguments_list_map = LoadMap(arguments_list);
+ Node* native_context = LoadNativeContext(context);
+
+ // Check if {arguments_list} is an (unmodified) arguments object.
+ Node* sloppy_arguments_map =
+ LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ GotoIf(WordEqual(arguments_list_map, sloppy_arguments_map), &if_arguments);
+ Node* strict_arguments_map =
+ LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+ GotoIf(WordEqual(arguments_list_map, strict_arguments_map), &if_arguments);
+
+ // Check if {arguments_list} is a fast JSArray.
+ Branch(IsJSArrayMap(arguments_list_map), &if_array, &if_runtime);
+
+ BIND(&if_array);
+ {
+ // Try to extract the elements from a JSArray object.
+ var_elements.Bind(
+ LoadObjectField(arguments_list, JSArray::kElementsOffset));
+ var_length.Bind(LoadAndUntagToWord32ObjectField(arguments_list,
+ JSArray::kLengthOffset));
+
+ // Holey arrays and double backing stores need special treatment.
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
+
+ Node* kind = LoadMapElementsKind(arguments_list_map);
+
+ GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_runtime);
+ Branch(Word32And(kind, Int32Constant(1)), &if_holey_array, &if_done);
+ }
+
+ BIND(&if_holey_array);
+ {
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype is actually Array.prototype.
+ Node* arguments_list_prototype = LoadMapPrototype(arguments_list_map);
+ Node* initial_array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(arguments_list_prototype, initial_array_prototype),
+ &if_runtime);
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ Branch(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_done, &if_runtime);
+ }
+
+ BIND(&if_arguments);
+ {
+    // Try to extract the elements from a JSArgumentsObject.
+ Node* length =
+ LoadObjectField(arguments_list, JSArgumentsObject::kLengthOffset);
+ Node* elements =
+ LoadObjectField(arguments_list, JSArgumentsObject::kElementsOffset);
+ Node* elements_length =
+ LoadObjectField(elements, FixedArray::kLengthOffset);
+ GotoIfNot(WordEqual(length, elements_length), &if_runtime);
+ var_elements.Bind(elements);
+ var_length.Bind(SmiToWord32(length));
+ Goto(&if_done);
+ }
+
+ BIND(&if_runtime);
+ {
+ // Ask the runtime to create the list (actually a FixedArray).
+ Node* elements =
+ CallRuntime(Runtime::kCreateListFromArrayLike, context, arguments_list);
+ var_elements.Bind(elements);
+ var_length.Bind(
+ LoadAndUntagToWord32ObjectField(elements, FixedArray::kLengthOffset));
+ Goto(&if_done);
+ }
+
+ // Tail call to the appropriate builtin (depending on whether we have
+ // a {new_target} passed).
+ BIND(&if_done);
+ {
+ Label if_not_double(this), if_double(this);
+ Node* elements = var_elements.value();
+ Node* length = var_length.value();
+ Node* args_count = Int32Constant(0); // args already on the stack
+
+ Branch(IsFixedDoubleArray(elements), &if_double, &if_not_double);
+
+ BIND(&if_not_double);
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count, elements,
+ length);
+ }
+
+ BIND(&if_double);
+ {
+ // Kind is hardcoded here because CreateListFromArrayLike will only
+ // produce holey double arrays.
+ CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ args_count, context,
+ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
+ }
+ }
}
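+// Example (illustrative): this is the fast path behind array-like
+// applications such as Function.prototype.apply and Reflect.apply. A call
+// like Reflect.apply(f, undefined, [1, 2]) reads both elements straight out
+// of the fast JSArray, while Reflect.apply(f, undefined, {length: 2, 0: 1,
+// 1: 2}) is neither an (unmodified) arguments object nor a fast JSArray and
+// is expanded via Runtime::kCreateListFromArrayLike above.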
-void Builtins::Generate_TailCall_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kAllow);
+// Takes a FixedArray of doubles and creates a new FixedArray with those doubles
+// boxed as HeapNumbers, then tail calls CallVarargs/ConstructVarargs depending
+// on whether {new_target} was passed.
+void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
+ Node* target, Node* new_target, Node* elements, Node* length,
+ Node* args_count, Node* context, Node* kind) {
+ Label if_holey_double(this), if_packed_double(this), if_done(this);
+
+ const ElementsKind new_kind = PACKED_ELEMENTS;
+ const ParameterMode mode = INTPTR_PARAMETERS;
+ const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ Node* intptr_length = ChangeInt32ToIntPtr(length);
+
+ // Allocate a new FixedArray of Objects.
+ Node* new_elements =
+ AllocateFixedArray(new_kind, intptr_length, mode,
+ CodeStubAssembler::kAllowLargeObjectAllocation);
+ Branch(Word32Equal(kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
+ &if_holey_double, &if_packed_double);
+
+ BIND(&if_holey_double);
+ {
+ // Fill the FixedArray with pointers to HeapObjects.
+ CopyFixedArrayElements(HOLEY_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ Goto(&if_done);
+ }
+
+ BIND(&if_packed_double);
+ {
+ CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ {
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, new_elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count,
+ new_elements, length);
+ }
+ }
}
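+// Note: the copies above are what do the boxing; a FixedDoubleArray holds
+// raw float64 values, while the CallVarargs/ConstructVarargs stubs expect a
+// FixedArray of tagged values, so every element is rewritten as a HeapNumber
+// into the freshly allocated PACKED_ELEMENTS array before the tail call.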
-void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
+ Node* target, Node* new_target, Node* spread, Node* args_count,
+ Node* context) {
+ Label if_done(this), if_holey(this), if_runtime(this, Label::kDeferred);
+
+ VARIABLE(spread_result, MachineRepresentation::kTagged, spread);
+
+ GotoIf(TaggedIsSmi(spread), &if_runtime);
+ Node* spread_map = LoadMap(spread);
+ GotoIfNot(IsJSArrayMap(spread_map), &if_runtime);
+
+ Node* native_context = LoadNativeContext(context);
+
+ // Check that we have the original ArrayPrototype.
+ Node* prototype = LoadMapPrototype(spread_map);
+ Node* array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(prototype, array_prototype), &if_runtime);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
+ GotoIfNot(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_runtime);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ Node* arr_it_proto_map = LoadMap(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ Node* initial_map = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX);
+ GotoIfNot(WordEqual(arr_it_proto_map, initial_map), &if_runtime);
+
+ Node* kind = LoadMapElementsKind(spread_map);
+
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
+
+ GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_runtime);
+ Branch(Word32And(kind, Int32Constant(1)), &if_holey, &if_done);
+
+ // Check the ArrayProtector cell for holey arrays.
+ BIND(&if_holey);
+ {
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ Branch(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_done, &if_runtime);
+ }
+
+ BIND(&if_runtime);
+ {
+ Node* spread_iterable = LoadContextElement(LoadNativeContext(context),
+ Context::SPREAD_ITERABLE_INDEX);
+ spread_result.Bind(CallJS(CodeFactory::Call(isolate()), context,
+ spread_iterable, UndefinedConstant(), spread));
+ CSA_ASSERT(this, IsJSArray(spread_result.value()));
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ {
+ // The result from if_runtime can be an array of doubles.
+ Label if_not_double(this), if_double(this);
+ Node* elements =
+ LoadObjectField(spread_result.value(), JSArray::kElementsOffset);
+ Node* length = LoadAndUntagToWord32ObjectField(spread_result.value(),
+ JSArray::kLengthOffset);
+
+ Node* kind = LoadMapElementsKind(LoadMap(elements));
+ CSA_ASSERT(this, Int32LessThanOrEqual(
+ kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)));
+
+ Branch(Int32GreaterThan(kind, Int32Constant(HOLEY_ELEMENTS)), &if_double,
+ &if_not_double);
+
+ BIND(&if_not_double);
+ {
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count,
+ elements, length);
+ }
+ }
+
+ BIND(&if_double);
+ {
+ CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ args_count, context, kind);
+ }
+ }
}
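+// Example (illustrative): for a call site like f(...args), the checks above
+// only take the fast path when {args} is a fast JSArray whose prototype is
+// the unmodified initial Array.prototype and whose iteration is still
+// guarded by the array and array-iterator protector cells; spreading
+// anything else (e.g. a Set, or an array with a patched Symbol.iterator)
+// first goes through the spread_iterable helper from the native context.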
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm, masm->isolate()->builtins()->Call());
+TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(CallWithArrayLikeDescriptor::kTarget);
+ Node* new_target = nullptr;
+ Node* arguments_list = Parameter(CallWithArrayLikeDescriptor::kArgumentsList);
+ Node* context = Parameter(CallWithArrayLikeDescriptor::kContext);
+ CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
-void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm, masm->isolate()->builtins()->CallFunction());
+TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(CallWithSpreadDescriptor::kTarget);
+ Node* new_target = nullptr;
+ Node* spread = Parameter(CallWithSpreadDescriptor::kSpread);
+ Node* args_count = Parameter(CallWithSpreadDescriptor::kArgumentsCount);
+ Node* context = Parameter(CallWithSpreadDescriptor::kContext);
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
new file mode 100644
index 0000000000..bbbdefc0c5
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_CALL_GEN_H_
+#define V8_BUILTINS_BUILTINS_CALL_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit CallOrConstructBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void CallOrConstructWithArrayLike(Node* target, Node* new_target,
+ Node* arguments_list, Node* context);
+ void CallOrConstructDoubleVarargs(Node* target, Node* new_target,
+ Node* elements, Node* length,
+ Node* args_count, Node* context,
+ Node* kind);
+ void CallOrConstructWithSpread(Node* target, Node* new_target, Node* spread,
+ Node* args_count, Node* context);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_CALL_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-call.cc b/deps/v8/src/builtins/builtins-call.cc
index e6598c88a2..e78fb699d0 100644
--- a/deps/v8/src/builtins/builtins-call.cc
+++ b/deps/v8/src/builtins/builtins-call.cc
@@ -11,71 +11,28 @@
namespace v8 {
namespace internal {
-Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return CallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return CallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return CallFunction_ReceiverIsAny();
- }
- break;
- case TailCallMode::kAllow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return TailCallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return TailCallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return TailCallFunction_ReceiverIsAny();
- }
- break;
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return CallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return CallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return CallFunction_ReceiverIsAny();
}
UNREACHABLE();
- return Handle<Code>::null();
}
-Handle<Code> Builtins::Call(ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return Call_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return Call_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return Call_ReceiverIsAny();
- }
- break;
- case TailCallMode::kAllow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return TailCall_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return TailCall_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return TailCall_ReceiverIsAny();
- }
- break;
+Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Call_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Call_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return Call_ReceiverIsAny();
}
UNREACHABLE();
- return Handle<Code>::null();
-}
-
-Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- return CallBoundFunction();
- case TailCallMode::kAllow:
- return TailCallBoundFunction();
- }
- UNREACHABLE();
- return Handle<Code>::null();
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index ebf90990a0..24dc946a24 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -41,7 +41,7 @@ Handle<FrameArray> GetFrameArray(Isolate* isolate, Handle<JSObject> object) {
int GetFrameIndex(Isolate* isolate, Handle<JSObject> object) {
Handle<Object> frame_index_obj = JSObject::GetDataProperty(
object, isolate->factory()->call_site_frame_index_symbol());
- return Smi::cast(*frame_index_obj)->value();
+ return Smi::ToInt(*frame_index_obj);
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
new file mode 100644
index 0000000000..9f65065db5
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -0,0 +1,1357 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects/hash-table.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class CollectionsBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ Node* AllocateJSMap(Node* js_map_function);
+
+ template <typename CollectionType>
+ Node* AllocateOrderedHashTable();
+ Node* AllocateJSCollection(Node* js_map_function);
+ template <typename IteratorType>
+ Node* AllocateJSCollectionIterator(Node* context, int map_index,
+ Node* collection);
+
+ Node* CallGetHashRaw(Node* const key);
+ template <typename CollectionType, int entrysize>
+ Node* CallHasRaw(Node* const table, Node* const key);
+
+  // Transitions the iterator to the non-obsolete backing store.
+  // This is a NOP if the {table} is not obsolete.
+ typedef std::function<void(Node* const table, Node* const index)>
+ UpdateInTransition;
+ template <typename TableType>
+ std::tuple<Node*, Node*> Transition(
+ Node* const table, Node* const index,
+ UpdateInTransition const& update_in_transition);
+ template <typename IteratorType, typename TableType>
+ std::tuple<Node*, Node*> TransitionAndUpdate(Node* const iterator);
+ template <typename TableType>
+ std::tuple<Node*, Node*, Node*> NextSkipHoles(Node* table, Node* index,
+ Label* if_end);
+
+ // Builds code that finds OrderedHashTable entry for a key with hash code
+  // {hash}, using the comparison code generated by {key_compare}. The code
+ // jumps to {entry_found} if the key is found, or to {not_found} if the key
+ // was not found. In the {entry_found} branch, the variable
+ // entry_start_position will be bound to the index of the entry (relative to
+ // OrderedHashTable::kHashTableStartIndex).
+ //
+ // The {CollectionType} template parameter stands for the particular instance
+  // of OrderedHashTable; it should be OrderedHashMap or OrderedHashSet.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntry(
+ Node* table, Node* hash,
+ std::function<void(Node* other, Label* if_same, Label* if_not_same)>
+ key_compare,
+ Variable* entry_start_position, Label* entry_found, Label* not_found);
+
+ // Specialization for Smi.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForSmiKey(Node* table, Node* key_tagged,
+ Variable* entry_start_position,
+ Label* entry_found, Label* not_found);
+ void SameValueZeroSmi(Node* key_smi, Node* candidate_key, Label* if_same,
+ Label* if_not_same);
+
+ // Specialization for heap numbers.
+  void SameValueZeroHeapNumber(Node* key_float, Node* candidate_key,
+ Label* if_same, Label* if_not_same);
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForHeapNumberKey(Node* context, Node* table,
+ Node* key_heap_number,
+ Variable* entry_start_position,
+ Label* entry_found,
+ Label* not_found);
+
+ // Specialization for string.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForStringKey(Node* context, Node* table,
+ Node* key_tagged,
+ Variable* entry_start_position,
+ Label* entry_found,
+ Label* not_found);
+ Node* ComputeIntegerHashForString(Node* context, Node* string_key);
+ void SameValueZeroString(Node* context, Node* key_string, Node* candidate_key,
+ Label* if_same, Label* if_not_same);
+
+ // Specialization for non-strings, non-numbers. For those we only need
+ // reference equality to compare the keys.
+ template <typename CollectionType>
+ void FindOrderedHashTableEntryForOtherKey(Node* context, Node* table,
+ Node* key,
+ Variable* entry_start_position,
+ Label* entry_found,
+ Label* not_found);
+};
+
+template <typename CollectionType>
+Node* CollectionsBuiltinsAssembler::AllocateOrderedHashTable() {
+ static const int kCapacity = CollectionType::kMinCapacity;
+ static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
+ static const int kDataTableLength = kCapacity * CollectionType::kEntrySize;
+ static const int kFixedArrayLength =
+ CollectionType::kHashTableStartIndex + kBucketCount + kDataTableLength;
+ static const int kDataTableStartIndex =
+ CollectionType::kHashTableStartIndex + kBucketCount;
+
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kCapacity));
+ STATIC_ASSERT(kCapacity <= CollectionType::kMaxCapacity);
+
+ // Allocate the table and add the proper map.
+ const ElementsKind elements_kind = HOLEY_ELEMENTS;
+ Node* const length_intptr = IntPtrConstant(kFixedArrayLength);
+ Node* const table = AllocateFixedArray(elements_kind, length_intptr);
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(
+ length_intptr, IntPtrConstant(FixedArray::kMaxRegularLength)));
+ Heap::RootListIndex map_index = Heap::kOrderedHashTableMapRootIndex;
+  // TODO(gsathya): Directly store the correct map in AllocateFixedArray,
+  // instead of overwriting it here.
+ StoreMapNoWriteBarrier(table, map_index);
+
+ // Initialize the OrderedHashTable fields.
+ const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
+ StoreFixedArrayElement(table, CollectionType::kNumberOfElementsIndex,
+ SmiConstant(0), barrier_mode);
+ StoreFixedArrayElement(table, CollectionType::kNumberOfDeletedElementsIndex,
+ SmiConstant(0), barrier_mode);
+ StoreFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex,
+ SmiConstant(kBucketCount), barrier_mode);
+
+ // Fill the buckets with kNotFound.
+ Node* const not_found = SmiConstant(CollectionType::kNotFound);
+ STATIC_ASSERT(CollectionType::kHashTableStartIndex ==
+ CollectionType::kNumberOfBucketsIndex + 1);
+ STATIC_ASSERT((CollectionType::kHashTableStartIndex + kBucketCount) ==
+ kDataTableStartIndex);
+ for (int i = 0; i < kBucketCount; i++) {
+ StoreFixedArrayElement(table, CollectionType::kHashTableStartIndex + i,
+ not_found, barrier_mode);
+ }
+
+ // Fill the data table with undefined.
+ STATIC_ASSERT(kDataTableStartIndex + kDataTableLength == kFixedArrayLength);
+ for (int i = 0; i < kDataTableLength; i++) {
+ StoreFixedArrayElement(table, kDataTableStartIndex + i, UndefinedConstant(),
+ barrier_mode);
+ }
+
+ return table;
+}
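+// Worked example (illustrative; assumes the usual constants kMinCapacity == 4,
+// kLoadFactor == 2 and kHashTableStartIndex == 3): an OrderedHashMap
+// (kEntrySize == 2) is allocated as a FixedArray of
+//   3 (header) + 4 / 2 (buckets, kNotFound) + 4 * 2 (data, undefined) == 13
+// elements, while an OrderedHashSet (kEntrySize == 1) gets 3 + 2 + 4 == 9.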
+
+Node* CollectionsBuiltinsAssembler::AllocateJSCollection(
+ Node* js_map_function) {
+ CSA_ASSERT(this, IsConstructorMap(LoadMap(js_map_function)));
+ Node* const initial_map = LoadObjectField(
+ js_map_function, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const instance = AllocateJSObjectFromMap(initial_map);
+
+ StoreObjectFieldRoot(instance, JSMap::kTableOffset,
+ Heap::kUndefinedValueRootIndex);
+
+ return instance;
+}
+
+template <typename IteratorType>
+Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
+ Node* context, int map_index, Node* collection) {
+ Node* const table = LoadObjectField(collection, JSCollection::kTableOffset);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const iterator_map = LoadContextElement(native_context, map_index);
+ Node* const iterator = AllocateInNewSpace(IteratorType::kSize);
+ StoreMapNoWriteBarrier(iterator, iterator_map);
+ StoreObjectFieldRoot(iterator, IteratorType::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, IteratorType::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kTableOffset, table);
+ StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset,
+ SmiConstant(0));
+ return iterator;
+}
+
+TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
+ const int kIterableArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const iterable = args.GetOptionalArgumentValue(kIterableArg);
+ Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
+ Label if_target_is_undefined(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &if_target_is_undefined);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const js_map_fun =
+ LoadContextElement(native_context, Context::JS_MAP_FUN_INDEX);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ Label init(this), exit(this), if_targetisnotmodified(this),
+ if_targetismodified(this);
+ Branch(WordEqual(js_map_fun, new_target), &if_targetisnotmodified,
+ &if_targetismodified);
+
+ BIND(&if_targetisnotmodified);
+ {
+ Node* const instance = AllocateJSCollection(js_map_fun);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&if_targetismodified);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const instance = constructor_assembler.EmitFastNewObject(
+ context, js_map_fun, new_target);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&init);
+ Node* table = AllocateOrderedHashTable<OrderedHashMap>();
+ StoreObjectField(var_result.value(), JSMap::kTableOffset, table);
+
+ GotoIf(Word32Or(IsUndefined(iterable), IsNull(iterable)), &exit);
+
+ Label if_notcallable(this);
+ // TODO(gsathya): Add fast path for unmodified maps.
+ Node* const adder = GetProperty(context, var_result.value(),
+ isolate()->factory()->set_string());
+ GotoIf(TaggedIsSmi(adder), &if_notcallable);
+ GotoIfNot(IsCallable(adder), &if_notcallable);
+
+ IteratorBuiltinsAssembler iterator_assembler(this->state());
+ Node* const iterator = iterator_assembler.GetIterator(context, iterable);
+ GotoIf(IsUndefined(iterator), &exit);
+
+ Node* const fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+
+ Label loop(this), if_notobject(this), if_exception(this);
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ Node* const next = iterator_assembler.IteratorStep(
+ context, iterator, &exit, fast_iterator_result_map);
+
+ Node* const next_value = iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map);
+
+ GotoIf(TaggedIsSmi(next_value), &if_notobject);
+ GotoIfNot(IsJSReceiver(next_value), &if_notobject);
+
+ Node* const k =
+ GetProperty(context, next_value, isolate()->factory()->zero_string());
+ GotoIfException(k, &if_exception, &var_exception);
+
+ Node* const v =
+ GetProperty(context, next_value, isolate()->factory()->one_string());
+ GotoIfException(v, &if_exception, &var_exception);
+
+ Node* add_call = CallJS(CodeFactory::Call(isolate()), context, adder,
+ var_result.value(), k, v);
+ GotoIfException(add_call, &if_exception, &var_exception);
+ Goto(&loop);
+
+ BIND(&if_notobject);
+ {
+ Node* const exception = MakeTypeError(
+ MessageTemplate::kIteratorValueNotAnObject, context, next_value);
+ var_exception.Bind(exception);
+ Goto(&if_exception);
+ }
+ }
+
+ BIND(&if_exception);
+ {
+ iterator_assembler.IteratorCloseOnException(context, iterator,
+ &var_exception);
+ }
+
+ BIND(&if_notcallable);
+ {
+    Node* const receiver_str =
+        HeapConstant(isolate()->factory()->set_string());
+ ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, adder,
+ receiver_str, var_result.value());
+ }
+
+ BIND(&if_target_is_undefined);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
+ HeapConstant(isolate()->factory()->Map_string()));
+
+ BIND(&exit);
+ args.PopAndReturn(var_result.value());
+}
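+// Example (illustrative): new Map([['a', 1], ['b', 2]]) takes the path above:
+// the iterable is consumed through the iterator protocol and the (possibly
+// user-overridden) 'set' property is called once per [key, value] entry, so
+// new Map([['a', 1]]).get('a') === 1.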
+
+TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
+ const int kIterableArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const iterable = args.GetOptionalArgumentValue(kIterableArg);
+ Node* const new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
+ Label if_target_is_undefined(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &if_target_is_undefined);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const js_set_fun =
+ LoadContextElement(native_context, Context::JS_SET_FUN_INDEX);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ Label init(this), exit(this), if_targetisnotmodified(this),
+ if_targetismodified(this);
+ Branch(WordEqual(js_set_fun, new_target), &if_targetisnotmodified,
+ &if_targetismodified);
+
+ BIND(&if_targetisnotmodified);
+ {
+ Node* const instance = AllocateJSCollection(js_set_fun);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&if_targetismodified);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const instance = constructor_assembler.EmitFastNewObject(
+ context, js_set_fun, new_target);
+ var_result.Bind(instance);
+ Goto(&init);
+ }
+
+ BIND(&init);
+ Node* table = AllocateOrderedHashTable<OrderedHashSet>();
+ StoreObjectField(var_result.value(), JSSet::kTableOffset, table);
+
+ GotoIf(Word32Or(IsUndefined(iterable), IsNull(iterable)), &exit);
+
+ Label if_notcallable(this);
+ // TODO(gsathya): Add fast path for unmodified maps.
+ Node* const adder = GetProperty(context, var_result.value(),
+ isolate()->factory()->add_string());
+ GotoIf(TaggedIsSmi(adder), &if_notcallable);
+ GotoIfNot(IsCallable(adder), &if_notcallable);
+
+ IteratorBuiltinsAssembler iterator_assembler(this->state());
+ Node* const iterator = iterator_assembler.GetIterator(context, iterable);
+ GotoIf(IsUndefined(iterator), &exit);
+
+ Node* const fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+
+ Label loop(this), if_notobject(this), if_exception(this);
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ Node* const next = iterator_assembler.IteratorStep(
+ context, iterator, &exit, fast_iterator_result_map);
+
+ Node* const next_value = iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map);
+
+ Node* add_call = CallJS(CodeFactory::Call(isolate()), context, adder,
+ var_result.value(), next_value);
+
+ GotoIfException(add_call, &if_exception, &var_exception);
+ Goto(&loop);
+ }
+
+ BIND(&if_exception);
+ {
+ iterator_assembler.IteratorCloseOnException(context, iterator,
+ &var_exception);
+ }
+
+ BIND(&if_notcallable);
+ ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, adder,
+ HeapConstant(isolate()->factory()->add_string()),
+ var_result.value());
+
+ BIND(&if_target_is_undefined);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction,
+ HeapConstant(isolate()->factory()->Set_string()));
+
+ BIND(&exit);
+ args.PopAndReturn(var_result.value());
+}
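+// Example (illustrative): analogous to MapConstructor above, but using the
+// 'add' property as the adder and plain values instead of [key, value]
+// pairs, e.g. new Set([1, 2, 2]).size === 2.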
+
+Node* CollectionsBuiltinsAssembler::CallGetHashRaw(Node* const key) {
+ Node* const function_addr = ExternalConstant(
+ ExternalReference::orderedhashmap_gethash_raw(isolate()));
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+
+ MachineType type_ptr = MachineType::Pointer();
+ MachineType type_tagged = MachineType::AnyTagged();
+
+ Node* const result = CallCFunction2(type_tagged, type_ptr, type_tagged,
+ function_addr, isolate_ptr, key);
+
+ return result;
+}
+
+void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ // If the key is the same, we are done.
+ GotoIf(WordEqual(candidate_key, key_smi), if_same);
+
+  // If the candidate key is a Smi, then it must be different (because
+ // we already checked for equality above).
+ GotoIf(TaggedIsSmi(candidate_key), if_not_same);
+
+  // If the candidate key is not a Smi, we still have to check if it is a
+ // heap number with the same value.
+ GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
+
+ Node* const candidate_key_number = LoadHeapNumberValue(candidate_key);
+ Node* const key_number = SmiToFloat64(key_smi);
+
+ GotoIf(Float64Equal(candidate_key_number, key_number), if_same);
+
+ Goto(if_not_same);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
+ Node* table, Node* smi_key, Variable* entry_start_position,
+ Label* entry_found, Label* not_found) {
+ Node* const key_untagged = SmiUntag(smi_key);
+ Node* const hash =
+ ChangeInt32ToIntPtr(ComputeIntegerHash(key_untagged, Int32Constant(0)));
+ FindOrderedHashTableEntry<CollectionType>(
+ table, hash,
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroSmi(smi_key, other_key, if_same, if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
+ Node* context, Node* table, Node* key_tagged,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ Node* const hash = ComputeIntegerHashForString(context, key_tagged);
+ FindOrderedHashTableEntry<CollectionType>(
+ table, hash,
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroString(context, key_tagged, other_key, if_same,
+ if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
+ Node* context, Node* table, Node* key_heap_number,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ Node* tagged_hash = CallGetHashRaw(key_heap_number);
+ CSA_ASSERT(this, TaggedIsSmi(tagged_hash));
+ Node* const key_float = LoadHeapNumberValue(key_heap_number);
+ FindOrderedHashTableEntry<CollectionType>(
+ table, SmiUntag(tagged_hash),
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ SameValueZeroHeapNumber(key_float, other_key, if_same, if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
+ Node* context, Node* table, Node* key, Variable* entry_start_position,
+ Label* entry_found, Label* not_found) {
+ Node* tagged_hash = CallGetHashRaw(key);
+ CSA_ASSERT(this, TaggedIsSmi(tagged_hash));
+ FindOrderedHashTableEntry<CollectionType>(
+ table, SmiUntag(tagged_hash),
+ [&](Node* other_key, Label* if_same, Label* if_not_same) {
+ Branch(WordEqual(key, other_key), if_same, if_not_same);
+ },
+ entry_start_position, entry_found, not_found);
+}
+
+Node* CollectionsBuiltinsAssembler::ComputeIntegerHashForString(
+ Node* context, Node* string_key) {
+ VARIABLE(var_result, MachineType::PointerRepresentation());
+
+ Label hash_not_computed(this), done(this, &var_result);
+ Node* hash =
+ ChangeInt32ToIntPtr(LoadNameHash(string_key, &hash_not_computed));
+ var_result.Bind(hash);
+ Goto(&done);
+
+ BIND(&hash_not_computed);
+ Node* tagged_hash = CallGetHashRaw(string_key);
+ CSA_ASSERT(this, TaggedIsSmi(tagged_hash));
+ var_result.Bind(SmiUntag(tagged_hash));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
+void CollectionsBuiltinsAssembler::SameValueZeroString(Node* context,
+ Node* key_string,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ // If the candidate is not a string, the keys are not equal.
+ GotoIf(TaggedIsSmi(candidate_key), if_not_same);
+ GotoIfNot(IsString(candidate_key), if_not_same);
+
+ Branch(WordEqual(CallBuiltin(Builtins::kStringEqual, context, key_string,
+ candidate_key),
+ TrueConstant()),
+ if_same, if_not_same);
+}
+
+void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(Node* key_float,
+ Node* candidate_key,
+ Label* if_same,
+ Label* if_not_same) {
+ Label if_smi(this), if_keyisnan(this);
+
+  // If the candidate is neither a Smi nor a heap number, the keys are not
+  // equal.
+ GotoIf(TaggedIsSmi(candidate_key), &if_smi);
+ GotoIfNot(IsHeapNumber(candidate_key), if_not_same);
+
+ {
+ // {candidate_key} is a heap number.
+ Node* const candidate_float = LoadHeapNumberValue(candidate_key);
+ GotoIf(Float64Equal(key_float, candidate_float), if_same);
+
+ // SameValueZero needs to treat NaNs as equal. First check if {key_float}
+ // is NaN.
+ BranchIfFloat64IsNaN(key_float, &if_keyisnan, if_not_same);
+
+ BIND(&if_keyisnan);
+ {
+ // Return true iff {candidate_key} is NaN.
+ Branch(Float64Equal(candidate_float, candidate_float), if_not_same,
+ if_same);
+ }
+ }
+
+ BIND(&if_smi);
+ {
+ Node* const candidate_float = SmiToFloat64(candidate_key);
+ Branch(Float64Equal(key_float, candidate_float), if_same, if_not_same);
+ }
+}
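+// SameValueZero is the key equality used by Map and Set: it matches === except
+// that NaN equals NaN (handled above), while +0 and -0 remain equal; e.g.
+// new Set([NaN, NaN]).size === 1 and new Set([+0, -0]).size === 1.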
+
+template <typename CollectionType>
+void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
+ Node* table, Node* hash,
+ std::function<void(Node*, Label*, Label*)> key_compare,
+ Variable* entry_start_position, Label* entry_found, Label* not_found) {
+ // Get the index of the bucket.
+ Node* const number_of_buckets = SmiUntag(
+ LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex));
+ Node* const bucket =
+ WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
+ Node* const first_entry = SmiUntag(LoadFixedArrayElement(
+ table, bucket, CollectionType::kHashTableStartIndex * kPointerSize));
+
+ // Walk the bucket chain.
+ {
+ VARIABLE(var_entry, MachineType::PointerRepresentation(), first_entry);
+ Label loop(this, {&var_entry, entry_start_position}),
+ continue_next_entry(this);
+ Goto(&loop);
+ BIND(&loop);
+
+ // If the entry index is the not-found sentinel, we are done.
+ GotoIf(
+ WordEqual(var_entry.value(), IntPtrConstant(CollectionType::kNotFound)),
+ not_found);
+
+ // Make sure the entry index is within range.
+ CSA_ASSERT(
+ this,
+ UintPtrLessThan(
+ var_entry.value(),
+ SmiUntag(SmiAdd(
+ LoadFixedArrayElement(table,
+ CollectionType::kNumberOfElementsIndex),
+ LoadFixedArrayElement(
+ table, CollectionType::kNumberOfDeletedElementsIndex)))));
+
+ // Compute the index of the entry relative to kHashTableStartIndex.
+ Node* entry_start =
+ IntPtrAdd(IntPtrMul(var_entry.value(),
+ IntPtrConstant(CollectionType::kEntrySize)),
+ number_of_buckets);
+ entry_start_position->Bind(entry_start);
+
+ // Load the key from the entry.
+ Node* const candidate_key = LoadFixedArrayElement(
+ table, entry_start,
+ CollectionType::kHashTableStartIndex * kPointerSize);
+
+ key_compare(candidate_key, entry_found, &continue_next_entry);
+
+ BIND(&continue_next_entry);
+ // Load the index of the next entry in the bucket chain.
+ var_entry.Bind(SmiUntag(LoadFixedArrayElement(
+ table, entry_start,
+ (CollectionType::kHashTableStartIndex + CollectionType::kChainOffset) *
+ kPointerSize)));
+
+ Goto(&loop);
+ }
+}
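+// In short: the bucket is hash & (number_of_buckets - 1), each bucket slot
+// holds the index of the first entry in its chain, an entry's slots start at
+// kHashTableStartIndex + number_of_buckets + entry * kEntrySize, and the
+// entry's kChainOffset slot points at the next entry sharing the bucket,
+// with kNotFound terminating the chain.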
+
+TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
+ Node* table = Parameter(Descriptor::kTable);
+ Node* index = Parameter(Descriptor::kIndex);
+ CSA_ASSERT(this, TaggedIsNotSmi(table));
+ CSA_ASSERT(this, TaggedIsSmi(index));
+ Label return_index(this), return_zero(this);
+
+ // Check if we need to update the {index}.
+ GotoIfNot(SmiLessThan(SmiConstant(Smi::kZero), index), &return_zero);
+
+ // Check if the {table} was cleared.
+ Node* number_of_deleted_elements = LoadAndUntagObjectField(
+ table, OrderedHashTableBase::kNumberOfDeletedElementsOffset);
+ GotoIf(WordEqual(number_of_deleted_elements,
+ IntPtrConstant(OrderedHashTableBase::kClearedTableSentinel)),
+ &return_zero);
+
+ VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_index, MachineRepresentation::kTagged, index);
+ Label loop(this, {&var_i, &var_index});
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* i = var_i.value();
+ GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index);
+ Node* removed_index = LoadFixedArrayElement(
+ table, i, OrderedHashTableBase::kRemovedHolesIndex * kPointerSize);
+ GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index);
+ Decrement(var_index, 1, SMI_PARAMETERS);
+ Increment(var_i);
+ Goto(&loop);
+ }
+
+ BIND(&return_index);
+ Return(var_index.value());
+
+ BIND(&return_zero);
+ Return(SmiConstant(Smi::kZero));
+}
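+// Worked example (illustrative): if the old table recorded removed entries at
+// positions 1 and 3 and an iterator was at index 4, the loop above subtracts
+// one per removed position below the original index, so the healed index is
+// 4 - 2 == 2; a cleared table (kClearedTableSentinel) always heals to 0.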
+
+template <typename TableType>
+std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
+ Node* const table, Node* const index,
+ UpdateInTransition const& update_in_transition) {
+ VARIABLE(var_index, MachineType::PointerRepresentation(), index);
+ VARIABLE(var_table, MachineRepresentation::kTagged, table);
+ Label if_done(this), if_transition(this, Label::kDeferred);
+ Branch(TaggedIsSmi(
+ LoadObjectField(var_table.value(), TableType::kNextTableOffset)),
+ &if_done, &if_transition);
+
+ BIND(&if_transition);
+ {
+ Label loop(this, {&var_table, &var_index}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* table = var_table.value();
+ Node* index = var_index.value();
+
+ Node* next_table = LoadObjectField(table, TableType::kNextTableOffset);
+ GotoIf(TaggedIsSmi(next_table), &done_loop);
+
+ var_table.Bind(next_table);
+ var_index.Bind(
+ SmiUntag(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
+ NoContextConstant(), table, SmiTag(index))));
+ Goto(&loop);
+ }
+ BIND(&done_loop);
+
+ // Update with the new {table} and {index}.
+ update_in_transition(var_table.value(), var_index.value());
+ Goto(&if_done);
+ }
+
+ BIND(&if_done);
+ return std::tuple<Node*, Node*>(var_table.value(), var_index.value());
+}
+
+template <typename IteratorType, typename TableType>
+std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::TransitionAndUpdate(
+ Node* const iterator) {
+ return Transition<TableType>(
+ LoadObjectField(iterator, IteratorType::kTableOffset),
+ LoadAndUntagObjectField(iterator, IteratorType::kIndexOffset),
+ [this, iterator](Node* const table, Node* const index) {
+ // Update the {iterator} with the new state.
+ StoreObjectField(iterator, IteratorType::kTableOffset, table);
+ StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset,
+ SmiTag(index));
+ });
+}
+
+template <typename TableType>
+std::tuple<Node*, Node*, Node*> CollectionsBuiltinsAssembler::NextSkipHoles(
+ Node* table, Node* index, Label* if_end) {
+ // Compute the used capacity for the {table}.
+ Node* number_of_buckets =
+ LoadAndUntagObjectField(table, TableType::kNumberOfBucketsOffset);
+ Node* number_of_elements =
+ LoadAndUntagObjectField(table, TableType::kNumberOfElementsOffset);
+ Node* number_of_deleted_elements =
+ LoadAndUntagObjectField(table, TableType::kNumberOfDeletedElementsOffset);
+ Node* used_capacity =
+ IntPtrAdd(number_of_elements, number_of_deleted_elements);
+
+ Node* entry_key;
+ Node* entry_start_position;
+ VARIABLE(var_index, MachineType::PointerRepresentation(), index);
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ GotoIfNot(IntPtrLessThan(var_index.value(), used_capacity), if_end);
+ entry_start_position = IntPtrAdd(
+ IntPtrMul(var_index.value(), IntPtrConstant(TableType::kEntrySize)),
+ number_of_buckets);
+ entry_key =
+ LoadFixedArrayElement(table, entry_start_position,
+ TableType::kHashTableStartIndex * kPointerSize);
+ Increment(var_index);
+ Branch(IsTheHole(entry_key), &loop, &done_loop);
+ }
+
+ BIND(&done_loop);
+ return std::tuple<Node*, Node*, Node*>(entry_key, entry_start_position,
+ var_index.value());
+}
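+// Deleted entries leave the_hole in their key slot; NextSkipHoles skips them
+// and returns the first live key, its entry start position (relative to
+// kHashTableStartIndex) and the index from which iteration should resume.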
+
+TF_BUILTIN(MapGet, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
+
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ Node* index = CallBuiltin(Builtins::kMapLookupHashIndex, context, table, key);
+
+ Label if_found(this), if_not_found(this);
+ Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found,
+ &if_not_found);
+
+ BIND(&if_found);
+ Return(LoadFixedArrayElement(table, SmiUntag(index)));
+
+ BIND(&if_not_found);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(MapHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
+
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ Node* index = CallBuiltin(Builtins::kMapLookupHashIndex, context, table, key);
+
+ Label if_found(this), if_not_found(this);
+ Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found,
+ &if_not_found);
+
+ BIND(&if_found);
+ Return(TrueConstant());
+
+ BIND(&if_not_found);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
+ "Map.prototype.entries");
+ Return(AllocateJSCollectionIterator<JSMapIterator>(
+ context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
+ "get Map.prototype.size");
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ Return(LoadObjectField(table, OrderedHashMap::kNumberOfElementsOffset));
+}
+
+TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Map.prototype.forEach";
+ Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* const receiver = args.GetReceiver();
+ Node* const callback = args.GetOptionalArgumentValue(0);
+ Node* const this_arg = args.GetOptionalArgumentValue(1);
+
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, kMethodName);
+
+ // Ensure that {callback} is actually callable.
+ Label callback_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(callback), &callback_not_callable);
+ GotoIfNot(IsCallable(callback), &callback_not_callable);
+
+ VARIABLE(var_index, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_table, MachineRepresentation::kTagged,
+ LoadObjectField(receiver, JSMap::kTableOffset));
+ Label loop(this, {&var_index, &var_table}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Transition {table} and {index} if there was any modification to
+ // the {receiver} while we're iterating.
+ Node* index = var_index.value();
+ Node* table = var_table.value();
+ std::tie(table, index) =
+ Transition<OrderedHashMap>(table, index, [](Node*, Node*) {});
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashMap>(table, index, &done_loop);
+
+ // Load the entry value as well.
+ Node* entry_value = LoadFixedArrayElement(
+ table, entry_start_position,
+ (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
+ kPointerSize);
+
+ // Invoke the {callback} passing the {entry_key}, {entry_value} and the
+ // {receiver}.
+ CallJS(CodeFactory::Call(isolate()), context, callback, this_arg,
+ entry_value, entry_key, receiver);
+
+ // Continue with the next entry.
+ var_index.Bind(index);
+ var_table.Bind(table);
+ Goto(&loop);
+ }
+
+ BIND(&done_loop);
+ args.PopAndReturn(UndefinedConstant());
+
+ BIND(&callback_not_callable);
+ {
+ CallRuntime(Runtime::kThrowCalledNonCallable, context, callback);
+ Unreachable();
+ }
+}
+
+TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys");
+ Return(AllocateJSCollectionIterator<JSMapIterator>(
+ context, Context::MAP_KEY_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
+ "Map.prototype.values");
+ Return(AllocateJSCollectionIterator<JSMapIterator>(
+ context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Map Iterator.prototype.next";
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ // Ensure that the {receiver} is actually a JSMapIterator.
+ Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
+ Node* const receiver_instance_type = LoadInstanceType(receiver);
+ GotoIf(
+ InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid);
+ GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE),
+ &if_receiver_valid);
+ Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid, &if_receiver_invalid);
+ BIND(&if_receiver_invalid);
+ ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ BIND(&if_receiver_valid);
+
+ // Check if the {receiver} is exhausted.
+ VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
+ VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ Label return_value(this, {&var_done, &var_value}), return_entry(this),
+ return_end(this, Label::kDeferred);
+
+ // Transition the {receiver} table if necessary.
+ Node* table;
+ Node* index;
+ std::tie(table, index) =
+ TransitionAndUpdate<JSMapIterator, OrderedHashMap>(receiver);
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashMap>(table, index, &return_end);
+ StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset,
+ SmiTag(index));
+ var_value.Bind(entry_key);
+ var_done.Bind(FalseConstant());
+
+ // Check how to return the {key} (depending on {receiver} type).
+ GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE),
+ &return_value);
+ var_value.Bind(LoadFixedArrayElement(
+ table, entry_start_position,
+ (OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
+ kPointerSize));
+ Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
+ &return_value, &return_entry);
+
+ BIND(&return_entry);
+ {
+ Node* result =
+ AllocateJSIteratorResultForEntry(context, entry_key, var_value.value());
+ Return(result);
+ }
+
+ BIND(&return_value);
+ {
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
+ Return(result);
+ }
+
+ BIND(&return_end);
+ {
+ StoreObjectFieldRoot(receiver, JSMapIterator::kTableOffset,
+ Heap::kEmptyOrderedHashTableRootIndex);
+ Goto(&return_value);
+ }
+}
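+// Example (illustrative): for m = new Map([['k', 'v']]),
+// m.entries().next() yields { value: ['k', 'v'], done: false },
+// m.keys().next() yields { value: 'k', done: false }, and once the iterator
+// is exhausted every further next() yields { value: undefined, done: true }.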
+
+TF_BUILTIN(SetHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
+
+ Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+
+ VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+ VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0));
+ Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
+ entry_found(this), not_found(this), done(this);
+
+ GotoIf(TaggedIsSmi(key), &if_key_smi);
+ GotoIf(IsString(key), &if_key_string);
+ GotoIf(IsHeapNumber(key), &if_key_heap_number);
+
+ FindOrderedHashTableEntryForOtherKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+
+ BIND(&if_key_smi);
+ {
+ FindOrderedHashTableEntryForSmiKey<OrderedHashSet>(
+ table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_string);
+ {
+ FindOrderedHashTableEntryForStringKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_heap_number);
+ {
+ FindOrderedHashTableEntryForHeapNumberKey<OrderedHashSet>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&entry_found);
+ Return(TrueConstant());
+
+ BIND(&not_found);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
+ "Set.prototype.entries");
+ Return(AllocateJSCollectionIterator<JSSetIterator>(
+ context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
+ "get Set.prototype.size");
+ Node* const table = LoadObjectField(receiver, JSSet::kTableOffset);
+ Return(LoadObjectField(table, OrderedHashSet::kNumberOfElementsOffset));
+}
+
+TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Set.prototype.forEach";
+ Node* const argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* const receiver = args.GetReceiver();
+ Node* const callback = args.GetOptionalArgumentValue(0);
+ Node* const this_arg = args.GetOptionalArgumentValue(1);
+
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, kMethodName);
+
+ // Ensure that {callback} is actually callable.
+ Label callback_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(callback), &callback_not_callable);
+ GotoIfNot(IsCallable(callback), &callback_not_callable);
+
+ VARIABLE(var_index, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_table, MachineRepresentation::kTagged,
+ LoadObjectField(receiver, JSSet::kTableOffset));
+ Label loop(this, {&var_index, &var_table}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Transition {table} and {index} if there was any modification to
+ // the {receiver} while we're iterating.
+ Node* index = var_index.value();
+ Node* table = var_table.value();
+ std::tie(table, index) =
+ Transition<OrderedHashSet>(table, index, [](Node*, Node*) {});
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashSet>(table, index, &done_loop);
+
+ // Invoke the {callback} passing the {entry_key} (twice) and the {receiver}.
+ CallJS(CodeFactory::Call(isolate()), context, callback, this_arg, entry_key,
+ entry_key, receiver);
+
+ // Continue with the next entry.
+ var_index.Bind(index);
+ var_table.Bind(table);
+ Goto(&loop);
+ }
+
+ BIND(&done_loop);
+ args.PopAndReturn(UndefinedConstant());
+
+ BIND(&callback_not_callable);
+ {
+ CallRuntime(Runtime::kThrowCalledNonCallable, context, callback);
+ Unreachable();
+ }
+}
+
+TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
+ "Set.prototype.values");
+ Return(AllocateJSCollectionIterator<JSSetIterator>(
+ context, Context::SET_VALUE_ITERATOR_MAP_INDEX, receiver));
+}
+
+TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
+ const char* const kMethodName = "Set Iterator.prototype.next";
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ // Ensure that the {receiver} is actually a JSSetIterator.
+ Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid);
+ Node* const receiver_instance_type = LoadInstanceType(receiver);
+ GotoIf(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid);
+ Branch(
+ InstanceTypeEqual(receiver_instance_type, JS_SET_KEY_VALUE_ITERATOR_TYPE),
+ &if_receiver_valid, &if_receiver_invalid);
+ BIND(&if_receiver_invalid);
+ ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ BIND(&if_receiver_valid);
+
+ // Check if the {receiver} is exhausted.
+ VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
+ VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
+ Label return_value(this, {&var_done, &var_value}), return_entry(this),
+ return_end(this, Label::kDeferred);
+
+ // Transition the {receiver} table if necessary.
+ Node* table;
+ Node* index;
+ std::tie(table, index) =
+ TransitionAndUpdate<JSSetIterator, OrderedHashSet>(receiver);
+
+ // Read the next entry from the {table}, skipping holes.
+ Node* entry_key;
+ Node* entry_start_position;
+ std::tie(entry_key, entry_start_position, index) =
+ NextSkipHoles<OrderedHashSet>(table, index, &return_end);
+ StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset,
+ SmiTag(index));
+ var_value.Bind(entry_key);
+ var_done.Bind(FalseConstant());
+
+ // Check how to return the {key} (depending on {receiver} type).
+ Branch(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE),
+ &return_value, &return_entry);
+
+ BIND(&return_entry);
+ {
+ Node* result = AllocateJSIteratorResultForEntry(context, var_value.value(),
+ var_value.value());
+ Return(result);
+ }
+
+ BIND(&return_value);
+ {
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
+ Return(result);
+ }
+
+ BIND(&return_end);
+ {
+ StoreObjectFieldRoot(receiver, JSSetIterator::kTableOffset,
+ Heap::kEmptyOrderedHashTableRootIndex);
+ Goto(&return_value);
+ }
+}
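A similarly hedged sketch of the three exits of SetIteratorPrototypeNext: an exhausted iterator reports done (after its table has been detached), a values/keys iterator yields the key itself, and an entries iterator yields the key paired with itself. All names below are illustrative.

    #include <optional>
    #include <utility>

    struct SetIterResultModel {
      std::optional<int> value;                  // key, for values()/keys().
      std::optional<std::pair<int, int>> entry;  // [key, key], for entries().
      bool done;
    };

    SetIterResultModel SetIteratorNextModel(std::optional<int> next_live_key,
                                            bool is_key_value_iterator) {
      if (!next_live_key) {
        return {std::nullopt, std::nullopt, true};  // return_end: exhausted.
      }
      if (is_key_value_iterator) {                  // return_entry
        return {std::nullopt,
                std::make_pair(*next_live_key, *next_live_key), false};
      }
      return {*next_live_key, std::nullopt, false};  // return_value
    }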
+
+TF_BUILTIN(MapLookupHashIndex, CollectionsBuiltinsAssembler) {
+ Node* const table = Parameter(Descriptor::kTable);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ VARIABLE(entry_start_position, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+ VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0));
+ Label if_key_smi(this), if_key_string(this), if_key_heap_number(this),
+ entry_found(this), not_found(this), done(this);
+
+ GotoIf(TaggedIsSmi(key), &if_key_smi);
+ GotoIf(IsString(key), &if_key_string);
+ GotoIf(IsHeapNumber(key), &if_key_heap_number);
+
+ FindOrderedHashTableEntryForOtherKey<OrderedHashMap>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+
+ BIND(&if_key_smi);
+ {
+ FindOrderedHashTableEntryForSmiKey<OrderedHashMap>(
+ table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_string);
+ {
+ FindOrderedHashTableEntryForStringKey<OrderedHashMap>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&if_key_heap_number);
+ {
+ FindOrderedHashTableEntryForHeapNumberKey<OrderedHashMap>(
+ context, table, key, &entry_start_position, &entry_found, &not_found);
+ }
+
+ BIND(&entry_found);
+ Node* index = IntPtrAdd(entry_start_position.value(),
+ IntPtrConstant(OrderedHashMap::kHashTableStartIndex +
+ OrderedHashMap::kValueOffset));
+ result.Bind(SmiTag(index));
+ Goto(&done);
+
+ BIND(&not_found);
+ result.Bind(SmiConstant(-1));
+ Goto(&done);
+
+ BIND(&done);
+ Return(result.value());
+}
+
+TF_BUILTIN(WeakMapLookupHashIndex, CollectionsBuiltinsAssembler) {
+ Node* const table = Parameter(Descriptor::kTable);
+ Node* const key = Parameter(Descriptor::kKey);
+
+ Label if_found(this), if_not_found(this);
+
+ Node* const capacity =
+ SmiUntag(LoadFixedArrayElement(table, WeakHashTable::kCapacityIndex));
+ Node* const mask = IntPtrSub(capacity, IntPtrConstant(1));
+
+ Node* const hash = SmiUntag(CallGetHashRaw(key));
+
+ GotoIf(IntPtrLessThan(hash, IntPtrConstant(0)), &if_not_found);
+
+ // See HashTable::FirstProbe().
+ Node* entry = WordAnd(hash, mask);
+
+ VARIABLE(var_count, MachineType::PointerRepresentation(), IntPtrConstant(0));
+ VARIABLE(var_entry, MachineType::PointerRepresentation(), entry);
+ Variable* loop_vars[] = {&var_count, &var_entry};
+ Label loop(this, arraysize(loop_vars), loop_vars);
+ Goto(&loop);
+ BIND(&loop);
+ Node* index;
+ {
+ Node* entry = var_entry.value();
+
+ index = IntPtrMul(entry, IntPtrConstant(WeakHashTable::kEntrySize));
+ index =
+ IntPtrAdd(index, IntPtrConstant(WeakHashTable::kElementsStartIndex));
+
+ Node* current = LoadFixedArrayElement(table, index);
+ GotoIf(WordEqual(current, UndefinedConstant()), &if_not_found);
+ GotoIf(WordEqual(current, key), &if_found);
+
+ // See HashTable::NextProbe().
+ Increment(var_count);
+ entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
+
+ var_entry.Bind(entry);
+ Goto(&loop);
+ }
+
+ BIND(&if_not_found);
+ Return(SmiConstant(-1));
+
+ BIND(&if_found);
+ Return(SmiTag(IntPtrAdd(index, IntPtrConstant(1))));
+}
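The lookup above follows V8's usual open-addressed probe sequence. A standalone C++ sketch of the same arithmetic, with the WeakHashTable layout constants kept as parameters because their concrete values are not part of this hunk:

    #include <cstdint>
    #include <vector>

    // Mirrors WeakMapLookupHashIndex: first probe is hash & mask, then
    // entry = (entry + ++count) & mask (triangular-number probing). Returns
    // the element index of the key plus one (the value slot), or -1. Assumes
    // the table is a power-of-two capacity and always keeps an empty slot.
    intptr_t WeakTableLookupModel(const std::vector<const void*>& table,
                                  intptr_t capacity, intptr_t entry_size,
                                  intptr_t elements_start, intptr_t hash,
                                  const void* key) {
      if (hash < 0) return -1;              // no identity hash => not found.
      const intptr_t mask = capacity - 1;
      intptr_t entry = hash & mask;         // HashTable::FirstProbe().
      for (intptr_t count = 0;;) {
        intptr_t index = entry * entry_size + elements_start;
        const void* current = table[index];
        if (current == nullptr) return -1;  // undefined sentinel => not found.
        if (current == key) return index + 1;
        ++count;                            // HashTable::NextProbe().
        entry = (entry + count) & mask;
      }
    }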
+
+TF_BUILTIN(WeakMapGet, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label return_undefined(this);
+
+ ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
+ "WeakMap.prototype.get");
+
+ GotoIf(TaggedIsSmi(key), &return_undefined);
+ GotoIfNot(IsJSReceiver(key), &return_undefined);
+
+ Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+
+ Node* const index =
+ CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+
+ GotoIf(WordEqual(index, SmiConstant(-1)), &return_undefined);
+
+ Return(LoadFixedArrayElement(table, SmiUntag(index)));
+
+ BIND(&return_undefined);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(WeakMapHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label return_false(this);
+
+  ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
+                         "WeakMap.prototype.has");
+
+ GotoIf(TaggedIsSmi(key), &return_false);
+ GotoIfNot(IsJSReceiver(key), &return_false);
+
+ Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+
+ Node* const index =
+ CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+
+ GotoIf(WordEqual(index, SmiConstant(-1)), &return_false);
+
+ Return(TrueConstant());
+
+ BIND(&return_false);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(WeakSetHas, CollectionsBuiltinsAssembler) {
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const key = Parameter(Descriptor::kKey);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label return_false(this);
+
+  ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
+                         "WeakSet.prototype.has");
+
+ GotoIf(TaggedIsSmi(key), &return_false);
+ GotoIfNot(IsJSReceiver(key), &return_false);
+
+ Node* const table = LoadObjectField(receiver, JSWeakCollection::kTableOffset);
+
+ Node* const index =
+ CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+
+ GotoIf(WordEqual(index, SmiConstant(-1)), &return_false);
+
+ Return(TrueConstant());
+
+ BIND(&return_false);
+ Return(FalseConstant());
+}
+
+} // namespace internal
+} // namespace v8
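The three weak-collection wrappers above share one pattern: validate the receiver, reject primitive keys, call WeakMapLookupHashIndex, and interpret its -1 sentinel. A small C++ model of that consumer side, with LookupHashIndexModel standing in for the builtin over a simplified flat key/value layout:

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Stand-in for Builtins::kWeakMapLookupHashIndex over a flat
    // [key0, value0, key1, value1, ...] layout: returns the element index of
    // the value slot for |key|, or -1 when the key is absent.
    intptr_t LookupHashIndexModel(const std::vector<const void*>& table,
                                  const void* key) {
      for (size_t i = 0; i + 1 < table.size(); i += 2) {
        if (table[i] == key) return static_cast<intptr_t>(i) + 1;
      }
      return -1;
    }

    // WeakMap.prototype.get: -1 becomes undefined, otherwise load the value slot.
    std::optional<const void*> WeakMapGetModel(
        const std::vector<const void*>& table, const void* key) {
      intptr_t index = LookupHashIndexModel(table, key);
      if (index == -1) return std::nullopt;
      return table[index];
    }

    // WeakMap.prototype.has and WeakSet.prototype.has reuse the same lookup
    // and only test the -1 sentinel.
    bool WeakCollectionHasModel(const std::vector<const void*>& table,
                                const void* key) {
      return LookupHashIndexModel(table, key) != -1;
    }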
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
new file mode 100644
index 0000000000..0497eaaac1
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(MapClear) {
+ HandleScope scope(isolate);
+ const char* const kMethodName = "Map.prototype.clear";
+ CHECK_RECEIVER(JSMap, map, kMethodName);
+ JSMap::Clear(map);
+ return isolate->heap()->undefined_value();
+}
+
+BUILTIN(SetClear) {
+ HandleScope scope(isolate);
+ const char* const kMethodName = "Set.prototype.clear";
+ CHECK_RECEIVER(JSSet, set, kMethodName);
+ JSSet::Clear(set);
+ return isolate->heap()->undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index a43fe136d0..c3a7bd6557 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -14,45 +14,114 @@ namespace internal {
// -----------------------------------------------------------------------------
// Console
-#define CONSOLE_METHOD_LIST(V) \
- V(Debug) \
- V(Error) \
- V(Info) \
- V(Log) \
- V(Warn) \
- V(Dir) \
- V(DirXml) \
- V(Table) \
- V(Trace) \
- V(Group) \
- V(GroupCollapsed) \
- V(GroupEnd) \
- V(Clear) \
- V(Count) \
- V(Assert) \
- V(MarkTimeline) \
- V(Profile) \
- V(ProfileEnd) \
- V(Timeline) \
- V(TimelineEnd) \
- V(Time) \
- V(TimeEnd) \
- V(TimeStamp)
-
-#define CONSOLE_BUILTIN_IMPLEMENTATION(name) \
- BUILTIN(Console##name) { \
- HandleScope scope(isolate); \
- if (isolate->console_delegate()) { \
- debug::ConsoleCallArguments wrapper(args); \
- isolate->console_delegate()->name(wrapper); \
- CHECK(!isolate->has_pending_exception()); \
- CHECK(!isolate->has_scheduled_exception()); \
- } \
- return isolate->heap()->undefined_value(); \
+#define CONSOLE_METHOD_LIST(V) \
+ V(Debug, debug) \
+ V(Error, error) \
+ V(Info, info) \
+ V(Log, log) \
+ V(Warn, warn) \
+ V(Dir, dir) \
+ V(DirXml, dirXml) \
+ V(Table, table) \
+ V(Trace, trace) \
+ V(Group, group) \
+ V(GroupCollapsed, groupCollapsed) \
+ V(GroupEnd, groupEnd) \
+ V(Clear, clear) \
+ V(Count, count) \
+ V(Assert, assert) \
+ V(MarkTimeline, markTimeline) \
+ V(Profile, profile) \
+ V(ProfileEnd, profileEnd) \
+ V(Timeline, timeline) \
+ V(TimelineEnd, timelineEnd) \
+ V(Time, time) \
+ V(TimeEnd, timeEnd) \
+ V(TimeStamp, timeStamp)
+
+namespace {
+void ConsoleCall(
+ Isolate* isolate, internal::BuiltinArguments& args,
+ void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext&)) {
+ HandleScope scope(isolate);
+ if (!isolate->console_delegate()) return;
+ debug::ConsoleCallArguments wrapper(args);
+ Handle<Object> context_id_obj = JSObject::GetDataProperty(
+ args.target(), isolate->factory()->console_context_id_symbol());
+ int context_id =
+ context_id_obj->IsSmi() ? Handle<Smi>::cast(context_id_obj)->value() : 0;
+ Handle<Object> context_name_obj = JSObject::GetDataProperty(
+ args.target(), isolate->factory()->console_context_name_symbol());
+ Handle<String> context_name = context_name_obj->IsString()
+ ? Handle<String>::cast(context_name_obj)
+ : isolate->factory()->anonymous_string();
+ (isolate->console_delegate()->*func)(
+ wrapper,
+ v8::debug::ConsoleContext(context_id, Utils::ToLocal(context_name)));
+ CHECK(!isolate->has_pending_exception());
+ CHECK(!isolate->has_scheduled_exception());
+}
+} // namespace
+
+#define CONSOLE_BUILTIN_IMPLEMENTATION(call, name) \
+ BUILTIN(Console##call) { \
+ ConsoleCall(isolate, args, &debug::ConsoleDelegate::call); \
+ return isolate->heap()->undefined_value(); \
}
CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
#undef CONSOLE_BUILTIN_IMPLEMENTATION
+namespace {
+void InstallContextFunction(Handle<JSObject> target, const char* name,
+ Builtins::Name call, int context_id,
+ Handle<Object> context_name) {
+ Factory* const factory = target->GetIsolate()->factory();
+
+ Handle<Code> call_code(target->GetIsolate()->builtins()->builtin(call));
+
+ Handle<String> name_string =
+ Name::ToFunctionName(factory->InternalizeUtf8String(name))
+ .ToHandleChecked();
+ Handle<JSFunction> fun =
+ factory->NewFunctionWithoutPrototype(name_string, call_code, SLOPPY);
+ fun->shared()->set_native(true);
+ fun->shared()->DontAdaptArguments();
+ fun->shared()->set_length(1);
+
+ JSObject::AddProperty(fun, factory->console_context_id_symbol(),
+ handle(Smi::FromInt(context_id), target->GetIsolate()),
+ NONE);
+ if (context_name->IsString()) {
+ JSObject::AddProperty(fun, factory->console_context_name_symbol(),
+ context_name, NONE);
+ }
+ JSObject::AddProperty(target, name_string, fun, NONE);
+}
+} // namespace
+
+BUILTIN(ConsoleContext) {
+ HandleScope scope(isolate);
+
+ Factory* const factory = isolate->factory();
+ Handle<String> name = factory->InternalizeUtf8String("Context");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
+ JSFunction::SetPrototype(cons, empty);
+ Handle<JSObject> context = factory->NewJSObject(cons, TENURED);
+ DCHECK(context->IsJSObject());
+ int id = isolate->last_console_context_id() + 1;
+ isolate->set_last_console_context_id(id);
+
+#define CONSOLE_BUILTIN_SETUP(call, name) \
+ InstallContextFunction(context, #name, Builtins::kConsole##call, id, \
+ args.at(1));
+ CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_SETUP)
+#undef CONSOLE_BUILTIN_SETUP
+
+ return *context;
+}
+
#undef CONSOLE_METHOD_LIST
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 1769e65e83..d7a2f8e34e 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/ast/ast.h"
+#include "src/builtins/builtins-call-gen.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
@@ -17,17 +18,73 @@
namespace v8 {
namespace internal {
+void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
+ Generate_CallOrConstructVarargs(masm,
+ masm->isolate()->builtins()->Construct());
+}
+
void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm, masm->isolate()->builtins()->Construct());
+ Generate_CallOrConstructForwardVarargs(
+ masm, masm->isolate()->builtins()->Construct());
}
void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
- Generate_ForwardVarargs(masm,
- masm->isolate()->builtins()->ConstructFunction());
+ Generate_CallOrConstructForwardVarargs(
+ masm, masm->isolate()->builtins()->ConstructFunction());
+}
+
+TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(ConstructWithArrayLikeDescriptor::kTarget);
+ Node* new_target = Parameter(ConstructWithArrayLikeDescriptor::kNewTarget);
+ Node* arguments_list =
+ Parameter(ConstructWithArrayLikeDescriptor::kArgumentsList);
+ Node* context = Parameter(ConstructWithArrayLikeDescriptor::kContext);
+ CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
+}
+
+TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
+ Node* target = Parameter(ConstructWithSpreadDescriptor::kTarget);
+ Node* new_target = Parameter(ConstructWithSpreadDescriptor::kNewTarget);
+ Node* spread = Parameter(ConstructWithSpreadDescriptor::kSpread);
+ Node* args_count = Parameter(ConstructWithSpreadDescriptor::kArgumentsCount);
+ Node* context = Parameter(ConstructWithSpreadDescriptor::kContext);
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
typedef compiler::Node Node;
+Node* ConstructorBuiltinsAssembler::CopyFixedArrayBase(Node* fixed_array) {
+ Label if_fixed_array(this), if_fixed_double_array(this), done(this);
+ VARIABLE(result, MachineRepresentation::kTagged);
+ Node* capacity = LoadAndUntagFixedArrayBaseLength(fixed_array);
+ Branch(IsFixedDoubleArrayMap(LoadMap(fixed_array)), &if_fixed_double_array,
+ &if_fixed_array);
+ BIND(&if_fixed_double_array);
+ {
+ ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
+ Node* copy = AllocateFixedArray(kind, capacity);
+ CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
+ SKIP_WRITE_BARRIER);
+ result.Bind(copy);
+ Goto(&done);
+ }
+
+ BIND(&if_fixed_array);
+ {
+ ElementsKind kind = PACKED_ELEMENTS;
+ Node* copy = AllocateFixedArray(kind, capacity);
+ CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
+ UPDATE_WRITE_BARRIER);
+ result.Bind(copy);
+ Goto(&done);
+ }
+ BIND(&done);
+ // Manually copy over the map of the incoming array to preserve the elements
+ // kind.
+ StoreMap(result.value(), LoadMap(fixed_array));
+ return result.value();
+}
+
Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Node* feedback_vector,
Node* slot,
@@ -36,107 +93,36 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Factory* factory = isolate->factory();
IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
- // Create a new closure from the given function info in new space
- Node* result = Allocate(JSFunction::kSize);
-
- // Calculate the index of the map we should install on the function based on
- // the FunctionKind and LanguageMode of the function.
- // Note: Must be kept in sync with Context::FunctionMapIndex
Node* compiler_hints =
LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
MachineType::Uint32());
- Node* is_strict = Word32And(
- compiler_hints, Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
-
- Label if_normal(this), if_generator(this), if_async(this),
- if_class_constructor(this), if_function_without_prototype(this),
- load_map(this);
- VARIABLE(map_index, MachineType::PointerRepresentation());
-
- STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
- Node* is_not_normal =
- Word32And(compiler_hints,
- Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
- GotoIfNot(is_not_normal, &if_normal);
-
- Node* is_generator = Word32And(
- compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
- << SharedFunctionInfo::kFunctionKindShift));
- GotoIf(is_generator, &if_generator);
-
- Node* is_async = Word32And(
- compiler_hints, Int32Constant(FunctionKind::kAsyncFunction
- << SharedFunctionInfo::kFunctionKindShift));
- GotoIf(is_async, &if_async);
-
- Node* is_class_constructor = Word32And(
- compiler_hints, Int32Constant(FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift));
- GotoIf(is_class_constructor, &if_class_constructor);
-
- if (FLAG_debug_code) {
- // Function must be a function without a prototype.
- CSA_ASSERT(
- this,
- Word32And(compiler_hints,
- Int32Constant((FunctionKind::kAccessorFunction |
- FunctionKind::kArrowFunction |
- FunctionKind::kConciseMethod)
- << SharedFunctionInfo::kFunctionKindShift)));
- }
- Goto(&if_function_without_prototype);
-
- BIND(&if_normal);
- {
- map_index.Bind(SelectIntPtrConstant(is_strict,
- Context::STRICT_FUNCTION_MAP_INDEX,
- Context::SLOPPY_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&if_generator);
- {
- Node* is_async =
- Word32And(compiler_hints,
- Int32Constant(FunctionKind::kAsyncFunction
- << SharedFunctionInfo::kFunctionKindShift));
- map_index.Bind(SelectIntPtrConstant(
- is_async, Context::ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
- Context::GENERATOR_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&if_async);
- {
- map_index.Bind(IntPtrConstant(Context::ASYNC_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
- BIND(&if_class_constructor);
- {
- map_index.Bind(IntPtrConstant(Context::CLASS_FUNCTION_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&if_function_without_prototype);
- {
- map_index.Bind(
- IntPtrConstant(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
- Goto(&load_map);
- }
-
- BIND(&load_map);
+ // The calculation of |function_map_index| must be in sync with
+ // SharedFunctionInfo::function_map_index().
+ Node* function_map_index =
+ IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
+ compiler_hints),
+ IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ function_map_index,
+ IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
// Get the function map in the current native context and set that
// as the map of the allocated object.
Node* native_context = LoadNativeContext(context);
- Node* map_slot_value =
- LoadFixedArrayElement(native_context, map_index.value());
- StoreMapNoWriteBarrier(result, map_slot_value);
+ Node* function_map = LoadContextElement(native_context, function_map_index);
+
+ // Create a new closure from the given function info in new space
+ Node* instance_size_in_bytes =
+ TimesPointerSize(LoadMapInstanceSize(function_map));
+ Node* result = Allocate(instance_size_in_bytes);
+ StoreMapNoWriteBarrier(result, function_map);
+ InitializeJSObjectBody(result, function_map, instance_size_in_bytes,
+ JSFunction::kSize);
// Initialize the rest of the function.
Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
- StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOrHashOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
empty_fixed_array);
@@ -164,23 +150,27 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
}
{
// If the feedback vector has optimized code, check whether it is marked
- // for deopt and, if so, clear it.
- Label optimized_code_ok(this);
+ // for deopt and, if so, clear the slot.
+ Label optimized_code_ok(this), clear_optimized_code(this);
Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
- Node* optimized_code_cell =
+ Node* optimized_code_cell_slot =
LoadFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex);
+ GotoIf(TaggedIsSmi(optimized_code_cell_slot), &optimized_code_ok);
+
Node* optimized_code =
- LoadWeakCellValue(optimized_code_cell, &optimized_code_ok);
+ LoadWeakCellValue(optimized_code_cell_slot, &clear_optimized_code);
Node* code_flags = LoadObjectField(
optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
Node* marked_for_deopt =
DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
- GotoIf(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok);
+ Branch(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok,
+ &clear_optimized_code);
- // Code is marked for deopt, clear the optimized code slot.
+ // Cell is empty or code is marked for deopt, clear the optimized code slot.
+ BIND(&clear_optimized_code);
StoreFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex,
- EmptyWeakCellConstant(), SKIP_WRITE_BARRIER);
+ SmiConstant(0), SKIP_WRITE_BARRIER);
Goto(&optimized_code_ok);
BIND(&optimized_code_ok);
@@ -207,6 +197,24 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
return result;
}
+Node* ConstructorBuiltinsAssembler::LoadFeedbackVectorSlot(
+ Node* closure, Node* literal_index) {
+ Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+ Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+ return LoadFixedArrayElement(feedback_vector, literal_index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
+}
+
+Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
+ return TaggedIsSmi(literal_site);
+}
+
+Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
+ CSA_ASSERT(this, IsAllocationSite(site));
+ return LoadObjectField(site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
+}
+
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
Node* context = Parameter(FastNewClosureDescriptor::kContext);
@@ -304,7 +312,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
ParameterMode mode = INTPTR_PARAMETERS;
Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
Node* length = IntPtrAdd(slots, min_context_slots);
- Node* size = GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);
+ Node* size = GetFixedArrayAllocationSize(length, PACKED_ELEMENTS, mode);
// Create a new closure from the given function info in new space
Node* function_context = AllocateInNewSpace(size);
@@ -340,7 +348,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
// Initialize the rest of the slots to undefined.
Node* undefined = UndefinedConstant();
BuildFastFixedArrayForEach(
- function_context, FAST_ELEMENTS, min_context_slots, length,
+ function_context, PACKED_ELEMENTS, min_context_slots, length,
[this, undefined](Node* context, Node* offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
undefined);
@@ -374,14 +382,11 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
Label call_runtime(this, Label::kDeferred), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
-
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- Node* boilerplate = LoadFixedArrayElement(feedback_vector, literal_index, 0,
- CodeStubAssembler::SMI_PARAMETERS);
- GotoIf(IsUndefined(boilerplate), &call_runtime);
-
+ Node* literal_site = LoadFeedbackVectorSlot(closure, literal_index);
+ GotoIf(NotHasBoilerplate(literal_site), &call_runtime);
{
+ Node* boilerplate = literal_site;
+ CSA_ASSERT(this, IsJSRegExp(boilerplate));
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Node* copy = Allocate(size);
for (int offset = 0; offset < size; offset += kPointerSize) {
@@ -452,24 +457,18 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
return_result(this);
VARIABLE(result, MachineRepresentation::kTagged);
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- Node* allocation_site = LoadFixedArrayElement(
- feedback_vector, literal_index, 0, CodeStubAssembler::SMI_PARAMETERS);
-
- GotoIf(IsUndefined(allocation_site), call_runtime);
- allocation_site = LoadFixedArrayElement(feedback_vector, literal_index, 0,
- CodeStubAssembler::SMI_PARAMETERS);
+ Node* allocation_site = LoadFeedbackVectorSlot(closure, literal_index);
+ GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
- Node* boilerplate =
- LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
Node* boilerplate_map = LoadMap(boilerplate);
+ CSA_ASSERT(this, IsJSArrayMap(boilerplate_map));
Node* boilerplate_elements = LoadElements(boilerplate);
Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
allocation_site =
allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
- Node* zero = SmiConstant(Smi::kZero);
+ Node* zero = SmiConstant(0);
GotoIf(SmiEqual(capacity, zero), &zero_capacity);
Node* elements_map = LoadMap(boilerplate_elements);
@@ -478,25 +477,10 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
{
Comment("fast double elements path");
- if (FLAG_debug_code) {
- Label correct_elements_map(this), abort(this, Label::kDeferred);
- Branch(IsFixedDoubleArrayMap(elements_map), &correct_elements_map,
- &abort);
-
- BIND(&abort);
- {
- Node* abort_id = SmiConstant(
- Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
- CallRuntime(Runtime::kAbort, context, abort_id);
- result.Bind(UndefinedConstant());
- Goto(&return_result);
- }
- BIND(&correct_elements_map);
- }
-
+ if (FLAG_debug_code) CSA_CHECK(this, IsFixedDoubleArrayMap(elements_map));
Node* array =
NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
- allocation_site, capacity, FAST_DOUBLE_ELEMENTS);
+ allocation_site, capacity, PACKED_DOUBLE_ELEMENTS);
result.Bind(array);
Goto(&return_result);
}
@@ -506,7 +490,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
Comment("fast elements path");
Node* array =
NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
- allocation_site, capacity, FAST_ELEMENTS);
+ allocation_site, capacity, PACKED_ELEMENTS);
result.Bind(array);
Goto(&return_result);
}
@@ -536,7 +520,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
BIND(&allocate_without_elements);
{
Node* array = AllocateUninitializedJSArrayWithoutElements(
- FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+ PACKED_ELEMENTS, boilerplate_map, length.value(), allocation_site);
StoreObjectField(array, JSObject::kElementsOffset, elements.value());
result.Bind(array);
Goto(&return_result);
@@ -561,13 +545,15 @@ void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
BIND(&call_runtime);
{
Comment("call runtime");
- Node* flags =
- SmiConstant(Smi::FromInt(ArrayLiteral::kShallowElements |
- (allocation_site_mode == TRACK_ALLOCATION_SITE
- ? 0
- : ArrayLiteral::kDisableMementos)));
+ int flags = AggregateLiteral::kIsShallow;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ // Force initial allocation sites on the initial literal setup step.
+ flags |= AggregateLiteral::kNeedsInitialAllocationSite;
+ } else {
+ flags |= AggregateLiteral::kDisableMementos;
+ }
Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
- literal_index, constant_elements, flags));
+ literal_index, constant_elements, SmiConstant(flags)));
}
}
@@ -581,31 +567,28 @@ TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Label* call_runtime, Node* closure, Node* literals_index) {
- Node* allocation_site;
- {
- // Load the alloation site.
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- allocation_site = LoadFixedArrayElement(feedback_vector, literals_index, 0,
- CodeStubAssembler::SMI_PARAMETERS);
- GotoIf(IsUndefined(allocation_site), call_runtime);
- }
+ Node* allocation_site = LoadFeedbackVectorSlot(closure, literals_index);
+ GotoIf(NotHasBoilerplate(allocation_site), call_runtime);
- Node* boilerplate =
- LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate = LoadAllocationSiteBoilerplate(allocation_site);
Node* boilerplate_map = LoadMap(boilerplate);
+ CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
VARIABLE(var_properties, MachineRepresentation::kTagged);
{
+ Node* bit_field_3 = LoadMapBitField3(boilerplate_map);
+ GotoIf(IsSetWord32<Map::Deprecated>(bit_field_3), call_runtime);
// Directly copy over the property store for dict-mode boilerplates.
- Label if_dictionary(this), if_fast(this), allocate_object(this);
- Branch(IsDictionaryMap(boilerplate_map), &if_dictionary, &if_fast);
+ Label if_dictionary(this), if_fast(this), done(this);
+ Branch(IsSetWord32<Map::DictionaryMap>(bit_field_3), &if_dictionary,
+ &if_fast);
BIND(&if_dictionary);
{
+ Comment("Copy dictionary properties");
var_properties.Bind(
CopyNameDictionary(LoadProperties(boilerplate), call_runtime));
// Slow objects have no in-object properties.
- Goto(&allocate_object);
+ Goto(&done);
}
BIND(&if_fast);
{
@@ -613,14 +596,38 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Node* boilerplate_properties = LoadProperties(boilerplate);
GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime);
var_properties.Bind(EmptyFixedArrayConstant());
- Goto(&allocate_object);
+ Goto(&done);
}
- BIND(&allocate_object);
+ BIND(&done);
}
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ {
+ // Copy the elements backing store, assuming that it's flat.
+ Label if_empty_fixed_array(this), if_copy_elements(this), done(this);
+ Node* boilerplate_elements = LoadElements(boilerplate);
+ Branch(IsEmptyFixedArray(boilerplate_elements), &if_empty_fixed_array,
+ &if_copy_elements);
+
+ BIND(&if_empty_fixed_array);
+ var_elements.Bind(boilerplate_elements);
+ Goto(&done);
+
+ BIND(&if_copy_elements);
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
+ var_elements.Bind(CopyFixedArrayBase(boilerplate_elements));
+ Goto(&done);
+ BIND(&done);
+ }
+
+ // Ensure new-space allocation for a fresh JSObject so we can skip write
+ // barriers when copying all object fields.
+ STATIC_ASSERT(JSObject::kMaxInstanceSize < kMaxRegularHeapObjectSize);
Node* instance_size = TimesPointerSize(LoadMapInstanceSize(boilerplate_map));
Node* allocation_size = instance_size;
- if (FLAG_allocation_site_pretenuring) {
+ bool needs_allocation_memento = FLAG_allocation_site_pretenuring;
+ if (needs_allocation_memento) {
// Prepare for inner-allocating the AllocationMemento.
allocation_size =
IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize));
@@ -628,43 +635,90 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Node* copy = AllocateInNewSpace(allocation_size);
{
+ Comment("Initialize Literal Copy");
// Initialize Object fields.
StoreMapNoWriteBarrier(copy, boilerplate_map);
- StoreObjectFieldNoWriteBarrier(copy, JSObject::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(copy, JSObject::kPropertiesOrHashOffset,
var_properties.value());
- // TODO(cbruni): support elements cloning for object literals.
- CSA_ASSERT(this, IsEmptyFixedArray(LoadElements(boilerplate)));
StoreObjectFieldNoWriteBarrier(copy, JSObject::kElementsOffset,
- EmptyFixedArrayConstant());
+ var_elements.value());
}
- // Copy over in-object properties.
- Node* start_offset = IntPtrConstant(JSObject::kHeaderSize);
- BuildFastLoop(start_offset, instance_size,
- [=](Node* offset) {
- // The Allocate above guarantees that the copy lies in new
- // space. This allows us to skip write barriers. This is
- // necessary since we may also be copying unboxed doubles.
- // TODO(verwaest): Allocate and fill in double boxes.
- // TODO(cbruni): decode map information and support mutable
- // heap numbers.
- Node* field = LoadObjectField(boilerplate, offset);
- StoreObjectFieldNoWriteBarrier(copy, offset, field);
- },
- kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
-
- if (FLAG_allocation_site_pretenuring) {
- Node* memento = InnerAllocate(copy, instance_size);
- StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
- StoreObjectFieldNoWriteBarrier(
- memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
- Node* memento_create_count = LoadObjectField(
- allocation_site, AllocationSite::kPretenureCreateCountOffset);
- memento_create_count =
- SmiAdd(memento_create_count, SmiConstant(Smi::FromInt(1)));
- StoreObjectFieldNoWriteBarrier(allocation_site,
- AllocationSite::kPretenureCreateCountOffset,
- memento_create_count);
+ // Initialize the AllocationMemento before potential GCs due to heap number
+ // allocation when copying the in-object properties.
+ if (needs_allocation_memento) {
+ InitializeAllocationMemento(copy, instance_size, allocation_site);
+ }
+
+ {
+ // Copy over in-object properties.
+ Label continue_with_write_barrier(this), done_init(this);
+ VARIABLE(offset, MachineType::PointerRepresentation(),
+ IntPtrConstant(JSObject::kHeaderSize));
+ // Mutable heap numbers only occur on 32-bit platforms.
+ bool may_use_mutable_heap_numbers =
+ FLAG_track_double_fields && !FLAG_unbox_double_fields;
+ {
+ Comment("Copy in-object properties fast");
+ Label continue_fast(this, &offset);
+ Branch(WordEqual(offset.value(), instance_size), &done_init,
+ &continue_fast);
+ BIND(&continue_fast);
+ Node* field = LoadObjectField(boilerplate, offset.value());
+ if (may_use_mutable_heap_numbers) {
+ Label store_field(this);
+ GotoIf(TaggedIsSmi(field), &store_field);
+ GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
+ Goto(&store_field);
+ BIND(&store_field);
+ }
+ StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
+ offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+ Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
+ &done_init);
+ }
+
+ if (!may_use_mutable_heap_numbers) {
+ BIND(&done_init);
+ return copy;
+ }
+ // Continue initializing the literal after seeing the first sub-object
+ // potentially causing allocation. In this case we prepare the new literal
+ // by copying all pending fields over from the boilerplate and emit full
+ // write barriers from here on.
+ BIND(&continue_with_write_barrier);
+ {
+ Comment("Copy in-object properties slow");
+ BuildFastLoop(offset.value(), instance_size,
+ [=](Node* offset) {
+ Node* field = LoadObjectField(boilerplate, offset);
+ StoreObjectFieldNoWriteBarrier(copy, offset, field);
+ },
+ kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Comment("Copy mutable HeapNumber values");
+ BuildFastLoop(offset.value(), instance_size,
+ [=](Node* offset) {
+ Node* field = LoadObjectField(copy, offset);
+ Label copy_mutable_heap_number(this, Label::kDeferred),
+ continue_loop(this);
+ // We only have to clone complex field values.
+ GotoIf(TaggedIsSmi(field), &continue_loop);
+ Branch(IsMutableHeapNumber(field),
+ &copy_mutable_heap_number, &continue_loop);
+ BIND(&copy_mutable_heap_number);
+ {
+ Node* double_value = LoadHeapNumberValue(field);
+ Node* mutable_heap_number =
+ AllocateHeapNumberWithValue(double_value, MUTABLE);
+ StoreObjectField(copy, offset, mutable_heap_number);
+ Goto(&continue_loop);
+ }
+ BIND(&continue_loop);
+ },
+ kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Goto(&done_init);
+ }
+ BIND(&done_init);
}
return copy;
}
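A compact C++ model of the copying strategy in EmitFastCloneShallowObject: raw field copies (no write barrier) until the first mutable boxed double is seen, then a barriered tail copy in which every mutable box is cloned so the new literal does not alias the boilerplate's storage. MutableBox and Field are illustrative stand-ins for MutableHeapNumber and a tagged in-object field.

    #include <memory>
    #include <variant>
    #include <vector>

    using MutableBox = std::shared_ptr<double>;        // stands in for MutableHeapNumber.
    using Field = std::variant<intptr_t, MutableBox>;  // Smi-like value or boxed double.

    std::vector<Field> CloneInObjectFieldsModel(const std::vector<Field>& boilerplate) {
      std::vector<Field> copy;
      copy.reserve(boilerplate.size());
      size_t i = 0;
      // Fast phase: plain copies until the first mutable box shows up.
      for (; i < boilerplate.size(); ++i) {
        if (std::holds_alternative<MutableBox>(boilerplate[i])) break;
        copy.push_back(boilerplate[i]);
      }
      // Slow phase: copy the remaining fields, cloning every mutable box so
      // the new object owns its own double storage (same net effect as the
      // AllocateHeapNumberWithValue loop above).
      for (; i < boilerplate.size(); ++i) {
        if (const auto* box = std::get_if<MutableBox>(&boilerplate[i])) {
          copy.push_back(std::make_shared<double>(**box));
        } else {
          copy.push_back(boilerplate[i]);
        }
      }
      return copy;
    }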
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 9b04eb378e..fe049893eb 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -41,6 +41,11 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
Node* boilerplate_elements, Node* allocation_site,
Node* capacity, ElementsKind kind);
+ Node* CopyFixedArrayBase(Node* elements);
+
+ Node* LoadFeedbackVectorSlot(Node* closure, Node* literal_index);
+ Node* NotHasBoilerplate(Node* literal_site);
+ Node* LoadAllocationSiteBoilerplate(Node* allocation_site);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 5fe2cb03bd..9edeb56e1e 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -2,28 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-conversion-gen.h"
+
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-class ConversionBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit ConversionBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void Generate_NonPrimitiveToPrimitive(Node* context, Node* input,
- ToPrimitiveHint hint);
-
- void Generate_OrdinaryToPrimitive(Node* context, Node* input,
- OrdinaryToPrimitiveHint hint);
-};
-
// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
Node* context, Node* input, ToPrimitiveHint hint) {
@@ -136,6 +124,56 @@ TF_BUILTIN(ToString, CodeStubAssembler) {
Return(ToString(context, input));
}
+// ES6 section 7.1.1 ToPrimitive( argument, "default" ) followed by
+// ES6 section 7.1.12 ToString ( argument )
+compiler::Node* ConversionBuiltinsAssembler::ToPrimitiveToString(
+ Node* context, Node* input, Variable* feedback) {
+ Label is_string(this), to_primitive(this, Label::kDeferred),
+ to_string(this, Label::kDeferred), done(this);
+ VARIABLE(result, MachineRepresentation::kTagged, input);
+
+ GotoIf(TaggedIsSmi(input), &to_string);
+ GotoIf(IsString(input), &is_string);
+ BranchIfJSReceiver(input, &to_primitive, &to_string);
+
+ BIND(&to_primitive);
+ {
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ result.Bind(CallStub(callable, context, input));
+ Goto(&to_string);
+ }
+
+ BIND(&to_string);
+ {
+ if (feedback) {
+ feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
+ }
+ result.Bind(CallBuiltin(Builtins::kToString, context, result.value()));
+ Goto(&done);
+ }
+
+ BIND(&is_string);
+ {
+ if (feedback) {
+ feedback->Bind(
+ SelectSmiConstant(WordEqual(input, EmptyStringConstant()),
+ BinaryOperationFeedback::kString,
+ BinaryOperationFeedback::kNonEmptyString));
+ }
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return result.value();
+}
+
+TF_BUILTIN(ToPrimitiveToString, ConversionBuiltinsAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(ToPrimitiveToString(context, input));
+}
+
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
Node* context, Node* input, OrdinaryToPrimitiveHint hint) {
@@ -221,6 +259,22 @@ TF_BUILTIN(ToBoolean, CodeStubAssembler) {
Return(BooleanConstant(false));
}
+// ES6 section 7.1.2 ToBoolean ( argument )
+// Requires parameter on stack so that it can be used as a continuation from a
+// LAZY deopt.
+TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
+ Node* value = Parameter(Descriptor::kArgument);
+
+ Label return_true(this), return_false(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+
+ BIND(&return_true);
+ Return(BooleanConstant(true));
+
+ BIND(&return_false);
+ Return(BooleanConstant(false));
+}
+
TF_BUILTIN(ToLength, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -247,8 +301,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
// Check if {len} is a HeapNumber.
Label if_lenisheapnumber(this),
if_lenisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(len)), &if_lenisheapnumber,
- &if_lenisnotheapnumber);
+ Branch(IsHeapNumber(len), &if_lenisheapnumber, &if_lenisnotheapnumber);
BIND(&if_lenisheapnumber);
{
@@ -273,8 +326,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
BIND(&if_lenisnotheapnumber);
{
// Need to convert {len} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_len.Bind(CallStub(callable, context, len));
+ var_len.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, len));
Goto(&loop);
}
@@ -285,7 +337,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) {
Return(NumberConstant(kMaxSafeInteger));
BIND(&return_zero);
- Return(SmiConstant(Smi::kZero));
+ Return(SmiConstant(0));
}
}
@@ -337,7 +389,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
Node* js_value = Allocate(JSValue::kSize);
StoreMapNoWriteBarrier(js_value, initial_map);
- StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -345,9 +397,8 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Return(js_value);
BIND(&if_noconstructor);
- TailCallRuntime(
- Runtime::kThrowUndefinedOrNullToObject, context,
- HeapConstant(factory()->NewStringFromAsciiChecked("ToObject", TENURED)));
+ TailCallRuntime(Runtime::kThrowUndefinedOrNullToObject, context,
+ StringConstant("ToObject"));
BIND(&if_jsreceiver);
Return(object);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.h b/deps/v8/src/builtins/builtins-conversion-gen.h
new file mode 100644
index 0000000000..fedbc54d2e
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-conversion-gen.h
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_CONVERSION_GEN_H_
+#define V8_BUILTINS_BUILTINS_CONVERSION_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ConversionBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ConversionBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* ToPrimitiveToString(Node* context, Node* input,
+ Variable* feedback = nullptr);
+
+ protected:
+ void Generate_NonPrimitiveToPrimitive(Node* context, Node* input,
+ ToPrimitiveHint hint);
+
+ void Generate_OrdinaryToPrimitive(Node* context, Node* input,
+ OrdinaryToPrimitiveHint hint);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_CONVERSION_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 10bb39f861..579d537b73 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -50,7 +50,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
BIND(&stamp_mismatch);
}
- Node* field_index_smi = SmiConstant(Smi::FromInt(field_index));
+ Node* field_index_smi = SmiConstant(field_index);
Node* function =
ExternalConstant(ExternalReference::get_date_field_function(isolate()));
Node* result = CallCFunction2(
@@ -204,17 +204,19 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
GotoIf(WordEqual(hint, string_string), &hint_is_string);
// Slow-case with actual string comparisons.
- Callable string_equal = CodeFactory::StringEqual(isolate());
GotoIf(TaggedIsSmi(hint), &hint_is_invalid);
GotoIfNot(IsString(hint), &hint_is_invalid);
- GotoIf(WordEqual(CallStub(string_equal, context, hint, number_string),
- TrueConstant()),
+ GotoIf(WordEqual(
+ CallBuiltin(Builtins::kStringEqual, context, hint, number_string),
+ TrueConstant()),
&hint_is_number);
- GotoIf(WordEqual(CallStub(string_equal, context, hint, default_string),
- TrueConstant()),
+ GotoIf(WordEqual(
+ CallBuiltin(Builtins::kStringEqual, context, hint, default_string),
+ TrueConstant()),
&hint_is_string);
- GotoIf(WordEqual(CallStub(string_equal, context, hint, string_string),
- TrueConstant()),
+ GotoIf(WordEqual(
+ CallBuiltin(Builtins::kStringEqual, context, hint, string_string),
+ TrueConstant()),
&hint_is_string);
Goto(&hint_is_invalid);
@@ -247,9 +249,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
BIND(&receiver_is_invalid);
{
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(
- "Date.prototype [ @@toPrimitive ]", TENURED)),
- receiver);
+ StringConstant("Date.prototype [ @@toPrimitive ]"), receiver);
Unreachable();
}
}
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 9985bbe4b7..c46a44d0d3 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -162,7 +162,7 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
const char* local_timezone = date_cache->LocalTimezone(time_ms);
switch (mode) {
case kDateOnly:
- SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
+ SNPrintF(str, "%s %s %02d %04d", kShortWeekDays[weekday],
kShortMonths[month], day, year);
return;
case kTimeOnly:
@@ -171,7 +171,7 @@ void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
local_timezone);
return;
case kDateAndTime:
- SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ SNPrintF(str, "%s %s %02d %04d %02d:%02d:%02d GMT%c%02d%02d (%s)",
kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
timezone_min, local_timezone);
@@ -822,7 +822,7 @@ BUILTIN(DatePrototypeToUTCString) {
int year, month, day, weekday, hour, min, sec, ms;
isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
&hour, &min, &sec, &ms);
- SNPrintF(ArrayVector(buffer), "%s, %02d %s %4d %02d:%02d:%02d GMT",
+ SNPrintF(ArrayVector(buffer), "%s, %02d %s %04d %02d:%02d:%02d GMT",
kShortWeekDays[weekday], day, kShortMonths[month], year, hour, min,
sec);
return *isolate->factory()->NewStringFromAsciiChecked(buffer);
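The %4d to %04d changes in this file only affect years below 1000: the field stays four characters wide but is now zero-padded rather than space-padded, e.g.:

    #include <cstdio>

    int main() {
      char old_style[8], new_style[8];
      std::snprintf(old_style, sizeof(old_style), "%4d", 737);   // " 737"
      std::snprintf(new_style, sizeof(new_style), "%04d", 737);  // "0737"
      std::printf("[%s] vs [%s]\n", old_style, new_style);
      return 0;
    }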
diff --git a/deps/v8/src/builtins/builtins-debug.cc b/deps/v8/src/builtins/builtins-debug-gen.cc
index de603287f2..de603287f2 100644
--- a/deps/v8/src/builtins/builtins-debug.cc
+++ b/deps/v8/src/builtins/builtins-debug-gen.cc
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index bce8eebb0f..8a87008def 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -62,20 +62,18 @@ namespace internal {
ASM(CallFunction_ReceiverIsNullOrUndefined) \
ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
ASM(CallFunction_ReceiverIsAny) \
- ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsAny) \
/* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
ASM(CallBoundFunction) \
- ASM(TailCallBoundFunction) \
/* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
ASM(Call_ReceiverIsNullOrUndefined) \
ASM(Call_ReceiverIsNotNullOrUndefined) \
ASM(Call_ReceiverIsAny) \
- ASM(TailCall_ReceiverIsNullOrUndefined) \
- ASM(TailCall_ReceiverIsNotNullOrUndefined) \
- ASM(TailCall_ReceiverIsAny) \
- ASM(CallWithSpread) \
+ \
+ /* ES6 section 9.5.12[[Call]] ( thisArgument, argumentsList ) */ \
+ TFC(CallProxy, CallTrampoline, 1) \
+ ASM(CallVarargs) \
+ TFC(CallWithSpread, CallWithSpread, 1) \
+ TFC(CallWithArrayLike, CallWithArrayLike, 1) \
ASM(CallForwardVarargs) \
ASM(CallFunctionForwardVarargs) \
\
@@ -89,7 +87,9 @@ namespace internal {
ASM(ConstructProxy) \
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
ASM(Construct) \
- ASM(ConstructWithSpread) \
+ ASM(ConstructVarargs) \
+ TFC(ConstructWithSpread, ConstructWithSpread, 1) \
+ TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
ASM(ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs) \
ASM(JSConstructStubApi) \
@@ -108,7 +108,6 @@ namespace internal {
TFC(FastCloneShallowObject, FastCloneShallowObject, 1) \
\
/* Apply and entries */ \
- ASM(Apply) \
ASM(JSEntryTrampoline) \
ASM(JSConstructEntryTrampoline) \
ASM(ResumeGeneratorTrampoline) \
@@ -126,6 +125,10 @@ namespace internal {
TFS(StringIndexOf, kReceiver, kSearchString, kPosition) \
TFC(StringLessThan, Compare, 1) \
TFC(StringLessThanOrEqual, Compare, 1) \
+ TFC(StringConcat, StringConcat, 1) \
+ \
+ /* OrderedHashTable helpers */ \
+ TFS(OrderedHashTableHealIndex, kTable, kIndex) \
\
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
@@ -134,8 +137,6 @@ namespace internal {
ASM(InterpreterPushArgsThenCallFunction) \
ASM(InterpreterPushUndefinedAndArgsThenCallFunction) \
ASM(InterpreterPushArgsThenCallWithFinalSpread) \
- ASM(InterpreterPushArgsThenTailCall) \
- ASM(InterpreterPushArgsThenTailCallFunction) \
ASM(InterpreterPushArgsThenConstruct) \
ASM(InterpreterPushArgsThenConstructFunction) \
ASM(InterpreterPushArgsThenConstructArray) \
@@ -145,9 +146,7 @@ namespace internal {
ASM(InterpreterOnStackReplacement) \
\
/* Code life-cycle */ \
- ASM(CompileOptimized) \
- ASM(CompileOptimizedConcurrent) \
- ASM(InOptimizationQueue) \
+ ASM(CheckOptimizationMarker) \
ASM(InstantiateAsmJs) \
ASM(MarkCodeAsToBeExecutedOnce) \
ASM(MarkCodeAsExecutedOnce) \
@@ -155,8 +154,33 @@ namespace internal {
ASM(NotifyDeoptimized) \
ASM(NotifySoftDeoptimized) \
ASM(NotifyLazyDeoptimized) \
- ASM(NotifyStubFailure) \
- ASM(NotifyStubFailureSaveDoubles) \
+ ASM(NotifyBuiltinContinuation) \
+ \
+ /* Trampolines called when returning from a deoptimization that expects */ \
+ /* to continue in a JavaScript builtin to finish the functionality of a */ \
+  /* TF-inlined version of a builtin that has side-effects. */               \
+ /* */ \
+ /* The trampolines work as follows: */ \
+ /* 1. Trampoline restores input register values that */ \
+ /* the builtin expects from a BuiltinContinuationFrame. */ \
+ /* 2. Trampoline tears down BuiltinContinuationFrame. */ \
+ /* 3. Trampoline jumps to the builtin's address. */ \
+ /* 4. Builtin executes as if invoked by the frame above it. */ \
+ /* 5. When the builtin returns, execution resumes normally in the */ \
+ /* calling frame, processing any return result from the JavaScript */ \
+ /* builtin as if it had called the builtin directly. */ \
+ /* */ \
+ /* There are two variants of the stub that differ in their handling of a */ \
+ /* value returned by the next frame deeper on the stack. For LAZY deopts, */ \
+ /* the return value (e.g. rax on x64) is explicitly passed as an extra */ \
+ /* stack parameter to the JavaScript builtin by the "WithResult" */ \
+ /* trampoline variant. The plain variant is used in EAGER deopt contexts */ \
+ /* and has no such special handling. */ \
+ ASM(ContinueToCodeStubBuiltin) \
+ ASM(ContinueToCodeStubBuiltinWithResult) \
+ ASM(ContinueToJavaScriptBuiltin) \
+ ASM(ContinueToJavaScriptBuiltinWithResult) \
+ \
ASM(OnStackReplacement) \
\
/* API callback handling */ \
@@ -192,12 +216,16 @@ namespace internal {
TFC(NonNumberToNumber, TypeConversion, 1) \
TFC(ToNumber, TypeConversion, 1) \
TFC(ToString, TypeConversion, 1) \
+ TFC(ToPrimitiveToString, TypeConversion, 1) \
TFC(ToInteger, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
TFC(GetSuperConstructor, Typeof, 1) \
\
+ /* Type conversions continuations */ \
+ TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \
+ \
/* Handlers */ \
TFH(LoadICProtoArray, BUILTIN, kNoExtraICState, LoadICProtoArray) \
TFH(LoadICProtoArrayThrowIfNonexistent, BUILTIN, kNoExtraICState, \
@@ -233,8 +261,7 @@ namespace internal {
/* Special internal builtins */ \
CPP(EmptyFunction) \
CPP(Illegal) \
- CPP(RestrictedFunctionPropertiesThrower) \
- CPP(RestrictedStrictArgumentsPropertiesThrower) \
+ CPP(StrictPoisonPillThrower) \
CPP(UnsupportedThrower) \
TFJ(ReturnReceiver, 0) \
\
@@ -268,6 +295,10 @@ namespace internal {
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayForEachLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayForEachLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \
+ kInitialK, kLength) \
+ TFJ(ArrayForEachLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.every */ \
TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
@@ -284,6 +315,10 @@ namespace internal {
/* ES6 #sec-array.prototype.foreach */ \
TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayMapLoopEagerDeoptContinuation, 5, kCallbackFn, kThisArg, kArray, \
+ kInitialK, kLength) \
+ TFJ(ArrayMapLoopLazyDeoptContinuation, 6, kCallbackFn, kThisArg, kArray, \
+ kInitialK, kLength, kResult) \
TFJ(ArrayMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
@@ -311,8 +346,8 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
+ TFJ(AsyncFunctionAwaitCaught, 2, kAwaited, kOuterPromise) \
+ TFJ(AsyncFunctionAwaitUncaught, 2, kAwaited, kOuterPromise) \
TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
TFJ(AsyncFunctionPromiseCreate, 0) \
@@ -369,6 +404,7 @@ namespace internal {
CPP(ConsoleTime) \
CPP(ConsoleTimeEnd) \
CPP(ConsoleTimeStamp) \
+ CPP(ConsoleContext) \
\
/* DataView */ \
CPP(DataViewConstructor) \
@@ -493,11 +529,13 @@ namespace internal {
TFS(CreateGeneratorObject, kClosure, kReceiver) \
CPP(GeneratorFunctionConstructor) \
/* ES6 #sec-generator.prototype.next */ \
- TFJ(GeneratorPrototypeNext, 1, kValue) \
+ TFJ(GeneratorPrototypeNext, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.return */ \
- TFJ(GeneratorPrototypeReturn, 1, kValue) \
+ TFJ(GeneratorPrototypeReturn, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-generator.prototype.throw */ \
- TFJ(GeneratorPrototypeThrow, 1, kException) \
+ TFJ(GeneratorPrototypeThrow, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(AsyncFunctionConstructor) \
\
/* Global object */ \
@@ -538,6 +576,25 @@ namespace internal {
TFH(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, \
LoadGlobal) \
\
+ /* Map */ \
+ TFS(MapLookupHashIndex, kTable, kKey) \
+ TFJ(MapConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(MapGet, 1, kKey) \
+ TFJ(MapHas, 1, kKey) \
+ CPP(MapClear) \
+ /* ES #sec-map.prototype.entries */ \
+ TFJ(MapPrototypeEntries, 0) \
+ /* ES #sec-get-map.prototype.size */ \
+ TFJ(MapPrototypeGetSize, 0) \
+ /* ES #sec-map.prototype.forEach */ \
+ TFJ(MapPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES #sec-map.prototype.keys */ \
+ TFJ(MapPrototypeKeys, 0) \
+ /* ES #sec-map.prototype.values */ \
+ TFJ(MapPrototypeValues, 0) \
+ /* ES #sec-%mapiteratorprototype%.next */ \
+ TFJ(MapIteratorPrototypeNext, 0) \
+ \
/* Math */ \
/* ES6 #sec-math.abs */ \
TFJ(MathAbs, 1, kX) \
@@ -651,16 +708,11 @@ namespace internal {
TFC(GreaterThanOrEqual, Compare, 1) \
TFC(Equal, Compare, 1) \
TFC(StrictEqual, Compare, 1) \
- TFC(AddWithFeedback, BinaryOpWithVector, 1) \
- TFC(SubtractWithFeedback, BinaryOpWithVector, 1) \
- TFC(MultiplyWithFeedback, BinaryOpWithVector, 1) \
- TFC(DivideWithFeedback, BinaryOpWithVector, 1) \
- TFC(ModulusWithFeedback, BinaryOpWithVector, 1) \
\
/* Object */ \
CPP(ObjectAssign) \
/* ES #sec-object.create */ \
- TFJ(ObjectCreate, 2, kPrototype, kProperties) \
+ TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectDefineGetter) \
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
@@ -687,6 +739,7 @@ namespace internal {
TFJ(ObjectProtoToString, 0) \
/* ES6 #sec-object.prototype.valueof */ \
TFJ(ObjectPrototypeValueOf, 0) \
+ TFJ(ObjectPrototypeIsPrototypeOf, 1, kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
@@ -710,11 +763,12 @@ namespace internal {
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
TFJ(PromiseInternalConstructor, 1, kParent) \
- TFJ(IsPromise, 1, kObject) \
+ CPP(IsPromise) \
/* ES #sec-promise-resolve-functions */ \
TFJ(PromiseResolveClosure, 1, kValue) \
/* ES #sec-promise-reject-functions */ \
TFJ(PromiseRejectClosure, 1, kValue) \
+ TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
TFJ(PromiseThen, 2, kOnFullfilled, kOnRejected) \
/* ES #sec-promise.prototype.catch */ \
@@ -734,10 +788,15 @@ namespace internal {
TFJ(PromiseCatchFinally, 1, kReason) \
TFJ(PromiseValueThunkFinally, 0) \
TFJ(PromiseThrowerFinally, 0) \
+ /* ES #sec-promise.all */ \
+ TFJ(PromiseAll, 1, kIterable) \
+ /* ES #sec-promise.race */ \
+ TFJ(PromiseRace, 1, kIterable) \
\
/* Proxy */ \
- CPP(ProxyConstructor) \
- CPP(ProxyConstructor_ConstructStub) \
+ TFJ(ProxyConstructor, 0) \
+ TFJ(ProxyConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Reflect */ \
ASM(ReflectApply) \
@@ -755,7 +814,9 @@ namespace internal {
CPP(ReflectSetPrototypeOf) \
\
/* RegExp */ \
+ TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \
TFS(RegExpPrototypeExecSlow, kReceiver, kString) \
+ \
CPP(RegExpCapture1Getter) \
CPP(RegExpCapture2Getter) \
CPP(RegExpCapture3Getter) \
@@ -810,6 +871,21 @@ namespace internal {
/* ES #sec-regexp.prototype-@@split */ \
TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
+ /* Set */ \
+ TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(SetHas, 1, kKey) \
+ CPP(SetClear) \
+ /* ES #sec-set.prototype.entries */ \
+ TFJ(SetPrototypeEntries, 0) \
+ /* ES #sec-get-set.prototype.size */ \
+ TFJ(SetPrototypeGetSize, 0) \
+ /* ES #sec-set.prototype.foreach */ \
+ TFJ(SetPrototypeForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES #sec-set.prototype.values */ \
+ TFJ(SetPrototypeValues, 0) \
+ /* ES #sec-%setiteratorprototype%.next */ \
+ TFJ(SetIteratorPrototypeNext, 0) \
+ \
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
CPP(SharedArrayBufferPrototypeSlice) \
@@ -853,23 +929,16 @@ namespace internal {
/* ES6 #sec-string.prototype.slice */ \
TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.split */ \
- TFJ(StringPrototypeSplit, 2, kSeparator, kLimit) \
+ TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substr */ \
- TFJ(StringPrototypeSubstr, 2, kStart, kLength) \
+ TFJ(StringPrototypeSubstr, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substring */ \
- TFJ(StringPrototypeSubstring, 2, kStart, kEnd) \
+ TFJ(StringPrototypeSubstring, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.startswith */ \
CPP(StringPrototypeStartsWith) \
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0) \
- /* ES #sec-string.prototype.tolocalelowercase */ \
- CPP(StringPrototypeToLocaleLowerCase) \
- /* ES #sec-string.prototype.tolocaleuppercase */ \
- CPP(StringPrototypeToLocaleUpperCase) \
- /* (obsolete) Unibrow version */ \
- CPP(StringPrototypeToLowerCase) \
- /* (obsolete) Unibrow version */ \
- CPP(StringPrototypeToUpperCase) \
CPP(StringPrototypeTrim) \
CPP(StringPrototypeTrimLeft) \
CPP(StringPrototypeTrimRight) \
@@ -948,6 +1017,9 @@ namespace internal {
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.map */ \
TFJ(TypedArrayPrototypeMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.forEach */ \
+ TFJ(TypedArrayPrototypeForEach, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
@@ -961,6 +1033,14 @@ namespace internal {
TFC(ThrowWasmTrapFuncInvalid, WasmRuntimeCall, 1) \
TFC(ThrowWasmTrapFuncSigMismatch, WasmRuntimeCall, 1) \
\
+ /* WeakMap */ \
+ TFS(WeakMapLookupHashIndex, kTable, kKey) \
+ TFJ(WeakMapGet, 1, kKey) \
+ TFJ(WeakMapHas, 1, kKey) \
+ \
+ /* WeakSet */ \
+ TFJ(WeakSetHas, 1, kKey) \
+ \
/* AsyncGenerator */ \
\
TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \
@@ -972,18 +1052,21 @@ namespace internal {
CPP(AsyncGeneratorFunctionConstructor) \
/* AsyncGenerator.prototype.next ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-next */ \
- TFJ(AsyncGeneratorPrototypeNext, 1, kValue) \
+ TFJ(AsyncGeneratorPrototypeNext, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.return ( value ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-return */ \
- TFJ(AsyncGeneratorPrototypeReturn, 1, kValue) \
+ TFJ(AsyncGeneratorPrototypeReturn, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* AsyncGenerator.prototype.throw ( exception ) */ \
/* proposal-async-iteration/#sec-asyncgenerator-prototype-throw */ \
- TFJ(AsyncGeneratorPrototypeThrow, 1, kValue) \
+ TFJ(AsyncGeneratorPrototypeThrow, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Await (proposal-async-iteration/#await), with resume behaviour */ \
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
+ TFJ(AsyncGeneratorAwaitCaught, 1, kAwaited) \
+ TFJ(AsyncGeneratorAwaitUncaught, 1, kAwaited) \
TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
\
@@ -1001,23 +1084,36 @@ namespace internal {
TFJ(AsyncIteratorValueUnwrap, 1, kValue)
#ifdef V8_INTL_SUPPORT
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
- \
- /* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0) \
- /* ES #sec-string.prototype.touppercase */ \
- CPP(StringPrototypeToUpperCaseIntl) \
- /* ES #sec-string.prototype.normalize */ \
- CPP(StringPrototypeNormalizeIntl)
+#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
+ BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
+ \
+ TFS(StringToLowerCaseIntl, kString) \
+ /* ES #sec-string.prototype.tolowercase */ \
+ TFJ(StringPrototypeToLowerCaseIntl, 0) \
+ /* ES #sec-string.prototype.touppercase */ \
+ CPP(StringPrototypeToUpperCaseIntl) \
+ /* ES #sec-string.prototype.normalize */ \
+ CPP(StringPrototypeNormalizeIntl) \
+ /* ecma402 #sec-intl.numberformat.prototype.formattoparts */ \
+ CPP(NumberFormatPrototypeFormatToParts)
#else
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
\
/* no-op fallback version */ \
- CPP(StringPrototypeNormalize)
+ CPP(StringPrototypeNormalize) \
+  /* same as toLowerCase; fallback version */                               \
+ CPP(StringPrototypeToLocaleLowerCase) \
+  /* same as toUpperCase; fallback version */                               \
+ CPP(StringPrototypeToLocaleUpperCase) \
+ /* (obsolete) Unibrow version */ \
+ CPP(StringPrototypeToLowerCase) \
+ /* (obsolete) Unibrow version */ \
+ CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
+// The exceptions thrown in the following builtins are caught
+// internally and result in a promise rejection.
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
@@ -1028,14 +1124,18 @@ namespace internal {
V(AsyncGeneratorAwaitCaught) \
V(AsyncGeneratorAwaitUncaught) \
V(PerformNativePromiseThen) \
+ V(PromiseAll) \
V(PromiseConstructor) \
V(PromiseHandle) \
+ V(PromiseRace) \
V(PromiseResolve) \
V(PromiseResolveClosure) \
V(RejectNativePromise) \
V(ResolveNativePromise) \
V(ResolvePromise)
+// The exceptions thrown in the following builtins are caught internally and
+// will not be propagated further or re-thrown.
#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseHandleReject)
#define IGNORE_BUILTIN(...)
@@ -1058,6 +1158,14 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFJ(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+
+#define BUILTIN_LIST_TFC(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+
#define BUILTINS_WITH_UNTAGGED_PARAMS(V) V(WasmCompileLazy)
} // namespace internal
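
The BUILTIN_LIST_TFJ and BUILTIN_LIST_TFC helpers above use the usual X-macro filtering idiom: BUILTIN_LIST expands every entry through one of eight visitor macros, so passing IGNORE_BUILTIN in every slot but one keeps only the entries of a single builtin kind. A minimal standalone sketch of the idiom; MY_LIST, IGNORE and COUNT_ONE are hypothetical names and not part of the patch:

#define MY_LIST(CPP, TFJ) \
  CPP(Illegal)            \
  TFJ(MapGet)

#define IGNORE(...)
#define MY_LIST_TFJ(V) MY_LIST(IGNORE, V)  // visits only the TFJ entries

#define COUNT_ONE(name) +1
static const int kNumTfjEntries = 0 MY_LIST_TFJ(COUNT_ONE);  // 0 +1 == 1
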
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 5b28863364..6d33d88f3f 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/accessors.h"
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
@@ -40,10 +41,12 @@ BUILTIN(ErrorConstructor) {
BUILTIN(ErrorCaptureStackTrace) {
HandleScope scope(isolate);
Handle<Object> object_obj = args.atOrUndefined(isolate, 1);
+
if (!object_obj->IsJSObject()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument, object_obj));
}
+
Handle<JSObject> object = Handle<JSObject>::cast(object_obj);
Handle<Object> caller = args.atOrUndefined(isolate, 2);
FrameSkipMode mode = caller->IsJSFunction() ? SKIP_UNTIL_SEEN : SKIP_FIRST;
@@ -52,27 +55,24 @@ BUILTIN(ErrorCaptureStackTrace) {
RETURN_FAILURE_ON_EXCEPTION(isolate,
isolate->CaptureAndSetDetailedStackTrace(object));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetSimpleStackTrace(object, mode, caller));
+
+ // Add the stack accessors.
+
+ Handle<AccessorInfo> error_stack =
+ Accessors::ErrorStackInfo(isolate, DONT_ENUM);
- // Eagerly format the stack trace and set the stack property.
-
- Handle<Object> stack_trace =
- isolate->CaptureSimpleStackTrace(object, mode, caller);
- if (!stack_trace->IsJSArray()) return isolate->heap()->undefined_value();
-
- Handle<Object> formatted_stack_trace;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, formatted_stack_trace,
- ErrorUtils::FormatStackTrace(isolate, object, stack_trace));
-
- PropertyDescriptor desc;
- desc.set_configurable(true);
- desc.set_writable(true);
- desc.set_value(formatted_stack_trace);
- Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate, object, isolate->factory()->stack_string(), &desc,
- Object::THROW_ON_ERROR);
- if (!status.IsJust()) return isolate->heap()->exception();
- CHECK(status.FromJust());
+  // Explicitly check for non-extensible objects. Other access checks are
+  // performed by the LookupIterator in SetAccessor below.
+ if (!JSObject::IsExtensible(object)) {
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kDefineDisallowed,
+ handle(error_stack->name(), isolate)));
+ }
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::SetAccessor(object, error_stack));
return isolate->heap()->undefined_value();
}
@@ -96,8 +96,8 @@ Object* MakeGenericError(Isolate* isolate, BuiltinArguments args,
RETURN_RESULT_OR_FAILURE(
isolate, ErrorUtils::MakeGenericError(isolate, constructor,
- Smi::cast(*template_index)->value(),
- arg0, arg1, arg2, SKIP_NONE));
+ Smi::ToInt(*template_index), arg0,
+ arg1, arg2, SKIP_NONE));
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-forin-gen.cc b/deps/v8/src/builtins/builtins-forin-gen.cc
index 476d3766dc..3547bda52d 100644
--- a/deps/v8/src/builtins/builtins-forin-gen.cc
+++ b/deps/v8/src/builtins/builtins-forin-gen.cc
@@ -111,7 +111,7 @@ void ForInBuiltinsAssembler::CheckPrototypeEnumCache(Node* receiver, Node* map,
// For all objects but the receiver, check that the cache is empty.
current_map.Bind(LoadMap(current_js_object.value()));
Node* enum_length = EnumLength(current_map.value());
- Node* zero_constant = SmiConstant(Smi::kZero);
+ Node* zero_constant = SmiConstant(0);
Branch(WordEqual(enum_length, zero_constant), &loop, use_runtime);
}
}
@@ -127,8 +127,7 @@ void ForInBuiltinsAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
{
- Node* invalid_enum_cache_sentinel =
- SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
+ Node* invalid_enum_cache_sentinel = SmiConstant(kInvalidEnumCacheSentinel);
Node* enum_length = EnumLength(map);
Branch(WordEqual(enum_length, invalid_enum_cache_sentinel),
&check_dict_receiver, &check_empty_prototype);
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 6144c8828d..529e752f27 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -78,9 +78,7 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Node* native_context = LoadNativeContext(context);
Label map_done(this, vars);
- Node* bit_field = LoadMapBitField(receiver_map);
- int mask = static_cast<int>(1 << Map::kIsConstructor);
- GotoIf(IsSetWord32(bit_field, mask), &with_constructor);
+ GotoIf(IsConstructorMap(receiver_map), &with_constructor);
bound_function_map.Bind(LoadContextElement(
native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
@@ -106,7 +104,9 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Label arguments_done(this, &argument_array);
GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
Node* elements_length = ChangeUint32ToWord(Int32Sub(argc, Int32Constant(1)));
- Node* elements = AllocateFixedArray(FAST_ELEMENTS, elements_length);
+ Node* elements =
+ AllocateFixedArray(PACKED_ELEMENTS, elements_length, INTPTR_PARAMETERS,
+ kAllowLargeObjectAllocation);
VARIABLE(index, MachineType::PointerRepresentation());
index.Bind(IntPtrConstant(0));
VariableList foreach_vars({&index}, zone());
@@ -153,8 +153,8 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
JSBoundFunction::kBoundArgumentsOffset,
argument_array.value());
Node* empty_fixed_array = EmptyFixedArrayConstant();
- StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kPropertiesOffset,
- empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
empty_fixed_array);
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 7db1899b64..4f5a82cf97 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -134,8 +134,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
JSFunction::GetDerivedMap(isolate, target, new_target), Object);
Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
- Handle<Map> map = Map::AsLanguageMode(
- initial_map, shared_info->language_mode(), shared_info->kind());
+ Handle<Map> map = Map::AsLanguageMode(initial_map, shared_info);
Handle<Context> context(function->context(), isolate);
function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index b011f1e5cd..2dbf34fcff 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -18,16 +18,15 @@ class GeneratorBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void GeneratorPrototypeResume(Node* receiver, Node* value, Node* context,
+ void GeneratorPrototypeResume(CodeStubArguments* args, Node* receiver,
+ Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode,
char const* const method_name);
};
void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
- Node* receiver, Node* value, Node* context,
+ CodeStubArguments* args, Node* receiver, Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
- Node* closed = SmiConstant(JSGeneratorObject::kGeneratorClosed);
-
// Check if the {receiver} is actually a JSGeneratorObject.
Label if_receiverisincompatible(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
@@ -41,49 +40,70 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset);
Label if_receiverisclosed(this, Label::kDeferred),
if_receiverisrunning(this, Label::kDeferred);
+ Node* closed = SmiConstant(JSGeneratorObject::kGeneratorClosed);
GotoIf(SmiEqual(receiver_continuation, closed), &if_receiverisclosed);
DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
JSGeneratorObject::kGeneratorClosed);
GotoIf(SmiLessThan(receiver_continuation, closed), &if_receiverisrunning);
// Resume the {receiver} using our trampoline.
- Node* result =
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value,
- receiver, SmiConstant(resume_mode),
- SmiConstant(static_cast<int>(SuspendFlags::kGeneratorYield)));
- Return(result);
+ VARIABLE(var_exception, MachineRepresentation::kTagged, UndefinedConstant());
+ Label if_exception(this, Label::kDeferred), if_final_return(this);
+ Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
+ value, receiver, SmiConstant(resume_mode));
+ // Make sure we close the generator if there was an exception.
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // If the generator is not suspended (i.e., its state is 'executing'),
+ // close it and wrap the return value in IteratorResult.
+ Node* result_continuation =
+ LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset);
+
+ // The generator function should not close the generator by itself, let's
+ // check it is indeed not closed yet.
+ CSA_ASSERT(this, SmiNotEqual(result_continuation, closed));
+
+ Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
+ GotoIf(SmiEqual(result_continuation, executing), &if_final_return);
+
+ args->PopAndReturn(result);
+
+ BIND(&if_final_return);
+ {
+ // Close the generator.
+ StoreObjectFieldNoWriteBarrier(
+ receiver, JSGeneratorObject::kContinuationOffset, closed);
+ // Return the wrapped result.
+ args->PopAndReturn(CallBuiltin(Builtins::kCreateIterResultObject, context,
+ result, TrueConstant()));
+ }
BIND(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(
- factory()->NewStringFromAsciiChecked(method_name, TENURED)),
- receiver);
+ StringConstant(method_name), receiver);
Unreachable();
}
BIND(&if_receiverisclosed);
{
- Callable create_iter_result_object =
- CodeFactory::CreateIterResultObject(isolate());
-
// The {receiver} is closed already.
Node* result = nullptr;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = CallStub(create_iter_result_object, context,
- UndefinedConstant(), TrueConstant());
+ result = CallBuiltin(Builtins::kCreateIterResultObject, context,
+ UndefinedConstant(), TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result =
- CallStub(create_iter_result_object, context, value, TrueConstant());
+ result = CallBuiltin(Builtins::kCreateIterResultObject, context, value,
+ TrueConstant());
break;
case JSGeneratorObject::kThrow:
result = CallRuntime(Runtime::kThrow, context, value);
break;
}
- Return(result);
+ args->PopAndReturn(result);
}
BIND(&if_receiverisrunning);
@@ -91,32 +111,63 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
CallRuntime(Runtime::kThrowGeneratorRunning, context);
Unreachable();
}
+
+ BIND(&if_exception);
+ {
+ StoreObjectFieldNoWriteBarrier(
+ receiver, JSGeneratorObject::kContinuationOffset, closed);
+ CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ Unreachable();
+ }
}
// ES6 #sec-generator.prototype.next
TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- GeneratorPrototypeResume(receiver, value, context, JSGeneratorObject::kNext,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* receiver = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ GeneratorPrototypeResume(&args, receiver, value, context,
+ JSGeneratorObject::kNext,
"[Generator].prototype.next");
}
// ES6 #sec-generator.prototype.return
TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- GeneratorPrototypeResume(receiver, value, context, JSGeneratorObject::kReturn,
+ const int kValueArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* receiver = args.GetReceiver();
+ Node* value = args.GetOptionalArgumentValue(kValueArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ GeneratorPrototypeResume(&args, receiver, value, context,
+ JSGeneratorObject::kReturn,
"[Generator].prototype.return");
}
// ES6 #sec-generator.prototype.throw
TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* exception = Parameter(Descriptor::kException);
- Node* context = Parameter(Descriptor::kContext);
- GeneratorPrototypeResume(receiver, exception, context,
+ const int kExceptionArg = 0;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* receiver = args.GetReceiver();
+ Node* exception = args.GetOptionalArgumentValue(kExceptionArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ GeneratorPrototypeResume(&args, receiver, exception, context,
JSGeneratorObject::kThrow,
"[Generator].prototype.throw");
}
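
The generator prototype methods above were switched to SharedFunctionInfo::kDontAdaptArgumentsSentinel, so they no longer get a fixed parameter descriptor and must fetch their arguments through CodeStubArguments. A condensed sketch of that pattern for a hypothetical one-argument builtin, assuming it runs inside a TF_BUILTIN body (illustrative only):

Node* argc =
    ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);

Node* receiver = args.GetReceiver();
Node* value = args.GetOptionalArgumentValue(0);  // undefined when not passed

// ... do the actual work with {receiver} and {value} ...

// Builtins that read their arguments this way must also pop them on return.
args.PopAndReturn(value);
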
diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc
index fc0f580796..5708fe67fb 100644
--- a/deps/v8/src/builtins/builtins-global-gen.cc
+++ b/deps/v8/src/builtins/builtins-global-gen.cc
@@ -30,8 +30,7 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
// Check if {num} is a HeapNumber.
Label if_numisheapnumber(this),
if_numisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(num)), &if_numisheapnumber,
- &if_numisnotheapnumber);
+ Branch(IsHeapNumber(num), &if_numisheapnumber, &if_numisnotheapnumber);
BIND(&if_numisheapnumber);
{
@@ -44,17 +43,16 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
BIND(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_num.Bind(CallStub(callable, context, num));
+ var_num.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, num));
Goto(&loop);
}
}
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
// ES6 #sec-isnan-number
@@ -78,8 +76,7 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
// Check if {num} is a HeapNumber.
Label if_numisheapnumber(this),
if_numisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(num)), &if_numisheapnumber,
- &if_numisnotheapnumber);
+ Branch(IsHeapNumber(num), &if_numisheapnumber, &if_numisnotheapnumber);
BIND(&if_numisheapnumber);
{
@@ -91,17 +88,16 @@ TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
BIND(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_num.Bind(CallStub(callable, context, num));
+ var_num.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, num));
Goto(&loop);
}
}
BIND(&return_true);
- Return(BooleanConstant(true));
+ Return(TrueConstant());
BIND(&return_false);
- Return(BooleanConstant(false));
+ Return(FalseConstant());
}
} // namespace internal
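
Several hunks in this update replace CallStub with CallBuiltin. Both invoke the same generated code; CallBuiltin simply takes the Builtins:: enum value directly instead of first constructing a Callable via CodeFactory. A minimal sketch of the two equivalent forms, reusing the NonNumberToNumber builtin that appears in the hunk above (illustrative only, inside a CodeStubAssembler body with `context` and `value` Nodes in scope):

// Old style: look up a Callable, then CallStub.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
Node* converted_old = CallStub(callable, context, value);

// New style: name the builtin directly; same call, less boilerplate.
Node* converted_new = CallBuiltin(Builtins::kNonNumberToNumber, context, value);
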
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index abd961998c..1426d987fc 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -35,12 +35,12 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
Node* length = TaggedToParameter(LoadFixedArrayBaseLength(source), mode);
// Check if we can allocate in new space.
- ElementsKind kind = FAST_ELEMENTS;
+ ElementsKind kind = PACKED_ELEMENTS;
int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
- Label if_newspace(this), if_oldspace(this);
+ Label if_newspace(this), if_lospace(this, Label::kDeferred);
Branch(UintPtrOrSmiLessThan(length, IntPtrOrSmiConstant(max_elements, mode),
mode),
- &if_newspace, &if_oldspace);
+ &if_newspace, &if_lospace);
BIND(&if_newspace);
{
@@ -51,9 +51,10 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
Return(target);
}
- BIND(&if_oldspace);
+ BIND(&if_lospace);
{
- Node* target = AllocateFixedArray(kind, length, mode, kPretenured);
+ Node* target =
+ AllocateFixedArray(kind, length, mode, kAllowLargeObjectAllocation);
CopyFixedArrayElements(kind, source, target, length, UPDATE_WRITE_BARRIER,
mode);
StoreObjectField(object, JSObject::kElementsOffset, target);
@@ -68,7 +69,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
Label runtime(this, Label::kDeferred);
Node* elements = LoadElements(object);
- elements = TryGrowElementsCapacity(object, elements, FAST_DOUBLE_ELEMENTS,
+ elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS,
key, &runtime);
Return(elements);
@@ -84,7 +85,7 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
Label runtime(this, Label::kDeferred);
Node* elements = LoadElements(object);
elements =
- TryGrowElementsCapacity(object, elements, FAST_ELEMENTS, key, &runtime);
+ TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime);
Return(elements);
BIND(&runtime);
@@ -96,7 +97,7 @@ TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
Node* length = SmiToWord(Parameter(Descriptor::kLength));
// Check if we can allocate in new space.
- ElementsKind kind = FAST_ELEMENTS;
+ ElementsKind kind = PACKED_ELEMENTS;
int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
Label if_newspace(this), if_oldspace(this, Label::kDeferred);
Branch(IntPtrLessThan(length, IntPtrConstant(max_elements)), &if_newspace,
@@ -189,7 +190,7 @@ class DeletePropertyBaseAssembler : public CodeStubAssembler {
StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
SKIP_WRITE_BARRIER);
StoreDetailsByKeyIndex<NameDictionary>(properties, key_index,
- SmiConstant(Smi::kZero));
+ SmiConstant(0));
// Update bookkeeping information (see NameDictionary::ElementRemoved).
Node* nof = GetNumberOfElements<NameDictionary>(properties);
@@ -204,7 +205,7 @@ class DeletePropertyBaseAssembler : public CodeStubAssembler {
Node* capacity = GetCapacity<NameDictionary>(properties);
GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);
- CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver, name);
+ CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver);
Goto(&shrinking_done);
BIND(&shrinking_done);
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index 22d20031ea..810d6e930d 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -13,7 +13,6 @@ namespace internal {
BUILTIN(Illegal) {
UNREACHABLE();
- return isolate->heap()->undefined_value(); // Make compiler happy.
}
BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
@@ -24,17 +23,7 @@ BUILTIN(UnsupportedThrower) {
NewError(MessageTemplate::kUnsupported));
}
-// -----------------------------------------------------------------------------
-// Throwers for restricted function properties and strict arguments object
-// properties
-
-BUILTIN(RestrictedFunctionPropertiesThrower) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRestrictedFunctionProperties));
-}
-
-BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
+BUILTIN(StrictPoisonPillThrower) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index d11aa64af0..a8552338c8 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -11,51 +11,36 @@ namespace internal {
void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow,
- InterpreterPushArgsMode::kOther);
+ masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsThenCallFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow,
- InterpreterPushArgsMode::kJSFunction);
+ masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kJSFunction);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kNullOrUndefined, TailCallMode::kDisallow,
+ masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCallFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kNullOrUndefined, TailCallMode::kDisallow,
+ masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kJSFunction);
}
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow,
+ masm, ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
}
-void Builtins::Generate_InterpreterPushArgsThenTailCall(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kAllow,
- InterpreterPushArgsMode::kOther);
-}
-
-void Builtins::Generate_InterpreterPushArgsThenTailCallFunction(
- MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, TailCallMode::kAllow,
- InterpreterPushArgsMode::kJSFunction);
-}
-
void Builtins::Generate_InterpreterPushArgsThenConstruct(MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kOther);
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
index dd6ef0d0d0..0e50ce2c59 100644
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -12,41 +12,28 @@ namespace v8 {
namespace internal {
Handle<Code> Builtins::InterpreterPushArgsThenCall(
- ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode,
- InterpreterPushArgsMode mode) {
+ ConvertReceiverMode receiver_mode, InterpreterPushArgsMode mode) {
switch (mode) {
case InterpreterPushArgsMode::kJSFunction:
- if (tail_call_mode == TailCallMode::kDisallow) {
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return InterpreterPushUndefinedAndArgsThenCallFunction();
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return InterpreterPushArgsThenCallFunction();
- }
- } else {
- CHECK_EQ(receiver_mode, ConvertReceiverMode::kAny);
- return InterpreterPushArgsThenTailCallFunction();
+ switch (receiver_mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return InterpreterPushUndefinedAndArgsThenCallFunction();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ case ConvertReceiverMode::kAny:
+ return InterpreterPushArgsThenCallFunction();
}
case InterpreterPushArgsMode::kWithFinalSpread:
- CHECK(tail_call_mode == TailCallMode::kDisallow);
return InterpreterPushArgsThenCallWithFinalSpread();
case InterpreterPushArgsMode::kOther:
- if (tail_call_mode == TailCallMode::kDisallow) {
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return InterpreterPushUndefinedAndArgsThenCall();
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return InterpreterPushArgsThenCall();
- }
- } else {
- CHECK_EQ(receiver_mode, ConvertReceiverMode::kAny);
- return InterpreterPushArgsThenTailCall();
+ switch (receiver_mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return InterpreterPushUndefinedAndArgsThenCall();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ case ConvertReceiverMode::kAny:
+ return InterpreterPushArgsThenCall();
}
}
UNREACHABLE();
- return Handle<Code>::null();
}
Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
@@ -60,7 +47,6 @@ Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
return InterpreterPushArgsThenConstruct();
}
UNREACHABLE();
- return Handle<Code>::null();
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 3782d43a9a..cb7de423d3 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -18,12 +18,11 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
};
-TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
- Node* const maybe_string = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
+ Node* const string = Parameter(Descriptor::kString);
Node* const context = Parameter(Descriptor::kContext);
- Node* const string =
- ToThisString(context, maybe_string, "String.prototype.toLowerCase");
+ CSA_ASSERT(this, IsString(string));
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
@@ -64,21 +63,21 @@ TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
VariableList push_vars({&var_cursor, &var_did_change}, zone());
- BuildFastLoop(
- push_vars, start_address, end_address,
- [=, &var_cursor, &var_did_change](Node* current) {
- Node* c = Load(MachineType::Uint8(), current);
- Node* lower = Load(MachineType::Uint8(), to_lower_table_addr,
+ BuildFastLoop(push_vars, start_address, end_address,
+ [=, &var_cursor, &var_did_change](Node* current) {
+ Node* c = Load(MachineType::Uint8(), current);
+ Node* lower =
+ Load(MachineType::Uint8(), to_lower_table_addr,
ChangeInt32ToIntPtr(c));
- StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
- var_cursor.value(), lower);
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
+ var_cursor.value(), lower);
- var_did_change.Bind(
- Word32Or(Word32NotEqual(c, lower), var_did_change.value()));
+ var_did_change.Bind(Word32Or(Word32NotEqual(c, lower),
+ var_did_change.value()));
- Increment(var_cursor);
- },
- kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Increment(var_cursor);
+ },
+ kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
// Return the original string if it remained unchanged in order to preserve
// e.g. internalization and private symbols (such as the preserved object
@@ -114,11 +113,21 @@ TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
BIND(&runtime);
{
- Node* const result =
- CallRuntime(Runtime::kStringToLowerCaseIntl, context, string);
+ Node* const result = CallRuntime(Runtime::kStringToLowerCaseIntl,
+ NoContextConstant(), string);
Return(result);
}
}
+TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
+ Node* const maybe_string = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const string =
+ ToThisString(context, maybe_string, "String.prototype.toLowerCase");
+
+ Return(CallBuiltin(Builtins::kStringToLowerCaseIntl, context, string));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index c14d73b3b6..b3ad156158 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -6,12 +6,21 @@
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
+#include "src/builtins/builtins-intl.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/intl.h"
#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "unicode/decimfmt.h"
+#include "unicode/fieldpos.h"
+#include "unicode/fpositer.h"
#include "unicode/normalizer2.h"
+#include "unicode/numfmt.h"
+#include "unicode/ufieldpositer.h"
+#include "unicode/unistr.h"
+#include "unicode/ustring.h"
namespace v8 {
namespace internal {
@@ -97,5 +106,265 @@ BUILTIN(StringPrototypeNormalizeIntl) {
result.length())));
}
+namespace {
+
+// The list comes from third_party/icu/source/i18n/unicode/unum.h.
+// They're mapped to NumberFormat part types mentioned throughout
+// https://tc39.github.io/ecma402/#sec-partitionnumberpattern .
+Handle<String> IcuNumberFieldIdToNumberType(int32_t field_id, double number,
+ Isolate* isolate) {
+ switch (static_cast<UNumberFormatFields>(field_id)) {
+ case UNUM_INTEGER_FIELD:
+ if (std::isfinite(number)) return isolate->factory()->integer_string();
+ if (std::isnan(number)) return isolate->factory()->nan_string();
+ return isolate->factory()->infinity_string();
+ case UNUM_FRACTION_FIELD:
+ return isolate->factory()->fraction_string();
+ case UNUM_DECIMAL_SEPARATOR_FIELD:
+ return isolate->factory()->decimal_string();
+ case UNUM_GROUPING_SEPARATOR_FIELD:
+ return isolate->factory()->group_string();
+ case UNUM_CURRENCY_FIELD:
+ return isolate->factory()->currency_string();
+ case UNUM_PERCENT_FIELD:
+ return isolate->factory()->percentSign_string();
+ case UNUM_SIGN_FIELD:
+ return number < 0 ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
+
+ case UNUM_EXPONENT_SYMBOL_FIELD:
+ case UNUM_EXPONENT_SIGN_FIELD:
+ case UNUM_EXPONENT_FIELD:
+ // We should never get these because we're not using any scientific
+ // formatter.
+ UNREACHABLE();
+ return Handle<String>();
+
+ case UNUM_PERMILL_FIELD:
+ // We're not creating any permill formatter, and it's not even clear how
+ // that would be possible with the ICU API.
+ UNREACHABLE();
+ return Handle<String>();
+
+ default:
+ UNREACHABLE();
+ return Handle<String>();
+ }
+}
+
+bool AddElement(Handle<JSArray> array, int index,
+ Handle<String> field_type_string,
+ const icu::UnicodeString& formatted, int32_t begin, int32_t end,
+ Isolate* isolate) {
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+ Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
+ Handle<String> value;
+ JSObject::AddProperty(element, factory->type_string(), field_type_string,
+ NONE);
+
+ icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(field.getBuffer()),
+ field.length())),
+ false);
+
+ JSObject::AddProperty(element, factory->value_string(), value, NONE);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::AddDataElement(array, index, element, NONE), false);
+ return true;
+}
+
+bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
+ const NumberFormatSpan& b) {
+ // Regions that start earlier should be encountered earlier.
+ if (a.begin_pos < b.begin_pos) return true;
+ if (a.begin_pos > b.begin_pos) return false;
+ // For regions that start in the same place, regions that last longer should
+ // be encountered earlier.
+ if (a.end_pos < b.end_pos) return false;
+ if (a.end_pos > b.end_pos) return true;
+ // For regions that are exactly the same, one of them must be the "literal"
+ // backdrop we added, which has a field_id of -1, so consider higher field_ids
+ // to be later.
+ return a.field_id < b.field_id;
+}
+
+Object* FormatNumberToParts(Isolate* isolate, icu::NumberFormat* fmt,
+ double number) {
+ Factory* factory = isolate->factory();
+
+ icu::UnicodeString formatted;
+ icu::FieldPositionIterator fp_iter;
+ UErrorCode status = U_ZERO_ERROR;
+ fmt->format(number, formatted, &fp_iter, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ Handle<JSArray> result = factory->NewJSArray(0);
+ int32_t length = formatted.length();
+ if (length == 0) return *result;
+
+ std::vector<NumberFormatSpan> regions;
+ // Add a "literal" backdrop for the entire string. This will be used if no
+ // other region covers some part of the formatted string. It's possible
+ // there's another field with exactly the same begin and end as this backdrop,
+ // in which case the backdrop's field_id of -1 will give it lower priority.
+ regions.push_back(NumberFormatSpan(-1, 0, formatted.length()));
+
+ {
+ icu::FieldPosition fp;
+ while (fp_iter.next(fp)) {
+ regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
+ fp.getEndIndex()));
+ }
+ }
+
+ std::vector<NumberFormatSpan> parts = FlattenRegionsToParts(&regions);
+
+ int index = 0;
+ for (auto it = parts.begin(); it < parts.end(); it++) {
+ NumberFormatSpan part = *it;
+ Handle<String> field_type_string =
+ part.field_id == -1
+ ? isolate->factory()->literal_string()
+ : IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
+ if (!AddElement(result, index, field_type_string, formatted, part.begin_pos,
+ part.end_pos, isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ ++index;
+ }
+ JSObject::ValidateElements(*result);
+
+ return *result;
+}
+} // namespace
+
+// Flattens a list of possibly-overlapping "regions" to a list of
+// non-overlapping "parts". At least one of the input regions must span the
+// entire space of possible indexes. The regions parameter will be sorted
+// in-place according to some criteria; this is done for performance, to avoid
+// copying the input.
+std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions) {
+ // The intention of this algorithm is that it's used to translate ICU "fields"
+ // to JavaScript "parts" of a formatted string. Each ICU field and JavaScript
+ // part has an integer field_id, which corresponds to something like "grouping
+ // separator", "fraction", or "percent sign", and has a begin and end
+ // position. Here's a diagram of:
+
+ // var nf = new Intl.NumberFormat(['de'], {style:'currency',currency:'EUR'});
+ // nf.formatToParts(123456.78);
+
+ // : 6
+ // input regions: 0000000211 7
+ // ('-' means -1): ------------
+ // formatted string: "123.456,78Ā ā‚¬"
+ // output parts: 0006000211-7
+
+ // To illustrate the requirements of this algorithm, here's a contrived and
+ // convoluted example of inputs and expected outputs:
+
+ // : 4
+ // : 22 33 3
+ // : 11111 22
+ // input regions: 0000000 111
+ // : ------------
+ // formatted string: "abcdefghijkl"
+ // output parts: 0221340--231
+ // (The characters in the formatted string are irrelevant to this function.)
+
+ // We arrange the overlapping input regions like a mountain range where
+ // smaller regions are "on top" of larger regions, and we output a birds-eye
+ // view of the mountains, so that smaller regions take priority over larger
+ // regions.
+ std::sort(regions->begin(), regions->end(), cmp_NumberFormatSpan);
+ std::vector<size_t> overlapping_region_index_stack;
+ // At least one item in regions must be a region spanning the entire string.
+ // Due to the sorting above, the first item in the vector will be one of them.
+ overlapping_region_index_stack.push_back(0);
+ NumberFormatSpan top_region = regions->at(0);
+ size_t region_iterator = 1;
+ int32_t entire_size = top_region.end_pos;
+
+ std::vector<NumberFormatSpan> out_parts;
+
+ // The "climber" is a cursor that advances from left to right climbing "up"
+ // and "down" the mountains. Whenever the climber moves to the right, that
+ // represents an item of output.
+ int32_t climber = 0;
+ while (climber < entire_size) {
+ int32_t next_region_begin_pos;
+ if (region_iterator < regions->size()) {
+ next_region_begin_pos = regions->at(region_iterator).begin_pos;
+ } else {
+ // finish off the rest of the input by proceeding to the end.
+ next_region_begin_pos = entire_size;
+ }
+
+ if (climber < next_region_begin_pos) {
+ while (top_region.end_pos < next_region_begin_pos) {
+ if (climber < top_region.end_pos) {
+ // step down
+ out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
+ top_region.end_pos));
+ climber = top_region.end_pos;
+ } else {
+ // drop down
+ }
+ overlapping_region_index_stack.pop_back();
+ top_region = regions->at(overlapping_region_index_stack.back());
+ }
+ if (climber < next_region_begin_pos) {
+ // cross a plateau/mesa/valley
+ out_parts.push_back(NumberFormatSpan(top_region.field_id, climber,
+ next_region_begin_pos));
+ climber = next_region_begin_pos;
+ }
+ }
+ if (region_iterator < regions->size()) {
+ overlapping_region_index_stack.push_back(region_iterator++);
+ top_region = regions->at(overlapping_region_index_stack.back());
+ }
+ }
+ return out_parts;
+}
+
+BUILTIN(NumberFormatPrototypeFormatToParts) {
+ const char* const method = "Intl.NumberFormat.prototype.formatToParts";
+ HandleScope handle_scope(isolate);
+ CHECK_RECEIVER(JSObject, number_format_holder, method);
+
+ Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
+ Handle<Object> tag =
+ JSReceiver::GetDataProperty(number_format_holder, marker);
+ Handle<String> expected_tag =
+ isolate->factory()->NewStringFromStaticChars("numberformat");
+ if (!(tag->IsString() && String::cast(*tag)->Equals(*expected_tag))) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(method),
+ number_format_holder));
+ }
+
+ Handle<Object> x;
+ if (args.length() >= 1) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
+ Object::ToNumber(args.at(1)));
+ } else {
+ x = isolate->factory()->nan_value();
+ }
+
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ CHECK_NOT_NULL(number_format);
+
+ Object* result = FormatNumberToParts(isolate, number_format, x->Number());
+ return result;
+}
+
} // namespace internal
} // namespace v8
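
A small caller-side sketch of the FlattenRegionsToParts contract described in the comments above. The spans below are invented for illustration and are not taken from the patch; 0 and 1 stand for arbitrary ICU field ids, and -1 is the "literal" backdrop that must cover the entire formatted string:

#include <vector>
#include "src/builtins/builtins-intl.h"

void FlattenRegionsToPartsExample() {
  using v8::internal::NumberFormatSpan;
  std::vector<NumberFormatSpan> regions;
  // Backdrop over a 10-character formatted string, plus two narrower fields.
  regions.push_back(NumberFormatSpan(-1, 0, 10));
  regions.push_back(NumberFormatSpan(0, 0, 3));
  regions.push_back(NumberFormatSpan(1, 5, 8));
  std::vector<NumberFormatSpan> parts =
      v8::internal::FlattenRegionsToParts(&regions);
  // parts now holds {0,0,3}, {-1,3,5}, {1,5,8}, {-1,8,10}: the narrower fields
  // win where they overlap the backdrop, and the backdrop fills the gaps.
}
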
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
new file mode 100644
index 0000000000..8dda0c0898
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-intl.h
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_INTL_H_
+#define V8_BUILTINS_BUILTINS_INTL_H_
+
+#include <stdint.h>
+#include <vector>
+
+namespace v8 {
+namespace internal {
+
+struct NumberFormatSpan {
+ int32_t field_id;
+ int32_t begin_pos;
+ int32_t end_pos;
+
+ NumberFormatSpan() {}
+ NumberFormatSpan(int32_t field_id, int32_t begin_pos, int32_t end_pos)
+ : field_id(field_id), begin_pos(begin_pos), end_pos(end_pos) {}
+};
+
+std::vector<NumberFormatSpan> FlattenRegionsToParts(
+ std::vector<NumberFormatSpan>* regions);
+
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
new file mode 100644
index 0000000000..d60cfb7128
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -0,0 +1,184 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-iterator-gen.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object,
+ Label* if_exception,
+ Variable* exception) {
+ Node* method = GetProperty(context, object, factory()->iterator_symbol());
+ GotoIfException(method, if_exception, exception);
+
+ Callable callable = CodeFactory::Call(isolate());
+ Node* iterator = CallJS(callable, context, method, object);
+ GotoIfException(iterator, if_exception, exception);
+
+ Label done(this), if_notobject(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(iterator), &if_notobject);
+ Branch(IsJSReceiver(iterator), &done, &if_notobject);
+
+ BIND(&if_notobject);
+ {
+ Node* ret =
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kNotAnIterator), iterator);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
+
+ BIND(&done);
+ return iterator;
+}
+
+Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator,
+ Label* if_done,
+ Node* fast_iterator_result_map,
+ Label* if_exception,
+ Variable* exception) {
+ DCHECK_NOT_NULL(if_done);
+
+ // IteratorNext
+ Node* next_method = GetProperty(context, iterator, factory()->next_string());
+ GotoIfException(next_method, if_exception, exception);
+
+ // 1. a. Let result be ? Invoke(iterator, "next", Ā« Ā»).
+ Callable callable = CodeFactory::Call(isolate());
+ Node* result = CallJS(callable, context, next_method, iterator);
+ GotoIfException(result, if_exception, exception);
+
+ // 3. If Type(result) is not Object, throw a TypeError exception.
+ Label if_notobject(this, Label::kDeferred), return_result(this);
+ GotoIf(TaggedIsSmi(result), &if_notobject);
+ GotoIfNot(IsJSReceiver(result), &if_notobject);
+
+ VARIABLE(var_done, MachineRepresentation::kTagged);
+
+ if (fast_iterator_result_map != nullptr) {
+ // Fast iterator result case:
+ Label if_generic(this);
+
+ // 4. Return result.
+ Node* map = LoadMap(result);
+ GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
+
+ // IteratorComplete
+ // 2. Return ToBoolean(? Get(iterResult, "done")).
+ Node* done = LoadObjectField(result, JSIteratorResult::kDoneOffset);
+ CSA_ASSERT(this, IsBoolean(done));
+ var_done.Bind(done);
+ Goto(&return_result);
+
+ BIND(&if_generic);
+ }
+
+ // Generic iterator result case:
+ {
+ // IteratorComplete
+ // 2. Return ToBoolean(? Get(iterResult, "done")).
+ Node* done = GetProperty(context, result, factory()->done_string());
+ GotoIfException(done, if_exception, exception);
+ var_done.Bind(done);
+
+ Label to_boolean(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(done), &to_boolean);
+ Branch(IsBoolean(done), &return_result, &to_boolean);
+
+ BIND(&to_boolean);
+ var_done.Bind(CallBuiltin(Builtins::kToBoolean, context, done));
+ Goto(&return_result);
+ }
+
+ BIND(&if_notobject);
+ {
+ Node* ret =
+ CallRuntime(Runtime::kThrowIteratorResultNotAnObject, context, result);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
+
+ BIND(&return_result);
+ GotoIf(IsTrue(var_done.value()), if_done);
+ return result;
+}
+
+Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
+ Node* fast_iterator_result_map,
+ Label* if_exception,
+ Variable* exception) {
+ CSA_ASSERT(this, IsJSReceiver(result));
+
+ Label exit(this);
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ if (fast_iterator_result_map != nullptr) {
+ // Fast iterator result case:
+ Label if_generic(this);
+ Node* map = LoadMap(result);
+ GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
+ var_value.Bind(LoadObjectField(result, JSIteratorResult::kValueOffset));
+ Goto(&exit);
+
+ BIND(&if_generic);
+ }
+
+ // Generic iterator result case:
+ {
+ Node* value = GetProperty(context, result, factory()->value_string());
+ GotoIfException(value, if_exception, exception);
+ var_value.Bind(value);
+ Goto(&exit);
+ }
+
+ BIND(&exit);
+ return var_value.value();
+}
+
+void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
+ Node* iterator,
+ Label* if_exception,
+ Variable* exception) {
+ // Perform ES #sec-iteratorclose when an exception occurs. This simpler
+ // algorithm does not include redundant steps which are never reachable from
+ // the spec IteratorClose algorithm.
+ DCHECK_NOT_NULL(if_exception);
+ DCHECK_NOT_NULL(exception);
+ CSA_ASSERT(this, IsNotTheHole(exception->value()));
+ CSA_ASSERT(this, IsJSReceiver(iterator));
+
+ // Let return be ? GetMethod(iterator, "return").
+ Node* method = GetProperty(context, iterator, factory()->return_string());
+ GotoIfException(method, if_exception, exception);
+
+  // If return is undefined or null, return Completion(completion).
+ GotoIf(Word32Or(IsUndefined(method), IsNull(method)), if_exception);
+
+ {
+ // Let innerResult be Call(return, iterator, Ā« Ā»).
+ // If an exception occurs, the original exception remains bound
+ Node* inner_result =
+ CallJS(CodeFactory::Call(isolate()), context, method, iterator);
+ GotoIfException(inner_result, if_exception, nullptr);
+
+ // (If completion.[[Type]] is throw) return Completion(completion).
+ Goto(if_exception);
+ }
+}
+
+void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context,
+ Node* iterator,
+ Variable* exception) {
+ Label rethrow(this, Label::kDeferred);
+ IteratorCloseOnException(context, iterator, &rethrow, exception);
+
+ BIND(&rethrow);
+ CallRuntime(Runtime::kReThrow, context, exception->value());
+ Unreachable();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
new file mode 100644
index 0000000000..0ed6077024
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class IteratorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ // https://tc39.github.io/ecma262/#sec-getiterator --- never used for
+ // @@asyncIterator.
+ Node* GetIterator(Node* context, Node* object, Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ // https://tc39.github.io/ecma262/#sec-iteratorstep
+  // Jumps to `if_done` if the iterator is done, otherwise returns an
+  // iterator result object.
+ // `fast_iterator_result_map` refers to the map for the JSIteratorResult
+ // object, loaded from the native context.
+ Node* IteratorStep(Node* context, Node* iterator, Label* if_done,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ // https://tc39.github.io/ecma262/#sec-iteratorvalue
+ // Return the `value` field from an iterator.
+ // `fast_iterator_result_map` refers to the map for the JSIteratorResult
+ // object, loaded from the native context.
+ Node* IteratorValue(Node* context, Node* result,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ // https://tc39.github.io/ecma262/#sec-iteratorclose
+ void IteratorCloseOnException(Node* context, Node* iterator,
+ Label* if_exception, Variable* exception);
+ void IteratorCloseOnException(Node* context, Node* iterator,
+ Variable* exception);
+};
+
+} // namespace internal
+} // namespace v8
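
For orientation, IteratorCloseOnException above implements a familiar error-handling shape: on the failure path, call the iterator's "return" method, but never let a secondary failure mask the original exception. A minimal standalone C++ sketch of that shape, using plain exceptions in place of CSA labels (illustrative names only, not V8 code):

#include <exception>
#include <iostream>
#include <stdexcept>

// Hypothetical stand-ins for an iterator step and its optional return()
// method; both fail, to exercise the interesting path.
void StepThatThrows() { throw std::runtime_error("step failed"); }
void CloseThatAlsoThrows() { throw std::runtime_error("close failed"); }

void DrainIterator() {
  try {
    StepThatThrows();
  } catch (...) {
    // Best-effort close: a failure here must not mask the original error,
    // mirroring GotoIfException(inner_result, if_exception, nullptr) above.
    try {
      CloseThatAlsoThrows();
    } catch (...) {
      // The secondary exception is intentionally dropped.
    }
    throw;  // Re-raise the original exception.
  }
}

int main() {
  try {
    DrainIterator();
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";  // Prints "step failed".
  }
  return 0;
}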
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index e5c8489301..b8d7b44e0b 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -59,8 +59,8 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
} else {
// Check if {x} is already positive.
Label if_xispositive(this), if_xisnotpositive(this);
- BranchIfSmiLessThanOrEqual(SmiConstant(Smi::FromInt(0)), x,
- &if_xispositive, &if_xisnotpositive);
+ BranchIfSmiLessThanOrEqual(SmiConstant(0), x, &if_xispositive,
+ &if_xisnotpositive);
BIND(&if_xispositive);
{
@@ -93,8 +93,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
{
// Check if {x} is a HeapNumber.
Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
- &if_xisnotheapnumber);
+ Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
BIND(&if_xisheapnumber);
{
@@ -107,8 +106,7 @@ TF_BUILTIN(MathAbs, CodeStubAssembler) {
BIND(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_x.Bind(CallStub(callable, context, x));
+ var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
Goto(&loop);
}
}
@@ -140,8 +138,7 @@ void MathBuiltinsAssembler::MathRoundingOperation(
{
// Check if {x} is a HeapNumber.
Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
- &if_xisnotheapnumber);
+ Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
BIND(&if_xisheapnumber);
{
@@ -154,8 +151,7 @@ void MathBuiltinsAssembler::MathRoundingOperation(
BIND(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_x.Bind(CallStub(callable, context, x));
+ var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
Goto(&loop);
}
}
@@ -289,8 +285,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
{
// Check if {x} is a HeapNumber.
Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
- &if_xisnotheapnumber);
+ Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
BIND(&if_xisheapnumber);
{
@@ -301,8 +296,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
BIND(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_x.Bind(CallStub(callable, context, x));
+ var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
Goto(&loop);
}
}
@@ -427,7 +421,7 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
// Cached random numbers are exhausted if index is 0. Go to slow path.
Label if_cached(this);
- GotoIf(SmiAbove(smi_index.value(), SmiConstant(Smi::kZero)), &if_cached);
+ GotoIf(SmiAbove(smi_index.value(), SmiConstant(0)), &if_cached);
// Cache exhausted, populate the cache. Return value is the new index.
smi_index.Bind(CallRuntime(Runtime::kGenerateRandomNumbers, context));
@@ -435,7 +429,7 @@ TF_BUILTIN(MathRandom, CodeStubAssembler) {
// Compute next index by decrement.
BIND(&if_cached);
- Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(Smi::FromInt(1)));
+ Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(1));
StoreContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX,
new_smi_index);
@@ -468,10 +462,10 @@ TF_BUILTIN(MathSign, CodeStubAssembler) {
Return(ChangeFloat64ToTagged(x_value));
BIND(&if_xisnegative);
- Return(SmiConstant(Smi::FromInt(-1)));
+ Return(SmiConstant(-1));
BIND(&if_xispositive);
- Return(SmiConstant(Smi::FromInt(1)));
+ Return(SmiConstant(1));
}
// ES6 #sec-math.sin
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 56f988a1ca..9a1484708f 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -53,6 +53,11 @@ class NumberBuiltinsAssembler : public CodeStubAssembler {
Return(RelationalComparison(mode, lhs, rhs, context));
}
+
+ template <typename Descriptor>
+ void BinaryOp(Label* smis, Variable* var_left, Variable* var_right,
+ Label* doubles, Variable* var_left_double,
+ Variable* var_right_double);
};
// ES6 #sec-number.isfinite
@@ -65,7 +70,7 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Check if {number} contains a finite, non-NaN value.
Node* number_value = LoadHeapNumberValue(number);
@@ -89,7 +94,7 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Load the actual value of {number}.
Node* number_value = LoadHeapNumberValue(number);
@@ -118,7 +123,7 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Check if {number} contains a NaN value.
Node* number_value = LoadHeapNumberValue(number);
@@ -141,7 +146,7 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
+ GotoIfNot(IsHeapNumber(number), &return_false);
// Load the actual value of {number}.
Node* number_value = LoadHeapNumberValue(number);
@@ -205,10 +210,9 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
// a cached array index.
Label if_inputcached(this), if_inputnotcached(this);
Node* input_hash = LoadNameHashField(input);
- Node* input_bit = Word32And(
- input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
- Branch(Word32Equal(input_bit, Int32Constant(0)), &if_inputcached,
- &if_inputnotcached);
+ Branch(IsClearWord32(input_hash,
+ Name::kDoesNotContainCachedArrayIndexMask),
+ &if_inputcached, &if_inputnotcached);
BIND(&if_inputcached);
{
@@ -252,8 +256,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
{
// Need to convert the {input} to String first.
// TODO(bmeurer): This could be more efficient if necessary.
- Callable callable = CodeFactory::ToString(isolate());
- var_input.Bind(CallStub(callable, context, input));
+ var_input.Bind(CallBuiltin(Builtins::kToString, context, input));
Goto(&loop);
}
}
@@ -270,8 +273,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
// Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
Label if_radix10(this), if_generic(this, Label::kDeferred);
GotoIf(WordEqual(radix, UndefinedConstant()), &if_radix10);
- GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(10))), &if_radix10);
- GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(0))), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(10)), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(0)), &if_radix10);
Goto(&if_generic);
BIND(&if_radix10);
@@ -319,9 +322,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
{
// Check if the String {input} has a cached array index.
Node* input_hash = LoadNameHashField(input);
- Node* input_bit = Word32And(
- input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
- GotoIf(Word32NotEqual(input_bit, Int32Constant(0)), &if_generic);
+ GotoIf(IsSetWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask),
+ &if_generic);
// Return the cached array index as result.
Node* input_index =
@@ -348,985 +350,428 @@ TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
Return(result);
}
-TF_BUILTIN(Add, CodeStubAssembler) {
+class AddStubAssembler : public CodeStubAssembler {
+ public:
+ explicit AddStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ void ConvertReceiverAndLoop(Variable* var_value, Label* loop, Node* context) {
+ // Call ToPrimitive explicitly without hint (whereas ToNumber
+ // would pass a "number" hint).
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_value->Bind(CallStub(callable, context, var_value->value()));
+ Goto(loop);
+ }
+
+ void ConvertNonReceiverAndLoop(Variable* var_value, Label* loop,
+ Node* context) {
+ var_value->Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, var_value->value()));
+ Goto(loop);
+ }
+
+ void ConvertAndLoop(Variable* var_value, Node* instance_type, Label* loop,
+ Node* context) {
+ Label is_not_receiver(this, Label::kDeferred);
+ GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver);
+
+ ConvertReceiverAndLoop(var_value, loop, context);
+
+ BIND(&is_not_receiver);
+ ConvertNonReceiverAndLoop(var_value, loop, context);
+ }
+};
+
+TF_BUILTIN(Add, AddStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+ VARIABLE(var_left, MachineRepresentation::kTagged,
+ Parameter(Descriptor::kLeft));
+ VARIABLE(var_right, MachineRepresentation::kTagged,
+ Parameter(Descriptor::kRight));
// Shared entry for floating point addition.
- Label do_fadd(this);
- VARIABLE(var_fadd_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fadd_rhs, MachineRepresentation::kFloat64);
+ Label do_double_add(this);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumber conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged);
- VARIABLE(var_rhs, MachineRepresentation::kTagged);
VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_vars), end(this),
+ Variable* loop_vars[2] = {&var_left, &var_right};
+ Label loop(this, 2, loop_vars),
string_add_convert_left(this, Label::kDeferred),
string_add_convert_right(this, Label::kDeferred);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
Goto(&loop);
BIND(&loop);
{
- // Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
+ Node* left = var_left.value();
+ Node* right = var_right.value();
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_left_smi(this), if_left_heapobject(this);
+ Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject);
- BIND(&if_lhsissmi);
+ BIND(&if_left_smi);
{
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Label if_right_smi(this), if_right_heapobject(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
- BIND(&if_rhsissmi);
+ BIND(&if_right_smi);
{
- // Try fast Smi addition first.
- Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
- BitcastTaggedToWord(rhs));
+ // Try fast Smi addition first, bail out if it overflows.
+ Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(left),
+ BitcastTaggedToWord(right));
Node* overflow = Projection(1, pair);
-
- // Check if the Smi additon overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
+ Label if_overflow(this);
+ GotoIf(overflow, &if_overflow);
+ Return(BitcastWordToTaggedSigned(Projection(0, pair)));
BIND(&if_overflow);
{
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fadd);
+ var_left_double.Bind(SmiToFloat64(left));
+ var_right_double.Bind(SmiToFloat64(right));
+ Goto(&do_double_add);
}
+ } // if_right_smi
- BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
- }
-
- BIND(&if_rhsisnotsmi);
+ BIND(&if_right_heapobject);
{
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ Node* right_map = LoadMap(right);
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ Label if_right_not_number(this, Label::kDeferred);
+ GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
- BIND(&if_rhsisnumber);
- {
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fadd);
- }
+ // {right} is a HeapNumber.
+ var_left_double.Bind(SmiToFloat64(left));
+ var_right_double.Bind(LoadHeapNumberValue(right));
+ Goto(&do_double_add);
- BIND(&if_rhsisnotnumber);
+ BIND(&if_right_not_number);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
-
- // Check if the {rhs} is a String.
- Label if_rhsisstring(this, Label::kDeferred),
- if_rhsisnotstring(this, Label::kDeferred);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
-
- BIND(&if_rhsisstring);
- {
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- Goto(&string_add_convert_left);
- }
-
- BIND(&if_rhsisnotstring);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_instance_type),
+ &string_add_convert_left);
+ ConvertAndLoop(&var_right, right_instance_type, &loop, context);
}
- }
- }
+ } // if_right_heapobject
+ } // if_left_smi
- BIND(&if_lhsisnotsmi);
+ BIND(&if_left_heapobject);
{
- // Load the map and instance type of {lhs}.
- Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* left_map = LoadMap(left);
+ Label if_right_smi(this), if_right_heapobject(this);
+ Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
- // Check if {lhs} is a String.
- Label if_lhsisstring(this), if_lhsisnotstring(this);
- Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
- &if_lhsisnotstring);
-
- BIND(&if_lhsisstring);
+ BIND(&if_right_smi);
{
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- Goto(&string_add_convert_right);
- }
+ Label if_left_not_number(this, Label::kDeferred);
+ GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number);
- BIND(&if_lhsisnotstring);
- {
- // Check if {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ // {left} is a HeapNumber, {right} is a Smi.
+ var_left_double.Bind(LoadHeapNumberValue(left));
+ var_right_double.Bind(SmiToFloat64(right));
+ Goto(&do_double_add);
- BIND(&if_rhsissmi);
+ BIND(&if_left_not_number);
{
- // Check if {lhs} is a Number.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
- Branch(
- Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ GotoIf(IsStringInstanceType(left_instance_type),
+ &string_add_convert_right);
+ // {left} is neither a Number nor a String, and {right} is a Smi.
+ ConvertAndLoop(&var_left, left_instance_type, &loop, context);
+ }
+ } // if_right_smi
- BIND(&if_lhsisnumber);
- {
- // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fadd);
- }
+ BIND(&if_right_heapobject);
+ {
+ Node* right_map = LoadMap(right);
- BIND(&if_lhsisnotnumber);
- {
- // The {lhs} is neither a Number nor a String, and the {rhs} is a
- // Smi.
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- BIND(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
-
- BIND(&if_lhsisnotreceiver);
- {
- // Convert {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
- }
+ Label if_left_number(this), if_left_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number);
- BIND(&if_rhsisnotsmi);
+ BIND(&if_left_number);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ Label if_right_not_number(this, Label::kDeferred);
+ GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
- // Check if {rhs} is a String.
- Label if_rhsisstring(this), if_rhsisnotstring(this);
- Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
- &if_rhsisnotstring);
+ // Both {left} and {right} are HeapNumbers.
+ var_left_double.Bind(LoadHeapNumberValue(left));
+ var_right_double.Bind(LoadHeapNumberValue(right));
+ Goto(&do_double_add);
- BIND(&if_rhsisstring);
+ BIND(&if_right_not_number);
{
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- Goto(&string_add_convert_left);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_instance_type),
+ &string_add_convert_left);
+ // {left} is a HeapNumber, {right} is neither Number nor String.
+ ConvertAndLoop(&var_right, right_instance_type, &loop, context);
}
+ } // if_left_number
- BIND(&if_rhsisnotstring);
- {
- // Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this);
- Branch(
- Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- BIND(&if_lhsisnumber);
- {
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(this),
- if_rhsisnotnumber(this, Label::kDeferred);
- Branch(Word32Equal(rhs_instance_type,
- Int32Constant(HEAP_NUMBER_TYPE)),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- BIND(&if_rhsisnumber);
- {
- // Perform a floating point addition.
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fadd);
- }
-
- BIND(&if_rhsisnotnumber);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
- }
-
- BIND(&if_lhsisnotnumber);
- {
- // Check if {lhs} is a JSReceiver.
- Label if_lhsisreceiver(this, Label::kDeferred),
- if_lhsisnotreceiver(this);
- Branch(IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- BIND(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
-
- BIND(&if_lhsisnotreceiver);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(this, Label::kDeferred),
- if_rhsisnotreceiver(this, Label::kDeferred);
- Branch(IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- BIND(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
-
- BIND(&if_rhsisnotreceiver);
- {
- // Convert {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
- }
- }
+ BIND(&if_left_not_number);
+ {
+ Node* left_instance_type = LoadMapInstanceType(left_map);
+ GotoIf(IsStringInstanceType(left_instance_type),
+ &string_add_convert_right);
+ Node* right_instance_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_instance_type),
+ &string_add_convert_left);
+ Label if_left_not_receiver(this, Label::kDeferred);
+ Label if_right_not_receiver(this, Label::kDeferred);
+ GotoIfNot(IsJSReceiverInstanceType(left_instance_type),
+ &if_left_not_receiver);
+ // {left} is a JSReceiver, convert it first.
+ ConvertReceiverAndLoop(&var_left, &loop, context);
+
+ BIND(&if_left_not_receiver);
+ GotoIfNot(IsJSReceiverInstanceType(right_instance_type),
+ &if_right_not_receiver);
+ // {left} is a Primitive, but {right} is a JSReceiver, so convert
+ // {right} with priority.
+ ConvertReceiverAndLoop(&var_right, &loop, context);
+
+ BIND(&if_right_not_receiver);
+        // Neither {left} nor {right} is a JSReceiver.
+ ConvertNonReceiverAndLoop(&var_left, &loop, context);
}
- }
- }
+ } // if_right_heapobject
+ } // if_left_heapobject
}
BIND(&string_add_convert_left);
{
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
+ // Convert {left} to a String and concatenate it with the String {right}.
Callable callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- var_result.Bind(
- CallStub(callable, context, var_lhs.value(), var_rhs.value()));
- Goto(&end);
+ Return(CallStub(callable, context, var_left.value(), var_right.value()));
}
BIND(&string_add_convert_right);
{
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
+ // Convert {right} to a String and concatenate it with the String {left}.
Callable callable = CodeFactory::StringAdd(
isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- var_result.Bind(
- CallStub(callable, context, var_lhs.value(), var_rhs.value()));
- Goto(&end);
+ Return(CallStub(callable, context, var_left.value(), var_right.value()));
}
- BIND(&do_fadd);
+ BIND(&do_double_add);
{
- Node* lhs_value = var_fadd_lhs.value();
- Node* rhs_value = var_fadd_rhs.value();
- Node* value = Float64Add(lhs_value, rhs_value);
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
- Goto(&end);
+ Node* value = Float64Add(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
- BIND(&end);
- Return(var_result.value());
}
-TF_BUILTIN(Subtract, CodeStubAssembler) {
+template <typename Descriptor>
+void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left,
+ Variable* var_right, Label* doubles,
+ Variable* var_left_double,
+ Variable* var_right_double) {
+ DCHECK(var_left->rep() == MachineRepresentation::kTagged);
+ DCHECK(var_right->rep() == MachineRepresentation::kTagged);
+
Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
-
- // Shared entry for floating point subtraction.
- Label do_fsub(this), end(this);
- VARIABLE(var_fsub_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fsub_rhs, MachineRepresentation::kFloat64);
-
- // We might need to loop several times due to ToPrimitive and/or ToNumber
- // conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged);
- VARIABLE(var_rhs, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_vars);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
+ var_left->Bind(Parameter(Descriptor::kLeft));
+ var_right->Bind(Parameter(Descriptor::kRight));
+
+ // We might need to loop for ToNumber conversions.
+ Label loop(this, {var_left, var_right});
Goto(&loop);
BIND(&loop);
- {
- // Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
- BIND(&if_lhsissmi);
- {
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- // Try a fast Smi subtraction first.
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
- BitcastTaggedToWord(rhs));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
-
- BIND(&if_overflow);
- {
- // The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
- }
-
- BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
- }
-
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ Label left_not_smi(this), right_not_smi(this);
+ Label left_not_number(this), right_not_number(this);
+ GotoIfNot(TaggedIsSmi(var_left->value()), &left_not_smi);
+ GotoIf(TaggedIsSmi(var_right->value()), smis);
- BIND(&if_rhsisnumber);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
- }
+ // At this point, var_left is a Smi but var_right is not.
+ GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
+ var_left_double->Bind(SmiToFloat64(var_left->value()));
+ var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ Goto(doubles);
- BIND(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
- }
-
- BIND(&if_lhsisnotsmi);
- {
- // Load the map of the {lhs}.
- Node* lhs_map = LoadMap(lhs);
-
- // Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
-
- BIND(&if_lhsisnumber);
- {
- // Check if the {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
- }
-
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ BIND(&left_not_smi);
+ {
+ GotoIfNot(IsHeapNumber(var_left->value()), &left_not_number);
+ GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi);
- BIND(&if_rhsisnumber);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
- }
+ // At this point, var_left is a HeapNumber and var_right is a Smi.
+ var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
+ var_right_double->Bind(SmiToFloat64(var_right->value()));
+ Goto(doubles);
+ }
- BIND(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
- Goto(&loop);
- }
- }
- }
+ BIND(&right_not_smi);
+ {
+ GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number);
+ var_left_double->Bind(LoadHeapNumberValue(var_left->value()));
+ var_right_double->Bind(LoadHeapNumberValue(var_right->value()));
+ Goto(doubles);
+ }
- BIND(&if_lhsisnotnumber);
- {
- // Convert the {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
+ BIND(&left_not_number);
+ {
+ var_left->Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, var_left->value()));
+ Goto(&loop);
}
- BIND(&do_fsub);
+ BIND(&right_not_number);
{
- Node* lhs_value = var_fsub_lhs.value();
- Node* rhs_value = var_fsub_rhs.value();
- Node* value = Float64Sub(lhs_value, rhs_value);
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
+ var_right->Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, var_right->value()));
+ Goto(&loop);
}
- BIND(&end);
- Return(var_result.value());
}
-TF_BUILTIN(Multiply, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
+TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_sub(this), do_double_sub(this);
- // Shared entry point for floating point multiplication.
- Label do_fmul(this), return_result(this);
- VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
+ BinaryOp<Descriptor>(&do_smi_sub, &var_left, &var_right, &do_double_sub,
+ &var_left_double, &var_right_double);
- // We might need to loop one or two times due to ToNumber conversions.
- VARIABLE(var_lhs, MachineRepresentation::kTagged);
- VARIABLE(var_rhs, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_variables);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
- Goto(&loop);
- BIND(&loop);
+ BIND(&do_smi_sub);
{
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- Label lhs_is_smi(this), lhs_is_not_smi(this);
- Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
-
- BIND(&lhs_is_smi);
+ // Try a fast Smi subtraction first, bail out if it overflows.
+ Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(var_left.value()),
+ BitcastTaggedToWord(var_right.value()));
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ BIND(&if_overflow);
{
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
- // in case of overflow.
- var_result.Bind(SmiMul(lhs, rhs));
- Goto(&return_result);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
-
- BIND(&rhs_is_number);
- {
- // Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(SmiToFloat64(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_number);
- {
- // Multiplication is commutative, swap {lhs} with {rhs} and loop.
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
- }
+ var_left_double.Bind(SmiToFloat64(var_left.value()));
+ var_right_double.Bind(SmiToFloat64(var_right.value()));
+ Goto(&do_double_sub);
}
- BIND(&lhs_is_not_smi);
- {
- Node* lhs_map = LoadMap(lhs);
-
- // Check if {lhs} is a HeapNumber.
- Label lhs_is_number(this), lhs_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(lhs_map), &lhs_is_number, &lhs_is_not_number);
-
- BIND(&lhs_is_number);
- {
- // Check if {rhs} is a Smi.
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Convert {rhs} to a double and multiply it with the value of {lhs}.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(SmiToFloat64(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
-
- BIND(&rhs_is_number);
- {
- // Both {lhs} and {rhs} are HeapNumbers. Load their values and
- // multiply them.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_number);
- {
- // Multiplication is commutative, swap {lhs} with {rhs} and loop.
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- Goto(&loop);
- }
- }
- }
-
- BIND(&lhs_is_not_number);
- {
- // Convert {lhs} to a Number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- Goto(&loop);
- }
- }
+ BIND(&if_notoverflow);
+ Return(BitcastWordToTaggedSigned(Projection(0, pair)));
}
- BIND(&do_fmul);
+ BIND(&do_double_sub);
{
- Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
- Goto(&return_result);
+ Node* value = Float64Sub(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
-
- BIND(&return_result);
- Return(var_result.value());
}
-TF_BUILTIN(Divide, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
-
- // Shared entry point for floating point division.
- Label do_fdiv(this), end(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
-
- // We might need to loop one or two times due to ToNumber conversions.
- VARIABLE(var_dividend, MachineRepresentation::kTagged);
- VARIABLE(var_divisor, MachineRepresentation::kTagged);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(this, 2, loop_variables);
- var_dividend.Bind(left);
- var_divisor.Bind(right);
- Goto(&loop);
- BIND(&loop);
- {
- Node* dividend = var_dividend.value();
- Node* divisor = var_divisor.value();
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
+TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_mul(this), do_double_mul(this);
- BIND(&dividend_is_smi);
- {
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
+ BinaryOp<Descriptor>(&do_smi_mul, &var_left, &var_right, &do_double_mul,
+ &var_left_double, &var_right_double);
- BIND(&divisor_is_smi);
- {
- Label bailout(this);
+ BIND(&do_smi_mul);
+ // The result is not necessarily a smi, in case of overflow.
+ Return(SmiMul(var_left.value(), var_right.value()));
- // Do floating point division if {divisor} is zero.
- GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
+ BIND(&do_double_mul);
+ Node* value = Float64Mul(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
+}
- // Do floating point division {dividend} is zero and {divisor} is
- // negative.
- Label dividend_is_zero(this), dividend_is_not_zero(this);
- Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
- &dividend_is_not_zero);
+TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_div(this), do_double_div(this);
- BIND(&dividend_is_zero);
- {
- GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
- Goto(&dividend_is_not_zero);
- }
- BIND(&dividend_is_not_zero);
+ BinaryOp<Descriptor>(&do_smi_div, &var_left, &var_right, &do_double_div,
+ &var_left_double, &var_right_double);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ BIND(&do_smi_div);
+ {
+ // TODO(jkummerow): Consider just always doing a double division.
+ Label bailout(this);
+ Node* dividend = var_left.value();
+ Node* divisor = var_right.value();
- // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
- // if the Smi size is 31) and {divisor} is -1.
- Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
- Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
- &divisor_is_minus_one, &divisor_is_not_minus_one);
+ // Do floating point division if {divisor} is zero.
+ GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
- BIND(&divisor_is_minus_one);
- {
- GotoIf(
- Word32Equal(untagged_dividend,
- Int32Constant(kSmiValueSize == 32 ? kMinInt
- : (kMinInt >> 1))),
- &bailout);
- Goto(&divisor_is_not_minus_one);
- }
- BIND(&divisor_is_not_minus_one);
-
- // TODO(epertoso): consider adding a machine instruction that returns
- // both the result and the remainder.
- Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
- Node* truncated = Int32Mul(untagged_result, untagged_divisor);
- // Do floating point division if the remainder is not 0.
- GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
- var_result.Bind(SmiFromWord32(untagged_result));
- Goto(&end);
-
- // Bailout: convert {dividend} and {divisor} to double and do double
- // division.
- BIND(&bailout);
- {
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
- }
+ // Do floating point division if {dividend} is zero and {divisor} is
+ // negative.
+ Label dividend_is_zero(this), dividend_is_not_zero(this);
+ Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
+ &dividend_is_not_zero);
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
+ BIND(&dividend_is_zero);
+ {
+ GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
+ Goto(&dividend_is_not_zero);
+ }
+ BIND(&dividend_is_not_zero);
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
+ Node* untagged_divisor = SmiToWord32(divisor);
+ Node* untagged_dividend = SmiToWord32(dividend);
- BIND(&divisor_is_number);
- {
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
+ // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
+ // if the Smi size is 31) and {divisor} is -1.
+ Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
+ Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
+ BIND(&divisor_is_minus_one);
+ {
+ GotoIf(Word32Equal(
+ untagged_dividend,
+ Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+ &bailout);
+ Goto(&divisor_is_not_minus_one);
}
-
- BIND(&dividend_is_not_smi);
+ BIND(&divisor_is_not_minus_one);
+
+ // TODO(epertoso): consider adding a machine instruction that returns
+ // both the result and the remainder.
+ Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated = Int32Mul(untagged_result, untagged_divisor);
+ // Do floating point division if the remainder is not 0.
+ GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
+ Return(SmiFromWord32(untagged_result));
+
+ // Bailout: convert {dividend} and {divisor} to double and do double
+ // division.
+ BIND(&bailout);
{
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- Label dividend_is_number(this),
- dividend_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
- &dividend_is_not_number);
-
- BIND(&dividend_is_number);
- {
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
-
- BIND(&divisor_is_number);
- {
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
- }
-
- BIND(&dividend_is_not_number);
- {
- // Convert {dividend} to a Number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_dividend.Bind(CallStub(callable, context, dividend));
- Goto(&loop);
- }
+ var_left_double.Bind(SmiToFloat64(dividend));
+ var_right_double.Bind(SmiToFloat64(divisor));
+ Goto(&do_double_div);
}
}
- BIND(&do_fdiv);
+ BIND(&do_double_div);
{
- Node* value =
- Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
+ Node* value = Float64Div(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
- BIND(&end);
- Return(var_result.value());
}
-TF_BUILTIN(Modulus, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
- Label return_result(this, &var_result);
-
- // Shared entry point for floating point modulus.
- Label do_fmod(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
-
- // We might need to loop one or two times due to ToNumber conversions.
- VARIABLE(var_dividend, MachineRepresentation::kTagged);
- VARIABLE(var_divisor, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(this, 2, loop_variables);
- var_dividend.Bind(left);
- var_divisor.Bind(right);
- Goto(&loop);
- BIND(&loop);
- {
- Node* dividend = var_dividend.value();
- Node* divisor = var_divisor.value();
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
-
- BIND(&dividend_is_smi);
- {
- Label dividend_is_not_zero(this);
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Compute the modulus of two Smis.
- var_result.Bind(SmiMod(dividend, divisor));
- Goto(&return_result);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
-
- BIND(&divisor_is_number);
- {
- // Convert {dividend} to a double and compute its modulus with the
- // value of {dividend}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
- }
-
- BIND(&dividend_is_not_smi);
- {
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- Label dividend_is_number(this),
- dividend_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
- &dividend_is_not_number);
-
- BIND(&dividend_is_number);
- {
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and compute {dividend}'s modulus with
- // it.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(this),
- divisor_is_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
- &divisor_is_not_number);
-
- BIND(&divisor_is_number);
- {
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and compute their modulus.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_divisor.Bind(CallStub(callable, context, divisor));
- Goto(&loop);
- }
- }
- }
+TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
+ VARIABLE(var_left, MachineRepresentation::kTagged);
+ VARIABLE(var_right, MachineRepresentation::kTagged);
+ VARIABLE(var_left_double, MachineRepresentation::kFloat64);
+ VARIABLE(var_right_double, MachineRepresentation::kFloat64);
+ Label do_smi_mod(this), do_double_mod(this);
- BIND(&dividend_is_not_number);
- {
- // Convert {dividend} to a Number and loop.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_dividend.Bind(CallStub(callable, context, dividend));
- Goto(&loop);
- }
- }
- }
+ BinaryOp<Descriptor>(&do_smi_mod, &var_left, &var_right, &do_double_mod,
+ &var_left_double, &var_right_double);
- BIND(&do_fmod);
- {
- Node* value =
- Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&return_result);
- }
+ BIND(&do_smi_mod);
+ Return(SmiMod(var_left.value(), var_right.value()));
- BIND(&return_result);
- Return(var_result.value());
+ BIND(&do_double_mod);
+ Node* value = Float64Mod(var_left_double.value(), var_right_double.value());
+ Return(AllocateHeapNumberWithValue(value));
}
TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
@@ -1393,60 +838,5 @@ TF_BUILTIN(StrictEqual, CodeStubAssembler) {
Return(StrictEqual(lhs, rhs));
}
-TF_BUILTIN(AddWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_AddWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(SubtractWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_SubtractWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(MultiplyWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_MultiplyWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(DivideWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_DivideWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
-TF_BUILTIN(ModulusWithFeedback, BinaryOpAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* left = Parameter(Descriptor::kLeft);
- Node* right = Parameter(Descriptor::kRight);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
-
- Return(Generate_ModulusWithFeedback(context, left, right,
- ChangeUint32ToWord(slot), vector));
-}
-
} // namespace internal
} // namespace v8
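
For orientation, the rewritten Add, Subtract, Multiply, Divide and Modulus builtins above share one shape: try a small-integer (Smi) fast path, and fall back to double arithmetic whenever the integer result would be wrong or unrepresentable. A rough standalone C++ analogue of the Divide fast-path guards (illustrative only, not V8 code; V8 additionally narrows the kMinInt check for 31-bit Smis):

#include <cstdint>
#include <iostream>

// Returns true and writes *out when small-integer division is exact and
// safe; returns false when the caller should fall back to double division.
// The guards loosely mirror the bailouts in TF_BUILTIN(Divide) above.
bool TrySmallIntDiv(int32_t dividend, int32_t divisor, int32_t* out) {
  if (divisor == 0) return false;                            // needs Infinity/NaN
  if (dividend == 0 && divisor < 0) return false;            // result is -0.0
  if (dividend == INT32_MIN && divisor == -1) return false;  // overflows int32
  if (dividend % divisor != 0) return false;                 // fractional result
  *out = dividend / divisor;
  return true;
}

int main() {
  int32_t quotient;
  if (TrySmallIntDiv(6, 3, &quotient)) {
    std::cout << quotient << "\n";   // fast path: prints 2
  }
  if (!TrySmallIntDiv(1, 2, &quotient)) {
    std::cout << 1.0 / 2.0 << "\n";  // double fallback: prints 0.5
  }
  return 0;
}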
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 346bafa1ae..2622daba49 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -39,10 +39,10 @@ BUILTIN(NumberPrototypeToExponential) {
isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
double const fraction_digits_number = fraction_digits->Number();
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
if (fraction_digits_number < 0.0 || fraction_digits_number > 20.0) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -91,10 +91,10 @@ BUILTIN(NumberPrototypeToFixed) {
"toFixed() digits")));
}
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
char* const str = DoubleToFixedCString(
value_number, static_cast<int>(fraction_digits_number));
@@ -153,10 +153,10 @@ BUILTIN(NumberPrototypeToPrecision) {
Object::ToInteger(isolate, precision));
double const precision_number = precision->Number();
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
if (precision_number < 1.0 || precision_number > 21.0) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -217,10 +217,10 @@ BUILTIN(NumberPrototypeToString) {
}
// Slow case.
- if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isnan(value_number)) return isolate->heap()->NaN_string();
if (std::isinf(value_number)) {
- return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
- : isolate->heap()->infinity_string();
+ return (value_number < 0.0) ? isolate->heap()->minus_Infinity_string()
+ : isolate->heap()->Infinity_string();
}
char* const str =
DoubleToRadixCString(value_number, static_cast<int>(radix_number));
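
For orientation, every Number.prototype method touched above special-cases non-finite inputs before any digit formatting, which is what the renamed NaN_string / Infinity_string heap accessors serve. A small standalone C++ sketch of that early-out (illustrative only, not V8 code):

#include <cmath>
#include <iostream>
#include <limits>
#include <string>

// Mirrors the early-outs shared by toExponential/toFixed/toPrecision/toString
// above: non-finite values never reach the digit-formatting helpers.
std::string NonFiniteToString(double value) {
  if (std::isnan(value)) return "NaN";
  if (std::isinf(value)) return value < 0.0 ? "-Infinity" : "Infinity";
  return "";  // finite: the caller formats digits instead
}

int main() {
  std::cout << NonFiniteToString(std::nan("")) << "\n";  // NaN
  std::cout << NonFiniteToString(-std::numeric_limits<double>::infinity())
            << "\n";                                      // -Infinity
  return 0;
}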
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 6173bb79ab..6db05d9f1f 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -39,8 +39,8 @@ void ObjectBuiltinsAssembler::IsString(Node* object, Label* if_string,
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
Node* string) {
- Node* lhs = HeapConstant(factory()->NewStringFromStaticChars("[object "));
- Node* rhs = HeapConstant(factory()->NewStringFromStaticChars("]"));
+ Node* lhs = StringConstant("[object ");
+ Node* rhs = StringConstant("]");
Callable callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
@@ -157,15 +157,15 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
Node* array = nullptr;
Node* elements = nullptr;
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
Node* array_length = SmiTag(object_enum_length);
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- FAST_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
+ PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
INTPTR_PARAMETERS);
StoreMapNoWriteBarrier(elements, Heap::kFixedArrayMapRootIndex);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
array_length);
- CopyFixedArrayElements(FAST_ELEMENTS, object_enum_cache, elements,
+ CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_cache, elements,
object_enum_length, SKIP_WRITE_BARRIER);
Return(array);
}
@@ -191,15 +191,61 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
{
// Wrap the elements into a proper JSArray and return that.
Node* native_context = LoadNativeContext(context);
- Node* array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
Node* array = AllocateUninitializedJSArrayWithoutElements(
- FAST_ELEMENTS, array_map, var_length.value(), nullptr);
+ PACKED_ELEMENTS, array_map, var_length.value(), nullptr);
StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
var_elements.value());
Return(array);
}
}
+// ES #sec-object.prototype.isprototypeof
+TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* context = Parameter(Descriptor::kContext);
+ Label if_receiverisnullorundefined(this, Label::kDeferred),
+ if_valueisnotreceiver(this, Label::kDeferred);
+
+ // We only check whether {value} is a Smi here, so that the
+  // prototype chain walk below can safely access the {value}'s
+  // map. We don't rule out Primitive {value}s, since all of
+  // them have null as their prototype, so the chain walk below
+  // immediately aborts and returns false anyway.
+ GotoIf(TaggedIsSmi(value), &if_valueisnotreceiver);
+
+ // Check if {receiver} is either null or undefined and in that case,
+ // invoke the ToObject builtin, which raises the appropriate error.
+ // Otherwise we don't need to invoke ToObject, since {receiver} is
+ // either already a JSReceiver, in which case ToObject is a no-op,
+ // or it's a Primitive and ToObject would allocate a fresh JSValue
+ // wrapper, which wouldn't be identical to any existing JSReceiver
+ // found in the prototype chain of {value}, hence it will return
+ // false no matter if we search for the Primitive {receiver} or
+ // a newly allocated JSValue wrapper for {receiver}.
+ GotoIf(IsNull(receiver), &if_receiverisnullorundefined);
+ GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined);
+
+ // Loop through the prototype chain looking for the {receiver}.
+ Return(HasInPrototypeChain(context, value, receiver));
+
+ BIND(&if_receiverisnullorundefined);
+ {
+ // If {value} is a primitive HeapObject, we need to return
+ // false instead of throwing an exception per order of the
+ // steps in the specification, so check that first here.
+ GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver);
+
+ // Simulate the ToObject invocation on {receiver}.
+ CallBuiltin(Builtins::kToObject, context, receiver);
+ Unreachable();
+ }
+
+ BIND(&if_valueisnotreceiver);
+ Return(FalseConstant());
+}
+
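The Smi/primitive and null-or-undefined receiver cases described in the comments above are observable from JavaScript. A hedged TypeScript sketch of that behavior (illustration only, not V8 internals):

  const proto = { tag: "base" };
  const obj = Object.create(proto);

  console.log(proto.isPrototypeOf(obj));   // true: found during the prototype chain walk
  console.log(proto.isPrototypeOf(42));    // false: primitive values bail out immediately

  try {
    // A null receiver goes through ToObject and throws, but only after the
    // value has been confirmed to be an object.
    Object.prototype.isPrototypeOf.call(null, obj);
  } catch (e) {
    console.log(e instanceof TypeError);   // true: thrown by the simulated ToObject call
  }

  // With a primitive value, false is returned before ToObject can throw.
  console.log(Object.prototype.isPrototypeOf.call(null, 42)); // false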
// ES6 #sec-object.prototype.tostring
TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
Label return_undefined(this, Label::kDeferred),
@@ -222,8 +268,7 @@ TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
GotoIf(WordEqual(receiver, NullConstant()), &return_null);
- Callable to_object = CodeFactory::ToObject(isolate());
- receiver = CallStub(to_object, context, receiver);
+ receiver = CallBuiltin(Builtins::kToObject, context, receiver);
Node* receiver_instance_type = LoadInstanceType(receiver);
@@ -368,17 +413,21 @@ TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
- Callable to_object = CodeFactory::ToObject(isolate());
- receiver = CallStub(to_object, context, receiver);
-
- Return(receiver);
+ Return(CallBuiltin(Builtins::kToObject, context, receiver));
}
// ES #sec-object.create
TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
- Node* prototype = Parameter(Descriptor::kPrototype);
- Node* properties = Parameter(Descriptor::kProperties);
- Node* context = Parameter(Descriptor::kContext);
+ int const kPrototypeArg = 0;
+ int const kPropertiesArg = 1;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* prototype = args.GetOptionalArgumentValue(kPrototypeArg);
+ Node* properties = args.GetOptionalArgumentValue(kPropertiesArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
Label call_runtime(this, Label::kDeferred), prototype_valid(this),
no_properties(this);
@@ -449,13 +498,15 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&instantiate_map);
{
Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
- Return(instance);
+ args.PopAndReturn(instance);
}
}
BIND(&call_runtime);
{
- Return(CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
+ Node* result =
+ CallRuntime(Runtime::kObjectCreate, context, prototype, properties);
+ args.PopAndReturn(result);
}
}
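With the switch to CodeStubArguments both arguments become optional reads. A TypeScript sketch of the resulting argument handling as seen from JavaScript (illustration only; names here are just examples):

  const dict = Object.create(null);           // prototype may be null
  dict["key"] = 1;
  console.log(Object.getPrototypeOf(dict));   // null

  const child = Object.create({ greet: "hi" }, {
    extra: { value: 42, enumerable: true },   // a non-undefined second argument falls through to the runtime path
  });
  console.log(child.greet, child.extra);      // "hi" 42

  try {
    (Object.create as any)();                 // missing prototype argument reads as undefined
  } catch (e) {
    console.log(e instanceof TypeError);      // true: prototype must be an object or null
  }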
@@ -527,8 +578,8 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
Node* size = WordSar(frame_size, IntPtrConstant(kPointerSizeLog2));
- Node* register_file = AllocateFixedArray(FAST_HOLEY_ELEMENTS, size);
- FillFixedArrayWithValue(FAST_HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
+ Node* register_file = AllocateFixedArray(HOLEY_ELEMENTS, size);
+ FillFixedArrayWithValue(HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
size, Heap::kUndefinedValueRootIndex);
Node* const result = AllocateJSObjectFromMap(maybe_map);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1b236ec97c..d6d4772fd3 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
@@ -33,7 +34,7 @@ void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
SmiConstant(0));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::kZero));
+ StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(0));
}
}
@@ -68,7 +69,7 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(Node* context,
SmiConstant(0));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(instance, offset, SmiConstant(Smi::kZero));
+ StoreObjectFieldNoWriteBarrier(instance, offset, SmiConstant(0));
}
Label out(this);
@@ -182,33 +183,26 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Goto(&out);
BIND(&if_notcallable);
- Node* message = SmiConstant(MessageTemplate::kPromiseNonCallable);
StoreObjectField(capability, JSPromiseCapability::kPromiseOffset,
UndefinedConstant());
StoreObjectField(capability, JSPromiseCapability::kResolveOffset,
UndefinedConstant());
StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
UndefinedConstant());
- CallRuntime(Runtime::kThrowTypeError, context, message);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
BIND(&if_not_constructor);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kNotConstructor);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, constructor);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, constructor);
BIND(&out);
return var_result.value();
}
-Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
- int slots) {
+void PromiseBuiltinsAssembler::InitializeFunctionContext(Node* native_context,
+ Node* context,
+ int slots) {
DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
-
- Node* const context = Allocate(FixedArray::SizeFor(slots));
StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
SmiConstant(slots));
@@ -222,6 +216,14 @@ Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
TheHoleConstant());
StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
native_context);
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
+ int slots) {
+ DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const context = AllocateInNewSpace(FixedArray::SizeFor(slots));
+ InitializeFunctionContext(native_context, context, slots);
return context;
}
@@ -261,16 +263,7 @@ Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
- {
- Node* const method =
- method_name == nullptr
- ? UndefinedConstant()
- : HeapConstant(
- isolate()->factory()->NewStringFromAsciiChecked(method_name));
- Node* const message_id = SmiConstant(msg_template);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, method);
- Unreachable();
- }
+ ThrowTypeError(context, msg_template, method_name);
BIND(&out);
return var_value_map.value();
@@ -324,22 +317,13 @@ Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
// 7. If IsConstructor(S) is true, return S.
Label throw_error(this);
GotoIf(TaggedIsSmi(species), &throw_error);
- Node* species_bitfield = LoadMapBitField(LoadMap(species));
- GotoIfNot(Word32Equal(Word32And(species_bitfield,
- Int32Constant((1 << Map::kIsConstructor))),
- Int32Constant(1 << Map::kIsConstructor)),
- &throw_error);
+ GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
var_result.Bind(species);
Goto(&out);
// 8. Throw a TypeError exception.
BIND(&throw_error);
- {
- Node* const message_id =
- SmiConstant(MessageTemplate::kSpeciesNotConstructor);
- CallRuntime(Runtime::kThrowTypeError, context, message_id);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
BIND(&out);
return var_result.value();
@@ -355,7 +339,7 @@ void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
Node* delta = IntPtrOrSmiConstant(1, mode);
Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
const CodeStubAssembler::AllocationFlags flags =
CodeStubAssembler::kAllowLargeObjectAllocation;
@@ -515,34 +499,34 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
// Create new FixedArrays to store callbacks, and migrate
// existing callbacks.
Node* const deferred_promise_arr =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(deferred_promise_arr, 0,
existing_deferred_promise);
StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
Node* const deferred_on_resolve_arr =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
deferred_on_resolve_arr, 0,
LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
Node* const deferred_on_reject_arr =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
deferred_on_reject_arr, 0,
LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
Node* const fulfill_reactions =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
fulfill_reactions, 0,
LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
Node* const reject_reactions =
- AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
StoreFixedArrayElement(
reject_reactions, 0,
LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
@@ -992,6 +976,31 @@ void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
PromiseFulfill(context, promise, value, v8::Promise::kRejected);
}
+void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
+ Node* context, Node* condition, const NodeGenerator& object) {
+ Label done(this);
+ GotoIfNot(condition, &done);
+ CallRuntime(Runtime::kSetProperty, context, object(),
+ HeapConstant(factory()->promise_forwarding_handler_symbol()),
+ TrueConstant(), SmiConstant(STRICT));
+ Goto(&done);
+ BIND(&done);
+}
+
+void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
+ Node* context, Node* condition, Node* promise,
+ const NodeGenerator& handled_by) {
+ Label done(this);
+ GotoIfNot(condition, &done);
+ GotoIf(TaggedIsSmi(promise), &done);
+ GotoIfNot(HasInstanceType(promise, JS_PROMISE_TYPE), &done);
+ CallRuntime(Runtime::kSetProperty, context, promise,
+ HeapConstant(factory()->promise_handled_by_symbol()),
+ handled_by(), SmiConstant(STRICT));
+ Goto(&done);
+ BIND(&done);
+}
+
// ES#sec-promise-reject-functions
// Promise Reject Functions
TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
@@ -1124,20 +1133,11 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
// 1. If NewTarget is undefined, throw a TypeError exception.
BIND(&if_targetisundefined);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kNotAPromise);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, new_target);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kNotAPromise, new_target);
// 2. If IsCallable(executor) is false, throw a TypeError exception.
BIND(&if_notcallable);
- {
- Node* const message_id =
- SmiConstant(MessageTemplate::kResolverNotAFunction);
- CallRuntime(Runtime::kThrowTypeError, context, message_id, executor);
- Unreachable();
- }
+ ThrowTypeError(context, MessageTemplate::kResolverNotAFunction, executor);
// Silently fail if the stack looks fishy.
BIND(&if_noaccess);
@@ -1155,20 +1155,6 @@ TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
Return(AllocateAndInitJSPromise(context, parent));
}
-TF_BUILTIN(IsPromise, PromiseBuiltinsAssembler) {
- Node* const maybe_promise = Parameter(Descriptor::kObject);
- Label if_notpromise(this, Label::kDeferred);
-
- GotoIf(TaggedIsSmi(maybe_promise), &if_notpromise);
-
- Node* const result =
- SelectBooleanConstant(HasInstanceType(maybe_promise, JS_PROMISE_TYPE));
- Return(result);
-
- BIND(&if_notpromise);
- Return(FalseConstant());
-}
-
// ES#sec-promise.prototype.then
// Promise.prototype.catch ( onFulfilled, onRejected )
TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
@@ -1334,9 +1320,8 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
BIND(&if_rejectpromise);
{
- Callable promise_handle_reject = CodeFactory::PromiseHandleReject(isolate);
- CallStub(promise_handle_reject, context, deferred_promise,
- deferred_on_reject, var_reason.value());
+ CallBuiltin(Builtins::kPromiseHandleReject, context, deferred_promise,
+ deferred_on_reject, var_reason.value());
Goto(&promisehook_after);
}
@@ -1499,9 +1484,7 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
Return(UndefinedConstant());
BIND(&if_alreadyinvoked);
- Node* message = SmiConstant(MessageTemplate::kPromiseExecutorAlreadyInvoked);
- CallRuntime(Runtime::kThrowTypeError, context, message);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kPromiseExecutorAlreadyInvoked);
}
// ES6 #sec-newpromisecapability
@@ -1819,5 +1802,430 @@ TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
Return(result_promise);
}
+Node* PromiseBuiltinsAssembler::PerformPromiseAll(
+ Node* context, Node* constructor, Node* capability, Node* iterator,
+ Label* if_exception, Variable* var_exception) {
+ IteratorBuiltinsAssembler iter_assembler(state());
+ Label close_iterator(this);
+
+ Node* const instrumenting = IsDebugActive();
+
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ SetForwardingHandlerIfTrue(
+ context, instrumenting,
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
+ Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Node* const remaining_elements = AllocateSmiCell(1);
+
+ VARIABLE(var_index, MachineRepresentation::kTagged, SmiConstant(0));
+
+ Label loop(this, &var_index), break_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
+ // If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
+ // ReturnIfAbrupt(next).
+ Node* const fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Node* const next = iter_assembler.IteratorStep(
+ context, iterator, &break_loop, fast_iterator_result_map, if_exception,
+ var_exception);
+
+ // Let nextValue be IteratorValue(next).
+ // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
+ // true.
+ // ReturnIfAbrupt(nextValue).
+ Node* const next_value = iter_assembler.IteratorValue(
+ context, next, fast_iterator_result_map, if_exception, var_exception);
+
+    // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
+ Node* const promise_resolve =
+ GetProperty(context, constructor, factory()->resolve_string());
+ GotoIfException(promise_resolve, &close_iterator, var_exception);
+
+ Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
+ promise_resolve, constructor, next_value);
+ GotoIfException(next_promise, &close_iterator, var_exception);
+
+ // Let resolveElement be a new built-in function object as defined in
+ // Promise.all Resolve Element Functions.
+ Node* const resolve_context =
+ CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementAlreadyVisitedSlot,
+ SmiConstant(0));
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
+ remaining_elements);
+ StoreContextElementNoWriteBarrier(
+ resolve_context, kPromiseAllResolveElementCapabilitySlot, capability);
+ StoreContextElementNoWriteBarrier(resolve_context,
+ kPromiseAllResolveElementValuesArraySlot,
+ values_array);
+
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const resolve_info = LoadContextElement(
+ native_context, Context::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
+ Node* const resolve =
+ AllocateFunctionWithMapAndContext(map, resolve_info, resolve_context);
+
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] + 1.
+ {
+ Label if_outofrange(this, Label::kDeferred), done(this);
+ IncrementSmiCell(remaining_elements, &if_outofrange);
+ Goto(&done);
+
+ BIND(&if_outofrange);
+ {
+ // If the incremented value is out of Smi range, crash.
+ Abort(kOffsetOutOfRange);
+ }
+
+ BIND(&done);
+ }
+
+    // Perform ? Invoke(nextPromise, "then", « resolveElement,
+    //                  resultCapability.[[Reject]] »).
+ Node* const then =
+ GetProperty(context, next_promise, factory()->then_string());
+ GotoIfException(then, &close_iterator, var_exception);
+
+ Node* const then_call = CallJS(
+ CodeFactory::Call(isolate()), context, then, next_promise, resolve,
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ GotoIfException(then_call, &close_iterator, var_exception);
+
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ // Load promiseCapability.[[Promise]]
+ return LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ });
+
+ // Set index to index + 1
+ var_index.Bind(NumberInc(var_index.value()));
+ Goto(&loop);
+ }
+
+ BIND(&close_iterator);
+ {
+ // Exception must be bound to a JS value.
+ CSA_ASSERT(this, IsNotTheHole(var_exception->value()));
+ iter_assembler.IteratorCloseOnException(context, iterator, if_exception,
+ var_exception);
+ }
+
+ BIND(&break_loop);
+ {
+ Label resolve_promise(this), return_promise(this);
+ // Set iteratorRecord.[[Done]] to true.
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] - 1.
+ Node* const remaining = DecrementSmiCell(remaining_elements);
+ Branch(SmiEqual(remaining, SmiConstant(0)), &resolve_promise,
+ &return_promise);
+
+ // If remainingElementsCount.[[Value]] is 0, then
+ // Let valuesArray be CreateArrayFromList(values).
+ // Perform ? Call(resultCapability.[[Resolve]], undefined,
+    //                « valuesArray »).
+ BIND(&resolve_promise);
+
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ Node* const resolve_call =
+ CallJS(CodeFactory::Call(isolate()), context, resolve,
+ UndefinedConstant(), values_array);
+ GotoIfException(resolve_call, if_exception, var_exception);
+ Goto(&return_promise);
+
+ // Return resultCapability.[[Promise]].
+ BIND(&return_promise);
+ }
+
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ return promise;
+}
+
+Node* PromiseBuiltinsAssembler::IncrementSmiCell(Node* cell,
+ Label* if_overflow) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
+ Node* value = LoadCellValue(cell);
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
+
+ if (if_overflow != nullptr) {
+ GotoIf(SmiEqual(value, SmiConstant(Smi::kMaxValue)), if_overflow);
+ }
+
+ Node* result = SmiAdd(value, SmiConstant(1));
+ StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+Node* PromiseBuiltinsAssembler::DecrementSmiCell(Node* cell) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
+ Node* value = LoadCellValue(cell);
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
+
+ Node* result = SmiSub(value, SmiConstant(1));
+ StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+// ES#sec-promise.all
+// Promise.all ( iterable )
+TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
+ IteratorBuiltinsAssembler iter_assembler(state());
+
+ // Let C be the this value.
+ // If Type(C) is not Object, throw a TypeError exception.
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "Promise.all");
+
+ // Let promiseCapability be ? NewPromiseCapability(C).
+ // Don't fire debugEvent so that forwarding the rejection through all does not
+  // trigger redundant ExceptionEvents.
+ Node* const debug_event = FalseConstant();
+ Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+ Label reject_promise(this, &var_exception, Label::kDeferred);
+
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
+ Node* const iterable = Parameter(Descriptor::kIterable);
+ Node* const iterator = iter_assembler.GetIterator(
+ context, iterable, &reject_promise, &var_exception);
+
+ // Let result be PerformPromiseAll(iteratorRecord, C, promiseCapability).
+ // If result is an abrupt completion, then
+ // If iteratorRecord.[[Done]] is false, let result be
+ // IteratorClose(iterator, result).
+ // IfAbruptRejectPromise(result, promiseCapability).
+ Node* const result = PerformPromiseAll(
+ context, receiver, capability, iterator, &reject_promise, &var_exception);
+
+ Return(result);
+
+ BIND(&reject_promise);
+ {
+ // Exception must be bound to a JS value.
+ CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value()));
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ Callable callable = CodeFactory::Call(isolate());
+ CallJS(callable, context, reject, UndefinedConstant(),
+ var_exception.value());
+
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(promise);
+ }
+}
+
+TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
+ SmiConstant(kPromiseAllResolveElementLength)));
+
+ Label already_called(this), resolve_promise(this);
+ GotoIf(SmiEqual(LoadContextElement(
+ context, kPromiseAllResolveElementAlreadyVisitedSlot),
+ SmiConstant(1)),
+ &already_called);
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementAlreadyVisitedSlot, SmiConstant(1));
+
+ Node* const index =
+ LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
+ Node* const values_array =
+ LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+
+ // Set element in FixedArray
+ Label runtime_set_element(this), did_set_element(this);
+ GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
+ {
+ VARIABLE(var_elements, MachineRepresentation::kTagged,
+ LoadElements(values_array));
+ PossiblyGrowElementsCapacity(SMI_PARAMETERS, PACKED_ELEMENTS, values_array,
+ index, &var_elements, SmiConstant(1),
+ &runtime_set_element);
+ StoreFixedArrayElement(var_elements.value(), index, value,
+ UPDATE_WRITE_BARRIER, 0, SMI_PARAMETERS);
+
+ // Update array length
+ Label did_set_length(this);
+ Node* const length = LoadJSArrayLength(values_array);
+ GotoIfNot(TaggedIsPositiveSmi(length), &did_set_length);
+ Node* const new_length = SmiAdd(index, SmiConstant(1));
+ GotoIfNot(SmiLessThan(length, new_length), &did_set_length);
+ StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
+ new_length);
+ // Assert that valuesArray.[[Length]] is less than or equal to the
+    // elements backing-store length.
+ CSA_SLOW_ASSERT(
+ this, SmiAboveOrEqual(LoadFixedArrayBaseLength(var_elements.value()),
+ new_length));
+ Goto(&did_set_length);
+ BIND(&did_set_length);
+ }
+ Goto(&did_set_element);
+ BIND(&runtime_set_element);
+  // New-space filled up or index too large; set element via runtime.
+ CallRuntime(Runtime::kCreateDataProperty, context, values_array, index,
+ value);
+ Goto(&did_set_element);
+ BIND(&did_set_element);
+
+ Node* const remaining_elements = LoadContextElement(
+ context, kPromiseAllResolveElementRemainingElementsSlot);
+ Node* const result = DecrementSmiCell(remaining_elements);
+ GotoIf(SmiEqual(result, SmiConstant(0)), &resolve_promise);
+ Return(UndefinedConstant());
+
+ BIND(&resolve_promise);
+ Node* const capability =
+ LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot);
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ CallJS(CodeFactory::Call(isolate()), context, resolve, UndefinedConstant(),
+ values_array);
+ Return(UndefinedConstant());
+
+ BIND(&already_called);
+ Return(UndefinedConstant());
+}
+
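For orientation, a TypeScript sketch of the observable Promise.all contract that the resolve-element closures above implement: each fulfillment writes into the shared values array at its captured index and decrements the remaining-elements count, and the combined promise resolves only once that count reaches zero (illustration only; the Cell and context-slot machinery above is the actual mechanism):

  const slow = new Promise<string>(resolve => setTimeout(() => resolve("slow"), 20));
  const fast = Promise.resolve("fast");

  Promise.all([slow, fast]).then(values => {
    // Results land at the index of their source element, not in settlement order.
    console.log(values); // ["slow", "fast"]
  });

  // A single rejection wins: the combined promise rejects via the shared
  // reject function stored in the promise capability.
  Promise.all([fast, Promise.reject(new Error("boom"))]).catch(err => {
    console.log((err as Error).message); // "boom"
  });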
+// ES#sec-promise.race
+// Promise.race ( iterable )
+TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
+ IteratorBuiltinsAssembler iter_assembler(state());
+ VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
+
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "Promise.race");
+
+ // Let promiseCapability be ? NewPromiseCapability(C).
+ // Don't fire debugEvent so that forwarding the rejection through all does not
+  // trigger redundant ExceptionEvents.
+ Node* const debug_event = FalseConstant();
+ Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+
+ Node* const instrumenting = IsDebugActive();
+
+ Label close_iterator(this, Label::kDeferred);
+ Label reject_promise(this, Label::kDeferred);
+
+ // For catch prediction, don't treat the .then calls as handling it;
+ // instead, recurse outwards.
+ SetForwardingHandlerIfTrue(context, instrumenting, reject);
+
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
+ Node* const iterable = Parameter(Descriptor::kIterable);
+ Node* const iterator = iter_assembler.GetIterator(
+ context, iterable, &reject_promise, &var_exception);
+
+ // Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
+ {
+ Label loop(this), break_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const fast_iterator_result_map = LoadContextElement(
+ native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
+ // If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
+ // ReturnIfAbrupt(next).
+ Node* const next = iter_assembler.IteratorStep(
+ context, iterator, &break_loop, fast_iterator_result_map,
+ &reject_promise, &var_exception);
+
+ // Let nextValue be IteratorValue(next).
+ // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
+ // true.
+ // ReturnIfAbrupt(nextValue).
+ Node* const next_value =
+ iter_assembler.IteratorValue(context, next, fast_iterator_result_map,
+ &reject_promise, &var_exception);
+
+      // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
+ Node* const promise_resolve =
+ GetProperty(context, receiver, factory()->resolve_string());
+ GotoIfException(promise_resolve, &close_iterator, &var_exception);
+
+ Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context,
+ promise_resolve, receiver, next_value);
+ GotoIfException(next_promise, &close_iterator, &var_exception);
+
+      // Perform ? Invoke(nextPromise, "then", « resolveElement,
+      //                  resultCapability.[[Reject]] »).
+ Node* const then =
+ GetProperty(context, next_promise, factory()->then_string());
+ GotoIfException(then, &close_iterator, &var_exception);
+
+ Node* const then_call = CallJS(CodeFactory::Call(isolate()), context,
+ then, next_promise, resolve, reject);
+ GotoIfException(then_call, &close_iterator, &var_exception);
+
+ // For catch prediction, mark that rejections here are semantically
+ // handled by the combined Promise.
+ SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ // Load promiseCapability.[[Promise]]
+ return LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ });
+ Goto(&loop);
+ }
+
+ BIND(&break_loop);
+ Return(LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+ }
+
+ BIND(&close_iterator);
+ {
+ CSA_ASSERT(this, IsNotTheHole(var_exception.value()));
+ iter_assembler.IteratorCloseOnException(context, iterator, &reject_promise,
+ &var_exception);
+ }
+
+ BIND(&reject_promise);
+ {
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ Callable callable = CodeFactory::Call(isolate());
+ CallJS(callable, context, reject, UndefinedConstant(),
+ var_exception.value());
+
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(promise);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index a03132d6a6..997933e10b 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -28,6 +28,27 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
kPromiseContextLength,
};
+ protected:
+ enum PromiseAllResolveElementContextSlots {
+ // Whether the resolve callback was already called.
+ kPromiseAllResolveElementAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // Index into the values array
+ kPromiseAllResolveElementIndexSlot,
+
+    // Remaining elements count (a Cell holding a Smi)
+ kPromiseAllResolveElementRemainingElementsSlot,
+
+ // Promise capability from Promise.all
+ kPromiseAllResolveElementCapabilitySlot,
+
+ // Values array from Promise.all
+ kPromiseAllResolveElementValuesArraySlot,
+
+ kPromiseAllResolveElementLength
+ };
+
+ public:
enum FunctionContextSlot {
kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
@@ -113,6 +134,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
Label* if_isunmodified, Label* if_ismodified);
+ void InitializeFunctionContext(Node* native_context, Node* context, int len);
Node* CreatePromiseContext(Node* native_context, int slots);
void PromiseFulfill(Node* context, Node* promise, Node* result,
v8::Promise::PromiseState status);
@@ -135,6 +157,23 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreateThrowerFunctionContext(Node* reason, Node* native_context);
Node* CreateThrowerFunction(Node* reason, Node* native_context);
+ Node* PerformPromiseAll(Node* context, Node* constructor, Node* capability,
+ Node* iterator, Label* if_exception,
+ Variable* var_exception);
+
+ Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr);
+ Node* DecrementSmiCell(Node* cell);
+
+ void SetForwardingHandlerIfTrue(Node* context, Node* condition,
+ const NodeGenerator& object);
+ inline void SetForwardingHandlerIfTrue(Node* context, Node* condition,
+ Node* object) {
+ return SetForwardingHandlerIfTrue(context, condition,
+ [object]() -> Node* { return object; });
+ }
+ void SetPromiseHandledByIfTrue(Node* context, Node* condition, Node* promise,
+ const NodeGenerator& handled_by);
+
private:
Node* AllocateJSPromise(Node* context);
};
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
new file mode 100644
index 0000000000..671bfa21fb
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(IsPromise) {
+ SealHandleScope scope(isolate);
+
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ return isolate->heap()->ToBoolean(object->IsJSPromise());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
new file mode 100644
index 0000000000..30b0f08ec0
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -0,0 +1,215 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+#include "src/counters.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+using compiler::Node;
+using compiler::CodeAssembler;
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
+TF_BUILTIN(ProxyConstructor, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, "Proxy");
+}
+
+class ProxiesCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* IsProxyRevoked(Node* proxy) {
+ CSA_ASSERT(this, IsJSProxy(proxy));
+
+ Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+ CSA_ASSERT(this, Word32Or(IsJSReceiver(handler), IsNull(handler)));
+
+ return IsNull(handler);
+ }
+
+ void GotoIfProxyRevoked(Node* object, Label* if_proxy_revoked) {
+ Label continue_checks(this);
+ GotoIfNot(IsJSProxy(object), &continue_checks);
+ GotoIf(IsProxyRevoked(object), if_proxy_revoked);
+ Goto(&continue_checks);
+ BIND(&continue_checks);
+ }
+
+ Node* AllocateProxy(Node* target, Node* handler, Node* context) {
+ VARIABLE(map, MachineRepresentation::kTagged);
+
+ Label callable_target(this), constructor_target(this), none_target(this),
+ create_proxy(this);
+
+ Node* nativeContext = LoadNativeContext(context);
+
+ Branch(IsCallable(target), &callable_target, &none_target);
+
+ BIND(&callable_target);
+ {
+      // Every object that is a constructor is implicitly callable,
+      // so it's okay to nest this check here.
+ GotoIf(IsConstructor(target), &constructor_target);
+ map.Bind(
+ LoadContextElement(nativeContext, Context::PROXY_CALLABLE_MAP_INDEX));
+ Goto(&create_proxy);
+ }
+ BIND(&constructor_target);
+ {
+ map.Bind(LoadContextElement(nativeContext,
+ Context::PROXY_CONSTRUCTOR_MAP_INDEX));
+ Goto(&create_proxy);
+ }
+ BIND(&none_target);
+ {
+ map.Bind(LoadContextElement(nativeContext, Context::PROXY_MAP_INDEX));
+ Goto(&create_proxy);
+ }
+
+ BIND(&create_proxy);
+ Node* proxy = Allocate(JSProxy::kSize);
+ StoreMapNoWriteBarrier(proxy, map.value());
+ StoreObjectFieldRoot(proxy, JSProxy::kPropertiesOrHashOffset,
+ Heap::kEmptyPropertiesDictionaryRootIndex);
+ StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
+ StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
+ StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHashOffset,
+ UndefinedConstant());
+
+ return proxy;
+ }
+
+ Node* AllocateJSArrayForCodeStubArguments(Node* context,
+ CodeStubArguments& args, Node* argc,
+ ParameterMode mode) {
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* argc_smi = ParameterToTagged(argc, mode);
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, argc_smi, nullptr, argc, INTPTR_PARAMETERS);
+
+ StoreMapNoWriteBarrier(elements, Heap::kFixedArrayMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArrayBase::kLengthOffset,
+ argc_smi);
+
+ VARIABLE(index, MachineType::PointerRepresentation());
+ index.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&index}, zone());
+ args.ForEach(list, [this, elements, &index](Node* arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, elements,
+ index.value(), arg);
+ Increment(index, kPointerSize);
+ });
+ return array;
+ }
+};
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
+TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) {
+ int const kTargetArg = 0;
+ int const kHandlerArg = 1;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* target = args.GetOptionalArgumentValue(kTargetArg);
+ Node* handler = args.GetOptionalArgumentValue(kHandlerArg);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ Label throw_proxy_non_object(this, Label::kDeferred),
+ throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
+ return_create_proxy(this);
+
+ GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
+ GotoIfProxyRevoked(target, &throw_proxy_handler_or_target_revoked);
+
+ GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
+ GotoIfProxyRevoked(handler, &throw_proxy_handler_or_target_revoked);
+
+ args.PopAndReturn(AllocateProxy(target, handler, context));
+
+ BIND(&throw_proxy_non_object);
+ ThrowTypeError(context, MessageTemplate::kProxyNonObject);
+
+ BIND(&throw_proxy_handler_or_target_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
+}
+
+TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
+ Node* argc = Parameter(Descriptor::kActualArgumentsCount);
+ Node* argc_ptr = ChangeInt32ToIntPtr(argc);
+ Node* proxy = Parameter(Descriptor::kFunction);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsJSProxy(proxy));
+ CSA_ASSERT(this, IsCallable(proxy));
+
+ Label throw_proxy_handler_revoked(this, Label::kDeferred),
+ trap_undefined(this), trap_defined(this, Label::kDeferred);
+
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
+
+ // 2. If handler is null, throw a TypeError exception.
+ CSA_ASSERT(this, Word32Or(IsJSReceiver(handler), IsNull(handler)));
+ GotoIf(IsNull(handler), &throw_proxy_handler_revoked);
+
+ // 3. Assert: Type(handler) is Object.
+ CSA_ASSERT(this, IsJSReceiver(handler));
+
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
+
+ // 5. Let trap be ? GetMethod(handler, "apply").
+ Handle<Name> trap_name = factory()->apply_string();
+ Node* trap = GetProperty(context, handler, trap_name);
+
+ // 6. If trap is undefined, then
+ GotoIf(IsUndefined(trap), &trap_undefined);
+ Branch(IsNull(trap), &trap_undefined, &trap_defined);
+
+ BIND(&trap_defined);
+ {
+ CodeStubArguments args(this, argc_ptr);
+ Node* receiver = args.GetReceiver();
+
+ // 7. Let argArray be CreateArrayFromList(argumentsList).
+ Node* array = AllocateJSArrayForCodeStubArguments(context, args, argc_ptr,
+ INTPTR_PARAMETERS);
+
+    // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
+ Node* result = CallJS(CodeFactory::Call(isolate()), context, trap, handler,
+ target, receiver, array);
+ args.PopAndReturn(result);
+ }
+
+ BIND(&trap_undefined);
+ {
+ // 6.a. Return Call(target, thisArgument, argumentsList).
+ TailCallStub(CodeFactory::Call(isolate()), context, target, argc);
+ }
+
+ BIND(&throw_proxy_handler_revoked);
+ {
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kProxyRevoked),
+ StringConstant("apply"));
+ Unreachable();
+ }
+}
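A TypeScript sketch of the [[Call]] dispatch CallProxy performs, shown from the JavaScript side (illustration only; the assumption is that Reflect.apply is a faithful stand-in for the tail call to the target when the trap is undefined):

  const target = (a: number, b: number) => a + b;

  // Trap defined: the builtin packs the arguments into an array and calls
  // trap(target, thisArgument, argArray).
  const traced = new Proxy(target, {
    apply(t, thisArg, argArray) {
      console.log("apply trap saw", argArray);
      return Reflect.apply(t, thisArg, argArray);
    },
  });
  console.log(traced(2, 3)); // logs the arguments, then 5

  // Trap undefined (or null): the call goes straight through to the target.
  const passthrough = new Proxy(target, {});
  console.log(passthrough(2, 3)); // 5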
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-proxy.cc b/deps/v8/src/builtins/builtins-proxy.cc
deleted file mode 100644
index db6f7b57c9..0000000000
--- a/deps/v8/src/builtins/builtins-proxy.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
-#include "src/counters.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
-BUILTIN(ProxyConstructor) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked("Proxy")));
-}
-
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
-BUILTIN(ProxyConstructor_ConstructStub) {
- HandleScope scope(isolate);
- DCHECK(isolate->proxy_function()->IsConstructor());
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> handler = args.atOrUndefined(isolate, 2);
- RETURN_RESULT_OR_FAILURE(isolate, JSProxy::New(isolate, target, handler));
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 04a35bd000..e32ff69c95 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -52,7 +52,7 @@ void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
// Store through runtime.
// TODO(ishell): Use SetPropertyStub here once available.
Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
- Node* const language_mode = SmiConstant(Smi::FromInt(STRICT));
+ Node* const language_mode = SmiConstant(STRICT);
CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
language_mode);
}
@@ -257,7 +257,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
ToDirectStringAssembler to_direct(state(), string);
VARIABLE(var_result, MachineRepresentation::kTagged);
- Label out(this), runtime(this, Label::kDeferred);
+ Label out(this), atom(this), runtime(this, Label::kDeferred);
// External constants.
Node* const isolate_address =
@@ -269,11 +269,20 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
Node* const static_offsets_vector_address = ExternalConstant(
ExternalReference::address_of_static_offsets_vector(isolate()));
- // Ensure that a RegExp stack is allocated.
+ // At this point, last_index is definitely a canonicalized non-negative
+ // number, which implies that any non-Smi last_index is greater than
+ // the maximal string length. If lastIndex > string.length then the matcher
+ // must fail.
+
+ Label if_failure(this);
+ Node* const smi_string_length = LoadStringLength(string);
{
- Node* const stack_size =
- Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
- GotoIf(IntPtrEqual(stack_size, int_zero), &runtime);
+ CSA_ASSERT(this, IsNumberNormalized(last_index));
+ CSA_ASSERT(this, IsNumberPositive(last_index));
+ Node* const last_index_is_not_smi = TaggedIsNotSmi(last_index);
+ Node* const last_index_is_oob =
+ SmiGreaterThan(last_index, smi_string_length);
+ GotoIf(Word32Or(last_index_is_not_smi, last_index_is_oob), &if_failure);
}
Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
@@ -282,10 +291,25 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
CSA_ASSERT(this, TaggedIsNotSmi(data));
CSA_ASSERT(this, HasInstanceType(data, FIXED_ARRAY_TYPE));
- // Check the type of the RegExp. Only continue if type is
- // JSRegExp::IRREGEXP.
- Node* const tag = LoadFixedArrayElement(data, JSRegExp::kTagIndex);
- GotoIfNot(SmiEqual(tag, SmiConstant(JSRegExp::IRREGEXP)), &runtime);
+ // Dispatch on the type of the RegExp.
+ {
+ Label next(this), unreachable(this, Label::kDeferred);
+ Node* const tag = LoadAndUntagToWord32FixedArrayElement(
+ data, IntPtrConstant(JSRegExp::kTagIndex));
+
+ int32_t values[] = {
+ JSRegExp::IRREGEXP, JSRegExp::ATOM, JSRegExp::NOT_COMPILED,
+ };
+ Label* labels[] = {&next, &atom, &runtime};
+
+ STATIC_ASSERT(arraysize(values) == arraysize(labels));
+ Switch(tag, &unreachable, values, labels, arraysize(values));
+
+ BIND(&unreachable);
+ Unreachable();
+
+ BIND(&next);
+ }
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures <= offsets vector size / 2 - 1
@@ -300,23 +324,18 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
&runtime);
}
+ // Ensure that a RegExp stack is allocated. This check is after branching off
+ // for ATOM regexps to avoid unnecessary trips to runtime.
+ {
+ Node* const stack_size =
+ Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
+ GotoIf(IntPtrEqual(stack_size, int_zero), &runtime);
+ }
+
// Unpack the string if possible.
to_direct.TryToDirect(&runtime);
- Node* const smi_string_length = LoadStringLength(string);
-
- // At this point, last_index is definitely a canonicalized non-negative
- // number, which implies that any non-Smi last_index is greater than
- // the maximal string length. If lastIndex > string.length then the matcher
- // must fail.
-
- Label if_failure(this);
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
- GotoIfNot(TaggedIsSmi(last_index), &if_failure); // Outside Smi range.
- GotoIf(SmiGreaterThan(last_index, smi_string_length), &if_failure);
-
// Load the irregexp code object and offsets into the subject string. Both
// depend on whether the string is one- or two-byte.
@@ -358,10 +377,22 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
}
// Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // smi (code flushing support).
+  // encoding. If it has, the field contains a code object; otherwise it
+ // contains the uninitialized sentinel as a smi.
Node* const code = var_code.value();
+#ifdef DEBUG
+ {
+ Label next(this);
+ GotoIfNot(TaggedIsSmi(code), &next);
+
+ CSA_ASSERT(this,
+ SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)));
+ Goto(&next);
+
+ BIND(&next);
+ }
+#endif
GotoIf(TaggedIsSmi(code), &runtime);
CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
@@ -481,7 +512,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
register_count, INT32_ELEMENTS, SMI_PARAMETERS, 0);
Node* const to_offset = ElementOffsetFromIndex(
- IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), FAST_ELEMENTS,
+ IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), PACKED_ELEMENTS,
INTPTR_PARAMETERS, RegExpMatchInfo::kHeaderSize - kHeapObjectTag);
VARIABLE(var_to_offset, MachineType::PointerRepresentation(), to_offset);
@@ -513,8 +544,8 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
{
// A stack overflow was detected in RegExp code.
#ifdef DEBUG
- Node* const pending_exception_address = ExternalConstant(
- ExternalReference(Isolate::kPendingExceptionAddress, isolate()));
+ Node* const pending_exception_address = ExternalConstant(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate()));
CSA_ASSERT(this, IsTheHole(Load(MachineType::AnyTagged(),
pending_exception_address)));
#endif // DEBUG
@@ -530,6 +561,16 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
Goto(&out);
}
+ BIND(&atom);
+ {
+    // TODO(jgruber): A call with 4 args stresses register allocation; this
+ // should probably just be inlined.
+ Node* const result = CallBuiltin(Builtins::kRegExpExecAtom, context, regexp,
+ string, last_index, match_info);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
BIND(&out);
return var_result.value();
#endif // V8_INTERPRETED_REGEXP
@@ -546,7 +587,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
Label* if_didnotmatch, const bool is_fastpath) {
Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
if (is_fastpath) {
CSA_ASSERT(this, IsFastRegExpNoPrototype(context, regexp));
@@ -714,16 +755,10 @@ Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
{
- Node* const message_id = SmiConstant(Smi::FromInt(msg_template));
- Node* const method_name_str = HeapConstant(
- isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
-
Node* const value_str =
CallBuiltin(Builtins::kToString, context, maybe_receiver);
-
- CallRuntime(Runtime::kThrowTypeError, context, message_id, method_name_str,
- value_str);
- Unreachable();
+ ThrowTypeError(context, msg_template, StringConstant(method_name),
+ value_str);
}
BIND(&out);
@@ -851,6 +886,70 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
Return(RegExpPrototypeExecBody(context, regexp, string, false));
}
+// Fast path stub for ATOM regexps. String matching is done by StringIndexOf,
+// and {match_info} is updated on success.
+// The slow path is implemented in RegExpImpl::AtomExec.
+TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
+ Node* const regexp = Parameter(Descriptor::kRegExp);
+ Node* const subject_string = Parameter(Descriptor::kString);
+ Node* const last_index = Parameter(Descriptor::kLastIndex);
+ Node* const match_info = Parameter(Descriptor::kMatchInfo);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsJSRegExp(regexp));
+ CSA_ASSERT(this, IsString(subject_string));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
+ CSA_ASSERT(this, IsFixedArray(match_info));
+
+ Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
+ CSA_ASSERT(this, IsFixedArray(data));
+ CSA_ASSERT(this, SmiEqual(LoadFixedArrayElement(data, JSRegExp::kTagIndex),
+ SmiConstant(JSRegExp::ATOM)));
+
+ // Callers ensure that last_index is in-bounds.
+ CSA_ASSERT(this,
+ SmiLessThanOrEqual(last_index, LoadStringLength(subject_string)));
+
+ Node* const needle_string =
+ LoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex);
+ CSA_ASSERT(this, IsString(needle_string));
+
+ Node* const match_from =
+ CallBuiltin(Builtins::kStringIndexOf, context, subject_string,
+ needle_string, last_index);
+ CSA_ASSERT(this, TaggedIsSmi(match_from));
+
+ Label if_failure(this), if_success(this);
+ Branch(SmiEqual(match_from, SmiConstant(-1)), &if_failure, &if_success);
+
+ BIND(&if_success);
+ {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(match_from));
+ CSA_ASSERT(this, SmiLessThan(match_from, LoadStringLength(subject_string)));
+
+ const int kNumRegisters = 2;
+ STATIC_ASSERT(RegExpMatchInfo::kInitialCaptureIndices >= kNumRegisters);
+
+ Node* const match_to = SmiAdd(match_from, LoadStringLength(needle_string));
+
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kNumberOfCapturesIndex,
+ SmiConstant(kNumRegisters), SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastSubjectIndex,
+ subject_string);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kLastInputIndex,
+ subject_string);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex,
+ match_from, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex + 1,
+ match_to, SKIP_WRITE_BARRIER);
+
+ Return(match_info);
+ }
+
+ BIND(&if_failure);
+ Return(NullConstant());
+}
+
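A TypeScript sketch of the equivalence this fast path relies on (illustration only; the assumption is that a plain literal pattern like this is one V8 can compile as an ATOM regexp, and that the recorded capture pair corresponds to the match start and start + pattern length):

  const subject = "a foo bar foo";
  const re = /foo/g;      // simple literal pattern, no captures or metacharacters
  re.lastIndex = 3;

  const match = re.exec(subject);
  console.log(match && match.index);        // 10
  console.log(subject.indexOf("foo", 3));   // 10, the same search the builtin delegates to
  console.log(match && re.lastIndex);       // 13, i.e. match start + "foo".length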
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
@@ -864,7 +963,7 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
@@ -1050,13 +1149,13 @@ Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
// Normalize pattern.
Node* const pattern =
Select(IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
- [=] { return ToString(context, maybe_pattern); },
+ [=] { return ToString_Inline(context, maybe_pattern); },
MachineRepresentation::kTagged);
// Normalize flags.
Node* const flags =
Select(IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
- [=] { return ToString(context, maybe_flags); },
+ [=] { return ToString_Inline(context, maybe_flags); },
MachineRepresentation::kTagged);
// Initialize.
@@ -1308,8 +1407,7 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
BIND(&if_isnotprototype);
{
- Node* const message_id =
- SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
Node* const method_name_str =
HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
"RegExp.prototype.source"));
@@ -1322,9 +1420,9 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
JSRegExp::Flag flag) {
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const mask = SmiConstant(Smi::FromInt(flag));
+ Node* const mask = SmiConstant(flag);
Node* const is_flag_set = WordNotEqual(SmiAnd(flags, mask), smi_zero);
return is_flag_set;
@@ -1428,7 +1526,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
BIND(&if_isprototype);
{
if (counter != -1) {
- Node* const counter_smi = SmiConstant(Smi::FromInt(counter));
+ Node* const counter_smi = SmiConstant(counter);
CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
}
Return(UndefinedConstant());
@@ -1436,8 +1534,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
BIND(&if_isnotprototype);
{
- Node* const message_id =
- SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
Node* const method_name_str = HeapConstant(
isolate->factory()->NewStringFromAsciiChecked(method_name));
CallRuntime(Runtime::kThrowTypeError, context, message_id,
@@ -1578,7 +1675,7 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -1720,7 +1817,7 @@ class GrowableFixedArray {
Node* ToJSArray(Node* const context) {
CodeStubAssembler* a = assembler_;
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = a->LoadNativeContext(context);
Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
@@ -1757,7 +1854,7 @@ class GrowableFixedArray {
void Initialize() {
CodeStubAssembler* a = assembler_;
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
static const int kInitialArraySize = 8;
Node* const capacity = a->IntPtrConstant(kInitialArraySize);
@@ -1793,7 +1890,7 @@ class GrowableFixedArray {
CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
const CodeStubAssembler::ParameterMode mode =
CodeStubAssembler::INTPTR_PARAMETERS;
@@ -1827,7 +1924,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const null = NullConstant();
Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const is_global =
FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
@@ -1910,7 +2007,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// TODO(ishell): Use GetElement stub once it's available.
Node* const match = GetProperty(context, result, smi_zero);
- var_match.Bind(ToString(context, match));
+ var_match.Bind(ToString_Inline(context, match));
Goto(&if_didmatch);
}
}
@@ -1984,7 +2081,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2005,7 +2102,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
Node* const previous_last_index = FastLoadLastIndex(regexp);
// Ensure last index is 0.
- FastStoreLastIndex(regexp, SmiConstant(Smi::kZero));
+ FastStoreLastIndex(regexp, SmiConstant(0));
// Call exec.
Label if_didnotmatch(this);
@@ -2038,7 +2135,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
Isolate* const isolate = this->isolate();
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
// Grab the initial value of last index.
Node* const previous_last_index = SlowLoadLastIndex(context, regexp);
@@ -2111,7 +2208,7 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2138,7 +2235,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const int_zero = IntPtrConstant(0);
Node* const int_limit = SmiUntag(limit);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
Node* const allocation_site = nullptr;
@@ -2444,10 +2541,8 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const maybe_receiver = args.GetReceiver();
- Node* const maybe_string =
- args.GetOptionalArgumentValue(kStringArg, UndefinedConstant());
- Node* const maybe_limit =
- args.GetOptionalArgumentValue(kLimitArg, UndefinedConstant());
+ Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
+ Node* const maybe_limit = args.GetOptionalArgumentValue(kLimitArg);
Node* const context = Parameter(BuiltinDescriptor::kContext);
// Ensure {maybe_receiver} is a JSReceiver.
@@ -2457,7 +2552,7 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString(context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
Label stub(this), runtime(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, &stub, &runtime);
@@ -2486,7 +2581,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Node* const undefined = UndefinedConstant();
Node* const int_zero = IntPtrConstant(0);
Node* const int_one = IntPtrConstant(1);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const native_context = LoadNativeContext(context);
@@ -2499,7 +2594,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// Allocate {result_array}.
Node* result_array;
{
- ElementsKind kind = FAST_ELEMENTS;
+ ElementsKind kind = PACKED_ELEMENTS;
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
Node* const capacity = IntPtrConstant(16);
Node* const length = smi_zero;
@@ -2536,7 +2631,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Label if_hasexplicitcaptures(this), if_noexplicitcaptures(this),
create_result(this);
- Branch(SmiEqual(num_capture_registers, SmiConstant(Smi::FromInt(2))),
+ Branch(SmiEqual(num_capture_registers, SmiConstant(2)),
&if_noexplicitcaptures, &if_hasexplicitcaptures);
BIND(&if_noexplicitcaptures);
@@ -2600,14 +2695,14 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&if_isstring);
{
- CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(elem)));
+ CSA_ASSERT(this, IsString(elem));
Callable call_callable = CodeFactory::Call(isolate);
Node* const replacement_obj =
CallJS(call_callable, context, replace_callable, undefined, elem,
var_match_start.value(), string);
- Node* const replacement_str = ToString(context, replacement_obj);
+ Node* const replacement_str = ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, i, replacement_str);
Node* const elem_length = LoadStringLength(elem);
@@ -2660,7 +2755,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// we got back from the callback function.
Node* const replacement_str =
- ToString(context, replacement_obj);
+ ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, index, replacement_str);
Goto(&do_continue);
@@ -2692,7 +2787,7 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// string replacement.
Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, IsString(replace_string));
@@ -2865,10 +2960,8 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const maybe_receiver = args.GetReceiver();
- Node* const maybe_string =
- args.GetOptionalArgumentValue(kStringArg, UndefinedConstant());
- Node* const replace_value =
- args.GetOptionalArgumentValue(kReplaceValueArg, UndefinedConstant());
+ Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
+ Node* const replace_value = args.GetOptionalArgumentValue(kReplaceValueArg);
Node* const context = Parameter(BuiltinDescriptor::kContext);
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index d7a81a2ffe..8edb3574cd 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -70,8 +70,9 @@ MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess(
MessageTemplate::kInvalidAtomicAccessIndex),
Nothing<size_t>());
- size_t access_index = NumberToSize(*access_index_obj);
- if (access_index >= typed_array->length_value()) {
+ size_t access_index;
+ if (!TryNumberToSize(*access_index_obj, &access_index) ||
+ access_index >= typed_array->length_value()) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidAtomicAccessIndex));
return Nothing<size_t>();
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index ee85476401..7dd7eaef76 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -126,7 +126,7 @@ void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
Node* context, Variable* var_start, Node* start, Node* string_length) {
Node* const start_int =
ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
- Node* const zero = SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(0);
Label done(this);
Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
@@ -997,16 +997,16 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
Branch(IsNullOrUndefined(value), &throw_exception, &out);
BIND(&throw_exception);
- TailCallRuntime(
- Runtime::kThrowCalledOnNullOrUndefined, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)));
+ TailCallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
+ StringConstant(method_name));
BIND(&out);
}
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Handle<Symbol> symbol,
- const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
+ const NodeFunction0& regexp_call, const NodeFunction1& generic_call,
+ CodeStubArguments* args) {
Label out(this);
// Smis definitely don't have an attached symbol.
@@ -1044,7 +1044,12 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
&slow_lookup);
BIND(&stub_call);
- Return(regexp_call());
+ Node* const result = regexp_call();
+ if (args == nullptr) {
+ Return(result);
+ } else {
+ args->PopAndReturn(result);
+ }
BIND(&slow_lookup);
}
@@ -1065,7 +1070,11 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
// Attempt to call the function.
Node* const result = generic_call(maybe_func);
- Return(result);
+ if (args == nullptr) {
+ Return(result);
+ } else {
+ args->PopAndReturn(result);
+ }
BIND(&out);
}
@@ -1144,9 +1153,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
[=]() {
Node* const subject_string = ToString_Inline(context, receiver);
- Callable replace_callable = CodeFactory::RegExpReplace(isolate());
- return CallStub(replace_callable, context, search, subject_string,
- replace);
+ return CallBuiltin(Builtins::kRegExpReplace, context, search,
+ subject_string, replace);
},
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
@@ -1155,8 +1163,6 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// Convert {receiver} and {search} to strings.
- Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
-
Node* const subject_string = ToString_Inline(context, receiver);
Node* const search_string = ToString_Inline(context, search);
@@ -1193,8 +1199,9 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// longer substrings - we can handle up to 8 chars (one-byte) / 4 chars
// (2-byte).
- Node* const match_start_index = CallStub(
- indexof_callable, context, subject_string, search_string, smi_zero);
+ Node* const match_start_index =
+ CallBuiltin(Builtins::kStringIndexOf, context, subject_string,
+ search_string, smi_zero);
CSA_ASSERT(this, TaggedIsSmi(match_start_index));
// Early exit if no match found.
@@ -1294,9 +1301,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start =
- args.GetOptionalArgumentValue(kStart, UndefinedConstant());
- Node* const end = args.GetOptionalArgumentValue(kEnd, UndefinedConstant());
+ Node* const start = args.GetOptionalArgumentValue(kStart);
+ Node* const end = args.GetOptionalArgumentValue(kEnd);
Node* const context = Parameter(BuiltinDescriptor::kContext);
Node* const smi_zero = SmiConstant(0);
@@ -1305,8 +1311,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
RequireObjectCoercible(context, receiver, "String.prototype.slice");
// 2. Let S be ? ToString(O).
- Callable tostring_callable = CodeFactory::ToString(isolate());
- Node* const subject_string = CallStub(tostring_callable, context, receiver);
+ Node* const subject_string =
+ CallBuiltin(Builtins::kToString, context, receiver);
// 3. Let len be the number of elements in S.
Node* const length = LoadStringLength(subject_string);
@@ -1367,12 +1373,17 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
// ES6 section 21.1.3.19 String.prototype.split ( separator, limit )
TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
- Label out(this);
+ const int kSeparatorArg = 0;
+ const int kLimitArg = 1;
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const separator = Parameter(Descriptor::kSeparator);
- Node* const limit = Parameter(Descriptor::kLimit);
- Node* const context = Parameter(Descriptor::kContext);
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const receiver = args.GetReceiver();
+ Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg);
+ Node* const limit = args.GetOptionalArgumentValue(kLimitArg);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
Node* const smi_zero = SmiConstant(0);
@@ -1385,14 +1396,14 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
[=]() {
Node* const subject_string = ToString_Inline(context, receiver);
- Callable split_callable = CodeFactory::RegExpSplit(isolate());
- return CallStub(split_callable, context, separator, subject_string,
- limit);
+ return CallBuiltin(Builtins::kRegExpSplit, context, separator,
+ subject_string, limit);
},
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
return CallJS(call_callable, context, fn, separator, receiver, limit);
- });
+ },
+ &args);
// String and integer conversions.
@@ -1408,7 +1419,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Label next(this);
GotoIfNot(SmiEqual(limit_number, smi_zero), &next);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = LoadNativeContext(context);
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1416,7 +1427,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const capacity = IntPtrConstant(0);
Node* const result = AllocateJSArray(kind, array_map, capacity, length);
- Return(result);
+ args.PopAndReturn(result);
BIND(&next);
}
@@ -1427,7 +1438,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Label next(this);
GotoIfNot(IsUndefined(separator), &next);
- const ElementsKind kind = FAST_ELEMENTS;
+ const ElementsKind kind = PACKED_ELEMENTS;
Node* const native_context = LoadNativeContext(context);
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1438,7 +1449,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const fixed_array = LoadElements(result);
StoreFixedArrayElement(fixed_array, 0, subject_string);
- Return(result);
+ args.PopAndReturn(result);
BIND(&next);
}
@@ -1450,7 +1461,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const result = CallRuntime(Runtime::kStringToArray, context,
subject_string, limit_number);
- Return(result);
+ args.PopAndReturn(result);
BIND(&next);
}
@@ -1458,22 +1469,29 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const result =
CallRuntime(Runtime::kStringSplit, context, subject_string,
separator_string, limit_number);
- Return(result);
+ args.PopAndReturn(result);
}
// ES6 #sec-string.prototype.substr
TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
+ const int kStartArg = 0;
+ const int kLengthArg = 1;
+
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const receiver = args.GetReceiver();
+ Node* const start = args.GetOptionalArgumentValue(kStartArg);
+ Node* const length = args.GetOptionalArgumentValue(kLengthArg);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
Label out(this);
VARIABLE(var_start, MachineRepresentation::kTagged);
VARIABLE(var_length, MachineRepresentation::kTagged);
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const start = Parameter(Descriptor::kStart);
- Node* const length = Parameter(Descriptor::kLength);
- Node* const context = Parameter(Descriptor::kContext);
-
- Node* const zero = SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(0);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
@@ -1513,7 +1531,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
var_length.Bind(SmiMin(positive_length, minimal_length));
GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
- Return(EmptyStringConstant());
+ args.PopAndReturn(EmptyStringConstant());
}
BIND(&if_isheapnumber);
@@ -1522,7 +1540,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
+ CSA_ASSERT(this, IsHeapNumber(var_length.value()));
Label if_isnegative(this), if_ispositive(this);
Node* const float_zero = Float64Constant(0.);
@@ -1531,13 +1549,13 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
&if_ispositive);
BIND(&if_isnegative);
- Return(EmptyStringConstant());
+ args.PopAndReturn(EmptyStringConstant());
BIND(&if_ispositive);
{
var_length.Bind(SmiSub(string_length, var_start.value()));
GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
- Return(EmptyStringConstant());
+ args.PopAndReturn(EmptyStringConstant());
}
}
@@ -1545,7 +1563,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
{
Node* const end = SmiAdd(var_start.value(), var_length.value());
Node* const result = SubString(context, string, var_start.value(), end);
- Return(result);
+ args.PopAndReturn(result);
}
}
@@ -1574,7 +1592,7 @@ compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
BIND(&if_isoutofbounds);
{
- Node* const zero = SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(0);
var_result.Bind(
SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit));
Goto(&out);
@@ -1584,10 +1602,10 @@ compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
BIND(&if_isnotsmi);
{
// {value} is a heap number - in this case, it is definitely out of bounds.
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value_int)));
+ CSA_ASSERT(this, IsHeapNumber(value_int));
Node* const float_zero = Float64Constant(0.);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Node* const value_float = LoadHeapNumberValue(value_int);
var_result.Bind(SelectTaggedConstant(
Float64LessThan(value_float, float_zero), smi_zero, limit));
@@ -1600,16 +1618,23 @@ compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
// ES6 #sec-string.prototype.substring
TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
+ const int kStartArg = 0;
+ const int kEndArg = 1;
+
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const receiver = args.GetReceiver();
+ Node* const start = args.GetOptionalArgumentValue(kStartArg);
+ Node* const end = args.GetOptionalArgumentValue(kEndArg);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
Label out(this);
VARIABLE(var_start, MachineRepresentation::kTagged);
VARIABLE(var_end, MachineRepresentation::kTagged);
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const start = Parameter(Descriptor::kStart);
- Node* const end = Parameter(Descriptor::kEnd);
- Node* const context = Parameter(Descriptor::kContext);
-
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
ToThisString(context, receiver, "String.prototype.substring");
@@ -1643,7 +1668,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
{
Node* result =
SubString(context, string, var_start.value(), var_end.value());
- Return(result);
+ args.PopAndReturn(result);
}
}
@@ -1679,13 +1704,13 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
LoadContextElement(native_context, Context::STRING_ITERATOR_MAP_INDEX);
Node* iterator = Allocate(JSStringIterator::kSize);
StoreMapNoWriteBarrier(iterator, map);
- StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+ StoreObjectFieldRoot(iterator, JSValue::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
string);
- Node* index = SmiConstant(Smi::kZero);
+ Node* index = SmiConstant(0);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
index);
Return(iterator);
@@ -1705,7 +1730,7 @@ compiler::Node* StringBuiltinsAssembler::LoadSurrogatePairAt(
GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&return_result);
- Node* next_index = SmiAdd(index, SmiConstant(Smi::FromInt(1)));
+ Node* next_index = SmiAdd(index, SmiConstant(1));
GotoIfNot(SmiLessThan(next_index, length), &return_result);
var_trail.Bind(StringCharCodeAt(string, next_index));
@@ -1796,19 +1821,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&return_result);
{
- Node* native_context = LoadNativeContext(context);
- Node* map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- Node* result = Allocate(JSIteratorResult::kSize);
- StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
- var_value.value());
- StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
- var_done.value());
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
Return(result);
}
@@ -1816,12 +1830,272 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
{
// The {receiver} is not a valid JSGeneratorObject.
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(
- "String Iterator.prototype.next", TENURED)),
- iterator);
+ StringConstant("String Iterator.prototype.next"), iterator);
Unreachable();
}
}
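+// Copies the characters of the strings between |first_arg_ptr| and
+// |last_arg_ptr| (walking down the stack) into one freshly allocated
+// sequential string of |total_length| characters with the given |encoding|.
+// The caller (ConcatenateStrings below) is expected to ensure that every
+// input either is, or can be dereferenced to, a sequential string of that
+// same encoding.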
+Node* StringBuiltinsAssembler::ConcatenateSequentialStrings(
+ Node* context, Node* first_arg_ptr, Node* last_arg_ptr, Node* total_length,
+ String::Encoding encoding) {
+ Node* result;
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ result = AllocateSeqOneByteString(context, total_length, SMI_PARAMETERS);
+ } else {
+ DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding);
+ result = AllocateSeqTwoByteString(context, total_length, SMI_PARAMETERS);
+ }
+
+ VARIABLE(current_arg, MachineType::PointerRepresentation(), first_arg_ptr);
+ VARIABLE(str_index, MachineRepresentation::kTaggedSigned, SmiConstant(0));
+
+ Label loop(this, {&current_arg, &str_index}), done(this);
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ VARIABLE(current_string, MachineRepresentation::kTagged,
+ Load(MachineType::AnyTagged(), current_arg.value()));
+
+ Label deref_indirect(this, Label::kDeferred),
+ is_sequential(this, &current_string);
+
+ // Check if we need to dereference an indirect string.
+ Node* instance_type = LoadInstanceType(current_string.value());
+ Branch(IsSequentialStringInstanceType(instance_type), &is_sequential,
+ &deref_indirect);
+
+ BIND(&is_sequential);
+ {
+ CSA_ASSERT(this, IsSequentialStringInstanceType(
+ LoadInstanceType(current_string.value())));
+ Node* current_length = LoadStringLength(current_string.value());
+ CopyStringCharacters(current_string.value(), result, SmiConstant(0),
+ str_index.value(), current_length, encoding,
+ encoding, SMI_PARAMETERS);
+ str_index.Bind(SmiAdd(str_index.value(), current_length));
+ current_arg.Bind(
+ IntPtrSub(current_arg.value(), IntPtrConstant(kPointerSize)));
+ Branch(IntPtrGreaterThanOrEqual(current_arg.value(), last_arg_ptr), &loop,
+ &done);
+ }
+
+ BIND(&deref_indirect);
+ {
+ DerefIndirectString(&current_string, instance_type);
+ Goto(&is_sequential);
+ }
+ }
+ BIND(&done);
+ CSA_ASSERT(this, SmiEqual(str_index.value(), total_length));
+ return result;
+}
+
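+// Concatenates the |arg_count| strings that start at |first_arg_ptr| on the
+// stack, producing a flat sequential string, a chain of cons strings, or a
+// combination of both depending on the accumulated length. Jumps to
+// |bailout_to_runtime| for mixed encodings, results exceeding
+// String::kMaxLength, and inputs this fast path cannot handle.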
+Node* StringBuiltinsAssembler::ConcatenateStrings(Node* context,
+ Node* first_arg_ptr,
+ Node* arg_count,
+ Label* bailout_to_runtime) {
+ Label do_flat_string(this), do_cons_string(this), done(this);
+ // There must be at least two strings being concatenated.
+ CSA_ASSERT(this, Uint32GreaterThanOrEqual(arg_count, Int32Constant(2)));
+  // Arguments grow up on the stack, so subtract (arg_count - 1) pointer slots
+  // from first_arg to get the last argument to be concatenated.
+ Node* last_arg_ptr = IntPtrSub(
+ first_arg_ptr, TimesPointerSize(IntPtrSub(ChangeUint32ToWord(arg_count),
+ IntPtrConstant(1))));
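+  // E.g. (illustrative): with arg_count == 3, last_arg_ptr ends up at
+  // first_arg_ptr - 2 * kPointerSize, so the loops below visit the arguments
+  // at first_arg_ptr, first_arg_ptr - kPointerSize, and last_arg_ptr in turn.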
+
+ VARIABLE(current_arg, MachineType::PointerRepresentation(), first_arg_ptr);
+ VARIABLE(current_string, MachineRepresentation::kTagged,
+ Load(MachineType::AnyTagged(), current_arg.value()));
+ VARIABLE(total_length, MachineRepresentation::kTaggedSigned, SmiConstant(0));
+ VARIABLE(result, MachineRepresentation::kTagged);
+
+ Node* string_encoding = Word32And(LoadInstanceType(current_string.value()),
+ Int32Constant(kStringEncodingMask));
+
+ Label flat_length_loop(this, {&current_arg, &current_string, &total_length}),
+ done_flat_length_loop(this);
+ Goto(&flat_length_loop);
+ BIND(&flat_length_loop);
+ {
+ Comment("Loop to find length and type of initial flat-string");
+ Label is_sequential_or_can_deref(this), check_deref_instance_type(this);
+
+ // Increment total_length by the current string's length.
+ Node* string_length = LoadStringLength(current_string.value());
+ CSA_ASSERT(this, TaggedIsSmi(string_length));
+ // No need to check for Smi overflow since String::kMaxLength is 2^28 - 16.
+ total_length.Bind(SmiAdd(total_length.value(), string_length));
+
+  // If we are above the min cons string length, bail out.
+ GotoIf(SmiAboveOrEqual(total_length.value(),
+ SmiConstant(ConsString::kMinLength)),
+ &done_flat_length_loop);
+
+ VARIABLE(instance_type, MachineRepresentation::kWord32,
+ LoadInstanceType(current_string.value()));
+
+ // Check if the new string is sequential or can be dereferenced as a
+  // sequential string. If it can't, then, since we are still under
+  // ConsString::kMinLength, we need to bail out to the runtime.
+ GotoIf(IsSequentialStringInstanceType(instance_type.value()),
+ &is_sequential_or_can_deref);
+ MaybeDerefIndirectString(&current_string, instance_type.value(),
+ &check_deref_instance_type, bailout_to_runtime);
+
+ BIND(&check_deref_instance_type);
+ {
+ instance_type.Bind(LoadInstanceType(current_string.value()));
+ Branch(IsSequentialStringInstanceType(instance_type.value()),
+ &is_sequential_or_can_deref, bailout_to_runtime);
+ }
+
+ BIND(&is_sequential_or_can_deref);
+
+ // Check that all the strings have the same encoding type. If we got here
+  // we are still under ConsString::kMinLength and need to bail out to the
+  // runtime if the strings have different encodings.
+ GotoIf(Word32NotEqual(string_encoding,
+ Word32And(instance_type.value(),
+ Int32Constant(kStringEncodingMask))),
+ bailout_to_runtime);
+
+ current_arg.Bind(
+ IntPtrSub(current_arg.value(), IntPtrConstant(kPointerSize)));
+ GotoIf(IntPtrLessThan(current_arg.value(), last_arg_ptr),
+ &done_flat_length_loop);
+ current_string.Bind(Load(MachineType::AnyTagged(), current_arg.value()));
+ Goto(&flat_length_loop);
+ }
+ BIND(&done_flat_length_loop);
+
+ // If new length is greater than String::kMaxLength, goto runtime to throw.
+ GotoIf(SmiAboveOrEqual(total_length.value(), SmiConstant(String::kMaxLength)),
+ bailout_to_runtime);
+
+ // If new length is less than ConsString::kMinLength, concatenate all operands
+ // as a flat string.
+ GotoIf(SmiLessThan(total_length.value(), SmiConstant(ConsString::kMinLength)),
+ &do_flat_string);
+
+  // If the new length is greater than ConsString::kMinLength, create a flat
+  // string from first_arg to current_arg if there are at least two strings
+  // between them.
+ {
+ Comment("New length is greater than ConsString::kMinLength");
+
+ // Subtract length of the last string that pushed us over the edge.
+ Node* string_length = LoadStringLength(current_string.value());
+ total_length.Bind(SmiSub(total_length.value(), string_length));
+
+    // If we have 2 or more operands under ConsString::kMinLength, concatenate
+    // them as a flat string before concatenating the rest as a cons string.
+    // Even though the final result will be a cons string, the time and memory
+    // overhead of that initial flat string is lower than it would be if the
+    // whole result were built out of cons strings.
+ GotoIf(
+ IntPtrGreaterThanOrEqual(IntPtrSub(first_arg_ptr, current_arg.value()),
+ IntPtrConstant(2 * kPointerSize)),
+ &do_flat_string);
+
+ // Otherwise the whole concatenation should be cons strings.
+ result.Bind(Load(MachineType::AnyTagged(), first_arg_ptr));
+ total_length.Bind(LoadStringLength(result.value()));
+ current_arg.Bind(IntPtrSub(first_arg_ptr, IntPtrConstant(kPointerSize)));
+ Goto(&do_cons_string);
+ }
+
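+  // E.g. (illustrative): for "a" + "b" + <a 20-character string>, the flat
+  // length loop above stops at the long tail, the two short heads are merged
+  // into one flat string below, and the tail is then attached as a cons
+  // string.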
+ BIND(&do_flat_string);
+ {
+ Comment("Flat string concatenation");
+ Node* last_flat_arg_ptr =
+ IntPtrAdd(current_arg.value(), IntPtrConstant(kPointerSize));
+ Label two_byte(this);
+ GotoIf(Word32Equal(string_encoding, Int32Constant(kTwoByteStringTag)),
+ &two_byte);
+
+ {
+ Comment("One-byte sequential string case");
+ result.Bind(ConcatenateSequentialStrings(
+ context, first_arg_ptr, last_flat_arg_ptr, total_length.value(),
+ String::ONE_BYTE_ENCODING));
+      // If there are still more arguments to concatenate, jump to the cons
+      // string case; otherwise we are done.
+ Branch(IntPtrLessThan(current_arg.value(), last_arg_ptr), &done,
+ &do_cons_string);
+ }
+
+ BIND(&two_byte);
+ {
+ Comment("Two-byte sequential string case");
+ result.Bind(ConcatenateSequentialStrings(
+ context, first_arg_ptr, last_flat_arg_ptr, total_length.value(),
+ String::TWO_BYTE_ENCODING));
+      // If there are still more arguments to concatenate, jump to the cons
+      // string case; otherwise we are done.
+ Branch(IntPtrLessThan(current_arg.value(), last_arg_ptr), &done,
+ &do_cons_string);
+ }
+ }
+
+ BIND(&do_cons_string);
+ {
+ Comment("Create cons string");
+ Label loop(this, {&current_arg, &total_length, &result}), done_cons(this);
+
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* current_string =
+ Load(MachineType::AnyTagged(), current_arg.value());
+ Node* string_length = LoadStringLength(current_string);
+
+      // Skip concatenating an empty string.
+ GotoIf(SmiEqual(string_length, SmiConstant(0)), &done_cons);
+
+ total_length.Bind(SmiAdd(total_length.value(), string_length));
+
+ // If new length is greater than String::kMaxLength, goto runtime to
+      // throw. Note: we also need to invalidate the string length protector,
+      // so we can't just throw here directly.
+ GotoIf(SmiAboveOrEqual(total_length.value(),
+ SmiConstant(String::kMaxLength)),
+ bailout_to_runtime);
+
+ result.Bind(NewConsString(context, total_length.value(), result.value(),
+ current_string, CodeStubAssembler::kNone));
+ Goto(&done_cons);
+
+ BIND(&done_cons);
+ current_arg.Bind(
+ IntPtrSub(current_arg.value(), IntPtrConstant(kPointerSize)));
+ Branch(IntPtrLessThan(current_arg.value(), last_arg_ptr), &done, &loop);
+ }
+ }
+
+ BIND(&done);
+ IncrementCounter(isolate()->counters()->string_add_native(), 1);
+ return result.value();
+}
+
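+// Builtin entry point for string concatenation: all stack arguments (there is
+// no receiver) are concatenated via ConcatenateStrings above, falling back to
+// Runtime::kStringConcat when the fast path bails out.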
+TF_BUILTIN(StringConcat, StringBuiltinsAssembler) {
+ Node* argc = Parameter(Descriptor::kArgumentsCount);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc),
+ CodeStubArguments::ReceiverMode::kNoReceiver);
+ Node* first_arg_ptr =
+ args.AtIndexPtr(IntPtrConstant(0), ParameterMode::INTPTR_PARAMETERS);
+
+ Label call_runtime(this, Label::kDeferred);
+ Node* result =
+ ConcatenateStrings(context, first_arg_ptr, argc, &call_runtime);
+ args.PopAndReturn(result);
+
+ BIND(&call_runtime);
+ TailCallRuntimeN(Runtime::kStringConcat, context, argc);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 399f565e55..ed1225328a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -24,6 +24,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Label* if_equal, Label* if_not_equal,
Label* if_notbothdirectonebyte);
+ // String concatenation.
+ Node* ConcatenateStrings(Node* context, Node* first_arg_ptr, Node* arg_count,
+ Label* bailout_to_runtime);
+
protected:
Node* DirectStringData(Node* string, Node* string_instance_type);
@@ -54,6 +58,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
UnicodeEncoding encoding);
+ Node* ConcatenateSequentialStrings(Node* context, Node* first_arg_ptr,
+ Node* arg_count, Node* total_length,
+ String::Encoding encoding);
+
void StringIndexOf(Node* const subject_string,
Node* const subject_instance_type,
Node* const search_string,
@@ -86,7 +94,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
Handle<Symbol> symbol,
const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call);
+ const NodeFunction1& generic_call,
+ CodeStubArguments* args = nullptr);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index a6b1d02fa9..ba87d755f6 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -368,6 +368,7 @@ BUILTIN(StringPrototypeTrimRight) {
return *String::Trim(string, String::kTrimRight);
}
+#ifndef V8_INTL_SUPPORT
namespace {
inline bool ToUpperOverflows(uc32 character) {
@@ -518,7 +519,7 @@ MUST_USE_RESULT static Object* ConvertCase(
if (answer->IsException(isolate) || answer->IsString()) return answer;
DCHECK(answer->IsSmi());
- length = Smi::cast(answer)->value();
+ length = Smi::ToInt(answer);
if (s->IsOneByteRepresentation() && length > 0) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawOneByteString(length));
@@ -559,6 +560,7 @@ BUILTIN(StringPrototypeToUpperCase) {
return ConvertCase(string, isolate,
isolate->runtime_state()->to_upper_mapping());
}
+#endif // !V8_INTL_SUPPORT
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index 870be3b216..9505c4034f 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -112,7 +112,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
for (int offset = JSTypedArray::kSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) {
- StoreObjectField(holder, offset, SmiConstant(Smi::kZero));
+ StoreObjectField(holder, offset, SmiConstant(0));
}
}
@@ -216,7 +216,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Node* buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields);
StoreMapNoWriteBarrier(buffer, map);
- StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOrHashOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(buffer, JSArray::kElementsOffset,
empty_fixed_array);
@@ -227,7 +227,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// - Set backing_store to null/Smi(0).
// - Set all embedder fields to Smi(0).
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldSlot,
- SmiConstant(Smi::kZero));
+ SmiConstant(0));
int32_t bitfield_value = (1 << JSArrayBuffer::IsExternal::kShift) |
(1 << JSArrayBuffer::IsNeuterable::kShift);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset,
@@ -237,10 +237,10 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
- SmiConstant(Smi::kZero));
+ SmiConstant(0));
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
int offset = JSArrayBuffer::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(Smi::kZero));
+ StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
@@ -397,14 +397,6 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
check_length(this), call_init(this), invalid_length(this),
length_undefined(this), length_defined(this);
- Callable add = CodeFactory::Add(isolate());
- Callable div = CodeFactory::Divide(isolate());
- Callable equal = CodeFactory::Equal(isolate());
- Callable greater_than = CodeFactory::GreaterThan(isolate());
- Callable less_than = CodeFactory::LessThan(isolate());
- Callable mod = CodeFactory::Modulus(isolate());
- Callable sub = CodeFactory::Subtract(isolate());
-
GotoIf(IsUndefined(byte_offset), &check_length);
offset.Bind(
@@ -422,11 +414,14 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
}
BIND(&offset_not_smi);
{
- GotoIf(IsTrue(CallStub(less_than, context, offset.value(), SmiConstant(0))),
+ GotoIf(IsTrue(CallBuiltin(Builtins::kLessThan, context, offset.value(),
+ SmiConstant(0))),
&invalid_length);
- Node* remainder = CallStub(mod, context, offset.value(), element_size);
+ Node* remainder =
+ CallBuiltin(Builtins::kModulus, context, offset.value(), element_size);
// Remainder can be a heap number.
- Branch(IsTrue(CallStub(equal, context, remainder, SmiConstant(0))),
+ Branch(IsTrue(CallBuiltin(Builtins::kEqual, context, remainder,
+ SmiConstant(0))),
&check_length, &start_offset_error);
}
@@ -439,16 +434,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
Node* buffer_byte_length =
LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
- Node* remainder = CallStub(mod, context, buffer_byte_length, element_size);
+ Node* remainder = CallBuiltin(Builtins::kModulus, context,
+ buffer_byte_length, element_size);
// Remainder can be a heap number.
- GotoIf(IsFalse(CallStub(equal, context, remainder, SmiConstant(0))),
+ GotoIf(IsFalse(CallBuiltin(Builtins::kEqual, context, remainder,
+ SmiConstant(0))),
&byte_length_error);
- new_byte_length.Bind(
- CallStub(sub, context, buffer_byte_length, offset.value()));
+ new_byte_length.Bind(CallBuiltin(Builtins::kSubtract, context,
+ buffer_byte_length, offset.value()));
- Branch(IsTrue(CallStub(less_than, context, new_byte_length.value(),
- SmiConstant(0))),
+ Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context,
+ new_byte_length.value(), SmiConstant(0))),
&invalid_offset_error, &call_init);
}
@@ -461,16 +458,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
Node* buffer_byte_length =
LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
- Node* end = CallStub(add, context, offset.value(), new_byte_length.value());
+ Node* end = CallBuiltin(Builtins::kAdd, context, offset.value(),
+ new_byte_length.value());
- Branch(IsTrue(CallStub(greater_than, context, end, buffer_byte_length)),
+ Branch(IsTrue(CallBuiltin(Builtins::kGreaterThan, context, end,
+ buffer_byte_length)),
&invalid_length, &call_init);
}
BIND(&call_init);
{
- Node* new_length =
- CallStub(div, context, new_byte_length.value(), element_size);
+ Node* new_length = CallBuiltin(Builtins::kDivide, context,
+ new_byte_length.value(), element_size);
// Force the result into a Smi, or throw a range error if it doesn't fit.
new_length = ToSmiIndex(new_length, context, &invalid_length);
@@ -489,8 +488,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&start_offset_error);
{
Node* holder_map = LoadMap(holder);
- Node* problem_string = HeapConstant(
- factory()->NewStringFromAsciiChecked("start offset", TENURED));
+ Node* problem_string = StringConstant("start offset");
CallRuntime(Runtime::kThrowInvalidTypedArrayAlignment, context, holder_map,
problem_string);
@@ -500,8 +498,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&byte_length_error);
{
Node* holder_map = LoadMap(holder);
- Node* problem_string = HeapConstant(
- factory()->NewStringFromAsciiChecked("byte length", TENURED));
+ Node* problem_string = StringConstant("byte length");
CallRuntime(Runtime::kThrowInvalidTypedArrayAlignment, context, holder_map,
problem_string);
@@ -640,9 +637,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
{
// The {receiver} is not a valid JSTypedArray.
CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(
- factory()->NewStringFromAsciiChecked(method_name, TENURED)),
- receiver);
+ StringConstant(method_name), receiver);
Unreachable();
}
}
@@ -702,14 +697,12 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Goto(&throw_typeerror);
BIND(&if_receiverisneutered);
- var_message.Bind(
- SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
+ var_message.Bind(SmiConstant(MessageTemplate::kDetachedOperation));
Goto(&throw_typeerror);
BIND(&throw_typeerror);
{
- Node* method_arg = HeapConstant(
- isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+ Node* method_arg = StringConstant(method_name);
Node* result = CallRuntime(Runtime::kThrowTypeError, context,
var_message.value(), method_arg);
Return(result);
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 773e5480ac..176a79965b 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -27,7 +27,7 @@ namespace {
int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
int64_t relative;
if (V8_LIKELY(num->IsSmi())) {
- relative = Smi::cast(*num)->value();
+ relative = Smi::ToInt(*num);
} else {
DCHECK(num->IsHeapNumber());
double fp = HeapNumber::cast(*num)->value();
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 88bbe8cd32..cb110bea95 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -13,15 +13,14 @@ namespace internal {
typedef compiler::Node Node;
TF_BUILTIN(WasmStackGuard, CodeStubAssembler) {
- Node* context = SmiConstant(Smi::kZero);
- TailCallRuntime(Runtime::kWasmStackGuard, context);
+ TailCallRuntime(Runtime::kWasmStackGuard, NoContextConstant());
}
#define DECLARE_ENUM(name) \
TF_BUILTIN(ThrowWasm##name, CodeStubAssembler) { \
int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
- TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf, \
- SmiConstant(Smi::kZero), SmiConstant(message_id)); \
+ TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf, NoContextConstant(), \
+ SmiConstant(message_id)); \
}
FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
#undef DECLARE_ENUM
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 4d5e83a9e0..3f98d4fb13 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -26,6 +26,32 @@ Builtins::Builtins() : initialized_(false) {
Builtins::~Builtins() {}
+BailoutId Builtins::GetContinuationBailoutId(Name name) {
+ switch (name) {
+#define BAILOUT_ID(NAME, ...) \
+ case k##NAME: \
+ return BailoutId(BailoutId::kFirstBuiltinContinuationId + name);
+ BUILTIN_LIST_TFJ(BAILOUT_ID);
+ BUILTIN_LIST_TFC(BAILOUT_ID);
+#undef BAILOUT_ID
+ default:
+ UNREACHABLE();
+ }
+}
+
+Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
+ switch (id.ToInt()) {
+#define BAILOUT_ID(NAME, ...) \
+ case BailoutId::kFirstBuiltinContinuationId + k##NAME: \
+ return k##NAME;
+ BUILTIN_LIST_TFJ(BAILOUT_ID)
+ BUILTIN_LIST_TFC(BAILOUT_ID)
+#undef BAILOUT_ID
+ default:
+ UNREACHABLE();
+ }
+}
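+// Note (illustrative): the two functions above form a simple bijection, i.e.
+// GetBuiltinFromBailoutId(GetContinuationBailoutId(name)) == name for every
+// TFJ/TFC builtin, since the bailout id is just
+// BailoutId::kFirstBuiltinContinuationId + name.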
+
void Builtins::TearDown() { initialized_ = false; }
void Builtins::IterateBuiltins(RootVisitor* v) {
@@ -79,7 +105,6 @@ Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
return NonPrimitiveToPrimitive_String();
}
UNREACHABLE();
- return Handle<Code>::null();
}
Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
@@ -90,7 +115,10 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
return OrdinaryToPrimitive_String();
}
UNREACHABLE();
- return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::builtin_handle(Name name) {
+ return Handle<Code>(reinterpret_cast<Code**>(builtin_address(name)));
}
// static
@@ -105,7 +133,6 @@ int Builtins::GetBuiltinParameterCount(Name name) {
#undef TFJ_CASE
default:
UNREACHABLE();
- return 0;
}
}
@@ -117,26 +144,64 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
switch (name) {
// This macro is deliberately crafted so as to emit very little code,
// in order to keep binary size of this function under control.
-#define CASE(Name, ...) \
+#define CASE_OTHER(Name, ...) \
case k##Name: { \
key = Builtin_##Name##_InterfaceDescriptor::key(); \
break; \
}
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE, CASE,
- CASE, IGNORE_BUILTIN, IGNORE_BUILTIN)
-#undef CASE
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
+ CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#undef CASE_OTHER
case kConsoleAssert: {
return Callable(code, BuiltinDescriptor(isolate));
}
+ case kArrayForEach: {
+ Handle<Code> code = isolate->builtins()->ArrayForEach();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayForEachLoopEagerDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayForEachLoopEagerDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayForEachLoopLazyDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayForEachLoopLazyDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayMapLoopEagerDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayMapLoopEagerDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
+ case kArrayMapLoopLazyDeoptContinuation: {
+ Handle<Code> code =
+ isolate->builtins()->ArrayMapLoopLazyDeoptContinuation();
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
default:
UNREACHABLE();
- return Callable(Handle<Code>::null(), VoidDescriptor(isolate));
}
CallInterfaceDescriptor descriptor(isolate, key);
return Callable(code, descriptor);
}
// static
+int Builtins::GetStackParameterCount(Isolate* isolate, Name name) {
+ switch (name) {
+#define CASE(Name, Count, ...) \
+ case k##Name: { \
+ return Count; \
+ }
+ BUILTIN_LIST_TFJ(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+// static
const char* Builtins::name(int index) {
switch (index) {
#define CASE(Name, ...) \
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index b5eebff73b..7ef7f257b8 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -18,6 +18,7 @@ class Handle;
class Isolate;
// Forward declarations.
+class BailoutId;
class RootVisitor;
enum class InterpreterPushArgsMode : unsigned;
namespace compiler {
@@ -43,23 +44,21 @@ class Builtins {
builtin_count
};
+ static BailoutId GetContinuationBailoutId(Name name);
+ static Name GetBuiltinFromBailoutId(BailoutId);
+
#define DECLARE_BUILTIN_ACCESSOR(Name, ...) \
V8_EXPORT_PRIVATE Handle<Code> Name();
BUILTIN_LIST_ALL(DECLARE_BUILTIN_ACCESSOR)
#undef DECLARE_BUILTIN_ACCESSOR
// Convenience wrappers.
- Handle<Code> CallFunction(
- ConvertReceiverMode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
- Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
- Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
+ Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
Handle<Code> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
Handle<Code> InterpreterPushArgsThenCall(ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode,
InterpreterPushArgsMode mode);
Handle<Code> InterpreterPushArgsThenConstruct(InterpreterPushArgsMode mode);
Handle<Code> NewFunctionContext(ScopeType scope_type);
@@ -76,9 +75,13 @@ class Builtins {
return reinterpret_cast<Address>(&builtins_[name]);
}
+ Handle<Code> builtin_handle(Name name);
+
static int GetBuiltinParameterCount(Name name);
- static Callable CallableFor(Isolate* isolate, Name name);
+ V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Name name);
+
+ static int GetStackParameterCount(Isolate* isolate, Name name);
static const char* name(int index);
@@ -115,20 +118,20 @@ class Builtins {
Builtins();
static void Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
+ ConvertReceiverMode mode);
- static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode);
+ static void Generate_CallBoundFunctionImpl(MacroAssembler* masm);
- static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
+ static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
- static void Generate_ForwardVarargs(MacroAssembler* masm, Handle<Code> code);
+ static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code);
+ static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code);
static void Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode);
+ InterpreterPushArgsMode mode);
static void Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode);
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index bcffedfef2..86e5ad509a 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -92,24 +92,6 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ jmp(ebx);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -211,13 +193,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kDerivedConstructorMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ jmp(&post_instantiation_deopt_entry, Label::kNear);
@@ -325,16 +307,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &leave_frame, Label::kNear);
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
- __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ j(Condition::zero, &use_receiver, Label::kNear);
} else {
+ __ j(not_zero, &use_receiver, Label::kNear);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ jmp(&use_receiver, Label::kNear);
}
@@ -423,7 +409,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(esi, Operand::StaticVariable(context_address));
@@ -488,33 +474,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- eax : the value to pass to the generator
// -- ebx : the JSGeneratorObject to resume
// -- edx : the resume mode (tagged)
- // -- ecx : the SuspendFlags of the earlier suspend call (tagged)
// -- esp[0] : return address
// -----------------------------------
- __ SmiUntag(ecx);
- __ AssertGeneratorObject(ebx, ecx);
+ __ AssertGeneratorObject(ebx);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ and_(ecx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmpb(ecx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ j(equal, &async_await, Label::kNear);
-
__ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
__ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
kDontSaveFPRegs);
- __ jmp(&done_store_input, Label::kNear);
-
- __ bind(&async_await);
- __ mov(FieldOperand(ebx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- eax);
- __ RecordWriteField(ebx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- eax, ecx, kDontSaveFPRegs);
- __ jmp(&done_store_input, Label::kNear);
-
- __ bind(&done_store_input);
- // `ecx` no longer holds SuspendFlags
// Store resume mode into generator object.
__ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
@@ -563,7 +530,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label done_loop, loop;
__ bind(&loop);
- __ sub(ecx, Immediate(Smi::FromInt(1)));
+ __ sub(ecx, Immediate(1));
__ j(carry, &done_loop, Label::kNear);
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ jmp(&loop);
@@ -668,6 +635,121 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ push(return_pc);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ cmp(smi_entry, Immediate(Smi::FromEnum(marker)));
+ __ j(not_equal, &no_match, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
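+// Inspects the optimized-code slot in |feedback_vector|: for an optimization
+// marker it usually tail-calls the matching runtime function, for valid
+// optimized code it installs that code on the closure and jumps to it, and
+// for code that was marked for deoptimization it tail-calls the runtime to
+// evict the slot; otherwise it falls through.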
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee if needed, and caller)
+ // -- edx : new target (preserved for callee if needed, and caller)
+ // -- edi : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
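+ // The optimized code slot in the feedback vector holds either a Smi
+ // optimization marker or a WeakCell pointing to a Code object. This helper
+ // tail-calls the optimized code, or the runtime function that a marker
+ // requests, and only falls through when there is nothing to do.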
+ DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = edi;
+ Register optimized_code_entry = scratch;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ mov(optimized_code_entry,
+ FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is an optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ cmp(optimized_code_entry,
+ Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ j(equal, &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ cmp(
+ optimized_code_entry,
+ Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ Assert(equal, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ mov(optimized_code_entry,
+ FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, bailout to a
+ // given label.
+ Label found_deoptimized_code;
+ __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ __ push(eax);
+ __ push(edx);
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ edx, eax, feedback_vector);
+ __ pop(edx);
+ __ pop(eax);
+ __ jmp(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -685,9 +767,20 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = edi;
+ Register feedback_vector = ebx;
+
+ // Load the feedback vector from the closure.
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
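+ // If the call above returned, there was neither optimized code nor an
+ // optimization marker to act on, so fall through and run the bytecode in
+ // the interpreter frame set up below.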
+
// Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
+ // MANUAL indicates that the scope shouldn't actually generate code to set
+ // up the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
@@ -695,27 +788,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = ecx;
- __ mov(ebx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ mov(optimized_code_entry,
- FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ mov(optimized_code_entry,
- FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
-
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- &load_debug_bytecode_array);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+ &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -727,11 +807,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ EmitLoadFeedbackVector(ecx);
- __ add(
- FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
+ __ add(FieldOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
+ Immediate(Smi::FromInt(1)));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -802,12 +881,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
- Register debug_info = kInterpreterBytecodeArrayRegister;
- __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ push(ebx); // feedback_vector == ebx, so save it.
+ __ mov(ecx, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ __ mov(ebx, FieldOperand(ecx, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ebx);
+ __ test(ebx, Immediate(DebugInfo::kHasBreakInfo));
+ __ pop(ebx);
+ __ j(zero, &bytecode_array_loaded);
__ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldOperand(ecx, DebugInfo::kDebugBytecodeArrayOffset));
__ jmp(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -824,31 +910,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
__ RecordWriteCodeEntryField(edi, ecx, ebx);
__ jmp(ecx);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- __ push(edx);
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, edi, edx,
- eax, ebx);
- __ pop(edx);
- __ leave();
- __ jmp(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- __ leave();
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -898,7 +959,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -933,19 +994,23 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ add(ecx, ebx);
Generate_InterpreterPushArgs(masm, ecx, ebx);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(ebx); // Pass the spread in a register
+ __ sub(eax, Immediate(1)); // Subtract one for spread
+ }
+
// Call the target.
__ Push(edx); // Re-push return address.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1076,7 +1141,15 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Pop(edx);
__ Pop(edi);
- __ AssertUndefinedOrAllocationSite(ebx);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ PopReturnAddressTo(ecx);
+ __ Pop(ebx); // Pass the spread in a register
+ __ PushReturnAddressFrom(ecx);
+ __ sub(eax, Immediate(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1158,8 +1231,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- __ LoadHeapObject(ebx,
- masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ Move(ebx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ push(ebx);
@@ -1219,6 +1291,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = edi;
+
+ // Get the feedback vector.
+ Register feedback_vector = ebx;
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1227,46 +1326,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = edi;
- Register new_target = edx;
- Register argument_count = eax;
+ Register feedback_vector = ebx;
// Do we have a valid feedback vector?
- __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ mov(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = ecx;
- __ mov(entry,
- FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ test(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- __ push(argument_count);
- __ push(new_target);
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, edx, eax, ebx);
- __ pop(new_target);
- __ pop(argument_count);
- __ jmp(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = ecx;
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
- Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1281,19 +1357,9 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ jmp(entry);
__ bind(&gotta_call_runtime);
-
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1435,31 +1501,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ popad();
+ // Preserve possible return result from lazy deopt.
+ __ push(eax);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(eax);
// Tear down internal frame.
}
__ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
+ __ ret(0); // Return to ContinueToBuiltin stub still on stack.
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ mov(Operand(esp,
+ config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize),
+ eax);
+ }
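+ // Restore the allocatable general purpose registers that the deoptimizer
+ // pushed, in reverse allocation order. For JavaScript builtins the argument
+ // count was pushed as a Smi, so untag it while popping.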
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
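+ // Tear down the continuation frame: restore ebp, move the builtin's Code
+ // object down into the return-address slot, drop the remaining frame slots,
+ // and bias the Code pointer past its header so that ret resumes at the
+ // builtin's first instruction.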
+ __ mov(
+ ebp,
+ Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ const int offsetToPC =
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
+ __ pop(Operand(esp, offsetToPC));
+ __ Drop(offsetToPC / kPointerSize);
+ __ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ ret(0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1517,7 +1622,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- esp[12] : receiver
// -----------------------------------
- // 1. Load receiver into edi, argArray into eax (if present), remove all
+ // 1. Load receiver into edi, argArray into ebx (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1539,34 +1644,28 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
}
// ----------- S t a t e -------------
- // -- eax : argArray
+ // -- ebx : argArray
// -- edi : receiver
// -- esp[0] : return address
// -- esp[4] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &receiver_not_callable, Label::kNear);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ __ JumpIfRoot(ebx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &no_arguments,
Label::kNear);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1575,13 +1674,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Set(eax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1640,7 +1732,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- esp[16] : receiver
// -----------------------------------
- // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // 1. Load target into edi (if present), argumentsList into ebx (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1661,35 +1753,22 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
}
// ----------- S t a t e -------------
- // -- eax : argumentsList
+ // -- ebx : argumentsList
// -- edi : target
// -- esp[0] : return address
// -- esp[4] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &target_not_callable, Label::kNear);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1702,7 +1781,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- esp[16] : receiver
// -----------------------------------
- // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // 1. Load target into edi (if present), argumentsList into ebx (if present),
// new.target into edx (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1725,49 +1804,27 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
}
// ----------- S t a t e -------------
- // -- eax : argumentsList
+ // -- ebx : argumentsList
// -- edx : new.target
// -- edi : target
// -- esp[0] : return address
// -- esp[4] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &target_not_constructor, Label::kNear);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &new_target_not_constructor, Label::kNear);
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
-
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edx);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
@@ -1939,7 +1996,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ecx);
@@ -2102,7 +2159,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(eax); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ebx);
@@ -2152,97 +2209,22 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- eax : argumentsList
// -- edi : target
+ // -- eax : number of parameters on the stack (not including the receiver)
+ // -- ebx : arguments list (a FixedArray)
+ // -- ecx : len (number of elements to push from args)
// -- edx : new.target (checked to be constructor or undefined)
// -- esp[0] : return address.
- // -- esp[4] : thisArgument
// -----------------------------------
+ __ AssertFixedArray(ebx);
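+ // The caller has already materialized the arguments as a FixedArray in ebx
+ // with its length in ecx, so no conversion from an array-like object is
+ // needed here.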
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(eax, &create_runtime);
-
- // Load the map of argumentsList into ecx.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // Load native context into ebx.
- __ mov(ebx, NativeContextOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
- __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
- __ j(equal, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ Push(edx);
- __ Push(eax);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(edx);
- __ Pop(edi);
- __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
- __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
- __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(not_equal, &create_runtime);
- __ SmiUntag(ebx);
- __ mov(eax, ecx);
- __ jmp(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &create_runtime);
- __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
- __ jmp(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(ecx);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ j(above, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
+ // We need to preserve edx, edi and eax.
+ __ movd(xmm0, edx);
+ __ movd(xmm1, edi);
+ __ movd(xmm2, eax);
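+ // Every general purpose register is needed for the stack-limit check and
+ // the push loop below, so these values are parked in XMM registers and
+ // restored once the arguments have been pushed.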
// Check for stack overflow.
{
@@ -2251,66 +2233,56 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Label done;
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(ecx, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
+ __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ // Make edx the space we have left. The stack might already be overflowed
+ // here which will cause edx to become negative.
+ __ neg(edx);
+ __ add(edx, esp);
+ __ sar(edx, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, ebx);
+ __ cmp(edx, ecx);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- edi : target
- // -- eax : args (a FixedArray built from argumentsList)
- // -- ebx : len (number of elements to push from args)
- // -- edx : new.target (checked to be constructor or undefined)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Push arguments onto the stack (thisArgument is already on the stack).
+ // Push additional arguments onto the stack.
{
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
__ PopReturnAddressTo(edx);
- __ Move(ecx, Immediate(0));
+ __ Move(eax, Immediate(0));
Label done, push, loop;
__ bind(&loop);
- __ cmp(ecx, ebx);
+ __ cmp(eax, ecx);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
__ mov(edi,
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ FieldOperand(ebx, eax, times_pointer_size, FixedArray::kHeaderSize));
__ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(edi);
- __ inc(ecx);
+ __ inc(eax);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(edx);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
- __ Move(eax, ebx);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
- __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Restore eax, edi and edx.
+ __ movd(eax, xmm2);
+ __ movd(edi, xmm1);
+ __ movd(edx, xmm0);
+
+ // Compute the actual parameter count.
+ __ add(eax, ecx);
+
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object)
@@ -2339,11 +2311,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Just load the length from the ArgumentsAdaptorFrame.
__ mov(edx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(edx);
}
__ bind(&arguments_done);
Label stack_done;
- __ SmiUntag(edx);
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
@@ -2389,100 +2361,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg
-// | f()'s caller pc <- sp
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ movzx_b(scratch1,
- Operand::StaticVariable(is_tail_call_elimination_enabled));
- __ cmp(scratch1, Immediate(0));
- __ j(equal, &done, Label::kNear);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ j(not_equal, &no_interpreter_frame, Label::kNear);
- __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(ebp, scratch2);
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(scratch1,
- FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ mov(
- caller_args_count_reg,
- FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, 0);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
@@ -2493,21 +2374,19 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
- Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
@@ -2573,15 +2452,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- esi : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- // Reload shared function info.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- }
-
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
__ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2682,18 +2554,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
// Patch the receiver to [[BoundThis]].
__ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
@@ -2710,8 +2577,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -2721,35 +2587,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET);
- // Check if target has a [[Call]] internal method.
+ // Check that the target has a [[Call]] internal method.
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsCallable));
__ j(zero, &non_callable);
+ // If the target is a JSProxy, dispatch to the CallProxy builtin.
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ PushReturnAddressFrom(ecx);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ add(eax, Immediate(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
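+ // Load the CallProxy Code object from its builtin external reference,
+ // advance past the Code header and tail-call it instead of going through
+ // the runtime.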
+ __ mov(ecx, Operand::StaticVariable(
+ ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2759,7 +2615,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2771,178 +2627,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- // Free up some registers.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
-
- Register argc = eax;
-
- Register scratch = ecx;
- Register scratch2 = edi;
-
- Register spread = ebx;
- Register spread_map = edx;
-
- Register spread_len = edx;
-
- Label runtime_call, push_args;
- __ mov(spread, Operand(esp, kPointerSize));
- __ JumpIfSmi(spread, &runtime_call);
- __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
- __ j(not_equal, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
- __ mov(scratch2, NativeContextOperand());
- __ cmp(scratch,
- ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ mov(scratch2, NativeContextOperand());
- __ mov(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ cmp(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(above, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
- __ j(equal, &no_protector_check);
- __ cmp(scratch, Immediate(FAST_ELEMENTS));
- __ j(equal, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
- __ jmp(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Need to save these on the stack.
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
- __ Push(edi);
- __ Push(edx);
- __ SmiTag(argc);
- __ Push(argc);
- __ Push(spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, eax);
- __ Pop(argc);
- __ SmiUntag(argc);
- __ Pop(edx);
- __ Pop(edi);
- // Free up some registers.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ lea(argc, Operand(argc, spread_len, times_1, -1));
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ neg(scratch);
- __ add(scratch, esp);
- __ sar(scratch, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, spread_len);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- Register return_address = edi;
- // Pop the return address and spread argument.
- __ PopReturnAddressTo(return_address);
- __ Pop(scratch);
-
- Register scratch2 = esi;
- __ movd(xmm2, esi);
-
- __ mov(scratch, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ j(equal, &done, Label::kNear);
- __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ inc(scratch);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(return_address);
- __ movd(esi, xmm2);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the target to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push edx to save it.
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3066,19 +2750,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (can be any Object)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 24fe271cb3..4134d137a4 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -227,7 +227,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -378,7 +378,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -423,22 +423,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -537,16 +521,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
t2, t3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Branch(&post_instantiation_deopt_entry);
@@ -653,18 +635,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ Branch(&use_receiver, eq, t2, Operand(zero_reg));
} else {
+ __ Branch(&use_receiver, ne, t2, Operand(zero_reg));
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ Branch(&use_receiver);
}
@@ -758,7 +742,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ li(cp, Operand(context_address));
__ lw(cp, MemOperand(cp));
@@ -830,33 +814,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- v0 : the value to pass to the generator
// -- a1 : the JSGeneratorObject to resume
// -- a2 : the resume mode (tagged)
- // -- a3 : the SuspendFlags of the earlier suspend call (tagged)
// -- ra : return address
// -----------------------------------
- __ SmiUntag(a3);
- __ AssertGeneratorObject(a1, a3);
+ __ AssertGeneratorObject(a1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(t8, a3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ Branch(&async_await, equal, t8,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
-
__ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&async_await);
- __ sw(v0, FieldMemOperand(
- a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- v0, a3, kRAHasNotBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done_store_input);
- // `a3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
@@ -905,7 +871,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label done_loop, loop;
__ bind(&loop);
- __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+ __ Subu(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg));
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ Branch(&loop);
@@ -925,7 +891,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(a0);
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -1004,6 +969,115 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Addu(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee if needed, and caller)
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = a1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ lw(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ Branch(&fallthrough, eq, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ Assert(
+ eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&fallthrough, hs, sp, Operand(at));
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ lw(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ lw(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
+
+ // Optimized code is good; get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code; evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
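In plain terms, MaybeTailCallOptimizedCodeSlot inspects the optimized-code slot of the feedback vector and either tail-calls optimized code, tail-calls a runtime entry, or falls through to its caller. A simplified sketch of that dispatch (illustrative pseudocode in comment form, not the emitted MIPS instructions):

//   slot = feedback_vector[FeedbackVector::kOptimizedCodeIndex]
//   if (slot is a Smi) {                          // optimization marker
//     if (marker == kNone)                        -> fall through
//     if (marker == kCompileOptimized)            -> Runtime::kCompileOptimized_NotConcurrent
//     if (marker == kCompileOptimizedConcurrent)  -> Runtime::kCompileOptimized_Concurrent
//     else /* kInOptimizationQueue */             -> check the stack limit, then
//                                                    Runtime::kTryInstallOptimizedCode or fall through
//   } else {                                      // WeakCell pointing at a Code object
//     code = WeakCell::value(slot)
//     if (code has been cleared)                  -> fall through
//     if (code is marked for deoptimization)      -> Runtime::kEvictOptimizedCodeSlot
//     else                                        -> install code on the closure and jump to it
//   }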
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1022,35 +1096,31 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Load the feedback vector from the closure.
+ __ lw(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(a1);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = t0;
- __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ lw(optimized_code_entry,
- FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ lw(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- Register debug_info = kInterpreterBytecodeArrayRegister;
- DCHECK(!debug_info.is(a0));
- __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ JumpIfNotSmi(t0, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -1062,15 +1132,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ lw(t0, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ lw(t0,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ sw(t0,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1142,10 +1212,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with the value at
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ lw(t1, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
+ __ SmiUntag(t1);
+ __ And(t1, t1, Operand(DebugInfo::kHasBreakInfo));
+ __ Branch(&bytecode_array_loaded, eq, t1, Operand(zero_reg));
__ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
__ Branch(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1153,35 +1229,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
__ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(a1, t0, t1);
+ __ sw(t0, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, t0, t1);
__ Jump(t0);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ lw(t1,
- FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
- __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
- t2);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1222,7 +1275,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1245,17 +1298,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// This function modifies a2, t4 and t1.
Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Subu(a0, a0, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1287,7 +1344,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// This function modified t4, t1 and t0.
Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
- __ AssertUndefinedOrAllocationSite(a2, t0);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Subu(a0, a0, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
@@ -1415,6 +1478,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = a1;
+
+ // Get the feedback vector.
+ Register feedback_vector = a2;
+ __ lw(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ Operand(at));
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
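With that helper in place, the new Generate_CheckOptimizationMarker builtin is a thin wrapper. Roughly (sketch only, using the names from the code above):

//   feedback_vector = value of the closure's feedback-vector cell   // asserted defined under --debug-code
//   MaybeTailCallOptimizedCodeSlot(feedback_vector)                  // may tail-call optimized code or a runtime entry
//   GenerateTailCallToSharedCode()                                   // otherwise run the SharedFunctionInfo code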
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1423,43 +1514,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = a1;
- Register index = a2;
+ Register feedback_vector = a2;
// Do we have a valid feedback vector?
- __ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ lw(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ lw(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = t0;
- __ lw(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ lw(t1, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = t0;
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ lbu(t1, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ And(t1, t1,
- Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1476,15 +1547,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1620,30 +1682,68 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ Push(v0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ Pop(v0);
}
__ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
+ __ Jump(ra); // Jump to the ContinueToBuiltin stub
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ sw(v0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ lw(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(t0);
+ __ Addu(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t0);
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
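The four continuation entry points above differ only in the two flags they pass to Generate_ContinueToBuiltinHelper; the helper restores the allocatable registers saved by the deoptimizer, optionally overwrites the pushed hole with the lazy-deopt result in v0, and jumps to the continuation code. As a quick reference (flags as used above):

//   ContinueToCodeStubBuiltin              -> java_script_builtin = false, with_result = false
//   ContinueToCodeStubBuiltinWithResult    -> false, true   (v0 overwrites the deoptimizer's hole)
//   ContinueToJavaScriptBuiltin            -> true,  false  (argument-count register is Smi-untagged)
//   ContinueToJavaScriptBuiltinWithResult  -> true,  true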
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1777,32 +1877,27 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Addu(sp, sp, Operand(scratch));
__ sw(a2, MemOperand(sp));
- __ mov(a0, a3);
+ __ mov(a2, a3);
}
// ----------- S t a t e -------------
- // -- a0 : argArray
+ // -- a2 : argArray
// -- a1 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(a1, &receiver_not_callable);
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsCallable));
- __ Branch(&receiver_not_callable, eq, t0, Operand(zero_reg));
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a2, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a2, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1811,13 +1906,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ mov(a0, zero_reg);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1895,34 +1983,22 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Addu(sp, sp, Operand(scratch));
__ sw(a2, MemOperand(sp));
- __ mov(a0, a3);
+ __ mov(a2, a3);
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a1 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(a1, &target_not_callable);
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsCallable));
- __ Branch(&target_not_callable, eq, t0, Operand(zero_reg));
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1959,48 +2035,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ lw(a3, MemOperand(a0)); // new.target
__ bind(&no_arg);
__ Addu(sp, sp, Operand(scratch));
- __ mov(a0, a2);
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a3 : new.target
// -- a1 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(a1, &target_not_constructor);
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsConstructor));
- __ Branch(&target_not_constructor, eq, t0, Operand(zero_reg));
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(a3, &new_target_not_constructor);
- __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsConstructor));
- __ Branch(&new_target_not_constructor, eq, t0, Operand(zero_reg));
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ sw(a1, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ sw(a3, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2027,149 +2081,59 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a0 : argumentsList
- // -- a1 : target
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- t0 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
// -----------------------------------
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(a0, &create_runtime);
-
- // Load the map of argumentsList into a2.
- __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
-
- // Load native context into t0.
- __ lw(t0, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ lw(at, ContextMemOperand(t0, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
- __ lw(at, ContextMemOperand(t0, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
-
- // Check if argumentsList is a fast JSArray.
- __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3, a0);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ mov(a0, v0);
- __ Pop(a1, a3);
- __ lw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
- }
- __ Branch(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ lw(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
- __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
- __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ Branch(&create_runtime, ne, a2, Operand(at));
- __ SmiUntag(a2);
- __ mov(a0, t0);
- __ Branch(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ lw(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
- __ lw(at, ContextMemOperand(t0, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&create_runtime, ne, a2, Operand(at));
- __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
- __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&create_runtime, ne, a2,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
- __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ SmiUntag(a2);
- __ Branch(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(t1);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
- __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
- __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
- __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ SmiUntag(a2);
-
- __ bind(&done_create);
- }
+ __ AssertFixedArray(a2);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
- __ LoadRoot(t0, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
// Make ip the space we have left. The stack might already be overflowed
// here which will cause ip to become negative.
- __ Subu(t0, sp, t0);
+ __ Subu(t1, sp, t1);
// Check if the arguments will overflow the stack.
- __ sll(at, a2, kPointerSizeLog2);
- __ Branch(&done, gt, t0, Operand(at)); // Signed comparison.
+ __ sll(at, t0, kPointerSizeLog2);
+ __ Branch(&done, gt, t1, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a0 : args (a FixedArray built from argumentsList)
- // -- a2 : len (number of elements to push from args)
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ mov(t0, zero_reg);
+ __ mov(t2, zero_reg);
Label done, push, loop;
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
- __ Branch(&done, eq, t0, Operand(a2));
- __ Lsa(at, a0, t0, kPointerSizeLog2);
+ __ Branch(&done, eq, t2, Operand(t0));
+ __ Lsa(at, a2, t2, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Branch(&push, ne, t1, Operand(at));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(at);
- __ Addu(t0, t0, Operand(1));
+ __ Addu(t2, t2, Operand(1));
__ Branch(&loop);
__ bind(&done);
- __ Move(a0, t0);
+ __ Addu(a0, a0, t2);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- Label construct;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&construct, ne, a3, Operand(at));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ bind(&construct);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
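The push loop in Generate_CallOrConstructVarargs walks the FixedArray in a2, converts holes to undefined before pushing, and then folds the pushed count into the argument count. In pseudocode (illustrative only):

//   for (i = 0; i < len /* t0 */; i++) {
//     value = args[i];                          // FixedArray element
//     if (value == the_hole) value = undefined;
//     Push(value);
//   }
//   a0 += len;                                  // argc now includes the pushed arguments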
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a3 : the new.target (for [[Construct]] calls)
@@ -2195,11 +2159,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Just get the length from the ArgumentsAdaptorFrame.
__ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(t2);
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
- __ SmiUntag(t2);
__ Subu(t2, t2, a2);
__ Branch(&stack_done, le, t2, Operand(zero_reg));
{
@@ -2229,101 +2193,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ li(at, Operand(is_tail_call_elimination_enabled));
- __ lb(scratch1, MemOperand(at));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ lw(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_interpreter_frame, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ lw(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ Branch(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ lw(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ lw(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -2334,21 +2206,20 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
// ----------- S t a t e -------------
@@ -2413,13 +2284,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ sra(a2, a2, kSmiTagSize); // Un-tag.
ParameterCount actual(a0);
ParameterCount expected(a2);
__ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2435,18 +2301,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
// Patch the receiver to [[BoundThis]].
{
__ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
@@ -2528,8 +2389,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2539,9 +2399,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
// Check if target has a [[Call]] internal method.
@@ -2549,21 +2409,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ And(t1, t1, Operand(1 << Map::kIsCallable));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
+ // Check if the target is a proxy and, if so, call the CallProxy external builtin.
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
-
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(a1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ Addu(a0, a0, 2);
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ li(t2, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ lw(t2, MemOperand(t2));
+ __ Jump(t2, Operand(Code::kHeaderSize - kHeapObjectTag));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2574,7 +2424,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2586,151 +2436,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = a0;
- Register constructor = a1;
- Register new_target = a3;
-
- Register scratch = t0;
- Register scratch2 = t1;
-
- Register spread = a2;
- Register spread_map = t3;
-
- Register spread_len = t3;
-
- Register native_context = t4;
-
- Label runtime_call, push_args;
- __ lw(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
- __ lw(native_context, NativeContextMemOperand());
-
- // Check that the spread is an array.
- __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
- __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that we have the original ArrayPrototype.
- __ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ lw(scratch2, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- // Check that the map of the initial array iterator hasn't changed.
- __ lw(scratch,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ lw(scratch2,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
- // For non-FastHoley kinds, we can skip the protector check.
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ lw(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ Branch(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, v0);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ Addu(argc, argc, spread_len);
- __ Subu(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause ip to become negative.
- __ Subu(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ sll(at, spread_len, kPointerSizeLog2);
- __ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ mov(scratch, zero_reg);
- Label done, push, loop;
- __ bind(&loop);
- __ Branch(&done, eq, scratch, Operand(spread_len));
- __ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
- __ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ Addu(scratch, scratch, Operand(1));
- __ Branch(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the target to call (can be any Object).
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push a3 to save it.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
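The per-port spread expansion above is removed because the interpreter push-args builtins now hand the spread straight to the shared CallWithSpread builtin; see the kWithFinalSpread handling earlier in this file, which amounts to (sketch, the construct path mirrors it):

//   Pop(a2);                      // the spread argument, passed in a register
//   Subu(a0, a0, Operand(1));     // exclude the spread from the argument count
//   Jump(builtins()->CallWithSpread(), RelocInfo::CODE_TARGET);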
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2911,19 +2616,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (can be any Object)
- // -- a3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 4d80993952..5af11c3fc5 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -228,7 +228,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -380,7 +380,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
@@ -426,22 +426,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(a4, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(a4));
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -541,16 +525,14 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
t2, t3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Branch(&post_instantiation_deopt_entry);
@@ -657,18 +639,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(t2,
- FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(t2, t2,
- Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ Branch(&use_receiver, eq, t2, Operand(zero_reg));
} else {
+ __ Branch(&use_receiver, ne, t2, Operand(zero_reg));
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ Branch(&use_receiver);
}
@@ -716,32 +700,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- v0 : the value to pass to the generator
// -- a1 : the JSGeneratorObject to resume
// -- a2 : the resume mode (tagged)
- // -- a3 : the SuspendFlags of the earlier suspend call (tagged)
// -- ra : return address
// -----------------------------------
- __ SmiUntag(a3);
- __ AssertGeneratorObject(a1, a3);
+ __ AssertGeneratorObject(a1);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ And(t8, a3, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ Branch(&async_await, equal, t8,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
-
__ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ jmp(&done_store_input);
-
- __ bind(&async_await);
- __ Sd(v0, FieldMemOperand(
- a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
- __ RecordWriteField(a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- v0, a3, kRAHasNotBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done_store_input);
- // `a3` no longer holds SuspendFlags
// Store resume mode into generator object.
__ Sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
@@ -892,7 +858,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ li(cp, Operand(context_address));
__ Ld(cp, MemOperand(cp));
@@ -1004,6 +970,115 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ Daddu(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee if needed, and caller)
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = a1;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ Ld(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ Branch(&fallthrough, eq, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ Assert(
+ eq, kExpectedOptimizationSentinel, optimized_code_entry,
+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&fallthrough, hs, sp, Operand(t0));
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ Ld(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ Lw(a5, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+    // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
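
As a reading aid for the MIPS64 helper above (and its PPC counterpart later in this patch), the decision tree that MaybeTailCallOptimizedCodeSlot encodes can be sketched in stand-alone C++. The enum, struct and function names below are invented for illustration only; they are not part of this patch or of V8's API, and the sketch merely mirrors the branch structure of the generated code.

#include <cstdio>

// Stand-in for the feedback vector's optimized-code slot: in the real builtin
// this is either a Smi marker or a WeakCell pointing at optimized code.
enum class Marker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue
};

struct Slot {
  bool is_marker;         // Smi case
  Marker marker;          // valid only when is_marker
  bool cell_cleared;      // WeakCell value became a Smi (code was collected)
  bool marked_for_deopt;  // Code::kMarkedForDeoptimizationBit is set
};

// Returns a description of the tail call the builtin would take, or nullptr
// for the fall-through path (keep going with the interpreter / shared code).
const char* Dispatch(const Slot& slot, bool at_stack_limit) {
  if (slot.is_marker) {
    switch (slot.marker) {
      case Marker::kNone:
        return nullptr;  // no optimization trigger, fall through
      case Marker::kCompileOptimized:
        return "Runtime::kCompileOptimized_NotConcurrent";
      case Marker::kCompileOptimizedConcurrent:
        return "Runtime::kCompileOptimized_Concurrent";
      case Marker::kInOptimizationQueue:
        // Only poll for a finished concurrent job when an interrupt is
        // pending anyway (the stack-limit check is used as a cheap cue).
        return at_stack_limit ? "Runtime::kTryInstallOptimizedCode" : nullptr;
    }
  }
  if (slot.cell_cleared) return nullptr;  // cleared weak cell, fall through
  if (slot.marked_for_deopt) return "Runtime::kEvictOptimizedCodeSlot";
  return "link closure and jump to optimized code";
}

int main() {
  Slot s{true, Marker::kCompileOptimizedConcurrent, false, false};
  const char* action = Dispatch(s, false);
  std::printf("%s\n", action ? action : "fall through");
  return 0;
}
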
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1022,35 +1097,31 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Load the feedback vector from the closure.
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(a1);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = a4;
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ Ld(optimized_code_entry,
- FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ld(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- Register debug_info = kInterpreterBytecodeArrayRegister;
- DCHECK(!debug_info.is(a0));
- __ Ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
- __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ JumpIfNotSmi(a4, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -1062,15 +1133,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ Ld(a4, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ Ld(a4,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ Sd(a4, FieldMemOperand(
- a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ Sd(a4,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
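
The invocation-count bump above (Ld, Daddu with Smi::FromInt(1), Sd) never untags the value. That works because, in the usual 64-bit V8 layout assumed here, a Smi keeps its 32-bit payload in the upper half of the word, so tagged addition is ordinary addition. A minimal stand-alone sketch of that representation (helper names invented, not V8's API):

#include <cstdint>
#include <cstdio>

// Assumed 64-bit Smi layout: payload in the upper 32 bits, tag bits zero.
constexpr int64_t SmiFromInt(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}
constexpr int32_t SmiToInt(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}

int main() {
  int64_t count = SmiFromInt(41);
  count += SmiFromInt(1);  // tagged addition, equivalent to the Daddu above
  std::printf("%d\n", SmiToInt(count));  // prints 42
  return 0;
}
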
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1142,10 +1213,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ Ld(a5, FieldMemOperand(a4, DebugInfo::kFlagsOffset));
+ __ SmiUntag(a5);
+ __ And(a5, a5, Operand(DebugInfo::kHasBreakInfo));
+ __ Branch(&bytecode_array_loaded, eq, a5, Operand(zero_reg));
__ Ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
__ Branch(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -1153,35 +1230,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
__ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(a1, a4, a5);
+ __ Sd(a4, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, a4, a5);
__ Jump(a4);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ Lw(a5,
- FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
- __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, a5,
- t0);
- __ Jump(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1222,7 +1276,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1245,17 +1299,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// This function modifies a2, t0 and a4.
Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1287,7 +1345,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// This function modifies t0, a4 and a5.
Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
- __ AssertUndefinedOrAllocationSite(a2, t0);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(a2); // Pass the spread in a register
+ __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(a1);
@@ -1416,6 +1480,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = a1;
+
+ // Get the feedback vector.
+ Register feedback_vector = a2;
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+ Operand(at));
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1424,43 +1516,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = a1;
- Register index = a2;
+ Register feedback_vector = a2;
// Do we have a valid feedback vector?
- __ Ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ Ld(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = a4;
- __ Ld(entry, FieldMemOperand(
- index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ Ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ Lw(a5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, a5, t0);
- __ Jump(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = a4;
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ Lbu(a5, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ And(a5, a5,
- Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1477,15 +1549,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1621,30 +1684,68 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ push(v0);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(v0);
}
__ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
+ __ Jump(ra); // Jump to the ContinueToBuiltin stub
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ Sd(v0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ Ld(fp, MemOperand(
+ sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(t0);
+ __ Daddu(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t0);
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1759,14 +1860,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -----------------------------------
Register argc = a0;
- Register arg_array = a0;
+ Register arg_array = a2;
Register receiver = a1;
- Register this_arg = a2;
+ Register this_arg = a5;
Register undefined_value = a3;
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load receiver into a1, argArray into a0 (if present), remove all
+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1786,29 +1887,24 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- a0 : argArray
+ // -- a2 : argArray
// -- a1 : receiver
// -- a3 : undefined root value
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(receiver, &receiver_not_callable);
- __ Ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsCallable));
- __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
__ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments);
__ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- DCHECK(undefined_value.is(a3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1818,13 +1914,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
DCHECK(receiver.is(a1));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ Sd(receiver, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1879,14 +1968,14 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -----------------------------------
Register argc = a0;
- Register arguments_list = a0;
+ Register arguments_list = a2;
Register target = a1;
- Register this_argument = a2;
+ Register this_argument = a5;
Register undefined_value = a3;
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1910,31 +1999,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a1 : target
// -- a3 : undefined root value
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(target, &target_not_callable);
- __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsCallable));
- __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- DCHECK(undefined_value.is(a3));
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ Sd(target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1946,13 +2023,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
Register argc = a0;
- Register arguments_list = a0;
+ Register arguments_list = a2;
Register target = a1;
Register new_target = a3;
Register undefined_value = a4;
Register scratch = a5;
- // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1977,44 +2054,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- a0 : argumentsList
+ // -- a2 : argumentsList
// -- a1 : target
// -- a3 : new.target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(target, &target_not_constructor);
- __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsConstructor));
- __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(new_target, &new_target_not_constructor);
- __ Ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
- __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsConstructor));
- __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ Sd(target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ Sd(new_target, MemOperand(sp));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2043,135 +2099,45 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a0 : argumentsList
- // -- a1 : target
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- a4 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
// -----------------------------------
+ __ AssertFixedArray(a2);
- Register arguments_list = a0;
- Register target = a1;
- Register new_target = a3;
-
- Register args = a0;
- Register len = a2;
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(arguments_list, &create_runtime);
-
- // Load the map of argumentsList into a2.
- Register arguments_list_map = a2;
- __ Ld(arguments_list_map,
- FieldMemOperand(arguments_list, HeapObject::kMapOffset));
-
- // Load native context into a4.
- Register native_context = a4;
- __ Ld(native_context, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ Ld(at, ContextMemOperand(native_context,
- Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
- __ Ld(at, ContextMemOperand(native_context,
- Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
-
- // Check if argumentsList is a fast JSArray.
- __ Lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(target, new_target, arguments_list);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ mov(arguments_list, v0);
- __ Pop(target, new_target);
- __ Lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
- }
- __ Branch(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ Lw(len, UntagSmiFieldMemOperand(arguments_list,
- JSArgumentsObject::kLengthOffset));
- __ Ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
- __ Lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
- __ Branch(&create_runtime, ne, len, Operand(at));
- __ mov(args, a4);
-
- __ Branch(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ Ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
- __ Ld(at, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&create_runtime, ne, a2, Operand(at));
- __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
- __ Lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
- __ Branch(&create_runtime, ne, a2,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ Lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
- __ Ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ Branch(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ Lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(t1);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
- __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
- __ Lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ Ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
+ Register args = a2;
+ Register len = a4;
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
- __ LoadRoot(a4, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
// Make ip the space we have left. The stack might already be overflowed
// here which will cause ip to become negative.
- __ Dsubu(a4, sp, a4);
+ __ Dsubu(a5, sp, a5);
// Check if the arguments will overflow the stack.
__ dsll(at, len, kPointerSizeLog2);
- __ Branch(&done, gt, a4, Operand(at)); // Signed comparison.
+ __ Branch(&done, gt, a5, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a0 : args (a FixedArray built from argumentsList)
- // -- a2 : len (number of elements to push from args)
- // -- a3 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
Label done, push, loop;
- Register src = a4;
+ Register src = a6;
Register scratch = len;
__ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
__ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
- __ mov(a0, len); // The 'len' argument for Call() or Construct().
+ __ Daddu(a0, a0, len); // The 'len' argument for Call() or Construct().
__ dsll(scratch, len, kPointerSizeLog2);
__ Dsubu(scratch, sp, Operand(scratch));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
@@ -2186,31 +2152,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- a0 : argument count (len)
- // -- a1 : target
- // -- a3 : new.target (checked to be constructor or undefinded)
- // -- sp[0] : args[len-1]
- // -- sp[8] : args[len-2]
- // ... : ...
- // -- sp[8*(len-2)] : args[1]
- // -- sp[8*(len-1)] : args[0]
- // ----------------------------------
-
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- Label construct;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&construct, ne, a3, Operand(at));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ bind(&construct);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
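
The overflow check near the top of Generate_CallOrConstructVarargs compares the free stack space against the number of FixedArray elements about to be pushed, using a signed comparison because the stack may already be past the limit. A stand-alone C++ sketch of the same arithmetic (function and parameter names invented for illustration):

#include <cstdint>
#include <cstdio>

// True if pushing `len` pointer-sized arguments would cross the real stack
// limit; mirrors the Dsubu / dsll / Branch(gt) sequence in the builtin.
bool WouldOverflow(uintptr_t sp, uintptr_t real_stack_limit, uint64_t len) {
  constexpr uint64_t kPointerSize = 8;  // 64-bit target, as on MIPS64
  int64_t remaining = static_cast<int64_t>(sp - real_stack_limit);
  return remaining <= static_cast<int64_t>(len * kPointerSize);
}

int main() {
  std::printf("%d\n", WouldOverflow(0x7fff0000u, 0x7ffe0000u, 16));        // 0
  std::printf("%d\n", WouldOverflow(0x7fff0000u, 0x7ffe0000u, 1u << 20));  // 1
  return 0;
}
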
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a3 : the new.target (for [[Construct]] calls)
@@ -2270,99 +2218,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ li(at, Operand(is_tail_call_elimination_enabled));
- __ Lb(scratch1, MemOperand(at));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ Ld(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_interpreter_frame, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ Lw(caller_args_count_reg,
- UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ Ld(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ Ld(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Lw(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -2373,21 +2231,20 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
// ----------- S t a t e -------------
@@ -2452,10 +2309,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
__ Lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
@@ -2473,18 +2326,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
// Patch the receiver to [[BoundThis]].
{
__ Ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
@@ -2565,8 +2413,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2576,9 +2423,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
// Check if target has a [[Call]] internal method.
@@ -2587,20 +2434,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
-
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, a0, t0, t1, t2);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(a1);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ Daddu(a0, a0, 2);
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ li(t2, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ Ld(t2, MemOperand(t2));
+ __ Daddu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t2);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2611,7 +2448,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2623,150 +2460,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = a0;
- Register constructor = a1;
- Register new_target = a3;
-
- Register scratch = t0;
- Register scratch2 = t1;
-
- Register spread = a2;
- Register spread_map = a4;
-
- Register spread_len = a4;
-
- Register native_context = a5;
-
- Label runtime_call, push_args;
- __ Ld(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ Ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
- __ Ld(native_context, NativeContextMemOperand());
-
- // Check that the spread is an array.
- __ Lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
- __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that we have the original ArrayPrototype.
- __ Ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ Ld(scratch2, ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- // Check that the map of the initial array iterator hasn't changed.
- __ Ld(scratch,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ Ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ Ld(scratch2,
- ContextMemOperand(native_context,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ Lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
- // For non-FastHoley kinds, we can skip the protector check.
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
- __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ Branch(&runtime_call, ne, scratch,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ Lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
- __ Ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ Branch(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, v0);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ Lw(spread_len,
- UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ Daddu(argc, argc, spread_len);
- __ Dsubu(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause ip to become negative.
- __ Dsubu(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ dsll(at, spread_len, kPointerSizeLog2);
- __ Branch(&done, gt, scratch, Operand(at)); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ mov(scratch, zero_reg);
- Label done, push, loop;
- __ bind(&loop);
- __ Branch(&done, eq, scratch, Operand(spread_len));
- __ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
- __ Ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ Daddu(scratch, scratch, Operand(1));
- __ Branch(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the target to call (can be any Object).
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push a3 to save it.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2946,19 +2639,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (can be any Object)
- // -- a3 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index dc2221e10b..33d734f3bb 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -227,7 +227,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
@@ -379,7 +379,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
@@ -427,23 +427,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bge(&ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -552,16 +535,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7,
- FunctionKind::kDerivedConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r7, SharedFunctionInfo::kDerivedConstructorMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r7, r8);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -679,10 +659,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7,
- FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r7, SharedFunctionInfo::kClassConstructorMask, r0);
__ beq(&use_receiver, cr0);
} else {
@@ -739,37 +716,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r3 : the value to pass to the generator
// -- r4 : the JSGeneratorObject to resume
// -- r5 : the resume mode (tagged)
- // -- r6 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r6);
- __ AssertGeneratorObject(r4, r6);
+ __ AssertGeneratorObject(r4);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ andi(r6, r6,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmpi(r6, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ beq(&async_await);
-
__ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&async_await);
- __ StoreP(
- r3,
- FieldMemOperand(r4, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- r0);
- __ RecordWriteField(r4, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r3, r6, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&done_store_input);
- // `r6` no longer holds SuspendFlags
// Store resume mode into generator object.
__ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
@@ -823,13 +778,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label loop, done_loop;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-#if V8_TARGET_ARCH_PPC64
__ cmpi(r3, Operand::Zero());
__ beq(&done_loop);
-#else
- __ SmiUntag(r3, SetRC);
- __ beq(&done_loop, cr0);
-#endif
__ mtctr(r3);
__ bind(&loop);
__ push(ip);
@@ -927,7 +877,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
@@ -1044,6 +994,121 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ bne(&no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+  // -- r3 : argument count (preserved for callee if needed, and caller)
+  // -- r6 : new target (preserved for callee if needed, and caller)
+  // -- r4 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r4;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimisation marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ beq(&fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(
+ optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmpl(sp, ip);
+ __ bge(&fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadWordArith(
+ scratch2,
+ FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+ __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code, cr0);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ mr(ip, optimized_code_entry);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1063,43 +1128,35 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r4;
+ Register feedback_vector = r5;
+
+ // Load the feedback vector from the closure.
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r4);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
-
- Register optimized_code_entry = r7;
- __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- Label array_done;
- Register debug_info = r5;
- DCHECK(!debug_info.is(r3));
- __ LoadP(debug_info,
- FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- __ TestIfSmi(debug_info, r0);
- __ beq(&array_done, cr0);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ bind(&array_done);
+ __ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+ __ TestIfSmi(r7, r0);
+ __ bne(&maybe_load_debug_bytecode_array, cr0);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1111,16 +1168,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
- __ LoadP(r8, FieldMemOperand(
- r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(
+ r8, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
- __ StoreP(r8, FieldMemOperand(
- r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- r0);
+ __ StoreP(
+ r8,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
+ r0);
// Check function data field is actually a BytecodeArray object.
@@ -1193,40 +1251,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r5);
__ blr();
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with the value
+ // from SharedFunctionInfo::kFunctionDataOffset.
+ Label done;
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ip);
+ __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo));
+ __ beq(&done, cr0);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
+ __ bind(&done);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
__ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(r4, r7, r8);
+ __ StoreP(r7, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, r7, r8);
__ JumpToJSEntry(r7);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ lwz(r8, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&gotta_call_runtime, cr0);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
- r5);
- __ JumpToJSEntry(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1260,7 +1309,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -1284,17 +1333,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments. r5, r6, r7 will be modified.
Generate_InterpreterPushArgs(masm, r6, r5, r6, r7);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r5); // Pass the spread in a register
+ __ subi(r3, r3, Operand(1)); // Subtract one for spread
+ }
+
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1330,8 +1383,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push the arguments. r8, r7, r9 will be modified.
Generate_InterpreterPushArgs(masm, r3, r7, r3, r9);
__ bind(&skip);
-
- __ AssertUndefinedOrAllocationSite(r5, r8);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r5); // Pass the spread in a register
+ __ subi(r3, r3, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+ }
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r4);
@@ -1461,6 +1518,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r4;
+
+ // Get the feedback vector.
+ Register feedback_vector = r5;
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1469,43 +1554,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r4;
- Register index = r5;
+ Register feedback_vector = r5;
// Do we have a valid feedback vector?
- __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r7;
- __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ lwz(r8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&gotta_call_runtime, cr0);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
- __ JumpToJSEntry(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r7;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ lbz(r8, FieldMemOperand(entry,
- SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
- __ bne(&gotta_call_runtime, cr0);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1523,15 +1590,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1674,30 +1732,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Preserve possible return result from lazy deopt.
+ __ push(r3);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r3);
}
__ addi(sp, sp, Operand(kPointerSize)); // Ignore state
- __ blr(); // Jump to miss handler
+ __ blr(); // Jump to ContinueToBuiltin stub
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ StoreP(
+ r3, MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+ __ LoadP(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip);
+ __ addi(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(r0);
+ __ mtlr(r0);
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
}
+} // namespace
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1824,52 +1922,47 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r4, argArray into r3 (if present), remove all
+ // 1. Load receiver into r4, argArray into r5 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label skip;
- Register arg_size = r5;
+ Register arg_size = r8;
Register new_sp = r6;
Register scratch = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ mr(scratch, r3);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ mr(r5, scratch);
__ LoadP(r4, MemOperand(new_sp, 0)); // receiver
__ cmpi(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r5, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ bind(&skip);
__ mr(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r3 : argArray
+ // -- r5 : argArray
// -- r4 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r4, &receiver_not_callable);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
- __ beq(&receiver_not_callable, cr0);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r3, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r5, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1878,13 +1971,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ li(r3, Operand::Zero());
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1940,19 +2026,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // 1. Load target into r4 (if present), argumentsList into r5 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label skip;
- Register arg_size = r5;
+ Register arg_size = r8;
Register new_sp = r6;
Register scratch = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mr(scratch, r4);
- __ mr(r3, r4);
+ __ mr(r5, r4);
__ cmpi(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
@@ -1960,37 +2046,25 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ cmpi(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
- __ LoadP(r3, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ bind(&skip);
__ mr(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r3 : argumentsList
+ // -- r5 : argumentsList
// -- r4 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r4, &target_not_callable);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
- __ beq(&target_not_callable, cr0);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -2002,18 +2076,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // 1. Load target into r4 (if present), argumentsList into r5 (if present),
// new.target into r6 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label skip;
- Register arg_size = r5;
+ Register arg_size = r8;
Register new_sp = r7;
__ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
__ add(new_sp, sp, arg_size);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mr(r3, r4);
+ __ mr(r5, r4);
__ mr(r6, r4);
__ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
__ cmpi(arg_size, Operand(kPointerSize));
@@ -2021,7 +2095,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ mr(r6, r4); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ LoadP(r5, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ cmpi(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
@@ -2030,44 +2104,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- r3 : argumentsList
+ // -- r5 : argumentsList
// -- r6 : new.target
// -- r4 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r4, &target_not_constructor);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsConstructor, r0);
- __ beq(&target_not_constructor, cr0);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r6, &new_target_not_constructor);
- __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsConstructor, r0);
- __ beq(&new_target_not_constructor, cr0);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ StoreP(r4, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ StoreP(r6, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2099,99 +2152,17 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r3 : argumentsList
- // -- r4 : target
- // -- r6 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r4 : target
+ // -- r3 : number of parameters on the stack (not including the receiver)
+ // -- r5 : arguments list (a FixedArray)
+ // -- r7 : len (number of elements to push from args)
+ // -- r6 : new.target (for [[Construct]])
// -----------------------------------
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r3, &create_runtime);
-
- // Load the map of argumentsList into r5.
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
-
- // Load native context into r7.
- __ LoadP(r7, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ LoadP(ip, ContextMemOperand(r7, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r5);
- __ beq(&create_arguments);
- __ LoadP(ip, ContextMemOperand(r7, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ cmp(ip, r5);
- __ beq(&create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r5, ip, JS_ARRAY_TYPE);
- __ beq(&create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r6, r3);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r4, r6);
- __ LoadP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ SmiUntag(r5);
- }
- __ b(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ LoadP(r5, FieldMemOperand(r3, JSArgumentsObject::kLengthOffset));
- __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
- __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
- __ cmp(r5, ip);
- __ bne(&create_runtime);
- __ SmiUntag(r5);
- __ mr(r3, r7);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ LoadP(r5, FieldMemOperand(r5, Map::kPrototypeOffset));
- __ LoadP(r7, ContextMemOperand(r7, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(r5, r7);
- __ bne(&create_runtime);
- __ LoadRoot(r7, Heap::kArrayProtectorRootIndex);
- __ LoadP(r5, FieldMemOperand(r7, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(r5, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&create_runtime);
- __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ SmiUntag(r5);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r5 and r7 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ lbz(r8, FieldMemOperand(r5, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r8);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmpi(r8, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ TestBit(r8, Map::kHasNonInstancePrototype, r0);
- __ bne(&create_holey_array, cr0);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ SmiUntag(r5);
-
- __ bind(&done_create);
- }
-
+ __ AssertFixedArray(r5);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2202,53 +2173,40 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ sub(ip, sp, ip);
// Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(r0, r7, Operand(kPointerSizeLog2));
__ cmp(ip, r0); // Signed comparison.
__ bgt(&done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r4 : target
- // -- r3 : args (a FixedArray built from argumentsList)
- // -- r5 : len (number of elements to push from args)
- // -- r6 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
Label loop, no_args, skip;
- __ cmpi(r5, Operand::Zero());
+ __ cmpi(r7, Operand::Zero());
__ beq(&no_args);
- __ addi(r3, r3,
+ __ addi(r5, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ mtctr(r5);
+ __ mtctr(r7);
__ bind(&loop);
- __ LoadPU(ip, MemOperand(r3, kPointerSize));
+ __ LoadPU(ip, MemOperand(r5, kPointerSize));
__ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
- __ mr(ip, r9);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ bind(&skip);
__ push(ip);
__ bdnz(&loop);
__ bind(&no_args);
- __ mr(r3, r5);
+ __ add(r3, r3, r7);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r6, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r6 : the new.target (for [[Construct]] calls)
@@ -2275,16 +2233,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
-#if V8_TARGET_ARCH_PPC64
__ SmiUntag(r8);
-#endif
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r8);
-#endif
__ sub(r8, r8, r5);
__ cmpi(r8, Operand::Zero());
__ ble(&stack_done);
@@ -2317,107 +2270,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ lbz(scratch1, MemOperand(scratch1));
- __ cmpi(scratch1, Operand::Zero());
- __ beq(&done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ LoadP(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ bne(&no_interpreter_frame);
- __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mr(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ LoadP(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ LoadP(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(caller_args_count_reg);
-#endif
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
@@ -2429,9 +2284,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
__ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
@@ -2440,8 +2293,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
+ __ andi(r0, r6,
+ Operand(SharedFunctionInfo::IsStrictBit::kMask |
+ SharedFunctionInfo::IsNativeBit::kMask));
__ bne(&done_convert, cr0);
{
// ----------- S t a t e -------------
@@ -2506,15 +2360,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r3, r6, r7, r8);
- }
-
__ LoadWordArith(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r5);
-#endif
ParameterCount actual(r3);
ParameterCount expected(r5);
__ InvokeFunctionCode(r4, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2612,18 +2459,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r4);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r3, r6, r7, r8);
- }
-
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
@@ -2643,8 +2485,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
@@ -2654,10 +2495,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2665,22 +2506,14 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ TestBit(r7, Map::kIsCallable, r0);
__ beq(&non_callable, cr0);
+ // Check if the target is a proxy and, if so, call the CallProxy external builtin
__ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r3, r6, r7, r8);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r4);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ addi(r3, r3, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(r8, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ LoadP(r8, MemOperand(r8));
+ __ addi(r8, r8, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r8);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2691,7 +2524,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2703,156 +2536,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r3;
- Register constructor = r4;
- Register new_target = r6;
-
- Register scratch = r5;
- Register scratch2 = r9;
-
- Register spread = r7;
- Register spread_map = r8;
- Register spread_len = r8;
- Label runtime_call, push_args;
- __ LoadP(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ bne(&runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ cmp(scratch, scratch2);
- __ bne(&runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ cmp(scratch, scratch2);
- __ bne(&runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ lbz(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmpi(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmpi(scratch, Operand(FAST_SMI_ELEMENTS));
- __ beq(&no_protector_check);
- __ cmpi(scratch, Operand(FAST_ELEMENTS));
- __ beq(&no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mr(spread, r3);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ add(argc, argc, spread_len);
- __ subi(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, spread_len, Operand(kPointerSizeLog2));
- __ cmp(scratch, r0);
- __ bgt(&done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ li(scratch, Operand::Zero());
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ beq(&done);
- __ ShiftLeftImm(r0, scratch, Operand(kPointerSizeLog2));
- __ add(scratch2, spread, r0);
- __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ addi(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r6 to save it.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2973,18 +2656,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (can be any Object)
- // -- r6 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 2148f11105..f6bd0af3bf 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -226,7 +226,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -376,7 +376,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -424,22 +424,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ bge(&ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -543,16 +527,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6,
- FunctionKind::kDerivedConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kDerivedConstructorMask, r0);
__ bne(&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r6, r7);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -669,10 +650,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6,
- FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
__ beq(&use_receiver);
} else {
__ b(&use_receiver);
@@ -726,35 +704,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r2 : the value to pass to the generator
// -- r3 : the JSGeneratorObject to resume
// -- r4 : the resume mode (tagged)
- // -- r5 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r5);
- __ AssertGeneratorObject(r3, r5);
+ __ AssertGeneratorObject(r3);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ tmll(r5, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ b(Condition(1), &async_await);
-
__ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&async_await);
- __ StoreP(
- r2,
- FieldMemOperand(r3, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- r0);
- __ RecordWriteField(r3, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r2, r5, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&done_store_input);
- // `r5` no longer holds SuspendFlags
// Store resume mode into generator object.
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
@@ -811,7 +769,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CmpP(r2, Operand::Zero());
__ beq(&done_loop);
#else
- __ SmiUntag(r2);
__ LoadAndTestP(r2, r2);
__ beq(&done_loop);
#endif
@@ -913,7 +870,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
@@ -1036,6 +993,118 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ AddP(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ bne(&no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee if needed, and caller)
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r3;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimization marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ beq(&fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(
+ optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&fallthrough, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadW(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1055,43 +1124,35 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r3;
+ Register feedback_vector = r4;
+
+ // Load the feedback vector from the closure.
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r3);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
-
- Register optimized_code_entry = r6;
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- Label array_done;
- Register debug_info = r4;
- DCHECK(!debug_info.is(r2));
- __ LoadP(debug_info,
- FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
- __ TestIfSmi(debug_info);
- __ beq(&array_done);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ bind(&array_done);
+ __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ __ TestIfSmi(r6);
+ __ bne(&maybe_load_debug_bytecode_array);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1102,15 +1163,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
- __ LoadP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
- __ StoreP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ StoreP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1184,40 +1245,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r4);
__ Ret();
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with the value at
+ // SharedFunctionInfo::kFunctionDataOffset.
+ Label done;
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ip);
+ __ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
+ __ beq(&done);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
+ __ bind(&done);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
__ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(r3, r6, r7);
+ __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, r6, r7);
__ JumpToJSEntry(r6);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ LoadlW(r7, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
- r4);
- __ JumpToJSEntry(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1253,7 +1305,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1275,18 +1327,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments.
Generate_InterpreterPushArgs(masm, r5, r4, r5, r6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r4); // Pass the spread in a register
+ __ SubP(r2, r2, Operand(1)); // Subtract one for spread
+ }
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1322,7 +1377,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Generate_InterpreterPushArgs(masm, r2, r6, r2, r7);
__ bind(&skip);
- __ AssertUndefinedOrAllocationSite(r4, r7);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r4); // Pass the spread in a register
+ __ SubP(r2, r2, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r4, r7);
+ }
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r3);
@@ -1451,6 +1511,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r5 : new target (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r3;
+
+ // Get the feedback vector.
+ Register feedback_vector = r4;
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1459,43 +1547,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r3;
- Register index = r4;
+ Register feedback_vector = r4;
// Do we have a valid feedback vector?
- __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r6;
- __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ LoadlW(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
- __ JumpToJSEntry(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ LoadlB(r7, FieldMemOperand(
- entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
- __ bne(&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1513,15 +1583,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1668,30 +1729,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Preserve possible return result from lazy deopt.
+ __ push(r2);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r2);
+ }
+
+ __ AddP(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Ret(); // Jump to ContinueToBuiltin stub
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ StoreP(
+ r2, MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
}
+ __ LoadP(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip);
+ __ AddP(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(r0);
+ __ LoadRR(r14, r0);
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+} // namespace
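+// The four builtins below are thin wrappers over the helper; roughly
+// (illustrative summary of the calls that follow, not generated code):
+//
+//   ContinueToCodeStubBuiltin             -> helper(java_script_builtin=false, with_result=false)
+//   ContinueToCodeStubBuiltinWithResult   -> helper(java_script_builtin=false, with_result=true)
+//   ContinueToJavaScriptBuiltin           -> helper(java_script_builtin=true,  with_result=false)
+//   ContinueToJavaScriptBuiltinWithResult -> helper(java_script_builtin=true,  with_result=true)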
- __ la(sp, MemOperand(sp, kPointerSize)); // Ignore state
- __ Ret(); // Jump to miss handler
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1811,52 +1912,47 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r3, argArray into r2 (if present), remove all
+ // 1. Load receiver into r3, argArray into r4 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ LoadRR(scratch, r2);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(r4, scratch);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r2 : argArray
+ // -- r4 : argArray
// -- r3 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r3, &receiver_not_callable);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
- __ beq(&receiver_not_callable);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1865,13 +1961,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadImmP(r2, Operand::Zero());
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1927,19 +2016,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // 1. Load target into r3 (if present), argumentsList into r4 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ LoadRR(scratch, r3);
- __ LoadRR(r2, r3);
+ __ LoadRR(r4, r3);
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
@@ -1947,37 +2036,25 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r2 : argumentsList
+ // -- r4 : argumentsList
// -- r3 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r3, &target_not_callable);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
- __ beq(&target_not_callable);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1989,18 +2066,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // 1. Load target into r3 (if present), argumentsList into r4 (if present),
// new.target into r5 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ LoadRR(r2, r3);
+ __ LoadRR(r4, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
__ CmpP(arg_size, Operand(kPointerSize));
@@ -2008,7 +2085,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ LoadRR(r5, r3); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
@@ -2017,44 +2094,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- r2 : argumentsList
+ // -- r4 : argumentsList
// -- r5 : new.target
// -- r3 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r3, &target_not_constructor);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsConstructor);
- __ beq(&target_not_constructor);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r5, &new_target_not_constructor);
- __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsConstructor);
- __ beq(&new_target_not_constructor);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ StoreP(r5, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2095,99 +2151,17 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r2 : argumentsList
- // -- r3 : target
- // -- r5 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r3 : target
+ // -- r2 : number of parameters on the stack (not including the receiver)
+ // -- r4 : arguments list (a FixedArray)
+ // -- r6 : len (number of elements to push from args)
+ // -- r5 : new.target (for [[Construct]])
// -----------------------------------
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r2, &create_runtime);
-
- // Load the map of argumentsList into r4.
- __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- // Load native context into r6.
- __ LoadP(r6, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ CmpP(ip, r4);
- __ beq(&create_arguments);
- __ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ CmpP(ip, r4);
- __ beq(&create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
- __ beq(&create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r5, r2);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r3, r5);
- __ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ SmiUntag(r4);
- }
- __ b(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
- __ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
- __ CmpP(r4, ip);
- __ bne(&create_runtime);
- __ SmiUntag(r4);
- __ LoadRR(r2, r6);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ LoadP(r4, FieldMemOperand(r4, Map::kPrototypeOffset));
- __ LoadP(r6, ContextMemOperand(r6, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ CmpP(r4, r6);
- __ bne(&create_runtime);
- __ LoadRoot(r6, Heap::kArrayProtectorRootIndex);
- __ LoadP(r4, FieldMemOperand(r6, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&create_runtime);
- __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
- __ SmiUntag(r4);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r4 and r6 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ LoadlB(r7, FieldMemOperand(r4, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r7);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ CmpP(r7, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ TestBit(r7, Map::kHasNonInstancePrototype, r0);
- __ bne(&create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
- __ SmiUntag(r4);
-
- __ bind(&done_create);
- }
-
+ __ AssertFixedArray(r4);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2198,54 +2172,41 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ SubP(ip, sp, ip);
// Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r0, r6, Operand(kPointerSizeLog2));
__ CmpP(ip, r0); // Signed comparison.
__ bgt(&done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r3 : target
- // -- r2 : args (a FixedArray built from argumentsList)
- // -- r4 : len (number of elements to push from args)
- // -- r5 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
Label loop, no_args, skip;
- __ CmpP(r4, Operand::Zero());
+ __ CmpP(r6, Operand::Zero());
__ beq(&no_args);
- __ AddP(r2, r2,
+ __ AddP(r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ LoadRR(r1, r4);
+ __ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r2, kPointerSize));
- __ la(r2, MemOperand(r2, kPointerSize));
+ __ LoadP(ip, MemOperand(r4, kPointerSize));
+ __ la(r4, MemOperand(r4, kPointerSize));
__ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
__ bne(&skip, Label::kNear);
- __ LoadRR(ip, r8);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ bind(&skip);
__ push(ip);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
- __ LoadRR(r2, r4);
+ __ AddP(r2, r2, r6);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r5 : the new.target (for [[Construct]] calls)
@@ -2271,16 +2232,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
-#if V8_TARGET_ARCH_S390X
__ SmiUntag(r7);
-#endif
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r7);
-#endif
__ SubP(r7, r7, r4);
__ CmpP(r7, Operand::Zero());
__ ble(&stack_done);
@@ -2313,106 +2269,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is active.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ LoadlB(scratch1, MemOperand(scratch1));
- __ CmpP(scratch1, Operand::Zero());
- __ beq(&done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ LoadP(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ bne(&no_interpreter_frame);
- __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ LoadRR(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ LoadP(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ LoadP(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(caller_args_count_reg);
-#endif
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSFunction)
@@ -2424,9 +2283,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorMask, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2435,8 +2292,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
+ __ AndP(r0, r5,
+ Operand(SharedFunctionInfo::IsStrictBit::kMask |
+ SharedFunctionInfo::IsNativeBit::kMask));
__ bne(&done_convert);
{
// ----------- S t a t e -------------
@@ -2501,15 +2359,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
__ LoadW(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r4);
-#endif
ParameterCount actual(r2);
ParameterCount expected(r4);
__ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2609,18 +2460,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r3);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
@@ -2640,8 +2486,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the target to call (can be any Object).
@@ -2651,10 +2496,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r3, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2662,22 +2507,14 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ TestBit(r6, Map::kIsCallable);
__ beq(&non_callable);
+ // Check if the target is a proxy and call the CallProxy external builtin
__ CmpP(r7, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r3);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ AddP(r2, r2, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(r7, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ LoadP(r7, MemOperand(r7));
+ __ AddP(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r7);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2688,7 +2525,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2700,156 +2537,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r2;
- Register constructor = r3;
- Register new_target = r5;
-
- Register scratch = r4;
- Register scratch2 = r8;
-
- Register spread = r6;
- Register spread_map = r7;
- Register spread_len = r7;
- Label runtime_call, push_args;
- __ LoadP(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ bne(&runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ CmpP(scratch, scratch2);
- __ bne(&runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ CmpP(scratch, scratch2);
- __ bne(&runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ LoadlB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
- __ beq(&no_protector_check);
- __ CmpP(scratch, Operand(FAST_ELEMENTS));
- __ beq(&no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ LoadRR(spread, r2);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ AddP(argc, argc, spread_len);
- __ SubP(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ SubP(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
- __ CmpP(scratch, r0);
- __ bgt(&done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ LoadImmP(scratch, Operand::Zero());
- Label done, push, loop;
- __ bind(&loop);
- __ CmpP(scratch, spread_len);
- __ beq(&done);
- __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
- __ AddP(scratch2, spread, r0);
- __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ AddP(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
- // -- r3 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r5 to save it.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2970,18 +2657,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
- // -- r3 : the constructor to call (can be any Object)
- // -- r5 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index ca88e6332b..a191bcadf5 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -41,13 +41,16 @@ Code* BuildWithMacroAssembler(Isolate* isolate,
MacroAssemblerGenerator generator,
Code::Flags flags, const char* s_name) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
const size_t buffer_size = 32 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
DCHECK(!masm.has_frame());
generator(&masm);
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm.CodeObject());
PostBuildProfileAndTracing(isolate, *code, s_name);
@@ -58,13 +61,16 @@ Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
Builtins::ExitFrameType exit_frame_type, Code::Flags flags,
const char* name) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
const size_t buffer_size = 32 * KB;
byte buffer[buffer_size]; // NOLINT(runtime/arrays)
MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
DCHECK(!masm.has_frame());
Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm.CodeObject());
PostBuildProfileAndTracing(isolate, *code, name);
@@ -76,6 +82,9 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
CodeAssemblerGenerator generator, int argc,
Code::Flags flags, const char* name) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
@@ -94,6 +103,9 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
Code::Flags flags, const char* name,
int result_size) {
HandleScope scope(isolate);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index d4fb131afc..bedfcfc59c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -98,22 +98,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ jmp(rbx);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -215,13 +199,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kDerivedConstructorMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ jmp(&post_instantiation_deopt_entry, Label::kNear);
@@ -328,16 +312,20 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &leave_frame, Label::kNear);
- __ bind(&other_result);
// The result is now neither undefined nor an object.
+ __ bind(&other_result);
+ __ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
+
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
- __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ j(Condition::zero, &use_receiver, Label::kNear);
} else {
+ __ j(not_zero, &use_receiver, Label::kNear);
+ __ CallRuntime(
+ Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ jmp(&use_receiver, Label::kNear);
}
@@ -452,7 +440,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ movp(rsi, masm->ExternalOperand(context_address));
@@ -489,7 +477,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ movp(rsi, masm->ExternalOperand(context_address));
@@ -563,34 +551,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- rax : the value to pass to the generator
// -- rbx : the JSGeneratorObject to resume
// -- rdx : the resume mode (tagged)
- // -- rcx : the SuspendFlags of the earlier suspend call (tagged)
// -- rsp[0] : return address
// -----------------------------------
- // Untag rcx
- __ shrq(rcx, Immediate(kSmiTagSize + kSmiShiftSize));
- __ AssertGeneratorObject(rbx, rcx);
+ __ AssertGeneratorObject(rbx);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ andb(rcx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ cmpb(rcx, Immediate(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ j(equal, &async_await);
-
__ movp(FieldOperand(rbx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ RecordWriteField(rbx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
kDontSaveFPRegs);
- __ j(always, &done_store_input, Label::kNear);
-
- __ bind(&async_await);
- __ movp(
- FieldOperand(rbx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- rax);
- __ RecordWriteField(rbx, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- rax, rcx, kDontSaveFPRegs);
-
- __ bind(&done_store_input);
- // `rcx` no longer holds SuspendFlags
// Store resume mode into generator object.
__ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
@@ -637,8 +605,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// values have already been copied into the context and these dummy values
// will never be used.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movl(rcx,
+ FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
{
Label done_loop, loop;
__ bind(&loop);
@@ -661,8 +629,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
__ PushReturnAddressFrom(rax);
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movsxlq(rax, FieldOperand(
+ rax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -747,6 +715,117 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ PushReturnAddressFrom(return_pc);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ SmiCompare(smi_entry, Smi::FromEnum(marker));
+ __ j(not_equal, &no_match, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee if needed, and caller)
+ // -- rdx : new target (preserved for callee if needed, and caller)
+ // -- rdi : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, rax, rdx, rdi, scratch1, scratch2,
+ scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = rdi;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ movp(optimized_code_entry,
+ FieldOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+  // Check if the code entry is a Smi. If yes, we interpret it as an
+  // optimization marker. Otherwise, we interpret it as a weak cell to a
+  // code object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ SmiCompare(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone));
+ __ j(equal, &fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ SmiCompare(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
+ __ Assert(equal, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &fallthrough);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ movp(optimized_code_entry,
+ FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ testl(
+ FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &found_deoptimized_code);
+
+    // Optimized code is good; get it into the closure and link the closure
+    // into the optimized functions list, then tail call the optimized code.
+    // The feedback vector is no longer used, so reuse it as a scratch
+    // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ jmp(optimized_code_entry);
+
+    // The optimized code slot contains deoptimized code; evict it and
+    // re-enter the closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+  // Fall through if the optimized code cell is clear and there is no
+  // optimization marker.
+ __ bind(&fallthrough);
+}
+
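// A rough, self-contained C++ sketch of the dispatch that
// MaybeTailCallOptimizedCodeSlot emits above: the feedback vector's
// optimized-code slot holds either a small-integer optimization marker (the
// Smi case) or a weak reference to optimized code. Every type and helper
// below is an illustrative stand-in, not a real V8 API.
#include <functional>

enum class Marker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue
};

struct OptimizedCodeSlot {
  bool is_marker;         // True for the Smi case in the generated code.
  Marker marker;          // Valid only when is_marker is true.
  const void* code;       // WeakCell value; nullptr models a cleared cell.
  bool marked_for_deopt;  // Models Code::kMarkedForDeoptimizationBit.
};

// Returns true if control was transferred (a tail call in the real builtin);
// false means "fall through" to the interpreter entry below.
inline bool MaybeUseOptimizedCodeSlot(
    const OptimizedCodeSlot& slot,
    const std::function<void(Marker)>& tail_call_runtime,
    const std::function<void(const void*)>& tail_call_code) {
  if (slot.is_marker) {
    if (slot.marker == Marker::kNone) return false;  // No trigger.
    tail_call_runtime(slot.marker);  // Compile (non-)concurrently or install.
    return true;
  }
  if (slot.code == nullptr) return false;  // Cleared weak cell: fall through.
  if (slot.marked_for_deopt) {
    tail_call_runtime(Marker::kNone);  // Models Runtime::kEvictOptimizedCodeSlot.
    return true;
  }
  tail_call_code(slot.code);  // Install in the closure and jump to the code.
  return true;
}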
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -764,6 +843,17 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = rdi;
+ Register feedback_vector = rbx;
+
+ // Load the feedback vector from the closure.
+ __ movp(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -774,26 +864,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
- Register optimized_code_entry = rcx;
- __ movp(rbx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ movp(rbx,
- FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ movp(optimized_code_entry, FieldOperand(rbx, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
-
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
- &load_debug_bytecode_array);
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+ __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+ &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
@@ -805,11 +883,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &switch_to_different_code_kind);
// Increment invocation count for the function.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
- __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
__ SmiAddConstant(
- FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
+ FieldOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize),
Smi::FromInt(1));
// Check function data field is actually a BytecodeArray object.
@@ -881,12 +958,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
- Register debug_info = kInterpreterBytecodeArrayRegister;
- __ movp(debug_info, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+  // Load the debug copy of the bytecode array if one exists.
+  // kInterpreterBytecodeArrayRegister already holds the value loaded from
+  // SharedFunctionInfo::kFunctionDataOffset.
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ movp(rcx, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+ __ SmiToInteger32(kScratchRegister,
+ FieldOperand(rcx, DebugInfo::kFlagsOffset));
+ __ testl(kScratchRegister, Immediate(DebugInfo::kHasBreakInfo));
+ __ j(zero, &bytecode_array_loaded);
__ movp(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
+ FieldOperand(rcx, DebugInfo::kDebugBytecodeArrayOffset));
__ jmp(&bytecode_array_loaded);
// If the shared code is no longer this entry trampoline, then the underlying
@@ -900,28 +982,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
__ RecordWriteCodeEntryField(rdi, rcx, r15);
__ jmp(rcx);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ leave();
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ testl(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, rdi, r14,
- r15, rbx);
- __ jmp(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(
@@ -967,7 +1027,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -996,18 +1056,22 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// rbx and rdx will be modified.
Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(rbx); // Pass the spread in a register
+ __ subp(rax, Immediate(1)); // Subtract one for spread
+ }
+
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(receiver_mode,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(receiver_mode),
RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(receiver_mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(receiver_mode),
RelocInfo::CODE_TARGET);
}
@@ -1047,10 +1111,17 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// rcx and r8 will be modified.
Generate_InterpreterPushArgs(masm, rax, rcx, r8);
- // Push return address in preparation for the tail-call.
- __ PushReturnAddressFrom(kScratchRegister);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(rbx); // Pass the spread in a register
+ __ subp(rax, Immediate(1)); // Subtract one for spread
+
+ // Push return address in preparation for the tail-call.
+ __ PushReturnAddressFrom(kScratchRegister);
+ } else {
+ __ PushReturnAddressFrom(kScratchRegister);
+ __ AssertUndefinedOrAllocationSite(rbx);
+ }
- __ AssertUndefinedOrAllocationSite(rbx);
if (mode == InterpreterPushArgsMode::kJSFunction) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1193,6 +1264,33 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = rdi;
+
+ // Get the feedback vector.
+ Register feedback_vector = rbx;
+ __ movp(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
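// A short C++ sketch of Generate_CheckOptimizationMarker, reusing the
// illustrative OptimizedCodeSlot/Marker declarations from the sketch above:
// try the optimized-code slot first, otherwise run the code attached to the
// SharedFunctionInfo. The callbacks are hypothetical stand-ins.
inline void CheckOptimizationMarker(
    const OptimizedCodeSlot& slot,
    const std::function<void(Marker)>& tail_call_runtime,
    const std::function<void(const void*)>& tail_call_code,
    const std::function<void()>& tail_call_shared_code) {
  if (MaybeUseOptimizedCodeSlot(slot, tail_call_runtime, tail_call_code)) {
    return;
  }
  tail_call_shared_code();  // GenerateTailCallToSharedCode in the builtin.
}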
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1201,40 +1299,23 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = rdi;
+ Register feedback_vector = rbx;
// Do we have a valid feedback vector?
- __ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ movp(feedback_vector,
+ FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = rcx;
- __ movp(entry,
- FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ testl(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
- Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r14, r15, rbx);
- __ jmp(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = rcx;
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
- Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1252,15 +1333,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
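// A C++ sketch of the decision flow in Generate_CompileLazy above, again
// reusing the illustrative declarations from the earlier sketch. The boolean
// flags and callbacks are hypothetical stand-ins for the checks the builtin
// performs on the closure and its SharedFunctionInfo.
inline void CompileLazy(
    bool has_feedback_vector, const OptimizedCodeSlot& slot,
    bool sfi_code_is_compile_lazy,  // SFI code still points at this builtin.
    const std::function<void(Marker)>& tail_call_runtime,
    const std::function<void(const void*)>& tail_call_code,
    const std::function<void()>& install_and_run_sfi_code,
    const std::function<void()>& call_runtime_compile_lazy) {
  if (has_feedback_vector &&
      MaybeUseOptimizedCodeSlot(slot, tail_call_runtime, tail_call_code)) {
    return;  // Optimization marker or optimized code handled the call.
  }
  if (has_feedback_vector && !sfi_code_is_compile_lazy) {
    install_and_run_sfi_code();  // Install the SFI code in the closure, jump.
    return;
  }
  call_runtime_compile_lazy();  // Runtime::kCompileLazy.
}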
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1398,31 +1470,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ Popad();
+ // Preserve possible return result from lazy deopt.
+ __ pushq(rax);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ popq(rax);
// Tear down internal frame.
}
__ DropUnderReturnAddress(1); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
+  __ ret(0);  // Return to the ContinueToBuiltin stub still on the stack.
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ movq(Operand(rsp,
+ config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize),
+ rax);
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ popq(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiToInteger32(Register::from_code(code), Register::from_code(code));
+ }
+ }
+ __ movq(
+ rbp,
+ Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ const int offsetToPC =
+ BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp - kPointerSize;
+ __ popq(Operand(rsp, offsetToPC));
+ __ Drop(offsetToPC / kPointerSize);
+ __ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ Ret();
}
+} // namespace
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1482,7 +1593,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- rsp[24] : receiver
// -----------------------------------
- // 1. Load receiver into rdi, argArray into rax (if present), remove all
+ // 1. Load receiver into rdi, argArray into rbx (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
@@ -1505,34 +1616,28 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// ----------- S t a t e -------------
- // -- rax : argArray
+ // -- rbx : argArray
// -- rdi : receiver
// -- rsp[0] : return address
// -- rsp[8] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(rdi, &receiver_not_callable, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &receiver_not_callable, Label::kNear);
+  // 2. We don't need to check explicitly for a callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(rax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ __ JumpIfRoot(rbx, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &no_arguments,
Label::kNear);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver. Since we did not create a frame for
@@ -1542,14 +1647,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Set(rax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1614,7 +1711,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- rsp[32] : receiver
// -----------------------------------
- // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // 1. Load target into rdi (if present), argumentsList into rbx (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
@@ -1636,36 +1733,22 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// ----------- S t a t e -------------
- // -- rax : argumentsList
+ // -- rbx : argumentsList
// -- rdi : target
// -- rsp[0] : return address
// -- rsp[8] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(rdi, &target_not_callable, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &target_not_callable, Label::kNear);
+  // 2. We don't need to check explicitly for a callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1678,7 +1761,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- rsp[32] : receiver
// -----------------------------------
- // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // 1. Load target into rdi (if present), argumentsList into rbx (if present),
// new.target into rdx (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
@@ -1702,51 +1785,27 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// ----------- S t a t e -------------
- // -- rax : argumentsList
+ // -- rbx : argumentsList
// -- rdx : new.target
// -- rdi : target
// -- rsp[0] : return address
// -- rsp[8] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(rdi, &target_not_constructor, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &target_not_constructor, Label::kNear);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
- __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &new_target_not_constructor, Label::kNear);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+  // 2. We don't need to check explicitly for a constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+  // 3. We don't need to check explicitly for a constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdx);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
@@ -1918,7 +1977,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
@@ -2073,7 +2132,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
@@ -2268,94 +2327,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- rax : argumentsList
// -- rdi : target
- // -- rdx : new.target (checked to be constructor or undefined)
- // -- rsp[0] : return address.
- // -- rsp[8] : thisArgument
+ // -- rax : number of parameters on the stack (not including the receiver)
+ // -- rbx : arguments list (a FixedArray)
+ // -- rcx : len (number of elements to push from args)
+ // -- rdx : new.target (for [[Construct]])
+ // -- rsp[0] : return address
// -----------------------------------
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(rax, &create_runtime);
-
- // Load the map of argumentsList into rcx.
- __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
-
- // Load native context into rbx.
- __ movp(rbx, NativeContextOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ cmpp(rcx, ContextOperand(rbx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
- __ cmpp(rcx, ContextOperand(rbx, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
- __ j(equal, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ Push(rdx);
- __ Push(rax);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(rdx);
- __ Pop(rdi);
- __ SmiToInteger32(rbx, FieldOperand(rax, FixedArray::kLengthOffset));
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ movp(rbx, FieldOperand(rax, JSArgumentsObject::kLengthOffset));
- __ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
- __ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(not_equal, &create_runtime);
- __ SmiToInteger32(rbx, rbx);
- __ movp(rax, rcx);
- __ jmp(&done_create);
-
- __ bind(&create_holey_array);
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ cmpp(rcx, ContextOperand(rbx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &create_runtime);
- __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
- __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- __ j(not_equal, &create_runtime);
- __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
- __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
- __ jmp(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(rcx);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_holey_array);
- __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(equal, &create_holey_array);
- __ j(above, &create_runtime);
- __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
- __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
+ __ AssertFixedArray(rbx);
// Check for stack overflow.
{
@@ -2363,61 +2345,48 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, kScratchRegister);
- __ sarp(rcx, Immediate(kPointerSizeLog2));
+ __ movp(r8, rsp);
+ // Make r8 the space we have left. The stack might already be overflowed
+ // here which will cause r8 to become negative.
+ __ subp(r8, kScratchRegister);
+ __ sarp(r8, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(rcx, rbx);
+ __ cmpp(r8, rcx);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- rdi : target
- // -- rax : args (a FixedArray built from argumentsList)
- // -- rbx : len (number of elements to push from args)
- // -- rdx : new.target (checked to be constructor or undefined)
- // -- rsp[0] : return address.
- // -- rsp[8] : thisArgument
- // -----------------------------------
-
- // Push arguments onto the stack (thisArgument is already on the stack).
+ // Push additional arguments onto the stack.
{
__ PopReturnAddressTo(r8);
- __ Set(rcx, 0);
+ __ Set(r9, 0);
Label done, push, loop;
__ bind(&loop);
- __ cmpl(rcx, rbx);
+ __ cmpl(r9, rcx);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
- __ movp(r9, FieldOperand(rax, rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ movp(r11,
+ FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &push, Label::kNear);
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
__ bind(&push);
- __ Push(r9);
- __ incl(rcx);
+ __ Push(r11);
+ __ incl(r9);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(r8);
- __ Move(rax, rcx);
+ __ addq(rax, r9);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
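// A self-contained C++ sketch of the two steps in
// Generate_CallOrConstructVarargs above: the "real stack limit" overflow
// check, and the push loop that turns holes into undefined. The sentinels and
// the 8-byte slot size are illustrative stand-ins for the x64 values used in
// the generated code.
#include <cstdint>
#include <vector>

constexpr intptr_t kTheHoleSentinel = -1;   // Models Heap::kTheHoleValueRootIndex.
constexpr intptr_t kUndefinedSentinel = 0;  // Models Heap::kUndefinedValueRootIndex.

// Pushing |len| slots must leave us above the real stack limit; a signed
// comparison also handles a stack that is already overflowed.
inline bool ArgumentsWouldOverflowStack(intptr_t sp, intptr_t real_stack_limit,
                                        intptr_t len) {
  intptr_t slots_left = (sp - real_stack_limit) >> 3;  // kPointerSizeLog2 == 3.
  return slots_left <= len;
}

// Append |len| elements of |args|, turning holes into undefined as we go, and
// return the new argument count (rax += r9 in the generated code).
inline size_t PushVarargs(std::vector<intptr_t>* stack,
                          const std::vector<intptr_t>& args, size_t len,
                          size_t argc) {
  for (size_t i = 0; i < len; ++i) {
    intptr_t value = args[i];
    stack->push_back(value == kTheHoleSentinel ? kUndefinedSentinel : value);
  }
  return argc + len;
}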
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdx : the new target (for [[Construct]] calls)
@@ -2434,8 +2403,8 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
__ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- r8, r8, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movl(r8,
+ FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rbx, rbp);
}
__ jmp(&arguments_done, Label::kNear);
@@ -2477,98 +2446,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg
-// | f()'s caller pc <- sp
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is active.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ Move(kScratchRegister, is_tail_call_elimination_enabled);
- __ cmpb(Operand(kScratchRegister, 0), Immediate(0));
- __ j(equal, &done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ j(not_equal, &no_interpreter_frame, Label::kNear);
- __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ movp(rbp, scratch2);
- __ SmiToInteger32(
- caller_args_count_reg,
- Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ movp(scratch1, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(scratch1,
- FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- caller_args_count_reg, scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
@@ -2580,8 +2460,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorMask));
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
@@ -2593,14 +2473,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ Immediate(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
@@ -2666,12 +2544,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rsi : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, rax, rbx, rcx, r8);
- }
-
- __ LoadSharedFunctionInfoSpecialField(
- rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movsxlq(
+ rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(rax);
ParameterCount expected(rbx);
@@ -2772,18 +2646,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(rdi);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, rax, rbx, rcx, r8);
- }
-
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rsp, rax);
__ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
@@ -2801,8 +2670,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
@@ -2813,10 +2681,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(rdi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET);
// Check if target has a [[Call]] internal method.
@@ -2824,24 +2692,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
Immediate(1 << Map::kIsCallable));
__ j(zero, &non_callable);
+  // If the target is a proxy, tail call the CallProxy builtin.
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, rax, rbx, rcx, r8);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(rdi);
- __ PushReturnAddressFrom(kScratchRegister);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ addp(rax, Immediate(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ Load(rcx, ExternalReference(Builtins::kCallProxy, masm->isolate()));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2851,7 +2708,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2863,148 +2720,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Label runtime_call, push_args;
- // Load the spread argument into rbx.
- __ movp(rbx, Operand(rsp, kPointerSize));
- __ JumpIfSmi(rbx, &runtime_call);
- // Load the map of the spread into r15.
- __ movp(r15, FieldOperand(rbx, HeapObject::kMapOffset));
- // Load native context into r14.
- __ movp(r14, NativeContextOperand());
-
- // Check that the spread is an array.
- __ CmpInstanceType(r15, JS_ARRAY_TYPE);
- __ j(not_equal, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ movp(rcx, FieldOperand(r15, Map::kPrototypeOffset));
- __ cmpp(rcx, ContextOperand(r14, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(rcx, Heap::kArrayIteratorProtectorRootIndex);
- __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- __ j(not_equal, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ movp(rcx,
- ContextOperand(r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ cmpp(rcx, ContextOperand(
- r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ movzxbp(rcx, FieldOperand(r15, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(rcx);
- __ cmpp(rcx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(above, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmpp(rcx, Immediate(FAST_SMI_ELEMENTS));
- __ j(equal, &no_protector_check);
- __ cmpp(rcx, Immediate(FAST_ELEMENTS));
- __ j(equal, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
- __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- __ j(not_equal, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ SmiToInteger32(r9, FieldOperand(rbx, JSArray::kLengthOffset));
- __ movp(rbx, FieldOperand(rbx, JSArray::kElementsOffset));
- __ jmp(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi); // target
- __ Push(rdx); // new target
- __ Integer32ToSmi(rax, rax);
- __ Push(rax); // nargs
- __ Push(rbx);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ movp(rbx, rax);
- __ Pop(rax); // nargs
- __ SmiToInteger32(rax, rax);
- __ Pop(rdx); // new target
- __ Pop(rdi); // target
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ SmiToInteger32(r9, FieldOperand(rbx, FixedArray::kLengthOffset));
-
- __ bind(&push_args);
- // rax += r9 - 1. Subtract 1 for the spread itself.
- __ leap(rax, Operand(rax, r9, times_1, -1));
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, kScratchRegister);
- __ sarp(rcx, Immediate(kPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpp(rcx, r9);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- // Pop the return address and spread argument.
- __ PopReturnAddressTo(r8);
- __ Pop(rcx);
-
- __ Set(rcx, 0);
- Label done, push, loop;
- __ bind(&loop);
- __ cmpl(rcx, r9);
- __ j(equal, &done, Label::kNear);
- __ movp(kScratchRegister, FieldOperand(rbx, rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(kScratchRegister);
- __ incl(rcx);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(r8);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rdi : the target to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push rdx to save it.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3127,19 +2842,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rdx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- rdi : the constructor to call (can be any Object)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
diff --git a/deps/v8/src/builtins/x87/OWNERS b/deps/v8/src/builtins/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/builtins/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
deleted file mode 100644
index 55b5dc4f56..0000000000
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ /dev/null
@@ -1,3183 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/code-factory.h"
-#include "src/codegen.h"
-#include "src/deoptimizer.h"
-#include "src/full-codegen/full-codegen.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments excluding receiver
- // -- edi : target
- // -- edx : new.target
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[4 * argc] : first argument
- // -- esp[4 * (argc +1)] : receiver
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make sure we operate in the context of the called function (for example
- // ConstructStubs implemented in C++ will be run in the context of the caller
- // instead of the callee, due to the way that [[Construct]] is defined for
- // ordinary functions).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
- const int num_extra_args = 3;
- __ add(eax, Immediate(num_extra_args + 1));
-
- // Insert extra arguments.
- __ PopReturnAddressTo(ecx);
- __ SmiTag(eax);
- __ Push(eax);
- __ SmiUntag(eax);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
-
- __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
- exit_frame_type == BUILTIN_EXIT);
-}
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
- __ SmiTag(eax);
- __ push(eax);
- // Push a copy of the target function and the new target.
- __ push(edi);
- __ push(edx);
- // Function is also the parameter to the runtime call.
- __ push(edi);
-
- __ CallRuntime(function_id, 1);
- __ mov(ebx, eax);
-
- // Restore target function and new target.
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
-namespace {
-
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- esi: context
- // -- edi: constructor function
- // -- edx: new target
- // -----------------------------------
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the incoming parameters on the stack.
- __ SmiTag(eax);
- __ push(esi);
- __ push(eax);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(edi);
- __ Push(edx);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(ebx, eax);
- __ Pop(edx);
- __ Pop(edi);
-
- // ----------- S t a t e -------------
- // -- edi: constructor function
- // -- ebx: newly allocated object
- // -- edx: new target
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- }
-
- __ SmiUntag(eax);
-
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
-
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver, Label::kNear);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &exit, Label::kNear);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The
- // arguments count is stored below the receiver.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- } else {
- __ mov(ebx, Operand(esp, 0));
- }
-
- // Leave construct frame.
- }
-
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label dont_throw;
- __ JumpIfNotSmi(eax, &dont_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
- // Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
- if (create_implicit_receiver) {
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- }
- __ ret(0);
-}
-
-} // namespace
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
-}
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
-}
-
-void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi);
- __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
-}
-
-enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
-
-// Clobbers ecx, edx, edi; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- IsTagged eax_is_tagged) {
- // eax : the number of items to be pushed to the stack
- //
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- int smi_tag = eax_is_tagged == kEaxIsSmiTagged ? kSmiTagSize : 0;
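- // If eax is Smi-tagged, the count is already shifted left by kSmiTagSize,
- // so shift by that much less to convert it to a size in bytes.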
- __ shl(edx, kPointerSizeLog2 - smi_tag);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow);
-
- __ bind(&okay);
-}
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
- masm->isolate());
- __ mov(esi, Operand::StaticVariable(context_address));
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Push the function and the receiver onto the stack.
- __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
-
- // Check if we have enough stack space to push all arguments.
- // Expects argument count in eax. Clobbers ecx, edx, edi.
- Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
-
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Move(ecx, Immediate(0));
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Get the new.target and function from the frame.
- __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
- __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-
- // Invoke the code.
- Handle<Code> builtin = is_construct
- ? masm->isolate()->builtins()->Construct()
- : masm->isolate()->builtins()->Call();
- __ Call(builtin, RelocInfo::CODE_TARGET);
-
- // Exit the internal frame. Notice that this also removes the empty
- // context and the function left on the stack by the code invocation.
- }
- __ ret(kPointerSize); // Remove receiver.
-}
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-// static
-void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the value to pass to the generator
- // -- ebx : the JSGeneratorObject to resume
- // -- edx : the resume mode (tagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertGeneratorObject(ebx);
-
- // Store input value into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
- __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
- kDontSaveFPRegs);
-
- // Store resume mode into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
-
- // Load suspended function and context.
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Flood function if we are stepping.
- Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
- Label stepping_prepared;
- ExternalReference debug_hook =
- ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
- __ j(not_equal, &prepare_step_in_if_stepping);
-
- // Flood function if we need to continue stepping in the suspended generator.
- ExternalReference debug_suspended_generator =
- ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
- __ j(equal, &prepare_step_in_suspended_generator);
- __ bind(&stepping_prepared);
-
- // Pop return address.
- __ PopReturnAddressTo(eax);
-
- // Push receiver.
- __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
- // ----------- S t a t e -------------
- // -- eax : return address
- // -- ebx : the JSGeneratorObject to resume
- // -- edx : the resume mode (tagged)
- // -- edi : generator function
- // -- esi : generator context
- // -- esp[0] : generator receiver
- // -----------------------------------
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
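- // ecx holds the Smi-tagged formal parameter count. The loop below pushes
- // one hole per parameter, exiting when the Smi subtraction borrows.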
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ sub(ecx, Immediate(Smi::FromInt(1)));
- __ j(carry, &done_loop, Label::kNear);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Underlying function needs to have bytecode available.
- if (FLAG_debug_code) {
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ Assert(equal, kMissingBytecodeArray);
- }
-
- // Resume (Ignition/TurboFan) generator object.
- {
- __ PushReturnAddressFrom(eax);
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
- // We abuse new.target both to indicate that this is a resume call and to
- // pass in the generator object. In ordinary calls, new.target is always
- // undefined because generator functions are non-constructable.
- __ mov(edx, ebx);
- __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
-
- __ bind(&prepare_step_in_if_stepping);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edx);
- __ Push(edi);
- __ CallRuntime(Runtime::kDebugOnFunctionCall);
- __ Pop(edx);
- __ Pop(ebx);
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
- }
- __ jmp(&stepping_prepared);
-
- __ bind(&prepare_step_in_suspended_generator);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edx);
- __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
- __ Pop(edx);
- __ Pop(ebx);
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
- }
- __ jmp(&stepping_prepared);
-}
-
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
- Register scratch2) {
- Register args_count = scratch1;
- Register return_pc = scratch2;
-
- // Get the arguments + receiver count.
- __ mov(args_count,
- Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ mov(args_count,
- FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
-
- // Leave the frame (also dropping the register file).
- __ leave();
-
- // Drop receiver + arguments.
- __ pop(return_pc);
- __ add(esp, args_count);
- __ push(return_pc);
-}
-
-// Generate code for entering a JS function with the interpreter.
-// On entry to the function the receiver and arguments have been pushed on the
-// stack left to right. The actual argument count matches the formal parameter
-// count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called
-// o edx: the new target
-// o esi: our context
-// o ebp: the caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frames.h for its layout.
-void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
- __ push(edx); // Callee's new target.
-
- // Get the bytecode array from the function object (or from the DebugInfo if
- // it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- Label load_debug_bytecode_array, bytecode_array_loaded;
- __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
- &load_debug_bytecode_array);
- __ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
- __ bind(&bytecode_array_loaded);
-
- // Check whether we should continue to use the interpreter.
- // TODO(rmcilroy) Remove self healing once liveedit only has to deal with
- // Ignition bytecode.
- Label switch_to_different_code_kind;
- __ Move(ecx, masm->CodeObject()); // Self-reference to this code.
- __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
- __ j(not_equal, &switch_to_different_code_kind);
-
- // Increment invocation count for the function.
- __ EmitLoadFeedbackVector(ecx);
- __ add(
- FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
-
- // Check function data field is actually a BytecodeArray object.
- if (FLAG_debug_code) {
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- eax);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
-
- // Reset code age.
- __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset),
- Immediate(BytecodeArray::kNoAgeBytecodeAge));
-
- // Push bytecode array.
- __ push(kInterpreterBytecodeArrayRegister);
- // Push Smi tagged initial bytecode array offset.
- __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
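- // kHeaderSize - kHeapObjectTag is the offset of the first bytecode from the
- // tagged BytecodeArray pointer.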
-
- // Allocate the local and temporary register file on the stack.
- {
- // Load frame size from the BytecodeArray object.
- __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kFrameSizeOffset));
-
- // Do a stack check to ensure we don't go over the limit.
- Label ok;
- __ mov(ecx, esp);
- __ sub(ecx, ebx);
- ExternalReference stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ cmp(ecx, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
-
- // If ok, push undefined as the initial value for all register file entries.
- Label loop_header;
- Label loop_check;
- __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- __ jmp(&loop_check);
- __ bind(&loop_header);
- // TODO(rmcilroy): Consider doing more than one push per loop iteration.
- __ push(eax);
- // Continue loop if not done.
- __ bind(&loop_check);
- __ sub(ebx, Immediate(kPointerSize));
- __ j(greater_equal, &loop_header);
- }
-
- // Load accumulator, bytecode offset and dispatch table into registers.
- __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ mov(kInterpreterBytecodeOffsetRegister,
- Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ mov(kInterpreterDispatchTableRegister,
- Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Dispatch to the first bytecode handler for the function.
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
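- // ebx now holds the current bytecode; use it to index the dispatch table
- // and load the entry point of its handler.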
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ call(ebx);
- masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
-
- // The return value is in eax.
- LeaveInterpreterFrame(masm, ebx, ecx);
- __ ret(0);
-
- // Load debug copy of the bytecode array.
- __ bind(&load_debug_bytecode_array);
- Register debug_info = kInterpreterBytecodeArrayRegister;
- __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
- __ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ jmp(&bytecode_array_loaded);
-
- // If the shared code is no longer this entry trampoline, then the underlying
- // function has been switched to a different kind of code and we heal the
- // closure by switching the code entry field over to the new code as well.
- __ bind(&switch_to_different_code_kind);
- __ pop(edx); // Callee's new target.
- __ pop(edi); // Callee's JS function.
- __ pop(esi); // Callee's context.
- __ leave(); // Leave the frame so we can tail call.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
- __ RecordWriteCodeEntryField(edi, ecx, ebx);
- __ jmp(ecx);
-}
-
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow,
- bool include_receiver = false) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
- // Make scratch2 the space we have left. The stack might already be overflowed
- // here which will cause scratch2 to become negative.
- __ mov(scratch2, esp);
- __ sub(scratch2, scratch1);
- // Make scratch1 the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(scratch1, num_args);
- if (include_receiver) {
- __ add(scratch1, Immediate(1));
- }
- __ shl(scratch1, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch2, scratch1);
- __ j(less_equal, stack_overflow); // Signed comparison.
-}
-
-static void Generate_InterpreterPushArgs(MacroAssembler* masm,
- Register array_limit,
- Register start_address) {
- // ----------- S t a t e -------------
- // -- start_address : Pointer to the last argument in the args array.
- // -- array_limit : Pointer to one before the first argument in the
- // args array.
- // -----------------------------------
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ Push(Operand(start_address, 0));
- __ sub(start_address, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmp(start_address, array_limit);
- __ j(greater, &loop_header, Label::kNear);
-}
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode,
- InterpreterPushArgsMode mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- edi : the target to call (can be any Object).
- // -----------------------------------
- Label stack_overflow;
- // Compute the expected number of arguments.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
-
- // Add a stack check before pushing the arguments. We need an extra register
- // to perform a stack check. So push it onto the stack temporarily. This
- // might cause stack overflow, but it will be detected by the check.
- __ Push(edi);
- Generate_StackOverflowCheck(masm, ecx, edx, edi, &stack_overflow);
- __ Pop(edi);
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
-
- // Find the address of the last argument.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
- Generate_InterpreterPushArgs(masm, ecx, ebx);
-
- // Call the target.
- __ Push(edx); // Re-push return address.
-
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Jump(masm->isolate()->builtins()->CallWithSpread(),
- RelocInfo::CODE_TARGET);
- } else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
- }
-
- __ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edi);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
-}
-
-namespace {
-
-// This function modifies start_addr, and only reads the contents of the
-// num_args register. scratch1 and scratch2 are used as temporary registers;
-// their original values are restored after use.
-void Generate_InterpreterPushArgsThenReturnAddress(
- MacroAssembler* masm, Register num_args, Register start_addr,
- Register scratch1, Register scratch2, bool receiver_in_args,
- int num_slots_above_ret_addr, Label* stack_overflow) {
- // We have to move return address and the temporary registers above it
- // before we can copy arguments onto the stack. To achieve this:
- // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
- // Step 2: Move the return address and values above it to the top of stack.
- // Step 3: Copy the arguments into the correct locations.
- //  current stack    =====>    required stack layout
- // |              |            | scratch1      | (2) <-- esp(1)
- // |              |            | ....          | (2)
- // |              |            | scratch-n     | (2)
- // |              |            | return addr   | (2)
- // |              |            | arg N         | (3)
- // | scratch1     | <-- esp    | ....          |
- // | ....         |            | arg 1         |
- // | scratch-n    |            | arg 0         |
- // | return addr  |            | receiver slot |
-
- // Check for stack overflow before we increment the stack pointer.
- Generate_StackOverflowCheck(masm, num_args, scratch1, scratch2,
- stack_overflow, true);
-
-// Step 1 - Update the stack pointer. scratch1 already contains the required
-// increment to the stack. i.e. num_args + 1 stack slots. This is computed in
-// the Generate_StackOverflowCheck.
-
-#ifdef _MSC_VER
- // TODO(mythria): Move it to macro assembler.
- // On Windows, we cannot increment the stack size by more than one page
- // (minimum page size is 4KB) without accessing at least one byte on the
- // page. Check this:
- // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
- const int page_size = 4 * 1024;
- Label check_offset, update_stack_pointer;
- __ bind(&check_offset);
- __ cmp(scratch1, page_size);
- __ j(less, &update_stack_pointer);
- __ sub(esp, Immediate(page_size));
- // Just to touch the page, before we increment further.
- __ mov(Operand(esp, 0), Immediate(0));
- __ sub(scratch1, Immediate(page_size));
- __ jmp(&check_offset);
- __ bind(&update_stack_pointer);
-#endif
-
- __ sub(esp, scratch1);
-
- // Step 2: Move the return address and slots above it to the correct
- // locations. Move from top to bottom, otherwise we may overwrite when
- // num_args is 0 or 1, i.e. when the source and destination overlap. We
- // always have at least one extra slot for the receiver, so no extra checks
- // are needed to avoid an unsafe copy.
- for (int i = 0; i < num_slots_above_ret_addr + 1; i++) {
- __ mov(scratch1,
- Operand(esp, num_args, times_pointer_size, (i + 1) * kPointerSize));
- __ mov(Operand(esp, i * kPointerSize), scratch1);
- }
-
- // Step 3: Copy the arguments to the correct locations.
- if (receiver_in_args) {
- __ mov(scratch1, num_args);
- __ add(scratch1, Immediate(1));
- } else {
- // Slot meant for receiver contains return address. Reset it so that
- // we will not incorrectly interpret return address as an object.
- __ mov(Operand(esp, num_args, times_pointer_size,
- (num_slots_above_ret_addr + 1) * kPointerSize),
- Immediate(0));
- __ mov(scratch1, num_args);
- }
-
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ mov(scratch2, Operand(start_addr, 0));
- __ mov(Operand(esp, scratch1, times_pointer_size,
- num_slots_above_ret_addr * kPointerSize),
- scratch2);
- __ sub(start_addr, Immediate(kPointerSize));
- __ sub(scratch1, Immediate(1));
- __ bind(&loop_check);
- __ cmp(scratch1, Immediate(0));
- __ j(greater, &loop_header, Label::kNear);
-}
-
-} // end anonymous namespace
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
- MacroAssembler* masm, InterpreterPushArgsMode mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target
- // -- edi : the constructor
- // -- ebx : allocation site feedback (if available or undefined)
- // -- ecx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -----------------------------------
- Label stack_overflow;
- // We need two scratch registers. Push edi and edx onto stack.
- __ Push(edi);
- __ Push(edx);
-
- // Push arguments and move return address to the top of stack.
- // The eax register is read-only. The ecx register will be modified. The edx
- // and edi registers will be modified but restored to their original values.
- Generate_InterpreterPushArgsThenReturnAddress(masm, eax, ecx, edx, edi, false,
- 2, &stack_overflow);
-
- // Restore edi and edx
- __ Pop(edx);
- __ Pop(edi);
-
- __ AssertUndefinedOrAllocationSite(ebx);
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
- __ AssertFunction(edi);
-
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- // Call the constructor with unmodified eax, edi, edx values.
- __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
- // Call the constructor with unmodified eax, edi, edx values.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edx);
- __ Pop(edi);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
-}
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenConstructArray(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the target to call checked to be Array function.
- // -- ebx : the allocation site feedback
- // -- ecx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -----------------------------------
- Label stack_overflow;
- // We need two scratch registers. Register edi is available, push edx onto
- // stack.
- __ Push(edx);
-
- // Push arguments and move return address to the top of stack.
- // The eax register is read-only. The ecx register will be modified. The edx
- // and edi registers will be modified but restored to their original values.
- Generate_InterpreterPushArgsThenReturnAddress(masm, eax, ecx, edx, edi, true,
- 1, &stack_overflow);
-
- // Restore edx.
- __ Pop(edx);
-
- // Array constructor expects constructor in edi. It is the same as edx here.
- __ Move(edi, edx);
-
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-
- __ bind(&stack_overflow);
- {
- // Pop the temporary registers, so that return address is on top of stack.
- __ Pop(edx);
-
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
-
- // This should be unreachable.
- __ int3();
- }
-}
-
-static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
- // Set the return address to the correct point in the interpreter entry
- // trampoline.
- Smi* interpreter_entry_return_pc_offset(
- masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
- __ LoadHeapObject(ebx,
- masm->isolate()->builtins()->InterpreterEntryTrampoline());
- __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
- Code::kHeaderSize - kHeapObjectTag));
- __ push(ebx);
-
- // Initialize the dispatch table register.
- __ mov(kInterpreterDispatchTableRegister,
- Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Get the bytecode array pointer from the frame.
- __ mov(kInterpreterBytecodeArrayRegister,
- Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-
- if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
- __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
- __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
- ebx);
- __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
- }
-
- // Get the target bytecode offset from the frame.
- __ mov(kInterpreterBytecodeOffsetRegister,
- Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
- // Dispatch to the target bytecode.
- __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ jmp(ebx);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
- // Advance the current bytecode offset stored within the given interpreter
- // stack frame. This simulates what all bytecode handlers do upon completion
- // of the underlying operation.
- __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(kInterpreterAccumulatorRegister);
- __ Push(ebx); // First argument is the bytecode array.
- __ Push(edx); // Second argument is the bytecode offset.
- __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
- __ Move(edx, eax); // Result is the new bytecode offset.
- __ Pop(kInterpreterAccumulatorRegister);
- }
- __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
-
- Generate_InterpreterEnterBytecode(masm);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- Generate_InterpreterEnterBytecode(masm);
-}
-
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- // First, look up code; maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label try_shared;
- Label loop_top, loop_bottom;
-
- Register closure = edi;
- Register new_target = edx;
- Register argument_count = eax;
-
- // Do we have a valid feedback vector?
- __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(ebx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- Register map = argument_count;
- Register index = ebx;
- __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
- __ cmp(index, Immediate(Smi::FromInt(2)));
- __ j(less, &try_shared);
-
- // edx : native context
- // ebx : length / index
- // eax : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = edx;
- __ mov(native_context, NativeContextOperand());
-
- __ bind(&loop_top);
- Register temp = edi;
-
- // Does the native context match?
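- // index is a Smi, so scaling by times_half_pointer_size removes the tag
- // while converting the index to a byte offset.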
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ cmp(temp, native_context);
- __ j(not_equal, &loop_bottom);
- // Code available?
- Register entry = ecx;
- __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
- __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
- __ RecordWriteCodeEntryField(closure, entry, eax);
-
- // Link the closure into the optimized function list.
- // ecx : code entry
- // edx : native context
- // edi : closure
- __ mov(ebx,
- ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
- closure);
- // Save closure before the write barrier.
- __ mov(ebx, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
- kDontSaveFPRegs);
- __ mov(closure, ebx);
- __ pop(new_target);
- __ pop(argument_count);
- __ jmp(entry);
-
- __ bind(&loop_bottom);
- __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(index, Immediate(Smi::FromInt(1)));
- __ j(greater, &loop_top);
-
- // We found no code.
- __ jmp(&gotta_call_runtime);
-
- __ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
- Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime_no_stack);
-
- // If SFI points to anything other than CompileLazy, install that.
- __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ Move(ebx, masm->CodeObject());
- __ cmp(entry, ebx);
- __ j(equal, &gotta_call_runtime_no_stack);
-
- // Install the SFI's code entry.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
- __ RecordWriteCodeEntryField(closure, entry, ebx);
- __ jmp(entry);
-
- __ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
-void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argument count (preserved for callee)
- // -- edx : new target (preserved for callee)
- // -- edi : target function (preserved for callee)
- // -----------------------------------
- Label failed;
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve argument count for later compare.
- __ mov(ecx, eax);
- // Push the number of arguments to the callee.
- __ SmiTag(eax);
- __ push(eax);
- // Push a copy of the target function and the new target.
- __ push(edi);
- __ push(edx);
-
- // The function.
- __ push(edi);
- // Copy arguments from caller (stdlib, foreign, heap).
- Label args_done;
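- // Push up to three provided arguments (stdlib, foreign, heap) and pad with
- // undefined so the runtime call below always receives three values.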
- for (int j = 0; j < 4; ++j) {
- Label over;
- if (j < 3) {
- __ cmp(ecx, Immediate(j));
- __ j(not_equal, &over, Label::kNear);
- }
- for (int i = j - 1; i >= 0; --i) {
- __ Push(Operand(
- ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
- }
- for (int i = 0; i < 3 - j; ++i) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
- if (j < 3) {
- __ jmp(&args_done, Label::kNear);
- __ bind(&over);
- }
- }
- __ bind(&args_done);
-
- // Call runtime, on success unwind frame, and parent frame.
- __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
- // A smi 0 is returned on failure, an object on success.
- __ JumpIfSmi(eax, &failed, Label::kNear);
-
- __ Drop(2);
- __ Pop(ecx);
- __ SmiUntag(ecx);
- scope.GenerateLeaveFrame();
-
- __ PopReturnAddressTo(ebx);
- __ inc(ecx);
- __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
- __ PushReturnAddressFrom(ebx);
- __ ret(0);
-
- __ bind(&failed);
- // Restore target function and new target.
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
- }
- // On failure, tail call back to regular js.
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
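- // Rewinding the return address by 5 bytes (the length of the patched call
- // sequence) makes the freshly restored young prologue run on return.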
- __ sub(Operand(esp, 0), Immediate(5));
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(2, ebx);
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- }
- __ popad();
- __ ret(0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- }
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
- // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
- // that make_code_young doesn't do any garbage collection which allows us to
- // save/restore the registers without worrying about which of them contain
- // pointers.
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- __ sub(eax, Immediate(Assembler::kCallInstructionLength));
- { // NOLINT
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(2, ebx);
- __ mov(Operand(esp, 1 * kPointerSize),
- Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
- 2);
- }
- __ popad();
-
- // Perform prologue operations usually performed by the young code stub.
- __ pop(eax); // Pop return address into scratch register.
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
- __ push(eax); // Push return address after frame prologue.
-
- // Jump to point after the code-age stub.
- __ ret(0);
-}
-
-void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
- GenerateMakeCodeYoungAgainCommon(masm);
-}
-
-void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
- Generate_MarkCodeAsExecutedOnce(masm);
-}
-
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ popad();
- // Tear down internal frame.
- }
-
- __ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
-}
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
-}
-
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
-}
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- // Tear down internal frame.
- }
-
- // Get the full codegen state from the stack and untag it.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ SmiUntag(ecx);
-
- // Switch on the state.
- Label not_no_registers, not_tos_eax;
- __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
- __ j(not_equal, &not_tos_eax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, eax.
-
- __ bind(&not_tos_eax);
- __ Abort(kNoCasesLeft);
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-// static
-void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : argArray
- // -- esp[8] : thisArg
- // -- esp[12] : receiver
- // -----------------------------------
-
- // 1. Load receiver into edi, argArray into eax (if present), remove all
- // arguments from the stack (including the receiver), and push thisArg (if
- // present) instead.
- {
- Label no_arg_array, no_this_arg;
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ mov(ebx, edx);
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ test(eax, eax);
- __ j(zero, &no_this_arg, Label::kNear);
- {
- __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
- __ cmp(eax, Immediate(1));
- __ j(equal, &no_arg_array, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
- __ bind(&no_arg_array);
- }
- __ bind(&no_this_arg);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
- }
-
- // ----------- S t a t e -------------
- // -- eax : argArray
- // -- edi : receiver
- // -- esp[0] : return address
- // -- esp[4] : thisArg
- // -----------------------------------
-
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &receiver_not_callable, Label::kNear);
-
- // 3. Tail call with no arguments if argArray is null or undefined.
- Label no_arguments;
- __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
- __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
- Label::kNear);
-
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The argArray is either null or undefined, so we tail call without any
- // arguments to the receiver.
- __ bind(&no_arguments);
- {
- __ Set(eax, 0);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- }
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
-}
-
-// static
-void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
- // Stack Layout:
- // esp[0] : Return address
- // esp[8] : Argument n
- // esp[16] : Argument n-1
- // ...
- // esp[8 * n] : Argument 1
- // esp[8 * (n + 1)] : Receiver (callable to call)
- //
- // eax contains the number of arguments, n, not counting the receiver.
- //
- // 1. Make sure we have at least one argument.
- {
- Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(ebx);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ PushReturnAddressFrom(ebx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- {
- Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
- __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-
- // 4. Call the callable.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : argumentsList
- // -- esp[8] : thisArgument
- // -- esp[12] : target
- // -- esp[16] : receiver
- // -----------------------------------
-
- // 1. Load target into edi (if present), argumentsList into eax (if present),
- // remove all arguments from the stack (including the receiver), and push
- // thisArgument (if present) instead.
- {
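- // Note: mov does not modify the flags, so each conditional jump below still
- // tests the result of the most recent cmp against the argument count.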
- Label done;
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ mov(edx, edi);
- __ mov(ebx, edi);
- __ cmp(eax, Immediate(1));
- __ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
- __ j(equal, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
- __ cmp(eax, Immediate(3));
- __ j(below, &done, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
- __ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
- }
-
- // ----------- S t a t e -------------
- // -- eax : argumentsList
- // -- edi : target
- // -- esp[0] : return address
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &target_not_callable, Label::kNear);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
-}
-
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : new.target (optional)
- // -- esp[8] : argumentsList
- // -- esp[12] : target
- // -- esp[16] : receiver
- // -----------------------------------
-
- // 1. Load target into edi (if present), argumentsList into eax (if present),
- // new.target into edx (if present, otherwise use target), remove all
- // arguments from the stack (including the receiver), and push thisArgument
- // (if present) instead.
- {
- Label done;
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ mov(edx, edi);
- __ mov(ebx, edi);
- __ cmp(eax, Immediate(1));
- __ j(below, &done, Label::kNear);
- __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
- __ mov(edx, edi);
- __ j(equal, &done, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
- __ cmp(eax, Immediate(3));
- __ j(below, &done, Label::kNear);
- __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
- __ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ PushReturnAddressFrom(ecx);
- __ Move(eax, ebx);
- }
-
- // ----------- S t a t e -------------
- // -- eax : argumentsList
- // -- edx : new.target
- // -- edi : target
- // -- esp[0] : return address
- // -- esp[4] : receiver (undefined)
- // -----------------------------------
-
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &target_not_constructor, Label::kNear);
-
- // 3. Make sure the new.target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &new_target_not_constructor, Label::kNear);
-
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edi);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
-
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ mov(Operand(esp, kPointerSize), edx);
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
-}
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // This will catch both a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- // tail call a stub
- InternalArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
- __ mov(edx, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // This will catch both a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- }
-
- // Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ mov(ebx, masm->isolate()->factory()->undefined_value());
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-// static
-void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into ebx.
- Label no_arguments;
- {
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- }
-
- // 2a. Convert the first argument to a number.
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(eax);
- __ EnterBuiltinFrame(esi, edi, eax);
- __ mov(eax, ebx);
- __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(esi, edi, ebx); // Argc popped to ebx.
- __ SmiUntag(ebx);
- }
-
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-
- // 2b. No arguments, return +0 (already in eax).
- __ bind(&no_arguments);
- __ ret(1 * kPointerSize);
-}
-
-// static
-void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Store the Smi-tagged argc in ecx.
- __ mov(ecx, eax);
- __ SmiTag(ecx);
-
- // 2. Load the first argument into ebx.
- {
- Label no_arguments, done;
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ Move(ebx, Smi::kZero);
- __ bind(&done);
- }
-
- // 3. Make sure ebx is a number.
- {
- Label done_convert;
- __ JumpIfSmi(ebx, &done_convert);
- __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(equal, &done_convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(esi, edi, ecx);
- __ Push(edx);
- __ Move(eax, ebx);
- __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
- __ Move(ebx, eax);
- __ Pop(edx);
- __ LeaveBuiltinFrame(esi, edi, ecx);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, done_alloc, new_object;
- __ cmp(edx, edi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
- __ jmp(&drop_frame_and_ret);
-
- __ bind(&done_alloc);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Restore esi.
-
- // 6. Fallback to the runtime to create new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterBuiltinFrame(esi, edi, ecx);
- __ Push(ebx); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(eax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(esi, edi, ecx);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(esi);
- __ SmiUntag(ecx);
- __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(esi);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Load the first argument into eax.
- Label no_arguments;
- {
- __ mov(ebx, eax); // Store argc in ebx.
- __ test(eax, eax);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
- }
-
- // 2a. At least one argument, return eax if it's a string, otherwise
- // dispatch to appropriate conversion.
- Label drop_frame_and_ret, to_string, symbol_descriptive_string;
- {
- __ JumpIfSmi(eax, &to_string, Label::kNear);
- STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above, &to_string, Label::kNear);
- __ j(equal, &symbol_descriptive_string, Label::kNear);
- __ jmp(&drop_frame_and_ret, Label::kNear);
- }
-
- // 2b. No arguments, return the empty string (and pop the receiver).
- __ bind(&no_arguments);
- {
- __ LoadRoot(eax, Heap::kempty_stringRootIndex);
- __ ret(1 * kPointerSize);
- }
-
- // 3a. Convert eax to a string.
- __ bind(&to_string);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
- __ jmp(&drop_frame_and_ret, Label::kNear);
-
- // 3b. Convert symbol in eax to a string.
- __ bind(&symbol_descriptive_string);
- {
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ Push(eax);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-}
-
-// static
-void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // 1. Make sure we operate in the context of the called function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- __ mov(ebx, eax);
-
- // 2. Load the first argument into eax.
- {
- Label no_arguments, done;
- __ test(ebx, ebx);
- __ j(zero, &no_arguments, Label::kNear);
- __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&no_arguments);
- __ LoadRoot(eax, Heap::kempty_stringRootIndex);
- __ bind(&done);
- }
-
- // 3. Make sure eax is a string.
- {
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
- __ j(below, &done_convert);
- __ bind(&convert);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Push(edx);
- __ Call(masm->isolate()->builtins()->ToString(), RelocInfo::CODE_TARGET);
- __ Pop(edx);
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
- __ bind(&done_convert);
- }
-
- // 4. Check if new target and constructor differ.
- Label drop_frame_and_ret, done_alloc, new_object;
- __ cmp(edx, edi);
- __ j(not_equal, &new_object);
-
- // 5. Allocate a JSValue wrapper for the string.
- // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
- // as needed after the call.
- __ mov(esi, eax);
- __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
- __ jmp(&drop_frame_and_ret);
-
- __ bind(&done_alloc);
- {
- // Restore eax to the first argument and esi to the context.
- __ mov(eax, esi);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- }
-
- // 6. Fall back to the runtime to create a new object.
- __ bind(&new_object);
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ SmiTag(ebx);
- __ EnterBuiltinFrame(esi, edi, ebx);
- __ Push(eax); // the first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ Pop(FieldOperand(eax, JSValue::kValueOffset));
- __ LeaveBuiltinFrame(esi, edi, ebx);
- __ SmiUntag(ebx);
- }
-
- __ bind(&drop_frame_and_ret);
- {
- // Drop all arguments including the receiver.
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ Ret();
- }
-}
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ push(edi);
-
- // Preserve the number of arguments on the stack. Must preserve eax,
- // ebx and ecx because these registers are used when copying the
- // arguments and the receiver.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
- __ push(edi);
-}
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack.
- __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ leave();
-
- // Remove caller arguments from the stack.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
-}
-
-// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argumentsList
- // -- edi : target
- // -- edx : new.target (checked to be constructor or undefined)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(eax, &create_runtime);
-
- // Load the map of argumentsList into ecx.
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // Load native context into ebx.
- __ mov(ebx, NativeContextOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
- __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ j(equal, &create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
- __ j(equal, &create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ Push(edx);
- __ Push(eax);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(edx);
- __ Pop(edi);
- __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- }
- __ jmp(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
- __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
- __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(not_equal, &create_runtime);
- __ SmiUntag(ebx);
- __ mov(eax, ecx);
- __ jmp(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and that our prototype actually is the Array.prototype.
- __ bind(&create_holey_array);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &create_runtime);
- __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
- __ jmp(&done_create);
-
- // Try to create the list from a JSArray object.
- __ bind(&create_array);
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(ecx);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(equal, &create_holey_array, Label::kNear);
- __ j(above, &create_runtime);
- __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
-
- __ bind(&done_create);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(ecx, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, ebx);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // ----------- S t a t e -------------
- // -- edi : target
- // -- eax : args (a FixedArray built from argumentsList)
- // -- ebx : len (number of elements to push from args)
- // -- edx : new.target (checked to be constructor or undefined)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Push arguments onto the stack (thisArgument is already on the stack).
- {
- // Save edx/edi to stX0/stX1.
- __ push(edx);
- __ push(edi);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 4));
- __ lea(esp, Operand(esp, 2 * kFloatSize));
-
- __ PopReturnAddressTo(edx);
- __ Move(ecx, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(ecx, ebx);
- __ j(equal, &done, Label::kNear);
- // Turn the hole into undefined as we go.
- __ mov(edi,
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
- __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(edi);
- __ inc(ecx);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(edx);
-
- // Restore edx/edi from stX0/stX1.
- __ lea(esp, Operand(esp, -2 * kFloatSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, 4));
- __ pop(edx);
- __ pop(edi);
-
- __ Move(eax, ebx);
- }
-
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
- __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
-}
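
The stack-overflow guard inside Generate_Apply above measures the remaining
stack in pointer-sized slots and bails out to Runtime::kThrowStackOverflow if
the argument count does not fit. A minimal standalone sketch of that check,
with hypothetical names (not part of the V8 sources):

    #include <cstdint>

    // Sketch only: kPointerSizeLog2 is 2 on ia32.
    constexpr int kPointerSizeLog2Sketch = 2;

    // `stack_pointer` stands for esp, `real_stack_limit` for the isolate's
    // address_of_real_stack_limit value, `len` for the number of values to push.
    bool ArgumentsFitOnStack(uintptr_t stack_pointer, uintptr_t real_stack_limit,
                             intptr_t len) {
      // Remaining space in slots; may be negative if the stack is already
      // overflowed, hence the signed comparison (the `j(greater, ...)` above).
      intptr_t slots_left =
          (static_cast<intptr_t>(stack_pointer) -
           static_cast<intptr_t>(real_stack_limit)) >> kPointerSizeLog2Sketch;
      return slots_left > len;
    }
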
-
-// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
- // ----------- S t a t e -------------
- // -- edi : the target to call (can be any Object)
- // -- ecx : start index (to support rest parameters)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
- // -----------------------------------
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ebx, ebp);
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- // Just load the length from the ArgumentsAdaptorFrame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- }
- __ bind(&arguments_done);
-
- Label stack_empty, stack_done;
- __ SmiUntag(eax);
- __ sub(eax, ecx);
- __ j(less_equal, &stack_empty);
- {
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- Label done;
- __ LoadRoot(ecx, Heap::kRealStackLimitRootIndex);
- // Make ecx the space we have left. The stack might already be
- // overflowed here which will cause ecx to become negative.
- __ neg(ecx);
- __ add(ecx, esp);
- __ sar(ecx, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, eax);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Forward the arguments from the caller frame.
- {
- Label loop;
- __ mov(ecx, eax);
- __ pop(edx);
- __ bind(&loop);
- {
- __ Push(Operand(ebx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
- }
- __ push(edx);
- }
- }
- __ jmp(&stack_done, Label::kNear);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ Move(eax, Immediate(0));
- }
- __ bind(&stack_done);
-
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-namespace {
-
-// Drops the top JavaScript frame and an arguments adaptor frame below it (if
-// present), preserving all the arguments prepared for the current call.
-// Does nothing if the debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg
-// | f()'s caller pc <- sp
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is enabled.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ movzx_b(scratch1,
- Operand::StaticVariable(is_tail_call_elimination_enabled));
- __ cmp(scratch1, Immediate(0));
- __ j(equal, &done, Label::kNear);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::STUB)));
- __ j(not_equal, &no_interpreter_frame, Label::kNear);
- __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(ebp, scratch2);
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(scratch1,
- FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ mov(
- caller_args_count_reg,
- FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(caller_args_count_reg);
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, 0);
- __ bind(&done);
-}
-} // namespace
-
-// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the function to call (checked to be a JSFunction)
- // -----------------------------------
- __ AssertFunction(edi);
-
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
- Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
- __ j(not_zero, &class_constructor);
-
- // Enter the context of the function; ToObject has to run in the function
- // context, and we also need to take the global proxy from the function
- // context in case of conversion.
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We need to convert the receiver for non-native sloppy mode functions.
- Label done_convert;
- __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
- Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
- __ j(not_zero, &done_convert);
- {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the shared function info.
- // -- edi : the function to call (checked to be a JSFunction)
- // -- esi : the function context.
- // -----------------------------------
-
- if (mode == ConvertReceiverMode::kNullOrUndefined) {
- // Patch receiver to global proxy.
- __ LoadGlobalProxy(ecx);
- } else {
- Label convert_to_object, convert_receiver;
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
- __ j(above_equal, &done_convert);
- if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
- Label convert_global_proxy;
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
- &convert_global_proxy, Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
- // Patch receiver to global proxy.
- __ LoadGlobalProxy(ecx);
- }
- __ jmp(&convert_receiver);
- }
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ Push(eax);
- __ Push(edi);
- __ mov(eax, ecx);
- __ Push(esi);
- __ Call(masm->isolate()->builtins()->ToObject(),
- RelocInfo::CODE_TARGET);
- __ Pop(esi);
- __ mov(ecx, eax);
- __ Pop(edi);
- __ Pop(eax);
- __ SmiUntag(eax);
- }
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
- }
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
- }
- __ bind(&done_convert);
-
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the shared function info.
- // -- edi : the function to call (checked to be a JSFunction)
- // -- esi : the function context.
- // -----------------------------------
-
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- // Reload shared function info.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- }
-
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(ebx);
- ParameterCount actual(eax);
- ParameterCount expected(ebx);
- __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
- CheckDebugStepCallWrapper());
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ push(edi);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
-}
-
-namespace {
-
-void Generate_PushBoundArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -----------------------------------
-
- // Load [[BoundArguments]] into ecx and length of that into ebx.
- Label no_bound_arguments;
- __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ test(ebx, ebx);
- __ j(zero, &no_bound_arguments);
- {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
- // -- ebx : the number of [[BoundArguments]]
- // -----------------------------------
-
- // Reserve stack space for the [[BoundArguments]].
- {
- Label done;
- __ lea(ecx, Operand(ebx, times_pointer_size, 0));
- __ sub(esp, ecx);
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- // Restore the stack pointer.
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
- __ bind(&done);
- }
-
- // Adjust effective number of arguments to include return address.
- __ inc(eax);
-
- // Relocate arguments and return address down the stack.
- {
- Label loop;
- __ Set(ecx, 0);
- __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
- __ bind(&loop);
- __ fld_s(Operand(ebx, ecx, times_pointer_size, 0));
- __ fstp_s(Operand(esp, ecx, times_pointer_size, 0));
- __ inc(ecx);
- __ cmp(ecx, eax);
- __ j(less, &loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(ebx);
- __ bind(&loop);
- __ dec(ebx);
- __ fld_s(
- FieldOperand(ecx, ebx, times_pointer_size, FixedArray::kHeaderSize));
- __ fstp_s(Operand(esp, eax, times_pointer_size, 0));
- __ lea(eax, Operand(eax, 1));
- __ j(greater, &loop);
- }
-
- // Adjust the effective number of arguments: eax currently holds the call's
- // argument count, plus one for the return address, plus the number of
- // [[BoundArguments]], so subtract one again for the return address.
- __ dec(eax);
- }
- __ bind(&no_bound_arguments);
-}
-
-} // namespace
-
-// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the function to call (checked to be a JSBoundFunction)
- // -----------------------------------
- __ AssertBoundFunction(edi);
-
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
- // Patch the receiver to [[BoundThis]].
- __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
-
- // Push the [[BoundArguments]] onto the stack.
- Generate_PushBoundArguments(masm);
-
- // Call the [[BoundTargetFunction]] via the Call builtin.
- __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ecx, Operand::StaticVariable(ExternalReference(
- Builtins::kCall_ReceiverIsAny, masm->isolate())));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
-// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the target to call (can be any Object).
- // -----------------------------------
-
- Label non_callable, non_function, non_smi;
- __ JumpIfSmi(edi, &non_callable);
- __ bind(&non_smi);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
- RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
- RelocInfo::CODE_TARGET);
-
- // Check if target has a [[Call]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &non_callable);
-
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
- __ j(not_equal, &non_function);
-
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, eax, ebx, ecx, edx);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ PushReturnAddressFrom(ecx);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ add(eax, Immediate(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
-
- // 2. Call to something else, which might have a [[Call]] internal method (if
- // not we raise an exception).
- __ bind(&non_function);
- // Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- // Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
- RelocInfo::CODE_TARGET);
-
- // 3. Call to something that is not callable.
- __ bind(&non_callable);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable);
- }
-}
-
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- // Free up some registers.
- // Save edx/edi to stX0/stX1.
- __ push(edx);
- __ push(edi);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 4));
- __ lea(esp, Operand(esp, 2 * kFloatSize));
-
- Register argc = eax;
-
- Register scratch = ecx;
- Register scratch2 = edi;
-
- Register spread = ebx;
- Register spread_map = edx;
-
- Register spread_len = edx;
-
- Label runtime_call, push_args;
- __ mov(spread, Operand(esp, kPointerSize));
- __ JumpIfSmi(spread, &runtime_call);
- __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
- __ j(not_equal, &runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
- __ mov(scratch2, NativeContextOperand());
- __ cmp(scratch,
- ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ j(not_equal, &runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ mov(scratch2, NativeContextOperand());
- __ mov(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ cmp(scratch,
- ContextOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
- __ j(above, &runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
- __ j(equal, &no_protector_check);
- __ cmp(scratch, Immediate(FAST_ELEMENTS));
- __ j(equal, &no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- __ j(not_equal, &runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
- __ jmp(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Need to save these on the stack.
- // Restore edx/edi from stX0/stX1.
- __ lea(esp, Operand(esp, -2 * kFloatSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, 4));
- __ pop(edx);
- __ pop(edi);
-
- __ Push(edi);
- __ Push(edx);
- __ SmiTag(argc);
- __ Push(argc);
- __ Push(spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ mov(spread, eax);
- __ Pop(argc);
- __ SmiUntag(argc);
- __ Pop(edx);
- __ Pop(edi);
- // Free up some registers.
- // Save edx/edi to stX0/stX1.
- __ push(edx);
- __ push(edi);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 4));
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ lea(argc, Operand(argc, spread_len, times_1, -1));
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ neg(scratch);
- __ add(scratch, esp);
- __ sar(scratch, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, spread_len);
- __ j(greater, &done, Label::kNear); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- Register return_address = edi;
- // Pop the return address and spread argument.
- __ PopReturnAddressTo(return_address);
- __ Pop(scratch);
-
- Register scratch2 = esi;
- // Save esi to stX0, edx/edi in stX1/stX2 now.
- __ push(esi);
- __ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, 1 * kFloatSize));
-
- __ mov(scratch, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(scratch, spread_len);
- __ j(equal, &done, Label::kNear);
- __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ inc(scratch);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(return_address);
-
- // Now Restore esi from stX0, edx/edi from stX1/stX2.
- __ lea(esp, Operand(esp, -3 * kFloatSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, 4));
- __ fstp_s(MemOperand(esp, 8));
- __ pop(esi);
- __ pop(edx);
- __ pop(edi);
- }
-}
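
CheckSpreadAndPushToStack above only bypasses the iterator protocol when
iteration cannot be observed; everything else falls back to
Runtime::kSpreadIterableFixed. A hedged summary of the fast-path conditions as
a standalone sketch (hypothetical helper, not V8 source; the elements-kind
constants mirror FAST_SMI_ELEMENTS..FAST_HOLEY_ELEMENTS, i.e. 0..3):

    // Each parameter corresponds to one of the checks performed above.
    bool CanSpreadWithoutIterating(bool is_js_array,
                                   bool has_initial_array_prototype,
                                   bool array_iterator_protector_valid,
                                   bool initial_iterator_map_unchanged,
                                   int elements_kind,
                                   bool array_protector_valid) {
      const int kFastHoleySmi = 1, kFastHoley = 3;
      if (!is_js_array || !has_initial_array_prototype) return false;
      if (!array_iterator_protector_valid || !initial_iterator_map_unchanged) {
        return false;
      }
      if (elements_kind > kFastHoley) return false;  // e.g. double kinds
      // Packed kinds skip the ArrayProtector check; holey kinds require it.
      if (elements_kind == kFastHoleySmi || elements_kind == kFastHoley) {
        return array_protector_valid;
      }
      return true;
    }
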
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the target to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push edx to save it.
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
-// static
-void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (checked to be a constructor)
- // -- edi : the constructor to call (checked to be a JSFunction)
- // -----------------------------------
- __ AssertFunction(edi);
-
- // The calling convention for function-specific ConstructStubs requires
- // ebx to contain either an AllocationSite or undefined.
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
-
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
-// static
-void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (checked to be a constructor)
- // -- edi : the constructor to call (checked to be a JSBoundFunction)
- // -----------------------------------
- __ AssertBoundFunction(edi);
-
- // Push the [[BoundArguments]] onto the stack.
- Generate_PushBoundArguments(masm);
-
- // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
- {
- Label done;
- __ cmp(edi, edx);
- __ j(not_equal, &done, Label::kNear);
- __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
- __ bind(&done);
- }
-
- // Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
- __ mov(ecx, Operand::StaticVariable(
- ExternalReference(Builtins::kConstruct, masm->isolate())));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
-// static
-void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edi : the constructor to call (checked to be a JSProxy)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- // Call into the Runtime for Proxy [[Construct]].
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- // Include the pushed new_target, constructor and the receiver.
- __ add(eax, Immediate(3));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
-}
-
-// static
-void Builtins::Generate_Construct(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (can be any Object)
- // -----------------------------------
-
- // Check if target is a Smi.
- Label non_constructor;
- __ JumpIfSmi(edi, &non_constructor, Label::kNear);
-
- // Dispatch based on instance type.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
- RelocInfo::CODE_TARGET);
-
- // Check if target has a [[Construct]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &non_constructor, Label::kNear);
-
- // Only dispatch to bound functions after checking whether they are
- // constructors.
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
- RelocInfo::CODE_TARGET);
-
- // Only dispatch to proxies after checking whether they are constructors.
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
- __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
- RelocInfo::CODE_TARGET);
-
- // Called Construct on an exotic Object with a [[Construct]] internal method.
- {
- // Overwrite the original receiver with the (original) target.
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- // Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(),
- RelocInfo::CODE_TARGET);
- }
-
- // Called Construct on an Object that doesn't have a [[Construct]] internal
- // method.
- __ bind(&non_constructor);
- __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
- RelocInfo::CODE_TARGET);
-}
-
-// static
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- edx : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (can be any Object)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-// static
-void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : requested object size (untagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ SmiTag(edx);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInNewSpace);
-}
-
-// static
-void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : requested object size (untagged)
- // -- esp[0] : return address
- // -----------------------------------
- __ SmiTag(edx);
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
-}
-
-// static
-void Builtins::Generate_Abort(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : message_id as Smi
- // -- esp[0] : return address
- // -----------------------------------
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::kZero);
- __ TailCallRuntime(Runtime::kAbort);
-}
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- edx : new target (passed through to callee)
- // -- edi : function (passed through to callee)
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments, stack_overflow;
- __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmp(eax, ebx);
- __ j(less, &too_few);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
- // edi is used as a scratch register. It should be restored from the frame
- // when needed.
- Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- __ mov(eax, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ cmp(eax, ebx);
- __ j(less, &copy);
- // eax now contains the expected number of arguments.
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
- // edi is used as a scratch register. It should be restored from the frame
- // when needed.
- Generate_StackOverflowCheck(masm, ebx, ecx, edi, &stack_overflow);
-
- // Remember expected arguments in ecx.
- __ mov(ecx, ebx);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- // ebx = expected - actual.
- __ sub(ebx, eax);
- // eax = -actual - 1
- __ neg(eax);
- __ sub(eax, Immediate(1));
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ test(eax, eax);
- __ j(not_zero, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(eax);
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, ebx);
- __ j(less, &fill);
-
- // Restore expected arguments.
- __ mov(eax, ecx);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- // Restore function pointer.
- __ mov(edi, Operand(ebp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- // eax : expected number of arguments
- // edx : new target (passed through to callee)
- // edi : function (passed through to callee)
- __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ call(ecx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ jmp(ecx);
-
- __ bind(&stack_overflow);
- {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ int3();
- }
-}
-
-static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
- bool has_handler_frame) {
- // Lookup the function in the JavaScript frame.
- if (has_handler_frame) {
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass function as argument.
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
- }
-
- Label skip;
- // If the code object is null, just return to the caller.
- __ cmp(eax, Immediate(0));
- __ j(not_equal, &skip, Label::kNear);
- __ ret(0);
-
- __ bind(&skip);
-
- // Drop any potential handler frame that may be sitting on top of the actual
- // JavaScript frame. This is the case when OSR is triggered from bytecode.
- if (has_handler_frame) {
- __ leave();
- }
-
- // Load deoptimization data from the code object.
- __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
-
- // Load the OSR entrypoint offset from the deoptimization data.
- __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
- __ SmiUntag(ebx);
-
- // Compute the target address = code_obj + header_size + osr_offset
- __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
-
- // Overwrite the return address on the stack.
- __ mov(Operand(esp, 0), eax);
-
- // And "return" to the OSR entry point of the function.
- __ ret(0);
-}
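
Spelled out, the jump target assembled above is

    osr_entry = code_object - kHeapObjectTag + Code::kHeaderSize + osr_pc_offset

where osr_pc_offset is the Smi-untagged value read from the deoptimization data
at DeoptimizationInputData::kOsrPcOffsetIndex; writing osr_entry into the
return-address slot at esp[0] makes the final `ret` enter the optimized code
instead of returning to the caller.
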
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, false);
-}
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- Generate_OnStackReplacementHelper(masm, true);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index 9e48fe7593..76056339f7 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -29,17 +29,18 @@ Cancelable::~Cancelable() {
CancelableTaskManager::CancelableTaskManager()
: task_id_counter_(0), canceled_(false) {}
-CancelableTaskManager::Id CancelableTaskManager::Register(Cancelable* task) {
+uint32_t CancelableTaskManager::Register(Cancelable* task) {
base::LockGuard<base::Mutex> guard(&mutex_);
- CancelableTaskManager::Id id = ++task_id_counter_;
- // Id overflows are not supported.
- CHECK_NE(0, id);
+ uint32_t id = ++task_id_counter_;
+ // The loop below is just used when task_id_counter_ overflows.
+ while (cancelable_tasks_.count(id) > 0) ++id;
CHECK(!canceled_);
cancelable_tasks_[id] = task;
return id;
}
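
A minimal standalone sketch of the id-reuse behaviour introduced above
(hypothetical names, not V8 code): once the 32-bit counter wraps around, the
loop probes forward past ids that are still registered.

    #include <cstdint>
    #include <map>

    // Stand-in for the cancelable_tasks_ map keyed by task id.
    uint32_t NextTaskId(const std::map<uint32_t, void*>& registered,
                        uint32_t* counter) {
      uint32_t id = ++(*counter);
      // Only loops once the counter has wrapped and old ids are still in use.
      while (registered.count(id) > 0) ++id;
      return id;
    }
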
-void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {
+
+void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
size_t removed = cancelable_tasks_.erase(id);
USE(removed);
@@ -48,7 +49,7 @@ void CancelableTaskManager::RemoveFinishedTask(CancelableTaskManager::Id id) {
}
CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
- CancelableTaskManager::Id id) {
+ uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
auto entry = cancelable_tasks_.find(id);
if (entry != cancelable_tasks_.end()) {
@@ -111,17 +112,16 @@ CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbortAll() {
}
CancelableTask::CancelableTask(Isolate* isolate)
- : CancelableTask(isolate, isolate->cancelable_task_manager()) {}
+ : CancelableTask(isolate->cancelable_task_manager()) {}
-CancelableTask::CancelableTask(Isolate* isolate, CancelableTaskManager* manager)
- : Cancelable(manager), isolate_(isolate) {}
+CancelableTask::CancelableTask(CancelableTaskManager* manager)
+ : Cancelable(manager) {}
CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
- : CancelableIdleTask(isolate, isolate->cancelable_task_manager()) {}
+ : CancelableIdleTask(isolate->cancelable_task_manager()) {}
-CancelableIdleTask::CancelableIdleTask(Isolate* isolate,
- CancelableTaskManager* manager)
- : Cancelable(manager), isolate_(isolate) {}
+CancelableIdleTask::CancelableIdleTask(CancelableTaskManager* manager)
+ : Cancelable(manager) {}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index 8a1ad325c8..518a721f0f 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -5,7 +5,7 @@
#ifndef V8_CANCELABLE_TASK_H_
#define V8_CANCELABLE_TASK_H_
-#include <unordered_map>
+#include <map>
#include "include/v8-platform.h"
#include "src/base/atomic-utils.h"
@@ -24,14 +24,12 @@ class Isolate;
// from any fore- and background task/thread.
class V8_EXPORT_PRIVATE CancelableTaskManager {
public:
- using Id = uint64_t;
-
CancelableTaskManager();
// Registers a new cancelable {task}. Returns the unique {id} of the task that
// can be used to try to abort a task by calling {Abort}.
// Must not be called after CancelAndWait.
- Id Register(Cancelable* task);
+ uint32_t Register(Cancelable* task);
// Try to abort running a task identified by {id}. The possible outcomes are:
// (1) The task is already finished running or was canceled before and
@@ -41,7 +39,7 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// removed.
//
enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
- TryAbortResult TryAbort(Id id);
+ TryAbortResult TryAbort(uint32_t id);
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
@@ -61,13 +59,13 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
private:
// Only called by {Cancelable} destructor. The task is done with executing,
// but needs to be removed.
- void RemoveFinishedTask(Id id);
+ void RemoveFinishedTask(uint32_t id);
// To mitigate the ABA problem, the api refers to tasks through an id.
- Id task_id_counter_;
+ uint32_t task_id_counter_;
// A set of cancelable tasks that are currently registered.
- std::unordered_map<Id, Cancelable*> cancelable_tasks_;
+ std::map<uint32_t, Cancelable*> cancelable_tasks_;
// Mutex and condition variable enabling concurrent register and removing, as
// well as waiting for background tasks on {CancelAndWait}.
@@ -91,7 +89,7 @@ class V8_EXPORT_PRIVATE Cancelable {
// a platform. This step transfers ownership to the platform, which destroys
// the task after running it. Since the exact time is not known, we cannot
// access the object after handing it to a platform.
- CancelableTaskManager::Id id() { return id_; }
+ uint32_t id() { return id_; }
protected:
bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
@@ -122,7 +120,7 @@ class V8_EXPORT_PRIVATE Cancelable {
CancelableTaskManager* parent_;
base::AtomicValue<Status> status_;
- CancelableTaskManager::Id id_;
+ uint32_t id_;
// The counter is incremented for failing tries to cancel a task. This can be
// used by the task itself as an indication how often external entities tried
@@ -140,7 +138,7 @@ class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
NON_EXPORTED_BASE(public Task) {
public:
explicit CancelableTask(Isolate* isolate);
- CancelableTask(Isolate* isolate, CancelableTaskManager* manager);
+ explicit CancelableTask(CancelableTaskManager* manager);
// Task overrides.
void Run() final {
@@ -151,10 +149,7 @@ class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
virtual void RunInternal() = 0;
- Isolate* isolate() { return isolate_; }
-
private:
- Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(CancelableTask);
};
@@ -163,7 +158,7 @@ class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
class CancelableIdleTask : public Cancelable, public IdleTask {
public:
explicit CancelableIdleTask(Isolate* isolate);
- CancelableIdleTask(Isolate* isolate, CancelableTaskManager* manager);
+ explicit CancelableIdleTask(CancelableTaskManager* manager);
// IdleTask overrides.
void Run(double deadline_in_seconds) final {
@@ -174,10 +169,7 @@ class CancelableIdleTask : public Cancelable, public IdleTask {
virtual void RunInternal(double deadline_in_seconds) = 0;
- Isolate* isolate() { return isolate_; }
-
private:
- Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(CancelableIdleTask);
};
diff --git a/deps/v8/src/char-predicates.cc b/deps/v8/src/char-predicates.cc
index dc9865b558..747f4194f4 100644
--- a/deps/v8/src/char-predicates.cc
+++ b/deps/v8/src/char-predicates.cc
@@ -2,41 +2,43 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
#include "src/char-predicates.h"
-#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
#include "unicode/urename.h"
-#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
-bool SupplementaryPlanes::IsIDStart(uc32 c) {
- DCHECK(c > 0xFFFF);
-#ifdef V8_INTL_SUPPORT
- // This only works for code points in the SMPs, since ICU does not exclude
- // code points with properties 'Pattern_Syntax' or 'Pattern_White_Space'.
- // Code points in the SMP do not have those properties.
- return u_isIDStart(c);
-#else
- // This is incorrect, but if we don't have ICU, use this as fallback.
- return false;
-#endif // V8_INTL_SUPPORT
+// ES#sec-names-and-keywords Names and Keywords
+// UnicodeIDStart, '$', '_' and '\'
+bool IdentifierStart::Is(uc32 c) {
+ // cannot use u_isIDStart because it does not work for
+ // Other_ID_Start characters.
+ return u_hasBinaryProperty(c, UCHAR_ID_START) ||
+ (c < 0x60 && (c == '$' || c == '\\' || c == '_'));
}
+// ES#sec-names-and-keywords Names and Keywords
+// UnicodeIDContinue, '$', '_', '\', ZWJ, and ZWNJ
+bool IdentifierPart::Is(uc32 c) {
+ // Can't use u_isIDPart because it does not work for
+ // Other_ID_Continue characters.
+ return u_hasBinaryProperty(c, UCHAR_ID_CONTINUE) ||
+ (c < 0x60 && (c == '$' || c == '\\' || c == '_')) || c == 0x200C ||
+ c == 0x200D;
+}
-bool SupplementaryPlanes::IsIDPart(uc32 c) {
- DCHECK(c > 0xFFFF);
-#ifdef V8_INTL_SUPPORT
- // This only works for code points in the SMPs, since ICU does not exclude
- // code points with properties 'Pattern_Syntax' or 'Pattern_White_Space'.
- // Code points in the SMP do not have those properties.
- return u_isIDPart(c);
-#else
- // This is incorrect, but if we don't have ICU, use this as fallback.
- return false;
-#endif // V8_INTL_SUPPORT
+// ES#sec-white-space White Space
+// gC=Zs, U+0009, U+000B, U+000C, U+FEFF
+bool WhiteSpace::Is(uc32 c) {
+ return (u_charType(c) == U_SPACE_SEPARATOR) ||
+ (c < 0x0D && (c == 0x09 || c == 0x0B || c == 0x0C)) || c == 0xFEFF;
}
+
} // namespace internal
} // namespace v8
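
With ICU now required, the predicates above encode the spec character sets
directly. A few illustrative checks, assuming the structs declared in
src/char-predicates.h are in scope (hedged example, not part of the patch):

    #include "src/char-predicates.h"

    void CharPredicateExamples() {
      using namespace v8::internal;
      bool a = IdentifierStart::Is('$');    // true: '$' is allowed explicitly
      bool b = IdentifierStart::Is('1');    // false: digits are only ID_Continue
      bool c = IdentifierPart::Is(0x200D);  // true: ZWJ is permitted in parts
      bool d = WhiteSpace::Is(0xFEFF);      // true: the BOM counts as white space
      (void)a; (void)b; (void)c; (void)d;
    }
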
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 966b2a5936..88208d04f6 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -26,53 +26,58 @@ inline bool IsBinaryDigit(uc32 c);
inline bool IsRegExpWord(uc32 c);
inline bool IsRegExpNewline(uc32 c);
-struct V8_EXPORT_PRIVATE SupplementaryPlanes {
- static bool IsIDStart(uc32 c);
- static bool IsIDPart(uc32 c);
-};
-
-
-// ES6 draft section 11.6
+// ES#sec-names-and-keywords
// This includes '_', '$' and '\', and ID_Start according to
// http://www.unicode.org/reports/tr31/, which consists of categories
// 'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', but excluding properties
// 'Pattern_Syntax' or 'Pattern_White_Space'.
-// For code points in the SMPs, we can resort to ICU (if available).
+#ifdef V8_INTL_SUPPORT
+struct V8_EXPORT_PRIVATE IdentifierStart {
+ static bool Is(uc32 c);
+#else
struct IdentifierStart {
+ // Non-BMP characters are not supported without I18N.
static inline bool Is(uc32 c) {
- if (c > 0xFFFF) return SupplementaryPlanes::IsIDStart(c);
- return unibrow::ID_Start::Is(c);
+ return (c <= 0xFFFF) ? unibrow::ID_Start::Is(c) : false;
}
+#endif
};
-
-// ES6 draft section 11.6
+// ES#sec-names-and-keywords
// This includes \u200c and \u200d, and ID_Continue according to
// http://www.unicode.org/reports/tr31/, which consists of ID_Start,
// the categories 'Mn', 'Mc', 'Nd', 'Pc', but excluding properties
// 'Pattern_Syntax' or 'Pattern_White_Space'.
-// For code points in the SMPs, we can resort to ICU (if available).
+#ifdef V8_INTL_SUPPORT
+struct V8_EXPORT_PRIVATE IdentifierPart {
+ static bool Is(uc32 c);
+#else
struct IdentifierPart {
static inline bool Is(uc32 c) {
- if (c > 0xFFFF) return SupplementaryPlanes::IsIDPart(c);
- return unibrow::ID_Start::Is(c) || unibrow::ID_Continue::Is(c);
+ // Non-BMP characters are not supported without I18N.
+ if (c <= 0xFFFF) {
+ return unibrow::ID_Start::Is(c) || unibrow::ID_Continue::Is(c);
+ }
+ return false;
}
+#endif
};
-
// ES6 draft section 11.2
// This includes all code points of Unicode category 'Zs'.
-// \u180e stops being one as of Unicode 6.3.0, but ES6 adheres to Unicode 5.1,
-// so it is also included.
-// Further included are \u0009, \u000b, \u0020, \u00a0, \u000c, and \ufeff.
-// There are no category 'Zs' code points in the SMPs.
+// Further included are \u0009, \u000b, \u000c, and \ufeff.
+#ifdef V8_INTL_SUPPORT
+struct V8_EXPORT_PRIVATE WhiteSpace {
+ static bool Is(uc32 c);
+#else
struct WhiteSpace {
static inline bool Is(uc32 c) { return unibrow::WhiteSpace::Is(c); }
+#endif
};
-
// WhiteSpace and LineTerminator according to ES6 draft section 11.2 and 11.3
-// This consists of \000a, \000d, \u2028, and \u2029.
+// This includes all the characters with Unicode category 'Z' (= Zs+Zl+Zp)
+// as well as \u0009 - \u000d and \ufeff.
struct WhiteSpaceOrLineTerminator {
static inline bool Is(uc32 c) {
return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 5252b438be..ebeb540230 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -30,12 +30,6 @@ Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
}
// static
-Callable CodeFactory::LoadIC(Isolate* isolate) {
- return Callable(isolate->builtins()->LoadICTrampoline(),
- LoadDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::LoadICProtoArray(Isolate* isolate,
bool throw_if_nonexistent) {
return Callable(
@@ -52,18 +46,6 @@ Callable CodeFactory::ApiGetter(Isolate* isolate) {
}
// static
-Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
- return Callable(isolate->builtins()->LoadIC(),
- LoadWithVectorDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::LoadICInOptimizedCode_Noninlined(Isolate* isolate) {
- return Callable(isolate->builtins()->LoadIC_Noninlined(),
- LoadWithVectorDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
return Callable(
typeof_mode == NOT_INSIDE_TYPEOF
@@ -82,29 +64,15 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
}
// static
-Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
- return Callable(isolate->builtins()->KeyedLoadICTrampoline(),
- LoadDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
- return Callable(isolate->builtins()->KeyedLoadIC(),
- LoadWithVectorDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- CallICStub stub(isolate, mode, tail_call_mode);
+Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode) {
+ CallICStub stub(isolate, mode);
return make_callable(stub);
}
// static
Callable CodeFactory::CallICTrampoline(Isolate* isolate,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- CallICTrampolineStub stub(isolate, mode, tail_call_mode);
+ ConvertReceiverMode mode) {
+ CallICTrampolineStub stub(isolate, mode);
return make_callable(stub);
}
@@ -200,9 +168,34 @@ Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
}
// static
-Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
- BinaryOpICStub stub(isolate, op);
- return make_callable(stub);
+Callable CodeFactory::BinaryOperation(Isolate* isolate, Token::Value op) {
+ switch (op) {
+ case Token::SAR:
+ return Builtins::CallableFor(isolate, Builtins::kShiftRight);
+ case Token::SHL:
+ return Builtins::CallableFor(isolate, Builtins::kShiftLeft);
+ case Token::SHR:
+ return Builtins::CallableFor(isolate, Builtins::kShiftRightLogical);
+ case Token::ADD:
+ return Builtins::CallableFor(isolate, Builtins::kAdd);
+ case Token::SUB:
+ return Builtins::CallableFor(isolate, Builtins::kSubtract);
+ case Token::MUL:
+ return Builtins::CallableFor(isolate, Builtins::kMultiply);
+ case Token::DIV:
+ return Builtins::CallableFor(isolate, Builtins::kDivide);
+ case Token::MOD:
+ return Builtins::CallableFor(isolate, Builtins::kModulus);
+ case Token::BIT_OR:
+ return Builtins::CallableFor(isolate, Builtins::kBitwiseOr);
+ case Token::BIT_AND:
+ return Builtins::CallableFor(isolate, Builtins::kBitwiseAnd);
+ case Token::BIT_XOR:
+ return Builtins::CallableFor(isolate, Builtins::kBitwiseXor);
+ default:
+ break;
+ }
+ UNREACHABLE();
}
// static
@@ -232,80 +225,6 @@ Callable CodeFactory::NumberToString(Isolate* isolate) {
}
// static
-Callable CodeFactory::StringFromCharCode(Isolate* isolate) {
- Handle<Code> code(isolate->builtins()->StringFromCharCode());
- return Callable(code, BuiltinDescriptor(isolate));
-}
-
-#define TFS_BUILTIN(Name) \
- Callable CodeFactory::Name(Isolate* isolate) { \
- Handle<Code> code(isolate->builtins()->Name()); \
- return Callable(code, Builtin_##Name##_InterfaceDescriptor(isolate)); \
- }
-
-TFS_BUILTIN(ToString)
-TFS_BUILTIN(Add)
-TFS_BUILTIN(Subtract)
-TFS_BUILTIN(Multiply)
-TFS_BUILTIN(Divide)
-TFS_BUILTIN(Modulus)
-TFS_BUILTIN(BitwiseAnd)
-TFS_BUILTIN(BitwiseOr)
-TFS_BUILTIN(BitwiseXor)
-TFS_BUILTIN(ShiftLeft)
-TFS_BUILTIN(ShiftRight)
-TFS_BUILTIN(ShiftRightLogical)
-TFS_BUILTIN(LessThan)
-TFS_BUILTIN(LessThanOrEqual)
-TFS_BUILTIN(GreaterThan)
-TFS_BUILTIN(GreaterThanOrEqual)
-TFS_BUILTIN(Equal)
-TFS_BUILTIN(StrictEqual)
-TFS_BUILTIN(CreateIterResultObject)
-TFS_BUILTIN(HasProperty)
-TFS_BUILTIN(NonNumberToNumber)
-TFS_BUILTIN(StringToNumber)
-TFS_BUILTIN(ToBoolean)
-TFS_BUILTIN(ToInteger)
-TFS_BUILTIN(ToLength)
-TFS_BUILTIN(ToName)
-TFS_BUILTIN(ToNumber)
-TFS_BUILTIN(ToObject)
-TFS_BUILTIN(ClassOf)
-TFS_BUILTIN(Typeof)
-TFS_BUILTIN(InstanceOf)
-TFS_BUILTIN(OrdinaryHasInstance)
-TFS_BUILTIN(CopyFastSmiOrObjectElements)
-TFS_BUILTIN(GrowFastDoubleElements)
-TFS_BUILTIN(GrowFastSmiOrObjectElements)
-TFS_BUILTIN(NewUnmappedArgumentsElements)
-TFS_BUILTIN(FastCloneRegExp)
-TFS_BUILTIN(FastNewClosure)
-TFS_BUILTIN(FastNewObject)
-TFS_BUILTIN(FastNewRestParameter)
-TFS_BUILTIN(FastNewSloppyArguments)
-TFS_BUILTIN(FastNewStrictArguments)
-TFS_BUILTIN(ForInFilter)
-TFS_BUILTIN(GetSuperConstructor)
-TFS_BUILTIN(LoadIC_Uninitialized)
-TFS_BUILTIN(KeyedLoadIC_Megamorphic)
-TFS_BUILTIN(PromiseHandleReject)
-TFS_BUILTIN(RegExpReplace)
-TFS_BUILTIN(RegExpSplit)
-TFS_BUILTIN(StringCharAt)
-TFS_BUILTIN(StringCharCodeAt)
-TFS_BUILTIN(StringEqual)
-TFS_BUILTIN(StringLessThan)
-TFS_BUILTIN(StringLessThanOrEqual)
-TFS_BUILTIN(StringGreaterThan)
-TFS_BUILTIN(StringGreaterThanOrEqual)
-TFS_BUILTIN(AsyncGeneratorResolve)
-TFS_BUILTIN(AsyncGeneratorReject)
-TFS_BUILTIN(AsyncGeneratorResumeNext)
-
-#undef TFS_BUILTIN
-
-// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
@@ -317,26 +236,20 @@ Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
switch (token) {
case Token::EQ:
case Token::EQ_STRICT:
- return StringEqual(isolate);
+ return Builtins::CallableFor(isolate, Builtins::kStringEqual);
case Token::LT:
- return StringLessThan(isolate);
+ return Builtins::CallableFor(isolate, Builtins::kStringLessThan);
case Token::GT:
- return StringGreaterThan(isolate);
+ return Builtins::CallableFor(isolate, Builtins::kStringGreaterThan);
case Token::LTE:
- return StringLessThanOrEqual(isolate);
+ return Builtins::CallableFor(isolate, Builtins::kStringLessThanOrEqual);
case Token::GTE:
- return StringGreaterThanOrEqual(isolate);
+ return Builtins::CallableFor(isolate,
+ Builtins::kStringGreaterThanOrEqual);
default:
break;
}
UNREACHABLE();
- return StringEqual(isolate);
-}
-
-// static
-Callable CodeFactory::StringIndexOf(Isolate* isolate) {
- return Callable(isolate->builtins()->StringIndexOf(),
- StringIndexOfDescriptor(isolate));
}
// static
@@ -371,12 +284,6 @@ Callable CodeFactory::FastCloneShallowArray(
}
// static
-Callable CodeFactory::FastCloneShallowObject(Isolate* isolate) {
- return Callable(isolate->builtins()->FastCloneShallowObject(),
- FastCloneShallowObjectDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type) {
return Callable(isolate->builtins()->NewFunctionContext(scope_type),
@@ -384,18 +291,6 @@ Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
}
// static
-Callable CodeFactory::ForInPrepare(Isolate* isolate) {
- return Callable(isolate->builtins()->ForInPrepare(),
- ForInPrepareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ForInNext(Isolate* isolate) {
- return Callable(isolate->builtins()->ForInNext(),
- ForInNextDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
AllocateHeapNumberStub stub(isolate);
return make_callable(stub);
@@ -408,26 +303,36 @@ Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
}
// static
-Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- return Callable(isolate->builtins()->Call(mode, tail_call_mode),
+Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
+ return Callable(isolate->builtins()->Call(mode),
CallTrampolineDescriptor(isolate));
}
// static
+Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
+ return Callable(isolate->builtins()->CallWithArrayLike(),
+ CallWithArrayLikeDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::CallWithSpread(Isolate* isolate) {
return Callable(isolate->builtins()->CallWithSpread(),
- CallTrampolineDescriptor(isolate));
+ CallWithSpreadDescriptor(isolate));
}
// static
-Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- return Callable(isolate->builtins()->CallFunction(mode, tail_call_mode),
+Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
+ return Callable(isolate->builtins()->CallFunction(mode),
CallTrampolineDescriptor(isolate));
}
// static
+Callable CodeFactory::CallVarargs(Isolate* isolate) {
+ return Callable(isolate->builtins()->CallVarargs(),
+ CallVarargsDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::CallForwardVarargs(Isolate* isolate) {
return Callable(isolate->builtins()->CallForwardVarargs(),
CallForwardVarargsDescriptor(isolate));
@@ -448,7 +353,7 @@ Callable CodeFactory::Construct(Isolate* isolate) {
// static
Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
return Callable(isolate->builtins()->ConstructWithSpread(),
- ConstructTrampolineDescriptor(isolate));
+ ConstructWithSpreadDescriptor(isolate));
}
// static
@@ -458,6 +363,12 @@ Callable CodeFactory::ConstructFunction(Isolate* isolate) {
}
// static
+Callable CodeFactory::ConstructVarargs(Isolate* isolate) {
+ return Callable(isolate->builtins()->ConstructVarargs(),
+ ConstructVarargsDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::ConstructForwardVarargs(Isolate* isolate) {
return Callable(isolate->builtins()->ConstructForwardVarargs(),
ConstructForwardVarargsDescriptor(isolate));
@@ -472,10 +383,10 @@ Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
// static
Callable CodeFactory::InterpreterPushArgsThenCall(
Isolate* isolate, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
- return Callable(isolate->builtins()->InterpreterPushArgsThenCall(
- receiver_mode, tail_call_mode, mode),
- InterpreterPushArgsThenCallDescriptor(isolate));
+ InterpreterPushArgsMode mode) {
+ return Callable(
+ isolate->builtins()->InterpreterPushArgsThenCall(receiver_mode, mode),
+ InterpreterPushArgsThenCallDescriptor(isolate));
}
// static
@@ -533,5 +444,13 @@ Callable CodeFactory::FunctionPrototypeBind(Isolate* isolate) {
BuiltinDescriptor(isolate));
}
+// static
+Callable CodeFactory::TransitionElementsKind(Isolate* isolate,
+ ElementsKind from, ElementsKind to,
+ bool is_jsarray) {
+ TransitionElementsKindStub stub(isolate, from, to, is_jsarray);
+ return make_callable(stub);
+}
+
} // namespace internal
} // namespace v8
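The code-factory.cc change collapses the per-operator TFS_BUILTIN accessors into a single CodeFactory::BinaryOperation that switches over the token and fetches the matching builtin through Builtins::CallableFor. Below is a self-contained sketch of that dispatch shape; Token, Callable and the Builtin* functions are simplified stand-ins invented for the example, not V8 types.

// Sketch of the "one switch, many builtins" dispatch used by BinaryOperation.
#include <cstdlib>
#include <iostream>

enum class Token { kAdd, kSub, kMul, kBitOr };

using Callable = double (*)(double, double);  // stand-in for v8::internal::Callable

static double BuiltinAdd(double a, double b) { return a + b; }
static double BuiltinSubtract(double a, double b) { return a - b; }
static double BuiltinMultiply(double a, double b) { return a * b; }

// One entry point, a switch over the operator, and an unreachable default
// for tokens that have no builtin mapping (mirrors UNREACHABLE() in the patch).
static Callable BinaryOperation(Token op) {
  switch (op) {
    case Token::kAdd: return BuiltinAdd;
    case Token::kSub: return BuiltinSubtract;
    case Token::kMul: return BuiltinMultiply;
    default:
      std::abort();
  }
}

int main() {
  std::cout << BinaryOperation(Token::kAdd)(2, 3) << "\n";  // 5
  std::cout << BinaryOperation(Token::kMul)(2, 3) << "\n";  // 6
  return 0;
}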
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index c0cc549523..bc8ff5d846 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -24,23 +24,14 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
// Initial states for ICs.
- static Callable LoadIC(Isolate* isolate);
- static Callable LoadIC_Uninitialized(Isolate* isolate);
- static Callable LoadICInOptimizedCode(Isolate* isolate);
- static Callable LoadICInOptimizedCode_Noninlined(Isolate* isolate);
static Callable LoadICProtoArray(Isolate* isolate, bool throw_if_nonexistent);
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode);
- static Callable KeyedLoadIC(Isolate* isolate);
- static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
- static Callable KeyedLoadIC_Megamorphic(Isolate* isolate);
static Callable CallIC(Isolate* isolate,
- ConvertReceiverMode mode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable CallICTrampoline(
- Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable StoreGlobalIC(Isolate* isolate, LanguageMode mode);
static Callable StoreGlobalICInOptimizedCode(Isolate* isolate,
LanguageMode mode);
@@ -62,120 +53,53 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable CompareIC(Isolate* isolate, Token::Value op);
static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
- static Callable BinaryOpIC(Isolate* isolate, Token::Value op);
+ static Callable BinaryOperation(Isolate* isolate, Token::Value op);
static Callable ApiGetter(Isolate* isolate);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
- static Callable InstanceOf(Isolate* isolate);
- static Callable OrdinaryHasInstance(Isolate* isolate);
-
- static Callable StringFromCharCode(Isolate* isolate);
-
static Callable GetProperty(Isolate* isolate);
- static Callable ToBoolean(Isolate* isolate);
-
- static Callable ToNumber(Isolate* isolate);
- static Callable NonNumberToNumber(Isolate* isolate);
- static Callable StringToNumber(Isolate* isolate);
- static Callable ToString(Isolate* isolate);
- static Callable ToName(Isolate* isolate);
- static Callable ToInteger(Isolate* isolate);
- static Callable ToLength(Isolate* isolate);
- static Callable ToObject(Isolate* isolate);
static Callable NonPrimitiveToPrimitive(
Isolate* isolate, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
static Callable OrdinaryToPrimitive(Isolate* isolate,
OrdinaryToPrimitiveHint hint);
static Callable NumberToString(Isolate* isolate);
- static Callable Add(Isolate* isolate);
- static Callable Subtract(Isolate* isolate);
- static Callable Multiply(Isolate* isolate);
- static Callable Divide(Isolate* isolate);
- static Callable Modulus(Isolate* isolate);
- static Callable ShiftRight(Isolate* isolate);
- static Callable ShiftRightLogical(Isolate* isolate);
- static Callable ShiftLeft(Isolate* isolate);
- static Callable BitwiseAnd(Isolate* isolate);
- static Callable BitwiseOr(Isolate* isolate);
- static Callable BitwiseXor(Isolate* isolate);
- static Callable LessThan(Isolate* isolate);
- static Callable LessThanOrEqual(Isolate* isolate);
- static Callable GreaterThan(Isolate* isolate);
- static Callable GreaterThanOrEqual(Isolate* isolate);
- static Callable Equal(Isolate* isolate);
- static Callable StrictEqual(Isolate* isolate);
-
static Callable StringAdd(Isolate* isolate,
StringAddFlags flags = STRING_ADD_CHECK_NONE,
PretenureFlag pretenure_flag = NOT_TENURED);
- static Callable StringCharAt(Isolate* isolate);
- static Callable StringCharCodeAt(Isolate* isolate);
static Callable StringCompare(Isolate* isolate, Token::Value token);
- static Callable StringEqual(Isolate* isolate);
- static Callable StringLessThan(Isolate* isolate);
- static Callable StringLessThanOrEqual(Isolate* isolate);
- static Callable StringGreaterThan(Isolate* isolate);
- static Callable StringGreaterThanOrEqual(Isolate* isolate);
static Callable SubString(Isolate* isolate);
- static Callable StringIndexOf(Isolate* isolate);
-
- static Callable RegExpReplace(Isolate* isolate);
- static Callable RegExpSplit(Isolate* isolate);
- static Callable ClassOf(Isolate* isolate);
- static Callable Typeof(Isolate* isolate);
- static Callable GetSuperConstructor(Isolate* isolate);
-
- static Callable FastCloneRegExp(Isolate* isolate);
static Callable FastCloneShallowArray(Isolate* isolate,
AllocationSiteMode allocation_mode);
- static Callable FastCloneShallowObject(Isolate* isolate);
static Callable FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type);
- static Callable FastNewClosure(Isolate* isolate);
- static Callable FastNewObject(Isolate* isolate);
- static Callable FastNewRestParameter(Isolate* isolate);
- static Callable FastNewSloppyArguments(Isolate* isolate);
- static Callable FastNewStrictArguments(Isolate* isolate);
-
- static Callable ForInPrepare(Isolate* isolate);
- static Callable ForInNext(Isolate* isolate);
-
- static Callable CopyFastSmiOrObjectElements(Isolate* isolate);
- static Callable GrowFastDoubleElements(Isolate* isolate);
- static Callable GrowFastSmiOrObjectElements(Isolate* isolate);
-
- static Callable NewUnmappedArgumentsElements(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
- ConvertReceiverMode mode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable CallWithArrayLike(Isolate* isolate);
static Callable CallWithSpread(Isolate* isolate);
static Callable CallFunction(
- Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable CallVarargs(Isolate* isolate);
static Callable CallForwardVarargs(Isolate* isolate);
static Callable CallFunctionForwardVarargs(Isolate* isolate);
static Callable Construct(Isolate* isolate);
static Callable ConstructWithSpread(Isolate* isolate);
static Callable ConstructFunction(Isolate* isolate);
+ static Callable ConstructVarargs(Isolate* isolate);
static Callable ConstructForwardVarargs(Isolate* isolate);
static Callable ConstructFunctionForwardVarargs(Isolate* isolate);
- static Callable CreateIterResultObject(Isolate* isolate);
- static Callable HasProperty(Isolate* isolate);
- static Callable ForInFilter(Isolate* isolate);
static Callable InterpreterPushArgsThenCall(Isolate* isolate,
ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode,
InterpreterPushArgsMode mode);
static Callable InterpreterPushArgsThenConstruct(
Isolate* isolate, InterpreterPushArgsMode mode);
@@ -188,11 +112,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArrayPush(Isolate* isolate);
static Callable ArrayShift(Isolate* isolate);
static Callable FunctionPrototypeBind(Isolate* isolate);
- static Callable PromiseHandleReject(Isolate* isolate);
-
- static Callable AsyncGeneratorResolve(Isolate* isolate);
- static Callable AsyncGeneratorReject(Isolate* isolate);
- static Callable AsyncGeneratorResumeNext(Isolate* isolate);
+ static Callable TransitionElementsKind(Isolate* isolate, ElementsKind from,
+ ElementsKind to, bool is_jsarray);
};
} // namespace internal
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index edfe2de86c..465643dd12 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -74,9 +74,8 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
} else {
SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
}
- CallRuntime(
- Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
- HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
+ CallRuntime(Runtime::kGlobalPrint, SmiConstant(0),
+ HeapConstant(factory()->InternalizeUtf8String(&(buffer[0]))));
}
DebugBreak();
Goto(&ok);
@@ -154,9 +153,12 @@ Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_TEST(rootName, name) \
- Node* CodeStubAssembler::Is##name(Node* value) { \
- return WordEqual(value, name##Constant()); \
+#define HEAP_CONSTANT_TEST(rootName, name) \
+ Node* CodeStubAssembler::Is##name(Node* value) { \
+ return WordEqual(value, name##Constant()); \
+ } \
+ Node* CodeStubAssembler::IsNot##name(Node* value) { \
+ return WordNotEqual(value, name##Constant()); \
}
HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST);
#undef HEAP_CONSTANT_TEST
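The HEAP_CONSTANT_TEST hunk above now emits an IsNot##name twin next to each Is##name predicate. A minimal standalone sketch of that X-macro pairing follows; the list, values and helpers are made up for illustration and have no relation to V8's HEAP_CONSTANT_LIST.

// Sketch of pairing Is##name / IsNot##name predicates via an X-macro.
#include <iostream>

#define CONSTANT_LIST(V) \
  V(0, Zero)             \
  V(42, Answer)

#define CONSTANT_TEST(value, name)                 \
  bool Is##name(int v) { return v == (value); }    \
  bool IsNot##name(int v) { return v != (value); }
CONSTANT_LIST(CONSTANT_TEST)
#undef CONSTANT_TEST

int main() {
  // Both directions of the predicate come from a single list entry.
  std::cout << std::boolalpha << IsAnswer(42) << " " << IsNotZero(42) << "\n";  // true true
  return 0;
}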
@@ -171,7 +173,7 @@ Node* CodeStubAssembler::StaleRegisterConstant() {
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
- return SmiConstant(Smi::FromInt(value));
+ return SmiConstant(value);
} else {
DCHECK_EQ(INTPTR_PARAMETERS, mode);
return IntPtrConstant(value);
@@ -198,6 +200,10 @@ Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
return IntPtrAdd(value, IntPtrConstant(1));
}
+Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
+ return (mode == SMI_PARAMETERS) ? TaggedIsSmi(value) : Int32Constant(1);
+}
+
Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
// value && !(value & (value - 1))
return WordEqual(
@@ -435,7 +441,7 @@ Node* CodeStubAssembler::SmiFromWord32(Node* value) {
Node* CodeStubAssembler::SmiTag(Node* value) {
int32_t constant_value;
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
- return SmiConstant(Smi::FromInt(constant_value));
+ return SmiConstant(constant_value);
}
return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
}
@@ -672,6 +678,7 @@ void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
Label* possibly_elements) {
+ CSA_SLOW_ASSERT(this, IsMap(receiver_map));
VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
Label loop_body(this, &var_map);
Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
@@ -948,12 +955,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
// Only null, undefined and document.all have the undetectable bit set,
// so we can return false immediately when that bit is set.
- Node* value_map_bitfield = LoadMapBitField(value_map);
- Node* value_map_undetectable =
- Word32And(value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
-
- // Check if the {value} is undetectable.
- GotoIfNot(Word32Equal(value_map_undetectable, Int32Constant(0)), if_false);
+ GotoIf(IsUndetectableMap(value_map), if_false);
// We still need to handle numbers specially, but all other {value}s
// that make it here yield true.
@@ -1093,7 +1095,7 @@ Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
}
Node* CodeStubAssembler::LoadProperties(Node* object) {
- return LoadObjectField(object, JSObject::kPropertiesOffset);
+ return LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
}
Node* CodeStubAssembler::LoadElements(Node* object) {
@@ -1210,25 +1212,6 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
return result.value();
}
-Node* CodeStubAssembler::LoadSharedFunctionInfoSpecialField(
- Node* shared, int offset, ParameterMode mode) {
- if (Is64()) {
- Node* result = LoadObjectField(shared, offset, MachineType::Int32());
- if (mode == SMI_PARAMETERS) {
- result = SmiTag(result);
- } else {
- result = ChangeUint32ToWord(result);
- }
- return result;
- } else {
- Node* result = LoadObjectField(shared, offset);
- if (mode != SMI_PARAMETERS) {
- result = SmiUntag(result);
- }
- return result;
- }
-}
-
Node* CodeStubAssembler::LoadNameHashField(Node* name) {
CSA_ASSERT(this, IsName(name));
return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
@@ -1237,9 +1220,7 @@ Node* CodeStubAssembler::LoadNameHashField(Node* name) {
Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
Node* hash_field = LoadNameHashField(name);
if (if_hash_not_computed != nullptr) {
- GotoIf(Word32Equal(
- Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
- Int32Constant(0)),
+ GotoIf(IsSetWord32(hash_field, Name::kHashNotComputedMask),
if_hash_not_computed);
}
return Word32Shr(hash_field, Int32Constant(Name::kHashShift));
@@ -1284,7 +1265,7 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
ParameterMode parameter_mode) {
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
return Load(MachineType::AnyTagged(), object, offset);
}
@@ -1349,13 +1330,14 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
return AllocateHeapNumberWithValue(value);
default:
UNREACHABLE();
- return nullptr;
}
}
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
+ CSA_SLOW_ASSERT(this, IsFixedArray(object));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
#if V8_TARGET_LITTLE_ENDIAN
@@ -1363,7 +1345,7 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
header_size += kPointerSize / 2;
}
#endif
- Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
if (Is64()) {
return Load(MachineType::Int32(), object, offset);
@@ -1375,10 +1357,12 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
+ CSA_SLOW_ASSERT(this, IsFixedDoubleArray(object));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
CSA_ASSERT(this, IsFixedDoubleArray(object));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(index_node, HOLEY_DOUBLE_ELEMENTS,
parameter_mode, header_size);
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
}
@@ -1542,11 +1526,15 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
WriteBarrierMode barrier_mode,
int additional_offset,
ParameterMode parameter_mode) {
+ CSA_SLOW_ASSERT(this,
+ Word32Or(IsFixedArray(object), IsPropertyArray(object)));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
+ STATIC_ASSERT(FixedArray::kHeaderSize == PropertyArray::kHeaderSize);
int header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
if (barrier_mode == SKIP_WRITE_BARRIER) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
@@ -1559,8 +1547,9 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
Node* offset =
- ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
+ ElementOffsetFromIndex(index_node, PACKED_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
MachineRepresentation rep = MachineRepresentation::kFloat64;
return StoreNoWriteBarrier(rep, object, offset, value);
@@ -1620,6 +1609,7 @@ Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
CodeStubArguments& args,
Variable& arg_index,
Label* bailout) {
+ CSA_SLOW_ASSERT(this, IsJSArray(array));
Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
Label pre_bailout(this);
Label success(this);
@@ -1672,24 +1662,25 @@ void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
ParameterMode mode, Label* bailout,
Node* elements, Node* index,
Node* value) {
- if (IsFastSmiElementsKind(kind)) {
+ if (IsSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(value), bailout);
- } else if (IsFastDoubleElementsKind(kind)) {
+ } else if (IsDoubleElementsKind(kind)) {
GotoIfNotNumber(value, bailout);
}
- if (IsFastDoubleElementsKind(kind)) {
+ if (IsDoubleElementsKind(kind)) {
Node* double_value = ChangeNumberToFloat64(value);
StoreFixedDoubleArrayElement(elements, index,
Float64SilenceNaN(double_value), mode);
} else {
WriteBarrierMode barrier_mode =
- IsFastSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ IsSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode);
}
}
void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
Node* value, Label* bailout) {
+ CSA_SLOW_ASSERT(this, IsJSArray(array));
Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
ParameterMode mode = OptimalParameterMode();
VARIABLE(var_length, OptimalParameterRepresentation(),
@@ -1711,6 +1702,31 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
}
+Node* CodeStubAssembler::AllocateCellWithValue(Node* value,
+ WriteBarrierMode mode) {
+ Node* result = Allocate(Cell::kSize, kNone);
+ StoreMapNoWriteBarrier(result, Heap::kCellMapRootIndex);
+ StoreCellValue(result, value, mode);
+ return result;
+}
+
+Node* CodeStubAssembler::LoadCellValue(Node* cell) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
+ return LoadObjectField(cell, Cell::kValueOffset);
+}
+
+Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
+ WriteBarrierMode mode) {
+ CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
+ DCHECK(mode == SKIP_WRITE_BARRIER || mode == UPDATE_WRITE_BARRIER);
+
+ if (mode == UPDATE_WRITE_BARRIER) {
+ return StoreObjectField(cell, Cell::kValueOffset, value);
+ } else {
+ return StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, value);
+ }
+}
+
Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
Heap::RootListIndex heap_map_index =
@@ -1737,7 +1753,7 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- SmiConstant(Smi::FromInt(length)));
+ SmiConstant(length));
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
@@ -1745,10 +1761,26 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
return result;
}
+Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
+ Label out(this);
+ VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
+
+ GotoIf(WordEqual(object, SmiConstant(0)), &out);
+ GotoIf(IsFixedArray(object), &out);
+
+ var_result.Bind(Int32Constant(0));
+ Goto(&out);
+
+ BIND(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
ParameterMode mode,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
+ CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(length, mode));
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
@@ -1808,7 +1840,7 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
- SmiConstant(Smi::FromInt(length)));
+ SmiConstant(length));
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
@@ -1819,6 +1851,8 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
ParameterMode mode,
AllocationFlags flags) {
+ CSA_SLOW_ASSERT(this, IsFixedArray(context));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(length, mode));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -1874,7 +1908,9 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
Node* CodeStubAssembler::AllocateSlicedString(
Heap::RootListIndex map_root_index, Node* length, Node* parent,
Node* offset) {
+ CSA_ASSERT(this, IsString(parent));
CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, TaggedIsSmi(offset));
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
@@ -1907,6 +1943,8 @@ Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
Node* length, Node* first,
Node* second,
AllocationFlags flags) {
+ CSA_ASSERT(this, IsString(first));
+ CSA_ASSERT(this, IsString(second));
CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
@@ -1946,6 +1984,9 @@ Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first,
Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
Node* right, AllocationFlags flags) {
+ CSA_ASSERT(this, IsFixedArray(context));
+ CSA_ASSERT(this, IsString(left));
+ CSA_ASSERT(this, IsString(right));
CSA_ASSERT(this, TaggedIsSmi(length));
// Added string can be a cons string.
Comment("Allocating ConsString");
@@ -1973,10 +2014,8 @@ Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
Label two_byte_map(this);
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this, &result);
- GotoIf(Word32NotEqual(Word32And(anded_instance_types,
- Int32Constant(kStringEncodingMask |
- kOneByteDataHintTag)),
- Int32Constant(0)),
+ GotoIf(IsSetWord32(anded_instance_types,
+ kStringEncodingMask | kOneByteDataHintTag),
&one_byte_map);
Branch(Word32NotEqual(Word32And(xored_instance_types,
Int32Constant(kStringEncodingMask |
@@ -2001,10 +2040,15 @@ Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* index, Node* input) {
- Node* const max_length =
- SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
+ CSA_ASSERT(this, IsFixedArray(context));
+ CSA_ASSERT(this, TaggedIsSmi(index));
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, IsString(input));
+
+#ifdef DEBUG
+ Node* const max_length = SmiConstant(JSArray::kInitialMaxFastElementArray);
CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
- USE(max_length);
+#endif // DEBUG
// Allocate the JSRegExpResult.
// TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
@@ -2020,7 +2064,7 @@ Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
// Initialize the header before allocating the elements.
Node* const empty_array = EmptyFixedArrayConstant();
DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex));
- StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOffset,
+ StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOrHashOffset,
empty_array);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, empty_array);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
@@ -2030,7 +2074,7 @@ Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* const zero = IntPtrConstant(0);
Node* const length_intptr = SmiUntag(length);
- const ElementsKind elements_kind = FAST_ELEMENTS;
+ const ElementsKind elements_kind = PACKED_ELEMENTS;
Node* const elements = AllocateFixedArray(elements_kind, length_intptr);
StoreObjectField(result, JSArray::kElementsOffset, elements);
@@ -2077,8 +2121,6 @@ Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
SmiTag(capacity), SKIP_WRITE_BARRIER);
// Initialize Dictionary fields.
Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
- StoreFixedArrayElement(result, NameDictionary::kMaxNumberKeyIndex, filler,
- SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
SmiConstant(PropertyDetails::kInitialIndex),
SKIP_WRITE_BARRIER);
@@ -2106,7 +2148,7 @@ Node* CodeStubAssembler::CopyNameDictionary(Node* dictionary,
large_object_fallback);
Node* properties = AllocateNameDictionaryWithCapacity(capacity);
Node* length = SmiUntag(LoadFixedArrayBaseLength(dictionary));
- CopyFixedArrayElements(FAST_ELEMENTS, dictionary, properties, length,
+ CopyFixedArrayElements(PACKED_ELEMENTS, dictionary, properties, length,
SKIP_WRITE_BARRIER, INTPTR_PARAMETERS);
return properties;
}
@@ -2125,20 +2167,25 @@ Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
Node* size, Node* properties,
Node* elements) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocateJSObjectFromMap.
if (properties == nullptr) {
CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
- StoreObjectFieldRoot(object, JSObject::kPropertiesOffset,
+ StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
- StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+ CSA_ASSERT(this, Word32Or(Word32Or(IsPropertyArray(properties),
+ IsDictionary(properties)),
+ IsEmptyFixedArray(properties)));
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOrHashOffset,
properties);
}
if (elements == nullptr) {
StoreObjectFieldRoot(object, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
+ CSA_ASSERT(this, IsFixedArray(elements));
StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
}
InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize);
@@ -2146,6 +2193,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
Node* size, int start_offset) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
// TODO(cbruni): activate in-object slack tracking machinery.
Comment("InitializeJSObjectBody");
Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
@@ -2175,6 +2223,8 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
+ CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_ASSERT(this, IsMap(array_map));
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
@@ -2191,6 +2241,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
Node* capacity, ParameterMode capacity_mode) {
Comment("begin allocation of JSArray with elements");
+ CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_ASSERT(this, IsMap(array_map));
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
@@ -2217,20 +2269,23 @@ Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind,
Node* length,
Node* allocation_site,
Node* size_in_bytes) {
+ CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_ASSERT(this, IsMap(array_map));
+
// Allocate space for the JSArray and the elements FixedArray in one go.
Node* array = AllocateInNewSpace(size_in_bytes);
Comment("write JSArray headers");
StoreMapNoWriteBarrier(array, array_map);
- CSA_ASSERT(this, TaggedIsSmi(length));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
- StoreObjectFieldRoot(array, JSArray::kPropertiesOffset,
+ StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
if (allocation_site != nullptr) {
- InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
+ InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize),
+ allocation_site);
}
return array;
}
@@ -2239,12 +2294,16 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
Node* capacity, Node* length,
Node* allocation_site,
ParameterMode capacity_mode) {
+ CSA_SLOW_ASSERT(this, IsMap(array_map));
+ CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode));
+
Node *array = nullptr, *elements = nullptr;
if (IsIntPtrOrSmiConstantZero(capacity)) {
// Array is empty. Use the shared empty fixed array instead of allocating a
// new one.
array = AllocateUninitializedJSArrayWithoutElements(kind, array_map, length,
- nullptr);
+ allocation_site);
StoreObjectFieldRoot(array, JSArray::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
@@ -2253,8 +2312,8 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
kind, array_map, length, allocation_site, capacity, capacity_mode);
// Setup elements object.
Heap::RootListIndex elements_map_index =
- IsFastDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
- : Heap::kFixedArrayMapRootIndex;
+ IsDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
+ : Heap::kFixedArrayMapRootIndex;
DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements, elements_map_index);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
@@ -2272,13 +2331,15 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
AllocationFlags flags) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
+ if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
// Allocate both array and elements object, and initialize the JSArray.
Node* array = Allocate(total_size, flags);
- Heap::RootListIndex map_index = IsFastDoubleElementsKind(kind)
+ Heap::RootListIndex map_index = IsDoubleElementsKind(kind)
? Heap::kFixedDoubleArrayMapRootIndex
: Heap::kFixedArrayMapRootIndex;
DCHECK(Heap::RootIsImmortalImmovable(map_index));
@@ -2288,10 +2349,50 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
return array;
}
+Node* CodeStubAssembler::AllocatePropertyArray(Node* capacity_node,
+ ParameterMode mode,
+ AllocationFlags flags) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
+ CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
+ IntPtrOrSmiConstant(0, mode), mode));
+ Node* total_size = GetPropertyArrayAllocationSize(capacity_node, mode);
+
+ Node* array = Allocate(total_size, flags);
+ Heap::RootListIndex map_index = Heap::kPropertyArrayMapRootIndex;
+ DCHECK(Heap::RootIsImmortalImmovable(map_index));
+ StoreMapNoWriteBarrier(array, map_index);
+ StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
+ ParameterToTagged(capacity_node, mode));
+ return array;
+}
+
+void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
+ Node* from_node,
+ Node* to_node,
+ ParameterMode mode) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
+ CSA_SLOW_ASSERT(this, IsPropertyArray(array));
+ STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
+ ElementsKind kind = PACKED_ELEMENTS;
+ Node* value = LoadRoot(Heap::kUndefinedValueRootIndex);
+
+ BuildFastFixedArrayForEach(array, kind, from_node, to_node,
+ [this, value](Node* array, Node* offset) {
+ StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, array,
+ offset, value);
+ },
+ mode);
+}
+
void CodeStubAssembler::FillFixedArrayWithValue(
ElementsKind kind, Node* array, Node* from_node, Node* to_node,
Heap::RootListIndex value_root_index, ParameterMode mode) {
- bool is_double = IsFastDoubleElementsKind(kind);
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, kind));
+ bool is_double = IsDoubleElementsKind(kind);
DCHECK(value_root_index == Heap::kTheHoleValueRootIndex ||
value_root_index == Heap::kUndefinedValueRootIndex);
DCHECK_IMPLIES(is_double, value_root_index == Heap::kTheHoleValueRootIndex);
@@ -2334,6 +2435,10 @@ void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode, ParameterMode mode) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(element_count, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(to_array, to_kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
Comment("[ CopyFixedArrayElements");
@@ -2343,16 +2448,15 @@ void CodeStubAssembler::CopyFixedArrayElements(
DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
Label done(this);
- bool from_double_elements = IsFastDoubleElementsKind(from_kind);
- bool to_double_elements = IsFastDoubleElementsKind(to_kind);
- bool element_size_matches =
- Is64() ||
- IsFastDoubleElementsKind(from_kind) == IsFastDoubleElementsKind(to_kind);
+ bool from_double_elements = IsDoubleElementsKind(from_kind);
+ bool to_double_elements = IsDoubleElementsKind(to_kind);
+ bool element_size_matches = Is64() || IsDoubleElementsKind(from_kind) ==
+ IsDoubleElementsKind(to_kind);
bool doubles_to_objects_conversion =
- IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind);
+ IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind);
bool needs_write_barrier =
- doubles_to_objects_conversion || (barrier_mode == UPDATE_WRITE_BARRIER &&
- IsFastObjectElementsKind(to_kind));
+ doubles_to_objects_conversion ||
+ (barrier_mode == UPDATE_WRITE_BARRIER && IsObjectElementsKind(to_kind));
Node* double_hole =
Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
@@ -2410,7 +2514,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
// The target elements array is already preinitialized with holes, so we
// can just proceed with the next iteration.
if_hole = &next_iter;
- } else if (IsFastDoubleElementsKind(to_kind)) {
+ } else if (IsDoubleElementsKind(to_kind)) {
if_hole = &store_double_hole;
} else {
// In all the other cases don't check for holes and copy the data as is.
@@ -2460,16 +2564,51 @@ void CodeStubAssembler::CopyFixedArrayElements(
}
BIND(&done);
- IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
Comment("] CopyFixedArrayElements");
}
+void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
+ Node* to_array,
+ Node* property_count,
+ WriteBarrierMode barrier_mode,
+ ParameterMode mode) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(property_count, mode));
+ CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array),
+ IsEmptyFixedArray(from_array)));
+ CSA_SLOW_ASSERT(this, IsPropertyArray(to_array));
+ Comment("[ CopyPropertyArrayValues");
+
+ bool needs_write_barrier = barrier_mode == UPDATE_WRITE_BARRIER;
+ Node* start = IntPtrOrSmiConstant(0, mode);
+ ElementsKind kind = PACKED_ELEMENTS;
+ BuildFastFixedArrayForEach(
+ from_array, kind, start, property_count,
+ [this, to_array, needs_write_barrier](Node* array, Node* offset) {
+ Node* value = Load(MachineType::AnyTagged(), array, offset);
+
+ if (needs_write_barrier) {
+ Store(to_array, offset, value);
+ } else {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, offset,
+ value);
+ }
+ },
+ mode);
+ Comment("] CopyPropertyArrayValues");
+}
+
void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
Node* from_index, Node* to_index,
Node* character_count,
String::Encoding from_encoding,
String::Encoding to_encoding,
ParameterMode mode) {
+ // Cannot assert IsString(from_string) and IsString(to_string) here because
+ // CSA::SubString can pass in faked sequential strings when handling external
+ // subject strings.
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(character_count, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(from_index, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(to_index, mode));
bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
DCHECK_IMPLIES(to_one_byte, from_one_byte);
@@ -2528,10 +2667,11 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
ElementsKind from_kind,
ElementsKind to_kind,
Label* if_hole) {
- if (IsFastDoubleElementsKind(from_kind)) {
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ if (IsDoubleElementsKind(from_kind)) {
Node* value =
LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
- if (!IsFastDoubleElementsKind(to_kind)) {
+ if (!IsDoubleElementsKind(to_kind)) {
value = AllocateHeapNumberWithValue(value);
}
return value;
@@ -2541,8 +2681,8 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
if (if_hole) {
GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
}
- if (IsFastDoubleElementsKind(to_kind)) {
- if (IsFastSmiElementsKind(from_kind)) {
+ if (IsDoubleElementsKind(to_kind)) {
+ if (IsSmiElementsKind(from_kind)) {
value = SmiToFloat64(value);
} else {
value = LoadHeapNumberValue(value);
@@ -2554,6 +2694,7 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
ParameterMode mode) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(old_capacity, mode));
Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode);
Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode);
Node* padding = IntPtrOrSmiConstant(16, mode);
@@ -2563,6 +2704,9 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
ElementsKind kind, Node* key,
Label* bailout) {
+ CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(key));
Node* capacity = LoadFixedArrayBaseLength(elements);
ParameterMode mode = OptimalParameterMode();
@@ -2579,6 +2723,10 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
ParameterMode mode,
Label* bailout) {
Comment("TryGrowElementsCapacity");
+ CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(key, mode));
// If the gap growth is too big, fall back to the runtime.
Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
@@ -2596,6 +2744,11 @@ Node* CodeStubAssembler::GrowElementsCapacity(
Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
Comment("[ GrowElementsCapacity");
+ CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(new_capacity, mode));
+
// If size of the allocation for the new capacity doesn't fit in a page
// that we can bump-pointer allocate from, fall back to the runtime.
int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
@@ -2617,24 +2770,23 @@ Node* CodeStubAssembler::GrowElementsCapacity(
return new_elements;
}
-void CodeStubAssembler::InitializeAllocationMemento(Node* base_allocation,
- int base_allocation_size,
+void CodeStubAssembler::InitializeAllocationMemento(Node* base,
+ Node* base_allocation_size,
Node* allocation_site) {
+ Comment("[Initialize AllocationMemento");
+ Node* memento = InnerAllocate(base, base_allocation_size);
+ StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
StoreObjectFieldNoWriteBarrier(
- base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
- HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- StoreObjectFieldNoWriteBarrier(
- base_allocation,
- AllocationMemento::kAllocationSiteOffset + base_allocation_size,
- allocation_site);
+ memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
if (FLAG_allocation_site_pretenuring) {
Node* count = LoadObjectField(allocation_site,
AllocationSite::kPretenureCreateCountOffset);
- Node* incremented_count = SmiAdd(count, SmiConstant(Smi::FromInt(1)));
+ Node* incremented_count = SmiAdd(count, SmiConstant(1));
StoreObjectFieldNoWriteBarrier(allocation_site,
AllocationSite::kPretenureCreateCountOffset,
incremented_count);
}
+ Comment("]");
}
Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
@@ -2657,8 +2809,7 @@ Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
{
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this);
- Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
- if_valueisnotnumber);
+ Branch(IsHeapNumber(value), &if_valueisheapnumber, if_valueisnotnumber);
BIND(&if_valueisheapnumber);
{
@@ -2694,8 +2845,7 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
BIND(&if_valueisnotnumber);
{
// Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
+ var_value.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, value));
Goto(&loop);
}
}
@@ -2730,7 +2880,7 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this),
if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
+ Branch(IsHeapNumber(value), &if_valueisheapnumber,
&if_valueisnotheapnumber);
BIND(&if_valueisheapnumber);
@@ -2743,8 +2893,8 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
BIND(&if_valueisnotheapnumber);
{
// Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
+ var_value.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, value));
Goto(&loop);
}
}
@@ -2903,8 +3053,7 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
BIND(&if_valueisnotnullorundefined);
{
// Convert the {value} to a String.
- Callable callable = CodeFactory::ToString(isolate());
- var_value.Bind(CallStub(callable, context, value));
+ var_value.Bind(CallBuiltin(Builtins::kToString, context, value));
Goto(&if_valueisstring);
}
}
@@ -2913,8 +3062,7 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
{
// The {value} is either null or undefined.
CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(
- method_name, TENURED)));
+ StringConstant(method_name));
Unreachable();
}
}
@@ -2931,6 +3079,7 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
}
Node* CodeStubAssembler::ChangeNumberToFloat64(Node* value) {
+ CSA_SLOW_ASSERT(this, IsNumber(value));
VARIABLE(result, MachineRepresentation::kFloat64);
Label smi(this);
Label done(this, &result);
@@ -2950,6 +3099,7 @@ Node* CodeStubAssembler::ChangeNumberToFloat64(Node* value) {
}
Node* CodeStubAssembler::ChangeNumberToIntPtr(Node* value) {
+ CSA_SLOW_ASSERT(this, IsNumber(value));
VARIABLE(result, MachineType::PointerRepresentation());
Label smi(this), done(this, &result);
GotoIf(TaggedIsSmi(value), &smi);
@@ -2988,7 +3138,7 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
? &done_loop
: &done_throw);
- // Load the mape of the {value}.
+ // Load the map of the {value}.
Node* value_map = LoadMap(value);
// Load the instance type of the {value}.
@@ -3013,16 +3163,13 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
GotoIf(WordEqual(value_map, BooleanMapConstant()), &done_loop);
break;
case PrimitiveType::kNumber:
- GotoIf(
- Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
- &done_loop);
+ GotoIf(WordEqual(value_map, HeapNumberMapConstant()), &done_loop);
break;
case PrimitiveType::kString:
GotoIf(IsStringInstanceType(value_instance_type), &done_loop);
break;
case PrimitiveType::kSymbol:
- GotoIf(Word32Equal(value_instance_type, Int32Constant(SYMBOL_TYPE)),
- &done_loop);
+ GotoIf(WordEqual(value_map, SymbolMapConstant()), &done_loop);
break;
}
Goto(&done_throw);
@@ -3049,16 +3196,22 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
CHECK_NOT_NULL(primitive_name);
// The {value} is not a compatible receiver for this method.
- CallRuntime(Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kNotGeneric),
- CStringConstant(method_name), CStringConstant(primitive_name));
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kNotGeneric, method_name,
+ primitive_name);
}
BIND(&done_loop);
return var_value.value();
}
+void CodeStubAssembler::ThrowIncompatibleMethodReceiver(Node* context,
+ const char* method_name,
+ Node* receiver) {
+ CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ StringConstant(method_name), receiver);
+ Unreachable();
+}
+
Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name) {
@@ -3076,21 +3229,45 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
- CallRuntime(
- Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)),
- value);
- Unreachable();
+ ThrowIncompatibleMethodReceiver(context, method_name, value);
BIND(&out);
return var_value_map.value();
}
+void CodeStubAssembler::ThrowTypeError(Node* context,
+ MessageTemplate::Template message,
+ char const* arg0, char const* arg1) {
+ Node* arg0_node = nullptr;
+ if (arg0) arg0_node = StringConstant(arg0);
+ Node* arg1_node = nullptr;
+ if (arg1) arg1_node = StringConstant(arg1);
+ ThrowTypeError(context, message, arg0_node, arg1_node);
+}
+
+void CodeStubAssembler::ThrowTypeError(Node* context,
+ MessageTemplate::Template message,
+ Node* arg0, Node* arg1, Node* arg2) {
+ Node* template_index = SmiConstant(message);
+ if (arg0 == nullptr) {
+ CallRuntime(Runtime::kThrowTypeError, context, template_index);
+ } else if (arg1 == nullptr) {
+ CallRuntime(Runtime::kThrowTypeError, context, template_index, arg0);
+ } else if (arg2 == nullptr) {
+ CallRuntime(Runtime::kThrowTypeError, context, template_index, arg0, arg1);
+ } else {
+ CallRuntime(Runtime::kThrowTypeError, context, template_index, arg0, arg1,
+ arg2);
+ }
+ Unreachable();
+}
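// Editor's note: a brief usage sketch of the helpers added above, mirroring
// the ToThisValue() call site earlier in this hunk:
//
//   ThrowTypeError(context, MessageTemplate::kNotGeneric, method_name,
//                  primitive_name);
//
// The char* overload interns its arguments via StringConstant() and forwards
// to the Node* overload, which picks the Runtime::kThrowTypeError call arity
// from how many arguments are non-null and then ends the block with
// Unreachable().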
+
Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
uint32_t mask =
1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
@@ -3105,15 +3282,12 @@ Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
Node* CodeStubAssembler::IsDictionaryMap(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
- return Word32NotEqual(IsSetWord32<Map::DictionaryMap>(bit_field3),
- Int32Constant(0));
+ return IsSetWord32<Map::DictionaryMap>(bit_field3);
}
Node* CodeStubAssembler::IsCallableMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return Word32NotEqual(
- Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsCallable)),
- Int32Constant(0));
+ return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsCallable);
}
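// Editor's note: this hunk, like several below, folds the open-coded bit test
// Word32NotEqual(Word32And(x, mask), Int32Constant(0)) into IsSetWord32(x,
// mask). A standalone sketch of the equivalence on plain 32-bit integers; an
// illustration only, not part of this file:
#include <cstdint>
namespace editor_sketch {
// True iff any bit selected by |mask| is set in |value|, which is exactly
// what the longer Word32And / Word32NotEqual sequence used to test.
inline bool IsSetWord32Sketch(uint32_t value, uint32_t mask) {
  return (value & mask) != 0;
}
}  // namespace editor_sketch
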
Node* CodeStubAssembler::IsDeprecatedMap(Node* map) {
@@ -3121,15 +3295,22 @@ Node* CodeStubAssembler::IsDeprecatedMap(Node* map) {
return IsSetWord32<Map::Deprecated>(LoadMapBitField3(map));
}
+Node* CodeStubAssembler::IsUndetectableMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsUndetectable);
+}
+
Node* CodeStubAssembler::IsCallable(Node* object) {
return IsCallableMap(LoadMap(object));
}
Node* CodeStubAssembler::IsConstructorMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
- return Word32NotEqual(
- Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsConstructor)),
- Int32Constant(0));
+ return IsSetWord32(LoadMapBitField(map), 1 << Map::kIsConstructor);
+}
+
+Node* CodeStubAssembler::IsConstructor(Node* object) {
+ return IsConstructorMap(LoadMap(object));
}
Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
@@ -3182,9 +3363,7 @@ Node* CodeStubAssembler::IsShortExternalStringInstanceType(
Node* instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kShortExternalStringTag != 0);
- return Word32NotEqual(
- Word32And(instance_type, Int32Constant(kShortExternalStringMask)),
- Int32Constant(0));
+ return IsSetWord32(instance_type, kShortExternalStringMask);
}
Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
@@ -3212,14 +3391,19 @@ Node* CodeStubAssembler::IsJSObject(Node* object) {
return IsJSObjectMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsJSProxy(Node* object) {
+ Node* object_map = LoadMap(object);
+ Node* object_instance_type = LoadMapInstanceType(object_map);
+
+ return InstanceTypeEqual(object_instance_type, JS_PROXY_TYPE);
+}
+
Node* CodeStubAssembler::IsJSGlobalProxy(Node* object) {
return Word32Equal(LoadInstanceType(object),
Int32Constant(JS_GLOBAL_PROXY_TYPE));
}
-Node* CodeStubAssembler::IsMap(Node* map) {
- return HasInstanceType(map, MAP_TYPE);
-}
+Node* CodeStubAssembler::IsMap(Node* map) { return IsMetaMap(LoadMap(map)); }
Node* CodeStubAssembler::IsJSValueInstanceType(Node* instance_type) {
return Word32Equal(instance_type, Int32Constant(JS_VALUE_TYPE));
@@ -3245,6 +3429,49 @@ Node* CodeStubAssembler::IsJSArrayMap(Node* map) {
return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
+Node* CodeStubAssembler::IsFixedArray(Node* object) {
+ return HasInstanceType(object, FIXED_ARRAY_TYPE);
+}
+
+Node* CodeStubAssembler::IsPropertyArray(Node* object) {
+ return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
+}
+
+// This complicated check is due to elements oddities. If a smi array is empty
+// after Array.p.shift, it is replaced by the empty array constant. If it is
+// later filled with a double element, we try to grow it but pass in a double
+// elements kind. Usually this would cause a size mismatch (since the source
+// fixed array has HOLEY_ELEMENTS and destination has
+// HOLEY_DOUBLE_ELEMENTS), but we don't have to worry about it when the
+// source array is empty.
+// TODO(jgruber): It might be worth creating an empty_double_array constant to
+// simplify this case.
+Node* CodeStubAssembler::IsFixedArrayWithKindOrEmpty(Node* object,
+ ElementsKind kind) {
+ Label out(this);
+ VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
+
+ GotoIf(IsFixedArrayWithKind(object, kind), &out);
+
+ Node* const length = LoadFixedArrayBaseLength(object);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &out);
+
+ var_result.Bind(Int32Constant(0));
+ Goto(&out);
+
+ BIND(&out);
+ return var_result.value();
+}
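// Editor's note: a hedged standalone sketch of the check above against a
// hypothetical backing-store model (the real code inspects instance types);
// the JS-level trigger described in the comment is roughly
// `var a = [1]; a.shift(); a[0] = 1.5;`. Illustration only.
namespace editor_sketch {
struct DemoBackingStore {
  bool is_double_backing;  // FixedDoubleArray vs. plain FixedArray
  int length;
};
// Accept a matching backing store, or any empty one: the shared empty array
// is always a plain FixedArray, regardless of the requested elements kind.
inline bool IsFixedArrayWithKindOrEmptySketch(const DemoBackingStore& store,
                                              bool want_double_kind) {
  return store.is_double_backing == want_double_kind || store.length == 0;
}
}  // namespace editor_sketch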
+
+Node* CodeStubAssembler::IsFixedArrayWithKind(Node* object, ElementsKind kind) {
+ if (IsDoubleElementsKind(kind)) {
+ return IsFixedDoubleArray(object);
+ } else {
+ DCHECK(IsSmiOrObjectElementsKind(kind));
+ return IsFixedArray(object);
+ }
+}
+
Node* CodeStubAssembler::IsWeakCell(Node* object) {
return IsWeakCellMap(LoadMap(object));
}
@@ -3265,10 +3492,22 @@ Node* CodeStubAssembler::IsAccessorPair(Node* object) {
return IsAccessorPairMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsAllocationSite(Node* object) {
+ return IsAllocationSiteMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsAnyHeapNumber(Node* object) {
+ return Word32Or(IsMutableHeapNumber(object), IsHeapNumber(object));
+}
+
Node* CodeStubAssembler::IsHeapNumber(Node* object) {
return IsHeapNumberMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsMutableHeapNumber(Node* object) {
+ return IsMutableHeapNumberMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsFeedbackVector(Node* object) {
return IsFeedbackVectorMap(LoadMap(object));
}
@@ -3283,6 +3522,10 @@ Node* CodeStubAssembler::IsString(Node* object) {
Int32Constant(FIRST_NONSTRING_TYPE));
}
+Node* CodeStubAssembler::IsSymbolInstanceType(Node* instance_type) {
+ return Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE));
+}
+
Node* CodeStubAssembler::IsSymbol(Node* object) {
return IsSymbolMap(LoadMap(object));
}
@@ -3396,7 +3639,7 @@ Node* CodeStubAssembler::IsNumberPositive(Node* number) {
Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index,
ParameterMode parameter_mode) {
- if (parameter_mode == SMI_PARAMETERS) CSA_ASSERT(this, TaggedIsSmi(index));
+ CSA_ASSERT(this, MatchesParameterMode(index, parameter_mode));
CSA_ASSERT(this, IsString(string));
// Translate the {index} into a Word.
@@ -3508,6 +3751,7 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
}
BIND(&if_done);
+ CSA_ASSERT(this, IsString(var_result.value()));
return var_result.value();
}
@@ -3524,7 +3768,7 @@ Node* CodeStubAssembler::AllocAndCopyStringCharacters(Node* context, Node* from,
Label end(this), one_byte_sequential(this), two_byte_sequential(this);
Variable var_result(this, MachineRepresentation::kTagged);
- Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(0);
Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
&two_byte_sequential);
@@ -3594,7 +3838,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// A real substring (substr_length < string_length).
Label single_char(this);
- GotoIf(SmiEqual(substr_length, SmiConstant(Smi::FromInt(1))), &single_char);
+ GotoIf(SmiEqual(substr_length, SmiConstant(1)), &single_char);
// TODO(jgruber): Add an additional case for substring of length == 0?
@@ -3613,8 +3857,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Label next(this);
// Short slice. Copy instead of slicing.
- GotoIf(SmiLessThan(substr_length,
- SmiConstant(Smi::FromInt(SlicedString::kMinLength))),
+ GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
&next);
// Allocate new sliced string.
@@ -3690,7 +3933,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
}
// Equal length - check if {from, to} == {0, str.length}.
- GotoIf(SmiAbove(from, SmiConstant(Smi::kZero)), &runtime);
+ GotoIf(SmiAbove(from, SmiConstant(0)), &runtime);
// Return the original string (substr_length == string_length).
@@ -3710,6 +3953,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
}
BIND(&end);
+ CSA_ASSERT(this, IsString(var_result.value()));
return var_result.value();
}
@@ -3857,47 +4101,52 @@ Node* ToDirectStringAssembler::TryToSequential(StringPointerKind ptr_kind,
return var_result.value();
}
-Node* CodeStubAssembler::TryDerefExternalString(Node* const string,
- Node* const instance_type,
- Label* if_bailout) {
- Label out(this);
-
- CSA_ASSERT(this, IsExternalStringInstanceType(instance_type));
- GotoIf(IsShortExternalStringInstanceType(instance_type), if_bailout);
-
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
-
- Node* resource_data = LoadObjectField(
- string, ExternalString::kResourceDataOffset, MachineType::Pointer());
- Node* const fake_sequential_string =
- IntPtrSub(resource_data,
- IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- return fake_sequential_string;
-}
-
-void CodeStubAssembler::MaybeDerefIndirectString(Variable* var_string,
- Node* instance_type,
- Variable* var_did_something) {
- Label deref(this), done(this, var_did_something);
+void CodeStubAssembler::BranchIfCanDerefIndirectString(Node* string,
+ Node* instance_type,
+ Label* can_deref,
+ Label* cannot_deref) {
+ CSA_ASSERT(this, IsString(string));
Node* representation =
Word32And(instance_type, Int32Constant(kStringRepresentationMask));
- GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), &deref);
- GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), &done);
+ GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref);
+ GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)),
+ cannot_deref);
// Cons string.
- Node* rhs = LoadObjectField(var_string->value(), ConsString::kSecondOffset);
- GotoIf(WordEqual(rhs, EmptyStringConstant()), &deref);
- Goto(&done);
+ Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+ GotoIf(IsEmptyString(rhs), can_deref);
+ Goto(cannot_deref);
+}
+
+void CodeStubAssembler::DerefIndirectString(Variable* var_string,
+ Node* instance_type) {
+#ifdef DEBUG
+ Label can_deref(this), cannot_deref(this);
+ BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref,
+ &cannot_deref);
+ BIND(&cannot_deref);
+ DebugBreak(); // Should be able to dereference string.
+ Goto(&can_deref);
+ BIND(&can_deref);
+#endif // DEBUG
- BIND(&deref);
STATIC_ASSERT(ThinString::kActualOffset == ConsString::kFirstOffset);
var_string->Bind(
LoadObjectField(var_string->value(), ThinString::kActualOffset));
- var_did_something->Bind(IntPtrConstant(1));
- Goto(&done);
+}
- BIND(&done);
+void CodeStubAssembler::MaybeDerefIndirectString(Variable* var_string,
+ Node* instance_type,
+ Label* did_deref,
+ Label* cannot_deref) {
+ Label deref(this);
+ BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref,
+ cannot_deref);
+
+ BIND(&deref);
+ {
+ DerefIndirectString(var_string, instance_type);
+ Goto(did_deref);
+ }
}
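// Editor's note: BranchIfCanDerefIndirectString() above allows the shortcut
// for exactly two shapes -- a ThinString, which always forwards to its actual
// string, and a ConsString whose second part is the empty string -- and
// DerefIndirectString() then performs a single load for both, relying on the
// STATIC_ASSERT that ThinString::kActualOffset == ConsString::kFirstOffset.
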
void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
@@ -3905,13 +4154,24 @@ void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
Variable* var_right,
Node* right_instance_type,
Label* did_something) {
- VARIABLE(var_did_something, MachineType::PointerRepresentation(),
- IntPtrConstant(0));
- MaybeDerefIndirectString(var_left, left_instance_type, &var_did_something);
- MaybeDerefIndirectString(var_right, right_instance_type, &var_did_something);
+ Label did_nothing_left(this), did_something_left(this),
+ didnt_do_anything(this);
+ MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left,
+ &did_nothing_left);
- GotoIf(WordNotEqual(var_did_something.value(), IntPtrConstant(0)),
- did_something);
+ BIND(&did_something_left);
+ {
+ MaybeDerefIndirectString(var_right, right_instance_type, did_something,
+ did_something);
+ }
+
+ BIND(&did_nothing_left);
+ {
+ MaybeDerefIndirectString(var_right, right_instance_type, did_something,
+ &didnt_do_anything);
+ }
+
+ BIND(&didnt_do_anything);
// Fall through if neither string was an indirect string.
}
@@ -3938,6 +4198,10 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
CSA_ASSERT(this, TaggedIsSmi(left_length));
CSA_ASSERT(this, TaggedIsSmi(right_length));
Node* new_length = SmiAdd(left_length, right_length);
+
+ // If new length is greater than String::kMaxLength, goto runtime to
+ // throw. Note: we also need to invalidate the string length protector, so
+ // we can't just throw here directly.
GotoIf(SmiAboveOrEqual(new_length, SmiConstant(String::kMaxLength)),
&runtime);
@@ -3966,14 +4230,8 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
Word32Xor(left_instance_type, right_instance_type);
// Check if both strings have the same encoding and both are sequential.
- GotoIf(Word32NotEqual(Word32And(xored_instance_types,
- Int32Constant(kStringEncodingMask)),
- Int32Constant(0)),
- &runtime);
- GotoIf(Word32NotEqual(Word32And(ored_instance_types,
- Int32Constant(kStringRepresentationMask)),
- Int32Constant(0)),
- &slow);
+ GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime);
+ GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow);
Label two_byte(this);
GotoIf(Word32Equal(Word32And(ored_instance_types,
@@ -3983,11 +4241,10 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
// One-byte sequential string case
Node* new_string =
AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
- CopyStringCharacters(var_left.value(), new_string, SmiConstant(Smi::kZero),
- SmiConstant(Smi::kZero), left_length,
- String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
- SMI_PARAMETERS);
- CopyStringCharacters(var_right.value(), new_string, SmiConstant(Smi::kZero),
+ CopyStringCharacters(var_left.value(), new_string, SmiConstant(0),
+ SmiConstant(0), left_length, String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
+ CopyStringCharacters(var_right.value(), new_string, SmiConstant(0),
left_length, right_length, String::ONE_BYTE_ENCODING,
String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
result.Bind(new_string);
@@ -3998,14 +4255,13 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
// Two-byte sequential string case
new_string =
AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
- CopyStringCharacters(var_left.value(), new_string,
- SmiConstant(Smi::kZero), SmiConstant(Smi::kZero),
- left_length, String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
- CopyStringCharacters(var_right.value(), new_string,
- SmiConstant(Smi::kZero), left_length, right_length,
+ CopyStringCharacters(var_left.value(), new_string, SmiConstant(0),
+ SmiConstant(0), left_length,
String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
SMI_PARAMETERS);
+ CopyStringCharacters(var_right.value(), new_string, SmiConstant(0),
+ left_length, right_length, String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
result.Bind(new_string);
Goto(&done_native);
}
@@ -4082,10 +4338,12 @@ Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
}
BIND(&return_result);
+ CSA_ASSERT(this, IsString(var_result.value()));
return var_result.value();
}
Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
+ CSA_SLOW_ASSERT(this, IsString(input));
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -4093,9 +4351,8 @@ Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
// Check if string has a cached array index.
Node* hash = LoadNameHashField(input);
- Node* bit =
- Word32And(hash, Int32Constant(String::kContainsCachedArrayIndexMask));
- GotoIf(Word32NotEqual(bit, Int32Constant(0)), &runtime);
+ GotoIf(IsSetWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
+ &runtime);
var_result.Bind(
SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
@@ -4129,8 +4386,7 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
GotoIf(TaggedIsSmi(argument), &smi);
// Argument isn't smi, check to see if it's a heap-number.
- Node* map = LoadMap(argument);
- GotoIfNot(IsHeapNumberMap(map), &runtime);
+ GotoIfNot(IsHeapNumber(argument), &runtime);
// Make a hash from the two 32-bit values of the double.
Node* low =
@@ -4140,13 +4396,12 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
Node* hash = Word32Xor(low, high);
hash = ChangeInt32ToIntPtr(hash);
hash = WordShl(hash, one);
- Node* index = WordAnd(hash, SmiUntag(BitcastWordToTagged(mask)));
+ Node* index = WordAnd(hash, WordSar(mask, SmiShiftBitsConstant()));
// Cache entry's key must be a heap number
Node* number_key = LoadFixedArrayElement(number_string_cache, index);
GotoIf(TaggedIsSmi(number_key), &runtime);
- map = LoadMap(number_key);
- GotoIfNot(IsHeapNumberMap(map), &runtime);
+ GotoIfNot(IsHeapNumber(number_key), &runtime);
// Cache entry's key must match the heap number value we're looking for.
Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
@@ -4185,6 +4440,7 @@ Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
}
BIND(&done);
+ CSA_ASSERT(this, IsString(result.value()));
return result.value();
}
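// Editor's note: a hedged standalone sketch of the cache-index computation
// rewritten above, assuming the number-string cache keeps (key, value) pairs
// in adjacent slots of a power-of-two-length array, which is why the hash is
// doubled before masking; the Smi encoding of |mask| that the real code
// unpacks with WordSar(mask, SmiShiftBitsConstant()) is elided. Illustration
// only.
#include <cstdint>
#include <cstring>
namespace editor_sketch {
inline uint64_t NumberCacheIndexSketch(double value, uint64_t cache_length) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // the double's two 32-bit halves
  uint32_t hash =
      static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  // Key slots sit at even indices (the value follows in the next slot), so
  // doubling the hash keeps the index even.
  return (static_cast<uint64_t>(hash) << 1) & (cache_length - 1);
}
}  // namespace editor_sketch
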
@@ -4231,13 +4487,14 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
}
BIND(&end);
+ CSA_ASSERT(this, IsName(var_result.value()));
return var_result.value();
}
Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Assert input is a HeapObject (not smi or heap number)
CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumberMap(LoadMap(input))));
+ CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
// We might need to loop once here due to ToPrimitive conversions.
VARIABLE(var_input, MachineRepresentation::kTagged, input);
@@ -4286,9 +4543,7 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Check if the {result} is already a Number.
Label if_resultisnumber(this), if_resultisnotnumber(this);
GotoIf(TaggedIsSmi(result), &if_resultisnumber);
- Node* result_map = LoadMap(result);
- Branch(IsHeapNumberMap(result_map), &if_resultisnumber,
- &if_resultisnotnumber);
+ Branch(IsHeapNumber(result), &if_resultisnumber, &if_resultisnotnumber);
BIND(&if_resultisnumber);
{
@@ -4319,6 +4574,7 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
}
BIND(&end);
+ CSA_ASSERT(this, IsNumber(var_result.value()));
return var_result.value();
}
@@ -4334,8 +4590,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
BIND(&not_smi);
{
Label not_heap_number(this, Label::kDeferred);
- Node* input_map = LoadMap(input);
- GotoIfNot(IsHeapNumberMap(input_map), &not_heap_number);
+ GotoIfNot(IsHeapNumber(input), &not_heap_number);
var_result.Bind(input);
Goto(&end);
@@ -4348,6 +4603,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
}
BIND(&end);
+ CSA_ASSERT(this, IsNumber(var_result.value()));
return var_result.value();
}
@@ -4444,21 +4700,20 @@ Node* CodeStubAssembler::ToUint32(Node* context, Node* input) {
BIND(&return_zero);
{
- var_result.Bind(SmiConstant(Smi::kZero));
+ var_result.Bind(SmiConstant(0));
Goto(&out);
}
}
BIND(&out);
+ CSA_ASSERT(this, IsNumber(var_result.value()));
return var_result.value();
}
Node* CodeStubAssembler::ToString(Node* context, Node* input) {
Label is_number(this);
- Label runtime(this, Label::kDeferred);
+ Label runtime(this, Label::kDeferred), done(this);
VARIABLE(result, MachineRepresentation::kTagged);
- Label done(this, &result);
-
GotoIf(TaggedIsSmi(input), &is_number);
Node* input_map = LoadMap(input);
@@ -4558,6 +4813,7 @@ Node* CodeStubAssembler::ToSmiIndex(Node* const input, Node* const context,
Goto(&done);
BIND(&done);
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(result.value()));
return result.value();
}
@@ -4570,9 +4826,11 @@ Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
BIND(&to_integer);
result.Bind(ToInteger(context, result.value(),
CodeStubAssembler::kTruncateMinusZero));
- GotoIfNot(TaggedIsSmi(result.value()), range_error);
- CSA_ASSERT(this, TaggedIsSmi(result.value()));
- Goto(&negative_check);
+ GotoIf(TaggedIsSmi(result.value()), &negative_check);
+ // result.value() can still be a negative HeapNumber here.
+ Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, result.value(),
+ SmiConstant(0))),
+ &return_zero, range_error);
BIND(&negative_check);
Branch(SmiLessThan(result.value(), SmiConstant(0)), &return_zero, &done);
@@ -4582,6 +4840,7 @@ Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
Goto(&done);
BIND(&done);
+ CSA_SLOW_ASSERT(this, TaggedIsSmi(result.value()));
return result.value();
}
@@ -4614,8 +4873,7 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
// Check if {arg} is a HeapNumber.
Label if_argisheapnumber(this),
if_argisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(LoadMap(arg)), &if_argisheapnumber,
- &if_argisnotheapnumber);
+ Branch(IsHeapNumber(arg), &if_argisheapnumber, &if_argisnotheapnumber);
BIND(&if_argisheapnumber);
{
@@ -4640,17 +4898,17 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
BIND(&if_argisnotheapnumber);
{
// Need to convert {arg} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_arg.Bind(CallStub(callable, context, arg));
+ var_arg.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, arg));
Goto(&loop);
}
BIND(&return_zero);
- var_arg.Bind(SmiConstant(Smi::kZero));
+ var_arg.Bind(SmiConstant(0));
Goto(&out);
}
BIND(&out);
+ CSA_SLOW_ASSERT(this, IsNumber(var_arg.value()));
return var_arg.value();
}
@@ -4731,14 +4989,11 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
GotoIfNot(IsStringInstanceType(key_instance_type), if_bailout);
// |key| is a String. Check if it has a cached array index.
Node* hash = LoadNameHashField(key);
- Node* contains_index =
- Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
- GotoIf(Word32Equal(contains_index, Int32Constant(0)), &if_hascachedindex);
+ GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask),
+ &if_hascachedindex);
// No cached array index. If the string knows that it contains an index,
// then it must be an uncacheable index. Handle this case in the runtime.
- Node* not_an_index =
- Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
- GotoIf(Word32Equal(not_an_index, Int32Constant(0)), if_bailout);
+ GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout);
// Check if we have a ThinString.
GotoIf(Word32Equal(key_instance_type, Int32Constant(THIN_STRING_TYPE)),
&if_thinstring);
@@ -4747,9 +5002,7 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
&if_thinstring);
// Finally, check if |key| is internalized.
STATIC_ASSERT(kNotInternalizedTag != 0);
- Node* not_internalized =
- Word32And(key_instance_type, Int32Constant(kIsNotInternalizedMask));
- GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)),
+ GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask),
if_notinternalized != nullptr ? if_notinternalized : if_bailout);
Goto(if_keyisunique);
@@ -4767,6 +5020,7 @@ void CodeStubAssembler::TryInternalizeString(
Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) {
DCHECK(var_index->rep() == MachineType::PointerRepresentation());
DCHECK(var_internalized->rep() == MachineRepresentation::kTagged);
+ CSA_SLOW_ASSERT(this, IsString(string));
Node* function = ExternalConstant(
ExternalReference::try_internalize_string_function(isolate()));
Node* result = CallCFunction1(MachineType::AnyTagged(),
@@ -4828,6 +5082,19 @@ void CodeStubAssembler::SetNextEnumerationIndex(Node* dictionary,
next_enum_index_smi, SKIP_WRITE_BARRIER);
}
+template <>
+Node* CodeStubAssembler::LoadName<NameDictionary>(Node* key) {
+ CSA_ASSERT(this, Word32Or(IsTheHole(key), IsName(key)));
+ return key;
+}
+
+template <>
+Node* CodeStubAssembler::LoadName<GlobalDictionary>(Node* key) {
+ CSA_ASSERT(this, IsPropertyCell(key));
+ CSA_ASSERT(this, IsNotTheHole(key));
+ return LoadObjectField(key, PropertyCell::kNameOffset);
+}
+
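// Editor's note: the two specializations above normalise what sits in a
// dictionary's key slot -- a NameDictionary stores the Name itself, while a
// GlobalDictionary stores a PropertyCell whose kNameOffset holds the Name --
// so the probing loop below can compare against |unique_name| uniformly after
// calling LoadName<Dictionary>().
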
template <typename Dictionary>
void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* unique_name, Label* if_found,
@@ -4848,12 +5115,15 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
// See Dictionary::FirstProbe().
Node* count = IntPtrConstant(0);
Node* entry = WordAnd(hash, mask);
+ Node* undefined = UndefinedConstant();
for (int i = 0; i < inlined_probes; i++) {
Node* index = EntryToIndex<Dictionary>(entry);
var_name_index->Bind(index);
Node* current = LoadFixedArrayElement(dictionary, index);
+ GotoIf(WordEqual(current, undefined), if_not_found);
+ current = LoadName<Dictionary>(current);
GotoIf(WordEqual(current, unique_name), if_found);
// See Dictionary::NextProbe().
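// Editor's note: FirstProbe()/NextProbe() referenced here are the usual
// open-addressing walk over a power-of-two table. A hedged standalone sketch
// (the exact V8 helpers may differ in detail); illustration only:
#include <cstdint>
namespace editor_sketch {
inline uint32_t FirstProbeSketch(uint32_t hash, uint32_t mask) {
  return hash & mask;
}
// Triangular stepping (+1, +2, +3, ...) touches every slot of a power-of-two
// table before repeating, so a lookup terminates even on a dense dictionary.
inline uint32_t NextProbeSketch(uint32_t last, uint32_t count, uint32_t mask) {
  return (last + count) & mask;
}
}  // namespace editor_sketch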
@@ -4865,7 +5135,6 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
var_name_index->Bind(IntPtrConstant(0));
}
- Node* undefined = UndefinedConstant();
Node* the_hole = mode == kFindExisting ? nullptr : TheHoleConstant();
VARIABLE(var_count, MachineType::PointerRepresentation(), count);
@@ -4883,6 +5152,7 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* current = LoadFixedArrayElement(dictionary, index);
GotoIf(WordEqual(current, undefined), if_not_found);
if (mode == kFindExisting) {
+ current = LoadName<Dictionary>(current);
GotoIf(WordEqual(current, unique_name), if_found);
} else {
DCHECK_EQ(kFindInsertionIndex, mode);
@@ -4904,6 +5174,10 @@ template void CodeStubAssembler::NameDictionaryLookup<NameDictionary>(
template void CodeStubAssembler::NameDictionaryLookup<GlobalDictionary>(
Node*, Node*, Label*, Variable*, Label*, int, LookupMode);
+Node* CodeStubAssembler::ComputeIntegerHash(Node* key) {
+ return ComputeIntegerHash(key, IntPtrConstant(kZeroHashSeed));
+}
+
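// Editor's note: a hedged standalone sketch of the kind of 32-bit integer mix
// that v8::internal::ComputeIntegerHash() (referenced below) performs; the
// exact shifts and constants are an assumption here, not taken from this
// change. Illustration only.
#include <cstdint>
namespace editor_sketch {
inline uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;  // keep the result in positive Smi range
}
}  // namespace editor_sketch
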
Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
// See v8::internal::ComputeIntegerHash()
Node* hash = TruncateWordToWord32(key);
@@ -4931,12 +5205,9 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Node* capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
- Node* int32_seed;
- if (Dictionary::ShapeT::UsesSeed) {
- int32_seed = HashSeed();
- } else {
- int32_seed = Int32Constant(kZeroHashSeed);
- }
+ Node* int32_seed = std::is_same<Dictionary, SeededNumberDictionary>::value
+ ? HashSeed()
+ : Int32Constant(kZeroHashSeed);
Node* hash = ChangeUint32ToWord(ComputeIntegerHash(intptr_index, int32_seed));
Node* key_as_float64 = RoundIntPtrToFloat64(intptr_index);
@@ -5014,16 +5285,18 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(Node* dictionary,
Node* name, Node* value,
Node* index,
Node* enum_index) {
+ CSA_SLOW_ASSERT(this, IsDictionary(dictionary));
+
// Store name and value.
StoreFixedArrayElement(dictionary, index, name);
StoreValueByKeyIndex<NameDictionary>(dictionary, index, value);
// Prepare details of the new property.
- const int kInitialIndex = 0;
- PropertyDetails d(kData, NONE, kInitialIndex, PropertyCellType::kNoCell);
+ PropertyDetails d(kData, NONE, PropertyCellType::kNoCell);
enum_index =
SmiShl(enum_index, PropertyDetails::DictionaryStorageField::kShift);
- STATIC_ASSERT(kInitialIndex == 0);
+ // We OR over the actual index below, so we expect the initial value to be 0.
+ DCHECK_EQ(0, d.dictionary_index());
VARIABLE(var_details, MachineRepresentation::kTaggedSigned,
SmiOr(SmiConstant(d.AsSmi()), enum_index));
@@ -5055,6 +5328,7 @@ void CodeStubAssembler::InsertEntry<GlobalDictionary>(Node* dictionary,
template <class Dictionary>
void CodeStubAssembler::Add(Node* dictionary, Node* key, Node* value,
Label* bailout) {
+ CSA_SLOW_ASSERT(this, IsDictionary(dictionary));
Node* capacity = GetCapacity<Dictionary>(dictionary);
Node* nof = GetNumberOfElements<Dictionary>(dictionary);
Node* new_nof = SmiAdd(nof, SmiConstant(1));
@@ -5068,21 +5342,17 @@ void CodeStubAssembler::Add(Node* dictionary, Node* key, Node* value,
CSA_ASSERT(this, SmiAbove(capacity, new_nof));
Node* half_of_free_elements = SmiShr(SmiSub(capacity, new_nof), 1);
GotoIf(SmiAbove(deleted, half_of_free_elements), bailout);
- Node* enum_index = nullptr;
- if (Dictionary::kIsEnumerable) {
- enum_index = GetNextEnumerationIndex<Dictionary>(dictionary);
- Node* new_enum_index = SmiAdd(enum_index, SmiConstant(1));
- Node* max_enum_index =
- SmiConstant(PropertyDetails::DictionaryStorageField::kMax);
- GotoIf(SmiAbove(new_enum_index, max_enum_index), bailout);
-
- // No more bailouts after this point.
- // Operations from here on can have side effects.
-
- SetNextEnumerationIndex<Dictionary>(dictionary, new_enum_index);
- } else {
- USE(enum_index);
- }
+
+ Node* enum_index = GetNextEnumerationIndex<Dictionary>(dictionary);
+ Node* new_enum_index = SmiAdd(enum_index, SmiConstant(1));
+ Node* max_enum_index =
+ SmiConstant(PropertyDetails::DictionaryStorageField::kMax);
+ GotoIf(SmiAbove(new_enum_index, max_enum_index), bailout);
+
+ // No more bailouts after this point.
+ // Operations from here on can have side effects.
+
+ SetNextEnumerationIndex<Dictionary>(dictionary, new_enum_index);
SetNumberOfElements<Dictionary>(dictionary, new_nof);
VARIABLE(var_key_index, MachineType::PointerRepresentation());
@@ -5294,10 +5564,8 @@ void CodeStubAssembler::TryLookupProperty(
// Handle interceptors and access checks in runtime.
Node* bit_field = LoadMapBitField(map);
- Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
- 1 << Map::kIsAccessCheckNeeded);
- GotoIf(Word32NotEqual(Word32And(bit_field, mask), Int32Constant(0)),
- if_bailout);
+ int mask = 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ GotoIf(IsSetWord32(bit_field, mask), if_bailout);
Node* dictionary = LoadProperties(object);
var_meta_storage->Bind(dictionary);
@@ -5455,8 +5723,8 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Comment("[ LoadPropertyFromGlobalDictionary");
CSA_ASSERT(this, IsDictionary(dictionary));
- Node* property_cell =
- LoadValueByKeyIndex<GlobalDictionary>(dictionary, name_index);
+ Node* property_cell = LoadFixedArrayElement(dictionary, name_index);
+ CSA_ASSERT(this, IsPropertyCell(property_cell));
Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), if_deleted);
@@ -5640,10 +5908,10 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
// clang-format off
int32_t values[] = {
// Handled by {if_isobjectorsmi}.
- FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
+ PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS,
+ HOLEY_ELEMENTS,
// Handled by {if_isdouble}.
- FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
+ PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
// Handled by {if_isdictionary}.
DICTIONARY_ELEMENTS,
// Handled by {if_isfaststringwrapper}.
@@ -5867,34 +6135,77 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
}
-Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
- Node* object) {
+Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object,
+ Node* prototype) {
+ CSA_ASSERT(this, TaggedIsNotSmi(object));
VARIABLE(var_result, MachineRepresentation::kTagged);
Label return_false(this), return_true(this),
return_runtime(this, Label::kDeferred), return_result(this);
- // Goto runtime if {object} is a Smi.
- GotoIf(TaggedIsSmi(object), &return_runtime);
+ // Loop through the prototype chain looking for the {prototype}.
+ VARIABLE(var_object_map, MachineRepresentation::kTagged, LoadMap(object));
+ Label loop(this, &var_object_map);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ // Check if we can determine the prototype directly from the {object_map}.
+ Label if_objectisdirect(this), if_objectisspecial(this, Label::kDeferred);
+ Node* object_map = var_object_map.value();
+ Node* object_instance_type = LoadMapInstanceType(object_map);
+ Branch(IsSpecialReceiverInstanceType(object_instance_type),
+ &if_objectisspecial, &if_objectisdirect);
+ BIND(&if_objectisspecial);
+ {
+ // The {object_map} is a special receiver map or a primitive map; check
+ // if we need to use the if_objectisspecial path in the runtime.
+ GotoIf(InstanceTypeEqual(object_instance_type, JS_PROXY_TYPE),
+ &return_runtime);
+ Node* object_bitfield = LoadMapBitField(object_map);
+ int mask =
+ 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ Branch(IsSetWord32(object_bitfield, mask), &return_runtime,
+ &if_objectisdirect);
+ }
+ BIND(&if_objectisdirect);
- // Load map of {object}.
- Node* object_map = LoadMap(object);
+ // Check the current {object} prototype.
+ Node* object_prototype = LoadMapPrototype(object_map);
+ GotoIf(IsNull(object_prototype), &return_false);
+ GotoIf(WordEqual(object_prototype, prototype), &return_true);
- // Lookup the {callable} and {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Node* instanceof_cache_function =
- LoadRoot(Heap::kInstanceofCacheFunctionRootIndex);
- Node* instanceof_cache_map = LoadRoot(Heap::kInstanceofCacheMapRootIndex);
- {
- Label instanceof_cache_miss(this);
- GotoIfNot(WordEqual(instanceof_cache_function, callable),
- &instanceof_cache_miss);
- GotoIfNot(WordEqual(instanceof_cache_map, object_map),
- &instanceof_cache_miss);
- var_result.Bind(LoadRoot(Heap::kInstanceofCacheAnswerRootIndex));
- Goto(&return_result);
- BIND(&instanceof_cache_miss);
+ // Continue with the prototype.
+ CSA_ASSERT(this, TaggedIsNotSmi(object_prototype));
+ var_object_map.Bind(LoadMap(object_prototype));
+ Goto(&loop);
+ }
+
+ BIND(&return_true);
+ var_result.Bind(TrueConstant());
+ Goto(&return_result);
+
+ BIND(&return_false);
+ var_result.Bind(FalseConstant());
+ Goto(&return_result);
+
+ BIND(&return_runtime);
+ {
+ // Fallback to the runtime implementation.
+ var_result.Bind(
+ CallRuntime(Runtime::kHasInPrototypeChain, context, object, prototype));
}
+ Goto(&return_result);
+
+ BIND(&return_result);
+ return var_result.value();
+}
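// Editor's note: a hedged standalone sketch of the fast path above, written
// against a hypothetical receiver model (the real code inspects maps and
// instance types); proxies, interceptors and access checks take the runtime
// path. Illustration only.
namespace editor_sketch {
struct DemoReceiver {
  const DemoReceiver* prototype;  // nullptr terminates the chain
  bool is_special;                // proxy, interceptor, or access check needed
};
inline bool HasInPrototypeChainSketch(const DemoReceiver* object,
                                      const DemoReceiver* prototype,
                                      bool* need_runtime) {
  *need_runtime = false;
  for (const DemoReceiver* current = object;;) {
    if (current->is_special) {  // defer to Runtime::kHasInPrototypeChain
      *need_runtime = true;
      return false;
    }
    current = current->prototype;
    if (current == nullptr) return false;
    if (current == prototype) return true;
  }
}
}  // namespace editor_sketch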
+
+Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
+ Node* object) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label return_runtime(this, Label::kDeferred), return_result(this);
+
+ // Goto runtime if {object} is a Smi.
+ GotoIf(TaggedIsSmi(object), &return_runtime);
// Goto runtime if {callable} is a Smi.
GotoIf(TaggedIsSmi(callable), &return_runtime);
@@ -5942,56 +6253,12 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
callable_prototype = var_callable_prototype.value();
}
- // Update the global instanceof cache with the current {object} map and
- // {callable}. The cached answer will be set when it is known below.
- StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, callable);
- StoreRoot(Heap::kInstanceofCacheMapRootIndex, object_map);
-
// Loop through the prototype chain looking for the {callable} prototype.
- VARIABLE(var_object_map, MachineRepresentation::kTagged, object_map);
- Label loop(this, &var_object_map);
- Goto(&loop);
- BIND(&loop);
- {
- Node* object_map = var_object_map.value();
-
- // Check if the current {object} needs to be access checked.
- Node* object_bitfield = LoadMapBitField(object_map);
- GotoIfNot(
- Word32Equal(Word32And(object_bitfield,
- Int32Constant(1 << Map::kIsAccessCheckNeeded)),
- Int32Constant(0)),
- &return_runtime);
-
- // Check if the current {object} is a proxy.
- Node* object_instance_type = LoadMapInstanceType(object_map);
- GotoIf(Word32Equal(object_instance_type, Int32Constant(JS_PROXY_TYPE)),
- &return_runtime);
-
- // Check the current {object} prototype.
- Node* object_prototype = LoadMapPrototype(object_map);
- GotoIf(WordEqual(object_prototype, NullConstant()), &return_false);
- GotoIf(WordEqual(object_prototype, callable_prototype), &return_true);
-
- // Continue with the prototype.
- var_object_map.Bind(LoadMap(object_prototype));
- Goto(&loop);
- }
-
- BIND(&return_true);
- StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(true));
- var_result.Bind(BooleanConstant(true));
- Goto(&return_result);
-
- BIND(&return_false);
- StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(false));
- var_result.Bind(BooleanConstant(false));
+ var_result.Bind(HasInPrototypeChain(context, object, callable_prototype));
Goto(&return_result);
BIND(&return_runtime);
{
- // Invalidate the global instanceof cache.
- StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, SmiConstant(0));
// Fallback to the runtime implementation.
var_result.Bind(
CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object));
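// Editor's note: with this change OrdinaryHasInstance() delegates the chain
// walk to HasInPrototypeChain() defined above and no longer reads or updates
// the global instanceof cache roots (kInstanceofCacheFunction/Map/Answer)
// that the removed lines used to maintain.
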
@@ -6042,14 +6309,28 @@ Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
}
void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
- Node* slot_id) {
+ Node* slot_id, Node* function) {
// This method is used for binary op and compare feedback. These
// vector nodes are initialized with a smi 0, so we can simply OR
// our new feedback in place.
Node* previous_feedback = LoadFixedArrayElement(feedback_vector, slot_id);
Node* combined_feedback = SmiOr(previous_feedback, feedback);
- StoreFixedArrayElement(feedback_vector, slot_id, combined_feedback,
- SKIP_WRITE_BARRIER);
+ Label end(this);
+
+ GotoIf(SmiEqual(previous_feedback, combined_feedback), &end);
+ {
+ StoreFixedArrayElement(feedback_vector, slot_id, combined_feedback,
+ SKIP_WRITE_BARRIER);
+ // Reset profiler ticks.
+ Node* shared_info =
+ LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+ StoreObjectFieldNoWriteBarrier(
+ shared_info, SharedFunctionInfo::kProfilerTicksOffset, Int32Constant(0),
+ MachineRepresentation::kWord32);
+ Goto(&end);
+ }
+
+ BIND(&end);
}
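// Editor's note: feedback is a small bit set, so combining is a plain OR and
// the store (plus the profiler-tick reset) is skipped when nothing new was
// learned. A hedged standalone sketch, illustration only:
#include <cstdint>
namespace editor_sketch {
inline void UpdateFeedbackSketch(uint32_t* slot, uint32_t new_feedback,
                                 uint32_t* profiler_ticks) {
  uint32_t combined = *slot | new_feedback;
  if (combined == *slot) return;  // no change, avoid the write
  *slot = combined;
  *profiler_ticks = 0;  // mirrors resetting SharedFunctionInfo's tick count
}
}  // namespace editor_sketch
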
void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
@@ -6084,7 +6365,7 @@ Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
- GotoIfNot(IsHeapNumberMap(LoadMap(key)), miss);
+ GotoIfNot(IsHeapNumber(key), miss);
{
Node* value = LoadHeapNumberValue(key);
Node* int_value = RoundFloat64ToInt32(value);
@@ -6231,7 +6512,6 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
return MachineRepresentation::kFloat64;
default:
UNREACHABLE();
- return MachineRepresentation::kNone;
}
}
@@ -6252,8 +6532,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
}
WriteBarrierMode barrier_mode =
- IsFastSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
- if (IsFastDoubleElementsKind(kind)) {
+ IsSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ if (IsDoubleElementsKind(kind)) {
// Make sure we do not store signalling NaNs into double arrays.
value = Float64SilenceNaN(value);
StoreFixedDoubleArrayElement(elements, index, value, mode);
@@ -6314,14 +6594,13 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
break;
default:
UNREACHABLE();
- return nullptr;
}
VARIABLE(var_result, rep);
Label done(this, &var_result), if_smi(this);
GotoIf(TaggedIsSmi(input), &if_smi);
// Try to convert a heap number to a Smi.
- GotoIfNot(IsHeapNumberMap(LoadMap(input)), bailout);
+ GotoIfNot(IsHeapNumber(input), bailout);
{
Node* value = LoadHeapNumberValue(input);
if (rep == MachineRepresentation::kWord32) {
@@ -6366,7 +6645,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
KeyedAccessStoreMode store_mode,
Label* bailout) {
Node* elements = LoadElements(object);
- if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
// Bailout in case of COW elements.
GotoIf(WordNotEqual(LoadMap(elements),
@@ -6420,8 +6699,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
BIND(&done);
return;
}
- DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
+ DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
+ IsDoubleElementsKind(elements_kind));
Node* length = is_jsarray ? LoadObjectField(object, JSArray::kLengthOffset)
: LoadFixedArrayBaseLength(elements);
@@ -6430,9 +6709,9 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// In case value is stored into a fast smi array, ensure that the value is
// a smi before manipulating the backing store. Otherwise the backing store
// may be left in an invalid state.
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsSmiElementsKind(elements_kind)) {
GotoIfNot(TaggedIsSmi(value), bailout);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
+ } else if (IsDoubleElementsKind(elements_kind)) {
value = TryTaggedToFloat64(value, bailout);
}
@@ -6443,7 +6722,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
GotoIfNot(UintPtrLessThan(key, length), bailout);
if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
- IsFastSmiOrObjectElementsKind(elements_kind)) {
+ IsSmiOrObjectElementsKind(elements_kind)) {
elements = CopyElementsOnWrite(object, elements, elements_kind, length,
parameter_mode, bailout);
}
@@ -6460,7 +6739,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
Label grow_case(this), no_grow_case(this), done(this);
Node* condition;
- if (IsHoleyElementsKind(kind)) {
+ if (IsHoleyOrDictionaryElementsKind(kind)) {
condition = UintPtrGreaterThanOrEqual(key, length);
} else {
condition = WordEqual(key, length);
@@ -6533,9 +6812,8 @@ void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
ElementsKind to_kind,
bool is_jsarray,
Label* bailout) {
- DCHECK(!IsFastHoleyElementsKind(from_kind) ||
- IsFastHoleyElementsKind(to_kind));
- if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ DCHECK(!IsHoleyElementsKind(from_kind) || IsHoleyElementsKind(to_kind));
+ if (AllocationSite::ShouldTrack(from_kind, to_kind)) {
TrapAllocationMemento(object, bailout);
}
@@ -6637,8 +6915,8 @@ Node* CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
StoreMap(site, AllocationSiteMapConstant());
Node* kind = SmiConstant(GetInitialFastElementsKind());
- StoreObjectFieldNoWriteBarrier(site, AllocationSite::kTransitionInfoOffset,
- kind);
+ StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kTransitionInfoOrBoilerplateOffset, kind);
// Unlike literals, constructed arrays don't have nested sites
Node* zero = SmiConstant(0);
@@ -6769,6 +7047,8 @@ Node* CodeStubAssembler::BuildFastLoop(
const CodeStubAssembler::VariableList& vars, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode));
MachineRepresentation index_rep = (parameter_mode == INTPTR_PARAMETERS)
? MachineType::PointerRepresentation()
: MachineRepresentation::kTaggedSigned;
@@ -6806,6 +7086,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode, ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(first_element_inclusive, mode));
+ CSA_SLOW_ASSERT(this, MatchesParameterMode(last_element_exclusive, mode));
+ CSA_SLOW_ASSERT(this, Word32Or(IsFixedArrayWithKind(fixed_array, kind),
+ IsPropertyArray(fixed_array)));
int32_t first_val;
bool constant_first = ToInt32Constant(first_element_inclusive, first_val);
int32_t last_val;
@@ -6843,7 +7127,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
FixedArray::kHeaderSize - kHeapObjectTag);
if (direction == ForEachDirection::kReverse) std::swap(start, limit);
- int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
+ int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
BuildFastLoop(
vars, start, limit,
[fixed_array, &body](Node* offset) { body(fixed_array, offset); },
@@ -6866,6 +7150,7 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
void CodeStubAssembler::InitializeFieldsWithRoot(
Node* object, Node* start_offset, Node* end_offset,
Heap::RootListIndex root_index) {
+ CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
Node* root_value = LoadRoot(root_index);
@@ -6881,6 +7166,9 @@ void CodeStubAssembler::InitializeFieldsWithRoot(
void CodeStubAssembler::BranchIfNumericRelationalComparison(
RelationalComparisonMode mode, Node* lhs, Node* rhs, Label* if_true,
Label* if_false) {
+ CSA_SLOW_ASSERT(this, IsNumber(lhs));
+ CSA_SLOW_ASSERT(this, IsNumber(rhs));
+
Label end(this);
VARIABLE(result, MachineRepresentation::kTagged);
@@ -6920,7 +7208,7 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
BIND(&if_rhsisnotsmi);
{
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(rhs)));
+ CSA_ASSERT(this, IsHeapNumber(rhs));
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
var_fcmp_lhs.Bind(SmiToFloat64(lhs));
@@ -6931,7 +7219,7 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
BIND(&if_lhsisnotsmi);
{
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(lhs)));
+ CSA_ASSERT(this, IsHeapNumber(lhs));
// Check if {rhs} is a Smi or a HeapObject.
Label if_rhsissmi(this), if_rhsisnotsmi(this);
@@ -6948,7 +7236,7 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
BIND(&if_rhsisnotsmi);
{
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(rhs)));
+ CSA_ASSERT(this, IsHeapNumber(rhs));
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
@@ -7055,12 +7343,9 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsisnotsmi);
{
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
// Check if the {rhs} is a HeapNumber.
Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+ Branch(IsHeapNumber(rhs), &if_rhsisnumber, &if_rhsisnotnumber);
BIND(&if_rhsisnumber);
{
@@ -7086,8 +7371,7 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
// dedicated ToPrimitive(rhs, hint Number) operation, as the
// ToNumber(rhs) will by itself already invoke ToPrimitive with
// a Number hint.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
+ var_rhs.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, rhs));
Goto(&loop);
}
}
@@ -7132,8 +7416,7 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
// dedicated ToPrimitive(lhs, hint Number) operation, as the
// ToNumber(lhs) will by itself already invoke ToPrimitive with
// a Number hint.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
+ var_lhs.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, lhs));
Goto(&loop);
}
}
@@ -7178,8 +7461,8 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
// dedicated ToPrimitive(rhs, hint Number) operation, as the
// ToNumber(rhs) will by itself already invoke ToPrimitive with
// a Number hint.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
+ var_rhs.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, rhs));
Goto(&loop);
}
}
@@ -7214,26 +7497,23 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
}
switch (mode) {
case kLessThan:
- result.Bind(CallStub(CodeFactory::StringLessThan(isolate()),
- context, lhs, rhs));
+ result.Bind(CallBuiltin(Builtins::kStringLessThan, context,
+ lhs, rhs));
Goto(&end);
break;
case kLessThanOrEqual:
- result.Bind(
- CallStub(CodeFactory::StringLessThanOrEqual(isolate()),
- context, lhs, rhs));
+ result.Bind(CallBuiltin(Builtins::kStringLessThanOrEqual,
+ context, lhs, rhs));
Goto(&end);
break;
case kGreaterThan:
- result.Bind(
- CallStub(CodeFactory::StringGreaterThan(isolate()),
- context, lhs, rhs));
+ result.Bind(CallBuiltin(Builtins::kStringGreaterThan, context,
+ lhs, rhs));
Goto(&end);
break;
case kGreaterThanOrEqual:
- result.Bind(
- CallStub(CodeFactory::StringGreaterThanOrEqual(isolate()),
- context, lhs, rhs));
+ result.Bind(CallBuiltin(Builtins::kStringGreaterThanOrEqual,
+ context, lhs, rhs));
Goto(&end);
break;
}
@@ -7268,9 +7548,8 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsisnotreceiver);
{
// Convert both {lhs} and {rhs} to Number.
- Callable callable = CodeFactory::ToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- var_rhs.Bind(CallStub(callable, context, rhs));
+ var_lhs.Bind(CallBuiltin(Builtins::kToNumber, context, lhs));
+ var_rhs.Bind(CallBuiltin(Builtins::kToNumber, context, rhs));
Goto(&loop);
}
}
@@ -7334,9 +7613,8 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_lhsisnotreceiver);
{
// Convert both {lhs} and {rhs} to Number.
- Callable callable = CodeFactory::ToNumber(isolate());
- var_lhs.Bind(CallStub(callable, context, lhs));
- var_rhs.Bind(CallStub(callable, context, rhs));
+ var_lhs.Bind(CallBuiltin(Builtins::kToNumber, context, lhs));
+ var_rhs.Bind(CallBuiltin(Builtins::kToNumber, context, rhs));
Goto(&loop);
}
}
@@ -7434,9 +7712,12 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
// Collect type feedback.
Node* instance_type = LoadMapInstanceType(value_map);
- Label if_valueisstring(this), if_valueisnotstring(this);
- Branch(IsStringInstanceType(instance_type), &if_valueisstring,
- &if_valueisnotstring);
+ Label if_valueisstring(this), if_valueisreceiver(this),
+ if_valueissymbol(this), if_valueisother(this, Label::kDeferred);
+ GotoIf(IsStringInstanceType(instance_type), &if_valueisstring);
+ GotoIf(IsJSReceiverInstanceType(instance_type), &if_valueisreceiver);
+ Branch(IsSymbolInstanceType(instance_type), &if_valueissymbol,
+ &if_valueisother);
BIND(&if_valueisstring);
{
@@ -7445,15 +7726,26 @@ void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
Goto(if_equal);
}
- BIND(&if_valueisnotstring);
+ BIND(&if_valueissymbol);
{
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- GotoIfNot(IsJSReceiverInstanceType(instance_type), if_equal);
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSymbol));
+ Goto(if_equal);
+ }
+ BIND(&if_valueisreceiver);
+ {
CombineFeedback(var_type_feedback,
SmiConstant(CompareOperationFeedback::kReceiver));
Goto(if_equal);
}
+
+ BIND(&if_valueisother);
+ {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kAny));
+ Goto(if_equal);
+ }
} else {
Goto(if_equal);
}
@@ -7688,8 +7980,8 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
{
// Both {lhs} and {rhs} are of type String, just do the
// string comparison then.
- Callable callable = CodeFactory::StringEqual(isolate());
- result.Bind(CallStub(callable, context, lhs, rhs));
+ result.Bind(
+ CallBuiltin(Builtins::kStringEqual, context, lhs, rhs));
if (var_type_feedback != nullptr) {
Node* lhs_feedback =
CollectFeedbackForString(lhs_instance_type);
@@ -7840,25 +8132,14 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
// The {lhs} is either Null or Undefined; check if the {rhs} is
// undetectable (i.e. either also Null or Undefined or some
// undetectable JSReceiver).
- Node* rhs_bitfield = LoadMapBitField(rhs_map);
- Branch(Word32Equal(
- Word32And(rhs_bitfield,
- Int32Constant(1 << Map::kIsUndetectable)),
- Int32Constant(0)),
- &if_notequal, &if_equal);
+ Branch(IsUndetectableMap(rhs_map), &if_equal, &if_notequal);
}
}
BIND(&if_lhsissymbol);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- }
-
// Check if the {rhs} is a JSReceiver.
Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Branch(IsJSReceiverInstanceType(rhs_instance_type),
&if_rhsisreceiver, &if_rhsisnotreceiver);
@@ -7868,6 +8149,10 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
// Swapping {lhs} and {rhs} is not observable and doesn't
// matter for the result, so we can just swap them and use
// the JSReceiver handling below (for {lhs} being a JSReceiver).
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
var_lhs.Bind(rhs);
var_rhs.Bind(lhs);
Goto(&loop);
@@ -7877,7 +8162,27 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
{
// The {rhs} is not a JSReceiver and also not the same Symbol
// as the {lhs}, so this equality check is considered false.
- Goto(&if_notequal);
+ if (var_type_feedback != nullptr) {
+ Label if_rhsissymbol(this), if_rhsisnotsymbol(this);
+ Branch(IsSymbolInstanceType(rhs_instance_type), &if_rhsissymbol,
+ &if_rhsisnotsymbol);
+
+ BIND(&if_rhsissymbol);
+ {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kSymbol));
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_rhsisnotsymbol);
+ {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ Goto(&if_notequal);
+ }
+ } else {
+ Goto(&if_notequal);
+ }
}
}
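// Editor's note: in the restructured flow above, once the left string has been
// dereferenced the outcome for the right string no longer matters, so both
// outcomes of the second MaybeDerefIndirectString() call jump to
// |did_something|; only when the left side did nothing does the right side
// decide between |did_something| and falling through.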
@@ -7915,23 +8220,11 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
// a JSReceiver).
Label if_rhsisundetectable(this),
if_rhsisnotundetectable(this, Label::kDeferred);
- Node* rhs_bitfield = LoadMapBitField(rhs_map);
- Branch(Word32Equal(
- Word32And(rhs_bitfield,
- Int32Constant(1 << Map::kIsUndetectable)),
- Int32Constant(0)),
- &if_rhsisnotundetectable, &if_rhsisundetectable);
+ Branch(IsUndetectableMap(rhs_map), &if_rhsisundetectable,
+ &if_rhsisnotundetectable);
BIND(&if_rhsisundetectable);
- {
- // Check if {lhs} is an undetectable JSReceiver.
- Node* lhs_bitfield = LoadMapBitField(lhs_map);
- Branch(Word32Equal(
- Word32And(lhs_bitfield,
- Int32Constant(1 << Map::kIsUndetectable)),
- Int32Constant(0)),
- &if_notequal, &if_equal);
- }
+ Branch(IsUndetectableMap(lhs_map), &if_equal, &if_notequal);
BIND(&if_rhsisnotundetectable);
{
@@ -7950,8 +8243,7 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
BIND(&do_rhsstringtonumber);
{
- Callable callable = CodeFactory::StringToNumber(isolate());
- var_rhs.Bind(CallStub(callable, context, rhs));
+ var_rhs.Bind(CallBuiltin(Builtins::kStringToNumber, context, rhs));
Goto(&loop);
}
}
@@ -7994,7 +8286,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// if (!lhs->IsSmi()) {
// if (lhs->IsHeapNumber()) {
// if (rhs->IsSmi()) {
- // return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
+ // return Smi::ToInt(rhs) == HeapNumber::cast(lhs)->value();
// } else if (rhs->IsHeapNumber()) {
// return HeapNumber::cast(rhs)->value() ==
// HeapNumber::cast(lhs)->value();
@@ -8021,7 +8313,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// return false;
// } else {
// if (rhs->IsHeapNumber()) {
- // return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
+ // return Smi::ToInt(lhs) == HeapNumber::cast(rhs)->value();
// } else {
// return false;
// }
@@ -8149,7 +8441,6 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisstring);
{
- Callable callable = CodeFactory::StringEqual(isolate());
if (var_type_feedback != nullptr) {
Node* lhs_feedback =
CollectFeedbackForString(lhs_instance_type);
@@ -8157,7 +8448,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
CollectFeedbackForString(rhs_instance_type);
var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback));
}
- result.Bind(CallStub(callable, NoContextConstant(), lhs, rhs));
+ result.Bind(CallBuiltin(Builtins::kStringEqual,
+ NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -8167,14 +8459,31 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisnotstring);
if (var_type_feedback != nullptr) {
- GotoIfNot(IsJSReceiverInstanceType(lhs_instance_type),
- &if_notequal);
- GotoIfNot(IsJSReceiverInstanceType(rhs_instance_type),
- &if_notequal);
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kReceiver));
+ Label if_lhsissymbol(this), if_lhsisreceiver(this);
+ GotoIf(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver);
+ Branch(IsSymbolInstanceType(lhs_instance_type), &if_lhsissymbol,
+ &if_notequal);
+
+ BIND(&if_lhsisreceiver);
+ {
+ GotoIfNot(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_notequal);
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kReceiver));
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_lhsissymbol);
+ {
+ GotoIfNot(IsSymbolInstanceType(rhs_instance_type), &if_notequal);
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kSymbol));
+ Goto(&if_notequal);
+ }
+ } else {
+ Goto(&if_notequal);
}
- Goto(&if_notequal);
}
}
}
@@ -8604,8 +8913,8 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
GotoIfNot(IsCallable(callable), &if_notcallable);
// Use the OrdinaryHasInstance algorithm.
- Node* result = CallStub(CodeFactory::OrdinaryHasInstance(isolate()),
- context, callable, object);
+ Node* result =
+ CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object);
var_result.Bind(result);
Goto(&return_result);
}
@@ -8643,7 +8952,7 @@ Node* CodeStubAssembler::NumberInc(Node* value) {
BIND(&if_issmi);
{
// Try fast Smi addition first.
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
BitcastTaggedToWord(one));
Node* overflow = Projection(1, pair);
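Adding the two tagged words directly is sound because a Smi is the integer shifted left by the tag width, so tagged(a) + tagged(b) equals tagged(a + b), and an overflow of the raw machine add is exactly an overflow of the untagged add, which is what the Projection of IntPtrAddWithOverflow reports. A minimal sketch, assuming the 32-bit layout with a one-bit tag (the 64-bit layout shifts by 32, but the argument is identical):

  #include <cstdint>
  #include <cstdio>

  // Assumed 31-bit Smi payload with tag bit 0 always cleared for Smis.
  int32_t SmiTag(int32_t value) { return value << 1; }
  int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }

  int main() {
    int32_t value = SmiTag(41);
    int32_t one = SmiTag(1);
    // tagged(41) + tagged(1) == tagged(42)
    std::printf("%d\n", SmiUntag(value + one));
    return 0;
  }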
@@ -8665,8 +8974,7 @@ Node* CodeStubAssembler::NumberInc(Node* value) {
BIND(&if_isnotsmi);
{
- // Check if the value is a HeapNumber.
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value)));
+ CSA_ASSERT(this, IsHeapNumber(value));
// Load the HeapNumber value.
var_finc_value.Bind(LoadHeapNumberValue(value));
@@ -8695,7 +9003,7 @@ Node* CodeStubAssembler::NumberDec(Node* value) {
BIND(&if_issmi);
{
// Try fast Smi subtraction first.
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(value),
BitcastTaggedToWord(one));
Node* overflow = Projection(1, pair);
@@ -8717,8 +9025,7 @@ Node* CodeStubAssembler::NumberDec(Node* value) {
BIND(&if_isnotsmi);
{
- // Check if the value is a HeapNumber.
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value)));
+ CSA_ASSERT(this, IsHeapNumber(value));
// Load the HeapNumber value.
var_fdec_value.Bind(LoadHeapNumberValue(value));
@@ -8741,15 +9048,13 @@ Node* CodeStubAssembler::NumberDec(Node* value) {
void CodeStubAssembler::GotoIfNotNumber(Node* input, Label* is_not_number) {
Label is_number(this);
GotoIf(TaggedIsSmi(input), &is_number);
- Node* input_map = LoadMap(input);
- Branch(IsHeapNumberMap(input_map), &is_number, is_not_number);
+ Branch(IsHeapNumber(input), &is_number, is_not_number);
BIND(&is_number);
}
void CodeStubAssembler::GotoIfNumber(Node* input, Label* is_number) {
GotoIf(TaggedIsSmi(input), is_number);
- Node* input_map = LoadMap(input);
- GotoIf(IsHeapNumberMap(input_map), is_number);
+ GotoIf(IsHeapNumber(input), is_number);
}
Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
@@ -8862,11 +9167,10 @@ Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
// here, and take the slow path if any fail.
Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoIfNot(
- WordEqual(
- LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
- &if_isslow);
+ GotoIfNot(WordEqual(LoadObjectField(protector_cell,
+ PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid)),
+ &if_isslow);
Node* native_context = LoadNativeContext(context);
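The array protector is a PropertyCell whose value is a Smi; while it still holds Isolate::kProtectorValid the fast iteration path is safe, and once the protector has been invalidated the GotoIfNot above falls through to the slow path. A sketch of the check itself (the constants' numeric values are assumed here):

  #include <cstdio>

  // Assumed numeric values: Isolate::kProtectorValid == 1, kProtectorInvalid == 0.
  const int kProtectorValid = 1;
  const int kProtectorInvalid = 0;

  // Stands in for the WordEqual(LoadObjectField(...), SmiConstant(...)) test above.
  bool ArrayProtectorIntact(int cell_value) { return cell_value == kProtectorValid; }

  int main() {
    std::printf("%d %d\n", ArrayProtectorIntact(kProtectorValid),
                ArrayProtectorIntact(kProtectorInvalid));  // prints: 1 0
    return 0;
  }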
@@ -8945,19 +9249,71 @@ Node* CodeStubAssembler::AllocateJSArrayIterator(Node* array, Node* array_map,
Node* map) {
Node* iterator = Allocate(JSArrayIterator::kSize);
StoreMapNoWriteBarrier(iterator, map);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOffset,
+ StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldNoWriteBarrier(iterator,
JSArrayIterator::kIteratedObjectOffset, array);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
- SmiConstant(Smi::FromInt(0)));
+ SmiConstant(0));
StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectMapOffset, array_map);
return iterator;
}
+Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value,
+ Node* done) {
+ CSA_ASSERT(this, IsBoolean(done));
+ Node* native_context = LoadNativeContext(context);
+ Node* map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Node* result = Allocate(JSIteratorResult::kSize);
+ StoreMapNoWriteBarrier(result, map);
+ StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
+ Node* key,
+ Node* value) {
+ Node* native_context = LoadNativeContext(context);
+ Node* length = SmiConstant(2);
+ int const elements_size = FixedArray::SizeFor(2);
+ Node* elements =
+ Allocate(elements_size + JSArray::kSize + JSIteratorResult::kSize);
+ StoreObjectFieldRoot(elements, FixedArray::kMapOffset,
+ Heap::kFixedArrayMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
+ StoreFixedArrayElement(elements, 0, key);
+ StoreFixedArrayElement(elements, 1, value);
+ Node* array_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
+ Node* array = InnerAllocate(elements, elements_size);
+ StoreMapNoWriteBarrier(array, array_map);
+ StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+ Node* iterator_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Node* result = InnerAllocate(array, JSArray::kSize);
+ StoreMapNoWriteBarrier(result, iterator_map);
+ StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array);
+ StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset,
+ Heap::kFalseValueRootIndex);
+ return result;
+}
+
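AllocateJSIteratorResultForEntry performs one Allocate call sized for all three objects and then carves the JSArray and the JSIteratorResult out of it with InnerAllocate, so the [key, value] backing store, the array wrapping it, and the {value, done} result share a single new-space chunk. A sketch of the offset arithmetic (the byte sizes are assumed placeholders; only the additions mirror the code above):

  #include <cstdio>

  int main() {
    // Assumed sizes standing in for FixedArray::SizeFor(2), JSArray::kSize and
    // JSIteratorResult::kSize.
    const int kElementsSize = 24;
    const int kJSArraySize = 32;
    const int kIteratorResultSize = 24;
    // One allocation, then InnerAllocate(elements, kElementsSize) for the array
    // and InnerAllocate(array, JSArray::kSize) for the result.
    std::printf("elements at +0, array at +%d, result at +%d, total %d bytes\n",
                kElementsSize, kElementsSize + kJSArraySize,
                kElementsSize + kJSArraySize + kIteratorResultSize);
    return 0;
  }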
Node* CodeStubAssembler::TypedArraySpeciesCreateByLength(Node* context,
Node* originalArray,
Node* len) {
@@ -8975,21 +9331,23 @@ Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
}
-CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
- Node* fp,
- CodeStubAssembler::ParameterMode mode)
+CodeStubArguments::CodeStubArguments(
+ CodeStubAssembler* assembler, Node* argc, Node* fp,
+ CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode)
: assembler_(assembler),
- argc_mode_(mode),
+ argc_mode_(param_mode),
+ receiver_mode_(receiver_mode),
argc_(argc),
arguments_(nullptr),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
Node* offset = assembler_->ElementOffsetFromIndex(
- argc_, FAST_ELEMENTS, mode,
+ argc_, PACKED_ELEMENTS, param_mode,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
arguments_ = assembler_->IntPtrAdd(fp_, offset);
}
Node* CodeStubArguments::GetReceiver() const {
+ DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
return assembler_->Load(MachineType::AnyTagged(), arguments_,
assembler_->IntPtrConstant(kPointerSize));
}
@@ -8999,8 +9357,8 @@ Node* CodeStubArguments::AtIndexPtr(
typedef compiler::Node Node;
Node* negated_index = assembler_->IntPtrOrSmiSub(
assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
- Node* offset =
- assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
+ Node* offset = assembler_->ElementOffsetFromIndex(negated_index,
+ PACKED_ELEMENTS, mode, 0);
return assembler_->IntPtrAdd(arguments_, offset);
}
@@ -9052,10 +9410,10 @@ void CodeStubArguments::ForEach(
}
Node* start = assembler_->IntPtrSub(
arguments_,
- assembler_->ElementOffsetFromIndex(first, FAST_ELEMENTS, mode));
+ assembler_->ElementOffsetFromIndex(first, PACKED_ELEMENTS, mode));
Node* end = assembler_->IntPtrSub(
arguments_,
- assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
+ assembler_->ElementOffsetFromIndex(last, PACKED_ELEMENTS, mode));
assembler_->BuildFastLoop(vars, start, end,
[this, &body](Node* current) {
Node* arg = assembler_->Load(
@@ -9067,8 +9425,14 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(Node* value) {
- assembler_->PopAndReturn(
- assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)), value);
+ Node* pop_count;
+ if (receiver_mode_ == ReceiverMode::kHasReceiver) {
+ pop_count = assembler_->IntPtrOrSmiAdd(
+ argc_, assembler_->IntPtrOrSmiConstant(1, argc_mode_), argc_mode_);
+ } else {
+ pop_count = argc_;
+ }
+ assembler_->PopAndReturn(pop_count, value);
}
Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
@@ -9079,13 +9443,10 @@ Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
CSA_ASSERT(this, IsFastElementsKind(elements_kind));
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
-
- // Check prototype chain if receiver does not have packed elements.
- Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
- return Word32Equal(holey_elements, Int32Constant(1));
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
+ STATIC_ASSERT(HOLEY_ELEMENTS == (PACKED_ELEMENTS | 1));
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == (PACKED_DOUBLE_ELEMENTS | 1));
+ return IsSetWord32(elements_kind, 1);
}
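The STATIC_ASSERTs pin down that each holey fast kind is its packed counterpart with bit 0 set, which is what lets the whole check collapse to IsSetWord32(elements_kind, 1). A standalone sketch of the same test, with enum values assumed to match that pairing:

  #include <cstdio>

  // Assumed values; the only property relied on is HOLEY == PACKED | 1.
  enum ElementsKind {
    PACKED_SMI_ELEMENTS = 0,
    HOLEY_SMI_ELEMENTS = 1,
    PACKED_ELEMENTS = 2,
    HOLEY_ELEMENTS = 3,
    PACKED_DOUBLE_ELEMENTS = 4,
    HOLEY_DOUBLE_ELEMENTS = 5
  };

  bool IsHoleyFast(ElementsKind kind) { return (kind & 1) != 0; }

  int main() {
    std::printf("%d %d %d\n", IsHoleyFast(PACKED_ELEMENTS),
                IsHoleyFast(HOLEY_ELEMENTS), IsHoleyFast(HOLEY_DOUBLE_ELEMENTS));
    return 0;
  }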
Node* CodeStubAssembler::IsElementsKindGreaterThan(
@@ -9112,6 +9473,8 @@ Node* CodeStubAssembler::IsPromiseHookEnabledOrDebugIsActive() {
Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Node* shared_info,
Node* context) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+
Node* const code = BitcastTaggedToWord(
LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset));
Node* const code_entry =
@@ -9119,7 +9482,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Node* const fun = Allocate(JSFunction::kSize);
StoreMapNoWriteBarrier(fun, map);
- StoreObjectFieldRoot(fun, JSObject::kPropertiesOffset,
+ StoreObjectFieldRoot(fun, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
@@ -9176,9 +9539,8 @@ void CodeStubAssembler::Print(const char* s) {
#ifdef DEBUG
std::string formatted(s);
formatted += "\n";
- Handle<String> string = isolate()->factory()->NewStringFromAsciiChecked(
- formatted.c_str(), TENURED);
- CallRuntime(Runtime::kGlobalPrint, NoContextConstant(), HeapConstant(string));
+ CallRuntime(Runtime::kGlobalPrint, NoContextConstant(),
+ StringConstant(formatted.c_str()));
#endif
}
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 5b94e3ac6e..eb7b5c006b 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -27,10 +27,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(AllocationSiteMap, AllocationSiteMap) \
V(BooleanMap, BooleanMap) \
V(CodeMap, CodeMap) \
- V(empty_string, EmptyString) \
- V(length_string, LengthString) \
- V(prototype_string, PrototypeString) \
V(EmptyFixedArray, EmptyFixedArray) \
+ V(empty_string, EmptyString) \
V(EmptyWeakCell, EmptyWeakCell) \
V(FalseValue, False) \
V(FeedbackVectorMap, FeedbackVectorMap) \
@@ -38,15 +36,20 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(FixedCOWArrayMap, FixedCOWArrayMap) \
V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
V(FunctionTemplateInfoMap, FunctionTemplateInfoMap) \
+ V(GlobalPropertyCellMap, PropertyCellMap) \
V(has_instance_symbol, HasInstanceSymbol) \
V(HeapNumberMap, HeapNumberMap) \
- V(NoClosuresCellMap, NoClosuresCellMap) \
- V(OneClosureCellMap, OneClosureCellMap) \
+ V(length_string, LengthString) \
V(ManyClosuresCellMap, ManyClosuresCellMap) \
+ V(MetaMap, MetaMap) \
V(MinusZeroValue, MinusZero) \
+ V(MutableHeapNumberMap, MutableHeapNumberMap) \
V(NanValue, Nan) \
+ V(NoClosuresCellMap, NoClosuresCellMap) \
V(NullValue, Null) \
- V(GlobalPropertyCellMap, PropertyCellMap) \
+ V(OneClosureCellMap, OneClosureCellMap) \
+ V(prototype_string, PrototypeString) \
+ V(SpeciesProtector, SpeciesProtector) \
V(SymbolMap, SymbolMap) \
V(TheHoleValue, TheHole) \
V(TrueValue, True) \
@@ -54,7 +57,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(Tuple3Map, Tuple3Map) \
V(UndefinedValue, Undefined) \
V(WeakCellMap, WeakCellMap) \
- V(SpeciesProtector, SpeciesProtector)
+ V(SharedFunctionInfoMap, SharedFunctionInfoMap)
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
@@ -106,6 +109,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return value;
}
+ Node* Word32ToParameter(Node* value, ParameterMode mode) {
+ return WordToParameter(ChangeUint32ToWord(value), mode);
+ }
+
Node* ParameterToTagged(Node* value, ParameterMode mode) {
if (mode != SMI_PARAMETERS) value = SmiTag(value);
return value;
@@ -116,6 +123,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return value;
}
+ Node* MatchesParameterMode(Node* value, ParameterMode mode);
+
#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
Node* OpName(Node* a, Node* b, ParameterMode mode) { \
if (mode == SMI_PARAMETERS) { \
@@ -144,7 +153,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_TEST(rootName, name) Node* Is##name(Node* value);
+#define HEAP_CONSTANT_TEST(rootName, name) \
+ Node* Is##name(Node* value); \
+ Node* IsNot##name(Node* value);
HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
@@ -407,13 +418,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load the constructor of a Map (equivalent to
// Map::GetConstructor()).
Node* LoadMapConstructor(Node* map);
- // Loads a value from the specially encoded integer fields in the
- // SharedFunctionInfo object.
- // TODO(danno): This currently only works for the integer fields that are
- // mapped to the upper part of 64-bit words. We should customize
- // SFI::BodyDescriptor and store int32 values directly.
- Node* LoadSharedFunctionInfoSpecialField(Node* shared, int offset,
- ParameterMode param_mode);
// Check if the map is set for slow properties.
Node* IsDictionaryMap(Node* map);
@@ -435,6 +439,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadWeakCellValueUnchecked(Node* weak_cell);
Node* LoadWeakCellValue(Node* weak_cell, Label* if_cleared = nullptr);
+ // Get the offset of an element in a fixed array.
+ Node* GetFixedArrayElementOffset(
+ Node* index_node, int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
// Load an array element from a FixedArray.
Node* LoadFixedArrayElement(Node* object, Node* index,
int additional_offset = 0,
@@ -541,6 +550,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
Node* value);
+ Node* AllocateCellWithValue(Node* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ Node* AllocateSmiCell(int value = 0) {
+ return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER);
+ }
+
+ Node* LoadCellValue(Node* cell);
+
+ Node* StoreCellValue(Node* cell, Node* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
// Allocate a HeapNumber without initializing its value.
Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
// Allocate a HeapNumber with a specific value.
@@ -623,11 +643,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
+ Node* AllocatePropertyArray(Node* capacity,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ AllocationFlags flags = kNone);
// Perform CreateArrayIterator (ES6 #sec-createarrayiterator).
Node* CreateArrayIterator(Node* array, Node* array_map, Node* array_type,
Node* context, IterationKind mode);
Node* AllocateJSArrayIterator(Node* array, Node* array_map, Node* map);
+ Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
+ Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
Node* TypedArraySpeciesCreateByLength(Node* context, Node* originalArray,
Node* len);
@@ -637,6 +662,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Heap::RootListIndex value_root_index,
ParameterMode mode = INTPTR_PARAMETERS);
+ void FillPropertyArrayWithUndefined(Node* array, Node* from_index,
+ Node* to_index,
+ ParameterMode mode = INTPTR_PARAMETERS);
+
+ void CopyPropertyArrayValues(
+ Node* from_array, Node* to_array, Node* length,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ ParameterMode mode = INTPTR_PARAMETERS);
+
// Copies all elements from |from_array| of |length| size to
// |to_array| of the same size respecting the elements kind.
void CopyFixedArrayElements(
@@ -708,7 +742,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Allocation site manipulation
void InitializeAllocationMemento(Node* base_allocation,
- int base_allocation_size,
+ Node* base_allocation_size,
Node* allocation_site);
Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
@@ -736,65 +770,88 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
char const* method_name);
+ // Throws a TypeError for {method_name}. Terminates the current block.
+ void ThrowIncompatibleMethodReceiver(Node* context, char const* method_name,
+ Node* receiver);
+
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
Node* ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name);
+ void ThrowTypeError(Node* context, MessageTemplate::Template message,
+ char const* arg0 = nullptr, char const* arg1 = nullptr);
+ void ThrowTypeError(Node* context, MessageTemplate::Template message,
+ Node* arg0, Node* arg1 = nullptr, Node* arg2 = nullptr);
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
Node* InstanceTypeEqual(Node* instance_type, int type);
- Node* IsSpecialReceiverMap(Node* map);
- Node* IsSpecialReceiverInstanceType(Node* instance_type);
- Node* IsStringInstanceType(Node* instance_type);
- Node* IsOneByteStringInstanceType(Node* instance_type);
- Node* IsExternalStringInstanceType(Node* instance_type);
- Node* IsShortExternalStringInstanceType(Node* instance_type);
- Node* IsSequentialStringInstanceType(Node* instance_type);
+ Node* IsAccessorInfo(Node* object);
+ Node* IsAccessorPair(Node* object);
+ Node* IsAllocationSite(Node* object);
+ Node* IsAnyHeapNumber(Node* object);
+ Node* IsBoolean(Node* object);
+ Node* IsCallableMap(Node* map);
+ Node* IsCallable(Node* object);
Node* IsConsStringInstanceType(Node* instance_type);
+ Node* IsConstructorMap(Node* map);
+ Node* IsConstructor(Node* object);
+ Node* IsDeprecatedMap(Node* map);
+ Node* IsDictionary(Node* object);
+ Node* IsExternalStringInstanceType(Node* instance_type);
+ Node* IsFeedbackVector(Node* object);
+ Node* IsFixedArray(Node* object);
+ Node* IsFixedArrayWithKind(Node* object, ElementsKind kind);
+ Node* IsFixedArrayWithKindOrEmpty(Node* object, ElementsKind kind);
+ Node* IsFixedDoubleArray(Node* object);
+ Node* IsFixedTypedArray(Node* object);
+ Node* IsZeroOrFixedArray(Node* object);
+ Node* IsHashTable(Node* object);
+ Node* IsHeapNumber(Node* object);
Node* IsIndirectStringInstanceType(Node* instance_type);
- Node* IsString(Node* object);
+ Node* IsJSArrayBuffer(Node* object);
+ Node* IsJSArrayInstanceType(Node* instance_type);
+ Node* IsJSArrayMap(Node* object);
+ Node* IsJSArray(Node* object);
+ Node* IsJSFunctionInstanceType(Node* instance_type);
+ Node* IsJSFunctionMap(Node* object);
+ Node* IsJSFunction(Node* object);
+ Node* IsJSGlobalProxy(Node* object);
Node* IsJSObjectMap(Node* map);
Node* IsJSObject(Node* object);
- Node* IsJSGlobalProxy(Node* object);
+ Node* IsJSProxy(Node* object);
Node* IsJSReceiverInstanceType(Node* instance_type);
- Node* IsJSReceiver(Node* object);
Node* IsJSReceiverMap(Node* map);
- Node* IsMap(Node* object);
- Node* IsCallableMap(Node* map);
- Node* IsDeprecatedMap(Node* map);
- Node* IsCallable(Node* object);
- Node* IsBoolean(Node* object);
- Node* IsPropertyCell(Node* object);
- Node* IsAccessorInfo(Node* object);
- Node* IsAccessorPair(Node* object);
- Node* IsHeapNumber(Node* object);
- Node* IsName(Node* object);
- Node* IsSymbol(Node* object);
- Node* IsPrivateSymbol(Node* object);
+ Node* IsJSReceiver(Node* object);
+ Node* IsJSRegExp(Node* object);
+ Node* IsJSTypedArray(Node* object);
Node* IsJSValueInstanceType(Node* instance_type);
- Node* IsJSValue(Node* object);
Node* IsJSValueMap(Node* map);
- Node* IsJSArrayInstanceType(Node* instance_type);
- Node* IsJSArray(Node* object);
- Node* IsJSArrayMap(Node* object);
+ Node* IsJSValue(Node* object);
+ Node* IsMap(Node* object);
+ Node* IsMutableHeapNumber(Node* object);
+ Node* IsName(Node* object);
Node* IsNativeContext(Node* object);
- Node* IsWeakCell(Node* object);
- Node* IsFixedDoubleArray(Node* object);
- Node* IsHashTable(Node* object);
- Node* IsDictionary(Node* object);
+ Node* IsOneByteStringInstanceType(Node* instance_type);
+ Node* IsPrivateSymbol(Node* object);
+ Node* IsPropertyArray(Node* object);
+ Node* IsPropertyCell(Node* object);
+ Node* IsSequentialStringInstanceType(Node* instance_type);
+ inline Node* IsSharedFunctionInfo(Node* object) {
+ return IsSharedFunctionInfoMap(LoadMap(object));
+ }
+ Node* IsShortExternalStringInstanceType(Node* instance_type);
+ Node* IsSpecialReceiverInstanceType(Node* instance_type);
+ Node* IsSpecialReceiverMap(Node* map);
+ Node* IsStringInstanceType(Node* instance_type);
+ Node* IsString(Node* object);
+ Node* IsSymbolInstanceType(Node* instance_type);
+ Node* IsSymbol(Node* object);
Node* IsUnseededNumberDictionary(Node* object);
- Node* IsConstructorMap(Node* map);
- Node* IsJSFunctionInstanceType(Node* instance_type);
- Node* IsJSFunction(Node* object);
- Node* IsJSFunctionMap(Node* object);
- Node* IsJSTypedArray(Node* object);
- Node* IsJSArrayBuffer(Node* object);
- Node* IsFixedTypedArray(Node* object);
- Node* IsJSRegExp(Node* object);
- Node* IsFeedbackVector(Node* object);
+ Node* IsWeakCell(Node* object);
+ Node* IsUndetectableMap(Node* map);
// True iff |object| is a Smi or a HeapNumber.
Node* IsNumber(Node* object);
@@ -830,20 +887,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* StringAdd(Node* context, Node* first, Node* second,
AllocationFlags flags = kNone);
- // Unpack the external string, returning a pointer that (offset-wise) looks
- // like a sequential string.
- // Note that this pointer is not tagged and does not point to a real
- // sequential string instance, and may only be used to access the string
- // data. The pointer is GC-safe as long as a reference to the container
- // ExternalString is live.
- // |string| must be an external string. Bailout for short external strings.
- Node* TryDerefExternalString(Node* const string, Node* const instance_type,
- Label* if_bailout);
-
+ // Check if |string| is an indirect (thin or flat cons) string type that can
+ // be dereferenced by DerefIndirectString.
+ void BranchIfCanDerefIndirectString(Node* string, Node* instance_type,
+ Label* can_deref, Label* cannot_deref);
+ // Unpack an indirect (thin or flat cons) string type.
+ void DerefIndirectString(Variable* var_string, Node* instance_type);
// Check if |var_string| has an indirect (thin or flat cons) string type,
// and unpack it if so.
void MaybeDerefIndirectString(Variable* var_string, Node* instance_type,
- Variable* var_did_something);
+ Label* did_deref, Label* cannot_deref);
// Check if |var_left| or |var_right| has an indirect (thin or flat cons)
// string type, and unpack it/them if so. Fall through if nothing was done.
void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type,
@@ -988,6 +1041,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void Increment(Variable& variable, int value = 1,
ParameterMode mode = INTPTR_PARAMETERS);
+ void Decrement(Variable& variable, int value = 1,
+ ParameterMode mode = INTPTR_PARAMETERS) {
+ Increment(variable, -value, mode);
+ }
// Generates "if (false) goto label" code. Useful for marking a label as
// "live" to avoid assertion failures during graph building. In the resulting
@@ -1112,6 +1169,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// {if_not_found}.
static const int kInlinedDictionaryProbes = 4;
enum LookupMode { kFindExisting, kFindInsertionIndex };
+
+ template <typename Dictionary>
+ Node* LoadName(Node* key);
+
template <typename Dictionary>
void NameDictionaryLookup(Node* dictionary, Node* unique_name,
Label* if_found, Variable* var_name_index,
@@ -1119,6 +1180,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int inlined_probes = kInlinedDictionaryProbes,
LookupMode mode = kFindExisting);
+ Node* ComputeIntegerHash(Node* key);
Node* ComputeIntegerHash(Node* key, Node* seed);
template <typename Dictionary>
@@ -1225,6 +1287,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_end, Label* if_bailout);
// Instanceof helpers.
+ // Returns true if {object} has {prototype} somewhere in its prototype
+ // chain, otherwise false is returned. Might cause arbitrary side effects
+ // due to [[GetPrototypeOf]] invocations.
+ Node* HasInPrototypeChain(Node* context, Node* object, Node* prototype);
// ES6 section 7.3.19 OrdinaryHasInstance (C, O)
Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
@@ -1232,7 +1298,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadFeedbackVectorForStub();
// Update the type feedback vector.
- void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
+ void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id,
+ Node* function);
// Combine the new feedback with the existing_feedback.
void CombineFeedback(Variable* existing_feedback, Node* feedback);
@@ -1350,6 +1417,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
FixedArray::kHeaderSize);
}
+ Node* GetPropertyArrayAllocationSize(Node* element_count,
+ ParameterMode mode) {
+ return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode,
+ PropertyArray::kHeaderSize);
+ }
+
void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count,
Label* doesnt_fit, int base_size,
ParameterMode mode);
@@ -1440,6 +1513,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
UndefinedConstant(), SmiConstant(message), args...);
}
+ void Abort(BailoutReason reason) {
+ CallRuntime(Runtime::kAbort, NoContextConstant(), SmiConstant(reason));
+ Unreachable();
+ }
+
protected:
void DescriptorLookup(Node* unique_name, Node* descriptors, Node* bitfield3,
Label* if_found, Variable* var_name_index,
@@ -1516,15 +1594,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
class CodeStubArguments {
public:
typedef compiler::Node Node;
+ enum ReceiverMode { kHasReceiver, kNoReceiver };
// |argc| is an intptr value which specifies the number of arguments passed
- // to the builtin excluding the receiver.
- CodeStubArguments(CodeStubAssembler* assembler, Node* argc)
+ // to the builtin excluding the receiver. The arguments will include a
+ // receiver iff |receiver_mode| is kHasReceiver.
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
: CodeStubArguments(assembler, argc, nullptr,
- CodeStubAssembler::INTPTR_PARAMETERS) {}
- // |argc| is either a smi or intptr depending on |param_mode|
+ CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) {
+ }
+ // |argc| is either a smi or intptr depending on |param_mode|. The arguments
+ // include a receiver iff |receiver_mode| is kHasReceiver.
CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
- CodeStubAssembler::ParameterMode param_mode);
+ CodeStubAssembler::ParameterMode param_mode,
+ ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
Node* GetReceiver() const;
@@ -1537,6 +1621,9 @@ class CodeStubArguments {
Node* AtIndex(int index) const;
+ Node* GetOptionalArgumentValue(int index) {
+ return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
+ }
Node* GetOptionalArgumentValue(int index, Node* default_value);
Node* GetLength() const { return argc_; }
@@ -1564,6 +1651,7 @@ class CodeStubArguments {
CodeStubAssembler* assembler_;
CodeStubAssembler::ParameterMode argc_mode_;
+ ReceiverMode receiver_mode_;
Node* argc_;
Node* arguments_;
Node* fp_;
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
deleted file mode 100644
index c12d17ae7d..0000000000
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/code-stubs.h"
-
-#include <memory>
-
-#include "src/assembler-inl.h"
-#include "src/bailout-reason.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/field-index.h"
-#include "src/ic/ic.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-static LChunk* OptimizeGraph(HGraph* graph) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- DCHECK(graph != NULL);
- BailoutReason bailout_reason = kNoReason;
- if (!graph->Optimize(&bailout_reason)) {
- FATAL(GetBailoutReason(bailout_reason));
- }
- LChunk* chunk = LChunk::NewChunk(graph);
- if (chunk == NULL) {
- FATAL(GetBailoutReason(graph->info()->bailout_reason()));
- }
- return chunk;
-}
-
-
-class CodeStubGraphBuilderBase : public HGraphBuilder {
- public:
- explicit CodeStubGraphBuilderBase(CompilationInfo* info, CodeStub* code_stub)
- : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor(), false),
- arguments_length_(NULL),
- info_(info),
- code_stub_(code_stub),
- descriptor_(code_stub),
- context_(NULL) {
- int parameter_count = GetParameterCount();
- parameters_.reset(new HParameter*[parameter_count]);
- }
- virtual bool BuildGraph();
-
- protected:
- virtual HValue* BuildCodeStub() = 0;
- int GetParameterCount() const { return descriptor_.GetParameterCount(); }
- int GetRegisterParameterCount() const {
- return descriptor_.GetRegisterParameterCount();
- }
- HParameter* GetParameter(int parameter) {
- DCHECK(parameter < GetParameterCount());
- return parameters_[parameter];
- }
- Representation GetParameterRepresentation(int parameter) {
- return RepresentationFromMachineType(
- descriptor_.GetParameterType(parameter));
- }
- bool IsParameterCountRegister(int index) const {
- return descriptor_.GetRegisterParameter(index)
- .is(descriptor_.stack_parameter_count());
- }
- HValue* GetArgumentsLength() {
- // This is initialized in BuildGraph()
- DCHECK(arguments_length_ != NULL);
- return arguments_length_;
- }
- CompilationInfo* info() { return info_; }
- CodeStub* stub() { return code_stub_; }
- HContext* context() { return context_; }
- Isolate* isolate() { return info_->isolate(); }
-
- private:
- std::unique_ptr<HParameter* []> parameters_;
- HValue* arguments_length_;
- CompilationInfo* info_;
- CodeStub* code_stub_;
- CodeStubDescriptor descriptor_;
- HContext* context_;
-};
-
-
-bool CodeStubGraphBuilderBase::BuildGraph() {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- if (FLAG_trace_hydrogen_stubs) {
- const char* name = CodeStub::MajorName(stub()->MajorKey());
- PrintF("-----------------------------------------------------------\n");
- PrintF("Compiling stub %s using hydrogen\n", name);
- isolate()->GetHTracer()->TraceCompilation(info());
- }
-
- int param_count = GetParameterCount();
- int register_param_count = GetRegisterParameterCount();
- HEnvironment* start_environment = graph()->start_environment();
- HBasicBlock* next_block = CreateBasicBlock(start_environment);
- Goto(next_block);
- next_block->SetJoinId(BailoutId::StubEntry());
- set_current_block(next_block);
-
- bool runtime_stack_params = descriptor_.stack_parameter_count().is_valid();
- HInstruction* stack_parameter_count = NULL;
- for (int i = 0; i < param_count; ++i) {
- Representation r = GetParameterRepresentation(i);
- HParameter* param;
- if (i >= register_param_count) {
- param = Add<HParameter>(i - register_param_count,
- HParameter::STACK_PARAMETER, r);
- } else {
- param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
- }
- start_environment->Bind(i, param);
- parameters_[i] = param;
- if (i < register_param_count && IsParameterCountRegister(i)) {
- param->set_type(HType::Smi());
- stack_parameter_count = param;
- arguments_length_ = stack_parameter_count;
- }
- }
-
- DCHECK(!runtime_stack_params || arguments_length_ != NULL);
- if (!runtime_stack_params) {
- stack_parameter_count =
- Add<HConstant>(param_count - register_param_count - 1);
- // graph()->GetConstantMinus1();
- arguments_length_ = graph()->GetConstant0();
- }
-
- context_ = Add<HContext>();
- start_environment->BindContext(context_);
- start_environment->Bind(param_count, context_);
-
- Add<HSimulate>(BailoutId::StubEntry());
-
- NoObservableSideEffectsScope no_effects(this);
-
- HValue* return_value = BuildCodeStub();
-
- // We might have extra expressions to pop from the stack in addition to the
- // arguments above.
- HInstruction* stack_pop_count = stack_parameter_count;
- if (descriptor_.function_mode() == JS_FUNCTION_STUB_MODE) {
- if (!stack_parameter_count->IsConstant() &&
- descriptor_.hint_stack_parameter_count() < 0) {
- HInstruction* constant_one = graph()->GetConstant1();
- stack_pop_count = AddUncasted<HAdd>(stack_parameter_count, constant_one);
- stack_pop_count->ClearFlag(HValue::kCanOverflow);
- // TODO(mvstanton): verify that stack_parameter_count+1 really fits in a
- // smi.
- } else {
- int count = descriptor_.hint_stack_parameter_count();
- stack_pop_count = Add<HConstant>(count);
- }
- }
-
- if (current_block() != NULL) {
- HReturn* hreturn_instruction = New<HReturn>(return_value,
- stack_pop_count);
- FinishCurrentBlock(hreturn_instruction);
- }
- return true;
-}
-
-
-template <class Stub>
-class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
- public:
- explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
- : CodeStubGraphBuilderBase(info, stub) {}
-
- typedef typename Stub::Descriptor Descriptor;
-
- protected:
- virtual HValue* BuildCodeStub() {
- if (casted_stub()->IsUninitialized()) {
- return BuildCodeUninitializedStub();
- } else {
- return BuildCodeInitializedStub();
- }
- }
-
- virtual HValue* BuildCodeInitializedStub() {
- UNIMPLEMENTED();
- return NULL;
- }
-
- virtual HValue* BuildCodeUninitializedStub() {
- // Force a deopt that falls back to the runtime.
- HValue* undefined = graph()->GetConstantUndefined();
- IfBuilder builder(this);
- builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
- builder.Then();
- builder.ElseDeopt(DeoptimizeReason::kForcedDeoptToRuntime);
- return undefined;
- }
-
- Stub* casted_stub() { return static_cast<Stub*>(stub()); }
-};
-
-
-Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
- ExternalReference miss) {
- Factory* factory = isolate()->factory();
-
- // Generate the new code.
- MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
-
- {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- // Generate the code for the stub.
- masm.set_generating_stub(true);
- // TODO(yangguo): remove this once we can serialize IC stubs.
- masm.enable_serializer();
- NoCurrentFrameScope scope(&masm);
- GenerateLightweightMiss(&masm, miss);
- }
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Handle<Code> new_object = factory->NewCode(
- desc, GetCodeFlags(), masm.CodeObject(), NeedsImmovableCode());
- return new_object;
-}
-
-Handle<Code> HydrogenCodeStub::GenerateRuntimeTailCall(
- CodeStubDescriptor* descriptor) {
- const char* name = CodeStub::MajorName(MajorKey());
- Zone zone(isolate()->allocator(), ZONE_NAME);
- CallInterfaceDescriptor interface_descriptor(GetCallInterfaceDescriptor());
- compiler::CodeAssemblerState state(isolate(), &zone, interface_descriptor,
- GetCodeFlags(), name);
- CodeStubAssembler assembler(&state);
- int total_params = interface_descriptor.GetStackParameterCount() +
- interface_descriptor.GetRegisterParameterCount();
- switch (total_params) {
- case 0:
- assembler.TailCallRuntime(descriptor->miss_handler_id(),
- assembler.Parameter(0));
- break;
- case 1:
- assembler.TailCallRuntime(descriptor->miss_handler_id(),
- assembler.Parameter(1), assembler.Parameter(0));
- break;
- case 2:
- assembler.TailCallRuntime(descriptor->miss_handler_id(),
- assembler.Parameter(2), assembler.Parameter(0),
- assembler.Parameter(1));
- break;
- case 3:
- assembler.TailCallRuntime(descriptor->miss_handler_id(),
- assembler.Parameter(3), assembler.Parameter(0),
- assembler.Parameter(1), assembler.Parameter(2));
- break;
- case 4:
- assembler.TailCallRuntime(descriptor->miss_handler_id(),
- assembler.Parameter(4), assembler.Parameter(0),
- assembler.Parameter(1), assembler.Parameter(2),
- assembler.Parameter(3));
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- return compiler::CodeAssembler::GenerateCode(&state);
-}
-
-template <class Stub>
-static Handle<Code> DoGenerateCode(Stub* stub) {
- Isolate* isolate = stub->isolate();
- CodeStubDescriptor descriptor(stub);
-
- if (FLAG_minimal && descriptor.has_miss_handler()) {
- return stub->GenerateRuntimeTailCall(&descriptor);
- }
-
- // If we are uninitialized we can use a light-weight stub to enter
- // the runtime that is significantly faster than using the standard
- // stub-failure deopt mechanism.
- if (stub->IsUninitialized() && descriptor.has_miss_handler()) {
- DCHECK(!descriptor.stack_parameter_count().is_valid());
- return stub->GenerateLightweightMissCode(descriptor.miss_handler());
- }
- base::ElapsedTimer timer;
- if (FLAG_profile_hydrogen_code_stub_compilation) {
- timer.Start();
- }
- Zone zone(isolate->allocator(), ZONE_NAME);
- CompilationInfo info(CStrVector(CodeStub::MajorName(stub->MajorKey())),
- isolate, &zone, stub->GetCodeFlags());
- // Parameter count is number of stack parameters.
- int parameter_count = descriptor.GetStackParameterCount();
- if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
- parameter_count--;
- }
- info.set_parameter_count(parameter_count);
- CodeStubGraphBuilder<Stub> builder(&info, stub);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- Handle<Code> code = chunk->Codegen();
- if (FLAG_profile_hydrogen_code_stub_compilation) {
- OFStream os(stdout);
- os << "[Lazy compilation of " << stub << " took "
- << timer.Elapsed().InMillisecondsF() << " ms]" << std::endl;
- }
- return code;
-}
-
-template <>
-HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
- ElementsKind const from_kind = casted_stub()->from_kind();
- ElementsKind const to_kind = casted_stub()->to_kind();
- HValue* const object = GetParameter(Descriptor::kObject);
- HValue* const map = GetParameter(Descriptor::kMap);
-
- // The {object} is known to be a JSObject (otherwise it wouldn't have elements
- // anyways).
- object->set_type(HType::JSObject());
-
- info()->MarkAsSavesCallerDoubles();
-
- DCHECK_IMPLIES(IsFastHoleyElementsKind(from_kind),
- IsFastHoleyElementsKind(to_kind));
-
- if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
- Add<HTrapAllocationMemento>(object);
- }
-
- if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
- HInstruction* elements = AddLoadElements(object);
-
- IfBuilder if_objecthaselements(this);
- if_objecthaselements.IfNot<HCompareObjectEqAndBranch>(
- elements, Add<HConstant>(isolate()->factory()->empty_fixed_array()));
- if_objecthaselements.Then();
- {
- // Determine the elements capacity.
- HInstruction* elements_length = AddLoadFixedArrayLength(elements);
-
- // Determine the effective (array) length.
- IfBuilder if_objectisarray(this);
- if_objectisarray.If<HHasInstanceTypeAndBranch>(object, JS_ARRAY_TYPE);
- if_objectisarray.Then();
- {
- // The {object} is a JSArray, load the special "length" property.
- Push(Add<HLoadNamedField>(object, nullptr,
- HObjectAccess::ForArrayLength(from_kind)));
- }
- if_objectisarray.Else();
- {
- // The {object} is some other JSObject.
- Push(elements_length);
- }
- if_objectisarray.End();
- HValue* length = Pop();
-
- BuildGrowElementsCapacity(object, elements, from_kind, to_kind, length,
- elements_length);
- }
- if_objecthaselements.End();
- }
-
- Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
-
- return object;
-}
-
-
-Handle<Code> TransitionElementsKindStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-template <>
-HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
- BinaryOpICState state = casted_stub()->state();
-
- HValue* left = GetParameter(Descriptor::kLeft);
- HValue* right = GetParameter(Descriptor::kRight);
-
- AstType* left_type = state.GetLeftType();
- AstType* right_type = state.GetRightType();
- AstType* result_type = state.GetResultType();
-
- DCHECK(!left_type->Is(AstType::None()) && !right_type->Is(AstType::None()) &&
- (state.HasSideEffects() || !result_type->Is(AstType::None())));
-
- HValue* result = NULL;
- HAllocationMode allocation_mode(NOT_TENURED);
- if (state.op() == Token::ADD && (left_type->Maybe(AstType::String()) ||
- right_type->Maybe(AstType::String())) &&
- !left_type->Is(AstType::String()) && !right_type->Is(AstType::String())) {
- // For the generic add stub a fast case for string addition is performance
- // critical.
- if (left_type->Maybe(AstType::String())) {
- IfBuilder if_leftisstring(this);
- if_leftisstring.If<HIsStringAndBranch>(left);
- if_leftisstring.Then();
- {
- Push(BuildBinaryOperation(state.op(), left, right, AstType::String(),
- right_type, result_type,
- state.fixed_right_arg(), allocation_mode));
- }
- if_leftisstring.Else();
- {
- Push(BuildBinaryOperation(state.op(), left, right, left_type,
- right_type, result_type,
- state.fixed_right_arg(), allocation_mode));
- }
- if_leftisstring.End();
- result = Pop();
- } else {
- IfBuilder if_rightisstring(this);
- if_rightisstring.If<HIsStringAndBranch>(right);
- if_rightisstring.Then();
- {
- Push(BuildBinaryOperation(state.op(), left, right, left_type,
- AstType::String(), result_type,
- state.fixed_right_arg(), allocation_mode));
- }
- if_rightisstring.Else();
- {
- Push(BuildBinaryOperation(state.op(), left, right, left_type,
- right_type, result_type,
- state.fixed_right_arg(), allocation_mode));
- }
- if_rightisstring.End();
- result = Pop();
- }
- } else {
- result = BuildBinaryOperation(state.op(), left, right, left_type,
- right_type, result_type,
- state.fixed_right_arg(), allocation_mode);
- }
-
- // If we encounter a generic argument, the number conversion is
- // observable, thus we cannot afford to bail out after the fact.
- if (!state.HasSideEffects()) {
- result = EnforceNumberType(result, result_type);
- }
-
- return result;
-}
-
-
-Handle<Code> BinaryOpICStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
- BinaryOpICState state = casted_stub()->state();
-
- HValue* allocation_site = GetParameter(Descriptor::kAllocationSite);
- HValue* left = GetParameter(Descriptor::kLeft);
- HValue* right = GetParameter(Descriptor::kRight);
-
- AstType* left_type = state.GetLeftType();
- AstType* right_type = state.GetRightType();
- AstType* result_type = state.GetResultType();
- HAllocationMode allocation_mode(allocation_site);
-
- return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
- result_type, state.fixed_right_arg(),
- allocation_mode);
-}
-
-
-Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
- ToBooleanICStub* stub = casted_stub();
- IfBuilder if_true(this);
- if_true.If<HBranch>(GetParameter(Descriptor::kArgument), stub->hints());
- if_true.Then();
- if_true.Return(graph()->GetConstantTrue());
- if_true.Else();
- if_true.End();
- return graph()->GetConstantFalse();
-}
-
-Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 35fb1fe74e..b4c3247fdf 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -120,7 +120,9 @@ Handle<Code> CodeStub::GetCodeCopy(const FindAndReplacePattern& pattern) {
void CodeStub::DeleteStubFromCacheForTesting() {
Heap* heap = isolate_->heap();
Handle<UnseededNumberDictionary> dict(heap->code_stubs());
- dict = UnseededNumberDictionary::DeleteKey(dict, GetKey());
+ int entry = dict->FindEntry(GetKey());
+ DCHECK_NE(UnseededNumberDictionary::kNotFound, entry);
+ dict = UnseededNumberDictionary::DeleteEntry(dict, entry);
heap->SetRootCodeStubs(*dict);
}
@@ -135,7 +137,6 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
isolate()->counters()->code_stubs()->Increment();
// Generate the code for the stub.
- masm.set_generating_stub(true);
// TODO(yangguo): remove this once we can serialize IC stubs.
masm.enable_serializer();
NoCurrentFrameScope scope(&masm);
@@ -144,7 +145,7 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
// Create the code object.
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate(), &desc);
// Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
Handle<Code> new_object = factory->NewCode(
@@ -164,6 +165,9 @@ Handle<Code> CodeStub::GetCode() {
{
HandleScope scope(isolate());
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate());
Handle<Code> new_object = GenerateCode();
new_object->set_stub_key(GetKey());
@@ -185,11 +189,8 @@ Handle<Code> CodeStub::GetCode() {
AddToSpecialCache(new_object);
} else {
// Update the dictionary and the root in Heap.
- Handle<UnseededNumberDictionary> dict =
- UnseededNumberDictionary::AtNumberPut(
- Handle<UnseededNumberDictionary>(heap->code_stubs()),
- GetKey(),
- new_object);
+ Handle<UnseededNumberDictionary> dict = UnseededNumberDictionary::Set(
+ handle(heap->code_stubs()), GetKey(), new_object);
heap->SetRootCodeStubs(*dict);
}
code = *new_object;
@@ -211,7 +212,6 @@ const char* CodeStub::MajorName(CodeStub::Major major_key) {
return "<NoCache>Stub";
case NUMBER_OF_IDS:
UNREACHABLE();
- return NULL;
}
return NULL;
}
@@ -280,56 +280,6 @@ MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
}
-// static
-void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
- if (FLAG_minimal) return;
- // Generate the uninitialized versions of the stub.
- for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
- BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
- stub.GetCode();
- }
-
- // Generate special versions of the stub.
- BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
-}
-
-
-void BinaryOpICStub::PrintState(std::ostream& os) const { // NOLINT
- os << state();
-}
-
-
-// static
-void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
- const BinaryOpICState& state) {
- if (FLAG_minimal) return;
- BinaryOpICStub stub(isolate, state);
- stub.GetCode();
-}
-
-
-// static
-void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
- // Generate special versions of the stub.
- BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
-}
-
-
-void BinaryOpICWithAllocationSiteStub::PrintState(
- std::ostream& os) const { // NOLINT
- os << state();
-}
-
-
-// static
-void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
- Isolate* isolate, const BinaryOpICState& state) {
- if (state.CouldCreateAllocationMementos()) {
- BinaryOpICWithAllocationSiteStub stub(isolate, state);
- stub.GetCode();
- }
-}
-
void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
os << "StringAddStub_" << flags() << "_" << pretenure_flag();
}
@@ -385,7 +335,6 @@ InlineCacheState CompareICStub::GetICState() const {
return ::v8::internal::GENERIC;
}
UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
}
@@ -491,6 +440,23 @@ TF_STUB(StringLengthStub, CodeStubAssembler) {
Return(result);
}
+TF_STUB(TransitionElementsKindStub, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* new_map = Parameter(Descriptor::kMap);
+
+ Label bailout(this);
+ TransitionElementsKind(object, new_map, stub->from_kind(), stub->to_kind(),
+ stub->is_jsarray(), &bailout);
+ Return(object);
+
+ BIND(&bailout);
+ {
+ Comment("Call runtime");
+ TailCallRuntime(Runtime::kTransitionElementsKind, context, object, new_map);
+ }
+}
+
// TODO(ishell): move to builtins.
TF_STUB(NumberToStringStub, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -606,7 +572,7 @@ TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
}
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
- os << convert_mode() << ", " << tail_call_mode();
+ os << convert_mode();
}
// TODO(ishell): Move to CallICAssembler.
@@ -624,15 +590,14 @@ TF_STUB(CallICStub, CodeStubAssembler) {
// Static checks to assert it is safe to examine the type feedback element.
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // AllocationSite::kTransitionInfoOrBoilerplateOffset - contains a Smi or
+ // pointer to FixedArray. WeakCell::kValueOffset - contains a JSFunction or
+ // Smi(0). Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then hash is computed, but the 0 bit prevents the field from appearing
// to be a pointer.
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOrBoilerplateOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
@@ -661,8 +626,8 @@ TF_STUB(CallICStub, CodeStubAssembler) {
BIND(&call_function);
{
// Call using CallFunction builtin.
- Callable callable = CodeFactory::CallFunction(
- isolate(), stub->convert_mode(), stub->tail_call_mode());
+ Callable callable =
+ CodeFactory::CallFunction(isolate(), stub->convert_mode());
TailCallStub(callable, context, target, argc);
}
@@ -680,8 +645,7 @@ TF_STUB(CallICStub, CodeStubAssembler) {
GotoIf(is_megamorphic, &call);
Comment("check if it is an allocation site");
- GotoIfNot(IsAllocationSiteMap(LoadMap(feedback_element)),
- &check_initialized);
+ GotoIfNot(IsAllocationSite(feedback_element), &check_initialized);
// If it is not the Array() function, mark megamorphic.
Node* context_slot = LoadContextElement(LoadNativeContext(context),
@@ -765,8 +729,7 @@ TF_STUB(CallICStub, CodeStubAssembler) {
{
// Call using call builtin.
Comment("call using Call builtin");
- Callable callable_call = CodeFactory::Call(isolate(), stub->convert_mode(),
- stub->tail_call_mode());
+ Callable callable_call = CodeFactory::Call(isolate(), stub->convert_mode());
TailCallStub(callable_call, context, target, argc);
}
}
@@ -778,8 +741,7 @@ TF_STUB(CallICTrampolineStub, CodeStubAssembler) {
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = CodeFactory::CallIC(isolate(), stub->convert_mode(),
- stub->tail_call_mode());
+ Callable callable = CodeFactory::CallIC(isolate(), stub->convert_mode());
TailCallStub(callable, context, target, argc, slot, vector);
}
@@ -790,12 +752,6 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
code->set_handler_table(*handler_table);
}
-void TransitionElementsKindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
-}
-
void AllocateHeapNumberStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@@ -804,24 +760,6 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
-void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
- descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(Runtime_BinaryOpIC_Miss));
- descriptor->SetMissHandler(Runtime::kBinaryOpIC_Miss);
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
-}
-
// TODO(ishell): move to builtins.
TF_STUB(GetPropertyStub, CodeStubAssembler) {
Label call_runtime(this, Label::kDeferred), return_undefined(this), end(this);
@@ -929,10 +867,11 @@ TF_STUB(StoreFastElementStub, CodeStubAssembler) {
// static
void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
if (FLAG_minimal) return;
- StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS, STANDARD_STORE)
+ StoreFastElementStub(isolate, false, HOLEY_ELEMENTS, STANDARD_STORE)
+ .GetCode();
+ StoreFastElementStub(isolate, false, HOLEY_ELEMENTS,
+ STORE_AND_GROW_NO_TRANSITION)
.GetCode();
- StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS,
- STORE_AND_GROW_NO_TRANSITION).GetCode();
for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
ElementsKind kind = static_cast<ElementsKind>(i);
StoreFastElementStub(isolate, true, kind, STANDARD_STORE).GetCode();
@@ -941,58 +880,6 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
}
-bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
- ToBooleanHints old_hints = hints();
- ToBooleanHints new_hints = old_hints;
- bool to_boolean_value = false; // Dummy initialization.
- if (object->IsUndefined(isolate())) {
- new_hints |= ToBooleanHint::kUndefined;
- to_boolean_value = false;
- } else if (object->IsBoolean()) {
- new_hints |= ToBooleanHint::kBoolean;
- to_boolean_value = object->IsTrue(isolate());
- } else if (object->IsNull(isolate())) {
- new_hints |= ToBooleanHint::kNull;
- to_boolean_value = false;
- } else if (object->IsSmi()) {
- new_hints |= ToBooleanHint::kSmallInteger;
- to_boolean_value = Smi::cast(*object)->value() != 0;
- } else if (object->IsJSReceiver()) {
- new_hints |= ToBooleanHint::kReceiver;
- to_boolean_value = !object->IsUndetectable();
- } else if (object->IsString()) {
- DCHECK(!object->IsUndetectable());
- new_hints |= ToBooleanHint::kString;
- to_boolean_value = String::cast(*object)->length() != 0;
- } else if (object->IsSymbol()) {
- new_hints |= ToBooleanHint::kSymbol;
- to_boolean_value = true;
- } else if (object->IsHeapNumber()) {
- DCHECK(!object->IsUndetectable());
- new_hints |= ToBooleanHint::kHeapNumber;
- double value = HeapNumber::cast(*object)->value();
- to_boolean_value = value != 0 && !std::isnan(value);
- } else {
- // We should never see an internal object at runtime here!
- UNREACHABLE();
- to_boolean_value = true;
- }
-
- set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
- return to_boolean_value;
-}
-
-void ToBooleanICStub::PrintState(std::ostream& os) const { // NOLINT
- os << hints();
-}
-
-void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
- StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE);
- StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE);
- stub1.GetCode();
- stub2.GetCode();
-}
-
void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer,
@@ -1020,7 +907,7 @@ TF_STUB(ArrayNoArgumentConstructorStub, CodeStubAssembler) {
Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),
JSFunction::kContextOffset);
bool track_allocation_site =
- AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE &&
+ AllocationSite::ShouldTrack(elements_kind) &&
stub->override_mode() != DISABLE_ALLOCATION_SITES;
Node* allocation_site =
track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr;
@@ -1028,17 +915,16 @@ TF_STUB(ArrayNoArgumentConstructorStub, CodeStubAssembler) {
Node* array =
AllocateJSArray(elements_kind, array_map,
IntPtrConstant(JSArray::kPreallocatedArrayElements),
- SmiConstant(Smi::kZero), allocation_site);
+ SmiConstant(0), allocation_site);
Return(array);
}
TF_STUB(InternalArrayNoArgumentConstructorStub, CodeStubAssembler) {
Node* array_map = LoadObjectField(Parameter(Descriptor::kFunction),
JSFunction::kPrototypeOrInitialMapOffset);
- Node* array =
- AllocateJSArray(stub->elements_kind(), array_map,
- IntPtrConstant(JSArray::kPreallocatedArrayElements),
- SmiConstant(Smi::kZero));
+ Node* array = AllocateJSArray(
+ stub->elements_kind(), array_map,
+ IntPtrConstant(JSArray::kPreallocatedArrayElements), SmiConstant(0));
Return(array);
}
@@ -1069,21 +955,19 @@ void ArrayConstructorAssembler::GenerateConstructor(
if (IsFastPackedElementsKind(elements_kind)) {
Label abort(this, Label::kDeferred);
- Branch(SmiEqual(array_size, SmiConstant(Smi::kZero)), &small_smi_size,
- &abort);
+ Branch(SmiEqual(array_size, SmiConstant(0)), &small_smi_size, &abort);
BIND(&abort);
- Node* reason = SmiConstant(Smi::FromInt(kAllocatingNonEmptyPackedArray));
+ Node* reason = SmiConstant(kAllocatingNonEmptyPackedArray);
TailCallRuntime(Runtime::kAbort, context, reason);
} else {
int element_size =
- IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
+ IsDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
int max_fast_elements =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
AllocationMemento::kSize) /
element_size;
- Branch(SmiAboveOrEqual(array_size,
- SmiConstant(Smi::FromInt(max_fast_elements))),
+ Branch(SmiAboveOrEqual(array_size, SmiConstant(max_fast_elements)),
&call_runtime, &small_smi_size);
}
@@ -1109,9 +993,13 @@ TF_STUB(ArraySingleArgumentConstructorStub, ArrayConstructorAssembler) {
Node* function = Parameter(Descriptor::kFunction);
Node* native_context = LoadObjectField(function, JSFunction::kContextOffset);
Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
- AllocationSiteMode mode = stub->override_mode() == DISABLE_ALLOCATION_SITES
- ? DONT_TRACK_ALLOCATION_SITE
- : AllocationSite::GetMode(elements_kind);
+ AllocationSiteMode mode = DONT_TRACK_ALLOCATION_SITE;
+ if (stub->override_mode() == DONT_OVERRIDE) {
+ mode = AllocationSite::ShouldTrack(elements_kind)
+ ? TRACK_ALLOCATION_SITE
+ : DONT_TRACK_ALLOCATION_SITE;
+ }
+
Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
Node* allocation_site = Parameter(Descriptor::kAllocationSite);
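
Both array-constructor stubs above derive the allocation-site mode from AllocationSite::ShouldTrack() combined with the stub's override mode. The following standalone sketch restates that decision as one helper; the enum values mirror the ones used in the hunks above, but SiteModeFor itself is illustrative and not part of V8:

  // Illustrative only: how the override mode and the per-ElementsKind policy
  // combine into an AllocationSiteMode.
  enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };
  enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

  AllocationSiteMode SiteModeFor(AllocationSiteOverrideMode override_mode,
                                 bool should_track_kind) {
    // DISABLE_ALLOCATION_SITES wins unconditionally; otherwise defer to
    // what AllocationSite::ShouldTrack() says for the elements kind.
    if (override_mode == DISABLE_ALLOCATION_SITES) {
      return DONT_TRACK_ALLOCATION_SITE;
    }
    return should_track_kind ? TRACK_ALLOCATION_SITE
                             : DONT_TRACK_ALLOCATION_SITE;
  }
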
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 7a1b905fd6..d57a35f8a5 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -32,7 +32,6 @@ class Node;
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
/* --- PlatformCodeStubs --- */ \
V(ArrayConstructor) \
- V(BinaryOpICWithAllocationSite) \
V(CallApiCallback) \
V(CallApiGetter) \
V(CallConstruct) \
@@ -49,22 +48,11 @@ class Node;
V(StoreSlowElement) \
V(SubString) \
V(NameDictionaryLookup) \
- /* This can be removed once there are no */ \
- /* more deopting Hydrogen stubs. */ \
- V(StubFailureTrampoline) \
/* These are only called from FCG */ \
/* They can be removed when only the TF */ \
/* version of the corresponding stub is */ \
/* used universally */ \
V(CallICTrampoline) \
- /* --- HydrogenCodeStubs --- */ \
- /* These should never be ported to TF */ \
- /* because they are either used only by */ \
- /* FCG/Crankshaft or are deprecated */ \
- V(BinaryOpIC) \
- V(BinaryOpWithAllocationSite) \
- V(ToBooleanIC) \
- V(TransitionElementsKind) \
/* --- TurboFanCodeStubs --- */ \
V(AllocateHeapNumber) \
V(ArrayNoArgumentConstructor) \
@@ -85,6 +73,7 @@ class Node;
V(GetProperty) \
V(StoreFastElement) \
V(StoreInterceptor) \
+ V(TransitionElementsKind) \
V(LoadIndexedInterceptor) \
V(GrowArrayElements)
@@ -153,8 +142,7 @@ class Node;
static const int kHasReturnedMinusZeroSentinel = 1;
-// Stub is base classes of all stubs.
-class CodeStub BASE_EMBEDDED {
+class CodeStub : public ZoneObject {
public:
enum Major {
// TODO(mvstanton): eliminate the NoCache key by getting rid
@@ -235,6 +223,11 @@ class CodeStub BASE_EMBEDDED {
}
Isolate* isolate() const { return isolate_; }
+ void set_isolate(Isolate* isolate) {
+ DCHECK_NOT_NULL(isolate);
+ DCHECK(isolate_ == nullptr || isolate_ == isolate);
+ isolate_ = isolate;
+ }
void DeleteStubFromCacheForTesting();
@@ -326,12 +319,6 @@ class CodeStub BASE_EMBEDDED {
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER) \
- public: \
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
- Handle<Code> GenerateCode() override; \
- DEFINE_CODE_STUB(NAME, SUPER)
-
#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
public: \
void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
@@ -472,56 +459,6 @@ class CodeStubDescriptor {
};
-class HydrogenCodeStub : public CodeStub {
- public:
- enum InitializationState {
- UNINITIALIZED,
- INITIALIZED
- };
-
- template<class SubClass>
- static Handle<Code> GetUninitialized(Isolate* isolate) {
- SubClass::GenerateAheadOfTime(isolate);
- return SubClass().GetCode(isolate);
- }
-
- // Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GenerateCode() override = 0;
-
- bool IsUninitialized() const { return IsMissBits::decode(minor_key_); }
-
- Handle<Code> GenerateLightweightMissCode(ExternalReference miss);
-
- Handle<Code> GenerateRuntimeTailCall(CodeStubDescriptor* descriptor);
-
- template<class StateType>
- void TraceTransition(StateType from, StateType to);
-
- protected:
- explicit HydrogenCodeStub(Isolate* isolate,
- InitializationState state = INITIALIZED)
- : CodeStub(isolate) {
- minor_key_ = IsMissBits::encode(state == UNINITIALIZED);
- }
-
- void set_sub_minor_key(uint32_t key) {
- minor_key_ = SubMinorKeyBits::update(minor_key_, key);
- }
-
- uint32_t sub_minor_key() const { return SubMinorKeyBits::decode(minor_key_); }
-
- static const int kSubMinorKeyBits = kStubMinorKeyBits - 1;
-
- private:
- class IsMissBits : public BitField<bool, kSubMinorKeyBits, 1> {};
- class SubMinorKeyBits : public BitField<int, 0, kSubMinorKeyBits> {};
-
- void GenerateLightweightMiss(MacroAssembler* masm, ExternalReference miss);
-
- DEFINE_CODE_STUB_BASE(HydrogenCodeStub, CodeStub);
-};
-
-
class TurboFanCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
@@ -577,8 +514,6 @@ class RuntimeCallHelper {
#include "src/mips64/code-stubs-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/code-stubs-s390.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/code-stubs-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -632,6 +567,37 @@ class StoreInterceptorStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
};
+class TransitionElementsKindStub : public TurboFanCodeStub {
+ public:
+ TransitionElementsKindStub(Isolate* isolate, ElementsKind from_kind,
+ ElementsKind to_kind, bool is_jsarray)
+ : TurboFanCodeStub(isolate) {
+ set_sub_minor_key(FromKindBits::encode(from_kind) |
+ ToKindBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_jsarray));
+ }
+
+ void set_sub_minor_key(uint32_t key) { minor_key_ = key; }
+
+ uint32_t sub_minor_key() const { return minor_key_; }
+
+ ElementsKind from_kind() const {
+ return FromKindBits::decode(sub_minor_key());
+ }
+
+ ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
+
+ bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
+
+ private:
+ class ToKindBits : public BitField<ElementsKind, 0, 8> {};
+ class FromKindBits : public BitField<ElementsKind, ToKindBits::kNext, 8> {};
+ class IsJSArrayBits : public BitField<bool, FromKindBits::kNext, 1> {};
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
+ DEFINE_TURBOFAN_CODE_STUB(TransitionElementsKind, TurboFanCodeStub);
+};
+
class LoadIndexedInterceptorStub : public TurboFanCodeStub {
public:
explicit LoadIndexedInterceptorStub(Isolate* isolate)
@@ -764,23 +730,17 @@ class MathPowStub: public PlatformCodeStub {
class CallICStub : public TurboFanCodeStub {
public:
- CallICStub(Isolate* isolate, ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode)
+ CallICStub(Isolate* isolate, ConvertReceiverMode convert_mode)
: TurboFanCodeStub(isolate) {
- minor_key_ = ConvertModeBits::encode(convert_mode) |
- TailCallModeBits::encode(tail_call_mode);
+ minor_key_ = ConvertModeBits::encode(convert_mode);
}
ConvertReceiverMode convert_mode() const {
return ConvertModeBits::decode(minor_key_);
}
- TailCallMode tail_call_mode() const {
- return TailCallModeBits::decode(minor_key_);
- }
protected:
typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
- typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
private:
void PrintState(std::ostream& os) const final; // NOLINT
@@ -865,95 +825,6 @@ class CallApiGetterStub : public PlatformCodeStub {
};
-class BinaryOpICStub : public HydrogenCodeStub {
- public:
- BinaryOpICStub(Isolate* isolate, Token::Value op)
- : HydrogenCodeStub(isolate, UNINITIALIZED) {
- BinaryOpICState state(isolate, op);
- set_sub_minor_key(state.GetExtraICState());
- }
-
- BinaryOpICStub(Isolate* isolate, const BinaryOpICState& state)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(state.GetExtraICState());
- }
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- Code::Kind GetCodeKind() const override { return Code::BINARY_OP_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(sub_minor_key());
- }
-
- BinaryOpICState state() const {
- return BinaryOpICState(isolate(), GetExtraICState());
- }
-
- void PrintState(std::ostream& os) const final; // NOLINT
-
- private:
- static void GenerateAheadOfTime(Isolate* isolate,
- const BinaryOpICState& state);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_HYDROGEN_CODE_STUB(BinaryOpIC, HydrogenCodeStub);
-};
-
-
-// TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
-// call support for stubs in Hydrogen.
-class BinaryOpICWithAllocationSiteStub final : public PlatformCodeStub {
- public:
- BinaryOpICWithAllocationSiteStub(Isolate* isolate,
- const BinaryOpICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- Handle<Code> GetCodeCopyFromTemplate(Handle<AllocationSite> allocation_site) {
- FindAndReplacePattern pattern;
- pattern.Add(isolate()->factory()->undefined_map(), allocation_site);
- return CodeStub::GetCodeCopy(pattern);
- }
-
- Code::Kind GetCodeKind() const override { return Code::BINARY_OP_IC; }
-
- ExtraICState GetExtraICState() const override {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- void PrintState(std::ostream& os) const override; // NOLINT
-
- private:
- BinaryOpICState state() const {
- return BinaryOpICState(isolate(), GetExtraICState());
- }
-
- static void GenerateAheadOfTime(Isolate* isolate,
- const BinaryOpICState& state);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
- DEFINE_PLATFORM_CODE_STUB(BinaryOpICWithAllocationSite, PlatformCodeStub);
-};
-
-
-class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
- public:
- BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op)
- : BinaryOpICStub(isolate, op) {}
-
- BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
- : BinaryOpICStub(isolate, state) {}
-
- Code::Kind GetCodeKind() const final { return Code::STUB; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
- DEFINE_HYDROGEN_CODE_STUB(BinaryOpWithAllocationSite, BinaryOpICStub);
-};
-
class StringAddStub final : public TurboFanCodeStub {
public:
StringAddStub(Isolate* isolate, StringAddFlags flags,
@@ -1211,9 +1082,8 @@ class StringCharCodeAtGenerator {
class CallICTrampolineStub : public CallICStub {
public:
- CallICTrampolineStub(Isolate* isolate, ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode)
- : CallICStub(isolate, convert_mode, tail_call_mode) {}
+ CallICTrampolineStub(Isolate* isolate, ConvertReceiverMode convert_mode)
+ : CallICStub(isolate, convert_mode) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallICTrampoline);
DEFINE_TURBOFAN_CODE_STUB(CallICTrampoline, CallICStub);
@@ -1359,29 +1229,6 @@ class StoreFastElementStub : public TurboFanCodeStub {
};
-class TransitionElementsKindStub : public HydrogenCodeStub {
- public:
- TransitionElementsKindStub(Isolate* isolate, ElementsKind from_kind,
- ElementsKind to_kind)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind));
- }
-
- ElementsKind from_kind() const {
- return FromKindBits::decode(sub_minor_key());
- }
-
- ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
-
- private:
- class FromKindBits: public BitField<ElementsKind, 8, 8> {};
- class ToKindBits: public BitField<ElementsKind, 0, 8> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
- DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
-};
-
class AllocateHeapNumberStub : public TurboFanCodeStub {
public:
explicit AllocateHeapNumberStub(Isolate* isolate)
@@ -1402,7 +1249,7 @@ class CommonArrayConstructorStub : public TurboFanCodeStub {
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
- AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
+ AllocationSite::ShouldTrack(kind));
set_sub_minor_key(ElementsKindBits::encode(kind) |
AllocationSiteOverrideModeBits::encode(override_mode));
}
@@ -1529,50 +1376,6 @@ class StoreSlowElementStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StoreSlowElement, TurboFanCodeStub);
};
-class ToBooleanICStub : public HydrogenCodeStub {
- public:
- ToBooleanICStub(Isolate* isolate, ExtraICState state)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(HintsBits::encode(static_cast<uint16_t>(state)));
- }
-
- bool UpdateStatus(Handle<Object> object);
- ToBooleanHints hints() const {
- return ToBooleanHints(HintsBits::decode(sub_minor_key()));
- }
-
- Code::Kind GetCodeKind() const override { return Code::TO_BOOLEAN_IC; }
- void PrintState(std::ostream& os) const override; // NOLINT
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static Handle<Code> GetUninitialized(Isolate* isolate) {
- return ToBooleanICStub(isolate, UNINITIALIZED).GetCode();
- }
-
- ExtraICState GetExtraICState() const override { return hints(); }
-
- InlineCacheState GetICState() const {
- if (hints() == ToBooleanHint::kNone) {
- return ::v8::internal::UNINITIALIZED;
- } else {
- return MONOMORPHIC;
- }
- }
-
- private:
- ToBooleanICStub(Isolate* isolate, InitializationState init_state)
- : HydrogenCodeStub(isolate, init_state) {}
-
- static const int kNumHints = 8;
- STATIC_ASSERT(static_cast<int>(ToBooleanHint::kAny) ==
- ((1 << kNumHints) - 1));
- class HintsBits : public BitField<uint16_t, 0, kNumHints> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_HYDROGEN_CODE_STUB(ToBooleanIC, HydrogenCodeStub);
-};
-
class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
public:
ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
@@ -1605,27 +1408,6 @@ class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
};
-class StubFailureTrampolineStub : public PlatformCodeStub {
- public:
- StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = FunctionModeField::encode(function_mode);
- }
-
- static void GenerateAheadOfTime(Isolate* isolate);
-
- private:
- StubFunctionMode function_mode() const {
- return FunctionModeField::decode(minor_key_);
- }
-
- class FunctionModeField : public BitField<StubFunctionMode, 0, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(StubFailureTrampoline, PlatformCodeStub);
-};
-
-
class ProfileEntryHookStub : public PlatformCodeStub {
public:
explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -1635,6 +1417,7 @@ class ProfileEntryHookStub : public PlatformCodeStub {
// Generates a call to the entry hook if it's enabled.
static void MaybeCallEntryHook(MacroAssembler* masm);
+ static void MaybeCallEntryHookDelayed(TurboAssembler* tasm, Zone* zone);
private:
static void EntryHookTrampoline(intptr_t function,
@@ -1679,7 +1462,6 @@ class SubStringStub : public TurboFanCodeStub {
#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
#undef DEFINE_PLATFORM_CODE_STUB
#undef DEFINE_HANDLER_CODE_STUB
-#undef DEFINE_HYDROGEN_CODE_STUB
#undef DEFINE_CODE_STUB
#undef DEFINE_CODE_STUB_BASE
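
The new TransitionElementsKindStub packs from_kind, to_kind and is_jsarray into the 32-bit minor key with chained BitField specializations (ToKindBits at bit 0, FromKindBits at ToKindBits::kNext, and so on), the same scheme CallICStub uses for its convert mode. A self-contained sketch of that encode/decode layout, using a hand-rolled stand-in rather than V8's BitField template, to make the bit positions concrete:

  // Minimal stand-in for v8::internal::BitField<T, shift, size>; illustrative
  // only, not the real template.
  template <typename T, int kShift, int kSize>
  struct BitFieldSketch {
    static const unsigned kMask = ((1u << kSize) - 1) << kShift;
    static const int kNext = kShift + kSize;
    static unsigned encode(T value) {
      return static_cast<unsigned>(value) << kShift;
    }
    static T decode(unsigned key) {
      return static_cast<T>((key & kMask) >> kShift);
    }
  };

  // Mirrors the layout above: to_kind in bits 0..7, from_kind in bits 8..15,
  // is_jsarray in bit 16.
  using ToKind = BitFieldSketch<int, 0, 8>;
  using FromKind = BitFieldSketch<int, ToKind::kNext, 8>;
  using IsJSArray = BitFieldSketch<bool, FromKind::kNext, 1>;

  unsigned key =
      ToKind::encode(2) | FromKind::encode(1) | IsJSArray::encode(true);
  // ToKind::decode(key) == 2, FromKind::decode(key) == 1,
  // IsJSArray::decode(key) == true.
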
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index d43d1f47b4..3a58415e01 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -110,7 +110,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#endif // DEBUG
}
-Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
+Handle<Code> CodeGenerator::MakeCodeEpilogue(TurboAssembler* tasm,
EhFrameWriter* eh_frame_writer,
CompilationInfo* info,
Handle<Object> self_reference) {
@@ -122,7 +122,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
bool is_crankshafted =
Code::ExtractKindFromFlags(flags) == Code::OPTIMIZED_FUNCTION ||
info->IsStub();
- masm->GetCode(&desc);
+ tasm->GetCode(isolate, &desc);
if (eh_frame_writer) eh_frame_writer->GetEhFrame(&desc);
Handle<Code> code = isolate->factory()->NewCode(
@@ -244,8 +244,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
// Print the source code if available.
bool print_source =
- info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION ||
- code->kind() == Code::FUNCTION);
+ info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION);
if (print_source) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Script> script = info->script();
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index b909edc850..c906513358 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -59,8 +59,6 @@
#include "src/mips64/codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/codegen-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/codegen-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -78,7 +76,7 @@ class CodeGenerator {
static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+ static Handle<Code> MakeCodeEpilogue(TurboAssembler* tasm,
EhFrameWriter* unwinding,
CompilationInfo* info,
Handle<Object> self_reference);
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 1619e0dd30..5183008df8 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -14,7 +14,6 @@
namespace v8 {
namespace internal {
-
// The number of generations for each sub cache.
static const int kRegExpGenerations = 2;
@@ -35,10 +34,8 @@ CompilationCache::CompilationCache(Isolate* isolate)
}
}
-
CompilationCache::~CompilationCache() {}
-
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
DCHECK(generation < generations_);
Handle<CompilationCacheTable> result;
@@ -53,7 +50,6 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
-
void CompilationSubCache::Age() {
// Don't directly age single-generation caches.
if (generations_ == 1) {
@@ -72,27 +68,15 @@ void CompilationSubCache::Age() {
tables_[0] = isolate()->heap()->undefined_value();
}
-
-void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = isolate()->heap()->undefined_value();
- for (int i = 0; i < generations_; i++) {
- if (tables_[i] != undefined) {
- reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
- }
- }
-}
-
void CompilationSubCache::Iterate(RootVisitor* v) {
v->VisitRootPointers(Root::kCompilationCache, &tables_[0],
&tables_[generations_]);
}
-
void CompilationSubCache::Clear() {
MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
}
-
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
@@ -134,7 +118,6 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<String>(String::cast(script->name())));
}
-
// TODO(245): Need to allow identical code from different contexts to
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
@@ -236,7 +219,6 @@ void CompilationCacheEval::Put(Handle<String> source,
SetFirstTable(table);
}
-
MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
Handle<String> source,
JSRegExp::Flags flags) {
@@ -264,7 +246,6 @@ MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
}
}
-
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
@@ -273,7 +254,6 @@ void CompilationCacheRegExp::Put(Handle<String> source,
SetFirstTable(CompilationCacheTable::PutRegExp(table, source, flags, data));
}
-
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
@@ -312,7 +292,6 @@ InfoVectorPair CompilationCache::LookupEval(
return result;
}
-
MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
JSRegExp::Flags flags) {
if (!IsEnabled()) return MaybeHandle<FixedArray>();
@@ -348,8 +327,6 @@ void CompilationCache::PutEval(Handle<String> source,
}
}
-
-
void CompilationCache::PutRegExp(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
@@ -360,7 +337,6 @@ void CompilationCache::PutRegExp(Handle<String> source,
reg_exp_.Put(source, flags, data);
}
-
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
@@ -373,31 +349,20 @@ void CompilationCache::Iterate(RootVisitor* v) {
}
}
-
-void CompilationCache::IterateFunctions(ObjectVisitor* v) {
- for (int i = 0; i < kSubCacheCount; i++) {
- subcaches_[i]->IterateFunctions(v);
- }
-}
-
-
void CompilationCache::MarkCompactPrologue() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Age();
}
}
-
void CompilationCache::Enable() {
enabled_ = true;
}
-
void CompilationCache::Disable() {
enabled_ = false;
Clear();
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 89c54a4227..907faf38b3 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -52,7 +52,6 @@ class CompilationSubCache {
// GC support.
void Iterate(RootVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
@@ -200,7 +199,6 @@ class CompilationCache {
// GC support.
void Iterate(RootVisitor* v);
- void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index cd14bcf338..1a9cd7d9f5 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -22,7 +22,6 @@ DependentCode* CompilationDependencies::Get(Handle<Object> object) const {
return Handle<AllocationSite>::cast(object)->dependent_code();
}
UNREACHABLE();
- return nullptr;
}
@@ -141,11 +140,10 @@ void CompilationDependencies::AssumePrototypeMapsStable(
void CompilationDependencies::AssumeTransitionStable(
Handle<AllocationSite> site) {
// Do nothing if the object doesn't have any useful element transitions left.
- ElementsKind kind =
- site->SitePointsToLiteral()
- ? JSObject::cast(site->transition_info())->GetElementsKind()
- : site->GetElementsKind();
- if (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) {
+ ElementsKind kind = site->PointsToLiteral()
+ ? site->boilerplate()->GetElementsKind()
+ : site->GetElementsKind();
+ if (AllocationSite::ShouldTrack(kind)) {
Insert(DependentCode::kAllocationSiteTransitionChangedGroup, site);
}
}
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index a2e75fb2fc..21a33ce557 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -59,14 +59,6 @@ CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
isolate, zone) {
closure_ = closure;
- // Compiling for the snapshot typically results in different code than
- // compiling later on. This means that code recompiled with deoptimization
- // support won't be "equivalent" (as defined by SharedFunctionInfo::
- // EnableDeoptimizationSupport), so it will replace the old code and all
- // its type feedback. To avoid this, always compile functions in the snapshot
- // with deoptimization support.
- if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
-
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
@@ -76,6 +68,11 @@ CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
if (isolate_->NeedsSourcePositionsForProfiling()) {
MarkAsSourcePositionsEnabled();
}
+
+ if (FLAG_block_coverage && isolate->is_block_code_coverage() &&
+ parse_info->script()->IsUserJavaScript()) {
+ MarkAsBlockCoverageEnabled();
+ }
}
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
@@ -123,11 +120,11 @@ bool CompilationInfo::is_this_defined() const { return !IsStub(); }
// Primitive functions are unlikely to be picked up by the stack-walking
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
+// TODO(6409): Remove when Full-Codegen dies.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_opt && !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
+ return FLAG_opt && !literal()->dont_self_optimize() &&
!literal()->dont_optimize() &&
- literal()->scope()->AllowsLazyCompilation() &&
- !shared_info()->optimization_disabled();
+ literal()->scope()->AllowsLazyCompilation();
}
void CompilationInfo::set_deferred_handles(
@@ -239,8 +236,7 @@ void CompilationInfo::SetOptimizing() {
int CompilationInfo::AddInlinedFunction(
Handle<SharedFunctionInfo> inlined_function, SourcePosition pos) {
int id = static_cast<int>(inlined_functions_.size());
- inlined_functions_.push_back(InlinedFunctionHolder(
- inlined_function, handle(inlined_function->code()), pos));
+ inlined_functions_.push_back(InlinedFunctionHolder(inlined_function, pos));
return id;
}
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index 3fd35ea06e..c95664a62b 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -19,6 +19,7 @@
namespace v8 {
namespace internal {
+class CoverageInfo;
class DeclarationScope;
class DeferredHandles;
class FunctionLiteral;
@@ -38,19 +39,19 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
kNonDeferredCalling = 1 << 1,
kSavesCallerDoubles = 1 << 2,
kRequiresFrame = 1 << 3,
- kDeoptimizationSupport = 1 << 4,
- kAccessorInliningEnabled = 1 << 5,
- kSerializing = 1 << 6,
- kFunctionContextSpecializing = 1 << 7,
- kFrameSpecializing = 1 << 8,
- kInliningEnabled = 1 << 9,
- kDisableFutureOptimization = 1 << 10,
- kSplittingEnabled = 1 << 11,
- kDeoptimizationEnabled = 1 << 12,
- kSourcePositionsEnabled = 1 << 13,
- kBailoutOnUninitialized = 1 << 14,
- kOptimizeFromBytecode = 1 << 15,
- kLoopPeelingEnabled = 1 << 16,
+ kAccessorInliningEnabled = 1 << 4,
+ kSerializing = 1 << 5,
+ kFunctionContextSpecializing = 1 << 6,
+ kFrameSpecializing = 1 << 7,
+ kInliningEnabled = 1 << 8,
+ kDisableFutureOptimization = 1 << 9,
+ kSplittingEnabled = 1 << 10,
+ kDeoptimizationEnabled = 1 << 11,
+ kSourcePositionsEnabled = 1 << 12,
+ kBailoutOnUninitialized = 1 << 13,
+ kOptimizeFromBytecode = 1 << 14,
+ kLoopPeelingEnabled = 1 << 15,
+ kBlockCoverageEnabled = 1 << 16,
};
CompilationInfo(Zone* zone, ParseInfo* parse_info, Isolate* isolate,
@@ -113,10 +114,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// Compiles marked as debug produce unoptimized code with debug break slots.
// Inner functions that cannot be compiled w/o context are compiled eagerly.
- // Always include deoptimization support to avoid having to recompile again.
void MarkAsDebug() {
set_is_debug();
- SetFlag(kDeoptimizationSupport);
}
bool is_debug() const;
@@ -179,10 +178,16 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
+ void MarkAsBlockCoverageEnabled() { SetFlag(kBlockCoverageEnabled); }
+
+ bool is_block_coverage_enabled() const {
+ return GetFlag(kBlockCoverageEnabled);
+ }
+
bool GeneratePreagedPrologue() const {
// Generate a pre-aged prologue if we are optimizing for size, which
- // will make code flushing more aggressive. Only apply to Code::FUNCTION,
- // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
+    // will make code age more aggressively. Only apply to Code::FUNCTION,
+ // since only functions are aged in the compilation cache.
return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
output_code_kind() == Code::FUNCTION;
}
@@ -219,13 +224,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
}
// Deoptimization support.
- bool HasDeoptimizationSupport() const {
- return GetFlag(kDeoptimizationSupport);
- }
- void EnableDeoptimizationSupport() {
- DCHECK_EQ(BASE, mode_);
- SetFlag(kDeoptimizationSupport);
- }
bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
bool ExpectsJSReceiverAsReceiver();
@@ -269,10 +267,14 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
int optimization_id() const { return optimization_id_; }
- int osr_expr_stack_height() { return osr_expr_stack_height_; }
+ int osr_expr_stack_height() {
+ DCHECK_GE(osr_expr_stack_height_, 0);
+ return osr_expr_stack_height_;
+ }
void set_osr_expr_stack_height(int height) {
- DCHECK(height >= 0);
+ DCHECK_EQ(osr_expr_stack_height_, -1);
osr_expr_stack_height_ = height;
+ DCHECK_GE(osr_expr_stack_height_, 0);
}
bool has_simple_parameters();
@@ -280,18 +282,11 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
struct InlinedFunctionHolder {
Handle<SharedFunctionInfo> shared_info;
- // Root that holds the unoptimized code of the inlined function alive
- // (and out of reach of code flushing) until we finish compilation.
- // Do not remove.
- Handle<Code> inlined_code_object_root;
-
InliningPosition position;
InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
- Handle<Code> inlined_code_object_root,
SourcePosition pos)
- : shared_info(inlined_shared_info),
- inlined_code_object_root(inlined_code_object_root) {
+ : shared_info(inlined_shared_info) {
position.position = pos;
// initialized when generating the deoptimization literals
position.inlined_function_id = DeoptimizationInputData::kNotInlinedIndex;
@@ -319,6 +314,12 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
+ bool has_coverage_info() const { return !coverage_info_.is_null(); }
+ Handle<CoverageInfo> coverage_info() const { return coverage_info_; }
+ void set_coverage_info(Handle<CoverageInfo> coverage_info) {
+ coverage_info_ = coverage_info;
+ }
+
private:
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
@@ -389,6 +390,10 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
Vector<const char> debug_name_;
+ // Encapsulates coverage information gathered by the bytecode generator.
+ // Needs to be stored on the shared function info once compilation completes.
+ Handle<CoverageInfo> coverage_info_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
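
Flag above is a plain bitmask enum: every entry is a distinct 1 << n value, and the MarkAs.../is_..._enabled accessors in this hunk simply OR the bit in via SetFlag and test it via GetFlag, which is why dropping kDeoptimizationSupport shifts every later flag down one bit. A minimal sketch of that pattern (FlagsSketch and its members are illustrative stand-ins, not the real CompilationInfo):

  // Illustrative stand-in for CompilationInfo's flag handling.
  enum Flag : unsigned {
    kAccessorInliningEnabled = 1u << 4,  // values mirror the enum above
    kBlockCoverageEnabled = 1u << 16,
  };

  class FlagsSketch {
   public:
    void MarkAsBlockCoverageEnabled() { SetFlag(kBlockCoverageEnabled); }
    bool is_block_coverage_enabled() const {
      return GetFlag(kBlockCoverageEnabled);
    }

   private:
    void SetFlag(Flag flag) { flags_ |= flag; }
    bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
    unsigned flags_ = 0;
  };
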
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index bc1ec45a5b..a4408e92c2 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -179,6 +179,50 @@ bool CompilerDispatcherJob::IsAssociatedWith(
return *shared_ == *shared;
}
+void CompilerDispatcherJob::StepNextOnMainThread() {
+ switch (status()) {
+ case CompileJobStatus::kInitial:
+ return PrepareToParseOnMainThread();
+
+ case CompileJobStatus::kReadyToParse:
+ return Parse();
+
+ case CompileJobStatus::kParsed:
+ return FinalizeParsingOnMainThread();
+
+ case CompileJobStatus::kReadyToAnalyze:
+ return AnalyzeOnMainThread();
+
+ case CompileJobStatus::kAnalyzed:
+ return PrepareToCompileOnMainThread();
+
+ case CompileJobStatus::kReadyToCompile:
+ return Compile();
+
+ case CompileJobStatus::kCompiled:
+ return FinalizeCompilingOnMainThread();
+
+ case CompileJobStatus::kFailed:
+ case CompileJobStatus::kDone:
+ return;
+ }
+ UNREACHABLE();
+}
+
+void CompilerDispatcherJob::StepNextOnBackgroundThread() {
+ DCHECK(CanStepNextOnAnyThread());
+ switch (status()) {
+ case CompileJobStatus::kReadyToParse:
+ return Parse();
+
+ case CompileJobStatus::kReadyToCompile:
+ return Compile();
+
+ default:
+ UNREACHABLE();
+ }
+}
+
void CompilerDispatcherJob::PrepareToParseOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kInitial);
@@ -283,7 +327,7 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
}
parser_->DeserializeScopeChain(parse_info_.get(), outer_scope_info);
- Handle<String> name(String::cast(shared_->name()));
+ Handle<String> name(shared_->name());
parse_info_->set_function_name(
parse_info_->ast_value_factory()->GetString(name));
status_ = CompileJobStatus::kReadyToParse;
@@ -315,7 +359,7 @@ void CompilerDispatcherJob::Parse() {
}
}
-bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
+void CompilerDispatcherJob::FinalizeParsingOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kParsed);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeParsing);
@@ -367,11 +411,9 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
character_stream_.reset();
}
parse_info_->set_deferred_handles(scope.Detach());
-
- return status_ != CompileJobStatus::kFailed;
}
-bool CompilerDispatcherJob::AnalyzeOnMainThread() {
+void CompilerDispatcherJob::AnalyzeOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kReadyToAnalyze);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kAnalyze);
@@ -393,11 +435,9 @@ bool CompilerDispatcherJob::AnalyzeOnMainThread() {
}
}
compile_info_->set_deferred_handles(scope.Detach());
-
- return status_ != CompileJobStatus::kFailed;
}
-bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
+void CompilerDispatcherJob::PrepareToCompileOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kAnalyzed);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
@@ -407,12 +447,11 @@ bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
if (!compile_job_.get()) {
if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
status_ = CompileJobStatus::kFailed;
- return false;
+ return;
}
CHECK(compile_job_->can_execute_on_background_thread());
status_ = CompileJobStatus::kReadyToCompile;
- return true;
}
void CompilerDispatcherJob::Compile() {
@@ -437,7 +476,7 @@ void CompilerDispatcherJob::Compile() {
status_ = CompileJobStatus::kCompiled;
}
-bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
+void CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kCompiled);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeCompiling);
@@ -452,7 +491,7 @@ bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
!Compiler::FinalizeCompilationJob(compile_job_.release())) {
if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
status_ = CompileJobStatus::kFailed;
- return false;
+ return;
}
}
@@ -462,7 +501,6 @@ bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
parse_info_.reset();
status_ = CompileJobStatus::kDone;
- return true;
}
void CompilerDispatcherJob::ResetOnMainThread() {
@@ -524,7 +562,6 @@ double CompilerDispatcherJob::EstimateRuntimeOfNextStepInMs() const {
}
UNREACHABLE();
- return 0.0;
}
void CompilerDispatcherJob::ShortPrint() {
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index 7b952f6cad..3dc1bcdf04 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -76,8 +76,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
size_t max_stack_size);
~CompilerDispatcherJob();
- CompileJobStatus status() const { return status_; }
-
bool has_context() const { return !context_.is_null(); }
Context* context() { return *context_; }
@@ -87,31 +85,26 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
// function.
bool IsAssociatedWith(Handle<SharedFunctionInfo> shared) const;
- // Transition from kInitial to kReadyToParse.
- void PrepareToParseOnMainThread();
-
- // Transition from kReadyToParse to kParsed (or kDone if there is
- // finish_callback).
- void Parse();
-
- // Transition from kParsed to kReadyToAnalyze (or kFailed). Returns false
- // when transitioning to kFailed. In that case, an exception is pending.
- bool FinalizeParsingOnMainThread();
+ bool IsFinished() {
+ return status() == CompileJobStatus::kDone ||
+ status() == CompileJobStatus::kFailed;
+ }
- // Transition from kReadyToAnalyze to kAnalyzed (or kFailed). Returns
- // false when transitioning to kFailed. In that case, an exception is pending.
- bool AnalyzeOnMainThread();
+ bool IsFailed() { return status() == CompileJobStatus::kFailed; }
- // Transition from kAnalyzed to kReadyToCompile (or kFailed). Returns
- // false when transitioning to kFailed. In that case, an exception is pending.
- bool PrepareToCompileOnMainThread();
+  // Return true if the next step can be run on any thread, that is, when both
+ // StepNextOnMainThread and StepNextOnBackgroundThread could be used for the
+ // next step.
+ bool CanStepNextOnAnyThread() {
+ return status() == CompileJobStatus::kReadyToParse ||
+ status() == CompileJobStatus::kReadyToCompile;
+ }
- // Transition from kReadyToCompile to kCompiled.
- void Compile();
+ // Step the job forward by one state on the main thread.
+ void StepNextOnMainThread();
- // Transition from kCompiled to kDone (or kFailed). Returns false when
- // transitioning to kFailed. In that case, an exception is pending.
- bool FinalizeCompilingOnMainThread();
+ // Step the job forward by one state on a background thread.
+ void StepNextOnBackgroundThread();
// Transition from any state to kInitial and free all resources.
void ResetOnMainThread();
@@ -124,7 +117,10 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
void ShortPrint();
private:
- FRIEND_TEST(CompilerDispatcherJobTest, ScopeChain);
+ friend class CompilerDispatcherTest;
+ friend class CompilerDispatcherJobTest;
+
+ CompileJobStatus status() const { return status_; }
CompileJobStatus status_;
Isolate* isolate_;
@@ -152,6 +148,28 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
bool trace_compiler_dispatcher_jobs_;
+ // Transition from kInitial to kReadyToParse.
+ void PrepareToParseOnMainThread();
+
+ // Transition from kReadyToParse to kParsed (or kDone if there is
+ // finish_callback).
+ void Parse();
+
+ // Transition from kParsed to kReadyToAnalyze (or kFailed).
+ void FinalizeParsingOnMainThread();
+
+ // Transition from kReadyToAnalyze to kAnalyzed (or kFailed).
+ void AnalyzeOnMainThread();
+
+ // Transition from kAnalyzed to kReadyToCompile (or kFailed).
+ void PrepareToCompileOnMainThread();
+
+ // Transition from kReadyToCompile to kCompiled.
+ void Compile();
+
+ // Transition from kCompiled to kDone (or kFailed).
+ void FinalizeCompilingOnMainThread();
+
DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherJob);
};
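
With the individual transition methods moved into the private section, the public surface that remains is small: IsFinished() and IsFailed() for inspection, CanStepNextOnAnyThread() for scheduling decisions, and the two StepNext methods for advancing the state machine. A hypothetical main-thread driver, mirroring what CompilerDispatcher::FinishNow does with these calls (the function below is an illustration, not code from the dispatcher):

  // Illustrative driver; assumes a prepared CompilerDispatcherJob* whose
  // context, if any, has already been entered on the isolate.
  void DriveToCompletionOnMainThread(CompilerDispatcherJob* job) {
    while (!job->IsFinished()) {
      // The main thread can always advance the job; background threads may
      // only take the steps for which CanStepNextOnAnyThread() is true
      // (kReadyToParse and kReadyToCompile).
      job->StepNextOnMainThread();
    }
    // job->IsFailed() now tells the caller whether an exception is pending.
  }
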
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
index d98209b147..bd87c49042 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -73,7 +73,6 @@ const char* CompilerDispatcherTracer::Scope::Name(ScopeID scope_id) {
return "V8.BackgroundCompile_FinalizeCompiling";
}
UNREACHABLE();
- return nullptr;
}
CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate)
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 69152b37f7..bbe646c791 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -21,16 +21,6 @@ namespace {
enum class ExceptionHandling { kSwallow, kThrow };
-bool IsFinished(CompilerDispatcherJob* job) {
- return job->status() == CompileJobStatus::kDone ||
- job->status() == CompileJobStatus::kFailed;
-}
-
-bool CanRunOnAnyThread(CompilerDispatcherJob* job) {
- return job->status() == CompileJobStatus::kReadyToParse ||
- job->status() == CompileJobStatus::kReadyToCompile;
-}
-
bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
ExceptionHandling exception_handling) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
@@ -42,69 +32,23 @@ bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
if (job->has_context()) {
isolate->set_context(job->context());
} else {
- DCHECK(CanRunOnAnyThread(job));
+ DCHECK(job->CanStepNextOnAnyThread());
}
- switch (job->status()) {
- case CompileJobStatus::kInitial:
- job->PrepareToParseOnMainThread();
- break;
-
- case CompileJobStatus::kReadyToParse:
- job->Parse();
- break;
-
- case CompileJobStatus::kParsed:
- job->FinalizeParsingOnMainThread();
- break;
-
- case CompileJobStatus::kReadyToAnalyze:
- job->AnalyzeOnMainThread();
- break;
-
- case CompileJobStatus::kAnalyzed:
- job->PrepareToCompileOnMainThread();
- break;
+ job->StepNextOnMainThread();
- case CompileJobStatus::kReadyToCompile:
- job->Compile();
- break;
-
- case CompileJobStatus::kCompiled:
- job->FinalizeCompilingOnMainThread();
- break;
-
- case CompileJobStatus::kFailed:
- case CompileJobStatus::kDone:
- break;
- }
-
- DCHECK_EQ(job->status() == CompileJobStatus::kFailed,
- isolate->has_pending_exception());
- if (job->status() == CompileJobStatus::kFailed &&
- exception_handling == ExceptionHandling::kSwallow) {
+ DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
+ if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
isolate->clear_pending_exception();
}
- return job->status() != CompileJobStatus::kFailed;
+ return job->IsFailed();
}
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
- DCHECK(CanRunOnAnyThread(job));
+ DCHECK(job->CanStepNextOnAnyThread());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherBackgroundStep");
-
- switch (job->status()) {
- case CompileJobStatus::kReadyToParse:
- job->Parse();
- break;
-
- case CompileJobStatus::kReadyToCompile:
- job->Compile();
- break;
-
- default:
- UNREACHABLE();
- }
+ job->StepNextOnBackgroundThread();
}
// Theoretically we get 50ms of idle time max, however it's unlikely that
@@ -129,7 +73,7 @@ class MemoryPressureTask : public CancelableTask {
MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
- : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+ : CancelableTask(task_manager), dispatcher_(dispatcher) {}
MemoryPressureTask::~MemoryPressureTask() {}
@@ -157,7 +101,7 @@ class CompilerDispatcher::AbortTask : public CancelableTask {
CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
- : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+ : CancelableTask(task_manager), dispatcher_(dispatcher) {}
CompilerDispatcher::AbortTask::~AbortTask() {}
@@ -183,7 +127,7 @@ class CompilerDispatcher::BackgroundTask : public CancelableTask {
CompilerDispatcher::BackgroundTask::BackgroundTask(
Isolate* isolate, CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
- : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+ : CancelableTask(task_manager), dispatcher_(dispatcher) {}
CompilerDispatcher::BackgroundTask::~BackgroundTask() {}
@@ -209,7 +153,7 @@ class CompilerDispatcher::IdleTask : public CancelableIdleTask {
CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
- : CancelableIdleTask(isolate, task_manager), dispatcher_(dispatcher) {}
+ : CancelableIdleTask(task_manager), dispatcher_(dispatcher) {}
CompilerDispatcher::IdleTask::~IdleTask() {}
@@ -261,7 +205,7 @@ bool CompilerDispatcher::CanEnqueue() {
}
bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
- DCHECK_IMPLIES(IsEnabled(), FLAG_ignition);
+ DCHECK_IMPLIES(IsEnabled(), !FLAG_stress_fullcodegen);
if (!CanEnqueue()) return false;
@@ -277,7 +221,7 @@ bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
std::unique_ptr<CompilerDispatcherJob> job) {
- DCHECK(!IsFinished(job.get()));
+ DCHECK(!job->IsFinished());
bool added;
JobMap::const_iterator it;
std::tie(it, added) =
@@ -293,7 +237,7 @@ CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep(
std::unique_ptr<CompilerDispatcherJob> job) {
- DCHECK(!IsFinished(job.get()));
+ DCHECK(!job->IsFinished());
bool added;
JobMap::const_iterator it;
std::tie(it, added) =
@@ -461,10 +405,10 @@ bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) {
PrintF(" now\n");
}
WaitForJobIfRunningOnBackground(job);
- while (!IsFinished(job)) {
+ while (!job->IsFinished()) {
DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
}
- return job->status() != CompileJobStatus::kFailed;
+ return !job->IsFailed();
}
bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
@@ -489,7 +433,7 @@ void CompilerDispatcher::FinishAllNow() {
pending_background_jobs_.erase(job);
}
if (!is_running_in_background) {
- while (!IsFinished(job)) {
+ while (!job->IsFinished()) {
DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
}
it = RemoveIfFinished(it);
@@ -638,7 +582,7 @@ void CompilerDispatcher::ScheduleAbortTask() {
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
CompilerDispatcherJob* job) {
- if (!CanRunOnAnyThread(job)) return;
+ if (!job->CanStepNextOnAnyThread()) return;
{
base::LockGuard<base::Mutex> lock(&mutex_);
pending_background_jobs_.insert(job);
@@ -773,7 +717,7 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
ConsiderJobForBackgroundProcessing(job->second.get());
}
++job;
- } else if (IsFinished(job->second.get())) {
+ } else if (job->second->IsFinished()) {
DCHECK(it == pending_background_jobs_.end());
lock.reset();
job = RemoveJob(job);
@@ -794,12 +738,12 @@ void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
JobMap::const_iterator job) {
- if (!IsFinished(job->second.get())) {
+ if (!job->second->IsFinished()) {
return job;
}
if (trace_compiler_dispatcher_) {
- bool result = job->second->status() != CompileJobStatus::kFailed;
+ bool result = !job->second->IsFailed();
PrintF("CompilerDispatcher: finished working on ");
job->second->ShortPrint();
PrintF(": %s\n", result ? "success" : "failure");
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 2e375cc209..34ec88539a 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -22,6 +22,9 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->info()->closure();
function->ReplaceCode(function->shared()->code());
+ if (function->IsInOptimizationQueue()) {
+ function->ClearOptimizationMarker();
+ }
// TODO(mvstanton): We can't call ensureliterals here due to allocation,
// but we probably shouldn't call ReplaceCode either, as this
// sometimes runs on the worker thread!
@@ -196,7 +199,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
CompilationInfo* info = job->info();
Handle<JSFunction> function(*info->closure());
- if (function->IsOptimized()) {
+ if (function->HasOptimizedCode()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Aborting compilation for ");
function->ShortPrint();
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 5a9486d177..5b4ad8a4d6 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
- base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
+ base::Relaxed_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
input_queue_ = NewArray<CompilationJob*>(input_queue_capacity_);
}
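
The NoBarrier_Store call above becomes base::Relaxed_Store, V8's wrapper for a relaxed atomic store. In standard C++ the same operation looks like this (sketch only; std::atomic<int> stands in for base::AtomicWord and the COMPILE mode value):

  #include <atomic>

  std::atomic<int> mode_{0};

  // Equivalent of base::Relaxed_Store(&mode_, COMPILE): the new value is
  // written atomically but no ordering is imposed on surrounding accesses.
  void SetModeRelaxed(int new_mode) {
    mode_.store(new_mode, std::memory_order_relaxed);
  }
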
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index c2d63fb041..f1961148fa 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -12,13 +12,14 @@
#include "src/ast/ast-numbering.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
+#include "src/base/optional.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
+#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
-#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
@@ -30,6 +31,7 @@
#include "src/log-inl.h"
#include "src/messages.h"
#include "src/objects/map.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
@@ -111,15 +113,15 @@ CompilationJob::Status CompilationJob::PrepareJob() {
}
CompilationJob::Status CompilationJob::ExecuteJob() {
- std::unique_ptr<DisallowHeapAllocation> no_allocation;
- std::unique_ptr<DisallowHandleAllocation> no_handles;
- std::unique_ptr<DisallowHandleDereference> no_deref;
- std::unique_ptr<DisallowCodeDependencyChange> no_dependency_change;
+ base::Optional<DisallowHeapAllocation> no_allocation;
+ base::Optional<DisallowHandleAllocation> no_handles;
+ base::Optional<DisallowHandleDereference> no_deref;
+ base::Optional<DisallowCodeDependencyChange> no_dependency_change;
if (can_execute_on_background_thread()) {
- no_allocation.reset(new DisallowHeapAllocation());
- no_handles.reset(new DisallowHandleAllocation());
- no_deref.reset(new DisallowHandleDereference());
- no_dependency_change.reset(new DisallowCodeDependencyChange());
+ no_allocation.emplace();
+ no_handles.emplace();
+ no_deref.emplace();
+ no_dependency_change.emplace();
executed_on_background_thread_ =
!ThreadId::Current().Equals(isolate_thread_id_);
} else {
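
The heap-allocated scope guards in ExecuteJob become base::Optional values that are emplace()d only when the job actually runs on a background thread: the storage lives on the stack and no allocation happens when the guards are not needed. std::optional gives the same shape in standard C++ (DisallowFoo below is a hypothetical guard type used only for illustration):

  #include <optional>

  struct DisallowFoo {  // stand-in for DisallowHeapAllocation and friends
    DisallowFoo() { /* enter guarded scope */ }
    ~DisallowFoo() { /* leave guarded scope */ }
  };

  void ExecuteSketch(bool on_background_thread) {
    // The guard is constructed only when needed and destroyed automatically
    // when the optional goes out of scope; no heap allocation either way.
    std::optional<DisallowFoo> no_allocation;
    if (on_background_thread) no_allocation.emplace();
    // ... do the work ...
  }
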
@@ -202,73 +204,10 @@ void CompilationJob::RecordOptimizedCompilationStats() const {
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
compiled_functions, code_size, compilation_time);
}
- if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
- time_taken_to_execute_,
- time_taken_to_finalize_);
- }
}
Isolate* CompilationJob::isolate() const { return info()->isolate(); }
-namespace {
-
-void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
- Handle<Code> code) {
- Handle<WeakCell> cell = Code::WeakCellFor(code);
- Heap* heap = isolate->heap();
- if (heap->InNewSpace(*object)) {
- heap->AddWeakNewSpaceObjectToCodeDependency(object, cell);
- } else {
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
- dep =
- DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
- heap->AddWeakObjectToCodeDependency(object, dep);
- }
-}
-
-} // namespace
-
-void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
- // TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
- Isolate* const isolate = code->GetIsolate();
- DCHECK(code->is_optimized_code());
- MapHandles maps;
- std::vector<Handle<HeapObject>> objects;
- {
- DisallowHeapAllocation no_gc;
- int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::CELL &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
- objects.push_back(handle(it.rinfo()->target_cell(), isolate));
- } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
- code->IsWeakObjectInOptimizedCode(
- it.rinfo()->target_object())) {
- Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
- isolate);
- if (object->IsMap()) {
- maps.push_back(Handle<Map>::cast(object));
- } else {
- objects.push_back(object);
- }
- }
- }
- }
- for (Handle<Map> map : maps) {
- if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
- isolate->heap()->AddRetainedMap(map);
- }
- Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
- }
- for (Handle<HeapObject> object : objects) {
- AddWeakObjectToCodeDependency(isolate, object, code);
- }
- code->set_can_have_weak_objects(true);
-}
-
// ----------------------------------------------------------------------------
// Local helper methods that make up the compilation pipeline.
@@ -327,52 +266,20 @@ void EnsureFeedbackMetadata(CompilationInfo* info) {
info->literal()->feedback_vector_spec()));
}
-bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
- bool must_use_ignition_turbo = shared->must_use_ignition_turbo();
-
- // Check the enabling conditions for Turbofan.
- // 1. "use asm" code.
- bool is_turbofanable_asm = FLAG_turbo_asm && shared->asm_function();
-
- // 2. Fallback for features unsupported by Crankshaft.
- bool is_unsupported_by_crankshaft_but_turbofanable =
- must_use_ignition_turbo && strcmp(FLAG_turbo_filter, "~~") == 0;
-
- // 3. Explicitly enabled by the command-line filter.
- bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
-
- return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
- passes_turbo_filter;
-}
-
-bool ShouldUseIgnition(Handle<SharedFunctionInfo> shared,
- bool marked_as_debug) {
+bool ShouldUseFullCodegen(FunctionLiteral* literal) {
// Code which can't be supported by the old pipeline should use Ignition.
- if (shared->must_use_ignition_turbo()) return true;
+ if (literal->must_use_ignition()) return false;
// Resumable functions are not supported by {FullCodeGenerator}, suspended
// activations stored as {JSGeneratorObject} on the heap always assume the
// underlying code to be based on the bytecode array.
- DCHECK(!IsResumableFunction(shared->kind()));
-
- // Skip Ignition for asm.js functions.
- if (shared->asm_function()) return false;
-
- // Skip Ignition for asm wasm code.
- if (FLAG_validate_asm && shared->HasAsmWasmData()) {
- return false;
- }
+ DCHECK(!IsResumableFunction(literal->kind()));
- // Code destined for TurboFan should be compiled with Ignition first.
- if (UseTurboFan(shared)) return true;
+ // Use full-codegen for asm.js functions.
+ if (literal->scope()->asm_function()) return true;
- // Only use Ignition for any other function if FLAG_ignition is true.
- return FLAG_ignition;
-}
-
-bool ShouldUseIgnition(CompilationInfo* info) {
- DCHECK(info->has_shared_info());
- return ShouldUseIgnition(info->shared_info(), info->is_debug());
+ // If stressing full-codegen then use it for all functions it can support.
+ return FLAG_stress_fullcodegen;
}
bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
@@ -382,7 +289,7 @@ bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
// Modules that have validated successfully, but were subsequently broken by
// invalid module instantiation attempts are off limit forever.
- if (shared_info->is_asm_wasm_broken()) return false;
+ if (!shared_info.is_null() && shared_info->is_asm_wasm_broken()) return false;
// Compiling for debugging is not supported, fall back.
if (is_debug) return false;
@@ -394,12 +301,12 @@ bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
return scope->asm_module();
}
-bool UseCompilerDispatcher(Compiler::ConcurrencyMode inner_function_mode,
+bool UseCompilerDispatcher(ConcurrencyMode inner_function_mode,
CompilerDispatcher* dispatcher,
DeclarationScope* scope,
Handle<SharedFunctionInfo> shared_info,
bool is_debug, bool will_serialize) {
- return inner_function_mode == Compiler::CONCURRENT &&
+ return inner_function_mode == ConcurrencyMode::kConcurrent &&
dispatcher->IsEnabled() && !is_debug && !will_serialize &&
!UseAsmWasm(scope, shared_info, is_debug);
}
@@ -410,25 +317,37 @@ CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
- if (ShouldUseIgnition(info)) {
- return interpreter::Interpreter::NewCompilationJob(info);
- } else {
+ if (ShouldUseFullCodegen(info->literal())) {
return FullCodeGenerator::NewCompilationJob(info);
+ } else {
+ return interpreter::Interpreter::NewCompilationJob(info);
}
}
-void InstallSharedScopeInfo(CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
+void InstallUnoptimizedCode(CompilationInfo* info) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ DCHECK_EQ(info->shared_info()->language_mode(),
+ info->literal()->language_mode());
+
+ // Ensure feedback metadata is installed.
+ EnsureFeedbackMetadata(info);
+
+ // Mark code to be executed once before being aged if necessary.
+ // TODO(6409): Remove when full-codegen dies.
+ DCHECK(!info->code().is_null());
+ if (info->parse_info()->literal()->should_be_used_once_hint()) {
+ info->code()->MarkToBeExecutedOnce(info->isolate());
+ }
+
+ // Update the shared function info with the scope info.
Handle<ScopeInfo> scope_info = info->scope()->scope_info();
shared->set_scope_info(*scope_info);
Scope* outer_scope = info->scope()->GetOuterScopeWithContext();
if (outer_scope) {
shared->set_outer_scope_info(*outer_scope->scope_info());
}
-}
-void InstallSharedCompilationResult(CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
+ // Install compilation result on the shared function info.
// TODO(mstarzinger): Compiling for debug code might be used to reveal inner
// functions via {FindSharedFunctionInfoInScript}, in which case we end up
// regenerating existing bytecode. Fix this!
@@ -441,32 +360,26 @@ void InstallSharedCompilationResult(CompilationInfo* info,
DCHECK(!shared->HasBytecodeArray()); // Only compiled once.
shared->set_bytecode_array(*info->bytecode_array());
}
-}
-void InstallUnoptimizedCode(CompilationInfo* info) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
-
- // Update the shared function info with the scope info.
- InstallSharedScopeInfo(info, shared);
-
- // Install compilation result on the shared function info
- InstallSharedCompilationResult(info, shared);
+ // Install coverage info on the shared function info.
+ if (info->has_coverage_info()) {
+ DCHECK(info->is_block_coverage_enabled());
+ info->isolate()->debug()->InstallCoverageInfo(info->shared_info(),
+ info->coverage_info());
+ }
}
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
- CompilationJob::Status status = job->FinalizeJob();
- if (status == CompilationJob::SUCCEEDED) {
- CompilationInfo* info = job->info();
- EnsureFeedbackMetadata(info);
- DCHECK(!info->code().is_null());
- if (info->parse_info()->literal()->should_be_used_once_hint()) {
- info->code()->MarkToBeExecutedOnce(info->isolate());
- }
- InstallUnoptimizedCode(info);
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, info);
- job->RecordUnoptimizedCompilationStats();
+void EnsureSharedFunctionInfosArrayOnScript(CompilationInfo* info) {
+ DCHECK(info->parse_info()->is_toplevel());
+ DCHECK(!info->script().is_null());
+ if (info->script()->shared_function_infos()->length() > 0) {
+ DCHECK_EQ(info->script()->shared_function_infos()->length(),
+ info->parse_info()->max_function_literal_id() + 1);
+ return;
}
- return status;
+ Handle<FixedArray> infos(info->isolate()->factory()->NewFixedArray(
+ info->parse_info()->max_function_literal_id() + 1));
+ info->script()->set_shared_function_infos(*infos);
}
void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
@@ -482,9 +395,40 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
if (literal->dont_optimize_reason() != kNoReason) {
shared_info->DisableOptimization(literal->dont_optimize_reason());
}
- if (literal->flags() & AstProperties::kMustUseIgnitionTurbo) {
- shared_info->set_must_use_ignition_turbo(true);
+}
+
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
+ CompilationInfo* info = job->info();
+ ParseInfo* parse_info = info->parse_info();
+ Isolate* isolate = info->isolate();
+
+ if (parse_info->is_toplevel()) {
+ // Allocate a shared function info and an array for shared function infos
+ // for inner functions.
+ EnsureSharedFunctionInfosArrayOnScript(info);
+ DCHECK_EQ(kNoSourcePosition, info->literal()->function_token_position());
+ if (!info->has_shared_info()) {
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForLiteral(info->literal(),
+ info->script());
+ shared->set_is_toplevel(true);
+ parse_info->set_shared_info(shared);
+ }
+ }
+ SetSharedFunctionFlagsFromLiteral(info->literal(), info->shared_info());
+
+ CompilationJob::Status status = job->FinalizeJob();
+ if (status == CompilationJob::SUCCEEDED) {
+ InstallUnoptimizedCode(info);
+ CodeEventListener::LogEventsAndTags log_tags =
+ parse_info->is_toplevel() ? parse_info->is_eval()
+ ? CodeEventListener::EVAL_TAG
+ : CodeEventListener::SCRIPT_TAG
+ : CodeEventListener::FUNCTION_TAG;
+ RecordFunctionCompilation(log_tags, info);
+ job->RecordUnoptimizedCompilationStats();
}
+ return status;
}
bool Renumber(ParseInfo* parse_info,
@@ -510,19 +454,15 @@ bool Renumber(ParseInfo* parse_info,
collect_type_profile)) {
return false;
}
- if (!parse_info->shared_info().is_null()) {
- SetSharedFunctionFlagsFromLiteral(parse_info->literal(),
- parse_info->shared_info());
- }
return true;
}
bool GenerateUnoptimizedCode(CompilationInfo* info) {
if (UseAsmWasm(info->scope(), info->shared_info(), info->is_debug())) {
- EnsureFeedbackMetadata(info);
MaybeHandle<FixedArray> wasm_data;
wasm_data = AsmJs::CompileAsmViaWasm(info);
if (!wasm_data.is_null()) {
+ SetSharedFunctionFlagsFromLiteral(info->literal(), info->shared_info());
info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
InstallUnoptimizedCode(info);
@@ -542,8 +482,8 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
bool CompileUnoptimizedInnerFunctions(
Compiler::EagerInnerFunctionLiterals* literals,
- Compiler::ConcurrencyMode inner_function_mode,
- std::shared_ptr<Zone> parse_zone, CompilationInfo* outer_info) {
+ ConcurrencyMode inner_function_mode, std::shared_ptr<Zone> parse_zone,
+ CompilationInfo* outer_info) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileUnoptimizedInnerFunctions");
Isolate* isolate = outer_info->isolate();
@@ -559,10 +499,6 @@ bool CompileUnoptimizedInnerFunctions(
Compiler::GetSharedFunctionInfo(literal, script, outer_info);
if (shared->is_compiled()) continue;
- // The {literal} has already been numbered because AstNumbering decends into
- // eagerly compiled function literals.
- SetSharedFunctionFlagsFromLiteral(literal, shared);
-
// Try to enqueue the eager function on the compiler dispatcher.
CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
if (UseCompilerDispatcher(inner_function_mode, dispatcher, literal->scope(),
@@ -579,7 +515,7 @@ bool CompileUnoptimizedInnerFunctions(
ParseInfo parse_info(script);
CompilationInfo info(parse_info.zone(), &parse_info, isolate,
Handle<JSFunction>::null());
-
+ parse_info.set_toplevel(false);
parse_info.set_literal(literal);
parse_info.set_shared_info(shared);
parse_info.set_function_literal_id(shared->function_literal_id());
@@ -587,6 +523,8 @@ bool CompileUnoptimizedInnerFunctions(
parse_info.set_ast_value_factory(
outer_info->parse_info()->ast_value_factory());
parse_info.set_ast_value_factory_owned(false);
+ parse_info.set_source_range_map(
+ outer_info->parse_info()->source_range_map());
if (will_serialize) info.PrepareForSerializing();
if (is_debug) info.MarkAsDebug();
@@ -600,25 +538,25 @@ bool CompileUnoptimizedInnerFunctions(
return true;
}
-bool InnerFunctionIsAsmModule(
+bool InnerFunctionShouldUseFullCodegen(
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* literals) {
for (auto it : *literals) {
FunctionLiteral* literal = it->value();
- if (literal->scope()->IsAsmModule()) return true;
+ if (ShouldUseFullCodegen(literal)) return true;
}
return false;
}
bool CompileUnoptimizedCode(CompilationInfo* info,
- Compiler::ConcurrencyMode inner_function_mode) {
+ ConcurrencyMode inner_function_mode) {
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
Compiler::EagerInnerFunctionLiterals inner_literals;
{
- std::unique_ptr<CompilationHandleScope> compilation_handle_scope;
- if (inner_function_mode == Compiler::CONCURRENT) {
- compilation_handle_scope.reset(new CompilationHandleScope(info));
+ base::Optional<CompilationHandleScope> compilation_handle_scope;
+ if (inner_function_mode == ConcurrencyMode::kConcurrent) {
+ compilation_handle_scope.emplace(info);
}
if (!Compiler::Analyze(info, &inner_literals)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
@@ -626,16 +564,17 @@ bool CompileUnoptimizedCode(CompilationInfo* info,
}
}
- // Disable concurrent inner compilation for asm-wasm code.
- // TODO(rmcilroy,bradnelson): Remove this AsmWasm check once the asm-wasm
- // builder doesn't do parsing when visiting function declarations.
- if (info->scope()->IsAsmModule() ||
- InnerFunctionIsAsmModule(&inner_literals)) {
- inner_function_mode = Compiler::NOT_CONCURRENT;
+ if (info->parse_info()->is_toplevel() &&
+ (ShouldUseFullCodegen(info->literal()) ||
+ InnerFunctionShouldUseFullCodegen(&inner_literals))) {
+ // Full-codegen needs to access SFI when compiling, so allocate the array
+ // now.
+ EnsureSharedFunctionInfosArrayOnScript(info);
+ inner_function_mode = ConcurrencyMode::kNotConcurrent;
}
std::shared_ptr<Zone> parse_zone;
- if (inner_function_mode == Compiler::CONCURRENT) {
+ if (inner_function_mode == ConcurrencyMode::kConcurrent) {
// Seal the parse zone so that it can be shared by parallel inner function
// compilation jobs.
DCHECK_NE(info->parse_info()->zone(), info->zone());
@@ -643,9 +582,9 @@ bool CompileUnoptimizedCode(CompilationInfo* info,
parse_zone->Seal();
}
- if (!CompileUnoptimizedInnerFunctions(&inner_literals, inner_function_mode,
- parse_zone, info) ||
- !GenerateUnoptimizedCode(info)) {
+ if (!GenerateUnoptimizedCode(info) ||
+ !CompileUnoptimizedInnerFunctions(&inner_literals, inner_function_mode,
+ parse_zone, info)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
}
@@ -653,26 +592,8 @@ bool CompileUnoptimizedCode(CompilationInfo* info,
return true;
}
-void EnsureSharedFunctionInfosArrayOnScript(ParseInfo* info, Isolate* isolate) {
- DCHECK(info->is_toplevel());
- DCHECK(!info->script().is_null());
- if (info->script()->shared_function_infos()->length() > 0) {
- DCHECK_EQ(info->script()->shared_function_infos()->length(),
- info->max_function_literal_id() + 1);
- return;
- }
- Handle<FixedArray> infos(
- isolate->factory()->NewFixedArray(info->max_function_literal_id() + 1));
- info->script()->set_shared_function_infos(*infos);
-}
-
-void EnsureSharedFunctionInfosArrayOnScript(CompilationInfo* info) {
- return EnsureSharedFunctionInfosArrayOnScript(info->parse_info(),
- info->isolate());
-}
-
MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
- CompilationInfo* info, Compiler::ConcurrencyMode inner_function_mode) {
+ CompilationInfo* info, ConcurrencyMode inner_function_mode) {
RuntimeCallTimerScope runtimeTimer(
info->isolate(), &RuntimeCallStats::CompileGetUnoptimizedCode);
VMState<COMPILER> state(info->isolate());
@@ -680,24 +601,19 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
// Parse and update ParseInfo with the results.
{
- if (!parsing::ParseAny(info->parse_info(), info->isolate(),
- inner_function_mode != Compiler::CONCURRENT)) {
+ if (!parsing::ParseAny(
+ info->parse_info(), info->isolate(),
+ inner_function_mode != ConcurrencyMode::kConcurrent)) {
return MaybeHandle<Code>();
}
- if (inner_function_mode == Compiler::CONCURRENT) {
+ if (inner_function_mode == ConcurrencyMode::kConcurrent) {
ParseHandleScope parse_handles(info->parse_info(), info->isolate());
info->parse_info()->ReopenHandlesInNewHandleScope();
info->parse_info()->ast_value_factory()->Internalize(info->isolate());
}
}
- if (info->parse_info()->is_toplevel()) {
- EnsureSharedFunctionInfosArrayOnScript(info);
- }
- DCHECK_EQ(info->shared_info()->language_mode(),
- info->literal()->language_mode());
-
// Compile either unoptimized code or bytecode for the interpreter.
if (!CompileUnoptimizedCode(info, inner_function_mode)) {
return MaybeHandle<Code>();
@@ -716,34 +632,45 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
&RuntimeCallStats::CompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
- Code* code = nullptr;
if (osr_ast_id.IsNone()) {
if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
FeedbackVector* feedback_vector = function->feedback_vector();
feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "GetCodeFromOptimizedCodeCache");
- code = feedback_vector->optimized_code();
+ Code* code = feedback_vector->optimized_code();
+
+ if (code != nullptr) {
+ // Caching of optimized code enabled and optimized code found.
+ DCHECK(!code->marked_for_deoptimization());
+ DCHECK(function->shared()->is_compiled());
+ return Handle<Code>(code);
+ }
}
- } else {
- code = function->context()->native_context()->SearchOSROptimizedCodeCache(
- function->shared(), osr_ast_id);
- }
- if (code != nullptr) {
- // Caching of optimized code enabled and optimized code found.
- DCHECK(!code->marked_for_deoptimization());
- DCHECK(function->shared()->is_compiled());
- return Handle<Code>(code);
}
return MaybeHandle<Code>();
}
+void ClearOptimizedCodeCache(CompilationInfo* info) {
+ Handle<JSFunction> function = info->closure();
+ if (info->osr_ast_id().IsNone()) {
+ Handle<FeedbackVector> vector =
+ handle(function->feedback_vector(), function->GetIsolate());
+ vector->ClearOptimizedCode();
+ }
+}
+
void InsertCodeIntoOptimizedCodeCache(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
// Function context specialization folds-in the function context,
// so no sharing can occur.
- if (info->is_function_context_specializing()) return;
+ if (info->is_function_context_specializing()) {
+ // Native context specialized code is not shared, so make sure the optimized
+ // code cache is clear.
+ ClearOptimizedCodeCache(info);
+ return;
+ }
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
@@ -755,9 +682,6 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* info) {
Handle<FeedbackVector> vector =
handle(function->feedback_vector(), function->GetIsolate());
FeedbackVector::SetOptimizedCode(vector, code);
- } else {
- Context::AddToOSROptimizedCodeCache(native_context, shared, code,
- info->osr_ast_id());
}
}
@@ -802,16 +726,6 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
- if (FLAG_mark_optimizing_shared_functions &&
- info->closure()->shared()->has_concurrent_optimization_job()) {
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Compilation job already running for ");
- info->shared_info()->ShortPrint();
- PrintF(".\n");
- }
- return false;
- }
-
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@@ -846,7 +760,6 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
- info->closure()->shared()->set_has_concurrent_optimization_job(true);
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
@@ -857,16 +770,23 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
}
MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
- Compiler::ConcurrencyMode mode,
+ ConcurrencyMode mode,
BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr) {
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
bool ignition_osr = osr_frame && osr_frame->is_interpreted();
+ USE(ignition_osr);
DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
+ // Make sure we clear the optimization marker on the function so that we
+ // don't try to re-optimize.
+ if (function->HasOptimizationMarker()) {
+ function->ClearOptimizationMarker();
+ }
+
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_ast_id)
.ToHandle(&cached_code)) {
@@ -883,41 +803,42 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Reset profiler ticks, function is no longer considered hot.
DCHECK(shared->is_compiled());
- if (shared->HasBaselineCode()) {
- shared->code()->set_profiler_ticks(0);
- } else if (shared->HasBytecodeArray()) {
- shared->set_profiler_ticks(0);
- }
+ shared->set_profiler_ticks(0);
VMState<COMPILER> state(isolate);
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
- bool use_turbofan = UseTurboFan(shared) || ignition_osr;
bool has_script = shared->script()->IsScript();
- // BUG(5946): This DCHECK is necessary to make certain that we won't tolerate
- // the lack of a script without bytecode.
- DCHECK_IMPLIES(!has_script, ShouldUseIgnition(shared, false));
+ // BUG(5946): This DCHECK is necessary to make certain that we won't
+ // tolerate the lack of a script without bytecode.
+ DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
std::unique_ptr<CompilationJob> job(
- use_turbofan ? compiler::Pipeline::NewCompilationJob(function, has_script)
- : new HCompilationJob(function));
+ compiler::Pipeline::NewCompilationJob(function, has_script));
CompilationInfo* info = job->info();
ParseInfo* parse_info = info->parse_info();
info->SetOptimizingForOsr(osr_ast_id, osr_frame);
- // Do not use Crankshaft/TurboFan if we need to be able to set break points.
- if (info->shared_info()->HasDebugInfo()) {
+ // Do not use TurboFan if we need to be able to set break points.
+ if (info->shared_info()->HasBreakInfo()) {
info->AbortOptimization(kFunctionBeingDebugged);
return MaybeHandle<Code>();
}
- // Do not use Crankshaft/TurboFan when %NeverOptimizeFunction was applied.
+ // Do not use TurboFan when %NeverOptimizeFunction was applied.
if (shared->optimization_disabled() &&
shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
info->AbortOptimization(kOptimizationDisabledForTest);
return MaybeHandle<Code>();
}
+ // Do not use TurboFan if optimization is disabled or function doesn't pass
+ // turbo_filter.
+ if (!FLAG_opt || !shared->PassesFilter(FLAG_turbo_filter)) {
+ info->AbortOptimization(kOptimizationDisabled);
+ return MaybeHandle<Code>();
+ }
+
// Limit the number of times we try to optimize functions.
const int kMaxDeoptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_deopt_count : 1000;
@@ -931,8 +852,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// TurboFan can optimize directly from existing bytecode.
- if (use_turbofan && ShouldUseIgnition(info)) {
- DCHECK(shared->HasBytecodeArray());
+ if (shared->HasBytecodeArray()) {
info->MarkAsOptimizeFromBytecode();
}
@@ -949,23 +869,29 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// In case of concurrent recompilation, all handles below this point will be
// allocated in a deferred handle scope that is detached and handed off to
// the background thread when we return.
- std::unique_ptr<CompilationHandleScope> compilation;
- if (mode == Compiler::CONCURRENT) {
- compilation.reset(new CompilationHandleScope(info));
+ base::Optional<CompilationHandleScope> compilation;
+ if (mode == ConcurrencyMode::kConcurrent) {
+ compilation.emplace(info);
}
- // In case of TurboFan, all handles below will be canonicalized.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (use_turbofan) canonical.reset(new CanonicalHandleScope(info->isolate()));
+ // All handles below will be canonicalized.
+ CanonicalHandleScope canonical(info->isolate());
// Reopen handles in the new CompilationHandleScope.
info->ReopenHandlesInNewHandleScope();
parse_info->ReopenHandlesInNewHandleScope();
- if (mode == Compiler::CONCURRENT) {
+ if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(job.get())) {
job.release(); // The background recompile job owns this now.
- return isolate->builtins()->InOptimizationQueue();
+
+ // Set the optimization marker and return a code object which checks it.
+ function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
+ if (function->IsInterpreted()) {
+ return isolate->builtins()->InterpreterEntryTrampoline();
+ } else {
+ return isolate->builtins()->CheckOptimizationMarker();
+ }
}
} else {
if (GetOptimizedCodeNow(job.get())) return info->code();
@@ -975,13 +901,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
-MaybeHandle<Code> GetOptimizedCodeMaybeLater(Handle<JSFunction> function) {
- Isolate* isolate = function->GetIsolate();
- return GetOptimizedCode(function, isolate->concurrent_recompilation_enabled()
- ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT);
-}
-
CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
@@ -995,18 +914,9 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
Handle<SharedFunctionInfo> shared = info->shared_info();
// Reset profiler ticks, function is no longer considered hot.
- if (shared->HasBaselineCode()) {
- shared->code()->set_profiler_ticks(0);
- } else if (shared->HasBytecodeArray()) {
- shared->set_profiler_ticks(0);
- }
-
- shared->set_has_concurrent_optimization_job(false);
+ shared->set_profiler_ticks(0);
- // Shared function no longer needs to be tiered up.
- shared->set_marked_for_tier_up(false);
-
- DCHECK(!shared->HasDebugInfo());
+ DCHECK(!shared->HasBreakInfo());
// 1) Optimization on the concurrent thread may have failed.
// 2) The function may have already been optimized by OSR. Simply continue.
@@ -1039,6 +949,10 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
}
info->closure()->ReplaceCode(shared->code());
+ // Clear the InOptimizationQueue marker, if it exists.
+ if (info->closure()->IsInOptimizationQueue()) {
+ info->closure()->ClearOptimizationMarker();
+ }
return CompilationJob::FAILED;
}
@@ -1053,8 +967,11 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
if (function->shared()->is_compiled()) {
- // Function has already been compiled, get the optimized code if possible,
- // otherwise return baseline code.
+ // Function has already been compiled. Normally we'd expect the CompileLazy
+ // builtin to catch cases where we already have compiled code or optimized
+ // code, but there are paths that call the CompileLazy runtime function
+ // directly (e.g. failed asm.js compilations), so we include a check for
+ // those.
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, BailoutId::None())
.ToHandle(&cached_code)) {
@@ -1063,26 +980,10 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
function->ShortPrint();
PrintF(" during unoptimized compile]\n");
}
- DCHECK(function->shared()->is_compiled());
return cached_code;
}
-
- if (function->shared()->marked_for_tier_up()) {
- DCHECK(FLAG_mark_shared_functions_for_tier_up);
-
- function->shared()->set_marked_for_tier_up(false);
-
- if (FLAG_trace_opt) {
- PrintF("[optimizing method ");
- function->ShortPrint();
- PrintF(" eagerly (shared function marked for tier up)]\n");
- }
-
- Handle<Code> code;
- if (GetOptimizedCodeMaybeLater(function).ToHandle(&code)) {
- return code;
- }
- }
+ // TODO(leszeks): Either handle optimization markers here, or DCHECK that
+ // there aren't any.
return Handle<Code>(function->shared()->code());
} else {
@@ -1094,22 +995,30 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
CompilationInfo info(&compile_zone, &parse_info, isolate, function);
if (FLAG_experimental_preparser_scope_analysis) {
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Script> script(Script::cast(function->shared()->script()));
- if (script->HasPreparsedScopeData()) {
- parse_info.preparsed_scope_data()->Deserialize(
- script->preparsed_scope_data());
+ if (shared->HasPreParsedScopeData()) {
+ Handle<PreParsedScopeData> data(
+ PreParsedScopeData::cast(shared->preparsed_scope_data()));
+ parse_info.consumed_preparsed_scope_data()->SetData(data);
+ // After we've compiled the function, we don't need data about its
+ // skippable functions any more.
+ shared->set_preparsed_scope_data(isolate->heap()->null_value());
}
}
- Compiler::ConcurrencyMode inner_function_mode =
- FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
+ ConcurrencyMode inner_function_mode = FLAG_compiler_dispatcher_eager_inner
+ ? ConcurrencyMode::kConcurrent
+ : ConcurrencyMode::kNotConcurrent;
Handle<Code> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, GetUnoptimizedCode(&info, inner_function_mode), Code);
if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing ");
+ function->ShortPrint();
+ PrintF(" because --always-opt]\n");
+ }
Handle<Code> opt_code;
- if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+ if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent)
.ToHandle(&opt_code)) {
result = opt_code;
}
@@ -1119,7 +1028,6 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
}
}
-
Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
TimerEventScope<TimerEventCompileCode> timer(isolate);
@@ -1127,9 +1035,9 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
- Compiler::ConcurrencyMode inner_function_mode =
- FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
+ ConcurrencyMode inner_function_mode = FLAG_compiler_dispatcher_eager_inner
+ ? ConcurrencyMode::kConcurrent
+ : ConcurrencyMode::kNotConcurrent;
RuntimeCallTimerScope runtimeTimer(
isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
@@ -1141,20 +1049,19 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
{ VMState<COMPILER> state(info->isolate());
if (parse_info->literal() == nullptr) {
- if (!parsing::ParseProgram(parse_info, info->isolate(),
- inner_function_mode != Compiler::CONCURRENT)) {
+ if (!parsing::ParseProgram(
+ parse_info, info->isolate(),
+ inner_function_mode != ConcurrencyMode::kConcurrent)) {
return Handle<SharedFunctionInfo>::null();
}
- if (inner_function_mode == Compiler::CONCURRENT) {
+ if (inner_function_mode == ConcurrencyMode::kConcurrent) {
ParseHandleScope parse_handles(parse_info, info->isolate());
parse_info->ReopenHandlesInNewHandleScope();
parse_info->ast_value_factory()->Internalize(info->isolate());
}
}
- EnsureSharedFunctionInfosArrayOnScript(info);
-
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
@@ -1165,42 +1072,17 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
- // Allocate a shared function info object.
- FunctionLiteral* lit = parse_info->literal();
- DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
- result = isolate->factory()->NewSharedFunctionInfoForLiteral(lit, script);
- result->set_is_toplevel(true);
- parse_info->set_shared_info(result);
- parse_info->set_function_literal_id(result->function_literal_id());
-
// Compile the code.
if (!CompileUnoptimizedCode(info, inner_function_mode)) {
return Handle<SharedFunctionInfo>::null();
}
- Handle<String> script_name =
- script->name()->IsString()
- ? Handle<String>(String::cast(script->name()))
- : isolate->factory()->empty_string();
- CodeEventListener::LogEventsAndTags log_tag =
- parse_info->is_eval()
- ? CodeEventListener::EVAL_TAG
- : Logger::ToNativeByScript(CodeEventListener::SCRIPT_TAG, *script);
-
- PROFILE(isolate, CodeCreateEvent(log_tag, result->abstract_code(), *result,
- *script_name));
-
if (!script.is_null()) {
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- if (FLAG_experimental_preparser_scope_analysis) {
- Handle<PodArray<uint32_t>> data =
- parse_info->preparsed_scope_data()->Serialize(isolate);
- script->set_preparsed_scope_data(*data);
- }
}
}
- return result;
+ return info->shared_info();
}
} // namespace
@@ -1229,9 +1111,6 @@ bool Compiler::Analyze(CompilationInfo* info,
bool Compiler::ParseAndAnalyze(ParseInfo* info, Isolate* isolate) {
if (!parsing::ParseAny(info, isolate)) return false;
- if (info->is_toplevel()) {
- EnsureSharedFunctionInfosArrayOnScript(info, isolate);
- }
if (!Compiler::Analyze(info, isolate)) return false;
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
@@ -1303,6 +1182,12 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared()->is_compiled());
DCHECK(function->is_compiled());
+ DCHECK_IMPLIES(function->HasOptimizationMarker(),
+ function->IsInOptimizationQueue());
+ DCHECK_IMPLIES(function->HasOptimizationMarker(),
+ function->ChecksOptimizationMarker());
+ DCHECK_IMPLIES(function->IsInOptimizationQueue(),
+ mode == ConcurrencyMode::kConcurrent);
return true;
}
@@ -1315,7 +1200,7 @@ bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
CompilationInfo info(parse_info.zone(), &parse_info, isolate,
Handle<JSFunction>::null());
info.MarkAsDebug();
- if (GetUnoptimizedCode(&info, Compiler::NOT_CONCURRENT).is_null()) {
+ if (GetUnoptimizedCode(&info, ConcurrencyMode::kNotConcurrent).is_null()) {
isolate->clear_pending_exception();
return false;
}
@@ -1368,75 +1253,16 @@ bool Compiler::EnsureBytecode(CompilationInfo* info) {
CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
if (dispatcher->IsEnqueued(info->shared_info())) {
if (!dispatcher->FinishNow(info->shared_info())) return false;
- } else if (GetUnoptimizedCode(info, Compiler::NOT_CONCURRENT).is_null()) {
+ } else if (GetUnoptimizedCode(info, ConcurrencyMode::kNotConcurrent)
+ .is_null()) {
return false;
}
}
DCHECK(info->shared_info()->is_compiled());
-
if (info->shared_info()->HasAsmWasmData()) return false;
-
- DCHECK_EQ(ShouldUseIgnition(info), info->shared_info()->HasBytecodeArray());
return info->shared_info()->HasBytecodeArray();
}
-// TODO(turbofan): In the future, unoptimized code with deopt support could
-// be generated lazily once deopt is triggered.
-bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
- DCHECK_NOT_NULL(info->literal());
- DCHECK_NOT_NULL(info->scope());
- Handle<SharedFunctionInfo> shared = info->shared_info();
-
- CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
- if (dispatcher->IsEnqueued(shared)) {
- if (!dispatcher->FinishNow(shared)) return false;
- }
-
- if (!shared->has_deoptimization_support()) {
- // Don't generate full-codegen code for functions which should use Ignition.
- if (ShouldUseIgnition(info)) return false;
-
- DCHECK(!shared->must_use_ignition_turbo());
- DCHECK(!IsResumableFunction(shared->kind()));
-
- Zone compile_zone(info->isolate()->allocator(), ZONE_NAME);
- CompilationInfo unoptimized(&compile_zone, info->parse_info(),
- info->isolate(), info->closure());
- unoptimized.EnableDeoptimizationSupport();
-
- // When we call PrepareForSerializing below, we will change the shared
- // ParseInfo. Make sure to reset it.
- bool old_will_serialize_value = info->parse_info()->will_serialize();
-
- // If the current code has reloc info for serialization, also include
- // reloc info for serialization for the new code, so that deopt support
- // can be added without losing IC state.
- if (shared->code()->kind() == Code::FUNCTION &&
- shared->code()->has_reloc_info_for_serialization()) {
- unoptimized.PrepareForSerializing();
- }
- EnsureFeedbackMetadata(&unoptimized);
-
- if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
-
- info->parse_info()->set_will_serialize(old_will_serialize_value);
-
- // The scope info might not have been set if a lazily compiled
- // function is inlined before being called for the first time.
- if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
- InstallSharedScopeInfo(info, shared);
- }
-
- // Install compilation result on the shared function info
- shared->EnableDeoptimizationSupport(*unoptimized.code());
-
- // The existing unoptimized code was replaced with the new one.
- RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
- &unoptimized);
- }
- return true;
-}
-
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
@@ -1549,8 +1375,20 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
namespace {
-bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context) {
+bool ContainsAsmModule(Handle<Script> script) {
+ DisallowHeapAllocation no_gc;
+ SharedFunctionInfo::ScriptIterator iter(script);
+ while (SharedFunctionInfo* info = iter.Next()) {
+ if (info->HasAsmWasmData()) return true;
+ }
+ return false;
+}
+
+} // namespace
+
+bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context,
+ Handle<String> source) {
DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
// Check with callback if set.
AllowCodeGenerationFromStringsCallback callback =
@@ -1561,21 +1399,10 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
} else {
// Callback set. Let it decide if code generation is allowed.
VMState<EXTERNAL> state(isolate);
- return callback(v8::Utils::ToLocal(context));
- }
-}
-
-bool ContainsAsmModule(Handle<Script> script) {
- DisallowHeapAllocation no_gc;
- SharedFunctionInfo::ScriptIterator iter(script);
- while (SharedFunctionInfo* info = iter.Next()) {
- if (info->HasAsmWasmData()) return true;
+ return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
}
- return false;
}
-} // namespace
-
MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
Handle<Context> context, Handle<String> source,
ParseRestriction restriction, int parameters_end_pos) {
@@ -1585,7 +1412,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
- !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ !CodeGenerationFromStringsAllowed(isolate, native_context, source)) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
THROW_NEW_ERROR(isolate, NewEvalError(MessageTemplate::kCodeGenFromStrings,
@@ -1852,7 +1679,8 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_ast_id.IsNone());
DCHECK_NOT_NULL(osr_frame);
- return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
+ return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent, osr_ast_id,
+ osr_frame);
}
CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
@@ -1886,7 +1714,15 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!function->shared()->HasAsmWasmData() &&
function->shared()->is_compiled()) {
- function->MarkForOptimization();
+ // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
+ JSFunction::EnsureLiterals(function);
+
+ if (!function->IsOptimized()) {
+ // Only mark for optimization if we don't already have optimized code.
+ if (!function->HasOptimizedCode()) {
+ function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
+ }
+ }
}
if (shared->is_compiled()) {
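
Compiler::CodeGenerationFromStringsAllowed above now forwards the offending source string to the embedder's callback. A hedged embedder-side sketch, assuming the public AllowCodeGenerationFromStringsCallback typedef takes the same (Local<Context>, Local<String>) pair as the internal call site shown in the hunk; the policy below is purely illustrative:

#include <v8.h>

// Called when code would be generated from a string (eval, new Function)
// in a context where allow_code_gen_from_strings is false.
bool AllowCodeGenFromStrings(v8::Local<v8::Context> context,
                             v8::Local<v8::String> source) {
  // Illustrative policy: only allow short snippets.
  return source->Length() < 128;
}

void InstallCallback(v8::Isolate* isolate) {
  isolate->SetAllowCodeGenerationFromStringsCallback(AllowCodeGenFromStrings);
}
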
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 5e22a00139..bc39674361 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -40,7 +40,6 @@ class ThreadedListZoneEntry;
class V8_EXPORT_PRIVATE Compiler : public AllStatic {
public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
- enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// ===========================================================================
// The following family of methods ensures a given function is compiled. The
@@ -79,8 +78,6 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Convenience function
static bool Analyze(CompilationInfo* info,
EagerInnerFunctionLiterals* eager_literals = nullptr);
- // Adds deoptimization support, requires ParseAndAnalyze.
- static bool EnsureDeoptimizationSupport(CompilationInfo* info);
// Ensures that bytecode is generated, calls ParseAndAnalyze internally.
static bool EnsureBytecode(CompilationInfo* info);
@@ -102,6 +99,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
+ // Returns true if the embedder permits compiling the given source string in
+ // the given context.
+ static bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context,
+ Handle<String> source);
+
// Create a (bound) function for a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
Handle<Context> context, Handle<String> source,
@@ -211,10 +214,6 @@ class V8_EXPORT_PRIVATE CompilationJob {
virtual Status ExecuteJobImpl() = 0;
virtual Status FinalizeJobImpl() = 0;
- // Registers weak object to optimized code dependencies.
- // TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
- void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
-
private:
CompilationInfo* info_;
ThreadId isolate_thread_id_;
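
The nested Compiler::ConcurrencyMode enum deleted above is replaced throughout this commit by a standalone scoped enum whose enumerators (kConcurrent, kNotConcurrent) appear at the call sites in compiler.cc. A self-contained sketch of that shape; the helper function is illustrative only:

// Scoped enum: usable without the Compiler:: prefix, no implicit conversion
// to int, and enumerator names follow the kFoo constant style.
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };

// Mirrors the shape of UseCompilerDispatcher() in the compiler.cc hunk above.
bool ShouldDispatchConcurrently(ConcurrencyMode mode, bool dispatcher_enabled) {
  return mode == ConcurrencyMode::kConcurrent && dispatcher_enabled;
}
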
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 3a26acc668..b63f5431e2 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -7,6 +7,8 @@ mtrofin@chromium.org
titzer@chromium.org
danno@chromium.org
tebbi@chromium.org
+neis@chromium.org
+mvstanton@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
@@ -14,3 +16,5 @@ per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 5fbbdd09da..7712aac131 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -16,15 +16,6 @@ namespace internal {
namespace compiler {
// static
-FieldAccess AccessBuilder::ForExternalDoubleValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForExternalTaggedValue() {
FieldAccess access = {kUntaggedBase, 0,
MaybeHandle<Name>(), MaybeHandle<Map>(),
@@ -64,7 +55,7 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -113,6 +104,28 @@ FieldAccess AccessBuilder::ForJSCollectionTable() {
}
// static
+FieldAccess AccessBuilder::ForJSCollectionIteratorTable() {
+ FieldAccess access = {
+ kTaggedBase, JSCollectionIterator::kTableOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSCollectionIteratorIndex() {
+ FieldAccess access = {kTaggedBase,
+ JSCollectionIterator::kIndexOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
FieldAccess access = {
kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
@@ -171,6 +184,35 @@ FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
}
// static
+FieldAccess AccessBuilder::ForJSBoundFunctionBoundTargetFunction() {
+ FieldAccess access = {
+ kTaggedBase, JSBoundFunction::kBoundTargetFunctionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Callable(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSBoundFunctionBoundThis() {
+ FieldAccess access = {kTaggedBase, JSBoundFunction::kBoundThisOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSBoundFunctionBoundArguments() {
+ FieldAccess access = {
+ kTaggedBase, JSBoundFunction::kBoundArgumentsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
FieldAccess access = {kTaggedBase, JSGeneratorObject::kContextOffset,
Handle<Name>(), MaybeHandle<Map>(),
@@ -249,16 +291,6 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
}
// static
-FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
- FieldAccess access = {
- kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise() {
FieldAccess access = {
kTaggedBase, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
@@ -278,7 +310,7 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
type_cache.kJSArrayLengthType,
MachineType::TaggedSigned(),
kFullWriteBarrier};
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
access.type = type_cache.kFixedDoubleArrayLengthType;
access.write_barrier_kind = kNoWriteBarrier;
} else if (IsFastElementsKind(elements_kind)) {
@@ -481,6 +513,14 @@ FieldAccess AccessBuilder::ForMapBitField() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForMapBitField2() {
+ FieldAccess access = {
+ kTaggedBase, Map::kBitField2Offset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
@@ -691,7 +731,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
MachineType::AnyTagged(),
kFullWriteBarrier};
if (instance_type == JS_ARRAY_TYPE) {
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
access.type = TypeCache::Get().kFixedDoubleArrayLengthType;
access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
@@ -836,25 +876,25 @@ ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
- case FAST_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
break;
- case FAST_HOLEY_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
access.type = TypeCache::Get().kHoleySmi;
break;
- case FAST_ELEMENTS:
+ case PACKED_ELEMENTS:
access.type = Type::NonInternal();
break;
- case FAST_HOLEY_ELEMENTS:
+ case HOLEY_ELEMENTS:
break;
- case FAST_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
access.type = Type::Number();
access.write_barrier_kind = kNoWriteBarrier;
access.machine_type = MachineType::Float64();
break;
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
access.type = Type::NumberOrHole();
access.write_barrier_kind = kNoWriteBarrier;
access.machine_type = MachineType::Float64();
@@ -966,10 +1006,65 @@ FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
}
// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNextTable() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {
+ kTaggedBase, OrderedHashTableBase::kNextTableOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {kTaggedBase,
+ OrderedHashTableBase::kNumberOfBucketsOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {
+ kTaggedBase,
+ OrderedHashTableBase::kNumberOfDeletedElementsOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfElements() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {kTaggedBase,
+ OrderedHashTableBase::kNumberOfElementsOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
FieldAccess access = {
kTaggedBase,
- FixedArray::OffsetOfElementAt(NameDictionary::kMaxNumberKeyIndex),
+ FixedArray::OffsetOfElementAt(SeededNumberDictionary::kMaxNumberKeyIndex),
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::Any(),
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index b4c3ed0615..cbe3722a14 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -23,9 +23,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// ===========================================================================
// Access to external values (based on external references).
- // Provides access to a double field identified by an external reference.
- static FieldAccess ForExternalDoubleValue();
-
// Provides access to a tagged field identified by an external reference.
static FieldAccess ForExternalTaggedValue();
@@ -55,6 +52,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSCollecton::table() field.
static FieldAccess ForJSCollectionTable();
+ // Provides access to JSCollectionIterator::table() field.
+ static FieldAccess ForJSCollectionIteratorTable();
+
+ // Provides access to JSCollectionIterator::index() field.
+ static FieldAccess ForJSCollectionIteratorIndex();
+
// Provides access to JSFunction::prototype_or_initial_map() field.
static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -73,6 +76,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSFunction::next_function_link() field.
static FieldAccess ForJSFunctionNextFunctionLink();
+ // Provides access to JSBoundFunction::bound_target_function() field.
+ static FieldAccess ForJSBoundFunctionBoundTargetFunction();
+
+ // Provides access to JSBoundFunction::bound_this() field.
+ static FieldAccess ForJSBoundFunctionBoundThis();
+
+ // Provides access to JSBoundFunction::bound_arguments() field.
+ static FieldAccess ForJSBoundFunctionBoundArguments();
+
// Provides access to JSGeneratorObject::context() field.
static FieldAccess ForJSGeneratorObjectContext();
@@ -97,10 +109,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSAsyncGeneratorObject::queue() field.
static FieldAccess ForJSAsyncGeneratorObjectQueue();
- // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
- // field.
- static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
-
// Provides access to JSAsyncGeneratorObject::awaited_promise() field.
static FieldAccess ForJSAsyncGeneratorObjectAwaitedPromise();
@@ -161,6 +169,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to Map::bit_field() byte.
static FieldAccess ForMapBitField();
+ // Provides access to Map::bit_field2() byte.
+ static FieldAccess ForMapBitField2();
+
// Provides access to Map::bit_field3() field.
static FieldAccess ForMapBitField3();
@@ -274,6 +285,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForHashTableBaseNumberOfDeletedElement();
static FieldAccess ForHashTableBaseCapacity();
+ // Provides access to OrderedHashTableBase fields.
+ static FieldAccess ForOrderedHashTableBaseNextTable();
+ static FieldAccess ForOrderedHashTableBaseNumberOfBuckets();
+ static FieldAccess ForOrderedHashTableBaseNumberOfElements();
+ static FieldAccess ForOrderedHashTableBaseNumberOfDeletedElements();
+
// Provides access to Dictionary fields.
static FieldAccess ForDictionaryMaxNumberKey();
static FieldAccess ForDictionaryNextEnumerationIndex();
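
Each AccessBuilder method added above returns a FieldAccess descriptor built by aggregate initialization: base taggedness, byte offset, optional name/map, static type, machine representation, and write-barrier kind. A simplified, self-contained model of that pattern; the struct, the field types, and the offset value below are stand-ins, not V8's actual definitions:

#include <cstddef>

enum class BaseTaggedness { kUntaggedBase, kTaggedBase };
enum class WriteBarrierKind {
  kNoWriteBarrier,
  kPointerWriteBarrier,
  kFullWriteBarrier
};

// A cut-down stand-in for compiler::FieldAccess.
struct FieldAccessDesc {
  BaseTaggedness base;       // tagged heap-object base or raw pointer base
  std::size_t offset;        // byte offset of the field from that base
  const char* static_type;   // stand-in for the Type of the stored value
  const char* machine_type;  // stand-in for the MachineType representation
  WriteBarrierKind barrier;  // write barrier required when storing the field
};

// Shaped like AccessBuilder::ForJSCollectionIteratorIndex() above; the
// offset is illustrative only.
FieldAccessDesc ForIteratorIndex() {
  return {BaseTaggedness::kTaggedBase, 8, "FixedArrayLength", "TaggedSigned",
          WriteBarrierKind::kNoWriteBarrier};
}
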
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 196bf9e896..f6705cc294 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -56,7 +56,6 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
return os << "StoreInLiteral";
}
UNREACHABLE();
- return os;
}
ElementAccessInfo::ElementAccessInfo() {}
@@ -213,7 +212,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
UNREACHABLE();
- return false;
}
AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
@@ -411,9 +409,15 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
isolate());
if (!accessor->IsJSFunction()) {
CallOptimization optimization(accessor);
- if (!optimization.is_simple_api_call()) {
- return false;
- }
+ if (!optimization.is_simple_api_call()) return false;
+ CallOptimization::HolderLookup lookup;
+ holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
+ if (lookup == CallOptimization::kHolderNotFound) return false;
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
+ holder.is_null());
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound,
+ !holder.is_null());
if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
}
if (access_mode == AccessMode::kLoad) {
@@ -433,7 +437,6 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
UNREACHABLE();
- return false;
}
// Don't search on the prototype chain for special indices in case of
@@ -516,14 +519,13 @@ namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
ElementsKind that_kind) {
- if (IsHoleyElementsKind(this_kind)) {
+ if (IsHoleyOrDictionaryElementsKind(this_kind)) {
that_kind = GetHoleyElementsKind(that_kind);
- } else if (IsHoleyElementsKind(that_kind)) {
+ } else if (IsHoleyOrDictionaryElementsKind(that_kind)) {
this_kind = GetHoleyElementsKind(this_kind);
}
if (this_kind == that_kind) return Just(this_kind);
- if (IsFastDoubleElementsKind(that_kind) ==
- IsFastDoubleElementsKind(this_kind)) {
+ if (IsDoubleElementsKind(that_kind) == IsDoubleElementsKind(this_kind)) {
if (IsMoreGeneralElementsKindTransition(that_kind, this_kind)) {
return Just(this_kind);
}
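
The GeneralizeElementsKind change above only swaps in the renamed, wider predicates (IsHoleyOrDictionaryElementsKind, IsDoubleElementsKind); the merge rule itself is unchanged: widen both kinds to holey as soon as either side is holey, then merge only when their double-ness agrees. A self-contained sketch over a toy four-kind lattice (the real enum is much larger, and the real function additionally consults the transition lattice when double-ness matches):

#include <optional>

enum class Kind { PackedSmi, HoleySmi, PackedDouble, HoleyDouble };

bool IsHoley(Kind k)  { return k == Kind::HoleySmi || k == Kind::HoleyDouble; }
bool IsDouble(Kind k) { return k == Kind::PackedDouble || k == Kind::HoleyDouble; }
Kind ToHoley(Kind k)  { return IsDouble(k) ? Kind::HoleyDouble : Kind::HoleySmi; }

// Mirrors the shape of GeneralizeElementsKind: widen to holey first, then
// only kinds with matching double-ness can merge.
std::optional<Kind> Generalize(Kind a, Kind b) {
  if (IsHoley(a)) b = ToHoley(b);
  else if (IsHoley(b)) a = ToHoley(a);
  if (a == b) return a;  // e.g. (PackedSmi, HoleySmi) -> HoleySmi
  // Differing double-ness never merges; in the real code a matching case
  // would still consult IsMoreGeneralElementsKindTransition here.
  return std::nullopt;
}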
@@ -575,7 +577,7 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
// elements, a smi in the range [0, FixedArray::kMaxLength]
// in case of other fast elements, and [0, kMaxUInt32] in
// case of other arrays.
- if (IsFastDoubleElementsKind(map->elements_kind())) {
+ if (IsDoubleElementsKind(map->elements_kind())) {
field_type = type_cache_.kFixedDoubleArrayLengthType;
field_representation = MachineRepresentation::kTaggedSigned;
} else if (IsFastElementsKind(map->elements_kind())) {
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 953b6a15ea..5124491695 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -11,14 +11,15 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/double.h"
+#include "src/float.h"
#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
#define kScratchReg r9
@@ -40,7 +41,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return LeaveCC;
}
UNREACHABLE();
- return LeaveCC;
}
Operand InputImmediate(size_t index) {
@@ -49,11 +49,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
@@ -61,7 +59,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand::Zero();
}
Operand InputOperand2(size_t first_index) {
@@ -93,7 +90,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
}
UNREACHABLE();
- return Operand::Zero();
}
MemOperand InputOffset(size_t* first_index) {
@@ -122,7 +118,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
- return MemOperand(r0);
}
MemOperand InputOffset(size_t first_index = 0) {
@@ -150,7 +145,7 @@ class OutOfLineLoadFloat final : public OutOfLineCode {
void Generate() final {
// Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
- __ vmov(result_, -1.0f);
+ __ vmov(result_, Float32(-1.0f));
__ vsqrt(result_, result_);
}
@@ -165,7 +160,7 @@ class OutOfLineLoadDouble final : public OutOfLineCode {
void Generate() final {
// Compute sqrt(-1.0), which results in a quiet double-precision NaN.
- __ vmov(result_, -1.0);
+ __ vmov(result_, Double(-1.0));
__ vsqrt(result_, result_);
}
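
Both out-of-line load stubs above materialize a quiet NaN by taking the square root of -1; the only change here is that the immediate now goes through the explicit Float32/Double wrappers. A minimal host-side illustration of the same trick:

#include <cmath>
#include <cstdio>

int main() {
  float f = std::sqrt(-1.0f);   // quiet single-precision NaN
  double d = std::sqrt(-1.0);   // quiet double-precision NaN
  std::printf("%d %d\n", std::isnan(f), std::isnan(d));  // prints: 1 1
  return 0;
}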
@@ -201,7 +196,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
@@ -216,7 +212,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,15 +232,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
if (index_.is(no_reg)) {
__ add(scratch1_, object_, Operand(index_immediate_));
} else {
DCHECK_EQ(0, index_immediate_);
__ add(scratch1_, object_, Operand(index_));
}
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -260,6 +257,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
+ Zone* zone_;
};
template <typename T>
@@ -344,15 +342,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
-}
-
-int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
- // If unary shuffle, table is src0 (2 d-registers).
- if (src0.is(src1)) return 2;
- // Binary shuffle, table is src0, src1. They must be consecutive
- DCHECK_EQ(src0.code() + 1, src1.code());
- return 4; // 4 d-registers.
}
} // namespace
@@ -479,12 +468,12 @@ int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
@@ -494,11 +483,11 @@ int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 1, kScratchReg); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
@@ -580,20 +569,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void FlushPendingPushRegisters(MacroAssembler* masm,
+void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- masm->push((*pending_pushes)[0]);
+ tasm->push((*pending_pushes)[0]);
break;
case 2:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -604,18 +593,18 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
-void AddPendingPushRegister(MacroAssembler* masm,
+void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
- FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
- MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -623,15 +612,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
+ tasm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
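
The MacroAssembler-to-TurboAssembler rename above does not change the batching logic: registers headed for the stack are buffered in pending_pushes and emitted as one multi-register push once three have accumulated (or on an explicit flush). A small stand-alone model of that batching, with hypothetical register numbers:

#include <cstdio>
#include <vector>

struct PushBatcher {
  std::vector<int> pending;  // register codes, illustrative only

  void Flush() {
    if (pending.empty()) return;
    std::printf("push {");
    for (size_t i = 0; i < pending.size(); ++i)
      std::printf("%sr%d", i ? "," : "", pending[i]);
    std::printf("}\n");
    pending.clear();
  }

  void Push(int reg) {
    pending.push_back(reg);
    if (pending.size() == 3) Flush();  // same threshold as AddPendingPushRegister
  }
};

int main() {
  PushBatcher b;
  for (int r : {0, 1, 2, 3, 4}) b.Push(r);
  b.Flush();  // emits: push {r0,r1,r2} then push {r3,r4}
  return 0;
}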
@@ -654,20 +643,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- masm(), frame_access_state(),
+ tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ ldr(ip, g.SlotToMemOperand(source_location.index()));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types are not supported.
@@ -675,15 +664,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
- FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -697,10 +686,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ add(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -713,14 +706,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ add(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -776,7 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ __ PrepareCallCFunction(num_parameters);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -850,7 +847,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -945,8 +943,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ vmov(d0, d2);
break;
}
@@ -983,7 +981,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(2), i.OutputSBit());
break;
case kArmMls: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1007,13 +1005,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.OutputSBit());
break;
case kArmSdiv: {
- CpuFeatureScope scope(masm(), SUDIV);
+ CpuFeatureScope scope(tasm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUdiv: {
- CpuFeatureScope scope(masm(), SUDIV);
+ CpuFeatureScope scope(tasm(), SUDIV);
__ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1041,20 +1039,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputSBit());
break;
case kArmBfc: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUbfx: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmSbfx: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1097,7 +1095,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmRbit: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ rbit(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1288,12 +1286,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmVmodF64: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
- __ PrepareCallCFunction(0, 2, kScratchReg);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputDoubleRegister());
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1309,47 +1307,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintmF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintpF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintpF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintzF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintzF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintaF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintnF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintnF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
@@ -1797,14 +1795,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmvn(dst, dst);
break;
}
- case kArmI32x4LtS: {
- __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GtS: {
+ __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI32x4LeS: {
- __ vcge(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GeS: {
+ __ vcge(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI32x4UConvertF32x4: {
@@ -1836,14 +1834,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI32x4LtU: {
- __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GtU: {
+ __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI32x4LeU: {
- __ vcge(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GeU: {
+ __ vcge(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI16x8Splat: {
@@ -1937,14 +1935,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmvn(dst, dst);
break;
}
- case kArmI16x8LtS: {
- __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GtS: {
+ __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI16x8LeS: {
- __ vcge(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GeS: {
+ __ vcge(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI16x8UConvertI8x16Low: {
@@ -1985,14 +1983,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8LtU: {
- __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GtU: {
+ __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI16x8LeU: {
- __ vcge(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GeU: {
+ __ vcge(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI8x16Splat: {
@@ -2072,14 +2070,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmvn(dst, dst);
break;
}
- case kArmI8x16LtS: {
- __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GtS: {
+ __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI8x16LeS: {
- __ vcge(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GeS: {
+ __ vcge(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI8x16ShrU: {
@@ -2110,14 +2108,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16LtU: {
- __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GtU: {
+ __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI8x16LeU: {
- __ vcge(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GeU: {
+ __ vcge(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmS128Zero: {
@@ -2145,10 +2143,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmS128Select: {
- // vbsl clobbers the mask input so make sure it was DefineSameAsFirst.
- DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
- __ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ vbsl(dst, i.InputSimd128Register(1), i.InputSimd128Register(2));
break;
}
case kArmS32x4ZipLeft: {
@@ -2289,39 +2286,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
break;
}
- case kArmS16x8Shuffle: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- DwVfpRegister table_base = src0.low();
- int table_size = GetVtblTableSize(src0, src1);
- // Convert the shuffle lane masks to byte masks in kScratchQuadReg.
- int scratch_s_base = kScratchQuadReg.code() * 4;
- for (int j = 0; j < 2; j++) {
- int32_t four_lanes = i.InputInt32(2 + j);
- for (int k = 0; k < 2; k++) {
- uint8_t w0 = (four_lanes & 0xF) * kShortSize;
- four_lanes >>= 8;
- uint8_t w1 = (four_lanes & 0xF) * kShortSize;
- four_lanes >>= 8;
- int32_t mask = w0 | ((w0 + 1) << 8) | (w1 << 16) | ((w1 + 1) << 24);
- // Ensure byte indices are in [0, 31] so masks are never NaNs.
- four_lanes &= 0x1F1F1F1F;
- __ vmov(SwVfpRegister::from_code(scratch_s_base + 2 * j + k),
- bit_cast<float>(mask));
- }
- }
- NeonListOperand table(table_base, table_size);
- if (!dst.is(src0) && !dst.is(src1)) {
- __ vtbl(dst.low(), table, kScratchQuadReg.low());
- __ vtbl(dst.high(), table, kScratchQuadReg.high());
- } else {
- __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
- __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
- __ vmov(dst, kScratchQuadReg);
- }
- break;
- }
case kArmS8x16ZipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
@@ -2386,15 +2350,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
DwVfpRegister table_base = src0.low();
- int table_size = GetVtblTableSize(src0, src1);
+ // If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
+ // src1. They must be consecutive.
+ int table_size = src0.is(src1) ? 2 : 4;
+ DCHECK_IMPLIES(!src0.is(src1), src0.code() + 1 == src1.code());
// The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
int scratch_s_base = kScratchQuadReg.code() * 4;
for (int j = 0; j < 4; j++) {
- int32_t four_lanes = i.InputInt32(2 + j);
+ uint32_t four_lanes = i.InputUint32(2 + j);
// Ensure byte indices are in [0, 31] so masks are never NaNs.
four_lanes &= 0x1F1F1F1F;
__ vmov(SwVfpRegister::from_code(scratch_s_base + j),
- bit_cast<float>(four_lanes));
+ Float32(four_lanes));
}
NeonListOperand table(table_base, table_size);
if (!dst.is(src0) && !dst.is(src1)) {
@@ -2669,15 +2636,15 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
// not have a context here.
- __ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ PrepareCallCFunction(0, 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2750,12 +2717,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2821,7 +2788,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
@@ -2840,7 +2807,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
__ Move(kScratchReg,
Operand(ExternalReference::address_of_real_stack_limit(
- isolate())));
+ __ isolate())));
__ ldr(kScratchReg, MemOperand(kScratchReg));
__ add(kScratchReg, kScratchReg,
Operand(shrink_slots * kPointerSize));
@@ -2855,7 +2822,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2937,7 +2904,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2979,12 +2946,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
@@ -3011,7 +2976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ str(ip, dst);
} else {
SwVfpRegister dst = g.ToFloatRegister(destination);
- __ vmov(dst, src.ToFloat32());
+ __ vmov(dst, Float32(src.ToFloat32AsInt()));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
@@ -3078,7 +3043,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ vld1(Neon8, NeonListOperand(dst.low(), 2),
NeonMemOperand(kScratchReg));
}
- } else if (rep == MachineRepresentation::kFloat64) {
+ } else {
DCHECK(destination->IsFPStackSlot());
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
@@ -3235,10 +3200,10 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block literal pool emission for duration of padding.
- v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+ v8::internal::Assembler::BlockConstPoolScope block_const_pool(tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index db3e515c40..00a4154ad3 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -160,16 +160,16 @@ namespace compiler {
V(ArmI32x4MaxS) \
V(ArmI32x4Eq) \
V(ArmI32x4Ne) \
- V(ArmI32x4LtS) \
- V(ArmI32x4LeS) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
V(ArmI32x4UConvertF32x4) \
V(ArmI32x4UConvertI16x8Low) \
V(ArmI32x4UConvertI16x8High) \
V(ArmI32x4ShrU) \
V(ArmI32x4MinU) \
V(ArmI32x4MaxU) \
- V(ArmI32x4LtU) \
- V(ArmI32x4LeU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLane) \
V(ArmI16x8ReplaceLane) \
@@ -189,8 +189,8 @@ namespace compiler {
V(ArmI16x8MaxS) \
V(ArmI16x8Eq) \
V(ArmI16x8Ne) \
- V(ArmI16x8LtS) \
- V(ArmI16x8LeS) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
V(ArmI16x8UConvertI8x16Low) \
V(ArmI16x8UConvertI8x16High) \
V(ArmI16x8ShrU) \
@@ -199,8 +199,8 @@ namespace compiler {
V(ArmI16x8SubSaturateU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
- V(ArmI16x8LtU) \
- V(ArmI16x8LeU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
V(ArmI8x16Splat) \
V(ArmI8x16ExtractLane) \
V(ArmI8x16ReplaceLane) \
@@ -217,16 +217,16 @@ namespace compiler {
V(ArmI8x16MaxS) \
V(ArmI8x16Eq) \
V(ArmI8x16Ne) \
- V(ArmI8x16LtS) \
- V(ArmI8x16LeS) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
V(ArmI8x16ShrU) \
V(ArmI8x16UConvertI16x8) \
V(ArmI8x16AddSaturateU) \
V(ArmI8x16SubSaturateU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
- V(ArmI8x16LtU) \
- V(ArmI8x16LeU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
V(ArmS128Zero) \
V(ArmS128And) \
V(ArmS128Or) \
@@ -246,7 +246,6 @@ namespace compiler {
V(ArmS16x8UnzipRight) \
V(ArmS16x8TransposeLeft) \
V(ArmS16x8TransposeRight) \
- V(ArmS16x8Shuffle) \
V(ArmS8x16ZipLeft) \
V(ArmS8x16ZipRight) \
V(ArmS8x16UnzipLeft) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 549752d09e..7b1f1b30f3 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -144,16 +144,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4MaxS:
case kArmI32x4Eq:
case kArmI32x4Ne:
- case kArmI32x4LtS:
- case kArmI32x4LeS:
+ case kArmI32x4GtS:
+ case kArmI32x4GeS:
case kArmI32x4UConvertF32x4:
case kArmI32x4UConvertI16x8Low:
case kArmI32x4UConvertI16x8High:
case kArmI32x4ShrU:
case kArmI32x4MinU:
case kArmI32x4MaxU:
- case kArmI32x4LtU:
- case kArmI32x4LeU:
+ case kArmI32x4GtU:
+ case kArmI32x4GeU:
case kArmI16x8Splat:
case kArmI16x8ExtractLane:
case kArmI16x8ReplaceLane:
@@ -173,8 +173,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8MaxS:
case kArmI16x8Eq:
case kArmI16x8Ne:
- case kArmI16x8LtS:
- case kArmI16x8LeS:
+ case kArmI16x8GtS:
+ case kArmI16x8GeS:
case kArmI16x8UConvertI8x16Low:
case kArmI16x8UConvertI8x16High:
case kArmI16x8ShrU:
@@ -183,8 +183,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8SubSaturateU:
case kArmI16x8MinU:
case kArmI16x8MaxU:
- case kArmI16x8LtU:
- case kArmI16x8LeU:
+ case kArmI16x8GtU:
+ case kArmI16x8GeU:
case kArmI8x16Splat:
case kArmI8x16ExtractLane:
case kArmI8x16ReplaceLane:
@@ -201,16 +201,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16MaxS:
case kArmI8x16Eq:
case kArmI8x16Ne:
- case kArmI8x16LtS:
- case kArmI8x16LeS:
+ case kArmI8x16GtS:
+ case kArmI8x16GeS:
case kArmI8x16UConvertI16x8:
case kArmI8x16AddSaturateU:
case kArmI8x16SubSaturateU:
case kArmI8x16ShrU:
case kArmI8x16MinU:
case kArmI8x16MaxU:
- case kArmI8x16LtU:
- case kArmI8x16LeU:
+ case kArmI8x16GtU:
+ case kArmI8x16GeU:
case kArmS128Zero:
case kArmS128And:
case kArmS128Or:
@@ -230,7 +230,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS16x8UnzipRight:
case kArmS16x8TransposeLeft:
case kArmS16x8TransposeRight:
- case kArmS16x8Shuffle:
case kArmS8x16ZipLeft:
case kArmS8x16ZipRight:
case kArmS8x16UnzipLeft:
@@ -283,7 +282,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
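
The Lt*/Le* to Gt*/Ge* rename running through the three ARM files above reflects how these compares map onto NEON: the hardware compares are the "greater" forms (vcgt/vcge), and a lane-wise less-than was previously obtained by emitting the same instruction with its operands swapped. A scalar model of one lane set, showing that the swapped "greater" form computes less-than:

#include <array>
#include <cstdint>
#include <cstdio>

// Lane-wise signed "greater than" on 4 x int32, modelling vcgt.s32: each
// result lane is all-ones when the predicate holds, all-zeros otherwise.
std::array<uint32_t, 4> GtS32x4(const std::array<int32_t, 4>& a,
                                const std::array<int32_t, 4>& b) {
  std::array<uint32_t, 4> r{};
  for (int i = 0; i < 4; ++i) r[i] = a[i] > b[i] ? 0xFFFFFFFFu : 0u;
  return r;
}

int main() {
  std::array<int32_t, 4> a{1, 5, -3, 7}, b{2, 5, -4, 9};
  auto a_lt_b = GtS32x4(b, a);  // a < b lane-wise, via the swapped "greater" form
  for (uint32_t lane : a_lt_b) std::printf("%u ", lane ? 1u : 0u);  // prints: 1 0 0 1
  std::printf("\n");
  return 0;
}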
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 8983c9b115..3840ae8158 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -112,15 +112,6 @@ void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
-void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
- ArmOperandGenerator g(selector);
- // Use DefineSameAsFirst for ternary ops that clobber their first input,
- // e.g. the NEON vbsl instruction.
- selector->Emit(
- opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
-}
-
void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node);
@@ -459,9 +450,6 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArmVld1S128;
break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -549,9 +537,6 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArmVst1S128;
break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -758,9 +743,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -805,9 +787,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -882,11 +861,18 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t const shift = mshr.right().Value();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
- ((value == 0xff) || (value == 0xffff))) {
- // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
+ (value == 0xff)) {
+ // Merge SHR into AND by emitting a UXTB instruction with a
+ // bytewise rotation.
+ Emit(kArmUxtb, g.DefineAsRegister(m.node()),
+ g.UseRegister(mshr.left().node()),
+ g.TempImmediate(mshr.right().Value()));
+ return;
+ } else if (((shift == 8) || (shift == 16)) && (value == 0xffff)) {
+ // Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
- Emit((value == 0xff) ? kArmUxtb : kArmUxth,
- g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
+ Emit(kArmUxth, g.DefineAsRegister(m.node()),
+ g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
return;
} else if (IsSupported(ARMv7) && (width != 0) &&
@@ -1384,14 +1370,14 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
+ if (value < kMaxInt && base::bits::IsPowerOfTwo(value + 1)) {
Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
@@ -1728,7 +1714,6 @@ FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
return kNotEqual;
default:
UNREACHABLE();
- return cond;
}
}
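
A few hunks above, VisitInt32Mul now calls the width-agnostic base::bits::IsPowerOfTwo, but the strength reduction it guards is unchanged: a multiply by 2^k + 1 becomes an add with a shifted operand, and a multiply by 2^k - 1 becomes a reverse-subtract with a shifted operand. A self-contained model of the two rewrites:

#include <cassert>
#include <cstdint>

bool IsPowerOfTwo(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }
int WhichPowerOf2(uint32_t v) { int n = 0; while (v >>= 1) ++n; return n; }

// x * (2^k + 1)  ->  x + (x << k)   (kArmAdd with an LSL operand)
// x * (2^k - 1)  ->  (x << k) - x   (kArmRsb with an LSL operand)
int32_t MulByConstant(int32_t x, int32_t c) {
  if (c > 0 && IsPowerOfTwo(uint32_t(c) - 1))
    return x + (x << WhichPowerOf2(uint32_t(c) - 1));
  if (c > 0 && IsPowerOfTwo(uint32_t(c) + 1))
    return (x << WhichPowerOf2(uint32_t(c) + 1)) - x;
  return x * c;  // no pattern: keep the real multiply
}

int main() {
  assert(MulByConstant(7, 9) == 63);    // 9 = 2^3 + 1
  assert(MulByConstant(7, 15) == 105);  // 15 = 2^4 - 1
  return 0;
}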
@@ -2043,6 +2028,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2050,7 +2036,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
@@ -2391,15 +2378,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16)
#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
-#define SIMD_ZERO_OP_LIST(V) \
- V(S128Zero) \
- V(S1x4Zero) \
- V(S1x8Zero) \
- V(S1x16Zero)
+ V(32x4, 4) \
+ V(16x8, 8) \
+ V(8x16, 16)
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4) \
@@ -2422,13 +2403,10 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \
V(I8x16Neg, kArmI8x16Neg) \
V(S128Not, kArmS128Not) \
- V(S1x4Not, kArmS128Not) \
V(S1x4AnyTrue, kArmS1x4AnyTrue) \
V(S1x4AllTrue, kArmS1x4AllTrue) \
- V(S1x8Not, kArmS128Not) \
V(S1x8AnyTrue, kArmS1x8AnyTrue) \
V(S1x8AllTrue, kArmS1x8AllTrue) \
- V(S1x16Not, kArmS128Not) \
V(S1x16AnyTrue, kArmS1x16AnyTrue) \
V(S1x16AllTrue, kArmS1x16AllTrue)
@@ -2462,12 +2440,12 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MaxS, kArmI32x4MaxS) \
V(I32x4Eq, kArmI32x4Eq) \
V(I32x4Ne, kArmI32x4Ne) \
- V(I32x4LtS, kArmI32x4LtS) \
- V(I32x4LeS, kArmI32x4LeS) \
+ V(I32x4GtS, kArmI32x4GtS) \
+ V(I32x4GeS, kArmI32x4GeS) \
V(I32x4MinU, kArmI32x4MinU) \
V(I32x4MaxU, kArmI32x4MaxU) \
- V(I32x4LtU, kArmI32x4LtU) \
- V(I32x4LeU, kArmI32x4LeU) \
+ V(I32x4GtU, kArmI32x4GtU) \
+ V(I32x4GeU, kArmI32x4GeU) \
V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
V(I16x8Add, kArmI16x8Add) \
V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
@@ -2479,15 +2457,15 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8MaxS, kArmI16x8MaxS) \
V(I16x8Eq, kArmI16x8Eq) \
V(I16x8Ne, kArmI16x8Ne) \
- V(I16x8LtS, kArmI16x8LtS) \
- V(I16x8LeS, kArmI16x8LeS) \
+ V(I16x8GtS, kArmI16x8GtS) \
+ V(I16x8GeS, kArmI16x8GeS) \
V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
V(I16x8MinU, kArmI16x8MinU) \
V(I16x8MaxU, kArmI16x8MaxU) \
- V(I16x8LtU, kArmI16x8LtU) \
- V(I16x8LeU, kArmI16x8LeU) \
+ V(I16x8GtU, kArmI16x8GtU) \
+ V(I16x8GeU, kArmI16x8GeU) \
V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
V(I8x16Add, kArmI8x16Add) \
V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
@@ -2498,27 +2476,23 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MaxS, kArmI8x16MaxS) \
V(I8x16Eq, kArmI8x16Eq) \
V(I8x16Ne, kArmI8x16Ne) \
- V(I8x16LtS, kArmI8x16LtS) \
- V(I8x16LeS, kArmI8x16LeS) \
+ V(I8x16GtS, kArmI8x16GtS) \
+ V(I8x16GeS, kArmI8x16GeS) \
V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
V(I8x16MinU, kArmI8x16MinU) \
V(I8x16MaxU, kArmI8x16MaxU) \
- V(I8x16LtU, kArmI8x16LtU) \
- V(I8x16LeU, kArmI8x16LeU) \
+ V(I8x16GtU, kArmI8x16GtU) \
+ V(I8x16GeU, kArmI8x16GeU) \
V(S128And, kArmS128And) \
V(S128Or, kArmS128Or) \
- V(S128Xor, kArmS128Xor) \
- V(S1x4And, kArmS128And) \
- V(S1x4Or, kArmS128Or) \
- V(S1x4Xor, kArmS128Xor) \
- V(S1x8And, kArmS128And) \
- V(S1x8Or, kArmS128Or) \
- V(S1x8Xor, kArmS128Xor) \
- V(S1x16And, kArmS128And) \
- V(S1x16Or, kArmS128Or) \
- V(S1x16Xor, kArmS128Xor)
+ V(S128Xor, kArmS128Xor)
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+}
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -2541,14 +2515,6 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
-#define SIMD_VISIT_ZERO_OP(Name) \
- void InstructionSelector::Visit##Name(Node* node) { \
- ArmOperandGenerator g(this); \
- Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
- }
-SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
-#undef SIMD_VISIT_ZERO_OP
-
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
VisitRR(this, instruction, node); \
@@ -2570,40 +2536,79 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
-#define SIMD_VISIT_SELECT_OP(format) \
- void InstructionSelector::VisitS##format##Select(Node* node) { \
- VisitRRRR(this, kArmS128Select, node); \
- }
-SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
-#undef SIMD_VISIT_SELECT_OP
+void InstructionSelector::VisitS128Select(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmS128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
namespace {
-template <int LANES>
+
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ static const int kLanes = 4;
+ static const int kLaneSize = 4;
+ for (int i = 0; i < kLanes; ++i) {
+ if (shuffle[i * kLaneSize] % kLaneSize != 0) return false;
+ for (int j = 1; j < kLaneSize; ++j) {
+ if (shuffle[i * kLaneSize + j] - shuffle[i * kLaneSize + j - 1] != 1)
+ return false;
+ }
+ shuffle32x4[i] = shuffle[i * kLaneSize] / kLaneSize;
+ }
+ return true;
+}
+
+// Tries to match byte shuffle to concatenate (vext) operation.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
+ uint8_t start = shuffle[0];
+ for (int i = 1; i < kSimd128Size - start; ++i) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = kSimd128Size;
+ for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *offset = start;
+ return true;
+}
+
struct ShuffleEntry {
- uint8_t shuffle[LANES];
+ uint8_t shuffle[kSimd128Size];
ArchOpcode opcode;
};
-static const ShuffleEntry<4> arch_s32x4_shuffles[] = {
- {{0, 4, 1, 5}, kArmS32x4ZipLeft},
- {{2, 6, 3, 7}, kArmS32x4ZipRight},
- {{0, 2, 4, 6}, kArmS32x4UnzipLeft},
- {{1, 3, 5, 7}, kArmS32x4UnzipRight},
- {{0, 4, 2, 6}, kArmS32x4TransposeLeft},
- {{1, 5, 3, 7}, kArmS32x4TransposeRight},
- {{1, 0, 3, 2}, kArmS32x2Reverse}};
-
-static const ShuffleEntry<8> arch_s16x8_shuffles[] = {
- {{0, 8, 1, 9, 2, 10, 3, 11}, kArmS16x8ZipLeft},
- {{4, 12, 5, 13, 6, 14, 7, 15}, kArmS16x8ZipRight},
- {{0, 2, 4, 6, 8, 10, 12, 14}, kArmS16x8UnzipLeft},
- {{1, 3, 5, 7, 9, 11, 13, 15}, kArmS16x8UnzipRight},
- {{0, 8, 2, 10, 4, 12, 6, 14}, kArmS16x8TransposeLeft},
- {{1, 9, 3, 11, 5, 13, 7, 15}, kArmS16x8TransposeRight},
- {{3, 2, 1, 0, 7, 6, 5, 4}, kArmS16x4Reverse},
- {{1, 0, 3, 2, 5, 4, 7, 6}, kArmS16x2Reverse}};
-
-static const ShuffleEntry<16> arch_s8x16_shuffles[] = {
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kArmS32x4ZipLeft},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArmS32x4ZipRight},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kArmS32x4UnzipLeft},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kArmS32x4UnzipRight},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kArmS32x4TransposeLeft},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArmS32x4TransposeRight},
+ {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, kArmS32x2Reverse},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kArmS16x8ZipLeft},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kArmS16x8ZipRight},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kArmS16x8UnzipLeft},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kArmS16x8UnzipRight},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kArmS16x8TransposeLeft},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kArmS16x8TransposeRight},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kArmS16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kArmS16x2Reverse},
+
{{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
kArmS8x16ZipLeft},
{{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
@@ -2620,45 +2625,28 @@ static const ShuffleEntry<16> arch_s8x16_shuffles[] = {
{{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArmS8x4Reverse},
{{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};
-// Use a non-shuffle opcode to signal no match.
-static const ArchOpcode kNoShuffle = kArmS128Not;
-
-template <int LANES>
-ArchOpcode TryMatchArchShuffle(const uint8_t* shuffle,
- const ShuffleEntry<LANES>* table,
- size_t num_entries, uint8_t mask) {
- for (size_t i = 0; i < num_entries; i++) {
- const ShuffleEntry<LANES>& entry = table[i];
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
int j = 0;
- for (; j < LANES; j++) {
+ for (; j < kSimd128Size; ++j) {
if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
break;
}
}
- if (j == LANES) return entry.opcode;
- }
- return kNoShuffle;
-}
-
-// Returns the bias if shuffle is a concatenation, 0 otherwise.
-template <int LANES>
-uint8_t TryMatchConcat(const uint8_t* shuffle, uint8_t mask) {
- uint8_t start = shuffle[0];
- int i = 1;
- for (; i < LANES - start; i++) {
- if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return 0;
- }
- uint8_t wrap = LANES;
- for (; i < LANES; i++, wrap++) {
- if ((shuffle[i] & mask) != (wrap & mask)) return 0;
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
}
- return start;
+ return false;
}
// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
// will ignore the high bit of indices in some cases.
-uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
- int num_lanes) {
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ static const int kUnaryShuffleMask = kSimd128Size - 1;
const uint8_t* shuffle = OpParameter<uint8_t*>(node);
uint8_t mask = 0xff;
// If shuffle is unary, set 'mask' to ignore the high bit of the indices.
@@ -2666,12 +2654,12 @@ uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
if (selector->GetVirtualRegister(node->InputAt(0)) ==
selector->GetVirtualRegister(node->InputAt(1))) {
// unary, src0 == src1.
- mask = num_lanes - 1;
+ mask = kUnaryShuffleMask;
} else {
bool src0_is_used = false;
bool src1_is_used = false;
- for (int i = 0; i < num_lanes; i++) {
- if (shuffle[i] < num_lanes) {
+ for (int i = 0; i < kSimd128Size; i++) {
+ if (shuffle[i] < kSimd128Size) {
src0_is_used = true;
} else {
src1_is_used = true;
@@ -2679,10 +2667,10 @@ uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
}
if (src0_is_used && !src1_is_used) {
node->ReplaceInput(1, node->InputAt(0));
- mask = num_lanes - 1;
+ mask = kUnaryShuffleMask;
} else if (src1_is_used && !src0_is_used) {
node->ReplaceInput(0, node->InputAt(1));
- mask = num_lanes - 1;
+ mask = kUnaryShuffleMask;
}
}
return mask;
@@ -2690,7 +2678,7 @@ uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
int32_t result = 0;
- for (int i = 3; i >= 0; i--) {
+ for (int i = 3; i >= 0; --i) {
result <<= 8;
result |= shuffle[i] & mask;
}
@@ -2711,70 +2699,29 @@ void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
} // namespace
-void InstructionSelector::VisitS32x4Shuffle(Node* node) {
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
const uint8_t* shuffle = OpParameter<uint8_t*>(node);
- uint8_t mask = CanonicalizeShuffle(this, node, 4);
- ArchOpcode opcode = TryMatchArchShuffle<4>(
- shuffle, arch_s32x4_shuffles, arraysize(arch_s32x4_shuffles), mask);
- if (opcode != kNoShuffle) {
- VisitRRRShuffle(this, opcode, node);
- return;
- }
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
ArmOperandGenerator g(this);
- uint8_t lanes = TryMatchConcat<4>(shuffle, mask);
- if (lanes != 0) {
- Emit(kArmS8x16Concat, g.DefineAsRegister(node),
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(lanes * 4));
- return;
- }
- Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle, mask)));
-}
-
-void InstructionSelector::VisitS16x8Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
- uint8_t mask = CanonicalizeShuffle(this, node, 8);
- ArchOpcode opcode = TryMatchArchShuffle<8>(
- shuffle, arch_s16x8_shuffles, arraysize(arch_s16x8_shuffles), mask);
- if (opcode != kNoShuffle) {
- VisitRRRShuffle(this, opcode, node);
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
return;
}
- ArmOperandGenerator g(this);
- Node* input0 = node->InputAt(0);
- Node* input1 = node->InputAt(1);
- uint8_t lanes = TryMatchConcat<8>(shuffle, mask);
- if (lanes != 0) {
- Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(lanes * 2));
- return;
- }
- // Code generator uses vtbl, arrange sources to form a valid lookup table.
- InstructionOperand src0, src1;
- ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
- Emit(kArmS16x8Shuffle, g.DefineAsRegister(node), src0, src1,
- g.UseImmediate(Pack4Lanes(shuffle, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 4, mask)));
-}
-
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
- uint8_t mask = CanonicalizeShuffle(this, node, 16);
- ArchOpcode opcode = TryMatchArchShuffle<16>(
- shuffle, arch_s8x16_shuffles, arraysize(arch_s8x16_shuffles), mask);
- if (opcode != kNoShuffle) {
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
VisitRRRShuffle(this, opcode, node);
return;
}
- ArmOperandGenerator g(this);
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t lanes = TryMatchConcat<16>(shuffle, mask);
- if (lanes != 0) {
+ uint8_t offset;
+ if (TryMatchConcat(shuffle, mask, &offset)) {
Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(lanes));
+ g.UseRegister(input1), g.UseImmediate(offset));
return;
}
// Code generator uses vtbl, arrange sources to form a valid lookup table.
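
The shuffle lowering rewritten above first tries TryMatch32x4Shuffle, then the table of architectural shuffles, and then TryMatchConcat, which recognizes a shuffle whose byte indices count up consecutively (with wrap-around) and can therefore be emitted as a single kArmS8x16Concat (NEON vext). A stand-alone copy of the concat matcher with a small check:

#include <cassert>
#include <cstdint>

static const int kSimd128Size = 16;

// Same logic as the TryMatchConcat added above: the indices must count up by
// one (under the unary/binary mask) from some starting offset, wrapping once.
bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
  uint8_t start = shuffle[0];
  for (int i = 1; i < kSimd128Size - start; ++i) {
    if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
  }
  uint8_t wrap = kSimd128Size;
  for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
    if ((shuffle[i] & mask) != (wrap & mask)) return false;
  }
  *offset = start;
  return true;
}

int main() {
  // A unary rotate-left-by-3-bytes of one 16-byte vector.
  uint8_t rot3[kSimd128Size] = {3, 4, 5, 6, 7, 8, 9, 10,
                                11, 12, 13, 14, 15, 0, 1, 2};
  uint8_t offset = 0;
  assert(TryMatchConcat(rot3, 0x0F, &offset) && offset == 3);  // vext #3
  return 0;
}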
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 88311c35e8..b36aab4aa0 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
@@ -35,6 +34,10 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputDoubleRegister(index);
}
+ DoubleRegister InputSimd128Register(size_t index) {
+ return InputDoubleRegister(index).Q();
+ }
+
CPURegister InputFloat32OrZeroRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) {
DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
@@ -59,6 +62,8 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
+ DoubleRegister OutputSimd128Register() { return OutputDoubleRegister().Q(); }
+
Register InputRegister32(size_t index) {
return ToRegister(instr_->InputAt(index)).W();
}
@@ -83,10 +88,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputRegister64(index);
}
- Operand InputImmediate(size_t index) {
- return ToImmediate(instr_->InputAt(index));
- }
-
Operand InputOperand(size_t index) {
return ToOperand(instr_->InputAt(index));
}
@@ -132,7 +133,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(-1);
}
Operand InputOperand2_64(size_t index) {
@@ -162,7 +162,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(-1);
}
MemOperand MemoryOperand(size_t* first_index) {
@@ -190,7 +189,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
- return MemOperand(no_reg);
}
MemOperand MemoryOperand(size_t first_index = 0) {
@@ -228,11 +226,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(constant.ToInt64());
}
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand(Operand::EmbeddedNumber(constant.ToFloat32()));
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
case Constant::kHeapObject:
@@ -242,26 +238,25 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(-1);
}
- MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+ MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
- return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
}
- MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
+ MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
if (offset.from_frame_pointer()) {
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
// Convert FP-offsets to SP-offsets if it results in better code.
if (Assembler::IsImmLSUnscaled(from_sp) ||
- Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
+ Assembler::IsImmLSScaled(from_sp, 3)) {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
- return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+ return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp,
offset.offset());
}
};
@@ -323,7 +318,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -343,10 +339,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ Add(scratch1_, object_, index_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -362,6 +358,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
+ Zone* zone_;
};
@@ -416,21 +413,20 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return mi;
}
UNREACHABLE();
- return nv;
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
- do { \
- if (length.IsImmediate() && \
- base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
- __ Tst(offset, ~(length.ImmediateValue() - 1)); \
- __ B(ne, out_of_bounds); \
- } else { \
- __ Cmp(offset, length); \
- __ B(hs, out_of_bounds); \
- } \
+#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
+ do { \
+ if (length.IsImmediate() && \
+ base::bits::IsPowerOfTwo(length.ImmediateValue())) { \
+ __ Tst(offset, ~(length.ImmediateValue() - 1)); \
+ __ B(ne, out_of_bounds); \
+ } else { \
+ __ Cmp(offset, length); \
+ __ B(hs, out_of_bounds); \
+ } \
} while (0)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
@@ -569,18 +565,18 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ cbnz(i.TempRegister32(1), &binop); \
} while (0)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
} while (0)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -631,7 +627,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -639,10 +635,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->Claim(stack_slot_delta);
+ tasm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->Drop(-stack_slot_delta);
+ tasm->Drop(-stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -651,13 +647,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -669,10 +665,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
Register target = i.InputRegister(0);
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
@@ -696,14 +696,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
Register target = i.InputRegister(0);
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
@@ -727,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
@@ -755,7 +759,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
@@ -828,7 +832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
- __ mov(i.OutputRegister(), masm()->StackPointer());
+ __ mov(i.OutputRegister(), tasm()->StackPointer());
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
@@ -841,7 +845,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -930,8 +935,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@@ -1076,14 +1081,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Imod: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Imod32: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireW();
__ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1091,14 +1096,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umod: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Umod32: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireW();
__ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1233,7 +1238,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Align the CSP and store the previous JSSP on the stack. We do not
// need to modify the SP delta here, as we will continue to access the
// frame via JSSP.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register tmp = scope.AcquireX();
// TODO(arm64): Storing JSSP on the stack is redundant when calling a C
@@ -1241,7 +1246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// calling a code object that uses the CSP as the stack pointer). See
// the code generation for kArchCallCodeObject vs. kArchCallCFunction
// (the latter does not restore CSP/JSSP).
- // MacroAssembler::CallCFunction() (safely) drops this extra slot
+ // TurboAssembler::CallCFunction() (safely) drops this extra slot
// anyway.
int sp_alignment = __ ActivationFrameAlignment();
__ Sub(tmp, jssp, kPointerSize);
@@ -1400,13 +1405,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64Mod: {
// TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
DCHECK(d0.is(i.InputDoubleRegister(0)));
DCHECK(d1.is(i.InputDoubleRegister(1)));
DCHECK(d0.is(i.OutputDoubleRegister()));
// TODO(dcarney): make sure this saves all relevant registers.
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2);
break;
}
case kArm64Float32Max: {
@@ -1544,7 +1549,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64InsertLowWord32: {
// TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register tmp = scope.AcquireX();
__ Fmov(tmp, i.InputFloat64Register(0));
__ Bfi(tmp, i.InputRegister(1), 0, 32);
@@ -1553,7 +1558,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64Float64InsertHighWord32: {
// TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register tmp = scope.AcquireX();
__ Fmov(tmp.W(), i.InputFloat32Register(0));
__ Bfi(tmp, i.InputRegister(1), 32, 32);
@@ -1614,6 +1619,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrD:
__ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
break;
+ case kArm64LdrQ:
+ __ Ldr(i.OutputSimd128Register(), i.MemoryOperand());
+ break;
+ case kArm64StrQ:
+ __ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
+ break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
@@ -1745,10 +1756,438 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
+
+#define SIMD_UNOP_CASE(Op, Instr, FORMAT) \
+ case Op: \
+ __ Instr(i.OutputSimd128Register().V##FORMAT(), \
+ i.InputSimd128Register(0).V##FORMAT()); \
+ break;
+#define SIMD_WIDENING_UNOP_CASE(Op, Instr, WIDE, NARROW) \
+ case Op: \
+ __ Instr(i.OutputSimd128Register().V##WIDE(), \
+ i.InputSimd128Register(0).V##NARROW()); \
+ break;
+#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
+ case Op: \
+ __ Instr(i.OutputSimd128Register().V##FORMAT(), \
+ i.InputSimd128Register(0).V##FORMAT(), \
+ i.InputSimd128Register(1).V##FORMAT()); \
+ break;
+
+ case kArm64F32x4Splat: {
+ __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
+ break;
+ }
+ case kArm64F32x4ExtractLane: {
+ __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64F32x4ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V4S(),
+ src1 = i.InputSimd128Register(0).V4S();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
+ case kArm64F32x4Ne: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
+ i.InputSimd128Register(1).V4S());
+ __ Mvn(dst, dst);
+ break;
+ }
+ case kArm64F32x4Lt: {
+ __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ }
+ case kArm64F32x4Le: {
+ __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ }
+ case kArm64I32x4Splat: {
+ __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64I32x4ExtractLane: {
+ __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64I32x4ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V4S(),
+ src1 = i.InputSimd128Register(0).V4S();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8Low, Sxtl, 4S, 4H);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
+ SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
+ case kArm64I32x4Shl: {
+ __ Shl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I32x4ShrS: {
+ __ Sshr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt5(1));
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4AddHoriz, Addp, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
+ case kArm64I32x4Ne: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
+ i.InputSimd128Register(1).V4S());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
+ SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
+ case kArm64I32x4ShrU: {
+ __ Ushr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt5(1));
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
+ case kArm64I16x8Splat: {
+ __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64I16x8ExtractLane: {
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64I16x8ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V8H(),
+ src1 = i.InputSimd128Register(0).V8H();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
+ break;
+ }
+ SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16Low, Sxtl, 8H, 8B);
+ SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
+ SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
+ case kArm64I16x8Shl: {
+ __ Shl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I16x8ShrS: {
+ __ Sshr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I16x8SConvertI32x4: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat4S);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V4S());
+ src1 = temp;
+ }
+ __ Sqxtn(dst.V4H(), src0.V4S());
+ __ Sqxtn2(dst.V8H(), src1.V4S());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddSaturateS, Sqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
+ case kArm64I16x8Ne: {
+ VRegister dst = i.OutputSimd128Register().V8H();
+ __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
+ i.InputSimd128Register(1).V8H());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
+ case kArm64I16x8UConvertI8x16Low: {
+ __ Uxtl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8B());
+ break;
+ }
+ case kArm64I16x8UConvertI8x16High: {
+ __ Uxtl2(i.OutputSimd128Register().V8H(),
+ i.InputSimd128Register(0).V16B());
+ break;
+ }
+ case kArm64I16x8ShrU: {
+ __ Ushr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I16x8UConvertI32x4: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat4S);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V4S());
+ src1 = temp;
+ }
+ __ Uqxtn(dst.V4H(), src0.V4S());
+ __ Uqxtn2(dst.V8H(), src1.V4S());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I16x8AddSaturateU, Uqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSaturateU, Uqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
+ case kArm64I8x16Splat: {
+ __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64I8x16ExtractLane: {
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64I8x16ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V16B(),
+ src1 = i.InputSimd128Register(0).V16B();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
+ case kArm64I8x16Shl: {
+ __ Shl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I8x16ShrS: {
+ __ Sshr(i.OutputSimd128Register().V16B(),
+ i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+ break;
+ }
+ case kArm64I8x16SConvertI16x8: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat8H);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V8H());
+ src1 = temp;
+ }
+ __ Sqxtn(dst.V8B(), src0.V8H());
+ __ Sqxtn2(dst.V16B(), src1.V8H());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16AddSaturateS, Sqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
+ case kArm64I8x16Ne: {
+ VRegister dst = i.OutputSimd128Register().V16B();
+ __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
+ case kArm64I8x16ShrU: {
+ __ Ushr(i.OutputSimd128Register().V16B(),
+ i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+ break;
+ }
+ case kArm64I8x16UConvertI16x8: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat8H);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V8H());
+ src1 = temp;
+ }
+ __ Uqxtn(dst.V8B(), src0.V8H());
+ __ Uqxtn2(dst.V16B(), src1.V8H());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I8x16AddSaturateU, Uqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSaturateU, Uqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
+ case kArm64S128Zero: {
+ __ Movi(i.OutputSimd128Register().V16B(), 0);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64S128And, And, 16B);
+ SIMD_BINOP_CASE(kArm64S128Or, Orr, 16B);
+ SIMD_BINOP_CASE(kArm64S128Xor, Eor, 16B);
+ SIMD_UNOP_CASE(kArm64S128Not, Mvn, 16B);
+ case kArm64S128Select: {
+ VRegister dst = i.OutputSimd128Register().V16B();
+ DCHECK(dst.is(i.InputSimd128Register(0).V16B()));
+ __ Bsl(dst, i.InputSimd128Register(1).V16B(),
+ i.InputSimd128Register(2).V16B());
+ break;
+ }
+ case kArm64S32x4Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register().V4S(),
+ src0 = i.InputSimd128Register(0).V4S(),
+ src1 = i.InputSimd128Register(1).V4S();
+ // Check for in-place shuffles.
+ // If dst == src0 == src1, then the shuffle is unary and we only use src0.
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat4S);
+ if (dst.is(src0)) {
+ __ Mov(temp, src0);
+ src0 = temp;
+ } else if (dst.is(src1)) {
+ __ Mov(temp, src1);
+ src1 = temp;
+ }
+ // Perform shuffle as a vmov per lane.
+ int32_t shuffle = i.InputInt32(2);
+ for (int i = 0; i < 4; i++) {
+ VRegister src = src0;
+ int lane = shuffle & 0x7;
+ if (lane >= 4) {
+ src = src1;
+ lane &= 0x3;
+ }
+ __ Mov(dst, i, src, lane);
+ shuffle >>= 8;
+ }
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64S32x4ZipLeft, Zip1, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4ZipRight, Zip2, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4UnzipLeft, Uzp1, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4UnzipRight, Uzp2, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4TransposeLeft, Trn1, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4TransposeRight, Trn2, 4S);
+ SIMD_BINOP_CASE(kArm64S16x8ZipLeft, Zip1, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8ZipRight, Zip2, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8UnzipLeft, Uzp1, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8UnzipRight, Uzp2, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8TransposeLeft, Trn1, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8TransposeRight, Trn2, 8H);
+ SIMD_BINOP_CASE(kArm64S8x16ZipLeft, Zip1, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16ZipRight, Zip2, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16UnzipLeft, Uzp1, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16UnzipRight, Uzp2, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16TransposeLeft, Trn1, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16TransposeRight, Trn2, 16B);
+ case kArm64S8x16Concat: {
+ __ Ext(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B(), i.InputInt4(2));
+ break;
+ }
+ case kArm64S8x16Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register().V16B(),
+ src0 = i.InputSimd128Register(0).V16B(),
+ src1 = i.InputSimd128Register(1).V16B();
+ // Unary shuffle table is in src0, binary shuffle table is in src0, src1,
+ // which must be consecutive.
+ int64_t mask = 0;
+ if (src0.is(src1)) {
+ mask = 0x0F0F0F0F;
+ } else {
+ mask = 0x1F1F1F1F;
+ DCHECK(AreConsecutive(src0, src1));
+ }
+ int64_t imm1 =
+ (i.InputInt32(2) & mask) | ((i.InputInt32(3) & mask) << 32);
+ int64_t imm2 =
+ (i.InputInt32(4) & mask) | ((i.InputInt32(5) & mask) << 32);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat16B);
+ __ Movi(temp, imm2, imm1);
+
+ if (src0.is(src1)) {
+ __ Tbl(dst, src0, temp.V16B());
+ } else {
+ __ Tbl(dst, src0, src1, temp.V16B());
+ }
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64S32x2Reverse, Rev64, 4S);
+ SIMD_UNOP_CASE(kArm64S16x4Reverse, Rev64, 8H);
+ SIMD_UNOP_CASE(kArm64S16x2Reverse, Rev32, 8H);
+ SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
+ SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
+ SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
+
+#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
+ case Op: { \
+ UseScratchRegisterScope scope(tasm()); \
+ VRegister temp = scope.AcquireV(format); \
+ __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
+ __ Umov(i.OutputRegister32(), temp, 0); \
+ break; \
+ }
+ SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64S1x8AllTrue, Uminv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64S1x16AnyTrue, Umaxv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64S1x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
} // NOLINT(readability/fn_size)
+#undef SIMD_UNOP_CASE
+#undef SIMD_WIDENING_UNOP_CASE
+#undef SIMD_BINOP_CASE
+#undef SIMD_REDUCE_OP_CASE
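[Illustrative note, not part of the patch] The SIMD_REDUCE_OP_CASE block above implements the S1xN AnyTrue/AllTrue opcodes with one across-lanes reduction: Umaxv of the lane values is non-zero iff any lane is non-zero, and Uminv is non-zero iff all lanes are non-zero; Umov then moves the scalar result out. A scalar model of that reduction, written as standalone C++ purely to illustrate the idea (not V8 code):

#include <cstdint>

// Model of the Umaxv/Uminv reductions: AnyTrue is "max over lanes != 0",
// AllTrue is "min over lanes != 0". Lane width and count are parameters here;
// the generated code fixes them per opcode (4S, 8H, 16B).
static bool AnyTrueModel(const uint32_t* lanes, int count) {
  uint32_t max = 0;
  for (int i = 0; i < count; ++i) max = lanes[i] > max ? lanes[i] : max;
  return max != 0;
}

static bool AllTrueModel(const uint32_t* lanes, int count) {
  uint32_t min = ~0u;
  for (int i = 0; i < count; ++i) min = lanes[i] < min ? lanes[i] : min;
  return min != 0;
}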
// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
@@ -1843,9 +2282,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
@@ -1853,7 +2292,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -1903,7 +2342,7 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register input = i.InputRegister32(0);
Register temp = scope.AcquireX();
size_t const case_count = instr->InputCount() - 2;
@@ -1930,9 +2369,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -1950,11 +2389,11 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
// Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
frame->AllocateSavedCalleeRegisterSlots(saved_count *
(kDoubleSize / kPointerSize));
}
@@ -1984,7 +2423,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Prologue(this->info()->GeneratePreagedPrologue());
} else {
__ Push(lr, fp);
- __ Mov(fp, masm_.StackPointer());
+ __ Mov(fp, __ StackPointer());
}
if (!info()->GeneratePreagedPrologue()) {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -2004,7 +2443,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
if (info()->IsWasm() && shrink_slots > 128) {
@@ -2017,11 +2456,10 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
- __ Mov(
- scratch,
- Operand(ExternalReference::address_of_real_stack_limit(isolate())));
+ __ Mov(scratch, Operand(ExternalReference::address_of_real_stack_limit(
+ __ isolate())));
__ Ldr(scratch, MemOperand(scratch));
__ Add(scratch, scratch, Operand(shrink_slots * kPointerSize));
__ Cmp(__ StackPointer(), scratch);
@@ -2040,7 +2478,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2065,7 +2503,7 @@ void CodeGenerator::AssembleConstructFrame() {
bool is_stub_frame =
!descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
if (is_stub_frame) {
- UseScratchRegisterScope temps(masm());
+ UseScratchRegisterScope temps(tasm());
Register temp = temps.AcquireX();
__ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
__ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
@@ -2073,11 +2511,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
__ PushCPURegList(saves_fp);
}
// Save registers.
@@ -2103,7 +2541,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore fp registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
if (saves_fp.Count() != 0) {
__ PopCPURegList(saves_fp);
@@ -2155,7 +2593,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2168,23 +2606,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ Mov(g.ToRegister(destination), src);
} else {
- __ Str(src, g.ToMemOperand(destination, masm()));
+ __ Str(src, g.ToMemOperand(destination, tasm()));
}
} else if (source->IsStackSlot()) {
- MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand src = g.ToMemOperand(source, tasm());
DCHECK(destination->IsRegister() || destination->IsStackSlot());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ __ Str(temp, g.ToMemOperand(destination, tasm()));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(ConstantOperand::cast(source));
if (destination->IsRegister() || destination->IsStackSlot()) {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: scope.AcquireX();
if (src.type() == Constant::kHeapObject) {
@@ -2193,65 +2631,81 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
- __ LoadObject(dst, src_object);
+ __ Mov(dst, src_object);
}
} else {
__ Mov(dst, g.ToImmediate(source));
}
if (destination->IsStackSlot()) {
- __ Str(dst, g.ToMemOperand(destination, masm()));
+ __ Str(dst, g.ToMemOperand(destination, tasm()));
}
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination).S();
+ VRegister dst = g.ToDoubleRegister(destination).S();
__ Fmov(dst, src.ToFloat32());
} else {
DCHECK(destination->IsFPStackSlot());
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ Str(wzr, g.ToMemOperand(destination, masm()));
+ __ Str(wzr, g.ToMemOperand(destination, tasm()));
} else {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireS();
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ __ Str(temp, g.ToMemOperand(destination, tasm()));
}
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination);
- __ Fmov(dst, src.ToFloat64());
+ VRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(dst, src.ToFloat64().value());
} else {
DCHECK(destination->IsFPStackSlot());
- if (bit_cast<int64_t>(src.ToFloat64()) == 0) {
- __ Str(xzr, g.ToMemOperand(destination, masm()));
+ if (src.ToFloat64().AsUint64() == 0) {
+ __ Str(xzr, g.ToMemOperand(destination, tasm()));
} else {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Fmov(temp, src.ToFloat64());
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64().value());
+ __ Str(temp, g.ToMemOperand(destination, tasm()));
}
}
}
} else if (source->IsFPRegister()) {
- FPRegister src = g.ToDoubleRegister(source);
+ VRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination);
+ VRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ Str(src, g.ToMemOperand(destination, masm()));
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (destination->IsSimd128StackSlot()) {
+ __ Str(src.Q(), dst);
+ } else {
+ __ Str(src, dst);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand src = g.ToMemOperand(source, tasm());
if (destination->IsFPRegister()) {
- __ Ldr(g.ToDoubleRegister(destination), src);
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (destination->IsSimd128Register()) {
+ __ Ldr(dst.Q(), src);
+ } else {
+ __ Ldr(dst, src);
+ }
} else {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Ldr(temp, src);
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (destination->IsSimd128StackSlot()) {
+ __ Ldr(temp.Q(), src);
+ __ Str(temp.Q(), dst);
+ } else {
+ __ Ldr(temp, src);
+ __ Str(temp, dst);
+ }
}
} else {
UNREACHABLE();
@@ -2266,7 +2720,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// combinations are possible.
if (source->IsRegister()) {
// Register-register.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
@@ -2276,36 +2730,49 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Mov(dst, temp);
} else {
DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination, masm());
+ MemOperand dst = g.ToMemOperand(destination, tasm());
__ Mov(temp, src);
__ Ldr(src, dst);
__ Str(temp, dst);
}
} else if (source->IsStackSlot() || source->IsFPStackSlot()) {
- UseScratchRegisterScope scope(masm());
- DoubleRegister temp_0 = scope.AcquireD();
- DoubleRegister temp_1 = scope.AcquireD();
- MemOperand src = g.ToMemOperand(source, masm());
- MemOperand dst = g.ToMemOperand(destination, masm());
- __ Ldr(temp_0, src);
- __ Ldr(temp_1, dst);
- __ Str(temp_0, dst);
- __ Str(temp_1, src);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp_0 = scope.AcquireD();
+ VRegister temp_1 = scope.AcquireD();
+ MemOperand src = g.ToMemOperand(source, tasm());
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (source->IsSimd128StackSlot()) {
+ __ Ldr(temp_0.Q(), src);
+ __ Ldr(temp_1.Q(), dst);
+ __ Str(temp_0.Q(), dst);
+ __ Str(temp_1.Q(), src);
+ } else {
+ __ Ldr(temp_0, src);
+ __ Ldr(temp_1, dst);
+ __ Str(temp_0, dst);
+ __ Str(temp_1, src);
+ }
} else if (source->IsFPRegister()) {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- FPRegister src = g.ToDoubleRegister(source);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ VRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination);
+ VRegister dst = g.ToDoubleRegister(destination);
__ Fmov(temp, src);
__ Fmov(src, dst);
__ Fmov(dst, temp);
} else {
DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination, masm());
- __ Fmov(temp, src);
- __ Ldr(src, dst);
- __ Str(temp, dst);
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (source->IsSimd128Register()) {
+ __ Fmov(temp.Q(), src.Q());
+ __ Ldr(src.Q(), dst);
+ __ Str(temp.Q(), dst);
+ } else {
+ __ Fmov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ }
}
} else {
// No other combinations are possible.
@@ -2328,13 +2795,13 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- intptr_t current_pc = masm()->pc_offset();
+ intptr_t current_pc = tasm()->pc_offset();
if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK((padding_size % kInstructionSize) == 0);
InstructionAccurateScope instruction_accurate(
- masm(), padding_size / kInstructionSize);
+ tasm(), padding_size / kInstructionSize);
while (padding_size > 0) {
__ nop();
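[Illustrative note, not part of the patch] The kArm64S8x16Shuffle case in the hunk above materializes the 16 packed byte indices as a 128-bit Movi constant and then issues a one- or two-register TBL. In scalar terms, each output byte is the byte of src0 (unary shuffle) or of the src0:src1 concatenation (binary shuffle) selected by the corresponding masked index. A scalar model of that semantics, as standalone C++ for illustration only:

#include <cstdint>

// Model of the TBL-based byte shuffle: indices are masked with 0x0F for a
// unary shuffle (single 16-byte table) or 0x1F for a binary shuffle (the
// 32-byte table formed by two consecutive registers). Hardware TBL yields 0
// for out-of-range indices; the masking keeps them in range here.
static void Shuffle8x16Model(const uint8_t src0[16], const uint8_t src1[16],
                             const uint8_t indices[16], bool unary,
                             uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) {
    uint8_t idx = indices[i] & (unary ? 0x0F : 0x1F);
    dst[i] = idx < 16 ? src0[idx] : src1[idx - 16];
  }
}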
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 898a9e9b35..65c8729bdb 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -143,6 +143,8 @@ namespace compiler {
V(Arm64StrS) \
V(Arm64LdrD) \
V(Arm64StrD) \
+ V(Arm64LdrQ) \
+ V(Arm64StrQ) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
V(Arm64Strb) \
@@ -153,7 +155,149 @@ namespace compiler {
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
- V(Arm64Str)
+ V(Arm64Str) \
+ V(Arm64F32x4Splat) \
+ V(Arm64F32x4ExtractLane) \
+ V(Arm64F32x4ReplaceLane) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4Abs) \
+ V(Arm64F32x4Neg) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Add) \
+ V(Arm64F32x4AddHoriz) \
+ V(Arm64F32x4Sub) \
+ V(Arm64F32x4Mul) \
+ V(Arm64F32x4Min) \
+ V(Arm64F32x4Max) \
+ V(Arm64F32x4Eq) \
+ V(Arm64F32x4Ne) \
+ V(Arm64F32x4Lt) \
+ V(Arm64F32x4Le) \
+ V(Arm64I32x4Splat) \
+ V(Arm64I32x4ExtractLane) \
+ V(Arm64I32x4ReplaceLane) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4SConvertI16x8Low) \
+ V(Arm64I32x4SConvertI16x8High) \
+ V(Arm64I32x4Neg) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Add) \
+ V(Arm64I32x4AddHoriz) \
+ V(Arm64I32x4Sub) \
+ V(Arm64I32x4Mul) \
+ V(Arm64I32x4MinS) \
+ V(Arm64I32x4MaxS) \
+ V(Arm64I32x4Eq) \
+ V(Arm64I32x4Ne) \
+ V(Arm64I32x4GtS) \
+ V(Arm64I32x4GeS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4UConvertI16x8Low) \
+ V(Arm64I32x4UConvertI16x8High) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64I32x4MinU) \
+ V(Arm64I32x4MaxU) \
+ V(Arm64I32x4GtU) \
+ V(Arm64I32x4GeU) \
+ V(Arm64I16x8Splat) \
+ V(Arm64I16x8ExtractLane) \
+ V(Arm64I16x8ReplaceLane) \
+ V(Arm64I16x8SConvertI8x16Low) \
+ V(Arm64I16x8SConvertI8x16High) \
+ V(Arm64I16x8Neg) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64I16x8Add) \
+ V(Arm64I16x8AddSaturateS) \
+ V(Arm64I16x8AddHoriz) \
+ V(Arm64I16x8Sub) \
+ V(Arm64I16x8SubSaturateS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8MinS) \
+ V(Arm64I16x8MaxS) \
+ V(Arm64I16x8Eq) \
+ V(Arm64I16x8Ne) \
+ V(Arm64I16x8GtS) \
+ V(Arm64I16x8GeS) \
+ V(Arm64I16x8UConvertI8x16Low) \
+ V(Arm64I16x8UConvertI8x16High) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64I16x8AddSaturateU) \
+ V(Arm64I16x8SubSaturateU) \
+ V(Arm64I16x8MinU) \
+ V(Arm64I16x8MaxU) \
+ V(Arm64I16x8GtU) \
+ V(Arm64I16x8GeU) \
+ V(Arm64I8x16Splat) \
+ V(Arm64I8x16ExtractLane) \
+ V(Arm64I8x16ReplaceLane) \
+ V(Arm64I8x16Neg) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16Add) \
+ V(Arm64I8x16AddSaturateS) \
+ V(Arm64I8x16Sub) \
+ V(Arm64I8x16SubSaturateS) \
+ V(Arm64I8x16Mul) \
+ V(Arm64I8x16MinS) \
+ V(Arm64I8x16MaxS) \
+ V(Arm64I8x16Eq) \
+ V(Arm64I8x16Ne) \
+ V(Arm64I8x16GtS) \
+ V(Arm64I8x16GeS) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16AddSaturateU) \
+ V(Arm64I8x16SubSaturateU) \
+ V(Arm64I8x16MinU) \
+ V(Arm64I8x16MaxU) \
+ V(Arm64I8x16GtU) \
+ V(Arm64I8x16GeU) \
+ V(Arm64S128Zero) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64S8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64S1x4AnyTrue) \
+ V(Arm64S1x4AllTrue) \
+ V(Arm64S1x8AnyTrue) \
+ V(Arm64S1x8AllTrue) \
+ V(Arm64S1x16AnyTrue) \
+ V(Arm64S1x16AllTrue)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
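[Illustrative note, not part of the patch] The opcode additions above extend the target-specific X-macro list: a single V(...) list is expanded in several places (the ArchOpcode enum, name tables, the scheduler switch below) so each new SIMD opcode only has to be declared once. A small self-contained illustration of that pattern, with an invented three-entry list standing in for the real one:

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(Arm64Add)               \
  V(Arm64LdrQ)              \
  V(Arm64S8x16Shuffle)

// Expansion 1: the enum of opcodes.
enum DemoOpcode {
#define DECLARE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
};

// Expansion 2: a name table kept in sync automatically.
static const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define CASE(Name) \
  case k##Name:    \
    return #Name;
    DEMO_OPCODE_LIST(CASE)
#undef CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", DemoOpcodeName(kArm64LdrQ)); }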
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index d3504dfd22..994e157e17 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -132,6 +132,148 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
+ case kArm64F32x4Splat:
+ case kArm64F32x4ExtractLane:
+ case kArm64F32x4ReplaceLane:
+ case kArm64F32x4SConvertI32x4:
+ case kArm64F32x4UConvertI32x4:
+ case kArm64F32x4Abs:
+ case kArm64F32x4Neg:
+ case kArm64F32x4RecipApprox:
+ case kArm64F32x4RecipSqrtApprox:
+ case kArm64F32x4Add:
+ case kArm64F32x4AddHoriz:
+ case kArm64F32x4Sub:
+ case kArm64F32x4Mul:
+ case kArm64F32x4Min:
+ case kArm64F32x4Max:
+ case kArm64F32x4Eq:
+ case kArm64F32x4Ne:
+ case kArm64F32x4Lt:
+ case kArm64F32x4Le:
+ case kArm64I32x4Splat:
+ case kArm64I32x4ExtractLane:
+ case kArm64I32x4ReplaceLane:
+ case kArm64I32x4SConvertF32x4:
+ case kArm64I32x4SConvertI16x8Low:
+ case kArm64I32x4SConvertI16x8High:
+ case kArm64I32x4Neg:
+ case kArm64I32x4Shl:
+ case kArm64I32x4ShrS:
+ case kArm64I32x4Add:
+ case kArm64I32x4AddHoriz:
+ case kArm64I32x4Sub:
+ case kArm64I32x4Mul:
+ case kArm64I32x4MinS:
+ case kArm64I32x4MaxS:
+ case kArm64I32x4Eq:
+ case kArm64I32x4Ne:
+ case kArm64I32x4GtS:
+ case kArm64I32x4GeS:
+ case kArm64I32x4UConvertF32x4:
+ case kArm64I32x4UConvertI16x8Low:
+ case kArm64I32x4UConvertI16x8High:
+ case kArm64I32x4ShrU:
+ case kArm64I32x4MinU:
+ case kArm64I32x4MaxU:
+ case kArm64I32x4GtU:
+ case kArm64I32x4GeU:
+ case kArm64I16x8Splat:
+ case kArm64I16x8ExtractLane:
+ case kArm64I16x8ReplaceLane:
+ case kArm64I16x8SConvertI8x16Low:
+ case kArm64I16x8SConvertI8x16High:
+ case kArm64I16x8Neg:
+ case kArm64I16x8Shl:
+ case kArm64I16x8ShrS:
+ case kArm64I16x8SConvertI32x4:
+ case kArm64I16x8Add:
+ case kArm64I16x8AddSaturateS:
+ case kArm64I16x8AddHoriz:
+ case kArm64I16x8Sub:
+ case kArm64I16x8SubSaturateS:
+ case kArm64I16x8Mul:
+ case kArm64I16x8MinS:
+ case kArm64I16x8MaxS:
+ case kArm64I16x8Eq:
+ case kArm64I16x8Ne:
+ case kArm64I16x8GtS:
+ case kArm64I16x8GeS:
+ case kArm64I16x8UConvertI8x16Low:
+ case kArm64I16x8UConvertI8x16High:
+ case kArm64I16x8ShrU:
+ case kArm64I16x8UConvertI32x4:
+ case kArm64I16x8AddSaturateU:
+ case kArm64I16x8SubSaturateU:
+ case kArm64I16x8MinU:
+ case kArm64I16x8MaxU:
+ case kArm64I16x8GtU:
+ case kArm64I16x8GeU:
+ case kArm64I8x16Splat:
+ case kArm64I8x16ExtractLane:
+ case kArm64I8x16ReplaceLane:
+ case kArm64I8x16Neg:
+ case kArm64I8x16Shl:
+ case kArm64I8x16ShrS:
+ case kArm64I8x16SConvertI16x8:
+ case kArm64I8x16Add:
+ case kArm64I8x16AddSaturateS:
+ case kArm64I8x16Sub:
+ case kArm64I8x16SubSaturateS:
+ case kArm64I8x16Mul:
+ case kArm64I8x16MinS:
+ case kArm64I8x16MaxS:
+ case kArm64I8x16Eq:
+ case kArm64I8x16Ne:
+ case kArm64I8x16GtS:
+ case kArm64I8x16GeS:
+ case kArm64I8x16UConvertI16x8:
+ case kArm64I8x16AddSaturateU:
+ case kArm64I8x16SubSaturateU:
+ case kArm64I8x16ShrU:
+ case kArm64I8x16MinU:
+ case kArm64I8x16MaxU:
+ case kArm64I8x16GtU:
+ case kArm64I8x16GeU:
+ case kArm64S128Zero:
+ case kArm64S128And:
+ case kArm64S128Or:
+ case kArm64S128Xor:
+ case kArm64S128Not:
+ case kArm64S128Select:
+ case kArm64S32x4ZipLeft:
+ case kArm64S32x4ZipRight:
+ case kArm64S32x4UnzipLeft:
+ case kArm64S32x4UnzipRight:
+ case kArm64S32x4TransposeLeft:
+ case kArm64S32x4TransposeRight:
+ case kArm64S32x4Shuffle:
+ case kArm64S16x8ZipLeft:
+ case kArm64S16x8ZipRight:
+ case kArm64S16x8UnzipLeft:
+ case kArm64S16x8UnzipRight:
+ case kArm64S16x8TransposeLeft:
+ case kArm64S16x8TransposeRight:
+ case kArm64S8x16ZipLeft:
+ case kArm64S8x16ZipRight:
+ case kArm64S8x16UnzipLeft:
+ case kArm64S8x16UnzipRight:
+ case kArm64S8x16TransposeLeft:
+ case kArm64S8x16TransposeRight:
+ case kArm64S8x16Concat:
+ case kArm64S8x16Shuffle:
+ case kArm64S32x2Reverse:
+ case kArm64S16x4Reverse:
+ case kArm64S16x2Reverse:
+ case kArm64S8x8Reverse:
+ case kArm64S8x4Reverse:
+ case kArm64S8x2Reverse:
+ case kArm64S1x4AnyTrue:
+ case kArm64S1x4AllTrue:
+ case kArm64S1x8AnyTrue:
+ case kArm64S1x8AllTrue:
+ case kArm64S1x16AnyTrue:
+ case kArm64S1x16AllTrue:
return kNoOpcodeFlags;
case kArm64TestAndBranch32:
@@ -142,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrS:
case kArm64LdrD:
+ case kArm64LdrQ:
case kArm64Ldrb:
case kArm64Ldrsb:
case kArm64Ldrh:
@@ -158,6 +301,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
+ case kArm64StrQ:
case kArm64Strb:
case kArm64Strh:
case kArm64StrW:
@@ -172,7 +316,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 0e9fd0ca2b..f0e306a43c 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -103,13 +103,13 @@ class Arm64OperandGenerator final : public OperandGenerator {
case kArithmeticImm:
return Assembler::IsImmAddSub(value);
case kLoadStoreImm8:
- return IsLoadStoreImmediate(value, LSByte);
+ return IsLoadStoreImmediate(value, 0);
case kLoadStoreImm16:
- return IsLoadStoreImmediate(value, LSHalfword);
+ return IsLoadStoreImmediate(value, 1);
case kLoadStoreImm32:
- return IsLoadStoreImmediate(value, LSWord);
+ return IsLoadStoreImmediate(value, 2);
case kLoadStoreImm64:
- return IsLoadStoreImmediate(value, LSDoubleWord);
+ return IsLoadStoreImmediate(value, 3);
case kNoImmediate:
return false;
case kShift32Imm: // Fall through.
@@ -130,7 +130,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
}
private:
- bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
+ bool IsLoadStoreImmediate(int64_t value, unsigned size) {
return Assembler::IsImmLSScaled(value, size) ||
Assembler::IsImmLSUnscaled(value);
}
@@ -153,6 +153,12 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Arm64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
ImmediateMode operand_mode) {
@@ -162,6 +168,14 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
+void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Arm64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
struct ExtendingLoadMatcher {
ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
: matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
@@ -390,7 +404,6 @@ uint8_t GetBinopProperties(InstructionCode opcode) {
break;
default:
UNREACHABLE();
- return 0;
}
DCHECK_IMPLIES(MustCommuteCondField::decode(result),
CanCommuteField::decode(result));
@@ -518,8 +531,8 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
if (m->right().HasValue() && m->right().Value() >= 3) {
uint64_t value_minus_one = m->right().Value() - 1;
- if (base::bits::IsPowerOfTwo64(value_minus_one)) {
- return WhichPowerOf2_64(value_minus_one);
+ if (base::bits::IsPowerOfTwo(value_minus_one)) {
+ return WhichPowerOf2(value_minus_one);
}
}
return 0;
@@ -602,10 +615,10 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kArm64LdrQ;
+ immediate_mode = kNoImmediate;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -701,10 +714,10 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kArm64StrQ;
+ immediate_mode = kNoImmediate;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -773,9 +786,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -828,9 +838,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1898,7 +1905,6 @@ FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
return kNotEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -1961,7 +1967,6 @@ FlagsCondition MapForTbz(FlagsCondition cond) {
return kEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -1979,7 +1984,6 @@ FlagsCondition MapForCbz(FlagsCondition cond) {
return kNotEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -2396,6 +2400,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2403,7 +2408,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
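The new kMaxTableSwitchValueRange cap (2 << 16 = 131072) joins the existing space/time comparison that decides between ArchTableSwitch and ArchLookupSwitch. A standalone sketch of that decision on made-up switch shapes, using the same cost formulas but ignoring the min-value guard (illustrative only, not part of the patch):

#include <cstddef>
#include <cstdio>

// Prefer a jump table only when its weighted space/time cost beats a linear
// lookup and the value range fits under the new cap.
static bool PreferTableSwitch(size_t case_count, size_t value_range) {
  const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  // Dense switch: 100 cases over a range of 100 -> the table wins.
  std::printf("%d\n", PreferTableSwitch(100, 100));     // 1
  // Sparse switch: 4 cases spread over 1 << 20 -> the range cap rejects it.
  std::printf("%d\n", PreferTableSwitch(4, 1u << 20));  // 0
}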
@@ -2853,6 +2859,376 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4, 4) \
+ V(16x8, 8) \
+ V(8x16, 16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
+ V(F32x4Abs, kArm64F32x4Abs) \
+ V(F32x4Neg, kArm64F32x4Neg) \
+ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
+ V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \
+ V(I32x4Neg, kArm64I32x4Neg) \
+ V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
+ V(I32x4UConvertI16x8Low, kArm64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kArm64I32x4UConvertI16x8High) \
+ V(I16x8SConvertI8x16Low, kArm64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kArm64I16x8SConvertI8x16High) \
+ V(I16x8Neg, kArm64I16x8Neg) \
+ V(I16x8UConvertI8x16Low, kArm64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
+ V(I8x16Neg, kArm64I8x16Neg) \
+ V(S128Not, kArm64S128Not) \
+ V(S1x4AnyTrue, kArm64S1x4AnyTrue) \
+ V(S1x4AllTrue, kArm64S1x4AllTrue) \
+ V(S1x8AnyTrue, kArm64S1x8AnyTrue) \
+ V(S1x8AllTrue, kArm64S1x8AllTrue) \
+ V(S1x16AnyTrue, kArm64S1x16AnyTrue) \
+ V(S1x16AllTrue, kArm64S1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kArm64F32x4Add) \
+ V(F32x4AddHoriz, kArm64F32x4AddHoriz) \
+ V(F32x4Sub, kArm64F32x4Sub) \
+ V(F32x4Mul, kArm64F32x4Mul) \
+ V(F32x4Min, kArm64F32x4Min) \
+ V(F32x4Max, kArm64F32x4Max) \
+ V(F32x4Eq, kArm64F32x4Eq) \
+ V(F32x4Ne, kArm64F32x4Ne) \
+ V(F32x4Lt, kArm64F32x4Lt) \
+ V(F32x4Le, kArm64F32x4Le) \
+ V(I32x4Add, kArm64I32x4Add) \
+ V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
+ V(I32x4Sub, kArm64I32x4Sub) \
+ V(I32x4Mul, kArm64I32x4Mul) \
+ V(I32x4MinS, kArm64I32x4MinS) \
+ V(I32x4MaxS, kArm64I32x4MaxS) \
+ V(I32x4Eq, kArm64I32x4Eq) \
+ V(I32x4Ne, kArm64I32x4Ne) \
+ V(I32x4GtS, kArm64I32x4GtS) \
+ V(I32x4GeS, kArm64I32x4GeS) \
+ V(I32x4MinU, kArm64I32x4MinU) \
+ V(I32x4MaxU, kArm64I32x4MaxU) \
+ V(I32x4GtU, kArm64I32x4GtU) \
+ V(I32x4GeU, kArm64I32x4GeU) \
+ V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
+ V(I16x8Add, kArm64I16x8Add) \
+ V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
+ V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
+ V(I16x8Sub, kArm64I16x8Sub) \
+ V(I16x8SubSaturateS, kArm64I16x8SubSaturateS) \
+ V(I16x8Mul, kArm64I16x8Mul) \
+ V(I16x8MinS, kArm64I16x8MinS) \
+ V(I16x8MaxS, kArm64I16x8MaxS) \
+ V(I16x8Eq, kArm64I16x8Eq) \
+ V(I16x8Ne, kArm64I16x8Ne) \
+ V(I16x8GtS, kArm64I16x8GtS) \
+ V(I16x8GeS, kArm64I16x8GeS) \
+ V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
+ V(I16x8AddSaturateU, kArm64I16x8AddSaturateU) \
+ V(I16x8SubSaturateU, kArm64I16x8SubSaturateU) \
+ V(I16x8MinU, kArm64I16x8MinU) \
+ V(I16x8MaxU, kArm64I16x8MaxU) \
+ V(I16x8GtU, kArm64I16x8GtU) \
+ V(I16x8GeU, kArm64I16x8GeU) \
+ V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
+ V(I8x16Add, kArm64I8x16Add) \
+ V(I8x16AddSaturateS, kArm64I8x16AddSaturateS) \
+ V(I8x16Sub, kArm64I8x16Sub) \
+ V(I8x16SubSaturateS, kArm64I8x16SubSaturateS) \
+ V(I8x16Mul, kArm64I8x16Mul) \
+ V(I8x16MinS, kArm64I8x16MinS) \
+ V(I8x16MaxS, kArm64I8x16MaxS) \
+ V(I8x16Eq, kArm64I8x16Eq) \
+ V(I8x16Ne, kArm64I8x16Ne) \
+ V(I8x16GtS, kArm64I8x16GtS) \
+ V(I8x16GeS, kArm64I8x16GeS) \
+ V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
+ V(I8x16AddSaturateU, kArm64I8x16AddSaturateU) \
+ V(I8x16SubSaturateU, kArm64I8x16SubSaturateU) \
+ V(I8x16MinU, kArm64I8x16MinU) \
+ V(I8x16MaxU, kArm64I8x16MaxU) \
+ V(I8x16GtU, kArm64I8x16GtU) \
+ V(I8x16GeU, kArm64I8x16GeU) \
+ V(S128And, kArm64S128And) \
+ V(S128Or, kArm64S128Or) \
+ V(S128Xor, kArm64S128Xor)
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kArm64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kArm64##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kArm64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64S128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
+
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle. If
+// successful, writes the 32x4 shuffle indices.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ for (int i = 0; i < 4; i++) {
+ if (shuffle[i * 4] % 4 != 0) return false;
+ for (int j = 1; j < 4; j++) {
+ if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
+ }
+ shuffle32x4[i] = shuffle[i * 4] / 4;
+ }
+ return true;
+}
+
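To see what the new matcher accepts: every group of four byte indices must be a word-aligned, contiguous run, and each run collapses to one 32-bit lane index. A standalone copy of the check run on a half-swap shuffle (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Same logic as TryMatch32x4Shuffle above.
static bool Match32x4(const uint8_t* shuffle, uint8_t* shuffle32x4) {
  for (int i = 0; i < 4; i++) {
    if (shuffle[i * 4] % 4 != 0) return false;
    for (int j = 1; j < 4; j++) {
      if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
    }
    shuffle32x4[i] = shuffle[i * 4] / 4;
  }
  return true;
}

int main() {
  // Swaps the two 64-bit halves of the first operand: words 2, 3, 0, 1.
  const uint8_t swap_halves[16] = {8, 9, 10, 11, 12, 13, 14, 15,
                                   0, 1, 2,  3,  4,  5,  6,  7};
  uint8_t words[4];
  if (Match32x4(swap_halves, words)) {
    std::printf("%d %d %d %d\n", words[0], words[1], words[2], words[3]);
    // Prints: 2 3 0 1
  }
}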
+// Tries to match byte shuffle to concatenate (vext) operation. If successful,
+// writes the vext immediate value.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* vext) {
+ uint8_t start = shuffle[0];
+ int i = 1;
+ for (; i < 16 - start; i++) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = 16;
+ for (; i < 16; i++, wrap++) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *vext = start;
+ return true;
+}
+
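The concat matcher recognizes shuffles whose indices count up from some start value and wrap at 16, which is exactly the byte pattern an ext (vext) of the two sources by that start produces. A standalone copy of the check with a worked input (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Same logic as TryMatchConcat above.
static bool MatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* vext) {
  uint8_t start = shuffle[0];
  int i = 1;
  for (; i < 16 - start; i++) {
    if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
  }
  uint8_t wrap = 16;
  for (; i < 16; i++, wrap++) {
    if ((shuffle[i] & mask) != (wrap & mask)) return false;
  }
  *vext = start;
  return true;
}

int main() {
  // Bytes 4..19 of the concatenated pair {src0, src1}: an ext by 4.
  const uint8_t shuffle[16] = {4,  5,  6,  7,  8,  9,  10, 11,
                               12, 13, 14, 15, 16, 17, 18, 19};
  uint8_t imm;
  if (MatchConcat(shuffle, 0x1F, &imm)) {
    std::printf("ext #%d\n", imm);  // Prints: ext #4
  }
}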
+namespace {
+
+static const int kShuffleLanes = 16;
+static const int kMaxLaneIndex = 15;
+static const int kMaxShuffleIndex = 31;
+
+struct ShuffleEntry {
+ uint8_t shuffle[kShuffleLanes];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kArm64S32x4ZipLeft},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArm64S32x4ZipRight},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kArm64S32x4UnzipLeft},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kArm64S32x4UnzipRight},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kArm64S32x4TransposeLeft},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},

+ kArm64S32x4TransposeRight},
+ {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11},
+ kArm64S32x2Reverse},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kArm64S16x8ZipLeft},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kArm64S16x8ZipRight},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kArm64S16x8UnzipLeft},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kArm64S16x8UnzipRight},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kArm64S16x8TransposeLeft},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kArm64S16x8TransposeRight},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kArm64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kArm64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kArm64S8x16ZipLeft},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kArm64S8x16ZipRight},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kArm64S8x16UnzipLeft},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kArm64S8x16UnzipRight},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kArm64S8x16TransposeLeft},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kArm64S8x16TransposeRight},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArm64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArm64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kArm64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; i++) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kShuffleLanes; j++) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kShuffleLanes) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = kMaxShuffleIndex;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = kMaxLaneIndex;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < 16; i++) {
+ if (shuffle[i] < 16) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = kMaxLaneIndex;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = kMaxLaneIndex;
+ }
+ }
+ return mask;
+}
+
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; i--) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
+}
+
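Pack4Lanes folds four masked indices into one little-endian 32-bit immediate; the general S8x16Shuffle path below uses four of these to carry all 16 byte indices, and the mask from CanonicalizeShuffle folds "second source" indices back into 0..15 once a unary shuffle has had its unused input duplicated. A standalone sketch of that packing (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Same packing as Pack4Lanes above: the first index lands in the low byte of
// the immediate, the fourth in the high byte, each masked first.
static int32_t Pack4(const uint8_t* shuffle, uint8_t mask) {
  int32_t result = 0;
  for (int i = 3; i >= 0; i--) {
    result <<= 8;
    result |= shuffle[i] & mask;
  }
  return result;
}

int main() {
  // Indices that refer only to the second source (bytes 16..19).
  const uint8_t bytes[4] = {16, 17, 18, 19};
  // Binary shuffle: keep the full 0..31 index space.
  std::printf("0x%08x\n", static_cast<unsigned>(Pack4(bytes, 0x1F)));  // 0x13121110
  // Unary shuffle: the mask folds the indices back into lanes 0..3.
  std::printf("0x%08x\n", static_cast<unsigned>(Pack4(bytes, 0x0F)));  // 0x03020100
}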
+void ArrangeShuffleTable(Arm64OperandGenerator* g, Node* input0, Node* input1,
+ InstructionOperand* src0, InstructionOperand* src1) {
+ if (input0 == input1) {
+ // Unary, any q-register can be the table.
+ *src0 = *src1 = g->UseRegister(input0);
+ } else {
+ // Binary, table registers must be consecutive.
+ *src0 = g->UseFixed(input0, fp_fixed2);
+ *src1 = g->UseFixed(input1, fp_fixed3);
+ }
+}
+
+} // namespace
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
+ Arm64OperandGenerator g(this);
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t bias;
+ if (TryMatchConcat(shuffle, mask, &bias)) {
+ Emit(kArm64S8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(bias));
+ return;
+ }
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kArm64S32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ return;
+ }
+ // Code generator uses vtbl, arrange sources to form a valid lookup table.
+ InstructionOperand src0, src1;
+ ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
+ Emit(kArm64S8x16Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index fd2209ed53..18854dfebe 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -11,7 +11,6 @@
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -34,13 +33,6 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
- // Determines how to combine the frame state with the value
- // that is about to be plugged into this AstContext.
- OutputFrameStateCombine GetStateCombine() {
- return IsEffect() ? OutputFrameStateCombine::Ignore()
- : OutputFrameStateCombine::Push();
- }
-
// Plug a node into this expression context. Call this function in tail
// position in the Visit functions for expressions.
virtual void ProduceValue(Expression* expr, Node* value) = 0;
@@ -97,14 +89,11 @@ class AstGraphBuilder::AstValueContext final : public AstContext {
// Context to evaluate expression for a condition value (and side effects).
class AstGraphBuilder::AstTestContext final : public AstContext {
public:
- AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
- : AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
+ explicit AstTestContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kTest) {}
~AstTestContext() final;
void ProduceValue(Expression* expr, Node* value) final;
Node* ConsumeValue() final;
-
- private:
- TypeFeedbackId const feedback_id_;
};
@@ -286,12 +275,7 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
input_buffer_(nullptr),
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
- state_values_cache_(jsgraph),
- liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
- false, local_zone),
- frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
- FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
- info->scope()->num_stack_slots(), info->shared_info())) {
+ state_values_cache_(jsgraph) {
InitializeAstVisitor(info->isolate());
}
@@ -401,10 +385,6 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
- // Compute local variable liveness information and use it to relax
- // frame states.
- ClearNonLiveSlotsInFrameStates();
-
// Failures indicated by stack overflow.
return !HasStackOverflow();
}
@@ -431,8 +411,7 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build a stack-check before the body.
if (stack_check) {
- Node* node = NewNode(javascript()->StackCheck());
- PrepareFrameState(node, BailoutId::FunctionEntry());
+ NewNode(javascript()->StackCheck());
}
// Visit statements in the function body.
@@ -443,33 +422,6 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
}
-void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
- if (!FLAG_analyze_environment_liveness ||
- !info()->is_deoptimization_enabled()) {
- return;
- }
-
- NonLiveFrameStateSlotReplacer replacer(
- &state_values_cache_, jsgraph()->OptimizedOutConstant(),
- liveness_analyzer()->local_count(), false, local_zone());
- Variable* arguments = info()->scope()->arguments();
- if (arguments != nullptr && arguments->IsStackAllocated()) {
- replacer.MarkPermanentlyLive(arguments->index());
- }
- liveness_analyzer()->Run(&replacer);
- if (FLAG_trace_environment_liveness) {
- OFStream os(stdout);
- liveness_analyzer()->Print(os);
- }
-}
-
-
-// Gets the bailout id just before reading a variable proxy, but only for
-// unallocated variables.
-static BailoutId BeforeId(VariableProxy* proxy) {
- return proxy->var()->IsUnallocated() ? proxy->BeforeId() : BailoutId::None();
-}
-
static const char* GetDebugParameterName(Zone* zone, DeclarationScope* scope,
int index) {
#if DEBUG
@@ -490,9 +442,6 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
: builder_(builder),
parameters_count_(scope->num_parameters() + 1),
locals_count_(scope->num_stack_slots()),
- liveness_block_(IsLivenessAnalysisEnabled()
- ? builder_->liveness_analyzer()->NewBlock()
- : nullptr),
values_(builder_->local_zone()),
contexts_(builder_->local_zone()),
control_dependency_(control_dependency),
@@ -527,13 +476,10 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
values()->insert(values()->end(), locals_count(), undefined_constant);
}
-
-AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy,
- LivenessAnalyzerBlock* liveness_block)
+AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy)
: builder_(copy->builder_),
parameters_count_(copy->parameters_count_),
locals_count_(copy->locals_count_),
- liveness_block_(liveness_block),
values_(copy->zone()),
contexts_(copy->zone()),
control_dependency_(copy->control_dependency_),
@@ -559,10 +505,6 @@ void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
} else {
DCHECK(variable->IsStackLocal());
values()->at(variable->index() + parameters_count_) = node;
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->Bind(variable->index());
- }
}
}
@@ -575,25 +517,11 @@ Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
return values()->at(variable->index() + 1);
} else {
DCHECK(variable->IsStackLocal());
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->Lookup(variable->index());
- }
return values()->at(variable->index() + parameters_count_);
}
}
-void AstGraphBuilder::Environment::MarkAllLocalsLive() {
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- for (int i = 0; i < locals_count_; i++) {
- liveness_block()->Lookup(i);
- }
- }
-}
-
-
void AstGraphBuilder::Environment::RawParameterBind(int index, Node* node) {
DCHECK_LT(index, parameters_count());
values()->at(index) = node;
@@ -608,37 +536,24 @@ Node* AstGraphBuilder::Environment::RawParameterLookup(int index) {
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyForConditional() {
- LivenessAnalyzerBlock* copy_liveness_block = nullptr;
- if (liveness_block() != nullptr) {
- copy_liveness_block =
- builder_->liveness_analyzer()->NewBlock(liveness_block());
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- return new (zone()) Environment(this, copy_liveness_block);
+ return new (zone()) Environment(this);
}
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyAsUnreachable() {
- Environment* env = new (zone()) Environment(this, nullptr);
+ Environment* env = new (zone()) Environment(this);
env->MarkAsUnreachable();
return env;
}
AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForOsrEntry() {
- LivenessAnalyzerBlock* copy_block =
- liveness_block() == nullptr ? nullptr
- : builder_->liveness_analyzer()->NewBlock();
- return new (zone()) Environment(this, copy_block);
+ return new (zone()) Environment(this);
}
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyAndShareLiveness() {
- if (liveness_block() != nullptr) {
- // Finish the current liveness block before copying.
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- Environment* env = new (zone()) Environment(this, liveness_block());
+ Environment* env = new (zone()) Environment(this);
return env;
}
@@ -657,63 +572,6 @@ AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForLoop(
}
-void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
- int offset, int count) {
- bool should_update = false;
- Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
- if (*state_values == nullptr || (*state_values)->InputCount() != count) {
- should_update = true;
- } else {
- DCHECK(static_cast<size_t>(offset + count) <= values()->size());
- for (int i = 0; i < count; i++) {
- if ((*state_values)->InputAt(i) != env_values[i]) {
- should_update = true;
- break;
- }
- }
- }
- if (should_update) {
- const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
- (*state_values) = graph()->NewNode(op, count, env_values);
- }
-}
-
-
-Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
- OutputFrameStateCombine combine,
- bool owner_has_exception) {
- if (!builder()->info()->is_deoptimization_enabled()) {
- return builder()->GetEmptyFrameState();
- }
-
- UpdateStateValues(&parameters_node_, 0, parameters_count());
- UpdateStateValues(&locals_node_, parameters_count(), locals_count());
- UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
- stack_height());
-
- const Operator* op = common()->FrameState(
- ast_id, combine, builder()->frame_state_function_info());
-
- Node* result = graph()->NewNode(op, parameters_node_, locals_node_,
- stack_node_, builder()->current_context(),
- builder()->GetFunctionClosure(),
- builder()->graph()->start());
-
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- // If the owning node has an exception, register the checkpoint to the
- // predecessor so that the checkpoint is used for both the normal and the
- // exceptional paths. Yes, this is a terrible hack and we might want
- // to use an explicit frame state for the exceptional path.
- if (owner_has_exception) {
- liveness_block()->GetPredecessor()->Checkpoint(result);
- } else {
- liveness_block()->Checkpoint(result);
- }
- }
- return result;
-}
-
void AstGraphBuilder::Environment::PrepareForLoopExit(
Node* loop, BitVector* assigned_variables) {
if (IsMarkedAsUnreachable()) return;
@@ -743,17 +601,6 @@ void AstGraphBuilder::Environment::PrepareForLoopExit(
UpdateEffectDependency(effect_rename);
}
-bool AstGraphBuilder::Environment::IsLivenessAnalysisEnabled() {
- return FLAG_analyze_environment_liveness &&
- builder()->info()->is_deoptimization_enabled();
-}
-
-
-bool AstGraphBuilder::Environment::IsLivenessBlockConsistent() {
- return (!IsLivenessAnalysisEnabled() || IsMarkedAsUnreachable()) ==
- (liveness_block() == nullptr);
-}
-
AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
Expression::Context kind)
@@ -787,19 +634,16 @@ AstGraphBuilder::AstTestContext::~AstTestContext() {
void AstGraphBuilder::AstEffectContext::ProduceValue(Expression* expr,
Node* value) {
// The value is ignored.
- owner()->PrepareEagerCheckpoint(expr->id());
}
void AstGraphBuilder::AstValueContext::ProduceValue(Expression* expr,
Node* value) {
environment()->Push(value);
- owner()->PrepareEagerCheckpoint(expr->id());
}
void AstGraphBuilder::AstTestContext::ProduceValue(Expression* expr,
Node* value) {
- environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
- owner()->PrepareEagerCheckpoint(expr->id());
+ environment()->Push(owner()->BuildToBoolean(value));
}
@@ -906,7 +750,7 @@ void AstGraphBuilder::VisitForEffect(Expression* expr) {
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this, expr->test_id());
+ AstTestContext for_condition(this);
if (!CheckStackOverflow()) {
VisitNoStackOverflowCheck(expr);
} else {
@@ -1133,7 +977,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -1148,7 +992,7 @@ void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
- VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
while_loop.EndLoop();
}
@@ -1165,7 +1009,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
for_loop.BreakUnless(jsgraph()->TrueConstant());
}
- VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
+ VisitIterationBody(stmt, &for_loop);
for_loop.EndBody();
VisitIfNotNull(stmt->next());
for_loop.EndLoop();
@@ -1251,9 +1095,7 @@ void AstGraphBuilder::VisitConditional(Conditional* expr) {
void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(expr));
- Node* value = BuildVariableLoad(expr->var(), expr->id(), pair,
- ast_context()->GetStateCombine());
+ Node* value = BuildVariableLoad(expr->var(), pair);
ast_context()->ProduceValue(expr, value);
}
@@ -1272,7 +1114,6 @@ void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
expr->pattern(), expr->flags(),
FeedbackVector::GetIndex(expr->literal_slot()));
Node* literal = NewNode(op, closure);
- PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, literal);
}
@@ -1285,8 +1126,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
FeedbackVector::GetIndex(expr->literal_slot()), expr->properties_count());
Node* literal = NewNode(op, closure);
- PrepareFrameState(literal, expr->CreateLiteralId(),
- OutputFrameStateCombine::Push());
// The object is expected on the operand stack during computation of the
// property values and is the value of the entire expression.
@@ -1319,9 +1158,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
- Node* store = BuildNamedStoreOwn(literal, name, value, feedback);
- PrepareFrameState(store, key->id(),
- OutputFrameStateCombine::Ignore());
+ BuildNamedStoreOwn(literal, name, value, feedback);
BuildSetHomeObject(value, literal, property, 1);
} else {
VisitForEffect(property->value());
@@ -1337,9 +1174,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
Node* language = jsgraph()->Constant(SLOPPY);
const Operator* op = javascript()->CallRuntime(Runtime::kSetProperty);
- Node* set_property = NewNode(op, receiver, key, value, language);
- // SetProperty should not lazy deopt on an object literal.
- PrepareFrameState(set_property, BailoutId::None());
+ NewNode(op, receiver, key, value, language);
BuildSetHomeObject(value, receiver, property);
}
break;
@@ -1352,22 +1187,18 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
const Operator* op =
javascript()->CallRuntime(Runtime::kInternalSetPrototype);
- Node* set_prototype = NewNode(op, receiver, value);
- // SetPrototype should not lazy deopt on an object literal.
- PrepareFrameState(set_prototype, expr->GetIdForPropertySet(i));
+ NewNode(op, receiver, value);
break;
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1388,8 +1219,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* attr = jsgraph()->Constant(NONE);
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
- Node* call = NewNode(op, literal, name, getter, setter, attr);
- PrepareFrameState(call, it->second->bailout_id);
+ NewNode(op, literal, name, getter, setter, attr);
}
ast_context()->ProduceValue(expr, environment()->Pop());
}
@@ -1414,8 +1244,6 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
FeedbackVector::GetIndex(expr->literal_slot()), expr->values()->length());
Node* literal = NewNode(op, closure);
- PrepareFrameState(literal, expr->CreateLiteralId(),
- OutputFrameStateCombine::Push());
// The array is expected on the operand stack during computation of the
// element values.
@@ -1434,9 +1262,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
Node* literal = environment()->Top();
- Node* store = BuildKeyedStore(literal, index, value, pair);
- PrepareFrameState(store, expr->GetIdForElement(array_index),
- OutputFrameStateCombine::Ignore());
+ BuildKeyedStore(literal, index, value, pair);
}
ast_context()->ProduceValue(expr, environment()->Pop());
@@ -1448,19 +1274,11 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
- bool needs_frame_state_before = true;
// Evaluate LHS expression.
switch (assign_type) {
- case VARIABLE: {
- Variable* variable = expr->target()->AsVariableProxy()->var();
- if (variable->location() == VariableLocation::PARAMETER ||
- variable->location() == VariableLocation::LOCAL ||
- variable->location() == VariableLocation::CONTEXT) {
- needs_frame_state_before = false;
- }
+ case VARIABLE:
break;
- }
case NAMED_PROPERTY:
VisitForValue(property->obj());
break;
@@ -1483,9 +1301,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
VectorSlotPair pair =
CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair,
- OutputFrameStateCombine::Push());
+ old_value = BuildVariableLoad(proxy->var(), pair);
break;
}
case NAMED_PROPERTY: {
@@ -1494,8 +1310,6 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedLoad(object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -1504,8 +1318,6 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedLoad(object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1517,15 +1329,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VisitForValue(expr->value());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value =
- BuildBinaryOp(left, right, expr->binary_op(),
- expr->binary_operation()->BinaryOperationFeedbackId());
- PrepareFrameState(value, expr->binary_operation()->id(),
- OutputFrameStateCombine::Push());
+ Node* value = BuildBinaryOp(left, right, expr->binary_op());
environment()->Push(value);
- if (needs_frame_state_before) {
- PrepareEagerCheckpoint(expr->binary_operation()->id());
- }
} else {
VisitForValue(expr->value());
}
@@ -1536,24 +1341,19 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
- BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
- ast_context()->GetStateCombine());
+ BuildVariableAssignment(variable, value, expr->op(), feedback);
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildNamedStore(object, name, value, feedback);
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildKeyedStore(object, key, value, feedback);
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1565,16 +1365,25 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
ast_context()->ProduceValue(expr, value);
}
-void AstGraphBuilder::VisitSuspend(Suspend* expr) {
+void AstGraphBuilder::VisitYield(Yield* expr) {
// Generator functions are supported only by going through Ignition first.
UNREACHABLE();
}
+void AstGraphBuilder::VisitYieldStar(YieldStar* expr) {
+ // Generator functions are supported only by going through Ignition first.
+ UNREACHABLE();
+}
+
+void AstGraphBuilder::VisitAwait(Await* expr) {
+ // Generator functions are supported only by going through Ignition first.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
- Node* value = BuildThrowError(exception, expr->id());
+ Node* value = BuildThrowError(exception);
ast_context()->ProduceValue(expr, value);
}
@@ -1592,7 +1401,6 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
Node* object = environment()->Pop();
Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
value = BuildNamedLoad(object, name, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -1601,7 +1409,6 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = BuildKeyedLoad(object, key, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1627,9 +1434,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
case Call::GLOBAL_CALL: {
VariableProxy* proxy = callee->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
+ callee_value = BuildVariableLoad(proxy->var(), pair);
receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
@@ -1642,8 +1447,6 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* object = environment()->Top();
callee_value = BuildNamedLoad(object, name, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -1660,8 +1463,6 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* key = environment()->Pop();
Node* object = environment()->Top();
callee_value = BuildKeyedLoad(object, key, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -1694,17 +1495,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Create node to perform the function call.
CallFrequency frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
- const Operator* call =
- javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
- expr->tail_call_mode());
- PrepareEagerCheckpoint(expr->CallId());
+ const Operator* call = javascript()->Call(args->length() + 2, frequency,
+ feedback, receiver_hint);
Node* value = ProcessArguments(call, args->length() + 2);
- // The callee passed to the call, we just need to push something here to
- // satisfy the bailout location contract. The fullcodegen code will not
- // ever look at this value, so we just push optimized_out here.
- environment()->Push(jsgraph()->OptimizedOutConstant());
- PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
- environment()->Drop(1);
ast_context()->ProduceValue(expr, value);
}
@@ -1725,7 +1518,6 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
const Operator* call =
javascript()->Construct(args->length() + 2, frequency, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(expr, value);
}
@@ -1745,9 +1537,7 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// Create node to perform the JS runtime call.
const Operator* call = javascript()->Call(args->length() + 2);
- PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, value);
}
@@ -1766,12 +1556,7 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// Create node to perform the runtime call.
Runtime::FunctionId functionId = expr->function()->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
- if (expr->function()->intrinsic_type == Runtime::IntrinsicType::RUNTIME ||
- expr->function()->function_id == Runtime::kInlineCall) {
- PrepareEagerCheckpoint(expr->CallId());
- }
Node* value = ProcessArguments(call, args->length());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, value);
}
@@ -1812,9 +1597,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- old_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
+ old_value = BuildVariableLoad(proxy->var(), pair);
stack_depth = 0;
break;
}
@@ -1825,8 +1608,6 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedLoad(object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
stack_depth = 1;
break;
}
@@ -1838,8 +1619,6 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedLoad(object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
@@ -1851,12 +1630,9 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Convert old value into a number.
old_value = NewNode(javascript()->ToNumber(), old_value);
- PrepareFrameState(old_value, expr->ToNumberId(),
- OutputFrameStateCombine::Push());
// Create a proper eager frame state for the stores.
environment()->Push(old_value);
- PrepareEagerCheckpoint(expr->ToNumberId());
old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
@@ -1869,10 +1645,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
// Create node to perform +1/-1 operation.
- Node* value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
- expr->binary_op(), expr->CountBinOpFeedbackId());
- // This should never lazy deopt because we have converted to number before.
- PrepareFrameState(value, BailoutId::None());
+ Node* value =
+ BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
// Store the value.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
@@ -1880,25 +1654,20 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
environment()->Push(value);
- BuildVariableAssignment(variable, value, expr->op(), feedback,
- expr->AssignmentId());
+ BuildVariableAssignment(variable, value, expr->op(), feedback);
environment()->Pop();
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildNamedStore(object, name, value, feedback);
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildKeyedStore(object, key, value, feedback);
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1926,9 +1695,7 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
VisitForValue(expr->right());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->op(),
- expr->BinaryOperationFeedbackId());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ Node* value = BuildBinaryOp(left, right, expr->op());
ast_context()->ProduceValue(expr, value);
}
}
@@ -1951,7 +1718,6 @@ void AstGraphBuilder::VisitLiteralCompareNil(CompareOperation* expr,
VisitForValue(sub_expr);
Node* value_to_compare = environment()->Pop();
Node* value = NewNode(op, value_to_compare, nil_value);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
return ast_context()->ProduceValue(expr, value);
}
@@ -1962,7 +1728,6 @@ void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
Node* value = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
typeof_arg, jsgraph()->Constant(check));
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
return ast_context()->ProduceValue(expr, value);
}
@@ -2020,7 +1785,6 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, value);
}
@@ -2083,8 +1847,7 @@ void AstGraphBuilder::VisitDeclarations(Declaration::List* declarations) {
Node* decls = jsgraph()->Constant(data);
Node* vector = jsgraph()->Constant(feedback_vector);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
- Node* call = NewNode(op, decls, flags, vector);
- PrepareFrameState(call, BailoutId::Declarations());
+ NewNode(op, decls, flags, vector);
globals()->clear();
}
@@ -2094,13 +1857,10 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
Visit(stmt);
}
-
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
- LoopBuilder* loop,
- BailoutId stack_check_id) {
+ LoopBuilder* loop) {
ControlScopeForIteration scope(this, stmt, loop);
- Node* node = NewNode(javascript()->StackCheck());
- PrepareFrameState(node, stack_check_id);
+ NewNode(javascript()->StackCheck());
Visit(stmt->body());
}
@@ -2112,8 +1872,7 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
// "delete this" is allowed.
Variable* variable = expr->expression()->AsVariableProxy()->var();
DCHECK(is_sloppy(language_mode()) || variable->is_this());
- value = BuildVariableDelete(variable, expr->id(),
- ast_context()->GetStateCombine());
+ value = BuildVariableDelete(variable);
} else if (expr->expression()->IsProperty()) {
Property* property = expr->expression()->AsProperty();
VisitForValue(property->obj());
@@ -2122,7 +1881,6 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
Node* object = environment()->Pop();
Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
value = NewNode(javascript()->DeleteProperty(), object, key, mode);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
} else {
VisitForEffect(expr->expression());
value = jsgraph()->TrueConstant();
@@ -2143,10 +1901,7 @@ void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- Node* load =
- BuildVariableLoad(proxy->var(), expr->id(), pair,
- OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
+ Node* load = BuildVariableLoad(proxy->var(), pair, INSIDE_TYPEOF);
environment()->Push(load);
} else {
VisitForValue(expr);
@@ -2194,7 +1949,7 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
if (ast_context()->IsValue()) {
VisitForValue(expr->left());
Node* left = environment()->Top();
- condition = BuildToBoolean(left, expr->left()->test_id());
+ condition = BuildToBoolean(left);
} else {
VisitForTest(expr->left());
condition = environment()->Top();
@@ -2317,8 +2072,6 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
Handle<ScopeInfo> scope_info = scope->scope_info();
const Operator* op = javascript()->CreateScriptContext(scope_info);
Node* local_context = NewNode(op, GetFunctionClosure());
- PrepareFrameState(local_context, BailoutId::ScriptContext(),
- OutputFrameStateCombine::Push());
return local_context;
}
@@ -2346,26 +2099,23 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
: CreateArgumentsType::kMappedArguments;
const Operator* op = javascript()->CreateArguments(type);
Node* object = NewNode(op, GetFunctionClosure());
- PrepareFrameState(object, BailoutId::None());
// Assign the object to the {arguments} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
- BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None());
+ BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair());
return object;
}
Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
- Node* not_hole,
- BailoutId bailout_id) {
+ Node* not_hole) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
value, the_hole);
hole_check.If(check);
hole_check.Then();
- Node* error = BuildThrowReferenceError(variable, bailout_id);
+ Node* error = BuildThrowReferenceError(variable);
environment()->Push(error);
hole_check.Else();
environment()->Push(not_hole);
@@ -2373,10 +2123,8 @@ Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
return environment()->Pop();
}
-
Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
- Node* for_hole,
- BailoutId bailout_id) {
+ Node* for_hole) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
@@ -2385,16 +2133,14 @@ Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
hole_check.Then();
environment()->Push(for_hole);
hole_check.Else();
- Node* error = BuildThrowReferenceError(variable, bailout_id);
+ Node* error = BuildThrowReferenceError(variable);
environment()->Push(error);
hole_check.End();
return environment()->Pop();
}
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
- BailoutId bailout_id,
const VectorSlotPair& feedback,
- OutputFrameStateCombine combine,
TypeofMode typeof_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
switch (variable->location()) {
@@ -2403,7 +2149,6 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
Handle<Name> name = variable->name();
if (Node* node = TryLoadGlobalConstant(name)) return node;
Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
- PrepareFrameState(value, bailout_id, combine);
return value;
}
case VariableLocation::PARAMETER:
@@ -2413,9 +2158,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
if (variable->binding_needs_init()) {
// Perform check for uninitialized let/const variables.
if (value->op() == the_hole->op()) {
- value = BuildThrowReferenceError(variable, bailout_id);
+ value = BuildThrowReferenceError(variable);
} else if (value->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value);
}
}
return value;
@@ -2436,7 +2181,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
// Maybe specializer should be a parameter to the graph builder?
if (variable->binding_needs_init()) {
// Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value);
}
return value;
}
@@ -2445,13 +2190,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
-
-Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
- BailoutId bailout_id,
- OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
@@ -2460,7 +2201,6 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
const Operator* op = javascript()->DeleteProperty();
Node* result = NewNode(op, global, name, mode);
- PrepareFrameState(result, bailout_id, combine);
return result;
}
case VariableLocation::PARAMETER:
@@ -2474,13 +2214,11 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
-Node* AstGraphBuilder::BuildVariableAssignment(
- Variable* variable, Node* value, Token::Value op,
- const VectorSlotPair& feedback, BailoutId bailout_id,
- OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
+ Token::Value op,
+ const VectorSlotPair& feedback) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
@@ -2488,7 +2226,6 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Global var, const, or let variable.
Handle<Name> name = variable->name();
Node* store = BuildGlobalStore(name, value, feedback);
- PrepareFrameState(store, bailout_id, combine);
return store;
}
case VariableLocation::PARAMETER:
@@ -2505,9 +2242,9 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Perform an initialization check for let declared variables.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- return BuildThrowReferenceError(variable, bailout_id);
+ return BuildThrowReferenceError(variable);
} else if (current->opcode() == IrOpcode::kPhi) {
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value);
}
} else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
@@ -2515,7 +2252,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// to trigger bind operations outside the TDZ, via {super} calls.
Node* current = environment()->Lookup(variable);
if (current->op() != the_hole->op() && variable->is_this()) {
- value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckElseThrow(current, variable, value);
}
} else if (mode == CONST && op != Token::INIT &&
variable->is_sloppy_function_name()) {
@@ -2524,20 +2261,20 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// - ignored in sloppy mode.
DCHECK(!variable->binding_needs_init());
if (variable->throw_on_const_assignment(language_mode())) {
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
return value;
} else if (mode == CONST && op != Token::INIT) {
if (variable->binding_needs_init()) {
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- return BuildThrowReferenceError(variable, bailout_id);
+ return BuildThrowReferenceError(variable);
} else if (current->opcode() == IrOpcode::kPhi) {
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value);
}
}
// Assignment to const is exception in all modes.
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
environment()->Bind(variable, value);
return value;
@@ -2549,7 +2286,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op);
- value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(current, variable, value);
} else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
// Note that the {this} variable is the only const variable being able
@@ -2558,7 +2295,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op);
- value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckElseThrow(current, variable, value);
}
} else if (mode == CONST && op != Token::INIT &&
variable->is_sloppy_function_name()) {
@@ -2567,7 +2304,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// - ignored in sloppy mode.
DCHECK(!variable->binding_needs_init());
if (variable->throw_on_const_assignment(language_mode())) {
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
return value;
} else if (mode == CONST && op != Token::INIT) {
@@ -2575,10 +2312,10 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op);
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value);
}
// Assignment to const is exception in all modes.
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
return NewNode(op, value);
@@ -2588,7 +2325,6 @@ Node* AstGraphBuilder::BuildVariableAssignment(
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
@@ -2671,20 +2407,12 @@ Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
return result;
}
-
-Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
+Node* AstGraphBuilder::BuildToBoolean(Node* input) {
if (Node* node = TryFastToBoolean(input)) return node;
ToBooleanHints hints = ToBooleanHint::kAny;
return NewNode(javascript()->ToBoolean(hints), input);
}
-
-Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
- Node* object = NewNode(javascript()->ToObject(), input);
- PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
- return object;
-}
-
Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
LiteralProperty* property,
int slot_number) {
@@ -2694,39 +2422,30 @@ Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(slot_number));
Node* store = BuildNamedStore(value, name, home_object, feedback);
- PrepareFrameState(store, BailoutId::None(),
- OutputFrameStateCombine::Ignore());
return store;
}
-
-Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildThrowError(Node* exception) {
const Operator* op = javascript()->CallRuntime(Runtime::kThrow);
Node* call = NewNode(op, exception);
- PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
-
-Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
- BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
Node* variable_name = jsgraph()->Constant(variable->name());
const Operator* op = javascript()->CallRuntime(Runtime::kThrowReferenceError);
Node* call = NewNode(op, variable_name);
- PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
-
-Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildThrowConstAssignError() {
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowConstAssignError);
Node* call = NewNode(op);
- PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
@@ -2753,9 +2472,7 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
return control;
}
-
-Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
- TypeFeedbackId feedback_id) {
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
const Operator* js_op;
BinaryOperationHint hint = BinaryOperationHint::kAny;
switch (op) {
@@ -2837,7 +2554,6 @@ Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
- DCHECK_EQ(-1, info()->osr_expr_stack_height());
info()->set_osr_expr_stack_height(environment()->stack_height());
return true;
}
@@ -2845,35 +2561,6 @@ bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
}
-void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
- OutputFrameStateCombine combine) {
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- DCHECK(ast_id.IsNone() || info()->shared_info()->VerifyBailoutId(ast_id));
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node)->opcode());
- bool has_exception = NodeProperties::IsExceptionalCall(node);
- Node* state = environment()->Checkpoint(ast_id, combine, has_exception);
- NodeProperties::ReplaceFrameStateInput(node, state);
- }
-}
-
-void AstGraphBuilder::PrepareEagerCheckpoint(BailoutId ast_id) {
- if (environment()->GetEffectDependency()->opcode() == IrOpcode::kCheckpoint) {
- // We skip preparing a checkpoint if there already is one the current effect
- // dependency. This is just an optimization and not need for correctness.
- return;
- }
- if (ast_id != BailoutId::None()) {
- DCHECK(info()->shared_info()->VerifyBailoutId(ast_id));
- Node* node = NewNode(common()->Checkpoint());
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node)->opcode());
- Node* state = environment()->Checkpoint(ast_id);
- NodeProperties::ReplaceFrameStateInput(node, state);
- }
-}
-
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
if (loop_assignment_analysis_ == nullptr) return nullptr;
@@ -2919,10 +2606,8 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
*current_input++ = current_context();
}
if (has_frame_state) {
- // The frame state will be inserted later. Here we misuse
- // the {Dead} node as a sentinel to be later overwritten
- // with the real frame state.
- *current_input++ = jsgraph()->Dead();
+ DCHECK(!info()->is_deoptimization_enabled());
+ *current_input++ = GetEmptyFrameState();
}
if (has_effect) {
*current_input++ = environment_->GetEffectDependency();
@@ -2971,24 +2656,9 @@ void AstGraphBuilder::Environment::Merge(Environment* other) {
effect_dependency_ = other->effect_dependency_;
values_ = other->values_;
contexts_ = other->contexts_;
- if (IsLivenessAnalysisEnabled()) {
- liveness_block_ =
- builder_->liveness_analyzer()->NewBlock(other->liveness_block());
- }
return;
}
- // Record the merge for the local variable liveness calculation.
- // For loops, we are connecting a back edge into the existing block;
- // for merges, we create a new merged block.
- if (IsLivenessAnalysisEnabled()) {
- if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
- liveness_block_ =
- builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- liveness_block()->AddPredecessor(other->liveness_block());
- }
-
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder_->MergeControl(this->GetControlDependency(),
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 1d0ba3a9c2..ad1f1eb3f7 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -8,7 +8,6 @@
#include "src/ast/ast.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
namespace v8 {
@@ -114,12 +113,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Cache for StateValues nodes for frame states.
StateValuesCache state_values_cache_;
- // Analyzer of local variable liveness.
- LivenessAnalyzer liveness_analyzer_;
-
- // Function info for frame state construction.
- const FrameStateFunctionInfo* const frame_state_function_info_;
-
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -140,10 +133,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
ZoneVector<Handle<Object>>* globals() { return &globals_; }
Scope* current_scope() const;
Node* current_context() const;
- LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
- const FrameStateFunctionInfo* frame_state_function_info() const {
- return frame_state_function_info_;
- }
void set_environment(Environment* env) { environment_ = env; }
void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
@@ -221,28 +210,12 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Helper to indicate a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
- // Prepare information for lazy deoptimization. This information is attached
- // to the given node and the output value produced by the node is combined.
- // Conceptually this frame state is "after" a given operation.
- void PrepareFrameState(Node* node, BailoutId ast_id,
- OutputFrameStateCombine framestate_combine =
- OutputFrameStateCombine::Ignore());
-
- // Prepare information for eager deoptimization. This information is carried
- // by dedicated {Checkpoint} nodes that are wired into the effect chain.
- // Conceptually this frame state is "before" a given operation.
- void PrepareEagerCheckpoint(BailoutId ast_id);
-
BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
// Check if the given statement is an OSR entry.
// If so, record the stack height into the compilation and return {true}.
bool CheckOsrEntry(IterationStatement* stmt);
- // Computes local variable liveness and replaces dead variables in
- // frame states with the undefined values.
- void ClearNonLiveSlotsInFrameStates();
-
Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
@@ -267,15 +240,9 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Builders for variable load and assignment.
Node* BuildVariableAssignment(Variable* variable, Node* value,
- Token::Value op, const VectorSlotPair& slot,
- BailoutId bailout_id,
- OutputFrameStateCombine framestate_combine =
- OutputFrameStateCombine::Ignore());
- Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
- OutputFrameStateCombine framestate_combine);
- Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
- const VectorSlotPair& feedback,
- OutputFrameStateCombine framestate_combine,
+ Token::Value op, const VectorSlotPair& slot);
+ Node* BuildVariableDelete(Variable* variable);
+ Node* BuildVariableLoad(Variable* variable, const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
// Builders for property loads and stores.
@@ -301,8 +268,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildLoadNativeContextField(int index);
// Builders for automatic type conversion.
- Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
- Node* BuildToObject(Node* input, BailoutId bailout_id);
+ Node* BuildToBoolean(Node* input);
// Builder for adding the [[HomeObject]] to a value if the value came from a
// function literal and needs a home object. Do nothing otherwise.
@@ -310,23 +276,20 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
LiteralProperty* property, int slot_number = 0);
// Builders for error reporting at runtime.
- Node* BuildThrowError(Node* exception, BailoutId bailout_id);
- Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
- Node* BuildThrowConstAssignError(BailoutId bailout_id);
+ Node* BuildThrowError(Node* exception);
+ Node* BuildThrowReferenceError(Variable* var);
+ Node* BuildThrowConstAssignError();
// Builders for dynamic hole-checks at runtime.
- Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
- BailoutId bailout_id);
- Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
- BailoutId bailout_id);
+ Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole);
+ Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole);
// Builders for non-local control flow.
Node* BuildReturn(Node* return_value);
Node* BuildThrow(Node* exception_value);
// Builders for binary operations.
- Node* BuildBinaryOp(Node* left, Node* right, Token::Value op,
- TypeFeedbackId feedback_id);
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
@@ -364,8 +327,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
void VisitForValues(ZoneList<Expression*>* exprs);
// Common for all IterationStatement bodies.
- void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop,
- BailoutId stack_check_id);
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
// Dispatched from VisitCall.
void VisitCallSuper(Call* expr);
@@ -426,7 +388,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Operations on parameter or local variables.
void Bind(Variable* variable, Node* node);
Node* Lookup(Variable* variable);
- void MarkAllLocalsLive();
// Raw operations on parameter variables.
void RawParameterBind(int index, Node* node);
@@ -476,12 +437,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
values()->erase(values()->end() - depth, values()->end());
}
- // Preserve a checkpoint of the environment for the IR graph. Any
- // further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
- OutputFrameStateCombine::Ignore(),
- bool node_has_exception = false);
-
// Inserts a loop exit control node and renames the environment.
// This is useful for loop peeling to insert phis at loop exits.
void PrepareForLoopExit(Node* loop, BitVector* assigned_variables);
@@ -501,7 +456,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Mark this environment as being unreachable.
void MarkAsUnreachable() {
UpdateControlDependency(builder()->jsgraph()->Dead());
- liveness_block_ = nullptr;
}
bool IsMarkedAsUnreachable() {
return GetControlDependency()->opcode() == IrOpcode::kDead;
@@ -528,7 +482,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
AstGraphBuilder* builder_;
int parameters_count_;
int locals_count_;
- LivenessAnalyzerBlock* liveness_block_;
NodeVector values_;
NodeVector contexts_;
Node* control_dependency_;
@@ -537,19 +490,14 @@ class AstGraphBuilder::Environment : public ZoneObject {
Node* locals_node_;
Node* stack_node_;
- explicit Environment(Environment* copy,
- LivenessAnalyzerBlock* liveness_block);
+ explicit Environment(Environment* copy);
Environment* CopyAndShareLiveness();
- void UpdateStateValues(Node** state_values, int offset, int count);
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
AstGraphBuilder* builder() const { return builder_; }
CommonOperatorBuilder* common() { return builder_->common(); }
NodeVector* values() { return &values_; }
NodeVector* contexts() { return &contexts_; }
- LivenessAnalyzerBlock* liveness_block() { return liveness_block_; }
- bool IsLivenessAnalysisEnabled();
- bool IsLivenessBlockConsistent();
// Prepare environment to be used as loop header.
void PrepareForLoop(BitVector* assigned);
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index ff66bf4976..a6e5029573 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -149,11 +149,11 @@ void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
void ALAA::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
-void ALAA::VisitSuspend(Suspend* stmt) {
- Visit(stmt->generator_object());
- Visit(stmt->expression());
-}
+void ALAA::VisitYield(Yield* e) { Visit(e->expression()); }
+
+void ALAA::VisitYieldStar(YieldStar* e) { Visit(e->expression()); }
+void ALAA::VisitAwait(Await* e) { Visit(e->expression()); }
void ALAA::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 40f0a29132..36ffcf1623 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -56,9 +56,9 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
BasicBlockProfiler::Data* data =
info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
// Set the function name.
- if (info->has_shared_info() && info->shared_info()->name()->IsString()) {
+ if (info->has_shared_info()) {
std::ostringstream os;
- String::cast(info->shared_info()->name())->PrintUC16(os);
+ info->shared_info()->name()->PrintUC16(os);
data->SetFunctionName(&os);
}
// Capture the schedule string before instrumentation.
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 96327e7856..b553adf333 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -113,8 +113,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
}
return Replace(dead());
}
- return UpdateConditions(
- node, conditions->AddCondition(zone_, condition, condition_is_true));
+ return UpdateConditions(node, conditions, condition, condition_is_true);
}
Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
@@ -128,8 +127,7 @@ Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
return UpdateConditions(node, nullptr);
}
Node* condition = branch->InputAt(0);
- return UpdateConditions(
- node, from_branch->AddCondition(zone_, condition, is_true_branch));
+ return UpdateConditions(node, from_branch, condition, is_true_branch);
}
@@ -224,6 +222,25 @@ Reduction BranchElimination::UpdateConditions(
return NoChange();
}
+Reduction BranchElimination::UpdateConditions(
+ Node* node, const ControlPathConditions* prev_conditions,
+ Node* current_condition, bool is_true_branch) {
+ const ControlPathConditions* original = node_conditions_.Get(node);
+ DCHECK(prev_conditions != nullptr && current_condition != nullptr);
+ // The control path for the node is the path obtained by appending the
+ // current_condition to the prev_conditions. Check if this new control path
+ // would be the same as the already recorded path (original).
+ if (original == nullptr || !prev_conditions->EqualsAfterAddingCondition(
+ original, current_condition, is_true_branch)) {
+ // If this is the first visit or if the control path is different from the
+ // recorded path, create the new control path and record it.
+ const ControlPathConditions* new_condition =
+ prev_conditions->AddCondition(zone_, current_condition, is_true_branch);
+ node_conditions_.Set(node, new_condition);
+ return Changed(node);
+ }
+ return NoChange();
+}
// static
const BranchElimination::ControlPathConditions*
@@ -290,12 +307,8 @@ Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
return Nothing<bool>();
}
-
-bool BranchElimination::ControlPathConditions::operator==(
- const ControlPathConditions& other) const {
- if (condition_count_ != other.condition_count_) return false;
- BranchCondition* this_condition = head_;
- BranchCondition* other_condition = other.head_;
+bool BranchElimination::ControlPathConditions::IsSamePath(
+ BranchCondition* this_condition, BranchCondition* other_condition) const {
while (true) {
if (this_condition == other_condition) return true;
if (this_condition->condition != other_condition->condition ||
@@ -306,7 +319,31 @@ bool BranchElimination::ControlPathConditions::operator==(
other_condition = other_condition->next;
}
UNREACHABLE();
- return false;
+}
+
+bool BranchElimination::ControlPathConditions::operator==(
+ const ControlPathConditions& other) const {
+ if (condition_count_ != other.condition_count_) return false;
+ return IsSamePath(head_, other.head_);
+}
+
+bool BranchElimination::ControlPathConditions::EqualsAfterAddingCondition(
+ const ControlPathConditions* other, const Node* new_condition,
+ bool new_branch_direction) const {
+ // When an extra condition is added to the current chain, the count of
+ // the resulting chain would increase by 1. Quick check to see if counts
+ // match.
+ if (other->condition_count_ != condition_count_ + 1) return false;
+
+ // Check if the head of the other chain is the same as the new condition that
+ // would be added.
+ if (other->head_->condition != new_condition ||
+ other->head_->is_true != new_branch_direction) {
+ return false;
+ }
+
+ // Check if the rest of the path is the same as the prev_condition.
+ return IsSamePath(other->head_->next, head_);
}
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index c1431523e5..d78933e734 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -23,6 +23,8 @@ class V8_EXPORT_PRIVATE BranchElimination final
BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
~BranchElimination() final;
+ const char* reducer_name() const override { return "BranchElimination"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -47,6 +49,10 @@ class V8_EXPORT_PRIVATE BranchElimination final
static const ControlPathConditions* Empty(Zone* zone);
void Merge(const ControlPathConditions& other);
+ bool IsSamePath(BranchCondition* first, BranchCondition* second) const;
+ bool EqualsAfterAddingCondition(const ControlPathConditions* other,
+ const Node* new_condition,
+ bool new_branch_condition) const;
bool operator==(const ControlPathConditions& other) const;
bool operator!=(const ControlPathConditions& other) const {
return !(*this == other);
@@ -87,6 +93,9 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction TakeConditionsFromFirstControl(Node* node);
Reduction UpdateConditions(Node* node,
const ControlPathConditions* conditions);
+ Reduction UpdateConditions(Node* node,
+ const ControlPathConditions* prev_conditions,
+ Node* current_condition, bool is_true_branch);
Node* dead() const { return dead_; }
Graph* graph() const;
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index e531e75b8c..13185db208 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -28,31 +28,17 @@ void BytecodeLoopAssignments::Add(interpreter::Register r) {
}
}
-void BytecodeLoopAssignments::AddPair(interpreter::Register r) {
+void BytecodeLoopAssignments::AddList(interpreter::Register r, uint32_t count) {
if (r.is_parameter()) {
- DCHECK(interpreter::Register(r.index() + 1).is_parameter());
- bit_vector_->Add(r.ToParameterIndex(parameter_count_));
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
- } else {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- bit_vector_->Add(parameter_count_ + r.index());
- bit_vector_->Add(parameter_count_ + r.index() + 1);
- }
-}
-
-void BytecodeLoopAssignments::AddTriple(interpreter::Register r) {
- if (r.is_parameter()) {
- DCHECK(interpreter::Register(r.index() + 1).is_parameter());
- DCHECK(interpreter::Register(r.index() + 2).is_parameter());
- bit_vector_->Add(r.ToParameterIndex(parameter_count_));
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 2);
+ for (uint32_t i = 0; i < count; i++) {
+ DCHECK(interpreter::Register(r.index() + i).is_parameter());
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + i);
+ }
} else {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
- bit_vector_->Add(parameter_count_ + r.index());
- bit_vector_->Add(parameter_count_ + r.index() + 1);
- bit_vector_->Add(parameter_count_ + r.index() + 2);
+ for (uint32_t i = 0; i < count; i++) {
+ DCHECK(!interpreter::Register(r.index() + i).is_parameter());
+ bit_vector_->Add(parameter_count_ + r.index() + i);
+ }
}
}
@@ -112,6 +98,17 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
}
break;
}
+ case OperandType::kRegOutList: {
+ interpreter::Register r = accessor.GetRegisterOperand(i++);
+ uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ if (!r.is_parameter()) {
+ for (uint32_t j = 0; j < reg_count; ++j) {
+ DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+ in_liveness.MarkRegisterDead(r.index() + j);
+ }
+ }
+ break;
+ }
case OperandType::kRegOutPair: {
interpreter::Register r = accessor.GetRegisterOperand(i);
if (!r.is_parameter()) {
@@ -227,12 +224,18 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
assignments.Add(accessor.GetRegisterOperand(i));
break;
}
+ case OperandType::kRegOutList: {
+ interpreter::Register r = accessor.GetRegisterOperand(i++);
+ uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ assignments.AddList(r, reg_count);
+ break;
+ }
case OperandType::kRegOutPair: {
- assignments.AddPair(accessor.GetRegisterOperand(i));
+ assignments.AddList(accessor.GetRegisterOperand(i), 2);
break;
}
case OperandType::kRegOutTriple: {
- assignments.AddTriple(accessor.GetRegisterOperand(i));
+ assignments.AddList(accessor.GetRegisterOperand(i), 3);
break;
}
default:
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 63dfa3107c..68433a4155 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -24,8 +24,7 @@ class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
BytecodeLoopAssignments(int parameter_count, int register_count, Zone* zone);
void Add(interpreter::Register r);
- void AddPair(interpreter::Register r);
- void AddTriple(interpreter::Register r);
+ void AddList(interpreter::Register r, uint32_t count);
void AddAll();
void Union(const BytecodeLoopAssignments& other);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 5bb9a8e976..e1700e6b43 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -57,7 +57,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
- bool owner_has_exception,
const BytecodeLivenessState* liveness);
// Control dependency tracked by this environment.
@@ -406,7 +405,7 @@ Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine,
- bool owner_has_exception, const BytecodeLivenessState* liveness) {
+ const BytecodeLivenessState* liveness) {
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
// to match the parameter count.
@@ -522,7 +521,7 @@ VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
return VectorSlotPair(feedback_vector(), slot);
}
-bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+void BytecodeGraphBuilder::CreateGraph(bool stack_check) {
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -544,8 +543,6 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
Node** const inputs = &exit_controls_.front();
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
-
- return true;
}
void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
@@ -564,7 +561,7 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
bytecode_iterator().current_offset());
Node* frame_state_before = environment()->Checkpoint(
- bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
+ bailout_id, OutputFrameStateCombine::Ignore(), liveness_before);
NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
#ifdef DEBUG
} else {
@@ -592,14 +589,13 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
BailoutId bailout_id(bytecode_iterator().current_offset());
- bool has_exception = NodeProperties::IsExceptionalCall(node);
const BytecodeLivenessState* liveness_after =
bytecode_analysis()->GetOutLivenessFor(
bytecode_iterator().current_offset());
- Node* frame_state_after = environment()->Checkpoint(
- bailout_id, combine, has_exception, liveness_after);
+ Node* frame_state_after =
+ environment()->Checkpoint(bailout_id, combine, liveness_after);
NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
}
@@ -1006,26 +1002,30 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
BuildLdaLookupGlobalSlot(TypeofMode::INSIDE_TYPEOF);
}
-void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
+void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* name =
jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
+ LanguageMode language_mode = static_cast<LanguageMode>(
+ interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
+ bytecode_flags));
+ LookupHoistingMode lookup_hoisting_mode = static_cast<LookupHoistingMode>(
+ interpreter::StoreLookupSlotFlags::LookupHoistingModeBit::decode(
+ bytecode_flags));
+ DCHECK_IMPLIES(lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy,
+ is_sloppy(language_mode));
const Operator* op = javascript()->CallRuntime(
- is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
+ is_strict(language_mode)
+ ? Runtime::kStoreLookupSlot_Strict
+ : lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy
+ ? Runtime::kStoreLookupSlot_SloppyHoisting
+ : Runtime::kStoreLookupSlot_Sloppy);
Node* store = NewNode(op, name, value);
environment()->BindAccumulator(store, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
- BuildStaLookupSlot(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
- BuildStaLookupSlot(LanguageMode::STRICT);
-}
-
void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
@@ -1357,8 +1357,7 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
return ProcessCallArguments(call_op, call_args, 2 + arg_count);
}
-void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_mode,
+void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
Node* const* args, size_t arg_count,
int slot_id) {
DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
@@ -1372,14 +1371,20 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
CallFrequency frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->Call(arg_count, frequency, feedback,
- receiver_mode, tail_call_mode);
- Node* value = ProcessCallArguments(call, args, static_cast<int>(arg_count));
- environment()->BindAccumulator(value, Environment::kAttachFrameState);
+ const Operator* op =
+ javascript()->Call(arg_count, frequency, feedback, receiver_mode);
+ Node* node = nullptr;
+ if (Node* simplified = TryBuildSimplifiedCall(
+ op, args, static_cast<int>(arg_count), feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = ProcessCallArguments(op, args, static_cast<int>(arg_count));
+ }
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildCallVarArgs(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_mode) {
+void BytecodeGraphBuilder::BuildCallVarArgs(ConvertReceiverMode receiver_mode) {
DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
bytecode_iterator().current_bytecode()),
receiver_mode);
@@ -1410,17 +1415,16 @@ void BytecodeGraphBuilder::BuildCallVarArgs(TailCallMode tail_call_mode,
Node* const* call_args =
GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
- BuildCall(tail_call_mode, receiver_mode, call_args,
- static_cast<size_t>(2 + arg_count), slot_id);
+ BuildCall(receiver_mode, call_args, static_cast<size_t>(2 + arg_count),
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallAnyReceiver() {
- BuildCallVarArgs(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
+ BuildCallVarArgs(ConvertReceiverMode::kAny);
}
void BytecodeGraphBuilder::VisitCallProperty() {
- BuildCallVarArgs(TailCallMode::kDisallow,
- ConvertReceiverMode::kNotNullOrUndefined);
+ BuildCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined);
}
void BytecodeGraphBuilder::VisitCallProperty0() {
@@ -1429,8 +1433,8 @@ void BytecodeGraphBuilder::VisitCallProperty0() {
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
int const slot_id = bytecode_iterator().GetIndexOperand(2);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
- {callee, receiver}, slot_id);
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, {callee, receiver},
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallProperty1() {
@@ -1441,8 +1445,8 @@ void BytecodeGraphBuilder::VisitCallProperty1() {
Node* arg0 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
int const slot_id = bytecode_iterator().GetIndexOperand(3);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
- {callee, receiver, arg0}, slot_id);
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, {callee, receiver, arg0},
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallProperty2() {
@@ -1455,13 +1459,12 @@ void BytecodeGraphBuilder::VisitCallProperty2() {
Node* arg1 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(3));
int const slot_id = bytecode_iterator().GetIndexOperand(4);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined,
{callee, receiver, arg0, arg1}, slot_id);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver() {
- BuildCallVarArgs(TailCallMode::kDisallow,
- ConvertReceiverMode::kNullOrUndefined);
+ BuildCallVarArgs(ConvertReceiverMode::kNullOrUndefined);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver0() {
@@ -1469,8 +1472,7 @@ void BytecodeGraphBuilder::VisitCallUndefinedReceiver0() {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* receiver = jsgraph()->UndefinedConstant();
int const slot_id = bytecode_iterator().GetIndexOperand(1);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
- {callee, receiver}, slot_id);
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, {callee, receiver}, slot_id);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver1() {
@@ -1480,8 +1482,8 @@ void BytecodeGraphBuilder::VisitCallUndefinedReceiver1() {
Node* arg0 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
int const slot_id = bytecode_iterator().GetIndexOperand(2);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
- {callee, receiver, arg0}, slot_id);
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, {callee, receiver, arg0},
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver2() {
@@ -1493,7 +1495,7 @@ void BytecodeGraphBuilder::VisitCallUndefinedReceiver2() {
Node* arg1 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
int const slot_id = bytecode_iterator().GetIndexOperand(3);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
+ BuildCall(ConvertReceiverMode::kNullOrUndefined,
{callee, receiver, arg0, arg1}, slot_id);
}
@@ -1510,14 +1512,6 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitTailCall() {
- TailCallMode tail_call_mode =
- bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
- ? TailCallMode::kAllow
- : TailCallMode::kDisallow;
- BuildCallVarArgs(tail_call_mode, ConvertReceiverMode::kAny);
-}
-
void BytecodeGraphBuilder::VisitCallJSRuntime() {
PrepareEagerCheckpoint();
Node* callee =
@@ -1574,28 +1568,63 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Environment::kAttachFrameState);
}
-Node* BytecodeGraphBuilder::ProcessConstructWithSpreadArguments(
- const Operator* op, Node* callee, Node* new_target,
- interpreter::Register receiver, size_t reg_count) {
- int arg_count = static_cast<int>(reg_count);
+Node* const* BytecodeGraphBuilder::GetConstructArgumentsFromRegister(
+ Node* target, Node* new_target, interpreter::Register first_arg,
+ int arg_count) {
// arity is args + callee and new target.
int arity = arg_count + 2;
Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
- all[0] = callee;
- int first_arg_index = receiver.index();
+ all[0] = target;
+ int first_arg_index = first_arg.index();
for (int i = 0; i < arg_count; ++i) {
all[1 + i] = environment()->LookupRegister(
interpreter::Register(first_arg_index + i));
}
all[arity - 1] = new_target;
- Node* value = MakeNode(op, arity, all, false);
- return value;
+ return all;
+}
+
+Node* BytecodeGraphBuilder::ProcessConstructArguments(const Operator* op,
+ Node* const* args,
+ int arg_count) {
+ return MakeNode(op, arg_count, args, false);
+}
+
+void BytecodeGraphBuilder::VisitConstruct() {
+ PrepareEagerCheckpoint();
+ interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ // Slot index of 0 is used to indicate no feedback slot is available. Assert
+ // the assumption that slot index 0 is never a valid feedback slot.
+ STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+ VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
+
+ Node* new_target = environment()->LookupAccumulator();
+ Node* callee = environment()->LookupRegister(callee_reg);
+
+ CallFrequency frequency = ComputeCallFrequency(slot_id);
+ const Operator* op = javascript()->Construct(
+ static_cast<uint32_t>(reg_count + 2), frequency, feedback);
+ int arg_count = static_cast<int>(reg_count);
+ Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
+ first_reg, arg_count);
+ Node* node = nullptr;
+ if (Node* simplified = TryBuildSimplifiedConstruct(
+ op, args, static_cast<int>(arg_count), feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = ProcessConstructArguments(op, args, 2 + arg_count);
+ }
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitConstructWithSpread() {
PrepareEagerCheckpoint();
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
- interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
Node* new_target = environment()->LookupAccumulator();
@@ -1603,8 +1632,10 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
const Operator* op =
javascript()->ConstructWithSpread(static_cast<uint32_t>(reg_count + 2));
- Node* value = ProcessConstructWithSpreadArguments(op, callee, new_target,
- receiver, reg_count);
+ int arg_count = static_cast<int>(reg_count);
+ Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
+ first_reg, arg_count);
+ Node* value = ProcessConstructArguments(op, args, 2 + arg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -1621,46 +1652,6 @@ void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-Node* BytecodeGraphBuilder::ProcessConstructArguments(
- const Operator* call_new_op, Node* callee, Node* new_target,
- interpreter::Register receiver, size_t reg_count) {
- int arg_count = static_cast<int>(reg_count);
- // arity is args + callee and new target.
- int arity = arg_count + 2;
- Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
- all[0] = callee;
- int first_arg_index = receiver.index();
- for (int i = 0; i < arg_count; ++i) {
- all[1 + i] = environment()->LookupRegister(
- interpreter::Register(first_arg_index + i));
- }
- all[arity - 1] = new_target;
- Node* value = MakeNode(call_new_op, arity, all, false);
- return value;
-}
-
-void BytecodeGraphBuilder::VisitConstruct() {
- PrepareEagerCheckpoint();
- interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
- interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
- size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
- // Slot index of 0 is used indicate no feedback slot is available. Assert
- // the assumption that slot index 0 is never a valid feedback slot.
- STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
- int const slot_id = bytecode_iterator().GetIndexOperand(3);
- VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
-
- Node* new_target = environment()->LookupAccumulator();
- Node* callee = environment()->LookupRegister(callee_reg);
-
- CallFrequency frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->Construct(
- static_cast<uint32_t>(reg_count + 2), frequency, feedback);
- Node* value =
- ProcessConstructArguments(call, callee, new_target, receiver, reg_count);
- environment()->BindAccumulator(value, Environment::kAttachFrameState);
-}
-
void BytecodeGraphBuilder::VisitThrow() {
BuildLoopExitsForFunctionExit();
Node* value = environment()->LookupAccumulator();
@@ -1678,6 +1669,58 @@ void BytecodeGraphBuilder::VisitReThrow() {
MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::BuildHoleCheckAndThrow(
+ Node* condition, Runtime::FunctionId runtime_id, Node* name) {
+ Node* accumulator = environment()->LookupAccumulator();
+ NewBranch(condition, BranchHint::kFalse);
+ {
+ SubEnvironment sub_environment(this);
+
+ NewIfTrue();
+ Node* node;
+ const Operator* op = javascript()->CallRuntime(runtime_id);
+ if (runtime_id == Runtime::kThrowReferenceError) {
+ DCHECK(name != nullptr);
+ node = NewNode(op, name);
+ } else {
+ DCHECK(runtime_id == Runtime::kThrowSuperAlreadyCalledError ||
+ runtime_id == Runtime::kThrowSuperNotCalled);
+ node = NewNode(op);
+ }
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
+ NewIfFalse();
+ environment()->BindAccumulator(accumulator);
+}
+
+void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ Node* name =
+ jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowReferenceError, name);
+}
+
+void BytecodeGraphBuilder::VisitThrowSuperNotCalledIfHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowSuperNotCalled);
+}
+
+void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ Node* check_for_not_hole =
+ NewNode(simplified()->BooleanNot(), check_for_hole);
+ BuildHoleCheckAndThrow(check_for_not_hole,
+ Runtime::kThrowSuperAlreadyCalledError);
+}
+
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
@@ -2091,6 +2134,45 @@ void BytecodeGraphBuilder::VisitToNumber() {
Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitToPrimitiveToString() {
+ PrepareEagerCheckpoint();
+ Node* object = environment()->LookupAccumulator();
+
+ Node* node = nullptr;
+ FeedbackSlot slot =
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ if (Node* simplified = TryBuildSimplifiedToPrimitiveToString(object, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(javascript()->ToPrimitiveToString(), object);
+ }
+
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+ Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitStringConcat() {
+ PrepareEagerCheckpoint();
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(0);
+ int operand_count =
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(1));
+ Node** operands =
+ local_zone()->NewArray<Node*>(static_cast<size_t>(operand_count));
+ int operand_base = first_reg.index();
+ for (int i = 0; i < operand_count; ++i) {
+ Node* reg =
+ environment()->LookupRegister(interpreter::Register(operand_base + i));
+ // Explicitly insert a string check here. All operands are already strings;
+ // however, in the case of generator yields in the middle of string
+ // concatenations, we might lose the knowledge that the operand is a string.
+ operands[i] = NewNode(simplified()->CheckString(), reg);
+ }
+
+ Node* node = MakeNode(javascript()->StringConcat(operand_count),
+ operand_count, operands, false);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
@@ -2119,12 +2201,6 @@ void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
BuildJumpIfToBooleanFalse();
}
-void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
-
-void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
- BuildJumpIfNotHole();
-}
-
void BytecodeGraphBuilder::VisitJumpIfJSReceiver() { BuildJumpIfJSReceiver(); }
void BytecodeGraphBuilder::VisitJumpIfJSReceiverConstant() {
@@ -2218,6 +2294,18 @@ void BytecodeGraphBuilder::VisitDebugger() {
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
+void BytecodeGraphBuilder::VisitIncBlockCounter() {
+ DCHECK(FLAG_block_coverage);
+
+ Node* closure = GetFunctionClosure();
+ Node* coverage_array_slot =
+ jsgraph()->Constant(bytecode_iterator().GetIndexOperand(0));
+
+ const Operator* op = javascript()->CallRuntime(Runtime::kIncBlockCounter);
+
+ NewNode(op, closure, coverage_array_slot);
+}
+
void BytecodeGraphBuilder::VisitForInPrepare() {
PrepareEagerCheckpoint();
Node* receiver =
@@ -2271,15 +2359,18 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
Node* state = environment()->LookupAccumulator();
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
- SuspendFlags flags = interpreter::SuspendGeneratorBytecodeFlags::Decode(
- bytecode_iterator().GetFlagOperand(1));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ // We assume we are storing a range starting from index 0.
+ CHECK_EQ(0, first_reg.index());
+ int register_count =
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+
// The offsets used by the bytecode iterator are relative to a different base
// than what is used in the interpreter, hence the addition.
Node* offset =
jsgraph()->Constant(bytecode_iterator().current_offset() +
(BytecodeArray::kHeaderSize - kHeapObjectTag));
- int register_count = environment()->register_count();
int value_input_count = 3 + register_count;
Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
@@ -2291,25 +2382,35 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
environment()->LookupRegister(interpreter::Register(i));
}
- MakeNode(javascript()->GeneratorStore(register_count, flags),
- value_input_count, value_inputs, false);
+ MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+ value_inputs, false);
}
-void BytecodeGraphBuilder::VisitResumeGenerator() {
+void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
+ Node* state =
+ NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+
+ environment()->BindAccumulator(state, Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
+ Node* generator =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ // We assume we are restoring registers starting from index 0.
+ CHECK_EQ(0, first_reg.index());
+ int register_count =
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
- for (int i = 0; i < environment()->register_count(); ++i) {
+ for (int i = 0; i < register_count; ++i) {
Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
environment()->BindRegister(interpreter::Register(i), value);
}
-
- Node* state =
- NewNode(javascript()->GeneratorRestoreContinuation(), generator);
-
- environment()->BindAccumulator(state);
}
void BytecodeGraphBuilder::VisitWide() {
@@ -2327,8 +2428,6 @@ void BytecodeGraphBuilder::VisitIllegal() {
UNREACHABLE();
}
-void BytecodeGraphBuilder::VisitNop() {}
-
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
auto it = merge_environments_.find(current_offset);
if (it != merge_environments_.end()) {
@@ -2539,6 +2638,58 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
return nullptr;
}
+Node* BytecodeGraphBuilder::TryBuildSimplifiedToPrimitiveToString(
+ Node* value, FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction =
+ type_hint_lowering().ReduceToPrimitiveToStringOperation(value, effect,
+ control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedCall(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+ // pruned from the graph by a soft-deopt. It can happen that a CallIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceCallOperation(
+ op, args, arg_count, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+ // pruned from the graph by a soft-deopt. It can happen that a CallIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceConstructOperation(
+ op, args, arg_count, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
Node* receiver,
FeedbackSlot slot) {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index b963c6a197..52d84b0ddc 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -8,7 +8,6 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-type-hint-lowering.h"
-#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
@@ -35,7 +34,7 @@ class BytecodeGraphBuilder {
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags);
// Creates a graph by visiting bytecodes.
- bool CreateGraph(bool stack_check = true);
+ void CreateGraph(bool stack_check = true);
private:
class Environment;
@@ -125,14 +124,11 @@ class BytecodeGraphBuilder {
int arg_count);
Node* ProcessCallArguments(const Operator* call_op, Node* callee,
interpreter::Register receiver, size_t reg_count);
- Node* ProcessConstructArguments(const Operator* call_new_op, Node* callee,
- Node* new_target,
- interpreter::Register receiver,
- size_t reg_count);
- Node* ProcessConstructWithSpreadArguments(const Operator* op, Node* callee,
- Node* new_target,
- interpreter::Register receiver,
- size_t reg_count);
+ Node* const* GetConstructArgumentsFromRegister(
+ Node* target, Node* new_target, interpreter::Register first_arg,
+ int arg_count);
+ Node* ProcessConstructArguments(const Operator* op, Node* const* args,
+ int arg_count);
Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
interpreter::Register receiver,
size_t reg_count);
@@ -163,15 +159,12 @@ class BytecodeGraphBuilder {
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
- void BuildStaLookupSlot(LanguageMode language_mode);
- void BuildCallVarArgs(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_mode);
- void BuildCall(TailCallMode tail_call_mode, ConvertReceiverMode receiver_mode,
- Node* const* args, size_t arg_count, int slot_id);
- void BuildCall(TailCallMode tail_call_mode, ConvertReceiverMode receiver_mode,
+ void BuildCallVarArgs(ConvertReceiverMode receiver_mode);
+ void BuildCall(ConvertReceiverMode receiver_mode, Node* const* args,
+ size_t arg_count, int slot_id);
+ void BuildCall(ConvertReceiverMode receiver_mode,
std::initializer_list<Node*> args, int slot_id) {
- BuildCall(tail_call_mode, receiver_mode, args.begin(), args.size(),
- slot_id);
+ BuildCall(receiver_mode, args.begin(), args.size(), slot_id);
}
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
@@ -179,6 +172,8 @@ class BytecodeGraphBuilder {
void BuildTestingOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
+ void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
+ Node* name = nullptr);
// Optional early lowering to the simplified operator level. Returns the node
// representing the lowered operation or {nullptr} if no lowering available.
@@ -187,6 +182,11 @@ class BytecodeGraphBuilder {
Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
FeedbackSlot slot);
Node* TryBuildSimplifiedToNumber(Node* input, FeedbackSlot slot);
+ Node* TryBuildSimplifiedToPrimitiveToString(Node* input, FeedbackSlot slot);
+ Node* TryBuildSimplifiedCall(const Operator* op, Node* const* args,
+ int arg_count, FeedbackSlot slot);
+ Node* TryBuildSimplifiedConstruct(const Operator* op, Node* const* args,
+ int arg_count, FeedbackSlot slot);
Node* TryBuildSimplifiedLoadNamed(const Operator* op, Node* receiver,
FeedbackSlot slot);
Node* TryBuildSimplifiedLoadKeyed(const Operator* op, Node* receiver,
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index d8fc12624d..16a7ce8908 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -50,12 +50,6 @@ LinkageLocation regloc(Register reg, MachineType type) {
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif
-#elif V8_TARGET_ARCH_X87
-// ===========================================================================
-// == x87 ====================================================================
-// ===========================================================================
-#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
-
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
@@ -161,7 +155,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
msig->parameter_count());
// Check the types of the signature.
// Currently no floating point parameters or returns are allowed because
- // on x87 and ia32, the FP top of stack is involved.
+ // on ia32, the FP top of stack is involved.
for (size_t i = 0; i < msig->return_count(); i++) {
MachineRepresentation rep = msig->GetReturn(i).representation();
CHECK_NE(MachineRepresentation::kFloat32, rep);
diff --git a/deps/v8/src/compiler/check-elimination.cc b/deps/v8/src/compiler/check-elimination.cc
new file mode 100644
index 0000000000..7e7fdd57b5
--- /dev/null
+++ b/deps/v8/src/compiler/check-elimination.cc
@@ -0,0 +1,76 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/check-elimination.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CheckElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kCheckHeapObject:
+ return ReduceCheckHeapObject(node);
+ case IrOpcode::kCheckString:
+ return ReduceCheckString(node);
+ case IrOpcode::kCheckSeqString:
+ return ReduceCheckSeqString(node);
+ case IrOpcode::kCheckNonEmptyString:
+ return ReduceCheckNonEmptyString(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckHeapObject(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsString()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckSeqString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsSeqString()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckNonEmptyString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsString() &&
+ node != jsgraph()->EmptyStringConstant()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/check-elimination.h b/deps/v8/src/compiler/check-elimination.h
new file mode 100644
index 0000000000..2854def848
--- /dev/null
+++ b/deps/v8/src/compiler/check-elimination.h
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHECK_ELIMINATION_H_
+#define V8_COMPILER_CHECK_ELIMINATION_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSGraph;
+
+// Performs elimination of redundant checks within the graph due to inlined
+// constants.
+class V8_EXPORT_PRIVATE CheckElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ explicit CheckElimination(Editor* editor, JSGraph* jsgraph)
+ : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+ ~CheckElimination() final {}
+
+ const char* reducer_name() const override { return "CheckElimination"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceCheckHeapObject(Node* node);
+ Reduction ReduceCheckString(Node* node);
+ Reduction ReduceCheckSeqString(Node* node);
+ Reduction ReduceCheckNonEmptyString(Node* node);
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+
+ JSGraph* jsgraph_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CHECK_ELIMINATION_H_
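The CheckElimination reducer added above only removes checks whose input is an inlined constant, and it is meant to run as one reducer among many in a GraphReducer pass. A minimal sketch of the typical wiring, assuming zone, graph and jsgraph are supplied by the surrounding compilation pipeline (illustrative only, not the exact pipeline.cc code from this commit):

    // Sketch: register the reducer and run the pass to a fixpoint.
    GraphReducer graph_reducer(zone, graph, jsgraph->Dead());
    CheckElimination check_elimination(&graph_reducer, jsgraph);
    graph_reducer.AddReducer(&check_elimination);
    graph_reducer.ReduceGraph();  // invokes Reduce() on every node until stable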
diff --git a/deps/v8/src/compiler/checkpoint-elimination.h b/deps/v8/src/compiler/checkpoint-elimination.h
index f30eec0f55..87f14c27a6 100644
--- a/deps/v8/src/compiler/checkpoint-elimination.h
+++ b/deps/v8/src/compiler/checkpoint-elimination.h
@@ -20,6 +20,8 @@ class V8_EXPORT_PRIVATE CheckpointElimination final
explicit CheckpointElimination(Editor* editor);
~CheckpointElimination() final {}
+ const char* reducer_name() const override { return "CheckpointElimination"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 19bb76b125..a1cefa1123 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -221,7 +221,7 @@ Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
return raw_assembler()->HeapConstant(object);
}
-Node* CodeAssembler::CStringConstant(const char* str) {
+Node* CodeAssembler::StringConstant(const char* str) {
return HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED));
}
@@ -554,10 +554,16 @@ Node* CodeAssembler::Projection(int index, Node* value) {
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) {
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+
+ if (if_exception == nullptr) {
+ // If no handler is supplied, don't add continuations
+ return;
+ }
+
Label success(this), exception(this, Label::kDeferred);
success.MergeVariables();
exception.MergeVariables();
- DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
raw_assembler()->Continuations(node, success.label_, exception.label_);
@@ -620,6 +626,22 @@ Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
+Node* CodeAssembler::TailCallRuntimeN(Runtime::FunctionId function,
+ Node* context, Node* argc) {
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 0, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+ Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+
+ Node* nodes[] = {centry, ref, argc, context};
+
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+}
+
// Instantiate TailCallRuntime() for argument counts used by CSA-generated code
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
@@ -1037,13 +1059,25 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
for (auto var : variable_phis_) {
CodeAssemblerVariable::Impl* var_impl = var.first;
auto i = variable_merges_.find(var_impl);
- // If the following asserts fire, then a variable that has been marked as
- // being merged at the label--either by explicitly marking it so in the
- // label constructor or by having seen different bound values at branches
- // into the label--doesn't have a bound value along all of the paths that
- // have been merged into the label up to this point.
- DCHECK(i != variable_merges_.end());
- DCHECK_EQ(i->second.size(), merge_count_);
+#if DEBUG
+ bool not_found = i == variable_merges_.end();
+ if (not_found || i->second.size() != merge_count_) {
+ std::stringstream str;
+ str << "A variable that has been marked as beeing merged at the label"
+ << "\n# doesn't have a bound value along all of the paths that "
+ << "\n# have been merged into the label up to this point."
+ << "\n#"
+ << "\n# This can happen in the following cases:"
+ << "\n# - By explicitly marking it so in the label constructor"
+ << "\n# - By having seen different bound values at branches"
+ << "\n#"
+ << "\n# Merge count: expected=" << merge_count_
+ << " vs. found=" << (not_found ? 0 : i->second.size())
+ << "\n# Variable: " << *var_impl
+ << "\n# Current Block: " << *label_->block();
+ FATAL(str.str().c_str());
+ }
+#endif // DEBUG
Node* phi = state_->raw_assembler_->Phi(
var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
variable_phis_[var_impl] = phi;
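The new FATAL message above fires for a variable that is merged at a label but not bound on every incoming path. A small sketch of the offending pattern, written in CodeAssembler style; the names are illustrative and the constructor signatures are assumed to match this V8 version:

    // Sketch: |var| is listed as merged at |done| but only bound on one path.
    Variable var(this, MachineRepresentation::kTagged);
    Label done(this, &var);
    GotoIf(condition, &done);   // path 1: jumps to |done| with |var| unbound
    var.Bind(some_value);
    Goto(&done);                // path 2: |var| is bound here
    Bind(&done);                // merge point: triggers the diagnostic above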
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 1f2e4d8f4f..039668ebcf 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -225,7 +225,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* SmiConstant(Smi* value);
Node* SmiConstant(int value);
Node* HeapConstant(Handle<HeapObject> object);
- Node* CStringConstant(const char* str);
+ Node* StringConstant(const char* str);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
Node* Float64Constant(double value);
@@ -351,6 +351,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* TailCallRuntime(Runtime::FunctionId function, Node* context,
TArgs... args);
+ // Tail call into the runtime passing the same |argc| stack arguments that we
+ // were called with.
+ Node* TailCallRuntimeN(Runtime::FunctionId function, Node* context,
+ Node* argc);
+
template <class... TArgs>
Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
Node* target = HeapConstant(callable.code());
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 7f09b8524e..c6d3174d8c 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -87,8 +87,8 @@ class InstructionOperandConverter {
return ToExternalReference(instr_->InputAt(index));
}
- Handle<HeapObject> InputHeapObject(size_t index) {
- return ToHeapObject(instr_->InputAt(index));
+ Handle<Code> InputCode(size_t index) {
+ return ToCode(instr_->InputAt(index));
}
Label* InputLabel(size_t index) { return ToLabel(instr_->InputAt(index)); }
@@ -151,7 +151,9 @@ class InstructionOperandConverter {
ConstantOperand::cast(op)->virtual_register());
}
- double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
+ double ToDouble(InstructionOperand* op) {
+ return ToConstant(op).ToFloat64().value();
+ }
float ToFloat32(InstructionOperand* op) { return ToConstant(op).ToFloat32(); }
@@ -159,8 +161,8 @@ class InstructionOperandConverter {
return ToConstant(op).ToExternalReference();
}
- Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
- return ToConstant(op).ToHeapObject();
+ Handle<Code> ToCode(InstructionOperand* op) {
+ return ToConstant(op).ToCode();
}
const Frame* frame() const { return gen_->frame(); }
@@ -202,15 +204,14 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
const Frame* frame() const { return frame_; }
- Isolate* isolate() const { return masm()->isolate(); }
- MacroAssembler* masm() const { return masm_; }
+ TurboAssembler* tasm() { return tasm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
const Frame* const frame_;
- MacroAssembler* const masm_;
+ TurboAssembler* const tasm_;
OutOfLineCode* const next_;
};
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 66232aa06f..f09cd73a15 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -35,44 +35,50 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info)
- : frame_access_state_(nullptr),
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code, CompilationInfo* info,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position)
+ : zone_(codegen_zone),
+ frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
unwinding_info_writer_(zone()),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
+ start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
- masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
+ tasm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
resolver_(this),
- safepoints_(code->zone()),
- handlers_(code->zone()),
- deoptimization_exits_(code->zone()),
- deoptimization_states_(code->zone()),
- deoptimization_literals_(code->zone()),
+ safepoints_(zone()),
+ handlers_(zone()),
+ deoptimization_exits_(zone()),
+ deoptimization_states_(zone()),
+ deoptimization_literals_(zone()),
inlined_function_count_(0),
- translations_(code->zone()),
+ translations_(zone()),
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
+ osr_helper_(osr_helper),
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
- source_position_table_builder_(code->zone(),
+ source_position_table_builder_(zone(),
info->SourcePositionRecordingMode()),
result_(kSuccess) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
CreateFrameAccessState(frame);
+ CHECK_EQ(info->is_osr(), osr_helper_.has_value());
}
Isolate* CodeGenerator::isolate() const { return info_->isolate(); }
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
FinishFrame(frame);
- frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
+ frame_access_state_ = new (zone()) FrameAccessState(frame);
}
void CodeGenerator::AssembleCode() {
@@ -81,19 +87,18 @@ void CodeGenerator::AssembleCode() {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in AssemblePrologue).
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
+ FrameScope frame_scope(tasm(), StackFrame::MANUAL);
if (info->is_source_positions_enabled()) {
- SourcePosition source_position(info->shared_info()->start_position());
- AssembleSourcePosition(source_position);
+ AssembleSourcePosition(start_source_position());
}
// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm());
+ ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
// Architecture-specific, linkage-specific prologue.
- info->set_prologue_offset(masm()->pc_offset());
+ info->set_prologue_offset(tasm()->pc_offset());
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
@@ -107,16 +112,6 @@ void CodeGenerator::AssembleCode() {
}
inlined_function_count_ = deoptimization_literals_.size();
- // Define deoptimization literals for all unoptimized code objects of inlined
- // functions. This ensures unoptimized code is kept alive by optimized code.
- for (const CompilationInfo::InlinedFunctionHolder& inlined :
- info->inlined_functions()) {
- if (!inlined.shared_info.equals(info->shared_info())) {
- DefineDeoptimizationLiteral(
- DeoptimizationLiteral(inlined.inlined_code_object_root));
- }
- }
-
unwinding_info_writer_.SetNumberOfInstructionBlocks(
code()->InstructionBlockCount());
@@ -127,12 +122,12 @@ void CodeGenerator::AssembleCode() {
continue;
}
// Align loop headers on 16-byte boundaries.
- if (block->IsLoopHeader()) masm()->Align(16);
+ if (block->IsLoopHeader()) tasm()->Align(16);
// Ensure lazy deopt doesn't patch handler entry points.
if (block->IsHandler()) EnsureSpaceForLazyDeopt();
// Bind a label for a block.
current_block_ = block->rpo_number();
- unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
+ unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
if (FLAG_code_comments) {
// TODO(titzer): these code comments are a giant memory leak.
Vector<char> buffer = Vector<char>::New(200);
@@ -158,12 +153,12 @@ void CodeGenerator::AssembleCode() {
buffer = buffer.SubVector(next, buffer.length());
}
SNPrintF(buffer, " --");
- masm()->RecordComment(buffer_start);
+ tasm()->RecordComment(buffer_start);
}
frame_access_state()->MarkHasFrame(block->needs_frame());
- masm()->bind(GetLabel(current_block_));
+ tasm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -171,12 +166,12 @@ void CodeGenerator::AssembleCode() {
// using the roots.
// TODO(mtrofin): investigate how we can avoid doing this repeatedly.
if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
- masm()->InitializeRootRegister();
+ tasm()->InitializeRootRegister();
}
}
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
result_ = AssembleBlock(block);
} else {
result_ = AssembleBlock(block);
@@ -188,25 +183,29 @@ void CodeGenerator::AssembleCode() {
// Assemble all out-of-line code.
if (ools_) {
- masm()->RecordComment("-- Out of line code --");
+ tasm()->RecordComment("-- Out of line code --");
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
- masm()->bind(ool->entry());
+ tasm()->bind(ool->entry());
ool->Generate();
- if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
+ if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
}
}
// Assemble all eager deoptimization exits.
for (DeoptimizationExit* exit : deoptimization_exits_) {
- masm()->bind(exit->label());
- AssembleDeoptimizerCall(exit->deoptimization_id(), exit->pos());
+ tasm()->bind(exit->label());
+ int trampoline_pc = tasm()->pc_offset();
+ int deoptimization_id = exit->deoptimization_id();
+ DeoptimizationState* ds = deoptimization_states_[deoptimization_id];
+ ds->set_trampoline_pc(trampoline_pc);
+ AssembleDeoptimizerCall(deoptimization_id, exit->pos());
}
// Ensure there is space for lazy deoptimization in the code.
if (info->ShouldEnsureSpaceForLazyDeopt()) {
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
- while (masm()->pc_offset() < target_offset) {
- masm()->nop();
+ int target_offset = tasm()->pc_offset() + Deoptimizer::patch_size();
+ while (tasm()->pc_offset() < target_offset) {
+ tasm()->nop();
}
}
@@ -214,9 +213,9 @@ void CodeGenerator::AssembleCode() {
// Emit the jump tables.
if (jump_tables_) {
- masm()->Align(kPointerSize);
+ tasm()->Align(kPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) {
- masm()->bind(table->label());
+ tasm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count());
}
}
@@ -224,9 +223,9 @@ void CodeGenerator::AssembleCode() {
// The PerfJitLogger logs code up until here, excluding the safepoint
// table. Resolve the unwinding info now so it is aware of the same code size
// as reported by perf.
- unwinding_info_writer_.Finish(masm()->pc_offset());
+ unwinding_info_writer_.Finish(tasm()->pc_offset());
- safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
+ safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
result_ = kSuccess;
}
@@ -234,7 +233,7 @@ Handle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) return Handle<Code>();
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), unwinding_info_writer_.eh_frame_writer(), info(),
+ tasm(), unwinding_info_writer_.eh_frame_writer(), info(),
Handle<Object>());
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetTotalFrameSlotCount());
@@ -280,7 +279,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
Safepoint::Kind kind, int arguments,
Safepoint::DeoptMode deopt_mode) {
Safepoint safepoint =
- safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ safepoints()->DefineSafepoint(tasm(), kind, arguments, deopt_mode);
int stackSlotToSpillSlotDelta =
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
@@ -308,7 +307,7 @@ bool CodeGenerator::IsMaterializableFromRoot(
if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
Heap* heap = isolate()->heap();
return heap->IsRootHandle(object, index_return) &&
- heap->RootCanBeTreatedAsConstant(*index_return);
+ !heap->RootCanBeWrittenAfterInitialization(*index_return);
}
return false;
}
@@ -470,7 +469,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
branch.fallthru = true;
// Assemble architecture-specific branch.
AssembleArchBranch(instr, &branch);
- masm()->bind(&continue_label);
+ tasm()->bind(&continue_label);
break;
}
case kFlags_set: {
@@ -500,20 +499,24 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
if (!source_position.IsKnown()) return;
- source_position_table_builder_.AddPosition(masm()->pc_offset(),
+ source_position_table_builder_.AddPosition(tasm()->pc_offset(),
source_position, false);
if (FLAG_code_comments) {
CompilationInfo* info = this->info();
if (!info->parse_info()) return;
std::ostringstream buffer;
buffer << "-- ";
- if (FLAG_trace_turbo) {
+ if (FLAG_trace_turbo ||
+ tasm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
+ AllowHeapAllocation allocation;
+ AllowHandleAllocation handles;
+ AllowHandleDereference deref;
buffer << source_position.InliningStack(info);
}
buffer << " --";
- masm()->RecordComment(StrDup(buffer.str().c_str()));
+ tasm()->RecordComment(StrDup(buffer.str().c_str()));
}
}
@@ -593,22 +596,23 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
if (info->is_osr()) {
DCHECK(osr_pc_offset_ >= 0);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
} else {
BailoutId osr_ast_id = BailoutId::None();
- data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+ data->SetOsrBytecodeOffset(Smi::FromInt(osr_ast_id.ToInt()));
data->SetOsrPcOffset(Smi::FromInt(-1));
}
// Populate deoptimization entries.
for (int i = 0; i < deopt_count; i++) {
DeoptimizationState* deoptimization_state = deoptimization_states_[i];
- data->SetAstId(i, deoptimization_state->bailout_id());
- CHECK(deoptimization_states_[i]);
+ data->SetBytecodeOffset(i, deoptimization_state->bailout_id());
+ CHECK(deoptimization_state);
data->SetTranslationIndex(
- i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
- data->SetArgumentsStackHeight(i, Smi::kZero);
+ i, Smi::FromInt(deoptimization_state->translation_id()));
+ data->SetTrampolinePc(i,
+ Smi::FromInt(deoptimization_state->trampoline_pc()));
data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
}
@@ -634,7 +638,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (flags & CallDescriptor::kHasExceptionHandler) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
- handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
+ handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
}
if (needs_frame_state) {
@@ -644,19 +648,14 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
- int pc_offset = masm()->pc_offset();
+ int pc_offset = tasm()->pc_offset();
int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
descriptor->state_combine());
- // If the pre-call frame state differs from the post-call one, produce the
- // pre-call frame state, too.
- // TODO(jarin) We might want to avoid building the pre-call frame state
- // because it is only used to get locals and arguments (by the debugger and
- // f.arguments), and those are the same in the pre-call and post-call
- // states.
- if (!descriptor->state_combine().IsOutputIgnored()) {
- deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
- OutputFrameStateCombine::Ignore());
- }
+
+ DeoptimizationExit* const exit = new (zone())
+ DeoptimizationExit(deopt_state_id, current_source_position_);
+ deoptimization_exits_.push_back(exit);
+
safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
}
}
@@ -743,12 +742,11 @@ void CodeGenerator::TranslateFrameStateDescriptorOperands(
for (StateValueList::iterator it = values->begin(); it != values->end();
++it, ++index) {
StateValueDescriptor* value_desc = (*it).desc;
- if (combine.kind() == OutputFrameStateCombine::kPokeAt) {
+ if (!combine.IsOutputIgnored()) {
// The result of the call should be placed at position
// [index_from_top] in the stack (overwriting whatever was
// previously there).
- size_t index_from_top =
- desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ size_t index_from_top = desc->GetSize() - 1 - combine.GetOffsetToPokeAt();
if (index >= index_from_top &&
index < index_from_top + iter->instruction()->OutputCount()) {
DCHECK_NOT_NULL(translation);
@@ -763,17 +761,7 @@ void CodeGenerator::TranslateFrameStateDescriptorOperands(
}
TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
}
- DCHECK_EQ(desc->GetSize(OutputFrameStateCombine::Ignore()), index);
-
- if (combine.kind() == OutputFrameStateCombine::kPushOutput) {
- DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
- for (size_t output = 0; output < combine.GetPushCount(); output++) {
- // Materialize the result of the call instruction in this slot.
- AddTranslationForOperand(translation, iter->instruction(),
- iter->instruction()->OutputAt(output),
- MachineType::AnyTagged());
- }
- }
+ DCHECK_EQ(desc->GetSize(), index);
}
@@ -798,12 +786,6 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
switch (descriptor->type()) {
- case FrameStateType::kJavaScriptFunction:
- translation->BeginJSFrame(
- descriptor->bailout_id(), shared_info_id,
- static_cast<unsigned int>(descriptor->GetSize(state_combine) -
- (1 + descriptor->parameters_count())));
- break;
case FrameStateType::kInterpretedFunction:
translation->BeginInterpretedFrame(
descriptor->bailout_id(), shared_info_id,
@@ -814,15 +796,28 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
- case FrameStateType::kTailCallerFunction:
- translation->BeginTailCallerFrame(shared_info_id);
- break;
case FrameStateType::kConstructStub:
DCHECK(descriptor->bailout_id().IsValidForConstructStub());
translation->BeginConstructStubFrame(
descriptor->bailout_id(), shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
+ case FrameStateType::kBuiltinContinuation: {
+ BailoutId bailout_id = descriptor->bailout_id();
+ int parameter_count =
+ static_cast<unsigned int>(descriptor->parameters_count());
+ translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
+ parameter_count);
+ break;
+ }
+ case FrameStateType::kJavaScriptBuiltinContinuation: {
+ BailoutId bailout_id = descriptor->bailout_id();
+ int parameter_count =
+ static_cast<unsigned int>(descriptor->parameters_count());
+ translation->BeginJavaScriptBuiltinContinuationFrame(
+ bailout_id, shared_info_id, parameter_count);
+ break;
+ }
case FrameStateType::kGetterStub:
translation->BeginGetterStubFrame(shared_info_id);
break;
@@ -860,7 +855,6 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
return deoptimization_id;
}
-
void CodeGenerator::AddTranslationForOperand(Translation* translation,
Instruction* instr,
InstructionOperand* op,
@@ -968,7 +962,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kFloat64:
DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
type.representation() == MachineRepresentation::kTagged);
- literal = DeoptimizationLiteral(constant.ToFloat64());
+ literal = DeoptimizationLiteral(constant.ToFloat64().value());
break;
case Constant::kHeapObject:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
@@ -986,15 +980,15 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
}
}
-
void CodeGenerator::MarkLazyDeoptSite() {
- last_lazy_deopt_pc_ = masm()->pc_offset();
+ last_lazy_deopt_pc_ = tasm()->pc_offset();
}
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
Instruction* instr, size_t frame_state_offset) {
int const deoptimization_id = BuildTranslation(
instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
+
DeoptimizationExit* const exit = new (zone())
DeoptimizationExit(deoptimization_id, current_source_position_);
deoptimization_exits_.push_back(exit);
@@ -1002,11 +996,10 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
}
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
- : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
+ : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
}
-
OutOfLineCode::~OutOfLineCode() {}
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 5d879a28a5..1d8a5a0983 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_
+#include "src/base/optional.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/osr.h"
#include "src/compiler/unwinding-info-writer.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
@@ -77,13 +79,15 @@ class DeoptimizationLiteral {
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
public:
- explicit CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info);
+ explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code, CompilationInfo* info,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
// FinalizeCode returns a null handle.
- void AssembleCode();
+ void AssembleCode(); // Does not need to run on main thread.
Handle<Code> FinalizeCode();
InstructionSequence* code() const { return code_; }
@@ -94,20 +98,25 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
- void AssembleSourcePosition(Instruction* instr);
+ SourcePosition start_source_position() const {
+ return start_source_position_;
+ }
+ void AssembleSourcePosition(Instruction* instr);
void AssembleSourcePosition(SourcePosition source_position);
// Record a safepoint with the given pointer map.
void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode deopt_mode);
+ Zone* zone() const { return zone_; }
+
private:
- MacroAssembler* masm() { return &masm_; }
+ TurboAssembler* tasm() { return &tasm_; }
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
- Zone* zone() const { return code()->zone(); }
CompilationInfo* info() const { return info_; }
+ OsrHelper* osr_helper() { return &(*osr_helper_); }
// Create the FrameAccessState object. The Frame is immutable from here on.
void CreateFrameAccessState(Frame* frame);
@@ -273,13 +282,16 @@ class CodeGenerator final : public GapResolver::Assembler {
translation_id_(translation_id),
pc_offset_(pc_offset),
kind_(kind),
- reason_(reason) {}
+ reason_(reason),
+ trampoline_pc_(-1) {}
BailoutId bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ int trampoline_pc() { return trampoline_pc_; }
+ void set_trampoline_pc(int t_pc) { trampoline_pc_ = t_pc; }
private:
BailoutId bailout_id_;
@@ -287,6 +299,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int pc_offset_;
DeoptimizeKind kind_;
DeoptimizeReason reason_;
+ int trampoline_pc_;
};
struct HandlerInfo {
@@ -296,6 +309,7 @@ class CodeGenerator final : public GapResolver::Assembler {
friend class OutOfLineCode;
+ Zone* zone_;
FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
@@ -304,8 +318,9 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* const labels_;
Label return_label_;
RpoNumber current_block_;
+ SourcePosition start_source_position_;
SourcePosition current_source_position_;
- MacroAssembler masm_;
+ TurboAssembler tasm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_;
@@ -317,6 +332,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int last_lazy_deopt_pc_;
JumpTable* jump_tables_;
OutOfLineCode* ools_;
+ base::Optional<OsrHelper> osr_helper_;
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index acc2092f5d..ea3575aa55 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -29,6 +29,8 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
MachineOperatorBuilder* machine);
~CommonOperatorReducer() final {}
+ const char* reducer_name() const override { return "CommonOperatorReducer"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f87c0755b8..f24221d375 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -28,7 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
return os << "False";
}
UNREACHABLE();
- return os;
}
@@ -275,7 +274,6 @@ std::ostream& operator<<(std::ostream& os, RegionObservability observability) {
return os << "not-observable";
}
UNREACHABLE();
- return os;
}
RegionObservability RegionObservabilityOf(Operator const* op) {
@@ -802,7 +800,6 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
return &cache_.kBranchFalseOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
@@ -1161,7 +1158,6 @@ const Operator* CommonOperatorBuilder::BeginRegion(
return &cache_.kBeginRegionNotObservableOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* CommonOperatorBuilder::StateValues(int arguments,
@@ -1325,7 +1321,6 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
return Loop(size);
} else {
UNREACHABLE();
- return nullptr;
}
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 2b51a814fe..2fa1a479b4 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -37,7 +37,6 @@ inline BranchHint NegateBranchHint(BranchHint hint) {
return BranchHint::kTrue;
}
UNREACHABLE();
- return hint;
}
inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index d66a9c58d5..10ec4eb042 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -35,7 +35,6 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
return ReduceNode(node);
}
UNREACHABLE();
- return NoChange();
}
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 1cf9b22833..ede2daac25 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -28,6 +28,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* common);
~DeadCodeElimination() final {}
+ const char* reducer_name() const override { return "DeadCodeElimination"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 6a75e8cff2..36a17fd547 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -536,15 +536,9 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
return;
}
- if (node->opcode() == IrOpcode::kIfSuccess) {
- // We always schedule IfSuccess with its call, so skip it here.
- DCHECK_EQ(IrOpcode::kCall, node->InputAt(0)->opcode());
- // The IfSuccess node should not belong to an exceptional call node
- // because such IfSuccess nodes should only start a basic block (and
- // basic block start nodes are not handled in the ProcessNode method).
- DCHECK(!NodeProperties::IsExceptionalCall(node->InputAt(0)));
- return;
- }
+ // The IfSuccess nodes should always start a basic block (and basic block
+ // start nodes are not handled in the ProcessNode method).
+ DCHECK_NE(IrOpcode::kIfSuccess, node->opcode());
// If the node takes an effect, replace with the current one.
if (node->op()->EffectInputCount() > 0) {
@@ -641,9 +635,18 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckReceiver:
result = LowerCheckReceiver(node, frame_state);
break;
+ case IrOpcode::kCheckSymbol:
+ result = LowerCheckSymbol(node, frame_state);
+ break;
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
+ case IrOpcode::kCheckSeqString:
+ result = LowerCheckSeqString(node, frame_state);
+ break;
+ case IrOpcode::kCheckNonEmptyString:
+ result = LowerCheckNonEmptyString(node, frame_state);
+ break;
case IrOpcode::kCheckInternalizedString:
result = LowerCheckInternalizedString(node, frame_state);
break;
@@ -763,6 +766,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringCharCodeAt:
result = LowerStringCharCodeAt(node);
break;
+ case IrOpcode::kSeqStringCharCodeAt:
+ result = LowerSeqStringCharCodeAt(node);
+ break;
+ case IrOpcode::kStringToLowerCaseIntl:
+ result = LowerStringToLowerCaseIntl(node);
+ break;
+ case IrOpcode::kStringToUpperCaseIntl:
+ result = LowerStringToUpperCaseIntl(node);
+ break;
case IrOpcode::kStringEqual:
result = LowerStringEqual(node);
break;
@@ -775,8 +787,8 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckFloat64Hole:
result = LowerCheckFloat64Hole(node, frame_state);
break;
- case IrOpcode::kCheckTaggedHole:
- result = LowerCheckTaggedHole(node, frame_state);
+ case IrOpcode::kCheckNotTaggedHole:
+ result = LowerCheckNotTaggedHole(node, frame_state);
break;
case IrOpcode::kConvertTaggedHoleToUndefined:
result = LowerConvertTaggedHoleToUndefined(node);
@@ -805,6 +817,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
+ case IrOpcode::kLookupHashStorageIndex:
+ result = LowerLookupHashStorageIndex(node);
+ break;
+ case IrOpcode::kLoadHashMapValue:
+ result = LowerLoadHashMapValue(node);
+ break;
+ case IrOpcode::kTransitionAndStoreElement:
+ LowerTransitionAndStoreElement(node);
+ break;
case IrOpcode::kFloat64RoundUp:
if (!LowerFloat64RoundUp(node).To(&result)) {
return false;
@@ -1300,6 +1320,17 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+ Node* check =
+ __ WordEqual(value_map, __ HeapConstant(factory()->symbol_map()));
+ __ DeoptimizeUnless(DeoptimizeReason::kNotASymbol, check, frame_state);
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -1313,6 +1344,47 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
return value;
}
+Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+ Node* is_string = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* is_sequential =
+ __ Word32Equal(__ Word32And(value_instance_type,
+ __ Int32Constant(kStringRepresentationMask)),
+ __ Int32Constant(kSeqStringTag));
+ Node* is_sequential_string = __ Word32And(is_string, is_sequential);
+
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType,
+ is_sequential_string, frame_state);
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerCheckNonEmptyString(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+ Node* is_string = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* is_non_empty = __ Word32Equal(
+ __ WordEqual(value, __ EmptyStringConstant()), __ Int32Constant(0));
+
+ Node* is_non_empty_string = __ Word32And(is_string, is_non_empty);
+
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, is_non_empty_string,
+ frame_state);
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -1767,6 +1839,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
Node* node, Node* frame_state) {
+ CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeLabel<1>();
@@ -1780,8 +1853,8 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
__ Bind(&if_not_smi);
- Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
+ Node* number =
+ BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
number = __ TruncateFloat64ToWord32(number);
__ Goto(&done, number);
@@ -2063,7 +2136,7 @@ Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
Node* length = NodeProperties::GetValueInput(node, 1);
Callable const callable =
- CodeFactory::NewUnmappedArgumentsElements(isolate());
+ Builtins::CallableFor(isolate(), Builtins::kNewUnmappedArgumentsElements);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2089,7 +2162,8 @@ Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable = CodeFactory::StringCharAt(isolate());
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCharAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2102,7 +2176,8 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable = CodeFactory::StringCharCodeAt(isolate());
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCharCodeAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2112,6 +2187,33 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ auto one_byte_load = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ __ GotoIf(is_one_byte, &one_byte_load);
+ Node* two_byte_result = __ LoadElement(
+ AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
+ __ Goto(&done, two_byte_result);
+
+ __ Bind(&one_byte_load);
+ Node* one_byte_element = __ LoadElement(
+ AccessBuilder::ForSeqOneByteStringCharacter(), receiver, position);
+ __ Goto(&done, one_byte_element);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
@@ -2161,6 +2263,46 @@ Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
return done.PhiAt(0);
}
+#ifdef V8_INTL_SUPPORT
+
+Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
+ Node* receiver = node->InputAt(0);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringToLowerCaseIntl);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ return __ Call(desc, __ CEntryStubConstant(1), receiver,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
+}
+
+#else
+
+Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+#endif // V8_INTL_SUPPORT
+
Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
Node* value = node->InputAt(0);
Node* code = value;
@@ -2291,7 +2433,8 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
Node* search_string = node->InputAt(1);
Node* position = node->InputAt(2);
- Callable callable = CodeFactory::StringIndexOf(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringIndexOf);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2314,16 +2457,18 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
}
Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
- return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
+ return LowerStringComparison(
+ Builtins::CallableFor(isolate(), Builtins::kStringEqual), node);
}
Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
- return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
+ return LowerStringComparison(
+ Builtins::CallableFor(isolate(), Builtins::kStringLessThan), node);
}
Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
- return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
- node);
+ return LowerStringComparison(
+ Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node);
}
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
@@ -2338,8 +2483,9 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
return value;
}
-Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
- Node* frame_state) {
+
+Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = __ WordEqual(value, __ TheHoleConstant());
__ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
@@ -2484,7 +2630,8 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
__ Bind(&if_not_fixed_array);
// We need to take a copy of the {elements} and set them up for {object}.
Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCopyFastSmiOrObjectElements);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
@@ -2529,8 +2676,10 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Operator::Properties properties = Operator::kEliminatable;
Callable callable =
(flags & GrowFastElementsFlag::kDoubleElements)
- ? CodeFactory::GrowFastDoubleElements(isolate())
- : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kGrowFastDoubleElements)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kGrowFastSmiOrObjectElements);
CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
@@ -2554,7 +2703,7 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
// Update the "length" property of the {object}.
- __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
+ __ StoreField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), object,
object_length);
}
__ Goto(&done, done_grow.PhiAt(0));
@@ -2661,6 +2810,171 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
storage, index, value);
}
+void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
+ ElementsKind from,
+ ElementsKind to) {
+ DCHECK(IsMoreGeneralElementsKindTransition(from, to));
+ DCHECK(to == HOLEY_ELEMENTS || to == HOLEY_DOUBLE_ELEMENTS);
+
+ Handle<Map> target(to == HOLEY_ELEMENTS ? FastMapParameterOf(node->op())
+ : DoubleMapParameterOf(node->op()));
+ Node* target_map = __ HeapConstant(target);
+
+ if (IsSimpleMapChangeTransition(from, to)) {
+ __ StoreField(AccessBuilder::ForMap(), array, target_map);
+ } else {
+ // Instance migration, call out to the runtime for {array}.
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ __ Call(desc, __ CEntryStubConstant(1), array, target_map,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(2), __ NoContextConstant());
+ }
+}
+
+Node* EffectControlLinearizer::IsElementsKindGreaterThan(
+ Node* kind, ElementsKind reference_kind) {
+ Node* ref_kind = __ Int32Constant(reference_kind);
+ Node* ret = __ Int32LessThan(ref_kind, kind);
+ return ret;
+}
+
+void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
+ Node* array = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // Possibly transition array based on input and store.
+ //
+ // -- TRANSITION PHASE -----------------
+ // kind = ElementsKind(array)
+ // if value is not smi {
+ // if kind == HOLEY_SMI_ELEMENTS {
+ // if value is heap number {
+ // Transition array to HOLEY_DOUBLE_ELEMENTS
+ // kind = HOLEY_DOUBLE_ELEMENTS
+ // } else {
+ // Transition array to HOLEY_ELEMENTS
+ // kind = HOLEY_ELEMENTS
+ // }
+ // } else if kind == HOLEY_DOUBLE_ELEMENTS {
+ // if value is not heap number {
+ // Transition array to HOLEY_ELEMENTS
+ // kind = HOLEY_ELEMENTS
+ // }
+ // }
+ // }
+ //
+ // -- STORE PHASE ----------------------
+ // [make sure {kind} is up-to-date]
+ // if kind == HOLEY_DOUBLE_ELEMENTS {
+ // if value is smi {
+ // float_value = convert smi to float
+ // Store array[index] = float_value
+ // } else {
+ // float_value = value
+ // Store array[index] = float_value
+ // }
+ // } else {
+ // // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
+ // Store array[index] = value
+ // }
+ //
+ Node* map = __ LoadField(AccessBuilder::ForMap(), array);
+ Node* kind;
+ {
+ Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
+ Node* mask = __ Int32Constant(Map::ElementsKindBits::kMask);
+ Node* andit = __ Word32And(bit_field2, mask);
+ Node* shift = __ Int32Constant(Map::ElementsKindBits::kShift);
+ kind = __ Word32Shr(andit, shift);
+ }
+
+ auto do_store = __ MakeLabel<6>(MachineRepresentation::kWord32);
+ Node* check1 = ObjectIsSmi(value);
+ __ GotoIf(check1, &do_store, kind);
+ {
+ // {value} is a HeapObject.
+ Node* check2 = IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS);
+ auto if_array_not_fast_smi = __ MakeLabel<1>();
+ __ GotoIf(check2, &if_array_not_fast_smi);
+ {
+ // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS or
+ // to HOLEY_ELEMENTS.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* heap_number_map = __ HeapNumberMapConstant();
+ Node* check3 = __ WordEqual(value_map, heap_number_map);
+ auto if_value_not_heap_number = __ MakeLabel<1>();
+ __ GotoUnless(check3, &if_value_not_heap_number);
+ {
+ // {value} is a HeapNumber.
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
+ }
+ __ Bind(&if_value_not_heap_number);
+ {
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
+ }
+ }
+ __ Bind(&if_array_not_fast_smi);
+ {
+ Node* check3 = IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS);
+ __ GotoUnless(check3, &do_store, kind);
+ // We have double elements kind.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* heap_number_map = __ HeapNumberMapConstant();
+ Node* check4 = __ WordEqual(value_map, heap_number_map);
+ __ GotoIf(check4, &do_store, kind);
+ // But the value is not a heap number, so we must transition.
+ TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
+ }
+ }
+
+ // Make sure kind is up-to-date.
+ __ Bind(&do_store);
+ kind = do_store.PhiAt(0);
+
+ Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
+ Node* check2 = IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS);
+ auto if_kind_is_double = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>();
+ __ GotoIf(check2, &if_kind_is_double);
+ {
+ // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
+ __ StoreElement(AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS),
+ elements, index, value);
+ __ Goto(&done);
+ }
+ __ Bind(&if_kind_is_double);
+ {
+ // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
+ Node* check1 = ObjectIsSmi(value);
+ auto do_double_store = __ MakeLabel<1>();
+ __ GotoUnless(check1, &do_double_store);
+ {
+ Node* int_value = ChangeSmiToInt32(value);
+ Node* float_value = __ ChangeInt32ToFloat64(int_value);
+ __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
+ index, float_value);
+ __ Goto(&done);
+ }
+ __ Bind(&do_double_store);
+ {
+ Node* float_value =
+ __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
+ index, float_value);
+ __ Goto(&done);
+ }
+ }
+ __ Bind(&done);
+}
+
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundUp().IsSupported()) {
@@ -2987,6 +3301,29 @@ Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
return Just(done.PhiAt(0));
}
+Node* EffectControlLinearizer::LowerLookupHashStorageIndex(Node* node) {
+ Node* table = NodeProperties::GetValueInput(node, 0);
+ Node* key = NodeProperties::GetValueInput(node, 1);
+
+ {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kMapLookupHashIndex);
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), table, key,
+ __ NoContextConstant());
+ }
+}
+
+Node* EffectControlLinearizer::LowerLoadHashMapValue(Node* node) {
+ Node* table = NodeProperties::GetValueInput(node, 0);
+ Node* index = NodeProperties::GetValueInput(node, 1);
+ return __ LoadElement(AccessBuilder::ForFixedArrayElement(), table, index);
+}
+
#undef __
Factory* EffectControlLinearizer::factory() const {
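Aside: the new LowerTransitionAndStoreElement above encodes a small decision table — a Smi can be stored under any elements kind, a HeapNumber upgrades a HOLEY_SMI_ELEMENTS array to HOLEY_DOUBLE_ELEMENTS, and any other heap object forces HOLEY_ELEMENTS. A rough standalone C++ sketch of just that decision (KindAfterStore, ValueType and the enum numbering are illustrative stand-ins, not V8 types):

#include <cassert>
#include <iostream>

// Stand-in elements kinds; the numbering here is NOT V8's.
enum ElementsKind {
  HOLEY_SMI_ELEMENTS,     // only small integers stored so far
  HOLEY_DOUBLE_ELEMENTS,  // unboxed doubles
  HOLEY_ELEMENTS          // arbitrary tagged values
};

enum class ValueType { kSmi, kHeapNumber, kOtherHeapObject };

// Returns the elements kind the array must have after storing `value`,
// mirroring the branches in the lowering above.
ElementsKind KindAfterStore(ElementsKind kind, ValueType value) {
  if (value == ValueType::kSmi) return kind;  // a Smi fits in any kind
  if (kind == HOLEY_SMI_ELEMENTS) {
    // A heap number upgrades to double storage, anything else to tagged.
    return value == ValueType::kHeapNumber ? HOLEY_DOUBLE_ELEMENTS
                                           : HOLEY_ELEMENTS;
  }
  if (kind == HOLEY_DOUBLE_ELEMENTS && value != ValueType::kHeapNumber) {
    // A non-number heap object cannot live in a double array.
    return HOLEY_ELEMENTS;
  }
  return kind;  // already general enough, store as-is
}

int main() {
  assert(KindAfterStore(HOLEY_SMI_ELEMENTS, ValueType::kHeapNumber) ==
         HOLEY_DOUBLE_ELEMENTS);
  assert(KindAfterStore(HOLEY_SMI_ELEMENTS, ValueType::kOtherHeapObject) ==
         HOLEY_ELEMENTS);
  assert(KindAfterStore(HOLEY_DOUBLE_ELEMENTS, ValueType::kOtherHeapObject) ==
         HOLEY_ELEMENTS);
  std::cout << "transition table checks passed\n";
}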
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index bc18ff8162..3cde8d795d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -58,6 +58,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckSeqString(Node* node, Node* frame_state);
+ Node* LowerCheckNonEmptyString(Node* node, Node* frame_state);
+ Node* LowerCheckSymbol(Node* node, Node* frame_state);
Node* LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
@@ -96,6 +99,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
+ Node* LowerSeqStringCharCodeAt(Node* node);
+ Node* LowerStringToLowerCaseIntl(Node* node);
+ Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
@@ -103,7 +109,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
- Node* LowerCheckTaggedHole(Node* node, Node* frame_state);
+ Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
Node* LowerConvertTaggedHoleToUndefined(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
@@ -113,6 +119,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadTypedElement(Node* node);
void LowerStoreTypedElement(Node* node);
+ Node* LowerLookupHashStorageIndex(Node* node);
+ Node* LowerLoadHashMapValue(Node* node);
+ void LowerTransitionAndStoreElement(Node* node);
// Lowering of optional operators.
Maybe<Node*> LowerFloat64RoundUp(Node* node);
@@ -128,6 +137,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
Node* LowerStringComparison(Callable const& callable, Node* node);
+ Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeUint32ToSmi(Node* value);
@@ -136,6 +146,8 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
+ void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
+ ElementsKind to);
Factory* factory() const;
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 4373fa4c66..9bbabeb221 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -24,6 +24,8 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysis* escape_analysis, Zone* zone);
+ const char* reducer_name() const override { return "EscapeAnalysisReducer"; }
+
Reduction Reduce(Node* node) final;
void Finalize() override;
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 52935e0041..97710de6c5 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -833,7 +833,10 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kPlainPrimitiveToFloat64:
case IrOpcode::kStringCharAt:
case IrOpcode::kStringCharCodeAt:
+ case IrOpcode::kSeqStringCharCodeAt:
case IrOpcode::kStringIndexOf:
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl:
case IrOpcode::kObjectIsDetectableCallable:
case IrOpcode::kObjectIsNaN:
case IrOpcode::kObjectIsNonCallable:
@@ -857,13 +860,9 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
}
break;
default:
- if (use->op()->EffectInputCount() == 0 &&
- uses->op()->EffectInputCount() > 0 &&
- !IrOpcode::IsJsOpcode(use->opcode())) {
- V8_Fatal(__FILE__, __LINE__,
- "Encountered unaccounted use by #%d (%s)\n", use->id(),
- use->op()->mnemonic());
- }
+ DCHECK(use->op()->EffectInputCount() > 0 ||
+ uses->op()->EffectInputCount() == 0 ||
+ IrOpcode::IsJsOpcode(use->opcode()));
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
rep->id(), rep->op()->mnemonic(), use->id(),
@@ -1532,8 +1531,8 @@ void EscapeAnalysis::ProcessCheckMaps(Node* node) {
// CheckMapsValue operator that takes the load-eliminated map value as
// input.
if (value->opcode() == IrOpcode::kHeapConstant &&
- params.maps().contains(ZoneHandleSet<Map>(
- Handle<Map>::cast(OpParameter<Handle<HeapObject>>(value))))) {
+ params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
+ OpParameter<Handle<HeapObject>>(value))))) {
TRACE("CheckMaps #%i seems to be redundant (until now).\n",
node->id());
return;
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index ec014dac94..4031f38186 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -5,6 +5,10 @@
#include "src/compiler/frame-states.h"
#include "src/base/functional.h"
+#include "src/callable.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
@@ -13,20 +17,14 @@ namespace internal {
namespace compiler {
size_t hash_value(OutputFrameStateCombine const& sc) {
- return base::hash_combine(sc.kind_, sc.parameter_);
+ return base::hash_value(sc.parameter_);
}
std::ostream& operator<<(std::ostream& os, OutputFrameStateCombine const& sc) {
- switch (sc.kind_) {
- case OutputFrameStateCombine::kPushOutput:
- if (sc.parameter_ == 0) return os << "Ignore";
- return os << "Push(" << sc.parameter_ << ")";
- case OutputFrameStateCombine::kPokeAt:
- return os << "PokeAt(" << sc.parameter_ << ")";
- }
- UNREACHABLE();
- return os;
+ if (sc.parameter_ == OutputFrameStateCombine::kInvalidIndex)
+ return os << "Ignore";
+ return os << "PokeAt(" << sc.parameter_ << ")";
}
@@ -50,21 +48,21 @@ size_t hash_value(FrameStateInfo const& info) {
std::ostream& operator<<(std::ostream& os, FrameStateType type) {
switch (type) {
- case FrameStateType::kJavaScriptFunction:
- os << "JS_FRAME";
- break;
case FrameStateType::kInterpretedFunction:
os << "INTERPRETED_FRAME";
break;
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
break;
- case FrameStateType::kTailCallerFunction:
- os << "TAIL_CALLER_FRAME";
- break;
case FrameStateType::kConstructStub:
os << "CONSTRUCT_STUB";
break;
+ case FrameStateType::kBuiltinContinuation:
+ os << "BUILTIN_CONTINUATION_FRAME";
+ break;
+ case FrameStateType::kJavaScriptBuiltinContinuation:
+ os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
+ break;
case FrameStateType::kGetterStub:
os << "GETTER_STUB";
break;
@@ -86,6 +84,116 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
return os;
}
+namespace {
+Node* CreateBuiltinContinuationFrameStateCommon(
+ JSGraph* js_graph, Builtins::Name name, Node* context, Node** parameters,
+ int parameter_count, Node* outer_frame_state, Handle<JSFunction> function) {
+ Isolate* isolate = js_graph->isolate();
+ Graph* graph = js_graph->graph();
+ CommonOperatorBuilder* common = js_graph->common();
+
+ BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
+ Callable callable = Builtins::CallableFor(isolate, name);
+
+ const Operator* op_param =
+ common->StateValues(parameter_count, SparseInputMask::Dense());
+ Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
+
+ FrameStateType frame_type =
+ function.is_null() ? FrameStateType::kBuiltinContinuation
+ : FrameStateType::kJavaScriptBuiltinContinuation;
+ const FrameStateFunctionInfo* state_info =
+ common->CreateFrameStateFunctionInfo(
+ frame_type, parameter_count, 0,
+ function.is_null() ? Handle<SharedFunctionInfo>()
+ : Handle<SharedFunctionInfo>(function->shared()));
+ const Operator* op = common->FrameState(
+ bailout_id, OutputFrameStateCombine::Ignore(), state_info);
+
+ Node* function_node = function.is_null() ? js_graph->UndefinedConstant()
+ : js_graph->HeapConstant(function);
+
+ Node* frame_state = graph->NewNode(
+ op, params_node, js_graph->EmptyStateValues(),
+ js_graph->EmptyStateValues(), context, function_node, outer_frame_state);
+
+ return frame_state;
+}
+} // namespace
+
+Node* CreateStubBuiltinContinuationFrameState(JSGraph* js_graph,
+ Builtins::Name name,
+ Node* context, Node** parameters,
+ int parameter_count,
+ Node* outer_frame_state,
+ ContinuationFrameStateMode mode) {
+ Isolate* isolate = js_graph->isolate();
+ Callable callable = Builtins::CallableFor(isolate, name);
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+
+ std::vector<Node*> actual_parameters;
+ // Stack parameters first. If the deoptimization is LAZY, the final parameter
+ // is added by the deoptimizer and isn't explicitly passed in the frame state.
+ int stack_parameter_count =
+ descriptor.GetRegisterParameterCount() -
+ (mode == ContinuationFrameStateMode::LAZY ? 1 : 0);
+ for (int i = 0; i < stack_parameter_count; ++i) {
+ actual_parameters.push_back(
+ parameters[descriptor.GetRegisterParameterCount() + i]);
+ }
+ // Register parameters follow; the context will be added by the instruction
+ // selector during FrameState translation.
+ for (int i = 0; i < descriptor.GetRegisterParameterCount(); ++i) {
+ actual_parameters.push_back(parameters[i]);
+ }
+
+ return CreateBuiltinContinuationFrameStateCommon(
+ js_graph, name, context, actual_parameters.data(),
+ static_cast<int>(actual_parameters.size()), outer_frame_state,
+ Handle<JSFunction>());
+}
+
+Node* CreateJavaScriptBuiltinContinuationFrameState(
+ JSGraph* js_graph, Handle<JSFunction> function, Builtins::Name name,
+ Node* target, Node* context, Node** stack_parameters,
+ int stack_parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode) {
+ Isolate* isolate = js_graph->isolate();
+ Callable callable = Builtins::CallableFor(isolate, name);
+
+ // Lazy deopt points where the frame state is associated with a call get an
+ // additional parameter for the return result from the call that's added by
+ // the deoptimizer and not explicitly specified in the frame state. Check that
+ // there is not a mismatch between the number of frame state parameters and
+ // the stack parameters required by the builtin, taking this into account.
+ DCHECK_EQ(
+ Builtins::GetStackParameterCount(isolate, name) + 1, // add receiver
+ stack_parameter_count +
+ (mode == ContinuationFrameStateMode::EAGER ? 0 : 1));
+
+ Node* argc =
+ js_graph->Constant(stack_parameter_count -
+ (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
+
+ // Stack parameters first. They must be first because the receiver is expected
+ // to be the second value in the translation when creating stack crawls
+ // (e.g. Error.stack) of optimized JavaScript frames.
+ std::vector<Node*> actual_parameters;
+ for (int i = 0; i < stack_parameter_count; ++i) {
+ actual_parameters.push_back(stack_parameters[i]);
+ }
+
+ // Register parameters follow stack parameters. The context will be added by
+ // the instruction selector during FrameState translation.
+ actual_parameters.push_back(target);
+ actual_parameters.push_back(js_graph->UndefinedConstant());
+ actual_parameters.push_back(argc);
+
+ return CreateBuiltinContinuationFrameStateCommon(
+ js_graph, name, context, &actual_parameters[0],
+ static_cast<int>(actual_parameters.size()), outer_frame_state, function);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
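Aside: both continuation frame-state builders above flatten parameters in the same order — stack parameters first, then register parameters — with the context appended later by the instruction selector. A trivial illustrative sketch of that ordering, using ints in place of Node* (names are made up, not V8 API):

#include <cassert>
#include <vector>

// Flattens builtin-continuation parameters the way the builders above do:
// stack parameters first, then register parameters.
std::vector<int> FlattenParameters(const std::vector<int>& register_params,
                                   const std::vector<int>& stack_params) {
  std::vector<int> flat;
  flat.insert(flat.end(), stack_params.begin(), stack_params.end());
  flat.insert(flat.end(), register_params.begin(), register_params.end());
  return flat;
}

int main() {
  // Two register parameters (10, 11) and three stack parameters (20, 21, 22).
  std::vector<int> flat = FlattenParameters({10, 11}, {20, 21, 22});
  assert((flat == std::vector<int>{20, 21, 22, 10, 11}));
  return 0;
}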
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 0d0ec47f88..4e25fa026b 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -5,57 +5,43 @@
#ifndef V8_COMPILER_FRAME_STATES_H_
#define V8_COMPILER_FRAME_STATES_H_
+#include "src/builtins/builtins.h"
#include "src/handles.h"
+#include "src/objects/shared-function-info.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-// Forward declarations.
-class SharedFunctionInfo;
-
namespace compiler {
+class JSGraph;
+class Node;
+
// Flag that describes how to combine the current environment with
// the output of a node to obtain a framestate for lazy bailout.
class OutputFrameStateCombine {
public:
- enum Kind {
- kPushOutput, // Push the output on the expression stack.
- kPokeAt // Poke at the given environment location,
- // counting from the top of the stack.
- };
+ static const size_t kInvalidIndex = SIZE_MAX;
static OutputFrameStateCombine Ignore() {
- return OutputFrameStateCombine(kPushOutput, 0);
- }
- static OutputFrameStateCombine Push(size_t count = 1) {
- return OutputFrameStateCombine(kPushOutput, count);
+ return OutputFrameStateCombine(kInvalidIndex);
}
static OutputFrameStateCombine PokeAt(size_t index) {
- return OutputFrameStateCombine(kPokeAt, index);
+ return OutputFrameStateCombine(index);
}
- Kind kind() const { return kind_; }
- size_t GetPushCount() const {
- DCHECK_EQ(kPushOutput, kind());
- return parameter_;
- }
size_t GetOffsetToPokeAt() const {
- DCHECK_EQ(kPokeAt, kind());
+ DCHECK_NE(parameter_, kInvalidIndex);
return parameter_;
}
- bool IsOutputIgnored() const {
- return kind_ == kPushOutput && parameter_ == 0;
- }
+ bool IsOutputIgnored() const { return parameter_ == kInvalidIndex; }
- size_t ConsumedOutputCount() const {
- return kind_ == kPushOutput ? GetPushCount() : 1;
- }
+ size_t ConsumedOutputCount() const { return IsOutputIgnored() ? 0 : 1; }
bool operator==(OutputFrameStateCombine const& other) const {
- return kind_ == other.kind_ && parameter_ == other.parameter_;
+ return parameter_ == other.parameter_;
}
bool operator!=(OutputFrameStateCombine const& other) const {
return !(*this == other);
@@ -66,23 +52,22 @@ class OutputFrameStateCombine {
OutputFrameStateCombine const&);
private:
- OutputFrameStateCombine(Kind kind, size_t parameter)
- : kind_(kind), parameter_(parameter) {}
+ explicit OutputFrameStateCombine(size_t parameter) : parameter_(parameter) {}
- Kind const kind_;
size_t const parameter_;
};
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
kInterpretedFunction, // Represents an InterpretedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
- kTailCallerFunction, // Represents a frame removed by tail call elimination.
kConstructStub, // Represents a ConstructStubFrame.
kGetterStub, // Represents a GetterStubFrame.
- kSetterStub // Represents a SetterStubFrame.
+ kSetterStub, // Represents a SetterStubFrame.
+ kBuiltinContinuation, // Represents a continuation to a stub.
+ kJavaScriptBuiltinContinuation // Represents a continuation to a JavaScript
+ // builtin.
};
class FrameStateFunctionInfo {
@@ -101,8 +86,8 @@ class FrameStateFunctionInfo {
FrameStateType type() const { return type_; }
static bool IsJSFunctionType(FrameStateType type) {
- return type == FrameStateType::kJavaScriptFunction ||
- type == FrameStateType::kInterpretedFunction;
+ return type == FrameStateType::kInterpretedFunction ||
+ type == FrameStateType::kJavaScriptBuiltinContinuation;
}
private:
@@ -122,7 +107,7 @@ class FrameStateInfo final {
info_(info) {}
FrameStateType type() const {
- return info_ == nullptr ? FrameStateType::kJavaScriptFunction
+ return info_ == nullptr ? FrameStateType::kInterpretedFunction
: info_->type();
}
BailoutId bailout_id() const { return bailout_id_; }
@@ -160,6 +145,21 @@ static const int kFrameStateFunctionInput = 4;
static const int kFrameStateOuterStateInput = 5;
static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
+enum class ContinuationFrameStateMode { EAGER, LAZY };
+
+Node* CreateStubBuiltinContinuationFrameState(JSGraph* graph,
+ Builtins::Name name,
+ Node* context, Node** parameters,
+ int parameter_count,
+ Node* outer_frame_state,
+ ContinuationFrameStateMode mode);
+
+Node* CreateJavaScriptBuiltinContinuationFrameState(
+ JSGraph* graph, Handle<JSFunction> function, Builtins::Name name,
+ Node* target, Node* context, Node** stack_parameters,
+ int stack_parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode);
+
} // namespace compiler
} // namespace internal
} // namespace v8
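Aside: the reworked OutputFrameStateCombine above drops the Kind enum entirely — "ignore the output" is now encoded as a reserved sentinel index. A minimal standalone sketch of the same encoding (a toy class, not the V8 one):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Sentinel-index encoding in the style of the class above:
// SIZE_MAX means "ignore the output", any other value means "poke at index".
class Combine {
 public:
  static constexpr size_t kInvalidIndex = SIZE_MAX;

  static Combine Ignore() { return Combine(kInvalidIndex); }
  static Combine PokeAt(size_t index) { return Combine(index); }

  bool IsOutputIgnored() const { return index_ == kInvalidIndex; }
  size_t GetOffsetToPokeAt() const {
    assert(index_ != kInvalidIndex);  // only valid for PokeAt combines
    return index_;
  }
  size_t ConsumedOutputCount() const { return IsOutputIgnored() ? 0 : 1; }

 private:
  explicit Combine(size_t index) : index_(index) {}
  size_t index_;
};

int main() {
  assert(Combine::Ignore().IsOutputIgnored());
  assert(Combine::PokeAt(3).GetOffsetToPokeAt() == 3);
  assert(Combine::PokeAt(0).ConsumedOutputCount() == 1);
  assert(Combine::Ignore().ConsumedOutputCount() == 0);
}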
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index be90a33a21..05131abeea 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -102,7 +102,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
}
if (!kSimpleFPAliasing) {
- if (reps && !base::bits::IsPowerOfTwo32(reps)) {
+ if (reps && !base::bits::IsPowerOfTwo(reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves.
if ((reps & kFloat32Bit) != 0) {
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 12746c2b13..a91b83a035 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -224,7 +224,8 @@ void GraphAssembler::Reset(Node* effect, Node* control) {
Operator const* GraphAssembler::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
- Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ Callable callable =
+ Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index cf4d9154e4..faf01e9d9e 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -89,11 +89,22 @@ Reduction GraphReducer::Reduce(Node* const node) {
// {replacement} == {node} represents an in-place reduction. Rerun
// all the other reducers for this node, as now there may be more
// opportunities for reduction.
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- In-place update of " << *node << " by reducer "
+ << (*i)->reducer_name() << std::endl;
+ }
skip = i;
i = reducers_.begin();
continue;
} else {
// {node} was replaced by another node.
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- Replacement of " << *node << " with "
+ << *(reduction.replacement()) << " by reducer "
+ << (*i)->reducer_name() << std::endl;
+ }
return reduction;
}
}
@@ -146,10 +157,6 @@ void GraphReducer::ReduceTop() {
// Check if the reduction is an in-place update of the {node}.
Node* const replacement = reduction.replacement();
if (replacement == node) {
- if (FLAG_trace_turbo_reduction) {
- OFStream os(stdout);
- os << "- In-place update of " << *replacement << std::endl;
- }
// In-place update of {node}, may need to recurse on an input.
Node::Inputs node_inputs = node->inputs();
for (int i = 0; i < node_inputs.count(); ++i) {
@@ -183,10 +190,6 @@ void GraphReducer::Replace(Node* node, Node* replacement) {
void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
- if (FLAG_trace_turbo_reduction) {
- OFStream os(stdout);
- os << "- Replacing " << *node << " with " << *replacement << std::endl;
- }
if (node == graph()->start()) graph()->SetStart(replacement);
if (node == graph()->end()) graph()->SetEnd(replacement);
if (replacement->id() <= max_id) {
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index d271881872..517f71e955 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -46,6 +46,9 @@ class V8_EXPORT_PRIVATE Reducer {
public:
virtual ~Reducer() {}
+ // Only used for tracing, when using the --trace_turbo_reduction flag.
+ virtual const char* reducer_name() const = 0;
+
// Try to reduce a node if possible.
virtual Reduction Reduce(Node* node) = 0;
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 60af4789bc..3c5c9c4de8 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_
+#include <array>
+
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -62,58 +64,11 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Node* NewNode(const Operator* op, int input_count, Node* const* inputs,
bool incomplete = false);
- // Factories for nodes with static input counts.
- Node* NewNode(const Operator* op) {
- return NewNode(op, 0, static_cast<Node* const*>(nullptr));
- }
- Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* nodes[] = {n1, n2};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* nodes[] = {n1, n2, n3};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* nodes[] = {n1, n2, n3, n4};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5) {
- Node* nodes[] = {n1, n2, n3, n4, n5};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11};
- return NewNode(op, arraysize(nodes), nodes);
+ // Factory template for nodes with static input counts.
+ template <typename... Nodes>
+ Node* NewNode(const Operator* op, Nodes*... nodes) {
+ std::array<Node*, sizeof...(nodes)> nodes_arr{{nodes...}};
+ return NewNode(op, nodes_arr.size(), nodes_arr.data());
}
// Clone the {node}, and assign a new node id to the copy.
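Aside: the graph.h change above replaces eleven fixed-arity NewNode overloads with one variadic template that packs its arguments into a std::array and forwards to the count+pointer overload. A self-contained sketch of that pattern (Operator and Node here are dummies, not V8's classes):

#include <array>
#include <cstddef>
#include <iostream>

// Dummy stand-ins for compiler::Operator and compiler::Node.
struct Operator {};
struct Node {};

// The single count+pointer factory that all calls funnel into.
Node* NewNode(const Operator* op, size_t input_count, Node* const* inputs) {
  (void)op;
  (void)inputs;
  std::cout << "NewNode with " << input_count << " inputs\n";
  return nullptr;  // a real graph would allocate a node here
}

// Variadic wrapper in the style of the new Graph::NewNode: the Node*
// arguments are packed into a stack array and forwarded above.
template <typename... Nodes>
Node* NewNode(const Operator* op, Nodes*... nodes) {
  std::array<Node*, sizeof...(nodes)> nodes_arr{{nodes...}};
  return NewNode(op, nodes_arr.size(), nodes_arr.data());
}

int main() {
  Operator op;
  Node a, b, c;
  NewNode(&op, &a, &b, &c);  // prints: NewNode with 3 inputs
  NewNode(&op, &a);          // prints: NewNode with 1 inputs
}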
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index dabdab3810..2c20ae9ddb 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
#define kScratchDoubleReg xmm0
@@ -74,11 +73,9 @@ class IA32OperandConverter : public InstructionOperandConverter {
case Constant::kInt32:
return Immediate(constant.ToInt32());
case Constant::kFloat32:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Immediate::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Immediate::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
@@ -89,7 +86,6 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate::CodeRelativeOffset(ToLabel(operand));
}
UNREACHABLE();
- return Immediate(-1);
}
static size_t NextOffset(size_t* offset) {
@@ -165,10 +161,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
}
case kMode_None:
UNREACHABLE();
- return Operand(no_reg, 0);
}
UNREACHABLE();
- return Operand(no_reg, 0);
}
Operand MemoryOperand(size_t first_input = 0) {
@@ -226,18 +220,22 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
XMMRegister input)
- : OutOfLineCode(gen), result_(result), input_(input) {}
+ : OutOfLineCode(gen),
+ result_(result),
+ input_(input),
+ zone_(gen->zone()) {}
void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
- __ SlowTruncateToI(result_, esp, 0);
+ __ SlowTruncateToIDelayed(zone_, result_, esp, 0);
__ add(esp, Immediate(kDoubleSize));
}
private:
Register const result_;
XMMRegister const input_;
+ Zone* zone_;
};
@@ -252,7 +250,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -266,10 +265,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ lea(scratch1_, operand_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
}
private:
@@ -279,6 +278,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ Zone* zone_;
};
} // namespace
@@ -729,35 +729,35 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- /* Pass two doubles as arguments on the stack. */ \
- __ PrepareCallCFunction(4, eax); \
- __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
- __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 4); \
- /* Return value is in st(0) on ia32. */ \
- /* Store it into the result register. */ \
- __ sub(esp, Immediate(kDoubleSize)); \
- __ fstp_d(Operand(esp, 0)); \
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
- __ add(esp, Immediate(kDoubleSize)); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* Pass two doubles as arguments on the stack. */ \
+ __ PrepareCallCFunction(4, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 4); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
} while (false)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- /* Pass one double as argument on the stack. */ \
- __ PrepareCallCFunction(2, eax); \
- __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 2); \
- /* Return value is in st(0) on ia32. */ \
- /* Store it into the result register. */ \
- __ sub(esp, Immediate(kDoubleSize)); \
- __ fstp_d(Operand(esp, 0)); \
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
- __ add(esp, Immediate(kDoubleSize)); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
} while (false)
#define ASSEMBLE_BINOP(asm_instr) \
@@ -839,7 +839,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -847,10 +847,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+ tasm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+ tasm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -871,7 +871,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@@ -889,13 +889,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -909,7 +909,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -927,7 +927,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
no_reg, no_reg, no_reg);
}
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -1139,8 +1139,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movaps(xmm1, xmm2);
__ movaps(xmm2, xmm0);
}
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ movaps(i.OutputDoubleRegister(), xmm3);
break;
}
@@ -1375,7 +1375,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat32Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1553,7 +1553,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1630,25 +1630,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
@@ -1657,25 +1657,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
@@ -1687,7 +1687,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1695,7 +1695,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1703,7 +1703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1711,7 +1711,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1897,38 +1897,92 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
- __ movd(dst, i.InputOperand(0));
- __ pshufd(dst, dst, 0x0);
+ __ Movd(dst, i.InputOperand(0));
+ __ Pshufd(dst, dst, 0x0);
break;
}
case kIA32I32x4ExtractLane: {
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kIA32I32x4ReplaceLane: {
- __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ case kSSEI32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
- case kSSEI32x4Add: {
- __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kAVXI32x4ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpinsrd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1));
break;
}
- case kSSEI32x4Sub: {
- __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kSSEI32x4Add: {
+ __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
+ case kSSEI32x4Sub: {
+ __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
case kAVXI32x4Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
+ case kIA32I16x8Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Movd(dst, i.InputOperand(0));
+ __ Pshuflw(dst, dst, 0x0);
+ __ Pshufd(dst, dst, 0x0);
+ break;
+ }
+ case kIA32I16x8ExtractLane: {
+ Register dst = i.OutputRegister();
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsx_w(dst, dst);
+ break;
+ }
+ case kSSEI16x8ReplaceLane: {
+ __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kAVXI16x8ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpinsrw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kIA32I8x16Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Movd(dst, i.InputOperand(0));
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
+ break;
+ }
+ case kIA32I8x16ExtractLane: {
+ Register dst = i.OutputRegister();
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsx_b(dst, dst);
+ break;
+ }
+ case kSSEI8x16ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kAVXI8x16ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpinsrb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1967,7 +2021,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIA32StackCheck: {
ExternalReference const stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
+ ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
@@ -2115,7 +2169,6 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
default:
UNREACHABLE();
- return no_condition;
break;
}
}
@@ -2174,22 +2227,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0, esi);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- __ ud2();
- }
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
}
}
@@ -2284,9 +2335,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2464,11 +2515,45 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (shrink_slots > 0) {
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ Register scratch = esi;
+ __ push(scratch);
+ __ mov(scratch,
+ Immediate(ExternalReference::address_of_real_stack_limit(
+ __ isolate())));
+ __ mov(scratch, Operand(scratch, 0));
+ __ add(scratch, Immediate(shrink_slots * kPointerSize));
+ __ cmp(esp, scratch);
+ __ pop(scratch);
+ __ j(above_equal, &done);
+ }
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ __ Move(esi, Smi::kZero);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ bind(&done);
+ }
__ sub(esp, Immediate(shrink_slots * kPointerSize));
}
@@ -2561,17 +2646,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src = src_constant.ToHeapObject();
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ LoadHeapObject(dst, src);
+ __ Move(dst, src);
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- AllowDeferredHandleDereference embedding_raw_address;
- if (isolate()->heap()->InNewSpace(*src)) {
- __ PushHeapObject(src);
- __ pop(dst);
- } else {
- __ mov(dst, src);
- }
+ __ mov(dst, src);
}
} else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
@@ -2592,7 +2671,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = src_constant.ToFloat64AsInt();
+ uint64_t src = src_constant.ToFloat64().AsUint64();
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsFPRegister()) {
@@ -2771,7 +2850,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
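Aside: the kIA32I32x4Splat sequence above broadcasts a scalar with Movd followed by Pshufd with an all-zero shuffle mask. The same idea expressed directly with SSE2 intrinsics, outside the code generator (illustrative only; requires an x86 target):

#include <emmintrin.h>  // SSE2 intrinsics
#include <cassert>
#include <cstdint>

// Broadcast a 32-bit value to all four lanes the way the generated
// I32x4Splat sequence does: movd into lane 0, then pshufd with mask 0.
static __m128i SplatInt32(int32_t x) {
  __m128i v = _mm_cvtsi32_si128(x);  // movd dst, src
  return _mm_shuffle_epi32(v, 0x0);  // pshufd dst, dst, 0
}

int main() {
  __m128i v = SplatInt32(42);
  alignas(16) int32_t lanes[4];
  _mm_store_si128(reinterpret_cast<__m128i*>(lanes), v);
  for (int i = 0; i < 4; ++i) assert(lanes[i] == 42);
  return 0;
}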
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 8bdfd0988d..67c141ebce 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -113,11 +113,20 @@ namespace compiler {
V(IA32StackCheck) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
- V(IA32I32x4ReplaceLane) \
+ V(SSEI32x4ReplaceLane) \
+ V(AVXI32x4ReplaceLane) \
V(SSEI32x4Add) \
- V(SSEI32x4Sub) \
V(AVXI32x4Add) \
- V(AVXI32x4Sub)
+ V(SSEI32x4Sub) \
+ V(AVXI32x4Sub) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLane) \
+ V(SSEI16x8ReplaceLane) \
+ V(AVXI16x8ReplaceLane) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLane) \
+ V(SSEI8x16ReplaceLane) \
+ V(AVXI8x16ReplaceLane)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 68db94fcff..9286e0febc 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -99,11 +99,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32BitcastIF:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
- case kIA32I32x4ReplaceLane:
+ case kSSEI32x4ReplaceLane:
+ case kAVXI32x4ReplaceLane:
case kSSEI32x4Add:
- case kSSEI32x4Sub:
case kAVXI32x4Add:
+ case kSSEI32x4Sub:
case kAVXI32x4Sub:
+ case kIA32I16x8Splat:
+ case kIA32I16x8ExtractLane:
+ case kSSEI16x8ReplaceLane:
+ case kAVXI16x8ReplaceLane:
+ case kIA32I8x16Splat:
+ case kIA32I8x16ExtractLane:
+ case kSSEI8x16ReplaceLane:
+ case kAVXI8x16ReplaceLane:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -111,8 +120,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Idiv:
case kIA32Udiv:
return (instr->addressing_mode() == kMode_None)
- ? kMayNeedDeoptCheck
- : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+ ? kMayNeedDeoptOrTrapCheck
+ : kMayNeedDeoptOrTrapCheck | kIsLoadOperation | kHasSideEffect;
case kIA32Movsxbl:
case kIA32Movzxbl:
@@ -143,7 +152,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index dccfced9e1..caf7abcbfc 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -247,9 +247,6 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -340,9 +337,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -410,9 +404,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -486,9 +477,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -900,9 +888,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
- V(I32x4Add, kAVXI32x4Add, kSSEI32x4Add) \
- V(I32x4Sub, kAVXI32x4Sub, kSSEI32x4Sub)
+ V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
#define FLOAT_UNOP_LIST(V) \
V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
@@ -1542,6 +1528,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -1549,7 +1536,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
@@ -1905,24 +1893,55 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-void InstructionSelector::VisitI32x4Splat(Node* node) {
- VisitRO(this, node, kIA32I32x4Splat);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
- IA32OperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kIA32I32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
- IA32OperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kIA32I32x4ReplaceLane, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
- g.Use(node->InputAt(1)));
-}
+#define SIMD_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_BINOP_LIST(V) \
+ V(I32x4Add) \
+ V(I32x4Sub)
+
+#define VISIT_SIMD_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRO(this, node, kIA32##Type##Splat); \
+ }
+SIMD_TYPES(VISIT_SIMD_SPLAT)
+#undef VISIT_SIMD_SPLAT
+
+#define VISIT_SIMD_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node); \
+ Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
+ }
+SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+#undef VISIT_SIMD_EXTRACT_LANE
+
+#define VISIT_SIMD_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
+ InstructionOperand operand2 = g.Use(node->InputAt(1)); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
+ operand1, operand2); \
+ } else { \
+ Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
+ operand1, operand2); \
+ } \
+ }
+SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+#undef VISIT_SIMD_REPLACE_LANE
+
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROFloat(this, node, kAVX##Opcode, kSSE##Opcode); \
+ }
+SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
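Aside: the switch-lowering hunk above adds kMaxTableSwitchValueRange as an extra guard on the existing space/time comparison between a jump table and a linear lookup. A rough standalone sketch of that heuristic; the formulas are copied from the visible lines, while lookup_time_cost is not shown in the hunk and is therefore passed in as an assumed parameter:

#include <cstdint>
#include <iostream>
#include <limits>

// Rough sketch of the table-vs-lookup switch decision above. Inputs mirror
// the SwitchInfo fields; lookup_time_cost is a caller-supplied assumption.
bool UseTableSwitch(size_t case_count, size_t value_range, int64_t min_value,
                    size_t lookup_time_cost) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  return case_count > 4 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  // Dense switch: small value range, many cases -> jump table (prints 1).
  std::cout << UseTableSwitch(10, 10, 0, /*lookup_time_cost=*/13) << "\n";
  // Sparse switch: range exceeds the new cap -> linear lookup (prints 0).
  std::cout << UseTableSwitch(10, size_t{1} << 20, 0, 13) << "\n";
}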
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index d4e0449ad9..df7a03163d 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -23,8 +23,6 @@
#include "src/compiler/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/s390/instruction-codes-s390.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index cb3c2d66c6..e311abb2a2 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -77,7 +77,6 @@ void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
node->unscheduled_predecessors_count_++;
}
-
InstructionScheduler::InstructionScheduler(Zone* zone,
InstructionSequence* sequence)
: zone_(zone),
@@ -86,16 +85,15 @@ InstructionScheduler::InstructionScheduler(Zone* zone,
last_side_effect_instr_(nullptr),
pending_loads_(zone),
last_live_in_reg_marker_(nullptr),
- last_deopt_(nullptr),
+ last_deopt_or_trap_(nullptr),
operands_map_(zone) {}
-
void InstructionScheduler::StartBlock(RpoNumber rpo) {
DCHECK(graph_.empty());
DCHECK(last_side_effect_instr_ == nullptr);
DCHECK(pending_loads_.empty());
DCHECK(last_live_in_reg_marker_ == nullptr);
- DCHECK(last_deopt_ == nullptr);
+ DCHECK(last_deopt_or_trap_ == nullptr);
DCHECK(operands_map_.empty());
sequence()->StartBlock(rpo);
}
@@ -112,7 +110,7 @@ void InstructionScheduler::EndBlock(RpoNumber rpo) {
last_side_effect_instr_ = nullptr;
pending_loads_.clear();
last_live_in_reg_marker_ = nullptr;
- last_deopt_ = nullptr;
+ last_deopt_or_trap_ = nullptr;
operands_map_.clear();
}
@@ -137,9 +135,9 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
// Make sure that instructions are not scheduled before the last
- // deoptimization point when they depend on it.
- if ((last_deopt_ != nullptr) && DependsOnDeoptimization(instr)) {
- last_deopt_->AddSuccessor(new_node);
+ // deoptimization or trap point when they depend on it.
+ if ((last_deopt_or_trap_ != nullptr) && DependsOnDeoptOrTrap(instr)) {
+ last_deopt_or_trap_->AddSuccessor(new_node);
}
// Instructions with side effects and memory operations can't be
@@ -160,13 +158,13 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
- } else if (instr->IsDeoptimizeCall()) {
- // Ensure that deopts are not reordered with respect to side-effect
- // instructions.
+ } else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
+ // Ensure that deopts or traps are not reordered with respect to
+ // side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
- last_deopt_ = new_node;
+ last_deopt_or_trap_ = new_node;
}
// Look for operand dependencies.
@@ -244,7 +242,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchParentFramePointer:
case kArchTruncateDoubleToI:
case kArchStackSlot:
- case kArchDebugBreak:
case kArchComment:
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
@@ -292,6 +289,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchLookupSwitch:
case kArchTableSwitch:
case kArchRet:
+ case kArchDebugBreak:
case kArchThrowTerminator:
return kIsBlockTerminator;
@@ -370,7 +368,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
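The instruction-scheduler.cc changes above widen the existing deoptimization barrier to also cover traps: the scheduler remembers the last deopt or trap point and gives it a successor edge to every later instruction that must not float above it. A minimal sketch of that rule, assuming a toy node/scheduler shape rather than V8's classes:

    #include <vector>

    struct Node {
      bool is_load = false;
      bool has_side_effect = false;
      bool is_deopt_or_trap = false;
      std::vector<Node*> successors;  // nodes that must be scheduled after this one
    };

    struct Scheduler {
      Node* last_deopt_or_trap_ = nullptr;

      void Add(Node* n) {
        // Loads (conservatively), side-effecting instructions and other
        // deopts/traps may not be hoisted above the last deopt/trap point.
        bool depends = n->is_load || n->has_side_effect || n->is_deopt_or_trap;
        if (last_deopt_or_trap_ != nullptr && depends) {
          last_deopt_or_trap_->successors.push_back(n);
        }
        if (n->is_deopt_or_trap) last_deopt_or_trap_ = n;
      }
    };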
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index 7660520b6d..db2894a92a 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -21,10 +21,11 @@ enum ArchOpcodeFlags {
kHasSideEffect = 2, // The instruction has some side effects (memory
// store, function call...)
kIsLoadOperation = 4, // The instruction is a memory load.
- kMayNeedDeoptCheck = 8, // The instruction might be associated with a deopt
- // check. This is the case of instruction which can
- // blow up with particular inputs (e.g.: division by
- // zero on Intel platforms).
+ kMayNeedDeoptOrTrapCheck = 8, // The instruction may be associated with a
+ // deopt or trap check that must run before the
+ // instruction, e.g. a div on Intel platforms,
+ // which raises an exception when the divisor
+ // is zero.
};
class InstructionScheduler final : public ZoneObject {
@@ -166,17 +167,22 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
- // Return true if this instruction is usually associated with a deopt check
- // to validate its input.
- bool MayNeedDeoptCheck(const Instruction* instr) const {
- return (GetInstructionFlags(instr) & kMayNeedDeoptCheck) != 0;
+ // The scheduler will not move the following instructions before the last
+ // deopt/trap check:
+ // * loads (this is conservative)
+ // * instructions with side effect
+ // * other deopts/traps
+ // Any other instruction can be moved, apart from those that raise exceptions
+ // on specific inputs - these are filtered out by the deopt/trap check.
+ bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
+ return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
}
- // Return true if the instruction cannot be moved before the last deopt
- // point we encountered.
- bool DependsOnDeoptimization(const Instruction* instr) const {
- return MayNeedDeoptCheck(instr) || instr->IsDeoptimizeCall() ||
- HasSideEffect(instr) || IsLoadOperation(instr);
+ // Return true if the instruction cannot be moved before the last deopt or
+ // trap point we encountered.
+ bool DependsOnDeoptOrTrap(const Instruction* instr) const {
+ return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
+ instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
@@ -217,8 +223,9 @@ class InstructionScheduler final : public ZoneObject {
// other instructions in the basic block.
ScheduleGraphNode* last_live_in_reg_marker_;
- // Last deoptimization instruction encountered while building the graph.
- ScheduleGraphNode* last_deopt_;
+ // Last deoptimization or trap instruction encountered while building the
+ // graph.
+ ScheduleGraphNode* last_deopt_or_trap_;
// Keep track of definition points for virtual registers. This is used to
// record operand dependencies in the scheduling graph.
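The header comments above describe the classification that feeds that barrier. A small sketch of the flag test, reusing the flag values shown in the enum above (the kNoOpcodeFlags value and the free-function form are assumptions for illustration):

    #include <cstdint>

    enum ArchOpcodeFlags : uint32_t {
      kNoOpcodeFlags = 0,           // assumed value
      kHasSideEffect = 2,
      kIsLoadOperation = 4,
      kMayNeedDeoptOrTrapCheck = 8  // e.g. integer division on Intel, which
                                    // faults when the divisor is zero
    };

    inline bool MayNeedDeoptOrTrapCheck(uint32_t flags) {
      return (flags & kMayNeedDeoptOrTrapCheck) != 0;
    }

    // Mirrors DependsOnDeoptOrTrap: anything that may fault, deopt, trap,
    // have a side effect or load from memory stays behind the last barrier.
    inline bool DependsOnDeoptOrTrap(uint32_t flags, bool is_deopt, bool is_trap) {
      return MayNeedDeoptOrTrapCheck(flags) || is_deopt || is_trap ||
             (flags & kHasSideEffect) != 0 || (flags & kIsLoadOperation) != 0;
    }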
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index ecda453351..8334d1751a 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -255,7 +255,6 @@ class OperandGenerator {
break;
}
UNREACHABLE();
- return Constant(static_cast<int32_t>(0));
}
static Constant ToNegatedConstant(const Node* node) {
@@ -268,7 +267,6 @@ class OperandGenerator {
break;
}
UNREACHABLE();
- return Constant(static_cast<int32_t>(0));
}
UnallocatedOperand Define(Node* node, UnallocatedOperand operand) {
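The two deleted `return Constant(static_cast<int32_t>(0));` statements above follow a pattern repeated throughout this commit: dead returns after UNREACHABLE() are dropped, which relies on the macro ending in a call the compiler knows does not return. A minimal sketch of the idea (Unreachable() here is a stand-in, not the V8 macro):

    #include <cstdlib>

    [[noreturn]] static void Unreachable() { std::abort(); }

    static int ToExampleCode(int kind) {
      switch (kind) {
        case 0: return 10;
        case 1: return 11;
      }
      Unreachable();  // no dummy "return 0;" needed after this
    }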
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 1d07799511..813372881e 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -302,10 +302,11 @@ int InstructionSelector::GetRename(int virtual_register) {
void InstructionSelector::TryRename(InstructionOperand* op) {
if (!op->IsUnallocated()) return;
- int vreg = UnallocatedOperand::cast(op)->virtual_register();
+ UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
+ int vreg = unalloc->virtual_register();
int rename = GetRename(vreg);
if (rename != vreg) {
- UnallocatedOperand::cast(op)->set_virtual_register(rename);
+ *unalloc = UnallocatedOperand(*unalloc, rename);
}
}
@@ -471,7 +472,6 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
}
}
UNREACHABLE();
- return InstructionOperand();
}
} // namespace
@@ -526,7 +526,6 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
case IrOpcode::kObjectState: {
UNREACHABLE();
- return 0;
}
case IrOpcode::kTypedObjectState: {
size_t id = deduplicator->GetObjectId(input);
@@ -598,8 +597,7 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
DCHECK_EQ(values_descriptor->size(), 0u);
- values_descriptor->ReserveSize(
- descriptor->GetSize(OutputFrameStateCombine::Ignore()));
+ values_descriptor->ReserveSize(descriptor->GetSize());
entries += AddOperandToStateValueDescriptor(
values_descriptor, inputs, g, deduplicator, function,
@@ -767,10 +765,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
while (buffer->frame_state_descriptor != nullptr &&
- (buffer->frame_state_descriptor->type() ==
- FrameStateType::kArgumentsAdaptor ||
- buffer->frame_state_descriptor->type() ==
- FrameStateType::kTailCallerFunction)) {
+ buffer->frame_state_descriptor->type() ==
+ FrameStateType::kArgumentsAdaptor) {
frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
@@ -982,8 +978,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
DCHECK_LE(sw.min_value, sw.max_value);
// Note that {value_range} can be 0 if {min_value} is -2^31 and
- // {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
+ // {max_value} is 2^31-1, so don't assume that it's non-zero below.
sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
bit_cast<uint32_t>(sw.min_value);
return VisitSwitch(input, sw);
@@ -1525,13 +1520,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kF32x4Max:
return MarkAsSimd128(node), VisitF32x4Max(node);
case IrOpcode::kF32x4Eq:
- return MarkAsSimd1x4(node), VisitF32x4Eq(node);
+ return MarkAsSimd128(node), VisitF32x4Eq(node);
case IrOpcode::kF32x4Ne:
- return MarkAsSimd1x4(node), VisitF32x4Ne(node);
+ return MarkAsSimd128(node), VisitF32x4Ne(node);
case IrOpcode::kF32x4Lt:
- return MarkAsSimd1x4(node), VisitF32x4Lt(node);
+ return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
- return MarkAsSimd1x4(node), VisitF32x4Le(node);
+ return MarkAsSimd128(node), VisitF32x4Le(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -1563,13 +1558,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kI32x4MaxS:
return MarkAsSimd128(node), VisitI32x4MaxS(node);
case IrOpcode::kI32x4Eq:
- return MarkAsSimd1x4(node), VisitI32x4Eq(node);
+ return MarkAsSimd128(node), VisitI32x4Eq(node);
case IrOpcode::kI32x4Ne:
- return MarkAsSimd1x4(node), VisitI32x4Ne(node);
- case IrOpcode::kI32x4LtS:
- return MarkAsSimd1x4(node), VisitI32x4LtS(node);
- case IrOpcode::kI32x4LeS:
- return MarkAsSimd1x4(node), VisitI32x4LeS(node);
+ return MarkAsSimd128(node), VisitI32x4Ne(node);
+ case IrOpcode::kI32x4GtS:
+ return MarkAsSimd128(node), VisitI32x4GtS(node);
+ case IrOpcode::kI32x4GeS:
+ return MarkAsSimd128(node), VisitI32x4GeS(node);
case IrOpcode::kI32x4UConvertF32x4:
return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
case IrOpcode::kI32x4UConvertI16x8Low:
@@ -1582,10 +1577,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4MinU(node);
case IrOpcode::kI32x4MaxU:
return MarkAsSimd128(node), VisitI32x4MaxU(node);
- case IrOpcode::kI32x4LtU:
- return MarkAsSimd1x4(node), VisitI32x4LtU(node);
- case IrOpcode::kI32x4LeU:
- return MarkAsSimd1x4(node), VisitI32x4LeU(node);
+ case IrOpcode::kI32x4GtU:
+ return MarkAsSimd128(node), VisitI32x4GtU(node);
+ case IrOpcode::kI32x4GeU:
+ return MarkAsSimd128(node), VisitI32x4GeU(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLane:
@@ -1621,13 +1616,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kI16x8MaxS:
return MarkAsSimd128(node), VisitI16x8MaxS(node);
case IrOpcode::kI16x8Eq:
- return MarkAsSimd1x8(node), VisitI16x8Eq(node);
+ return MarkAsSimd128(node), VisitI16x8Eq(node);
case IrOpcode::kI16x8Ne:
- return MarkAsSimd1x8(node), VisitI16x8Ne(node);
- case IrOpcode::kI16x8LtS:
- return MarkAsSimd1x8(node), VisitI16x8LtS(node);
- case IrOpcode::kI16x8LeS:
- return MarkAsSimd1x8(node), VisitI16x8LeS(node);
+ return MarkAsSimd128(node), VisitI16x8Ne(node);
+ case IrOpcode::kI16x8GtS:
+ return MarkAsSimd128(node), VisitI16x8GtS(node);
+ case IrOpcode::kI16x8GeS:
+ return MarkAsSimd128(node), VisitI16x8GeS(node);
case IrOpcode::kI16x8UConvertI8x16Low:
return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
case IrOpcode::kI16x8UConvertI8x16High:
@@ -1644,10 +1639,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8MinU(node);
case IrOpcode::kI16x8MaxU:
return MarkAsSimd128(node), VisitI16x8MaxU(node);
- case IrOpcode::kI16x8LtU:
- return MarkAsSimd1x8(node), VisitI16x8LtU(node);
- case IrOpcode::kI16x8LeU:
- return MarkAsSimd1x8(node), VisitI16x8LeU(node);
+ case IrOpcode::kI16x8GtU:
+ return MarkAsSimd128(node), VisitI16x8GtU(node);
+ case IrOpcode::kI16x8GeU:
+ return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLane:
@@ -1677,13 +1672,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kI8x16MaxS:
return MarkAsSimd128(node), VisitI8x16MaxS(node);
case IrOpcode::kI8x16Eq:
- return MarkAsSimd1x16(node), VisitI8x16Eq(node);
+ return MarkAsSimd128(node), VisitI8x16Eq(node);
case IrOpcode::kI8x16Ne:
- return MarkAsSimd1x16(node), VisitI8x16Ne(node);
- case IrOpcode::kI8x16LtS:
- return MarkAsSimd1x16(node), VisitI8x16LtS(node);
- case IrOpcode::kI8x16LeS:
- return MarkAsSimd1x16(node), VisitI8x16LeS(node);
+ return MarkAsSimd128(node), VisitI8x16Ne(node);
+ case IrOpcode::kI8x16GtS:
+ return MarkAsSimd128(node), VisitI8x16GtS(node);
+ case IrOpcode::kI8x16GeS:
+ return MarkAsSimd128(node), VisitI8x16GeS(node);
case IrOpcode::kI8x16ShrU:
return MarkAsSimd128(node), VisitI8x16ShrU(node);
case IrOpcode::kI8x16UConvertI16x8:
@@ -1696,10 +1691,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16MinU(node);
case IrOpcode::kI8x16MaxU:
return MarkAsSimd128(node), VisitI8x16MaxU(node);
- case IrOpcode::kI8x16LtU:
- return MarkAsSimd1x16(node), VisitI8x16LtU(node);
- case IrOpcode::kI8x16LeU:
- return MarkAsSimd1x16(node), VisitI16x8LeU(node);
+ case IrOpcode::kI8x16GtU:
+ return MarkAsSimd128(node), VisitI8x16GtU(node);
+ case IrOpcode::kI8x16GeU:
+ return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kS128Zero:
return MarkAsSimd128(node), VisitS128Zero(node);
case IrOpcode::kS128And:
@@ -1710,56 +1705,18 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Xor(node);
case IrOpcode::kS128Not:
return MarkAsSimd128(node), VisitS128Not(node);
- case IrOpcode::kS32x4Shuffle:
- return MarkAsSimd128(node), VisitS32x4Shuffle(node);
- case IrOpcode::kS32x4Select:
- return MarkAsSimd128(node), VisitS32x4Select(node);
- case IrOpcode::kS16x8Shuffle:
- return MarkAsSimd128(node), VisitS16x8Shuffle(node);
- case IrOpcode::kS16x8Select:
- return MarkAsSimd128(node), VisitS16x8Select(node);
+ case IrOpcode::kS128Select:
+ return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
- case IrOpcode::kS8x16Select:
- return MarkAsSimd128(node), VisitS8x16Select(node);
- case IrOpcode::kS1x4Zero:
- return MarkAsSimd1x4(node), VisitS1x4Zero(node);
- case IrOpcode::kS1x4And:
- return MarkAsSimd1x4(node), VisitS1x4And(node);
- case IrOpcode::kS1x4Or:
- return MarkAsSimd1x4(node), VisitS1x4Or(node);
- case IrOpcode::kS1x4Xor:
- return MarkAsSimd1x4(node), VisitS1x4Xor(node);
- case IrOpcode::kS1x4Not:
- return MarkAsSimd1x4(node), VisitS1x4Not(node);
case IrOpcode::kS1x4AnyTrue:
return MarkAsWord32(node), VisitS1x4AnyTrue(node);
case IrOpcode::kS1x4AllTrue:
return MarkAsWord32(node), VisitS1x4AllTrue(node);
- case IrOpcode::kS1x8Zero:
- return MarkAsSimd1x8(node), VisitS1x8Zero(node);
- case IrOpcode::kS1x8And:
- return MarkAsSimd1x8(node), VisitS1x8And(node);
- case IrOpcode::kS1x8Or:
- return MarkAsSimd1x8(node), VisitS1x8Or(node);
- case IrOpcode::kS1x8Xor:
- return MarkAsSimd1x8(node), VisitS1x8Xor(node);
- case IrOpcode::kS1x8Not:
- return MarkAsSimd1x8(node), VisitS1x8Not(node);
case IrOpcode::kS1x8AnyTrue:
return MarkAsWord32(node), VisitS1x8AnyTrue(node);
case IrOpcode::kS1x8AllTrue:
return MarkAsWord32(node), VisitS1x8AllTrue(node);
- case IrOpcode::kS1x16Zero:
- return MarkAsSimd1x16(node), VisitS1x16Zero(node);
- case IrOpcode::kS1x16And:
- return MarkAsSimd1x16(node), VisitS1x16And(node);
- case IrOpcode::kS1x16Or:
- return MarkAsSimd1x16(node), VisitS1x16Or(node);
- case IrOpcode::kS1x16Xor:
- return MarkAsSimd1x16(node), VisitS1x16Xor(node);
- case IrOpcode::kS1x16Not:
- return MarkAsSimd1x16(node), VisitS1x16Not(node);
case IrOpcode::kS1x16AnyTrue:
return MarkAsWord32(node), VisitS1x16AnyTrue(node);
case IrOpcode::kS1x16AllTrue:
@@ -1874,6 +1831,7 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand& index_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.value_range;
+ DCHECK_LE(sw.value_range, std::numeric_limits<size_t>::max() - 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = index_operand;
InstructionOperand default_operand = g.Label(sw.default_branch);
@@ -1893,6 +1851,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.case_count * 2;
+ DCHECK_LE(sw.case_count, (std::numeric_limits<size_t>::max() - 2) / 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = value_operand;
inputs[1] = g.Label(sw.default_branch);
@@ -2084,7 +2043,8 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2108,13 +2068,9 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
}
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2132,10 +2088,11 @@ void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-#endif // V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2145,11 +2102,12 @@ void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 &&
- // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2169,14 +2127,17 @@ void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2184,9 +2145,11 @@ void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
@@ -2214,28 +2177,36 @@ void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2253,15 +2224,17 @@ void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
@@ -2283,14 +2256,13 @@ void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2302,49 +2274,58 @@ void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LeU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
// !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
@@ -2358,23 +2339,31 @@ void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16LtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16LeS(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2386,15 +2375,11 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI8x16LtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16LeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
@@ -2402,87 +2387,34 @@ void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x4Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Zero(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS32x4Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitS32x4Shuffle(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS16x8Shuffle(Node* node) { UNIMPLEMENTED(); }
-
-#endif // !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS16x8Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#endif // !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS8x16Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitS1x4And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x8And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
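In the TryRename hunk above, set_virtual_register() is gone, so a renamed operand is now rebuilt from the old one with only the virtual-register field replaced (via the UnallocatedOperand(other, rename) constructor added in instruction.h below). A standalone sketch of that copy-with-new-field style, using an invented bit layout:

    #include <cstdint>

    struct Operand {
      uint64_t value_ = 0;
      static constexpr int kVregShift = 32;  // invented layout for the sketch
      static constexpr uint64_t kVregMask = 0xFFFFFFFFull << kVregShift;

      int32_t virtual_register() const {
        return static_cast<int32_t>(value_ >> kVregShift);
      }
      // Like UnallocatedOperand(other, vreg): copy, then overwrite one field.
      static Operand WithVirtualRegister(Operand other, int32_t vreg) {
        other.value_ = (other.value_ & ~kVregMask) |
                       (static_cast<uint64_t>(static_cast<uint32_t>(vreg))
                        << kVregShift);
        return other;
      }
    };

    // Usage, mirroring TryRename:
    //   *unalloc = Operand::WithVirtualRegister(*unalloc, rename);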
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 26cc85a81f..512b6d1775 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -263,27 +263,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void MarkAsSimd128(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
}
- void MarkAsSimd1x4(Node* node) {
- if (kSimdMaskRegisters) {
- MarkAsRepresentation(MachineRepresentation::kSimd1x4, node);
- } else {
- MarkAsSimd128(node);
- }
- }
- void MarkAsSimd1x8(Node* node) {
- if (kSimdMaskRegisters) {
- MarkAsRepresentation(MachineRepresentation::kSimd1x8, node);
- } else {
- MarkAsSimd128(node);
- }
- }
- void MarkAsSimd1x16(Node* node) {
- if (kSimdMaskRegisters) {
- MarkAsRepresentation(MachineRepresentation::kSimd1x16, node);
- } else {
- MarkAsSimd128(node);
- }
- }
void MarkAsReference(Node* node) {
MarkAsRepresentation(MachineRepresentation::kTagged, node);
}
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 1067d2030a..8096c5b048 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -64,7 +64,6 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
return condition;
}
UNREACHABLE();
- return condition;
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
@@ -210,15 +209,6 @@ std::ostream& operator<<(std::ostream& os,
case MachineRepresentation::kSimd128:
os << "|s128";
break;
- case MachineRepresentation::kSimd1x4:
- os << "|s1x4";
- break;
- case MachineRepresentation::kSimd1x8:
- os << "|s1x8";
- break;
- case MachineRepresentation::kSimd1x16:
- os << "|s1x16";
- break;
case MachineRepresentation::kTaggedSigned:
os << "|ts";
break;
@@ -235,7 +225,6 @@ std::ostream& operator<<(std::ostream& os,
return os << "(x)";
}
UNREACHABLE();
- return os;
}
void MoveOperands::Print(const RegisterConfiguration* config) const {
@@ -415,7 +404,6 @@ std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao) {
#undef CASE
}
UNREACHABLE();
- return os;
}
@@ -430,7 +418,6 @@ std::ostream& operator<<(std::ostream& os, const AddressingMode& am) {
#undef CASE
}
UNREACHABLE();
- return os;
}
@@ -448,7 +435,6 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os << "trap";
}
UNREACHABLE();
- return os;
}
@@ -504,7 +490,6 @@ std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
return os << "negative";
}
UNREACHABLE();
- return os;
}
@@ -576,6 +561,12 @@ Handle<HeapObject> Constant::ToHeapObject() const {
return value;
}
+Handle<Code> Constant::ToCode() const {
+ DCHECK_EQ(kHeapObject, type());
+ Handle<Code> value(bit_cast<Code**>(static_cast<intptr_t>(value_)));
+ return value;
+}
+
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
@@ -585,7 +576,7 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
case Constant::kFloat32:
return os << constant.ToFloat32() << "f";
case Constant::kFloat64:
- return os << constant.ToFloat64();
+ return os << constant.ToFloat64().value();
case Constant::kExternalReference:
return os << static_cast<const void*>(
constant.ToExternalReference().address());
@@ -595,7 +586,6 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
return os << "RPO" << constant.ToRpoNumber().ToInt();
}
UNREACHABLE();
- return os;
}
@@ -896,21 +886,17 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
return InstructionSequence::DefaultRepresentation();
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord64:
- case MachineRepresentation::kFloat32:
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
return rep;
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return MachineRepresentation::kNone;
}
@@ -1033,18 +1019,9 @@ FrameStateDescriptor::FrameStateDescriptor(
shared_info_(shared_info),
outer_state_(outer_state) {}
-
-size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
- size_t size = 1 + parameters_count() + locals_count() + stack_count() +
- (HasContext() ? 1 : 0);
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput:
- size += combine.GetPushCount();
- break;
- case OutputFrameStateCombine::kPokeAt:
- break;
- }
- return size;
+size_t FrameStateDescriptor::GetSize() const {
+ return 1 + parameters_count() + locals_count() + stack_count() +
+ (HasContext() ? 1 : 0);
}
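FrameStateDescriptor::GetSize() above no longer takes an OutputFrameStateCombine; the size is simply the fixed leading slot plus parameters, locals, stack values and an optional context. A small worked sketch of that arithmetic:

    #include <cstddef>

    static size_t FrameStateSize(size_t parameters, size_t locals,
                                 size_t stack, bool has_context) {
      return 1 + parameters + locals + stack + (has_context ? 1 : 0);
    }

    // e.g. FrameStateSize(2, 3, 1, true) == 8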
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 5cb28627de..668a5c0efd 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -15,6 +15,7 @@
#include "src/compiler/frame.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
+#include "src/double.h"
#include "src/globals.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
@@ -34,8 +35,6 @@ class V8_EXPORT_PRIVATE InstructionOperand {
public:
static const int kInvalidVirtualRegister = -1;
- // TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
- // kInvalidVirtualRegister and some DCHECKS.
enum Kind {
INVALID,
UNALLOCATED,
@@ -167,7 +166,7 @@ std::ostream& operator<<(std::ostream& os,
return *static_cast<const OperandType*>(&op); \
}
-class UnallocatedOperand : public InstructionOperand {
+class UnallocatedOperand final : public InstructionOperand {
public:
enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
@@ -183,15 +182,14 @@ class UnallocatedOperand : public InstructionOperand {
// Lifetime of operand inside the instruction.
enum Lifetime {
- // USED_AT_START operand is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
+ // USED_AT_START operand is guaranteed to be live only at instruction start.
+ // The register allocator is free to assign the same register to some other
+ // operand used inside instruction (i.e. temporary or output).
USED_AT_START,
- // USED_AT_END operand is treated as live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
+ // USED_AT_END operand is treated as live until the end of instruction.
+ // This means that register allocator will not reuse its register for any
+ // other operand inside instruction.
USED_AT_END
};
@@ -233,6 +231,12 @@ class UnallocatedOperand : public InstructionOperand {
value_ |= SecondaryStorageField::encode(slot_id);
}
+ UnallocatedOperand(const UnallocatedOperand& other, int virtual_register) {
+ DCHECK_NE(kInvalidVirtualRegister, virtual_register);
+ value_ = VirtualRegisterField::update(
+ other.value_, static_cast<uint32_t>(virtual_register));
+ }
+
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
@@ -275,7 +279,6 @@ class UnallocatedOperand : public InstructionOperand {
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
BasicPolicy basic_policy() const {
- DCHECK_EQ(UNALLOCATED, kind());
return BasicPolicyField::decode(value_);
}
@@ -300,16 +303,9 @@ class UnallocatedOperand : public InstructionOperand {
// [virtual_register]: The virtual register ID for this operand.
int32_t virtual_register() const {
- DCHECK_EQ(UNALLOCATED, kind());
return static_cast<int32_t>(VirtualRegisterField::decode(value_));
}
- // TODO(dcarney): remove this.
- void set_virtual_register(int32_t id) {
- DCHECK_EQ(UNALLOCATED, kind());
- value_ = VirtualRegisterField::update(value_, static_cast<uint32_t>(id));
- }
-
// [lifetime]: Only for non-FIXED_SLOT.
bool IsUsedAtStart() const {
DCHECK(basic_policy() == EXTENDED_POLICY);
@@ -484,9 +480,6 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
@@ -498,7 +491,6 @@ class LocationOperand : public InstructionOperand {
return false;
}
UNREACHABLE();
- return false;
}
static LocationOperand* cast(InstructionOperand* op) {
@@ -596,9 +588,8 @@ bool InstructionOperand::IsDoubleRegister() const {
}
bool InstructionOperand::IsSimd128Register() const {
- return IsAnyRegister() &&
- LocationOperand::cast(this)->representation() ==
- MachineRepresentation::kSimd128;
+ return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kSimd128;
}
bool InstructionOperand::IsAnyStackSlot() const {
@@ -903,6 +894,10 @@ class V8_EXPORT_PRIVATE Instruction final {
FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
+ bool IsTrap() const {
+ return FlagsModeField::decode(opcode()) == kFlags_trap;
+ }
+
bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
bool IsTailCall() const {
@@ -1080,19 +1075,9 @@ class V8_EXPORT_PRIVATE Constant final {
return bit_cast<uint32_t>(static_cast<int32_t>(value_));
}
- double ToFloat64() const {
- // TODO(ahaas): We should remove this function. If value_ has the bit
- // representation of a signalling NaN, then returning it as float can cause
- // the signalling bit to flip, and value_ is returned as a quiet NaN.
- if (type() == kInt32) return ToInt32();
- DCHECK_EQ(kFloat64, type());
- return bit_cast<double>(value_);
- }
-
- uint64_t ToFloat64AsInt() const {
- if (type() == kInt32) return ToInt32();
+ Double ToFloat64() const {
DCHECK_EQ(kFloat64, type());
- return bit_cast<uint64_t>(value_);
+ return Double(bit_cast<uint64_t>(value_));
}
ExternalReference ToExternalReference() const {
@@ -1106,6 +1091,7 @@ class V8_EXPORT_PRIVATE Constant final {
}
Handle<HeapObject> ToHeapObject() const;
+ Handle<Code> ToCode() const;
private:
Type type_;
@@ -1302,11 +1288,11 @@ class FrameStateDescriptor : public ZoneObject {
MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateDescriptor* outer_state() const { return outer_state_; }
bool HasContext() const {
- return FrameStateFunctionInfo::IsJSFunctionType(type_);
+ return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
+ type_ == FrameStateType::kBuiltinContinuation;
}
- size_t GetSize(OutputFrameStateCombine combine =
- OutputFrameStateCombine::Ignore()) const;
+ size_t GetSize() const;
size_t GetTotalSize() const;
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
@@ -1599,7 +1585,6 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
}
UNREACHABLE();
- return Constant(static_cast<int32_t>(0));
}
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
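The Constant::ToFloat64() change above stops round-tripping the payload through a double: the 64-bit pattern is kept in a wrapper (V8's Double) and converted on demand via value(), because a signalling NaN can be silently quieted when it is passed around as a double. A self-contained sketch of the same idea, with Float64Bits standing in for Double:

    #include <cstdint>
    #include <cstring>

    struct Float64Bits {           // stand-in for V8's Double wrapper
      uint64_t bits;
      explicit Float64Bits(uint64_t b) : bits(b) {}
      double value() const {       // convert only when a double is really needed
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
      }
    };

    // A signalling NaN: exponent all ones, quiet bit clear, non-zero payload.
    constexpr uint64_t kSignallingNaN = 0x7FF0000000000001ull;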
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 82c91cc0eb..19db874ca6 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/zone.h"
@@ -289,15 +290,15 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kCall: {
- // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ // TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
(descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64())) {
// We have to adjust the call descriptor.
- const Operator* op = common()->Call(
- wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), descriptor));
+ const Operator* op =
+ common()->Call(GetI32WasmCallDescriptor(zone(), descriptor));
NodeProperties::ChangeOp(node, op);
}
if (descriptor->ReturnCount() == 1 &&
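The int64-lowering.cc hunk switches the adjusted call descriptor to the GetI32WasmCallDescriptor helper now provided by wasm-compiler.h; the underlying idea is unchanged: on 32-bit targets every Int64 value, including an Int64 call result, is carried as a pair of 32-bit words. A trivial sketch of that split:

    #include <cstdint>
    #include <utility>

    static std::pair<uint32_t, uint32_t> SplitInt64(uint64_t value) {
      uint32_t low  = static_cast<uint32_t>(value);        // word 0
      uint32_t high = static_cast<uint32_t>(value >> 32);  // word 1
      return {low, high};
    }

    // e.g. SplitInt64(0x0000000100000002ull) -> {0x00000002, 0x00000001}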
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 9ca0c63eb9..0955ff5ec9 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -165,7 +165,7 @@ bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
// If the receiver map has packed elements, no need to check the prototype.
// This requires a MapCheck where this is used.
- if (!IsFastHoleyElementsKind(receiver_map->elements_kind())) return true;
+ if (!IsHoleyElementsKind(receiver_map->elements_kind())) return true;
Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
isolate);
@@ -254,7 +254,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
// on the prototype chain.
map_index += static_cast<int>(receiver_map->elements_kind());
object_map = jsgraph()->Constant(receiver_map);
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
Handle<JSObject> initial_array_prototype(
native_context()->initial_array_prototype(), isolate());
dependencies()->AssumePrototypeMapsStable(receiver_map,
@@ -344,7 +344,7 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
iterator_map->instance_type());
- if (IsFastHoleyElementsKind(elements_kind)) {
+ if (IsHoleyElementsKind(elements_kind)) {
if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
return NoChange();
} else {
@@ -416,12 +416,12 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
elements, index, etrue1, if_true1);
// Convert hole to undefined if needed.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
value);
- } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
- // TODO(bmeurer): avoid deopt if not all uses of value are truncated.
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // TODO(6587): avoid deopt if not all uses of value are truncated.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
value = etrue1 = graph()->NewNode(
simplified()->CheckFloat64Hole(mode), value, etrue1, if_true1);
@@ -847,7 +847,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (GetMapWitness(node).ToHandle(&receiver_map) &&
CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
+ receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -882,7 +882,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
receiver, efalse, if_false);
// Ensure that we aren't popping from a copy-on-write backing store.
- if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
elements = efalse =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, efalse, if_false);
@@ -919,7 +919,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
value =
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
@@ -976,10 +976,10 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// currently don't have a proper way to deal with this; the proper solution
// here is to learn on deopt, i.e. disable Array.prototype.push inlining
// for this function.
- if (IsFastSmiElementsKind(receiver_map->elements_kind())) {
+ if (IsSmiElementsKind(receiver_map->elements_kind())) {
value = effect =
graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+ } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
value = effect =
graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
// Make sure we do not store signaling NaNs into double arrays.
@@ -1002,7 +1002,7 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// don't necessarily learn from it. See the comment on the value type check
// above.
GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject;
- if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+ if (IsDoubleElementsKind(receiver_map->elements_kind())) {
flags |= GrowFastElementsFlag::kDoubleElements;
}
elements = effect =
@@ -1039,7 +1039,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
Handle<Map> receiver_map;
if (GetMapWitness(node).ToHandle(&receiver_map) &&
CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
+ receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -1087,7 +1087,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
// Ensure that we aren't shifting a copy-on-write backing store.
- if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
elements = etrue1 =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, etrue1, if_true1);
@@ -1187,7 +1187,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
value =
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
@@ -1202,33 +1202,365 @@ namespace {
bool HasInstanceTypeWitness(Node* receiver, Node* effect,
InstanceType instance_type) {
- for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckMaps &&
- NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
- ZoneHandleSet<Map> const& maps =
- CheckMapsParametersOf(dominator->op()).maps();
- // Check if all maps have the given {instance_type}.
- for (size_t i = 0; i < maps.size(); ++i) {
- if (maps[i]->instance_type() != instance_type) return false;
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ switch (result) {
+ case NodeProperties::kUnreliableReceiverMaps:
+ case NodeProperties::kReliableReceiverMaps:
+ DCHECK_NE(0, receiver_maps.size());
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != instance_type) return false;
}
return true;
- }
- // The instance type doesn't change for JSReceiver values, so we
- // don't need to pay attention to potentially side-effecting nodes
- // here. Strings and internal structures like FixedArray and
- // FixedDoubleArray are weird here, but we don't use this function then.
- DCHECK_LE(FIRST_JS_RECEIVER_TYPE, instance_type);
- DCHECK_EQ(1, dominator->op()->EffectOutputCount());
- if (dominator->op()->EffectInputCount() != 1) {
- // Didn't find any appropriate CheckMaps node.
+
+ case NodeProperties::kNoReceiverMaps:
return false;
- }
- dominator = NodeProperties::GetEffectInput(dominator);
}
+ UNREACHABLE();
}
} // namespace
+Reduction JSBuiltinReducer::ReduceCollectionIterator(
+ Node* node, InstanceType collection_instance_type,
+ int collection_iterator_map_index) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ // Figure out the proper collection iterator map.
+ Handle<Map> collection_iterator_map(
+ Map::cast(native_context()->get(collection_iterator_map_index)),
+ isolate());
+
+ // Load the OrderedHashTable from the {receiver}.
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ receiver, effect, control);
+
+ // Create the JSCollectionIterator result.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
+ jsgraph()->Constant(JSCollectionIterator::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), value,
+ jsgraph()->Constant(collection_iterator_map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
+ value, table, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ value, jsgraph()->ZeroConstant(), effect, control);
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceCollectionSize(
+ Node* node, InstanceType collection_instance_type) {
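+ // Lowers the Map.prototype.size and Set.prototype.size getters to a
+ // direct load of the NumberOfElements field from the backing
+ // OrderedHashTable, provided the {receiver} instance type is known.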
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ table, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
+ Node* node, int entry_size,
+ InstanceType collection_iterator_instance_type_first,
+ InstanceType collection_iterator_instance_type_last) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // A word of warning to begin with: This whole method might look a bit
+ // strange at times, but that's mostly because it was carefully handcrafted
+ // to allow for full escape analysis and scalar replacement of both the
+ // collection iterator object and the iterator results, including the
+ // key-value arrays in case of Set/Map entry iteration.
+ //
+ // TODO(turbofan): Currently the escape analysis (and the store-load
+ // forwarding) is unable to eliminate the allocations for the key-value
+ // arrays in case of Set/Map entry iteration, and we should investigate
+ // how to update the escape analysis / arrange the graph in a way that
+ // this becomes possible.
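+ //
+ // The lowering proceeds in three steps: (1) migrate the iterator to the
+ // latest backing table in case the collection was mutated while iterating,
+ // (2) skip deleted (hole) entries starting from the current index, and
+ // (3) fill in the pre-allocated IteratorResult with the proper key/value
+ // and done flag.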
+
+ // Infer the {receiver} instance type.
+ InstanceType receiver_instance_type;
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+ receiver_instance_type = receiver_maps[0]->instance_type();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != receiver_instance_type) {
+ return NoChange();
+ }
+ }
+ if (receiver_instance_type < collection_iterator_instance_type_first ||
+ receiver_instance_type > collection_iterator_instance_type_last) {
+ return NoChange();
+ }
+
+ // Transition the JSCollectionIterator {receiver} if necessary
+ // (i.e. if there were mutations to the collection while we were iterating).
+ {
+ Node* done_loop;
+ Node* done_eloop;
+ Node* loop = control =
+ graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+
+ // Check if we have reached the final table of the {receiver}.
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, effect, control);
+ Node* next_table = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNextTable()),
+ table, effect, control);
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), next_table);
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Abort the {loop} when we reach the final table.
+ done_loop = graph()->NewNode(common()->IfTrue(), control);
+ done_eloop = effect;
+
+ // Migrate to the {next_table} otherwise.
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Self-heal the {receiver}'s index.
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, effect, control);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kEliminatable);
+ index = effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
+ index, jsgraph()->NoContextConstant(), effect);
+
+ // Update the {index} and {table} on the {receiver}.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, index, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, next_table, effect, control);
+
+ // Tie the knot.
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+
+ control = done_loop;
+ effect = done_eloop;
+ }
+
+ // Get current index and table from the JSCollectionIterator {receiver}.
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, effect, control);
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, effect, control);
+
+ // Create the {JSIteratorResult} first to ensure that we always have
+ // a dominating Allocate node for the allocation folding phase.
+ Node* iterator_result = effect = graph()->NewNode(
+ javascript()->CreateIterResultObject(), jsgraph()->UndefinedConstant(),
+ jsgraph()->TrueConstant(), context, effect);
+
+ // Look for the next non-holey key, starting from {index} in the {table}.
+ Node* controls[2];
+ Node* effects[3];
+ {
+ // Compute the currently used capacity.
+ Node* number_of_buckets = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets()),
+ table, effect, control);
+ Node* number_of_elements = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ table, effect, control);
+ Node* number_of_deleted_elements = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements()),
+ table, effect, control);
+ Node* used_capacity =
+ graph()->NewNode(simplified()->NumberAdd(), number_of_elements,
+ number_of_deleted_elements);
+
+ // Skip holes and update the {index}.
+ Node* loop = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* iloop = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
+ NodeProperties::SetType(iloop, type_cache_.kFixedArrayLengthType);
+ {
+ Node* check0 = graph()->NewNode(simplified()->NumberLessThan(), iloop,
+ used_capacity);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, loop);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = eloop;
+ {
+ // Mark the {receiver} as exhausted.
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver,
+ jsgraph()->HeapConstant(factory()->empty_ordered_hash_table()),
+ efalse0, if_false0);
+
+ controls[0] = if_false0;
+ effects[0] = efalse0;
+ }
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = eloop;
+ {
+ // Load the key of the entry.
+ Node* entry_start_position = graph()->NewNode(
+ simplified()->NumberAdd(),
+ graph()->NewNode(
+ simplified()->NumberAdd(),
+ graph()->NewNode(simplified()->NumberMultiply(), iloop,
+ jsgraph()->Constant(entry_size)),
+ number_of_buckets),
+ jsgraph()->Constant(OrderedHashTableBase::kHashTableStartIndex));
+ Node* entry_key = etrue0 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ table, entry_start_position, etrue0, if_true0);
+
+ // Advance the index.
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), iloop,
+ jsgraph()->OneConstant());
+
+ Node* check1 =
+ graph()->NewNode(simplified()->ReferenceEqual(), entry_key,
+ jsgraph()->TheHoleConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_true0);
+
+ {
+ // Abort loop with resulting value.
+ Node* control = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* effect = etrue0;
+ Node* value = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), entry_key, control);
+ Node* done = jsgraph()->FalseConstant();
+
+ // Advance the index on the {receiver}.
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, index, effect, control);
+
+ // The actual {value} depends on the {receiver} iteration type.
+ switch (receiver_instance_type) {
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ break;
+
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ value = effect =
+ graph()->NewNode(javascript()->CreateKeyValueArray(), value,
+ value, context, effect);
+ break;
+
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement()),
+ table,
+ graph()->NewNode(
+ simplified()->NumberAdd(), entry_start_position,
+ jsgraph()->Constant(OrderedHashMap::kValueOffset)),
+ effect, control);
+ break;
+
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement()),
+ table,
+ graph()->NewNode(
+ simplified()->NumberAdd(), entry_start_position,
+ jsgraph()->Constant(OrderedHashMap::kValueOffset)),
+ effect, control);
+ value = effect =
+ graph()->NewNode(javascript()->CreateKeyValueArray(),
+ entry_key, value, context, effect);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Store final {value} and {done} into the {iterator_result}.
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSIteratorResultValue()),
+ iterator_result, value, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSIteratorResultDone()),
+ iterator_result, done, effect, control);
+
+ controls[1] = control;
+ effects[1] = effect;
+ }
+
+ // Continue with next loop index.
+ loop->ReplaceInput(1, graph()->NewNode(common()->IfTrue(), branch1));
+ eloop->ReplaceInput(1, etrue0);
+ iloop->ReplaceInput(1, index);
+ }
+ }
+
+ control = effects[2] = graph()->NewNode(common()->Merge(2), 2, controls);
+ effect = graph()->NewNode(common()->EffectPhi(2), 3, effects);
+ }
+
+ // Yield the final {iterator_result}.
+ ReplaceWithValue(node, iterator_result, effect, control);
+ return Replace(iterator_result);
+}
+
// ES6 section 20.3.3.1 Date.now ( )
Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
NodeProperties::RemoveValueInputs(node);
@@ -1252,6 +1584,114 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
return NoChange();
}
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+Reduction JSBuiltinReducer::ReduceFunctionBind(Node* node) {
+ // Value inputs to the {node} are as follows:
+ //
+ // - target, which is the Function.prototype.bind JSFunction
+ // - receiver, which is the [[BoundTargetFunction]]
+ // - bound_this (optional), which is the [[BoundThis]]
+ // - and all the remaining value inputs are [[BoundArguments]]
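+ //
+ // The reduction only applies when the bound target function is a known
+ // constant whose "length" and "name" properties are still the default
+ // JSFunction accessors, so that the JSBoundFunction and its
+ // [[BoundArguments]] can be allocated inline here.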
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* bound_this = (node->op()->ValueInputCount() < 3)
+ ? jsgraph()->UndefinedConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (receiver_type->IsHeapConstant() &&
+ receiver_type->AsHeapConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> target_function =
+ Handle<JSFunction>::cast(receiver_type->AsHeapConstant()->Value());
+
+ // Check that the "length" property on the {target_function} is the
+ // default JSFunction accessor.
+ LookupIterator length_lookup(target_function, factory()->length_string(),
+ target_function, LookupIterator::OWN);
+ if (length_lookup.state() != LookupIterator::ACCESSOR ||
+ !length_lookup.GetAccessors()->IsAccessorInfo()) {
+ return NoChange();
+ }
+
+ // Check that the "name" property on the {target_function} is the
+ // default JSFunction accessor.
+ LookupIterator name_lookup(target_function, factory()->name_string(),
+ target_function, LookupIterator::OWN);
+ if (name_lookup.state() != LookupIterator::ACCESSOR ||
+ !name_lookup.GetAccessors()->IsAccessorInfo()) {
+ return NoChange();
+ }
+
+ // Determine the prototype of the {target_function}.
+ Handle<Object> prototype(target_function->map()->prototype(), isolate());
+
+ // Set up the map for the JSBoundFunction instance.
+ Handle<Map> map = target_function->IsConstructor()
+ ? isolate()->bound_function_with_constructor_map()
+ : isolate()->bound_function_without_constructor_map();
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype);
+ }
+ DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
+
+ // Create the [[BoundArguments]] for the result.
+ Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
+ if (node->op()->ValueInputCount() > 3) {
+ int const length = node->op()->ValueInputCount() - 3;
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ bound_arguments = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
+ jsgraph()->Constant(FixedArray::SizeFor(length)), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), bound_arguments,
+ jsgraph()->FixedArrayMapConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArrayLength()),
+ bound_arguments, jsgraph()->Constant(length), effect, control);
+ for (int i = 0; i < length; ++i) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)),
+ bound_arguments, NodeProperties::GetValueInput(node, 3 + i), effect,
+ control);
+ }
+ bound_arguments = effect =
+ graph()->NewNode(common()->FinishRegion(), bound_arguments, effect);
+ }
+
+ // Create the JSBoundFunction result.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect = graph()->NewNode(
+ simplified()->Allocate(Type::BoundFunction(), NOT_TENURED),
+ jsgraph()->Constant(JSBoundFunction::kSize), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, jsgraph()->Constant(map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSBoundFunctionBoundTargetFunction()),
+ value, receiver, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSBoundFunctionBoundThis()),
+ value, bound_this, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSBoundFunctionBoundArguments()),
+ value, bound_arguments, effect, control);
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
// ES6 section 18.2.2 isFinite ( number )
Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
JSCallReduction r(node);
@@ -1280,6 +1720,86 @@ Reduction JSBuiltinReducer::ReduceGlobalIsNaN(Node* node) {
return NoChange();
}
+Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
+ // We only optimize if we have target, receiver and key parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* key = NodeProperties::GetValueInput(node, 2);
+
+ if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+
+ Node* storage = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+
+ Node* index = effect = graph()->NewNode(
+ simplified()->LookupHashStorageIndex(), storage, key, effect, control);
+
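+ // An {index} of -1 signals that the {key} was not found in the {storage}.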
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), index,
+ jsgraph()->MinusOneConstant());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ // Key not found.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ // Key found.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), storage,
+ index, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
+ // We only optimize if we have target, receiver and key parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* key = NodeProperties::GetValueInput(node, 2);
+
+ if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+
+ Node* storage = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+
+ Node* index = effect = graph()->NewNode(
+ simplified()->LookupHashStorageIndex(), storage, key, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), index,
+ jsgraph()->MinusOneConstant());
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ // Key not found.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ // Key found.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->TrueConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
// ES6 section 20.2.2.1 Math.abs ( x )
Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
JSCallReduction r(node);
@@ -1788,7 +2308,7 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
- DCHECK(base::bits::IsPowerOfTwo32(capacity));
+ DCHECK(base::bits::IsPowerOfTwo(capacity));
int length = NameDictionary::EntryToIndex(capacity);
int size = NameDictionary::SizeFor(length);
@@ -1821,14 +2341,11 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
// Initialize Dictionary fields.
Node* undefined = jsgraph()->UndefinedConstant();
effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForDictionaryMaxNumberKey()),
- value, undefined, effect, control);
- effect = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForDictionaryNextEnumerationIndex()),
value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
control);
- // Initialize hte Properties fields.
+ // Initialize the Properties fields.
for (int index = NameDictionary::kNextEnumerationIndexIndex + 1;
index < length; index++) {
effect = graph()->NewNode(
@@ -1899,7 +2416,10 @@ Node* GetStringWitness(Node* node) {
// it's {receiver}, and if so use that renaming as {receiver} for
// the lowering below.
for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckString &&
+ if ((dominator->opcode() == IrOpcode::kCheckString ||
+ dominator->opcode() == IrOpcode::kCheckInternalizedString ||
+ dominator->opcode() == IrOpcode::kCheckSeqString ||
+ dominator->opcode() == IrOpcode::kCheckNonEmptyString) &&
NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
return dominator;
}
@@ -2260,6 +2780,30 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
return NoChange();
}
+Reduction JSBuiltinReducer::ReduceStringToLowerCaseIntl(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToLowerCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceStringToUpperCaseIntl(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToUpperCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -2324,12 +2868,35 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceDateNow(node);
case kDateGetTime:
return ReduceDateGetTime(node);
+ case kFunctionBind:
+ return ReduceFunctionBind(node);
case kGlobalIsFinite:
reduction = ReduceGlobalIsFinite(node);
break;
case kGlobalIsNaN:
reduction = ReduceGlobalIsNaN(node);
break;
+ case kMapEntries:
+ return ReduceCollectionIterator(
+ node, JS_MAP_TYPE, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX);
+ case kMapGet:
+ reduction = ReduceMapGet(node);
+ break;
+ case kMapHas:
+ reduction = ReduceMapHas(node);
+ break;
+ case kMapKeys:
+ return ReduceCollectionIterator(node, JS_MAP_TYPE,
+ Context::MAP_KEY_ITERATOR_MAP_INDEX);
+ case kMapSize:
+ return ReduceCollectionSize(node, JS_MAP_TYPE);
+ case kMapValues:
+ return ReduceCollectionIterator(node, JS_MAP_TYPE,
+ Context::MAP_VALUE_ITERATOR_MAP_INDEX);
+ case kMapIteratorNext:
+ return ReduceCollectionIteratorNext(node, OrderedHashMap::kEntrySize,
+ FIRST_MAP_ITERATOR_TYPE,
+ LAST_MAP_ITERATOR_TYPE);
case kMathAbs:
reduction = ReduceMathAbs(node);
break;
@@ -2447,6 +3014,18 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kObjectCreate:
reduction = ReduceObjectCreate(node);
break;
+ case kSetEntries:
+ return ReduceCollectionIterator(
+ node, JS_SET_TYPE, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX);
+ case kSetSize:
+ return ReduceCollectionSize(node, JS_SET_TYPE);
+ case kSetValues:
+ return ReduceCollectionIterator(node, JS_SET_TYPE,
+ Context::SET_VALUE_ITERATOR_MAP_INDEX);
+ case kSetIteratorNext:
+ return ReduceCollectionIteratorNext(node, OrderedHashSet::kEntrySize,
+ FIRST_SET_ITERATOR_TYPE,
+ LAST_SET_ITERATOR_TYPE);
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
@@ -2462,6 +3041,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringIterator(node);
case kStringIteratorNext:
return ReduceStringIteratorNext(node);
+ case kStringToLowerCaseIntl:
+ return ReduceStringToLowerCaseIntl(node);
+ case kStringToUpperCaseIntl:
+ return ReduceStringToUpperCaseIntl(node);
case kDataViewByteLength:
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 736ece34e4..db8bd74dd9 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -42,6 +42,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Handle<Context> native_context);
~JSBuiltinReducer() final {}
+ const char* reducer_name() const override { return "JSBuiltinReducer"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -59,10 +61,22 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
Reduction ReduceArrayShift(Node* node);
+ Reduction ReduceCollectionIterator(Node* node,
+ InstanceType collection_instance_type,
+ int collection_iterator_map_index);
+ Reduction ReduceCollectionSize(Node* node,
+ InstanceType collection_instance_type);
+ Reduction ReduceCollectionIteratorNext(
+ Node* node, int entry_size,
+ InstanceType collection_iterator_instance_type_first,
+ InstanceType collection_iterator_instance_type_last);
Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
+ Reduction ReduceFunctionBind(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
Reduction ReduceGlobalIsNaN(Node* node);
+ Reduction ReduceMapHas(Node* node);
+ Reduction ReduceMapGet(Node* node);
Reduction ReduceMathAbs(Node* node);
Reduction ReduceMathAcos(Node* node);
Reduction ReduceMathAcosh(Node* node);
@@ -109,6 +123,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
+ Reduction ReduceStringToLowerCaseIntl(Node* node);
+ Reduction ReduceStringToUpperCaseIntl(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
FieldAccess const& access);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 1e1d3a92ab..ca82afad8c 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -7,6 +7,7 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -23,10 +24,14 @@ Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
return ReduceJSConstruct(node);
+ case IrOpcode::kJSConstructWithArrayLike:
+ return ReduceJSConstructWithArrayLike(node);
case IrOpcode::kJSConstructWithSpread:
return ReduceJSConstructWithSpread(node);
case IrOpcode::kJSCall:
return ReduceJSCall(node);
+ case IrOpcode::kJSCallWithArrayLike:
+ return ReduceJSCallWithArrayLike(node);
case IrOpcode::kJSCallWithSpread:
return ReduceJSCallWithSpread(node);
default:
@@ -35,6 +40,23 @@ Reduction JSCallReducer::Reduce(Node* node) {
return NoChange();
}
+void JSCallReducer::Finalize() {
+ // TODO(turbofan): This is not the best solution; ideally we would be able
+ // to teach the GraphReducer about arbitrary dependencies between different
+ // nodes, even if they don't show up in the use list of the other node.
+ std::set<Node*> const waitlist = std::move(waitlist_);
+ for (Node* node : waitlist) {
+ if (!node->IsDead()) {
+ Reduction const reduction = Reduce(node);
+ if (reduction.Changed()) {
+ Node* replacement = reduction.replacement();
+ if (replacement != node) {
+ Replace(node, replacement);
+ }
+ }
+ }
+ }
+}
// ES6 section 22.1.1 The Array Constructor
Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
@@ -94,18 +116,48 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
+namespace {
+
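+// Conservatively determines whether {node} might evaluate to null or
+// undefined; only opcodes that are guaranteed to produce a non-nullish
+// value (and heap constants other than null/undefined) return false.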
+bool CanBeNullOrUndefined(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateKeyValueArray:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructForwardVarargs:
+ case IrOpcode::kJSConstructWithSpread:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSToInteger:
+ case IrOpcode::kJSToLength:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
+ return false;
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = HeapObjectMatcher(node).Value();
+ Isolate* const isolate = value->GetIsolate();
+ return value->IsNull(isolate) || value->IsUndefined(isolate);
+ }
+ default:
+ return true;
+ }
+}
+
+} // namespace
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* target = NodeProperties::GetValueInput(node, 0);
CallParameters const& p = CallParametersOf(node->op());
- // Tail calls to Function.prototype.apply are not properly supported
- // down the pipeline, so we disable this optimization completely for
- // tail calls (for now).
- if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
- Handle<JSFunction> apply =
- Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
size_t arity = p.arity();
DCHECK_LE(2u, arity);
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
@@ -118,97 +170,101 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// The argArray was not provided, just remove the {target}.
node->RemoveInput(0);
--arity;
- } else if (arity == 4) {
- // Check if argArray is an arguments object, and {node} is the only value
- // user of argArray (except for value uses in frame states).
- Node* arg_array = NodeProperties::GetValueInput(node, 3);
- if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
- for (Edge edge : arg_array->use_edges()) {
- Node* const user = edge.from();
- if (user == node) continue;
- // Ignore uses as frame state's locals or parameters.
- if (user->opcode() == IrOpcode::kStateValues) continue;
- // Ignore uses as frame state's accumulator.
- if (user->opcode() == IrOpcode::kFrameState &&
- user->InputAt(2) == arg_array) {
- continue;
- }
- if (!NodeProperties::IsValueEdge(edge)) continue;
- return NoChange();
- }
- // Check if the arguments can be handled in the fast case (i.e. we don't
- // have aliased sloppy arguments), and compute the {start_index} for
- // rest parameters.
- CreateArgumentsType const type = CreateArgumentsTypeOf(arg_array->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int start_index = 0;
- // Determine the formal parameter count;
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- int formal_parameter_count = shared->internal_formal_parameter_count();
- if (type == CreateArgumentsType::kMappedArguments) {
- // Mapped arguments (sloppy mode) that are aliased can only be handled
- // here if there's no side-effect between the {node} and the {arg_array}.
- // TODO(turbofan): Further relax this constraint.
- if (formal_parameter_count != 0) {
- Node* effect = NodeProperties::GetEffectInput(node);
- while (effect != arg_array) {
- if (effect->op()->EffectInputCount() != 1 ||
- !(effect->op()->properties() & Operator::kNoWrite)) {
- return NoChange();
- }
- effect = NodeProperties::GetEffectInput(effect);
- }
+ } else {
+ Node* target = NodeProperties::GetValueInput(node, 1);
+ Node* this_argument = NodeProperties::GetValueInput(node, 2);
+ Node* arguments_list = NodeProperties::GetValueInput(node, 3);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // If {arguments_list} cannot be null or undefined, we don't need
+ // to expand this {node} to control-flow.
+ if (!CanBeNullOrUndefined(arguments_list)) {
+ // Massage the value inputs appropriately.
+ node->ReplaceInput(0, target);
+ node->ReplaceInput(1, this_argument);
+ node->ReplaceInput(2, arguments_list);
+ while (arity-- > 3) node->RemoveInput(3);
+
+ // Morph the {node} to a {JSCallWithArrayLike}.
+ NodeProperties::ChangeOp(node,
+ javascript()->CallWithArrayLike(p.frequency()));
+ Reduction const reduction = ReduceJSCallWithArrayLike(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ } else {
+ // Check whether {arguments_list} is null.
+ Node* check_null =
+ graph()->NewNode(simplified()->ReferenceEqual(), arguments_list,
+ jsgraph()->NullConstant());
+ control = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_null, control);
+ Node* if_null = graph()->NewNode(common()->IfTrue(), control);
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Check whether {arguments_list} is undefined.
+ Node* check_undefined =
+ graph()->NewNode(simplified()->ReferenceEqual(), arguments_list,
+ jsgraph()->UndefinedConstant());
+ control = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_undefined, control);
+ Node* if_undefined = graph()->NewNode(common()->IfTrue(), control);
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Lower to {JSCallWithArrayLike} if {arguments_list} is neither null
+ // nor undefined.
+ Node* effect0 = effect;
+ Node* control0 = control;
+ Node* value0 = effect0 = control0 = graph()->NewNode(
+ javascript()->CallWithArrayLike(p.frequency()), target, this_argument,
+ arguments_list, context, frame_state, effect0, control0);
+
+ // Lower to {JSCall} if {arguments_list} is either null or undefined.
+ Node* effect1 = effect;
+ Node* control1 =
+ graph()->NewNode(common()->Merge(2), if_null, if_undefined);
+ Node* value1 = effect1 = control1 =
+ graph()->NewNode(javascript()->Call(2), target, this_argument,
+ context, frame_state, effect1, control1);
+
+ // Rewire potential exception edges.
+ Node* if_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* if_exception0 =
+ graph()->NewNode(common()->IfException(), control0, effect0);
+ control0 = graph()->NewNode(common()->IfSuccess(), control0);
+ Node* if_exception1 =
+ graph()->NewNode(common()->IfException(), control1, effect1);
+ control1 = graph()->NewNode(common()->IfSuccess(), control1);
+
+ // Join the exception edges.
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_exception0, if_exception1);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), if_exception0,
+ if_exception1, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_exception0, if_exception1, merge);
+ ReplaceWithValue(if_exception, phi, ephi, merge);
}
- } else if (type == CreateArgumentsType::kRestParameter) {
- start_index = formal_parameter_count;
- }
- // Check if are applying to inlined arguments or to the arguments of
- // the outermost function.
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) {
- // Reduce {node} to a JSCallForwardVarargs operation, which just
- // re-pushes the incoming arguments and calls the {target}.
- node->RemoveInput(0); // Function.prototype.apply
- node->RemoveInput(2); // arguments
- NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
- 2, start_index, p.tail_call_mode()));
- return Changed(node);
- }
- // Get to the actual frame state from which to extract the arguments;
- // we can only optimize this in case the {node} was already inlined into
- // some other function (and same for the {arg_array}).
- FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
- if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
- // Need to take the parameters from the arguments adaptor.
- frame_state = outer_state;
- }
- // Remove the argArray input from the {node}.
- node->RemoveInput(static_cast<int>(--arity));
- // Add the actual parameters to the {node}, skipping the receiver,
- // starting from {start_index}.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
- node->InsertInput(graph()->zone(), static_cast<int>(arity),
- parameters->InputAt(i));
- ++arity;
+
+ // Join control paths.
+ control = graph()->NewNode(common()->Merge(2), control0, control1);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), effect0, effect1, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ value0, value1, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
- // Drop the {target} from the {node}.
- node->RemoveInput(0);
- --arity;
- } else {
- return NoChange();
}
// Change {node} to the new {JSCall} operator.
NodeProperties::ChangeOp(
node,
- javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
- p.tail_call_mode()));
- // Change context of {node} to the Function.prototype.apply context,
- // to ensure any exception is thrown in the correct context.
- NodeProperties::ReplaceContextInput(
- node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -244,8 +300,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
}
NodeProperties::ChangeOp(
node,
- javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
- p.tail_call_mode()));
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -288,17 +343,12 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(object, effect, &object_maps);
if (result != NodeProperties::kNoReceiverMaps) {
- Handle<Map> candidate_map(
- object_maps[0]->GetPrototypeChainRootMap(isolate()));
+ Handle<Map> candidate_map = object_maps[0];
Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
- // We cannot deal with primitives here.
- if (candidate_map->IsPrimitiveMap()) return NoChange();
-
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
- Handle<Map> const object_map(
- object_maps[i]->GetPrototypeChainRootMap(isolate()));
+ Handle<Map> object_map = object_maps[i];
if (object_map->IsSpecialReceiverMap() ||
object_map->has_hidden_prototype() ||
object_map->prototype() != *candidate_prototype) {
@@ -307,6 +357,9 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
// with hidden prototypes at this point.
return NoChange();
}
+ // The above check also excludes maps for primitive values, which is
+ // important because we are not applying [[ToObject]] here as expected.
+ DCHECK(!object_map->IsPrimitiveMap() && object_map->IsJSReceiverMap());
if (result == NodeProperties::kUnreliableReceiverMaps &&
!object_map->is_stable()) {
return NoChange();
@@ -341,6 +394,83 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
return ReduceObjectGetPrototype(node, receiver);
}
+// ES #sec-object.prototype.isprototypeof
+Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* value = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Ensure that the {receiver} is known to be a JSReceiver (so that
+ // the ToObject step of Object.prototype.isPrototypeOf is a no-op).
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
+ }
+
+ // We don't check whether {value} is a proper JSReceiver here explicitly,
+ // and don't explicitly rule out Primitive {value}s, since all of them
+ // have null as their prototype, so the prototype chain walk inside the
+ // JSHasInPrototypeChain operator immediately aborts and yields false.
+ NodeProperties::ReplaceValueInput(node, value, 0);
+ NodeProperties::ReplaceValueInput(node, receiver, 1);
+ for (int i = node->op()->ValueInputCount(); i-- > 2;) {
+ node->RemoveInput(i);
+ }
+ NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
+ return Changed(node);
+}
+
+// ES6 section 26.1.1 Reflect.apply ( target, thisArgument, argumentsList )
+Reduction JSCallReducer::ReduceReflectApply(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ DCHECK_LE(0, arity);
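+ // Reflect.apply(target, thisArgument, argumentsList) is lowered to
+ // JSCallWithArrayLike(target, thisArgument, argumentsList), padding any
+ // missing arguments with undefined and dropping extra ones.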
+ // Massage value inputs appropriately.
+ node->RemoveInput(0);
+ node->RemoveInput(0);
+ while (arity < 3) {
+ node->InsertInput(graph()->zone(), arity++, jsgraph()->UndefinedConstant());
+ }
+ while (arity-- > 3) {
+ node->RemoveInput(arity);
+ }
+ NodeProperties::ChangeOp(node,
+ javascript()->CallWithArrayLike(p.frequency()));
+ Reduction const reduction = ReduceJSCallWithArrayLike(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+// ES6 section 26.1.2 Reflect.construct ( target, argumentsList [, newTarget] )
+Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ DCHECK_LE(0, arity);
+ // Massage value inputs appropriately.
+ node->RemoveInput(0);
+ node->RemoveInput(0);
+ while (arity < 2) {
+ node->InsertInput(graph()->zone(), arity++, jsgraph()->UndefinedConstant());
+ }
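+ // If no explicit newTarget was provided, the {target} itself is used as
+ // the newTarget.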
+ if (arity < 3) {
+ node->InsertInput(graph()->zone(), arity++, node->InputAt(0));
+ }
+ while (arity-- > 3) {
+ node->RemoveInput(arity);
+ }
+ NodeProperties::ChangeOp(node,
+ javascript()->ConstructWithArrayLike(p.frequency()));
+ Reduction const reduction = ReduceJSConstructWithArrayLike(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
// ES6 section 26.1.7 Reflect.getPrototypeOf ( target )
Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -350,6 +480,346 @@ Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
return ReduceObjectGetPrototype(node, target);
}
+bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype);
+}
+
+Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
+ Node* node) {
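+ // Inlines Array.prototype.forEach as an explicit index-based loop over the
+ // {receiver} elements; the receiver map and index bounds are re-checked on
+ // every iteration, since the callback may mutate the array.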
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) {
+ return NoChange();
+ }
+ if (receiver_maps.size() != 1) return NoChange();
+ Handle<Map> receiver_map(receiver_maps[0]);
+ ElementsKind kind = receiver_map->elements_kind();
+ // TODO(danno): Handle double packed elements
+ if (!IsFastElementsKind(kind) || IsDoubleElementsKind(kind) ||
+ !CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+
+ // TODO(danno): forEach can throw. Hook up exceptional edges.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration.
+ Node* orig_map = jsgraph()->HeapConstant(receiver_map);
+ Node* array_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check_map =
+ graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+
+ // Make sure that the access is still in bounds, since the callback could have
+ // changed the array's size.
+ Node* length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+ k = effect =
+ graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
+
+ // Reload the elements pointer before calling the callback, since the previous
+ // callback might have resized the array causing the elements buffer to be
+ // re-allocated.
+ Node* elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ Node* element = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ elements, k, effect, control);
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->Constant(1));
+ checkpoint_params[3] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+ // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ k = next_k;
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
+ return Replace(jsgraph()->UndefinedConstant());
+}
+
+Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
+ Node* node) {
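+ // Inlines Array.prototype.map as an explicit loop: the output array is
+ // created up front with the original length via CreateArray, and each
+ // callback result is written back with TransitionAndStoreElement so the
+ // output array can transition elements kinds as needed.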
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) {
+ return NoChange();
+ }
+ if (receiver_maps.size() != 1) return NoChange();
+ Handle<Map> receiver_map(receiver_maps[0]);
+ ElementsKind kind = receiver_map->elements_kind();
+ // TODO(danno): Handle holey Smi and Object fast elements kinds and double
+ // packed.
+ if (!IsFastPackedElementsKind(kind) || IsDoubleElementsKind(kind)) {
+ return NoChange();
+ }
+
+ // TODO(danno): map can throw. Hook up exceptional edges.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+ // We want the input to be a generic Array.
+ const int map_index = Context::ArrayMapIndex(kind);
+ Handle<JSFunction> handle_constructor(
+ JSFunction::cast(
+ Map::cast(native_context()->get(map_index))->GetConstructor()),
+ isolate());
+ Node* array_constructor = jsgraph()->HeapConstant(handle_constructor);
+ if (receiver_map->prototype() !=
+ native_context()->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ return NoChange();
+ }
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+ Node* orig_map = jsgraph()->HeapConstant(receiver_map);
+
+ // Make sure the map hasn't changed before we construct the output array.
+ {
+ Node* array_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check_map =
+ graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+ }
+
+ Node* original_length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
+ // This array should be HOLEY_SMI_ELEMENTS because of the non-zero length.
+ Node* a = control = effect = graph()->NewNode(
+ javascript()->CreateArray(1, Handle<AllocationSite>::null()),
+ array_constructor, array_constructor, original_length, context,
+ outer_frame_state, effect, control);
+
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, a, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayMapLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration.
+ Node* array_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check_map =
+ graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+
+ // Make sure that the access is still in bounds, since the callback could have
+ // changed the array's size.
+ Node* length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+ k = effect =
+ graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
+
+ // Reload the elements pointer before calling the callback, since the previous
+ // callback might have resized the array causing the elements buffer to be
+ // re-allocated.
+ Node* elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ Node* element = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ elements, k, effect, control);
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ // This frame state is dealt with by hand in
+ // ArrayMapLoopLazyDeoptContinuation.
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ Node* callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ Handle<Map> double_map(Map::cast(
+ native_context()->get(Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))));
+ Handle<Map> fast_map(
+ Map::cast(native_context()->get(Context::ArrayMapIndex(HOLEY_ELEMENTS))));
+ effect = graph()->NewNode(
+ simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
+ callback_value, effect, control);
+
+ k = next_k;
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ ReplaceWithValue(node, a, effect, control);
+ return Replace(a);
+}
+
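At the JavaScript level, the loop that ReduceArrayMap builds above corresponds roughly to the sketch below. This is illustrative only: mapSketch is a made-up name, nothing here is part of V8's sources, and the compiled code performs the receiver-map re-check and the deoptimization exits as graph operations with no direct JS equivalent.

  // Illustrative sketch (not V8 code) of the behaviour encoded by the inlined
  // Array.prototype.map loop: per-iteration bounds re-check, element reload,
  // callback invocation, and a store into the preallocated result array.
  function mapSketch(receiver, fncallback, thisArg) {
    const originalLength = receiver.length;
    const a = new Array(originalLength);      // preallocated result, holey to start
    for (let k = 0; k < originalLength; k++) {
      // The optimized loop also re-checks the receiver's map here and deopts
      // if it changed; the generic path then takes over.
      if (k >= receiver.length) continue;     // bounds re-check: the callback may shrink the array
      const element = receiver[k];            // elements are reloaded every pass
      a[k] = fncallback.call(thisArg, element, k, receiver);
    }
    return a;
  }

  mapSketch([1, 2, 3], (x) => x * 2);         // -> [2, 4, 6]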
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, Handle<FunctionTemplateInfo> function_template_info) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -439,38 +909,106 @@ Reduction JSCallReducer::ReduceCallApiFunction(
return Changed(node);
}
-Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
- DCHECK(node->opcode() == IrOpcode::kJSCallWithSpread ||
+namespace {
+
+// Check that the arguments elements are never written to; we play it extremely safe
+// here by explicitly checking that {node} is only used by {LoadField} or {LoadElement}.
+bool IsSafeArgumentsElements(Node* node) {
+ for (Edge const edge : node->use_edges()) {
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (edge.from()->opcode() != IrOpcode::kLoadField &&
+ edge.from()->opcode() != IrOpcode::kLoadElement) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
+ Node* node, int arity, CallFrequency const& frequency) {
+ DCHECK(node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
node->opcode() == IrOpcode::kJSConstructWithSpread);
- // Do check to make sure we can actually avoid iteration.
- if (!isolate()->initial_array_iterator_prototype_map()->is_stable()) {
+ // In case of a call/construct with spread, we need to
+ // ensure that it's safe to avoid the actual iteration.
+ if ((node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread) &&
+ !isolate()->initial_array_iterator_prototype_map()->is_stable()) {
return NoChange();
}
- Node* spread = NodeProperties::GetValueInput(node, arity);
-
- // Check if spread is an arguments object, and {node} is the only value user
- // of spread (except for value uses in frame states).
- if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
- for (Edge edge : spread->use_edges()) {
+ // Check if {arguments_list} is an arguments object, and {node} is the only
+ // value user of {arguments_list} (except for value uses in frame states).
+ Node* arguments_list = NodeProperties::GetValueInput(node, arity);
+ if (arguments_list->opcode() != IrOpcode::kJSCreateArguments) {
+ return NoChange();
+ }
+ for (Edge edge : arguments_list->use_edges()) {
+ if (!NodeProperties::IsValueEdge(edge)) continue;
Node* const user = edge.from();
- if (user == node) continue;
- // Ignore uses as frame state's locals or parameters.
- if (user->opcode() == IrOpcode::kStateValues) continue;
- // Ignore uses as frame state's accumulator.
- if (user->opcode() == IrOpcode::kFrameState && user->InputAt(2) == spread) {
- continue;
+ switch (user->opcode()) {
+ case IrOpcode::kCheckMaps:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kReferenceEqual:
+ case IrOpcode::kReturn:
+ // Ignore safe uses that definitely don't mess with the arguments.
+ continue;
+ case IrOpcode::kLoadField: {
+ DCHECK_EQ(arguments_list, user->InputAt(0));
+ FieldAccess const& access = FieldAccessOf(user->op());
+ if (access.offset == JSArray::kLengthOffset) {
+ // Ignore uses for arguments#length.
+ STATIC_ASSERT(JSArray::kLengthOffset ==
+ JSArgumentsObject::kLengthOffset);
+ continue;
+ } else if (access.offset == JSObject::kElementsOffset) {
+ // Ignore safe uses for arguments#elements.
+ if (IsSafeArgumentsElements(user)) continue;
+ }
+ break;
+ }
+ case IrOpcode::kJSCallWithArrayLike:
+ // Ignore uses as the argumentsList input to JSCallWithArrayLike nodes.
+ if (user->InputAt(2) == arguments_list) continue;
+ break;
+ case IrOpcode::kJSConstructWithArrayLike:
+ // Ignore uses as the argumentsList input to JSConstructWithArrayLike nodes.
+ if (user->InputAt(1) == arguments_list) continue;
+ break;
+ case IrOpcode::kJSCallWithSpread: {
+ // Ignore uses as spread input to calls with spread.
+ SpreadWithArityParameter p = SpreadWithArityParameterOf(user->op());
+ int const arity = static_cast<int>(p.arity() - 1);
+ if (user->InputAt(arity) == arguments_list) continue;
+ break;
+ }
+ case IrOpcode::kJSConstructWithSpread: {
+ // Ignore uses as spread input to construct with spread.
+ SpreadWithArityParameter p = SpreadWithArityParameterOf(user->op());
+ int const arity = static_cast<int>(p.arity() - 2);
+ if (user->InputAt(arity) == arguments_list) continue;
+ break;
+ }
+ default:
+ break;
}
- if (!NodeProperties::IsValueEdge(edge)) continue;
+ // We cannot currently reduce the {node} to something better than what
+ // it already is, but we might be able to do something about the {node}
+ // later, so put it on the waitlist and try again during finalization.
+ waitlist_.insert(node);
return NoChange();
}
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
- // some other function (and same for the {spread}).
- CreateArgumentsType const type = CreateArgumentsTypeOf(spread->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(spread);
+ // some other function (and same for the {arguments_list}).
+ CreateArgumentsType const type = CreateArgumentsTypeOf(arguments_list->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(arguments_list);
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
int start_index = 0;
// Determine the formal parameter count.
@@ -483,7 +1021,7 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
// TODO(turbofan): Further relax this constraint.
if (formal_parameter_count != 0) {
Node* effect = NodeProperties::GetEffectInput(node);
- while (effect != spread) {
+ while (effect != arguments_list) {
if (effect->op()->EffectInputCount() != 1 ||
!(effect->op()->properties() & Operator::kNoWrite)) {
return NoChange();
@@ -494,26 +1032,35 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
} else if (type == CreateArgumentsType::kRestParameter) {
start_index = formal_parameter_count;
- // Only check the array iterator protector when we have a rest object.
- if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+ // For spread calls/constructs with rest parameters we need to ensure that
+ // the array iterator protector is intact, which guards that the rest
+ // parameter iteration is not observable.
+ if (node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread) {
+ if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+ dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ }
}
- // Install appropriate code dependencies.
- dependencies()->AssumeMapStable(
- isolate()->initial_array_iterator_prototype_map());
- if (type == CreateArgumentsType::kRestParameter) {
- dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ // For call/construct with spread, we need to also install a code
+ // dependency on the initial %ArrayIteratorPrototype% map here to
+ // ensure that no one messes with the next method.
+ if (node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread) {
+ dependencies()->AssumeMapStable(
+ isolate()->initial_array_iterator_prototype_map());
}
- // Remove the spread input from the {node}.
+
+ // Remove the {arguments_list} input from the {node}.
node->RemoveInput(arity--);
// Check if we are spreading to inlined arguments or to the arguments of
// the outermost function.
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
if (outer_state->opcode() != IrOpcode::kFrameState) {
Operator const* op =
- (node->opcode() == IrOpcode::kJSCallWithSpread)
- ? javascript()->CallForwardVarargs(arity + 1, start_index,
- TailCallMode::kDisallow)
+ (node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread)
+ ? javascript()->CallForwardVarargs(arity + 1, start_index)
: javascript()->ConstructForwardVarargs(arity + 2, start_index);
NodeProperties::ChangeOp(node, op);
return Changed(node);
@@ -533,16 +1080,16 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
parameters->InputAt(i));
}
- // TODO(turbofan): Collect call counts on spread call/construct and thread it
- // through here.
- if (node->opcode() == IrOpcode::kJSCallWithSpread) {
- NodeProperties::ChangeOp(node, javascript()->Call(arity + 1));
- Reduction const r = ReduceJSCall(node);
- return r.Changed() ? r : Changed(node);
+ if (node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread) {
+ NodeProperties::ChangeOp(node, javascript()->Call(arity + 1, frequency));
+ Reduction const reduction = ReduceJSCall(node);
+ return reduction.Changed() ? reduction : Changed(node);
} else {
- NodeProperties::ChangeOp(node, javascript()->Construct(arity + 2));
- Reduction const r = ReduceJSConstruct(node);
- return r.Changed() ? r : Changed(node);
+ NodeProperties::ChangeOp(node,
+ javascript()->Construct(arity + 2, frequency));
+ Reduction const reduction = ReduceJSConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
}
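For orientation, the call shapes this reduction targets look roughly like the JavaScript below (illustrative only; target is a placeholder callee, not anything from V8). When the arguments object or rest array never escapes, the call is rewritten to CallForwardVarargs/ConstructForwardVarargs for the outermost frame, or to a plain Call/Construct on the statically known arguments when the creating frame was inlined.

  // Illustrative call shapes (not V8 code).
  function target(...xs) { return xs.length; }    // placeholder callee

  function forwardAll() {
    // A Function.prototype.apply call like this typically reaches the reducer
    // as JSCallWithArrayLike on a freshly created arguments object.
    return target.apply(null, arguments);
  }

  function forwardRest(first, ...rest) {
    // Reaches the reducer as JSCallWithSpread on a rest parameter; guarded by
    // the array iterator protector checked above.
    return target(...rest);
  }

  forwardAll(1, 2, 3);       // -> 3
  forwardRest(0, 1, 2, 3);   // -> 3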
@@ -614,8 +1161,20 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceObjectGetPrototypeOf(node);
case Builtins::kObjectPrototypeGetProto:
return ReduceObjectPrototypeGetProto(node);
+ case Builtins::kObjectPrototypeIsPrototypeOf:
+ return ReduceObjectPrototypeIsPrototypeOf(node);
+ case Builtins::kReflectApply:
+ return ReduceReflectApply(node);
+ case Builtins::kReflectConstruct:
+ return ReduceReflectConstruct(node);
case Builtins::kReflectGetPrototypeOf:
return ReduceReflectGetPrototypeOf(node);
+ case Builtins::kArrayForEach:
+ return ReduceArrayForEach(function, node);
+ case Builtins::kArrayMap:
+ return ReduceArrayMap(function, node);
+ case Builtins::kReturnReceiver:
+ return ReduceReturnReceiver(node);
default:
break;
}
@@ -658,9 +1217,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
arity++;
}
NodeProperties::ChangeOp(
- node,
- javascript()->Call(arity, p.frequency(), VectorSlotPair(),
- convert_mode, p.tail_call_mode()));
+ node, javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+ convert_mode));
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -675,27 +1233,12 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
- // TODO(turbofan): Tail-calling to a CallIC stub is not supported.
- if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
-
- // Insert a CallIC here to collect feedback for uninitialized calls.
- int const arg_count = static_cast<int>(p.arity() - 2);
- Callable callable = CodeFactory::CallIC(isolate(), p.convert_mode());
- CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
- flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Constant(arg_count);
- Node* slot_index =
- jsgraph()->Constant(FeedbackVector::GetIndex(p.feedback().slot()));
- Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2, stub_arity);
- node->InsertInput(graph()->zone(), 3, slot_index);
- node->InsertInput(graph()->zone(), 4, feedback_vector);
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
+ if (flags() & kBailoutOnUninitialized) {
+ // Introduce a SOFT deopt if the call {node} wasn't executed so far.
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
+ }
+ return NoChange();
}
Handle<Object> feedback(nexus.GetFeedback(), isolate());
@@ -740,13 +1283,22 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
+ CallFrequency frequency = CallFrequencyOf(node->op());
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, 2, frequency);
+}
+
Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
DCHECK_LE(3u, p.arity());
int arity = static_cast<int>(p.arity() - 1);
- return ReduceSpreadCall(node, arity);
+ // TODO(turbofan): Collect call counts on spread call/construct and thread it
+ // through here.
+ CallFrequency frequency;
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency);
}
Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
@@ -805,8 +1357,18 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+ // Extract feedback from the {node} using the CallICNexus.
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.IsUninitialized()) {
+ if (flags() & kBailoutOnUninitialized) {
+ // Introduce a SOFT deopt if the construct {node} wasn't executed so far.
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
+ }
+ return NoChange();
+ }
+
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
@@ -864,13 +1426,45 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
+ CallFrequency frequency = CallFrequencyOf(node->op());
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, 1, frequency);
+}
+
Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
DCHECK_LE(3u, p.arity());
int arity = static_cast<int>(p.arity() - 2);
- return ReduceSpreadCall(node, arity);
+ // TODO(turbofan): Collect call counts on spread call/construct and thread it
+ // through here.
+ CallFrequency frequency;
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency);
+}
+
+Reduction JSCallReducer::ReduceReturnReceiver(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ ReplaceWithValue(node, receiver);
+ return Replace(receiver);
+}
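ReduceReturnReceiver covers callees whose builtin id is Builtins::kReturnReceiver, i.e. builtins whose JS-observable behaviour is just returning their receiver, so the whole call folds to the receiver value. A minimal illustration (not V8 code):

  // What a kReturnReceiver builtin does, sketched in plain JavaScript.
  function returnReceiver() { return this; }
  const o = { self: returnReceiver };
  console.log(o.self() === o);   // true; the reducer replaces such a call with o itself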
+
+Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
+ DeoptimizeReason reason) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
}
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 31326084cc..a6598e82d5 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/deoptimize-reason.h"
namespace v8 {
namespace internal {
@@ -18,6 +19,7 @@ class Factory;
namespace compiler {
// Forward declarations.
+class CallFrequency;
class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
@@ -27,16 +29,27 @@ class SimplifiedOperatorBuilder;
// which might allow inlining or other optimizations to be performed afterwards.
class JSCallReducer final : public AdvancedReducer {
public:
- JSCallReducer(Editor* editor, JSGraph* jsgraph,
+ // Flags that control the mode of operation.
+ enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
+ typedef base::Flags<Flag> Flags;
+
+ JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
+ flags_(flags),
native_context_(native_context),
dependencies_(dependencies) {}
+ const char* reducer_name() const override { return "JSCallReducer"; }
+
Reduction Reduce(Node* node) final;
+ // Processes the waitlist gathered while the reducer was running,
+ // and does a final attempt to reduce the nodes in the waitlist.
+ void Finalize() final;
+
private:
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
@@ -49,12 +62,23 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceObjectGetPrototype(Node* node, Node* object);
Reduction ReduceObjectGetPrototypeOf(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
+ Reduction ReduceObjectPrototypeIsPrototypeOf(Node* node);
+ Reduction ReduceReflectApply(Node* node);
+ Reduction ReduceReflectConstruct(Node* node);
Reduction ReduceReflectGetPrototypeOf(Node* node);
- Reduction ReduceSpreadCall(Node* node, int arity);
+ Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
+ Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
+ Node* node, int arity, CallFrequency const& frequency);
Reduction ReduceJSConstruct(Node* node);
+ Reduction ReduceJSConstructWithArrayLike(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
+ Reduction ReduceReturnReceiver(Node* node);
+
+ Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -65,11 +89,14 @@ class JSCallReducer final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
+ Flags flags() const { return flags_; }
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
+ Flags const flags_;
Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
+ std::set<Node*> waitlist_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index c9548ffd1c..e682490386 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -199,11 +199,6 @@ Isolate* JSContextSpecialization::isolate() const {
return jsgraph()->isolate();
}
-
-JSOperatorBuilder* JSContextSpecialization::javascript() const {
- return jsgraph()->javascript();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 0cf2bc1e54..83949fa3cc 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -40,6 +40,10 @@ class JSContextSpecialization final : public AdvancedReducer {
outer_(outer),
closure_(closure) {}
+ const char* reducer_name() const override {
+ return "JSContextSpecialization";
+ }
+
Reduction Reduce(Node* node) final;
private:
@@ -53,7 +57,6 @@ class JSContextSpecialization final : public AdvancedReducer {
size_t new_depth);
Isolate* isolate() const;
- JSOperatorBuilder* javascript() const;
JSGraph* jsgraph() const { return jsgraph_; }
Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 57eedfada2..dcf1575884 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -148,7 +148,7 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastSmiOrObjectElements()) {
+ if (boilerplate->HasSmiOrObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -161,7 +161,7 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
}
}
- } else if (boilerplate->HasFastDoubleElements()) {
+ } else if (boilerplate->HasDoubleElements()) {
if (elements->Size() > kMaxRegularHeapObjectSize) return false;
} else {
return false;
@@ -169,8 +169,10 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
// TODO(turbofan): Do we want to support out-of-object properties?
- Handle<FixedArray> properties(boilerplate->properties(), isolate);
- if (properties->length() > 0) return false;
+ if (!(boilerplate->HasFastProperties() &&
+ boilerplate->property_array()->length() == 0)) {
+ return false;
+ }
// Check the in-object properties.
Handle<DescriptorArray> descriptors(
@@ -200,8 +202,7 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
// performance of using object literals is not worse than using constructor
// functions, see crbug.com/v8/6211 for details.
const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties =
- (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
+const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
} // namespace
@@ -259,14 +260,11 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
// Force completion of inobject slack tracking before
// generating code to finalize the instance size.
original_constructor->CompleteInobjectSlackTrackingIfActive();
-
- // Compute instance size from initial map of {original_constructor}.
Handle<Map> initial_map(original_constructor->initial_map(), isolate());
int const instance_size = initial_map->instance_size();
// Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} of the {original_constructor}
- // changes.
+ // deoptimized whenever the {initial_map} changes.
dependencies()->AssumeInitialMapCantChange(initial_map);
// Emit code to allocate the JSObject instance for the
@@ -338,7 +336,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
RelaxControls(node);
a.FinishAndChange(node);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kFastNewSloppyArguments);
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
@@ -382,7 +381,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
RelaxControls(node);
a.FinishAndChange(node);
} else {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kFastNewStrictArguments);
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
@@ -422,11 +422,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), rest_length);
+ a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS),
+ rest_length);
RelaxControls(node);
a.FinishAndChange(node);
} else {
- Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewRestParameter);
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
@@ -539,7 +541,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+ a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS),
jsgraph()->Constant(length));
RelaxControls(node);
a.FinishAndChange(node);
@@ -558,27 +560,30 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
Type* const closure_type = NodeProperties::GetType(closure);
Node* effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
- // Extract constructor and original constructor function.
if (closure_type->IsHeapConstant()) {
DCHECK(closure_type->AsHeapConstant()->Value()->IsJSFunction());
Handle<JSFunction> js_function =
Handle<JSFunction>::cast(closure_type->AsHeapConstant()->Value());
JSFunction::EnsureHasInitialMap(js_function);
- Handle<Map> initial_map(js_function->initial_map());
- initial_map->CompleteInobjectSlackTracking();
+
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ js_function->CompleteInobjectSlackTrackingIfActive();
+ Handle<Map> initial_map(js_function->initial_map(), isolate());
DCHECK(initial_map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
// Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} of the {original_constructor}
- // changes.
+ // deoptimized whenever the {initial_map} changes.
dependencies()->AssumeInitialMapCantChange(initial_map);
+ // Allocate a register file.
DCHECK(js_function->shared()->HasBytecodeArray());
int size = js_function->shared()->bytecode_array()->register_count();
- Node* elements = effect = AllocateElements(
- effect, control, FAST_HOLEY_ELEMENTS, size, NOT_TENURED);
+ Node* register_file = effect =
+ AllocateElements(effect, control, HOLEY_ELEMENTS, size, NOT_TENURED);
+ // Emit code to allocate the JS[Async]GeneratorObject instance.
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(initial_map->instance_size());
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
@@ -594,12 +599,10 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
jsgraph()->Constant(JSGeneratorObject::kNext));
a.Store(AccessBuilder::ForJSGeneratorObjectContinuation(),
jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
- a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), elements);
+ a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), register_file);
if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
- a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos(),
- undefined);
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise(),
undefined);
}
@@ -680,14 +683,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
// deoptimize in this case.
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsSmiElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
value = effect =
graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
}
}
- } else if (IsFastDoubleElementsKind(elements_kind)) {
+ } else if (IsDoubleElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::Number())) {
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -728,10 +731,13 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
Type* new_target_type = NodeProperties::GetType(new_target);
+ Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(site);
- ElementsKind elements_kind = site->GetElementsKind();
+ ElementsKind elements_kind =
+ site.is_null() ? GetInitialFastElementsKind() : site->GetElementsKind();
AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ (site.is_null() || AllocationSite::ShouldTrack(elements_kind))
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
@@ -746,112 +752,37 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
- CallDescriptor::kNeedsFrameState, properties);
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(0));
+ node->InsertInput(graph()->zone(), 2, type_info);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
} else if (arity == 1) {
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (IsHoleyElementsKind(elements_kind)) {
- ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState, properties);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(1));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
- }
-
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* length = NodeProperties::GetValueInput(node, 2);
- Node* equal = graph()->NewNode(simplified()->ReferenceEqual(), length,
- jsgraph()->ZeroConstant());
-
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), equal, control);
- Node* call_holey;
- Node* call_packed;
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* if_equal = graph()->NewNode(common()->IfTrue(), branch);
- {
- ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState, properties);
-
- Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
- node->InputAt(1),
- jsgraph()->HeapConstant(site),
- jsgraph()->Constant(1),
- jsgraph()->UndefinedConstant(),
- length,
- context,
- frame_state,
- effect,
- if_equal};
-
- call_holey =
- graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- }
- Node* if_not_equal = graph()->NewNode(common()->IfFalse(), branch);
- {
- // Require elements kind to "go holey."
- ArraySingleArgumentConstructorStub stub(
- isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState, properties);
-
- Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
- node->InputAt(1),
- jsgraph()->HeapConstant(site),
- jsgraph()->Constant(1),
- jsgraph()->UndefinedConstant(),
- length,
- context,
- frame_state,
- effect,
- if_not_equal};
-
- call_packed =
- graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- }
- Node* merge = graph()->NewNode(common()->Merge(2), call_holey, call_packed);
- Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), call_holey,
- call_packed, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- call_holey, call_packed, merge);
-
- ReplaceWithValue(node, phi, effect_phi, merge);
- return Changed(node);
+ // Require elements kind to "go holey".
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState, properties);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, type_info);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ } else {
+ DCHECK_GT(arity, 1);
+ ArrayNArgumentsConstructorStub stub(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, type_info);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
-
- DCHECK(arity > 1);
- ArrayNArgumentsConstructorStub stub(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), arity + 1,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
}
@@ -861,46 +792,43 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
- // TODO(mstarzinger): Array constructor can throw. Hook up exceptional edges.
- if (NodeProperties::IsExceptionalCall(node)) return NoChange();
-
// TODO(bmeurer): Optimize the subclassing case.
if (target != new_target) return NoChange();
// Check if we have a feedback {site} on the {node}.
Handle<AllocationSite> site = p.site();
- if (p.site().is_null()) return NoChange();
-
- // Attempt to inline calls to the Array constructor for the relevant cases
- // where either no arguments are provided, or exactly one unsigned number
- // argument is given.
- if (site->CanInlineCall()) {
- if (p.arity() == 0) {
- Node* length = jsgraph()->ZeroConstant();
- int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, site);
- } else if (p.arity() == 1) {
- Node* length = NodeProperties::GetValueInput(node, 2);
- Type* length_type = NodeProperties::GetType(length);
- if (!length_type->Maybe(Type::Number())) {
- // Handle the single argument case, where we know that the value
- // cannot be a valid Array length.
- return ReduceNewArray(node, {length}, site);
- }
- if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
- length_type->Max() <= kElementLoopUnrollLimit &&
- length_type->Min() == length_type->Max()) {
- int capacity = static_cast<int>(length_type->Max());
+ if (!site.is_null()) {
+ // Attempt to inline calls to the Array constructor for the relevant cases
+ // where either no arguments are provided, or exactly one unsigned number
+ // argument is given.
+ if (site->CanInlineCall()) {
+ if (p.arity() == 0) {
+ Node* length = jsgraph()->ZeroConstant();
+ int capacity = JSArray::kPreallocatedArrayElements;
return ReduceNewArray(node, length, capacity, site);
+ } else if (p.arity() == 1) {
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Type* length_type = NodeProperties::GetType(length);
+ if (!length_type->Maybe(Type::Number())) {
+ // Handle the single argument case, where we know that the value
+ // cannot be a valid Array length.
+ return ReduceNewArray(node, {length}, site);
+ }
+ if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
+ length_type->Max() <= kElementLoopUnrollLimit &&
+ length_type->Min() == length_type->Max()) {
+ int capacity = static_cast<int>(length_type->Max());
+ return ReduceNewArray(node, length, capacity, site);
+ }
+ } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
+ std::vector<Node*> values;
+ values.reserve(p.arity());
+ for (size_t i = 0; i < p.arity(); ++i) {
+ values.push_back(
+ NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
+ }
+ return ReduceNewArray(node, values, site);
}
- } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
- std::vector<Node*> values;
- values.reserve(p.arity());
- for (size_t i = 0; i < p.arity(); ++i) {
- values.push_back(
- NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
- }
- return ReduceNewArray(node, values, site);
}
}
@@ -944,9 +872,9 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
AllocationBuilder aa(jsgraph(), effect, graph()->start());
aa.AllocateArray(2, factory()->fixed_array_map());
- aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+ aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->Constant(0), key);
- aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+ aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->Constant(1), value);
Node* elements = aa.Finish();
@@ -955,7 +883,7 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
a.Store(AccessBuilder::ForMap(), array_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+ a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), length);
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
a.FinishAndChange(node);
return Changed(node);
@@ -974,8 +902,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
Handle<Object> literal(feedback_vector->Get(slot), isolate());
if (literal->IsAllocationSite()) {
Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
- Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
- isolate());
+ Handle<JSObject> boilerplate(site->boilerplate(), isolate());
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
AllocationSiteUsageContext site_context(isolate(), site, false);
@@ -1247,10 +1174,10 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
DCHECK_LE(1, capacity);
DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
- Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ Handle<Map> elements_map = IsDoubleElementsKind(elements_kind)
? factory()->fixed_double_array_map()
: factory()->fixed_array_map();
- ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ElementAccess access = IsDoubleElementsKind(elements_kind)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
Node* value = jsgraph()->TheHoleConstant();
@@ -1273,10 +1200,10 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
DCHECK_LE(1, capacity);
DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
- Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ Handle<Map> elements_map = IsDoubleElementsKind(elements_kind)
? factory()->fixed_double_array_map()
: factory()->fixed_array_map();
- ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ElementAccess access = IsDoubleElementsKind(elements_kind)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
@@ -1498,10 +1425,6 @@ Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
Isolate* JSCreateLowering::isolate() const { return jsgraph()->isolate(); }
-JSOperatorBuilder* JSCreateLowering::javascript() const {
- return jsgraph()->javascript();
-}
-
CommonOperatorBuilder* JSCreateLowering::common() const {
return jsgraph()->common();
}
@@ -1510,10 +1433,6 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
return jsgraph()->simplified();
}
-MachineOperatorBuilder* JSCreateLowering::machine() const {
- return jsgraph()->machine();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index d03464d39d..e122d4cd6b 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -44,6 +44,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
zone_(zone) {}
~JSCreateLowering() final {}
+ const char* reducer_name() const override { return "JSCreateLowering"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -73,6 +75,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
ElementsKind elements_kind, int capacity,
PretenureFlag pretenure);
Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind, Node* capacity_and_length);
+ Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind,
std::vector<Node*> const& values,
PretenureFlag pretenure);
@@ -94,10 +98,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
Handle<Context> native_context() const { return native_context_; }
- JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
- MachineOperatorBuilder* machine() const;
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
index f268b3ac5b..cbc82c4eed 100644
--- a/deps/v8/src/compiler/js-frame-specialization.h
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -25,6 +25,8 @@ class JSFrameSpecialization final : public AdvancedReducer {
: AdvancedReducer(editor), frame_(frame), jsgraph_(jsgraph) {}
~JSFrameSpecialization() final {}
+ const char* reducer_name() const override { return "JSFrameSpecialization"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index ea5a4a4627..02630b2420 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -50,11 +50,11 @@ Reduction JSGenericLowering::Reduce(Node* node) {
return Changed(node);
}
-#define REPLACE_STUB_CALL(Name) \
- void JSGenericLowering::LowerJS##Name(Node* node) { \
- CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
- Callable callable = CodeFactory::Name(isolate()); \
- ReplaceWithStubCall(node, callable, flags); \
+#define REPLACE_STUB_CALL(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::k##Name); \
+ ReplaceWithStubCall(node, callable, flags); \
}
REPLACE_STUB_CALL(Add)
REPLACE_STUB_CALL(Subtract)
@@ -79,6 +79,7 @@ REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
+REPLACE_STUB_CALL(ToPrimitiveToString)
#undef REPLACE_STUB_CALL
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
@@ -120,7 +121,7 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
// The === operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::StrictEqual(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kStrictEqual);
node->RemoveInput(4); // control
ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
Operator::kEliminatable);
@@ -129,7 +130,7 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) {
void JSGenericLowering::LowerJSToBoolean(Node* node) {
// The ToBoolean conversion doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::ToBoolean(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
@@ -138,7 +139,7 @@ void JSGenericLowering::LowerJSToBoolean(Node* node) {
void JSGenericLowering::LowerJSClassOf(Node* node) {
// The %_ClassOf intrinsic doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::ClassOf(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kClassOf);
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
@@ -147,12 +148,25 @@ void JSGenericLowering::LowerJSClassOf(Node* node) {
void JSGenericLowering::LowerJSTypeOf(Node* node) {
// The typeof operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::Typeof(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
}
+void JSGenericLowering::LowerJSStringConcat(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kStringConcat);
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, operand_count, flags,
+ node->op()->properties());
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 1, jsgraph()->Int32Constant(operand_count));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -161,17 +175,18 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = CodeFactory::KeyedLoadIC(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kKeyedLoadICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
}
-
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
@@ -180,10 +195,11 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = CodeFactory::LoadIC(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kLoadICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
@@ -317,19 +333,25 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::GetSuperConstructor(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kGetSuperConstructor);
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kHasInPrototypeChain);
+}
+
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::InstanceOf(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kInstanceOf);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::OrdinaryHasInstance(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kOrdinaryHasInstance);
ReplaceWithStubCall(node, callable, flags);
}
@@ -345,7 +367,8 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
void JSGenericLowering::LowerJSCreate(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::FastNewObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewObject);
ReplaceWithStubCall(node, callable, flags);
}
@@ -370,13 +393,21 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
Handle<AllocationSite> const site = p.site();
- Node* new_target = node->InputAt(1);
+ ArrayConstructorDescriptor descriptor(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, arity + 1,
+ CallDescriptor::kNeedsFrameState, node->op()->properties(),
+ MachineType::AnyTagged());
+ Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
+ Node* stub_arity = jsgraph()->Int32Constant(arity);
Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
: jsgraph()->HeapConstant(site);
- node->RemoveInput(1);
- node->InsertInput(zone(), 1 + arity, new_target);
- node->InsertInput(zone(), 2 + arity, type_info);
- ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, type_info);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -385,11 +416,12 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+ node->RemoveInput(3); // control
- // Use the FastNewClosurebuiltin only for functions allocated in new
- // space.
+ // Use the FastNewClosure builtin only for functions allocated in new space.
if (p.pretenure() == NOT_TENURED) {
- Callable callable = CodeFactory::FastNewClosure(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
node->InsertInput(zone(), 1,
jsgraph()->HeapConstant(p.feedback().vector()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -432,11 +464,11 @@ void JSGenericLowering::LowerJSCreateGeneratorObject(Node* node) {
}
void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kCreateKeyValueArray);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
@@ -447,7 +479,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
// Use the FastCloneShallowArray builtin only for shallow boilerplates without
// properties up to the number of elements that the stubs can handle.
- if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
+ if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
Callable callable = CodeFactory::FastCloneShallowArray(
isolate(), DONT_TRACK_ALLOCATION_SITE);
@@ -468,10 +500,11 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
// Use the FastCloneShallowObject builtin only for shallow boilerplates
// without elements up to the number of properties that the stubs can handle.
- if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
+ if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
@@ -482,7 +515,8 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneRegExp);
Node* literal_index = jsgraph()->SmiConstant(p.index());
Node* literal_flags = jsgraph()->SmiConstant(p.flags());
Node* pattern = jsgraph()->HeapConstant(p.constant());
@@ -494,19 +528,11 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- const CreateCatchContextParameters& parameters =
- CreateCatchContextParametersOf(node->op());
- node->InsertInput(zone(), 0,
- jsgraph()->HeapConstant(parameters.catch_name()));
- node->InsertInput(zone(), 2,
- jsgraph()->HeapConstant(parameters.scope_info()));
- ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
- ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
@@ -563,22 +589,46 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arguments_list = node->InputAt(1);
+ Node* new_target = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, new_target);
+ node->ReplaceInput(3, arguments_list);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
+ int const spread_index = arg_count;
+ int const new_target_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructWithSpread(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* new_target = node->InputAt(arg_count + 1);
+ Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
+ Node* new_target = node->InputAt(new_target_index);
+ Node* spread = node->InputAt(spread_index);
Node* receiver = jsgraph()->UndefinedConstant();
- node->RemoveInput(arg_count + 1); // Drop new target.
+ DCHECK(new_target_index > spread_index);
+ node->RemoveInput(new_target_index); // Drop new target.
+ node->RemoveInput(spread_index);
+
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, new_target);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, receiver);
+ node->InsertInput(zone(), 3, stack_arg_count);
+ node->InsertInput(zone(), 4, spread);
+ node->InsertInput(zone(), 5, receiver);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
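The construct-with-spread lowering above rewrites the node's value inputs in place: the spread and new-target inputs are pulled out of their JS-level positions and re-inserted at the slots the ConstructWithSpread stub expects, alongside the code object, the adjusted stack argument count, and an undefined receiver. A minimal standalone sketch of that shuffle, using strings in place of graph nodes and ignoring the context/effect/control inputs (plain C++, not the V8 API):

#include <string>
#include <vector>

// inputs: {target, arg_1, ..., arg_{n-1}, spread, new_target}; arg_count == n.
std::vector<std::string> LowerConstructWithSpreadInputs(
    std::vector<std::string> inputs, int arg_count) {
  const int spread_index = arg_count;          // spread is the last argument
  const int new_target_index = arg_count + 1;  // new target follows it
  std::string new_target = inputs[new_target_index];
  std::string spread = inputs[spread_index];
  inputs.erase(inputs.begin() + new_target_index);  // drop new target
  inputs.erase(inputs.begin() + spread_index);      // drop spread
  inputs.insert(inputs.begin() + 0, "stub_code");
  inputs.insert(inputs.begin() + 2, new_target);
  inputs.insert(inputs.begin() + 3, "stack_arg_count");  // arg_count - 1
  inputs.insert(inputs.begin() + 4, spread);
  inputs.insert(inputs.begin() + 5, "undefined_receiver");
  return inputs;
}

// Example: {C, a, s, C} with arg_count == 2 becomes
// {stub_code, C, C, stack_arg_count, s, undefined_receiver, a}.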
@@ -587,9 +637,6 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallForwardVarargs(isolate());
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -607,9 +654,6 @@ void JSGenericLowering::LowerJSCall(Node* node) {
ConvertReceiverMode const mode = p.convert_mode();
Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -619,17 +663,35 @@ void JSGenericLowering::LowerJSCall(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
+ Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = node->InputAt(1);
+ Node* arguments_list = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(3, receiver);
+ node->ReplaceInput(2, arguments_list);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
- Callable callable = CodeFactory::CallWithSpread(isolate());
+ int const spread_index = static_cast<int>(p.arity() + 1);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::CallWithSpread(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ // We pass the spread in a register, not on the stack.
+ Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 2, stack_arg_count);
+ node->InsertInput(zone(), 3, node->InputAt(spread_index));
+ node->RemoveInput(spread_index + 1);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -644,13 +706,13 @@ void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
void JSGenericLowering::LowerJSForInNext(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::ForInNext(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kForInNext);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::ForInPrepare(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kForInPrepare);
ReplaceWithStubCall(node, callable, flags, node->op()->properties(), 3);
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 88d0b45156..1a8102da59 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -27,6 +27,8 @@ class JSGenericLowering final : public Reducer {
explicit JSGenericLowering(JSGraph* jsgraph);
~JSGenericLowering() final;
+ const char* reducer_name() const override { return "JSGenericLowering"; }
+
Reduction Reduce(Node* node) final;
protected:
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 93706acf5a..dfe05933bb 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -26,6 +26,11 @@ Node* JSGraph::AllocateInOldSpaceStubConstant() {
HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
}
+Node* JSGraph::ArrayConstructorStubConstant() {
+ return CACHED(kArrayConstructorStubConstant,
+ HeapConstant(ArrayConstructorStub(isolate()).GetCode()));
+}
+
Node* JSGraph::ToNumberBuiltinConstant() {
return CACHED(kToNumberBuiltinConstant,
HeapConstant(isolate()->builtins()->ToNumber()));
@@ -77,6 +82,11 @@ Node* JSGraph::FixedArrayMapConstant() {
HeapConstant(factory()->fixed_array_map()));
}
+Node* JSGraph::PropertyArrayMapConstant() {
+ return CACHED(kPropertyArrayMapConstant,
+ HeapConstant(factory()->property_array_map()));
+}
+
Node* JSGraph::FixedDoubleArrayMapConstant() {
return CACHED(kFixedDoubleArrayMapConstant,
HeapConstant(factory()->fixed_double_array_map()));
@@ -130,6 +140,9 @@ Node* JSGraph::OneConstant() {
return CACHED(kOneConstant, NumberConstant(1.0));
}
+Node* JSGraph::MinusOneConstant() {
+ return CACHED(kMinusOneConstant, NumberConstant(-1.0));
+}
Node* JSGraph::NaNConstant() {
return CACHED(kNaNConstant,
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 4b3ed4856a..a4eb9a9061 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// Canonicalized global constants.
Node* AllocateInNewSpaceStubConstant();
Node* AllocateInOldSpaceStubConstant();
+ Node* ArrayConstructorStubConstant();
Node* ToNumberBuiltinConstant();
Node* CEntryStubConstant(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
@@ -51,6 +52,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
Node* EmptyFixedArrayConstant();
Node* EmptyStringConstant();
Node* FixedArrayMapConstant();
+ Node* PropertyArrayMapConstant();
Node* FixedDoubleArrayMapConstant();
Node* HeapNumberMapConstant();
Node* OptimizedOutConstant();
@@ -63,6 +65,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
Node* ZeroConstant();
Node* OneConstant();
Node* NaNConstant();
+ Node* MinusOneConstant();
// Creates a HeapConstant node, possibly canonicalized, and may access the
// heap to inspect the object.
@@ -164,6 +167,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
enum CachedNode {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
+ kArrayConstructorStubConstant,
kToNumberBuiltinConstant,
kCEntryStub1Constant,
kCEntryStub2Constant,
@@ -173,6 +177,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kEmptyStringConstant,
kFixedArrayMapConstant,
kFixedDoubleArrayMapConstant,
+ kPropertyArrayMapConstant,
kHeapNumberMapConstant,
kOptimizedOutConstant,
kStaleRegisterConstant,
@@ -183,6 +188,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kNullConstant,
kZeroConstant,
kOneConstant,
+ kMinusOneConstant,
kNaNConstant,
kEmptyStateValues,
kSingleDeadTypedStateValues,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 0f5f9f87c1..e1026d6c3b 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -24,6 +24,8 @@ class JSInliningHeuristic final : public AdvancedReducer {
seen_(local_zone),
jsgraph_(jsgraph) {}
+ const char* reducer_name() const override { return "JSInliningHeuristic"; }
+
Reduction Reduce(Node* node) final;
// Processes the list of candidates gathered while the reducer was running,
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 9b260e3533..4172998544 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -250,36 +250,6 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
outer_frame_state);
}
-Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- Handle<SharedFunctionInfo> shared;
- frame_info.shared_info().ToHandle(&shared);
-
- Node* function = frame_state->InputAt(kFrameStateFunctionInput);
-
- // If we are inlining a tail call drop caller's frame state and an
- // arguments adaptor if it exists.
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
- if (frame_state->opcode() == IrOpcode::kFrameState) {
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
- }
- }
-
- const FrameStateFunctionInfo* state_info =
- common()->CreateFrameStateFunctionInfo(
- FrameStateType::kTailCallerFunction, 0, 0, shared);
-
- const Operator* op = common()->FrameState(
- BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
- Node* node0 = graph()->NewNode(op0);
- return graph()->NewNode(op, node0, node0, node0,
- jsgraph()->UndefinedConstant(), function,
- frame_state);
-}
-
namespace {
// TODO(bmeurer): Unify this with the witness helper functions in the
@@ -498,7 +468,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
// Function contains break points.
- if (shared_info->HasDebugInfo()) {
+ if (shared_info->HasBreakInfo()) {
TRACE("Not inlining %s into %s because callee may contain break points\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -552,12 +522,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
- // Remember that we inlined this function. This needs to be called right
- // after we ensure deoptimization support so that the code flusher
- // does not remove the code with the deoptimization support.
- int inlining_id = info_->AddInlinedFunction(
- shared_info, source_positions_->GetSourcePosition(node));
-
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
@@ -571,6 +535,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Handle<FeedbackVector> feedback_vector;
DetermineCallContext(node, context, feedback_vector);
+ // Remember that we inlined this function.
+ int inlining_id = info_->AddInlinedFunction(
+ shared_info, source_positions_->GetSourcePosition(node));
+
// Create the subgraph for the inlinee.
Node* start;
Node* end;
@@ -754,20 +722,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
}
- // If we are inlining a JS call at tail position then we have to pop current
- // frame state and its potential arguments adaptor frame state in order to
- // make the call stack be consistent with non-inlining case.
- // After that we add a tail caller frame state which lets deoptimizer handle
- // the case when the outermost function inlines a tail call (it should remove
- // potential arguments adaptor frame that belongs to outermost function when
- // deopt happens).
- if (node->opcode() == IrOpcode::kJSCall) {
- const CallParameters& p = CallParametersOf(node->op());
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- frame_state = CreateTailCallerFrameState(node, frame_state);
- }
- }
-
// Insert argument adaptor frame if required. The callees formal parameter
// count (i.e. value outputs of start node minus target, receiver, new target,
// arguments count and context) have to match the number of arguments passed
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index e40e6a745e..cff72b0760 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -31,6 +31,8 @@ class JSInliner final : public AdvancedReducer {
jsgraph_(jsgraph),
source_positions_(source_positions) {}
+ const char* reducer_name() const override { return "JSInliner"; }
+
// Reducer interface, eagerly inlines everything.
Reduction Reduce(Node* node) final;
@@ -60,8 +62,6 @@ class JSInliner final : public AdvancedReducer {
FrameStateType frame_state_type,
Handle<SharedFunctionInfo> shared);
- Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
-
Reduction InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, Node* start, Node* end,
Node* exception_target,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index b9ee8a4ed6..b4b0ffaa51 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -42,8 +42,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCreateJSGeneratorObject(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
- case Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos:
- return ReduceAsyncGeneratorGetAwaitInputOrDebugPos(node);
case Runtime::kInlineAsyncGeneratorReject:
return ReduceAsyncGeneratorReject(node);
case Runtime::kInlineAsyncGeneratorResolve:
@@ -62,10 +60,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_MAP_TYPE);
case Runtime::kInlineIsJSSet:
return ReduceIsInstanceType(node, JS_SET_TYPE);
- case Runtime::kInlineIsJSMapIterator:
- return ReduceIsInstanceType(node, JS_MAP_ITERATOR_TYPE);
- case Runtime::kInlineIsJSSetIterator:
- return ReduceIsInstanceType(node, JS_SET_ITERATOR_TYPE);
case Runtime::kInlineIsJSWeakMap:
return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
case Runtime::kInlineIsJSWeakSet:
@@ -198,23 +192,16 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceAsyncGeneratorGetAwaitInputOrDebugPos(
- Node* node) {
- Node* const generator = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op = simplified()->LoadField(
- AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos());
-
- return Change(node, op, generator, effect, control);
-}
-
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
- return Change(node, CodeFactory::AsyncGeneratorReject(isolate()), 0);
+ return Change(
+ node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorReject),
+ 0);
}
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorResolve(Node* node) {
- return Change(node, CodeFactory::AsyncGeneratorResolve(isolate()), 0);
+ return Change(
+ node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorResolve),
+ 0);
}
Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 0f3e84a5e5..fe5d4f370f 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -37,6 +37,8 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
DeoptimizationMode mode);
~JSIntrinsicLowering() final {}
+ const char* reducer_name() const override { return "JSIntrinsicLowering"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -47,7 +49,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceGeneratorClose(Node* node);
Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
- Reduction ReduceAsyncGeneratorGetAwaitInputOrDebugPos(Node* node);
Reduction ReduceAsyncGeneratorReject(Node* node);
Reduction ReduceAsyncGeneratorResolve(Node* node);
Reduction ReduceGeneratorSaveInputForAwait(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 5a3ccebed1..a323ba68f6 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -13,6 +13,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/property-access-builder.h"
#include "src/compiler/type-cache.h"
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
@@ -38,15 +39,7 @@ bool HasOnlyJSArrayMaps(MapHandles const& maps) {
return true;
}
-bool HasOnlyNumberMaps(MapHandles const& maps) {
- for (auto map : maps) {
- if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
- }
- return true;
-}
-
-template <typename T>
-bool HasOnlyStringMaps(T const& maps) {
+bool HasOnlyStringMaps(MapHandles const& maps) {
for (auto map : maps) {
if (!map->IsStringMap()) return false;
}
@@ -79,10 +72,14 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
+ case IrOpcode::kJSStringConcat:
+ return ReduceJSStringConcat(node);
case IrOpcode::kJSGetSuperConstructor:
return ReduceJSGetSuperConstructor(node);
case IrOpcode::kJSInstanceOf:
return ReduceJSInstanceOf(node);
+ case IrOpcode::kJSHasInPrototypeChain:
+ return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSLoadContext:
@@ -133,6 +130,59 @@ Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSStringConcat(Node* node) {
+ // TODO(turbofan): This has to run together with the inlining and
+ // native context specialization to be able to leverage the string
+ // constant-folding for optimizing property access, but we should
+ // nevertheless find a better home for this at some point.
+ DCHECK_EQ(IrOpcode::kJSStringConcat, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ DCHECK_GE(StringConcatParameterOf(node->op()).operand_count(), 3);
+
+ // Constant-fold string concatenation.
+ HeapObjectMatcher last_operand(NodeProperties::GetValueInput(node, 0));
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+ for (int i = 1; i < operand_count; ++i) {
+ HeapObjectMatcher current_operand(NodeProperties::GetValueInput(node, i));
+
+ if (last_operand.HasValue() && current_operand.HasValue()) {
+ Handle<String> left = Handle<String>::cast(last_operand.Value());
+ Handle<String> right = Handle<String>::cast(current_operand.Value());
+ if (left->length() + right->length() <= String::kMaxLength) {
+ Handle<String> result =
+ factory()->NewConsString(left, right).ToHandleChecked();
+ Node* value = jsgraph()->HeapConstant(result);
+ node->ReplaceInput(i - 1, value);
+ node->RemoveInput(i);
+ last_operand = HeapObjectMatcher(value);
+ i--;
+ operand_count--;
+ continue;
+ }
+ }
+ last_operand = current_operand;
+ }
+
+ if (operand_count == StringConcatParameterOf(node->op()).operand_count()) {
+ return NoChange();
+ } else if (operand_count == 1) {
+ // Replace with input if there is only one input left.
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ } else if (operand_count == 2) {
+ // Replace with JSAdd if we only have two operands left.
+ NodeProperties::ChangeOp(node,
+ javascript()->Add(BinaryOperationHint::kString));
+ return Changed(node);
+ } else {
+ // Otherwise update operand count.
+ NodeProperties::ChangeOp(node, javascript()->StringConcat(operand_count));
+ return Changed(node);
+ }
+}
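The constant-folding loop above collapses adjacent string-constant operands in place, re-checking the freshly folded operand before moving on, and refuses any fold that would exceed String::kMaxLength. A standalone sketch of the same pass, with std::optional<std::string> standing in for "operand is a known string constant" and an explicit length cap in place of String::kMaxLength:

#include <optional>
#include <string>
#include <vector>

// nullopt marks a non-constant operand; adjacent constants are folded left,
// mirroring the ReplaceInput/RemoveInput pattern in the reduction above.
std::vector<std::optional<std::string>> FoldAdjacentStringConstants(
    std::vector<std::optional<std::string>> operands, size_t max_length) {
  for (size_t i = 1; i < operands.size(); ++i) {
    if (operands[i - 1] && operands[i] &&
        operands[i - 1]->size() + operands[i]->size() <= max_length) {
      *operands[i - 1] += *operands[i];      // fold into the left operand
      operands.erase(operands.begin() + i);  // drop the right operand
      --i;                                   // re-examine the folded operand
    }
  }
  return operands;  // caller decides: 1 operand -> value, 2 -> add, n -> concat
}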
+
Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
Node* node) {
DCHECK_EQ(IrOpcode::kJSGetSuperConstructor, node->opcode());
@@ -166,6 +216,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Node* constructor = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Check if the right hand side is a known {receiver}.
@@ -184,6 +235,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+
if (access_info.IsNotFound()) {
// If there's no @@hasInstance handler, the OrdinaryHasInstance operation
// takes over, but that requires the {receiver} to be callable.
@@ -191,12 +244,13 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), holder);
+ access_builder.AssumePrototypesStable(
+ native_context(), access_info.receiver_maps(), holder);
}
// Monomorphic property access.
- effect = BuildCheckMaps(constructor, effect, control,
- access_info.receiver_maps());
+ access_builder.BuildCheckMaps(constructor, &effect, control,
+ access_info.receiver_maps());
// Lower to OrdinaryHasInstance(C, O).
NodeProperties::ReplaceValueInput(node, constructor, 0);
@@ -211,7 +265,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), holder);
+ access_builder.AssumePrototypesStable(
+ native_context(), access_info.receiver_maps(), holder);
} else {
holder = receiver;
}
@@ -232,14 +287,25 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
DCHECK(constant->IsCallable());
// Monomorphic property access.
- effect = BuildCheckMaps(constructor, effect, control,
- access_info.receiver_maps());
+ access_builder.BuildCheckMaps(constructor, &effect, control,
+ access_info.receiver_maps());
+
+ // Create a nested frame state inside the current method's most-recent frame
+ // state that will ensure that deopts that happen after this point will not
+ // fall back to the last Checkpoint--which would completely re-execute the
+ // instanceof logic--but rather create an activation of a version of the
+ // ToBoolean stub that finishes the remaining work of instanceof and returns
+ // to the caller without duplicating side-effects upon a lazy deopt.
+ Node* continuation_frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kToBooleanLazyDeoptContinuation, context, nullptr,
+ 0, frame_state, ContinuationFrameStateMode::LAZY);
// Call the @@hasInstance handler.
Node* target = jsgraph()->Constant(constant);
node->InsertInput(graph()->zone(), 0, target);
node->ReplaceInput(1, constructor);
node->ReplaceInput(2, object);
+ node->ReplaceInput(4, continuation_frame_state);
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
node, javascript()->Call(3, CallFrequency(), VectorSlotPair(),
@@ -260,15 +326,85 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
+JSNativeContextSpecialization::InferHasInPrototypeChainResult
+JSNativeContextSpecialization::InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<HeapObject> prototype) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
+
+ // Check if either all or none of the {receiver_maps} have the given
+ // {prototype} in their prototype chain.
+ bool all = true;
+ bool none = true;
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> receiver_map = receiver_maps[i];
+ if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ // In case of an unreliable {result} we need to ensure that all
+ // {receiver_maps} are stable, because otherwise we cannot trust
+ // the {receiver_maps} information, since arbitrary side-effects
+ // may have happened.
+ if (!receiver_map->is_stable()) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ for (PrototypeIterator j(receiver_map);; j.Advance()) {
+ if (j.IsAtEnd()) {
+ all = false;
+ break;
+ }
+ Handle<HeapObject> const current =
+ PrototypeIterator::GetCurrent<HeapObject>(j);
+ if (current.is_identical_to(prototype)) {
+ none = false;
+ break;
+ }
+ if (!current->map()->is_stable() ||
+ current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ }
+ DCHECK_IMPLIES(all, !none);
+ DCHECK_IMPLIES(none, !all);
+
+ if (all) return kIsInPrototypeChain;
+ if (none) return kIsNotInPrototypeChain;
+ return kMayBeInPrototypeChain;
+}
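InferHasInPrototypeChain above gives a three-way answer: the prototype is on every receiver map's chain, on none of them, or the walk has to give up (special receivers, unstable maps under an unreliable map inference, or mixed results). A minimal sketch of just the all/none bookkeeping, with integer ids standing in for maps and prototypes and without the stability and special-receiver checks:

#include <vector>

enum class PrototypeChainResult { kIsIn, kIsNotIn, kMayBe };

// One prototype chain (as ids) per receiver map; `target` is the prototype.
PrototypeChainResult ClassifyPrototypeChains(
    const std::vector<std::vector<int>>& chains, int target) {
  if (chains.empty()) return PrototypeChainResult::kMayBe;  // no map info
  bool all = true;
  bool none = true;
  for (const std::vector<int>& chain : chains) {
    bool found = false;
    for (int prototype : chain) {
      if (prototype == target) { found = true; break; }
    }
    if (found) none = false; else all = false;
  }
  if (all) return PrototypeChainResult::kIsIn;
  if (none) return PrototypeChainResult::kIsNotIn;
  return PrototypeChainResult::kMayBe;  // some chains hit, some did not
}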
+
+Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSHasInPrototypeChain, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* prototype = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Check if we can constant-fold the prototype chain walk
+ // for the given {value} and the {prototype}.
+ HeapObjectMatcher m(prototype);
+ if (m.HasValue()) {
+ InferHasInPrototypeChainResult result =
+ InferHasInPrototypeChain(value, effect, m.Value());
+ if (result != kMayBeInPrototypeChain) {
+ Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+
+ return NoChange();
+}
+
Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
Node* node) {
DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
Node* object = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
// Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
@@ -302,144 +438,15 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Install a code dependency on the {function}s initial map.
Handle<Map> initial_map(function->initial_map(), isolate());
dependencies()->AssumeInitialMapCantChange(initial_map);
- Handle<JSReceiver> function_prototype =
- handle(JSReceiver::cast(initial_map->prototype()), isolate());
-
- // Check if we can constant-fold the prototype chain walk
- // for the given {object} and the {function_prototype}.
- InferHasInPrototypeChainResult result =
- InferHasInPrototypeChain(object, effect, function_prototype);
- if (result != kMayBeInPrototypeChain) {
- Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
-
- Node* prototype = jsgraph()->Constant(function_prototype);
-
- Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch0);
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- Node* loop = control =
- graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* vloop = object =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- object, object, loop);
-
- // Load the {object} map and instance type.
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- object, effect, control);
- Node* object_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- object_map, effect, control);
-
- // Check if the {object} is a special receiver, because for special
- // receivers, i.e. proxies or API objects that need access checks,
- // we have to use the %HasInPrototypeChain runtime function instead.
- Node* check1 = graph()->NewNode(
- simplified()->NumberLessThanOrEqual(), object_instance_type,
- jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, control);
-
- control = graph()->NewNode(common()->IfFalse(), branch1);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = effect;
- Node* vtrue1;
-
- // Check if the {object} is not a receiver at all.
- Node* check10 =
- graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
- jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
- Node* branch10 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check10, if_true1);
-
- // A primitive value cannot match the {prototype} we're looking for.
- if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
- vtrue1 = jsgraph()->FalseConstant();
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
- Node* efalse1 = etrue1;
- Node* vfalse1;
- {
- // Slow path, need to call the %HasInPrototypeChain runtime function.
- vfalse1 = efalse1 = if_false1 = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
- prototype, context, frame_state, efalse1, if_false1);
-
- // Replace any potential {IfException} uses of {node} to catch
- // exceptions from this %HasInPrototypeChain runtime call instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse1);
- NodeProperties::ReplaceEffectInput(on_exception, efalse1);
- if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
- Revisit(on_exception);
- }
- }
-
- // Load the {object} prototype.
- Node* object_prototype = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
- effect, control);
-
- // Check if we reached the end of {object}s prototype chain.
- Node* check2 =
- graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
- jsgraph()->NullConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = effect;
- Node* vtrue2 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch2);
-
- // Check if we reached the {prototype}.
- Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, prototype);
- Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
-
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* etrue3 = effect;
- Node* vtrue3 = jsgraph()->TrueConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch3);
-
- // Close the loop.
- vloop->ReplaceInput(1, object_prototype);
- eloop->ReplaceInput(1, effect);
- loop->ReplaceInput(1, control);
-
- control = graph()->NewNode(common()->Merge(5), if_true0, if_true1,
- if_true2, if_true3, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
- etrue3, efalse1, control);
-
- // Morph the {node} into an appropriate Phi.
- ReplaceWithValue(node, node, effect, control);
- node->ReplaceInput(0, vtrue0);
- node->ReplaceInput(1, vtrue1);
- node->ReplaceInput(2, vtrue2);
- node->ReplaceInput(3, vtrue3);
- node->ReplaceInput(4, vfalse1);
- node->ReplaceInput(5, control);
- node->TrimInputCount(6);
- NodeProperties::ChangeOp(
- node, common()->Phi(MachineRepresentation::kTagged, 5));
- return Changed(node);
+ Node* prototype =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+
+ // Lower the {node} to JSHasInPrototypeChain.
+ NodeProperties::ReplaceValueInput(node, object, 0);
+ NodeProperties::ReplaceValueInput(node, prototype, 1);
+ NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
+ Reduction const reduction = ReduceJSHasInPrototypeChain(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
}
@@ -745,17 +752,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
return NoChange();
}
- // TODO(turbofan): Add support for inlining into try blocks.
- bool is_exceptional = NodeProperties::IsExceptionalCall(node);
- for (const auto& access_info : access_infos) {
- if (access_info.IsAccessorConstant()) {
- // Accessors in try-blocks are not supported yet.
- if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
- return NoChange();
- }
- }
- }
-
// Nothing to do if we have no non-deprecated maps.
if (access_infos.empty()) {
return ReduceSoftDeoptimize(
@@ -769,29 +765,35 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
}
+ // Collect call nodes to rewire exception edges.
+ ZoneVector<Node*> if_exception_nodes(zone());
+ ZoneVector<Node*>* if_exceptions = nullptr;
+ Node* if_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
+ if_exceptions = &if_exception_nodes;
+ }
+
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
PropertyAccessInfo access_info = access_infos.front();
- if (HasOnlyStringMaps(access_info.receiver_maps())) {
- // Monomorphic string access (ignoring the fact that there are multiple
- // String maps).
- receiver = effect = graph()->NewNode(simplified()->CheckString(),
- receiver, effect, control);
- } else if (HasOnlyNumberMaps(access_info.receiver_maps())) {
- // Monomorphic number access (we also deal with Smis here).
- receiver = effect = graph()->NewNode(simplified()->CheckNumber(),
- receiver, effect, control);
- } else {
- // Monomorphic property access.
- receiver = BuildCheckHeapObject(receiver, &effect, control);
- effect = BuildCheckMaps(receiver, effect, control,
- access_info.receiver_maps());
+ // Try to build string check or number check if possible.
+ // Otherwise build a map check.
+ if (!access_builder.TryBuildStringCheck(access_info.receiver_maps(),
+ &receiver, &effect, control) &&
+ !access_builder.TryBuildNumberCheck(access_info.receiver_maps(),
+ &receiver, &effect, control)) {
+ receiver =
+ access_builder.BuildCheckHeapObject(receiver, &effect, control);
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
}
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state, effect, control, name,
- access_info, access_mode, language_mode);
+ if_exceptions, access_info, access_mode, language_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -821,7 +823,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- receiver = BuildCheckHeapObject(receiver, &effect, control);
+ receiver =
+ access_builder.BuildCheckHeapObject(receiver, &effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect
@@ -848,8 +851,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
- this_effect = BuildCheckMaps(receiver, this_effect, this_control,
- receiver_maps);
+ access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
+ receiver_maps);
this_effects.push_back(this_effect);
this_controls.push_back(fallthrough_control);
fallthrough_control = nullptr;
@@ -894,9 +897,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Generate the actual property access.
- ValueEffectControl continuation = BuildPropertyAccess(
- this_receiver, this_value, context, frame_state, this_effect,
- this_control, name, access_info, access_mode, language_mode);
+ ValueEffectControl continuation =
+ BuildPropertyAccess(this_receiver, this_value, context, frame_state,
+ this_effect, this_control, name, if_exceptions,
+ access_info, access_mode, language_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -924,6 +928,24 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
control_count + 1, &effects.front());
}
}
+
+ // Properly rewire IfException edges if {node} is inside a try-block.
+ if (!if_exception_nodes.empty()) {
+ DCHECK_NOT_NULL(if_exception);
+ DCHECK_EQ(if_exceptions, &if_exception_nodes);
+ int const if_exception_count = static_cast<int>(if_exceptions->size());
+ Node* merge = graph()->NewNode(common()->Merge(if_exception_count),
+ if_exception_count, &if_exceptions->front());
+ if_exceptions->push_back(merge);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(if_exception_count),
+ if_exception_count + 1, &if_exceptions->front());
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, if_exception_count),
+ if_exception_count + 1, &if_exceptions->front());
+ ReplaceWithValue(if_exception, phi, ephi, merge);
+ }
+
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1109,7 +1131,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// store is either holey, or we have a potentially growing store,
// then we need to check that all prototypes have stable maps with
// fast elements (and we need to guard against changes to that below).
- if (IsHoleyElementsKind(receiver_map->elements_kind()) ||
+ if (IsHoleyOrDictionaryElementsKind(receiver_map->elements_kind()) ||
IsGrowStoreMode(store_mode)) {
// Make sure all prototypes are stable and have fast elements.
for (Handle<Map> map = receiver_map;;) {
@@ -1133,7 +1155,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Ensure that {receiver} is a heap object.
- receiver = BuildCheckHeapObject(receiver, &effect, control);
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
// Check for the monomorphic case.
if (access_infos.size() == 1) {
@@ -1162,8 +1185,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
control);
// Perform map check on the {receiver}.
- effect = BuildCheckMaps(receiver, effect, control,
- access_info.receiver_maps());
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
// Access the actual element.
ValueEffectControl continuation =
@@ -1214,8 +1237,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
- this_effect = BuildCheckMaps(receiver, this_effect, this_control,
- receiver_maps);
+ access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
+ receiver_maps);
fallthrough_control = nullptr;
} else {
ZoneVector<Node*> this_controls(zone());
@@ -1450,157 +1473,246 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
p.language_mode(), store_mode);
}
+Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
+ Node* receiver, Node* context, Node* frame_state, Node** effect,
+ Node** control, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info) {
+ Node* target = jsgraph()->Constant(access_info.constant());
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info =
+ frame_info.shared_info().ToHandleChecked();
+ // We need a FrameState for the getter stub to restore the correct
+ // context before returning to fullcodegen.
+ FrameStateFunctionInfo const* frame_info0 =
+ common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub, 1, 0,
+ shared_info);
+ Node* frame_state0 = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ frame_info0),
+ graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
+ receiver),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(), context,
+ target, frame_state);
+
+ // Introduce the call to the getter function.
+ Node* value;
+ if (access_info.constant()->IsJSFunction()) {
+ value = *effect = *control = graph()->NewNode(
+ jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, context, frame_state0, *effect, *control);
+ } else {
+ DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+ Handle<FunctionTemplateInfo> function_template_info(
+ Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+ DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+ Node* holder =
+ access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ value =
+ InlineApiCall(receiver, holder, context, target, frame_state0, nullptr,
+ effect, control, shared_info, function_template_info);
+ }
+ // Remember to rewire the IfException edge if this is inside a try-block.
+ if (if_exceptions != nullptr) {
+ // Create the appropriate IfException/IfSuccess projections.
+ Node* const if_exception =
+ graph()->NewNode(common()->IfException(), *control, *effect);
+ Node* const if_success = graph()->NewNode(common()->IfSuccess(), *control);
+ if_exceptions->push_back(if_exception);
+ *control = if_success;
+ }
+ return value;
+}
+
+Node* JSNativeContextSpecialization::InlinePropertySetterCall(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info) {
+ Node* target = jsgraph()->Constant(access_info.constant());
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info =
+ frame_info.shared_info().ToHandleChecked();
+ // We need a FrameState for the setter stub to restore the correct
+ // context and return the appropriate value to fullcodegen.
+ FrameStateFunctionInfo const* frame_info0 =
+ common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub, 2, 0,
+ shared_info);
+ Node* frame_state0 = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ frame_info0),
+ graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
+ receiver, value),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(), context,
+ target, frame_state);
+
+ // Introduce the call to the setter function.
+ if (access_info.constant()->IsJSFunction()) {
+ *effect = *control = graph()->NewNode(
+ jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, value, context, frame_state0, *effect, *control);
+ } else {
+ DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+ Handle<FunctionTemplateInfo> function_template_info(
+ Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+ DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+ Node* holder =
+ access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ value =
+ InlineApiCall(receiver, holder, context, target, frame_state0, value,
+ effect, control, shared_info, function_template_info);
+ }
+ // Remember to rewire the IfException edge if this is inside a try-block.
+ if (if_exceptions != nullptr) {
+ // Create the appropriate IfException/IfSuccess projections.
+ Node* const if_exception =
+ graph()->NewNode(common()->IfException(), *control, *effect);
+ Node* const if_success = graph()->NewNode(common()->IfSuccess(), *control);
+ if_exceptions->push_back(if_exception);
+ *control = if_success;
+ }
+ return value;
+}
+
+Node* JSNativeContextSpecialization::InlineApiCall(
+ Node* receiver, Node* holder, Node* context, Node* target,
+ Node* frame_state, Node* value, Node** effect, Node** control,
+ Handle<SharedFunctionInfo> shared_info,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ Handle<CallHandlerInfo> call_handler_info = handle(
+ CallHandlerInfo::cast(function_template_info->call_code()), isolate());
+ Handle<Object> call_data_object(call_handler_info->data(), isolate());
+
+ // Only setters have a value.
+ int const argc = value == nullptr ? 0 : 1;
+ // The stub always expects the receiver as the first param on the stack.
+ CallApiCallbackStub stub(
+ isolate(), argc,
+ true /* FunctionTemplateInfo doesn't have an associated context. */);
+ CallInterfaceDescriptor call_interface_descriptor =
+ stub.GetCallInterfaceDescriptor();
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), call_interface_descriptor,
+ call_interface_descriptor.GetStackParameterCount() + argc +
+ 1 /* implicit receiver */,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1);
+
+ Node* data = jsgraph()->Constant(call_data_object);
+ ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
+ Node* function_reference =
+ graph()->NewNode(common()->ExternalConstant(ExternalReference(
+ &function, ExternalReference::DIRECT_API_CALL, isolate())));
+ Node* code = jsgraph()->HeapConstant(stub.GetCode());
+
+ // Add CallApiCallbackStub's register argument as well.
+ Node* inputs[11] = {code, target, data, holder, function_reference, receiver};
+ int index = 6 + argc;
+ inputs[index++] = context;
+ inputs[index++] = frame_state;
+ inputs[index++] = *effect;
+ inputs[index++] = *control;
+ // This needs to stay here because of the edge case described in
+ // http://crbug.com/675648.
+ if (value != nullptr) {
+ inputs[6] = value;
+ }
+
+ return *effect = *control =
+ graph()->NewNode(common()->Call(call_descriptor), index, inputs);
+}
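The input buffer assembled in InlineApiCall above has a fixed six-node prefix (code, target, call data, holder, the callback's external reference, and the receiver), an optional value slot used only by setters, and then the context/frame-state/effect/control tail, which is why the buffer is sized 11 and filled from index 6 + argc. A standalone sketch of that layout, with strings in place of graph nodes:

#include <string>
#include <vector>

// Strings stand in for graph nodes; the ordering mirrors InlineApiCall above.
std::vector<std::string> AssembleApiCallbackInputs(bool is_setter) {
  std::vector<std::string> inputs = {"code",   "target",       "call_data",
                                     "holder", "callback_ref", "receiver"};
  if (is_setter) inputs.push_back("value");  // only setters pass a value (argc == 1)
  for (const char* tail : {"context", "frame_state", "effect", "control"}) {
    inputs.push_back(tail);
  }
  return inputs;  // 10 entries for a getter, 11 for a setter
}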
+
JSNativeContextSpecialization::ValueEffectControl
-JSNativeContextSpecialization::BuildPropertyAccess(
- Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
- Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
- AccessMode access_mode, LanguageMode language_mode) {
+JSNativeContextSpecialization::BuildPropertyLoad(
+ Node* receiver, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info, LanguageMode language_mode) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
if (access_info.holder().ToHandle(&holder)) {
- DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
- AssumePrototypesStable(access_info.receiver_maps(), holder);
+ access_builder.AssumePrototypesStable(native_context(),
+ access_info.receiver_maps(), holder);
}
// Generate the actual property access.
+ Node* value;
if (access_info.IsNotFound()) {
- DCHECK_EQ(AccessMode::kLoad, access_mode);
value = jsgraph()->UndefinedConstant();
} else if (access_info.IsDataConstant()) {
DCHECK(!FLAG_track_constant_fields);
+ value = jsgraph()->Constant(access_info.constant());
+ } else if (access_info.IsAccessorConstant()) {
+ value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
+ &control, if_exceptions, access_info);
+ } else {
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ value = access_builder.BuildLoadDataField(name, access_info, receiver,
+ &effect, &control);
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
+
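BuildPropertyLoad above dispatches on the kind of the resolved property: a miss folds to undefined, a constant data property folds to its value, a constant accessor becomes an inlined getter call, and everything else turns into a field load. A minimal sketch of that dispatch, with strings describing the outcome instead of graph nodes (constant data fields are lumped in with field loads here):

#include <string>

// Outcome of the load by property kind, mirroring BuildPropertyLoad above.
enum class PropertyKind { kNotFound, kDataConstant, kAccessorConstant, kDataField };

std::string LowerPropertyLoad(PropertyKind kind) {
  switch (kind) {
    case PropertyKind::kNotFound:         return "undefined constant";
    case PropertyKind::kDataConstant:     return "constant value";
    case PropertyKind::kAccessorConstant: return "inlined getter call";
    case PropertyKind::kDataField:        return "field load";
  }
  return "unreachable";
}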
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyAccess(
+ Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info, AccessMode access_mode,
+ LanguageMode language_mode) {
+ switch (access_mode) {
+ case AccessMode::kLoad:
+ return BuildPropertyLoad(receiver, context, frame_state, effect, control,
+ name, if_exceptions, access_info, language_mode);
+ case AccessMode::kStore:
+ case AccessMode::kStoreInLiteral:
+ return BuildPropertyStore(receiver, value, context, frame_state, effect,
+ control, name, if_exceptions, access_info,
+ access_mode, language_mode);
+ }
+ UNREACHABLE();
+ return ValueEffectControl();
+}
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyStore(
+ Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info, AccessMode access_mode,
+ LanguageMode language_mode) {
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ if (access_info.holder().ToHandle(&holder)) {
+ DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
+ access_builder.AssumePrototypesStable(native_context(),
+ access_info.receiver_maps(), holder);
+ }
+
+ DCHECK(!access_info.IsNotFound());
+
+ // Generate the actual property access.
+ if (access_info.IsDataConstant()) {
+ DCHECK(!FLAG_track_constant_fields);
Node* constant_value = jsgraph()->Constant(access_info.constant());
- if (access_mode == AccessMode::kStore) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
- constant_value);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
- }
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
value = constant_value;
} else if (access_info.IsAccessorConstant()) {
- // TODO(bmeurer): Properly rewire the IfException edge here if there's any.
- Node* target = jsgraph()->Constant(access_info.constant());
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- Handle<SharedFunctionInfo> shared_info =
- frame_info.shared_info().ToHandleChecked();
- switch (access_mode) {
- case AccessMode::kLoad: {
- // We need a FrameState for the getter stub to restore the correct
- // context before returning to fullcodegen.
- FrameStateFunctionInfo const* frame_info0 =
- common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub,
- 1, 0, shared_info);
- Node* frame_state0 = graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- frame_info0),
- graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
- receiver),
- jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
- context, target, frame_state);
-
- // Introduce the call to the getter function.
- if (access_info.constant()->IsJSFunction()) {
- value = effect = control = graph()->NewNode(
- javascript()->Call(2, CallFrequency(), VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, context, frame_state0, effect, control);
- } else {
- DCHECK(access_info.constant()->IsFunctionTemplateInfo());
- Handle<FunctionTemplateInfo> function_template_info(
- Handle<FunctionTemplateInfo>::cast(access_info.constant()));
- DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, nullptr, effect, control,
- shared_info, function_template_info);
- value = value_effect_control.value();
- effect = value_effect_control.effect();
- control = value_effect_control.control();
- }
- break;
- }
- case AccessMode::kStoreInLiteral:
- case AccessMode::kStore: {
- // We need a FrameState for the setter stub to restore the correct
- // context and return the appropriate value to fullcodegen.
- FrameStateFunctionInfo const* frame_info0 =
- common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub,
- 2, 0, shared_info);
- Node* frame_state0 = graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- frame_info0),
- graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
- receiver, value),
- jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
- context, target, frame_state);
-
- // Introduce the call to the setter function.
- if (access_info.constant()->IsJSFunction()) {
- effect = control = graph()->NewNode(
- javascript()->Call(3, CallFrequency(), VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, value, context, frame_state0, effect, control);
- } else {
- DCHECK(access_info.constant()->IsFunctionTemplateInfo());
- Handle<FunctionTemplateInfo> function_template_info(
- Handle<FunctionTemplateInfo>::cast(access_info.constant()));
- DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, value, effect, control,
- shared_info, function_template_info);
- value = value_effect_control.value();
- effect = value_effect_control.effect();
- control = value_effect_control.control();
- }
- break;
- }
- }
+ value =
+ InlinePropertySetterCall(receiver, value, context, frame_state, &effect,
+ &control, if_exceptions, access_info);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
FieldIndex const field_index = access_info.field_index();
Type* const field_type = access_info.field_type();
MachineRepresentation const field_representation =
access_info.field_representation();
- if (access_mode == AccessMode::kLoad) {
- if (access_info.holder().ToHandle(&holder)) {
- receiver = jsgraph()->Constant(holder);
- }
- // Optimize immutable property loads.
- HeapObjectMatcher m(receiver);
- if (m.HasValue() && m.Value()->IsJSObject()) {
- // TODO(ishell): Use something simpler like
- //
- // Handle<Object> value =
- // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
- // Representation::Tagged(), field_index);
- //
- // here, once we have the immutable bit in the access_info.
-
- // TODO(turbofan): Given that we already have the field_index here, we
- // might be smarter in the future and not rely on the LookupIterator,
- // but for now let's just do what Crankshaft does.
- LookupIterator it(m.Value(), name,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::DATA) {
- bool is_reaonly_non_configurable =
- it.IsReadOnly() && !it.IsConfigurable();
- if (is_reaonly_non_configurable ||
- (FLAG_track_constant_fields &&
- access_info.IsDataConstantField())) {
- Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
- if (!is_reaonly_non_configurable) {
- // It's necessary to add dependency on the map that introduced
- // the field.
- DCHECK(access_info.IsDataConstantField());
- DCHECK(!it.is_dictionary_holder());
- Handle<Map> field_owner_map = it.GetFieldOwnerMap();
- dependencies()->AssumeFieldOwner(field_owner_map);
- }
- return ValueEffectControl(value, effect, control);
- }
- }
- }
- }
Node* storage = receiver;
if (!field_index.is_inobject()) {
storage = effect = graph()->NewNode(
@@ -1615,196 +1727,158 @@ JSNativeContextSpecialization::BuildPropertyAccess(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier};
- if (access_mode == AccessMode::kLoad) {
- if (field_representation == MachineRepresentation::kFloat64) {
+ bool store_to_constant_field = FLAG_track_constant_fields &&
+ (access_mode == AccessMode::kStore) &&
+ access_info.IsDataConstantField();
+
+ DCHECK(access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral);
+ switch (field_representation) {
+ case MachineRepresentation::kFloat64: {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
- FieldAccess const storage_access = {kTaggedBase,
- field_index.offset(),
- name,
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
- storage = effect =
- graph()->NewNode(simplified()->LoadField(storage_access), storage,
- effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- }
- } else if (field_representation ==
- MachineRepresentation::kTaggedPointer) {
- // Remember the map of the field value, if its map is stable. This is
- // used by the LoadElimination to eliminate map checks on the result.
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
- if (field_map->is_stable()) {
- dependencies()->AssumeMapStable(field_map);
- field_access.map = field_map;
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable),
+ effect);
+ Node* box = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
+ jsgraph()->Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), box,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ box, value, effect, control);
+ value = effect =
+ graph()->NewNode(common()->FinishRegion(), box, effect);
+
+ field_access.type = Type::Any();
+ field_access.machine_type = MachineType::TaggedPointer();
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ FieldAccess const storage_access = {kTaggedBase,
+ field_index.offset(),
+ name,
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(storage_access),
+ storage, effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
}
}
+ if (store_to_constant_field) {
+ DCHECK(!access_info.HasTransitionMap());
+        // If the field is constant, check that the value we are going
+        // to store matches the current value.
+ Node* current_value = effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(),
+ current_value, value);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ return ValueEffectControl(value, effect, control);
+ }
+ break;
}
- value = effect = graph()->NewNode(simplified()->LoadField(field_access),
- storage, effect, control);
- } else {
- bool store_to_constant_field = FLAG_track_constant_fields &&
- (access_mode == AccessMode::kStore) &&
- access_info.IsDataConstantField();
-
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
- switch (field_representation) {
- case MachineRepresentation::kFloat64: {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
- if (access_info.HasTransitionMap()) {
- // Allocate a MutableHeapNumber for the new property.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable),
- effect);
- Node* box = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), box,
- jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- box, value, effect, control);
- value = effect =
- graph()->NewNode(common()->FinishRegion(), box, effect);
-
- field_access.type = Type::Any();
- field_access.machine_type = MachineType::TaggedPointer();
- field_access.write_barrier_kind = kPointerWriteBarrier;
- } else {
- // We just store directly to the MutableHeapNumber.
- FieldAccess const storage_access = {kTaggedBase,
- field_index.offset(),
- name,
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
- storage = effect =
- graph()->NewNode(simplified()->LoadField(storage_access),
- storage, effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- field_access.machine_type = MachineType::Float64();
- }
- }
- if (store_to_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect =
- graph()->NewNode(simplified()->LoadField(field_access), storage,
- effect, control);
-
- Node* check = graph()->NewNode(simplified()->NumberEqual(),
- current_value, value);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
- control);
- return ValueEffectControl(value, effect, control);
- }
- break;
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ if (store_to_constant_field) {
+ DCHECK(!access_info.HasTransitionMap());
+        // If the field is constant, check that the value we are going
+        // to store matches the current value.
+ Node* current_value = effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ current_value, value);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ return ValueEffectControl(value, effect, control);
}
- case MachineRepresentation::kTaggedSigned:
- case MachineRepresentation::kTaggedPointer:
- case MachineRepresentation::kTagged:
- if (store_to_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect =
- graph()->NewNode(simplified()->LoadField(field_access), storage,
- effect, control);
-
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
- current_value, value);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
- control);
- return ValueEffectControl(value, effect, control);
- }
-
- if (field_representation == MachineRepresentation::kTaggedSigned) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
- field_access.write_barrier_kind = kNoWriteBarrier;
-
- } else if (field_representation ==
- MachineRepresentation::kTaggedPointer) {
- // Ensure that {value} is a HeapObject.
- value = BuildCheckHeapObject(value, &effect, control);
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
- // Emit a map check for the value.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(field_map)),
- value, effect, control);
- }
- field_access.write_barrier_kind = kPointerWriteBarrier;
- } else {
- DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
+ if (field_representation == MachineRepresentation::kTaggedSigned) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+ effect, control);
+ field_access.write_barrier_kind = kNoWriteBarrier;
+
+ } else if (field_representation ==
+ MachineRepresentation::kTaggedPointer) {
+ // Ensure that {value} is a HeapObject.
+ value = access_builder.BuildCheckHeapObject(value, &effect, control);
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ // Emit a map check for the value.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(field_map)),
+ value, effect, control);
}
- break;
- case MachineRepresentation::kNone:
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- case MachineRepresentation::kWord64:
- case MachineRepresentation::kFloat32:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
- UNREACHABLE();
- break;
- }
- // Check if we need to perform a transitioning store.
- Handle<Map> transition_map;
- if (access_info.transition_map().ToHandle(&transition_map)) {
- // Check if we need to grow the properties backing store
- // with this transitioning store.
- Handle<Map> original_map(Map::cast(transition_map->GetBackPointer()),
- isolate());
- if (original_map->unused_property_fields() == 0) {
- DCHECK(!field_index.is_inobject());
-
- // Reallocate the properties {storage}.
- storage = effect = BuildExtendPropertiesBackingStore(
- original_map, storage, effect, control);
-
- // Perform the actual store.
- effect = graph()->NewNode(simplified()->StoreField(field_access),
- storage, value, effect, control);
-
- // Atomically switch to the new properties below.
- field_access = AccessBuilder::ForJSObjectProperties();
- value = storage;
- storage = receiver;
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+
+ } else {
+ DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
}
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kObservable), effect);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(field_access),
- storage, value, effect, control);
- effect = graph()->NewNode(common()->FinishRegion(),
- jsgraph()->UndefinedConstant(), effect);
- } else {
- // Regular non-transitioning field store.
+ break;
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ break;
+ }
+ // Check if we need to perform a transitioning store.
+ Handle<Map> transition_map;
+ if (access_info.transition_map().ToHandle(&transition_map)) {
+ // Check if we need to grow the properties backing store
+ // with this transitioning store.
+ Handle<Map> original_map(Map::cast(transition_map->GetBackPointer()),
+ isolate());
+ if (original_map->unused_property_fields() == 0) {
+ DCHECK(!field_index.is_inobject());
+
+ // Reallocate the properties {storage}.
+ storage = effect = BuildExtendPropertiesBackingStore(
+ original_map, storage, effect, control);
+
+ // Perform the actual store.
effect = graph()->NewNode(simplified()->StoreField(field_access),
storage, value, effect, control);
+
+ // Atomically switch to the new properties below.
+ field_access = AccessBuilder::ForJSObjectProperties();
+ value = storage;
+ storage = receiver;
}
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable), effect);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+ jsgraph()->Constant(transition_map), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
+ value, effect, control);
+ effect = graph()->NewNode(common()->FinishRegion(),
+ jsgraph()->UndefinedConstant(), effect);
+ } else {
+ // Regular non-transitioning field store.
+ effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
+ value, effect, control);
}
}
@@ -1838,6 +1912,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
}
Handle<Map> receiver_map(map, isolate());
+ if (!Map::TryUpdate(receiver_map).ToHandle(&receiver_map)) return NoChange();
+
Handle<Name> cached_name =
handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
@@ -1855,10 +1931,10 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* control = NodeProperties::GetControlInput(node);
// Monomorphic property access.
- receiver = BuildCheckHeapObject(receiver, &effect, control);
-
- effect =
- BuildCheckMaps(receiver, effect, control, access_info.receiver_maps());
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
// Ensure that {name} matches the cached name.
Node* name = NodeProperties::GetValueInput(node, 1);
@@ -1873,7 +1949,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state_lazy, effect, control, cached_name,
- access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY);
+ nullptr, access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1895,7 +1971,6 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
break;
}
UNREACHABLE();
- return kExternalInt8Array;
}
} // namespace
@@ -2060,7 +2135,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// Don't try to store to a copy-on-write backing store.
if (access_mode == AccessMode::kStore &&
- IsFastSmiOrObjectElementsKind(elements_kind) &&
+ IsSmiOrObjectElementsKind(elements_kind) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
effect = graph()->NewNode(
simplified()->CheckMaps(
@@ -2101,10 +2176,10 @@ JSNativeContextSpecialization::BuildElementAccess(
// Compute the element access.
Type* element_type = Type::NonInternal();
MachineType element_machine_type = MachineType::AnyTagged();
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
element_type = Type::Number();
element_machine_type = MachineType::Float64();
- } else if (IsFastSmiElementsKind(elements_kind)) {
+ } else if (IsSmiElementsKind(elements_kind)) {
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
@@ -2116,12 +2191,12 @@ JSNativeContextSpecialization::BuildElementAccess(
if (access_mode == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
- if (IsHoleyElementsKind(elements_kind)) {
+ if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
element_access.type =
Type::Union(element_type, Type::Hole(), graph()->zone());
}
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
element_access.machine_type = MachineType::AnyTagged();
}
// Perform the actual backing store access.
@@ -2130,8 +2205,8 @@ JSNativeContextSpecialization::BuildElementAccess(
index, effect, control);
// Handle loading from holey backing stores correctly, by either mapping
// the hole to undefined if possible, or deoptimizing otherwise.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
// Check if we are allowed to turn the hole into undefined.
if (CanTreatHoleAsUndefined(receiver_maps)) {
// Turn the hole into undefined.
@@ -2139,10 +2214,10 @@ JSNativeContextSpecialization::BuildElementAccess(
value);
} else {
// Bailout if we see the hole.
- value = effect = graph()->NewNode(simplified()->CheckTaggedHole(),
+ value = effect = graph()->NewNode(simplified()->CheckNotTaggedHole(),
value, effect, control);
}
- } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
// Perform the hole check on the result.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
// Check if we are allowed to return the hole directly.
@@ -2155,10 +2230,10 @@ JSNativeContextSpecialization::BuildElementAccess(
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsSmiElementsKind(elements_kind)) {
value = effect =
graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
+ } else if (IsDoubleElementsKind(elements_kind)) {
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
effect, control);
// Make sure we do not store signalling NaNs into double arrays.
@@ -2166,7 +2241,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Ensure that copy-on-write backing store is writable.
- if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
@@ -2180,10 +2255,10 @@ JSNativeContextSpecialization::BuildElementAccess(
if (receiver_is_jsarray) {
flags |= GrowFastElementsFlag::kArrayObject;
}
- if (IsHoleyElementsKind(elements_kind)) {
+ if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
flags |= GrowFastElementsFlag::kHoleyElements;
}
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
flags |= GrowFastElementsFlag::kDoubleElements;
}
elements = effect = graph()->NewNode(
@@ -2200,112 +2275,6 @@ JSNativeContextSpecialization::BuildElementAccess(
return ValueEffectControl(value, effect, control);
}
-JSNativeContextSpecialization::ValueEffectControl
-JSNativeContextSpecialization::InlineApiCall(
- Node* receiver, Node* context, Node* target, Node* frame_state, Node* value,
- Node* effect, Node* control, Handle<SharedFunctionInfo> shared_info,
- Handle<FunctionTemplateInfo> function_template_info) {
- Handle<CallHandlerInfo> call_handler_info = handle(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate());
- Handle<Object> call_data_object(call_handler_info->data(), isolate());
-
- // Only setters have a value.
- int const argc = value == nullptr ? 0 : 1;
- // The stub always expects the receiver as the first param on the stack.
- CallApiCallbackStub stub(
- isolate(), argc,
- true /* FunctionTemplateInfo doesn't have an associated context. */);
- CallInterfaceDescriptor call_interface_descriptor =
- stub.GetCallInterfaceDescriptor();
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), call_interface_descriptor,
- call_interface_descriptor.GetStackParameterCount() + argc +
- 1 /* implicit receiver */,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1);
-
- Node* data = jsgraph()->Constant(call_data_object);
- ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
- Node* function_reference =
- graph()->NewNode(common()->ExternalConstant(ExternalReference(
- &function, ExternalReference::DIRECT_API_CALL, isolate())));
- Node* code = jsgraph()->HeapConstant(stub.GetCode());
-
- // Add CallApiCallbackStub's register argument as well.
- Node* inputs[11] = {
- code, target, data, receiver /* holder */, function_reference, receiver};
- int index = 6 + argc;
- inputs[index++] = context;
- inputs[index++] = frame_state;
- inputs[index++] = effect;
- inputs[index++] = control;
- // This needs to stay here because of the edge case described in
- // http://crbug.com/675648.
- if (value != nullptr) {
- inputs[6] = value;
- }
-
- Node* control0;
- Node* effect0;
- Node* value0 = effect0 = control0 =
- graph()->NewNode(common()->Call(call_descriptor), index, inputs);
- return ValueEffectControl(value0, effect0, control0);
-}
-
-Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
- Node** effect,
- Node* control) {
- switch (receiver->opcode()) {
- case IrOpcode::kHeapConstant:
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSConvertReceiver:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSToString:
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSTypeOf: {
- return receiver;
- }
- default: {
- return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, *effect, control);
- }
- }
-}
-
-Node* JSNativeContextSpecialization::BuildCheckMaps(
- Node* receiver, Node* effect, Node* control,
- MapHandles const& receiver_maps) {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- Handle<Map> receiver_map(m.Value()->map(), isolate());
- if (receiver_map->is_stable()) {
- for (Handle<Map> map : receiver_maps) {
- if (map.is_identical_to(receiver_map)) {
- dependencies()->AssumeMapStable(receiver_map);
- return effect;
- }
- }
- }
- }
- ZoneHandleSet<Map> maps;
- CheckMapsFlags flags = CheckMapsFlag::kNone;
- for (Handle<Map> map : receiver_maps) {
- maps.insert(map, graph()->zone());
- if (map->is_migration_target()) {
- flags |= CheckMapsFlag::kTryMigrateInstance;
- }
- }
- return graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
- effect, control);
-}
-
Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
Handle<Map> map, Node* properties, Node* effect, Node* control) {
// TODO(bmeurer/jkummerow): Property deletions can undo map transitions
@@ -2339,10 +2308,10 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* new_properties = effect = graph()->NewNode(
simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(FixedArray::SizeFor(new_length)), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- new_properties, jsgraph()->FixedArrayMapConstant(),
- effect, control);
+ jsgraph()->Constant(PropertyArray::SizeFor(new_length)), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), new_properties,
+ jsgraph()->PropertyArrayMapConstant(), effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForFixedArrayLength()),
new_properties, jsgraph()->Constant(new_length), effect, control);
@@ -2354,107 +2323,30 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
return graph()->NewNode(common()->FinishRegion(), new_properties, effect);
}
-void JSNativeContextSpecialization::AssumePrototypesStable(
- MapHandles const& receiver_maps, Handle<JSObject> holder) {
- // Determine actual holder and perform prototype chain checks.
- for (auto map : receiver_maps) {
- // Perform the implicit ToObject for primitives here.
- // Implemented according to ES6 section 7.3.2 GetV (V, P).
- Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context())
- .ToHandle(&constructor)) {
- map = handle(constructor->initial_map(), isolate());
- }
- dependencies()->AssumePrototypeMapsStable(map, holder);
- }
-}
-
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
MapHandles const& receiver_maps) {
- // Check if the array prototype chain is intact.
- if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
-
- // Make sure both the initial Array and Object prototypes are stable.
- Handle<JSObject> initial_array_prototype(
- native_context()->initial_array_prototype(), isolate());
- Handle<JSObject> initial_object_prototype(
- native_context()->initial_object_prototype(), isolate());
- if (!initial_array_prototype->map()->is_stable() ||
- !initial_object_prototype->map()->is_stable()) {
- return false;
- }
-
- // Check if all {receiver_maps} either have the initial Array.prototype
- // or the initial Object.prototype as their prototype, as those are
- // guarded by the array protector cell.
- for (Handle<Map> map : receiver_maps) {
- if (map->prototype() != *initial_array_prototype &&
- map->prototype() != *initial_object_prototype) {
+  // Check if all {receiver_maps} have one of the initial Array.prototype
+ // or Object.prototype objects as their prototype (in any of the current
+ // native contexts, as the global Array protector works isolate-wide).
+ for (Handle<Map> receiver_map : receiver_maps) {
+ DisallowHeapAllocation no_gc;
+ Object* const receiver_prototype = receiver_map->prototype();
+ if (!isolate()->IsInAnyContext(receiver_prototype,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) &&
+ !isolate()->IsInAnyContext(receiver_prototype,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
return false;
}
}
- // Install code dependencies on the prototype maps.
- for (Handle<Map> map : receiver_maps) {
- dependencies()->AssumePrototypeMapsStable(map, initial_object_prototype);
- }
+ // Check if the array prototype chain is intact.
+ if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
// Install code dependency on the array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
return true;
}
-JSNativeContextSpecialization::InferHasInPrototypeChainResult
-JSNativeContextSpecialization::InferHasInPrototypeChain(
- Node* receiver, Node* effect, Handle<JSReceiver> prototype) {
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
-
- // Check if either all or none of the {receiver_maps} have the given
- // {prototype} in their prototype chain.
- bool all = true;
- bool none = true;
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- Handle<Map> receiver_map = receiver_maps[i];
- if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
- return kMayBeInPrototypeChain;
- }
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- // In case of an unreliable {result} we need to ensure that all
- // {receiver_maps} are stable, because otherwise we cannot trust
- // the {receiver_maps} information, since arbitrary side-effects
- // may have happened.
- if (!receiver_map->is_stable()) {
- return kMayBeInPrototypeChain;
- }
- }
- for (PrototypeIterator j(receiver_map);; j.Advance()) {
- if (j.IsAtEnd()) {
- all = false;
- break;
- }
- Handle<JSReceiver> const current =
- PrototypeIterator::GetCurrent<JSReceiver>(j);
- if (current.is_identical_to(prototype)) {
- none = false;
- break;
- }
- if (!current->map()->is_stable() ||
- current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
- return kMayBeInPrototypeChain;
- }
- }
- }
- DCHECK_IMPLIES(all, !none);
- DCHECK_IMPLIES(none, !all);
-
- if (all) return kIsInPrototypeChain;
- if (none) return kIsNotInPrototypeChain;
- return kMayBeInPrototypeChain;
-}
-
bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
MapHandles* receiver_maps) {
@@ -2558,10 +2450,6 @@ Factory* JSNativeContextSpecialization::factory() const {
return isolate()->factory();
}
-MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
- return jsgraph()->machine();
-}
-
CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 2f9df08f81..a9b04a3e08 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -50,12 +50,18 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
CompilationDependencies* dependencies,
Zone* zone);
+ const char* reducer_name() const override {
+ return "JSNativeContextSpecialization";
+ }
+
Reduction Reduce(Node* node) final;
private:
Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSStringConcat(Node* node);
Reduction ReduceJSGetSuperConstructor(Node* node);
Reduction ReduceJSInstanceOf(Node* node);
+ Reduction ReduceJSHasInPrototypeChain(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadGlobal(Node* node);
@@ -96,6 +102,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// A triple of nodes that represents a continuation.
class ValueEffectControl final {
public:
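+    // Creates an empty continuation (all components are nullptr).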
+ ValueEffectControl()
+ : value_(nullptr), effect_(nullptr), control_(nullptr) {}
ValueEffectControl(Node* value, Node* effect, Node* control)
: value_(value), effect_(effect), control_(control) {}
@@ -104,19 +112,45 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Node* control() const { return control_; }
private:
- Node* const value_;
- Node* const effect_;
- Node* const control_;
+ Node* value_;
+ Node* effect_;
+ Node* control_;
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
- Node* context, Node* frame_state,
- Node* effect, Node* control,
- Handle<Name> name,
- PropertyAccessInfo const& access_info,
- AccessMode access_mode,
- LanguageMode language_mode);
+ ValueEffectControl BuildPropertyAccess(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node* effect, Node* control, Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode, LanguageMode language_mode);
+ ValueEffectControl BuildPropertyLoad(Node* receiver, Node* context,
+ Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info,
+ LanguageMode language_mode);
+
+ ValueEffectControl BuildPropertyStore(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node* effect, Node* control, Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode, LanguageMode language_mode);
+
+ // Helpers for accessor inlining.
+ Node* InlinePropertyGetterCall(Node* receiver, Node* context,
+ Node* frame_state, Node** effect,
+ Node** control,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info);
+ Node* InlinePropertySetterCall(Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node** effect,
+ Node** control,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info);
+ Node* InlineApiCall(Node* receiver, Node* holder, Node* context, Node* target,
+ Node* frame_state, Node* value, Node** effect,
+ Node** control, Handle<SharedFunctionInfo> shared_info,
+ Handle<FunctionTemplateInfo> function_template_info);
// Construct the appropriate subgraph for element access.
ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
@@ -126,38 +160,15 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
KeyedAccessStoreMode store_mode);
- // Construct an appropriate heap object check.
- Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
-
- // Construct an appropriate map check.
- Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
- MapHandles const& maps);
-
// Construct appropriate subgraph to extend properties backing store.
Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
Node* effect, Node* control);
- // Adds stability dependencies on all prototypes of every class in
- // {receiver_type} up to (and including) the {holder}.
- void AssumePrototypesStable(MapHandles const& receiver_maps,
- Handle<JSObject> holder);
-
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
bool CanTreatHoleAsUndefined(MapHandles const& receiver_maps);
- // Checks if we know at compile time that the {receiver} either definitely
- // has the {prototype} in it's prototype chain, or the {receiver} definitely
- // doesn't have the {prototype} in it's prototype chain.
- enum InferHasInPrototypeChainResult {
- kIsInPrototypeChain,
- kIsNotInPrototypeChain,
- kMayBeInPrototypeChain
- };
- InferHasInPrototypeChainResult InferHasInPrototypeChain(
- Node* receiver, Node* effect, Handle<JSReceiver> prototype);
-
// Extract receiver maps from {nexus} and filter based on {receiver} if
// possible.
bool ExtractReceiverMaps(Node* receiver, Node* effect,
@@ -174,11 +185,16 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// program location.
MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
- ValueEffectControl InlineApiCall(
- Node* receiver, Node* context, Node* target, Node* frame_state,
- Node* parameter, Node* effect, Node* control,
- Handle<SharedFunctionInfo> shared_info,
- Handle<FunctionTemplateInfo> function_template_info);
+ // Checks if we know at compile time that the {receiver} either definitely
+  // has the {prototype} in its prototype chain, or the {receiver} definitely
+  // doesn't have the {prototype} in its prototype chain.
+ enum InferHasInPrototypeChainResult {
+ kIsInPrototypeChain,
+ kIsNotInPrototypeChain,
+ kMayBeInPrototypeChain
+ };
+ InferHasInPrototypeChainResult InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<HeapObject> prototype);
// Script context lookup logic.
struct ScriptContextTableLookupResult;
@@ -192,7 +208,6 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
- MachineOperatorBuilder* machine() const;
Flags flags() const { return flags_; }
Handle<JSGlobalObject> global_object() const { return global_object_; }
Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index b8156a23f4..ff025d25fd 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -22,6 +22,12 @@ std::ostream& operator<<(std::ostream& os, CallFrequency f) {
return os << f.value();
}
+CallFrequency CallFrequencyOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ op->opcode() == IrOpcode::kJSConstructWithArrayLike);
+ return OpParameter<CallFrequency>(op);
+}
+
VectorSlotPair::VectorSlotPair() {}
@@ -116,10 +122,31 @@ SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const* op) {
return OpParameter<SpreadWithArityParameter>(op);
}
+bool operator==(StringConcatParameter const& lhs,
+ StringConcatParameter const& rhs) {
+ return lhs.operand_count() == rhs.operand_count();
+}
+
+bool operator!=(StringConcatParameter const& lhs,
+ StringConcatParameter const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StringConcatParameter const& p) {
+ return base::hash_combine(p.operand_count());
+}
+
+std::ostream& operator<<(std::ostream& os, StringConcatParameter const& p) {
+ return os << p.operand_count();
+}
+
+StringConcatParameter const& StringConcatParameterOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSStringConcat);
+ return OpParameter<StringConcatParameter>(op);
+}
+
std::ostream& operator<<(std::ostream& os, CallParameters const& p) {
- os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
- << p.tail_call_mode();
- return os;
+ return os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode();
}
const CallParameters& CallParametersOf(const Operator* op) {
@@ -129,8 +156,7 @@ const CallParameters& CallParametersOf(const Operator* op) {
std::ostream& operator<<(std::ostream& os,
CallForwardVarargsParameters const& p) {
- return os << p.arity() << ", " << p.start_index() << ", "
- << p.tail_call_mode();
+ return os << p.arity() << ", " << p.start_index();
}
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
@@ -533,31 +559,6 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
-bool operator==(GeneratorStoreParameters const& lhs,
- GeneratorStoreParameters const& rhs) {
- return lhs.register_count() == rhs.register_count() &&
- lhs.suspend_type() == rhs.suspend_type();
-}
-bool operator!=(GeneratorStoreParameters const& lhs,
- GeneratorStoreParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-size_t hash_value(GeneratorStoreParameters const& p) {
- return base::hash_combine(p.register_count(),
- static_cast<int>(p.suspend_type()));
-}
-
-std::ostream& operator<<(std::ostream& os, GeneratorStoreParameters const& p) {
- const char* suspend_type = SuspendTypeFor(p.suspend_type());
- return os << p.register_count() << " (" << suspend_type << ")";
-}
-
-const GeneratorStoreParameters& GeneratorStoreParametersOf(const Operator* op) {
- DCHECK_EQ(op->opcode(), IrOpcode::kJSGeneratorStore);
- return OpParameter<GeneratorStoreParameters>(op);
-}
-
BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
return OpParameter<BinaryOperationHint>(op);
@@ -590,12 +591,14 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToPrimitiveToString, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kNoProperties, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(ClassOf, Operator::kPure, 1, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
+ V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
V(ForInNext, Operator::kNoProperties, 4, 1) \
@@ -643,8 +646,11 @@ struct JSOperatorGlobalCache final {
Name##Operator<BinaryOperationHint::kSignedSmall> \
k##Name##SignedSmallOperator; \
Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator; \
+ Name##Operator<BinaryOperationHint::kNumber> k##Name##NumberOperator; \
Name##Operator<BinaryOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
+ Name##Operator<BinaryOperationHint::kNonEmptyString> \
+ k##Name##NonEmptyStringOperator; \
Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
BINARY_OP_LIST(BINARY_OP)
@@ -667,6 +673,7 @@ struct JSOperatorGlobalCache final {
Name##Operator<CompareOperationHint::kInternalizedString> \
k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
@@ -695,8 +702,12 @@ CACHED_OP_LIST(CACHED_OP)
return &cache_.k##Name##SignedSmallOperator; \
case BinaryOperationHint::kSigned32: \
return &cache_.k##Name##Signed32Operator; \
+ case BinaryOperationHint::kNumber: \
+ return &cache_.k##Name##NumberOperator; \
case BinaryOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
+ case BinaryOperationHint::kNonEmptyString: \
+ return &cache_.k##Name##NonEmptyStringOperator; \
case BinaryOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
case BinaryOperationHint::kAny: \
@@ -723,6 +734,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##InternalizedStringOperator; \
case CompareOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
+ case CompareOperationHint::kSymbol: \
+ return &cache_.k##Name##SymbolOperator; \
case CompareOperationHint::kReceiver: \
return &cache_.k##Name##ReceiverOperator; \
case CompareOperationHint::kAny: \
@@ -734,6 +747,15 @@ BINARY_OP_LIST(BINARY_OP)
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
+const Operator* JSOperatorBuilder::StringConcat(int operand_count) {
+ StringConcatParameter parameters(operand_count);
+ return new (zone()) Operator1<StringConcatParameter>( // --
+ IrOpcode::kJSStringConcat, Operator::kNoProperties, // opcode
+ "JSStringConcat", // name
+ operand_count, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
const VectorSlotPair& feedback) {
FeedbackParameter parameters(feedback);
@@ -754,9 +776,9 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
hints); // parameter
}
-const Operator* JSOperatorBuilder::CallForwardVarargs(
- size_t arity, uint32_t start_index, TailCallMode tail_call_mode) {
- CallForwardVarargsParameters parameters(arity, start_index, tail_call_mode);
+const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
+ uint32_t start_index) {
+ CallForwardVarargsParameters parameters(arity, start_index);
return new (zone()) Operator1<CallForwardVarargsParameters>( // --
IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties, // opcode
"JSCallForwardVarargs", // name
@@ -766,10 +788,8 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(
const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode) {
- CallParameters parameters(arity, frequency, feedback, tail_call_mode,
- convert_mode);
+ ConvertReceiverMode convert_mode) {
+ CallParameters parameters(arity, frequency, feedback, convert_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCall, Operator::kNoProperties, // opcode
"JSCall", // name
@@ -777,6 +797,14 @@ const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
+ return new (zone()) Operator1<CallFrequency>( // --
+ IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode
+ "JSCallWithArrayLike", // name
+ 3, 1, 1, 1, 1, 2, // counts
+ frequency); // parameter
+}
+
const Operator* JSOperatorBuilder::CallWithSpread(uint32_t arity) {
SpreadWithArityParameter parameters(arity);
return new (zone()) Operator1<SpreadWithArityParameter>( // --
@@ -831,6 +859,16 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::ConstructWithArrayLike(
+ CallFrequency frequency) {
+ return new (zone()) Operator1<CallFrequency>( // --
+ IrOpcode::kJSConstructWithArrayLike, // opcode
+ Operator::kNoProperties, // properties
+ "JSConstructWithArrayLike", // name
+ 3, 1, 1, 1, 1, 2, // counts
+ frequency); // parameter
+}
+
const Operator* JSOperatorBuilder::ConstructWithSpread(uint32_t arity) {
SpreadWithArityParameter parameters(arity);
return new (zone()) Operator1<SpreadWithArityParameter>( // --
@@ -869,14 +907,12 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
-const Operator* JSOperatorBuilder::GeneratorStore(int register_count,
- SuspendFlags suspend_flags) {
- GeneratorStoreParameters parameters(register_count, suspend_flags);
- return new (zone()) Operator1<GeneratorStoreParameters>( // --
- IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
- "JSGeneratorStore", // name
- 3 + register_count, 1, 1, 0, 1, 0, // counts
- parameters); // parameter
+const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
+ "JSGeneratorStore", // name
+ 3 + register_count, 1, 1, 0, 1, 0, // counts
+ register_count); // parameter
}
const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
@@ -1005,7 +1041,6 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
type); // parameter
}
-
const Operator* JSOperatorBuilder::CreateArray(size_t arity,
Handle<AllocationSite> site) {
// constructor, new_target, arg1, ..., argN
@@ -1022,11 +1057,11 @@ const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
PretenureFlag pretenure) {
CreateClosureParameters parameters(shared_info, feedback, pretenure);
- return new (zone()) Operator1<CreateClosureParameters>( // --
- IrOpcode::kJSCreateClosure, Operator::kNoThrow, // opcode
- "JSCreateClosure", // name
- 0, 1, 1, 1, 1, 0, // counts
- parameters); // parameter
+ return new (zone()) Operator1<CreateClosureParameters>( // --
+ IrOpcode::kJSCreateClosure, Operator::kEliminatable, // opcode
+ "JSCreateClosure", // name
+ 0, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 5ac3b6769e..4c9f815cd1 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -57,6 +57,8 @@ class CallFrequency final {
std::ostream& operator<<(std::ostream&, CallFrequency);
+CallFrequency CallFrequencyOf(Operator const* op) WARN_UNUSED_RESULT;
+
// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
// is used to access the type feedback for a certain {Node}.
class V8_EXPORT_PRIVATE VectorSlotPair {
@@ -180,17 +182,12 @@ SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const*);
// is used as parameter by JSCallForwardVarargs operators.
class CallForwardVarargsParameters final {
public:
- CallForwardVarargsParameters(size_t arity, uint32_t start_index,
- TailCallMode tail_call_mode)
+ CallForwardVarargsParameters(size_t arity, uint32_t start_index)
: bit_field_(ArityField::encode(arity) |
- StartIndexField::encode(start_index) |
- TailCallModeField::encode(tail_call_mode)) {}
+ StartIndexField::encode(start_index)) {}
size_t arity() const { return ArityField::decode(bit_field_); }
uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
bool operator==(CallForwardVarargsParameters const& that) const {
return this->bit_field_ == that.bit_field_;
@@ -206,7 +203,6 @@ class CallForwardVarargsParameters final {
typedef BitField<size_t, 0, 15> ArityField;
typedef BitField<uint32_t, 15, 15> StartIndexField;
- typedef BitField<TailCallMode, 30, 1> TailCallModeField;
uint32_t const bit_field_;
};
@@ -221,11 +217,10 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency frequency,
- VectorSlotPair const& feedback, TailCallMode tail_call_mode,
+ VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
- ConvertReceiverModeField::encode(convert_mode) |
- TailCallModeField::encode(tail_call_mode)),
+ ConvertReceiverModeField::encode(convert_mode)),
frequency_(frequency),
feedback_(feedback) {}
@@ -234,9 +229,6 @@ class CallParameters final {
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
}
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
VectorSlotPair const& feedback() const { return feedback_; }
bool operator==(CallParameters const& that) const {
@@ -253,7 +245,6 @@ class CallParameters final {
typedef BitField<size_t, 0, 29> ArityField;
typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
- typedef BitField<TailCallMode, 31, 1> TailCallModeField;
uint32_t const bit_field_;
CallFrequency const frequency_;
@@ -619,32 +610,26 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
-class GeneratorStoreParameters final {
+// Defines the number of operands passed to a JSStringConcat operator.
+class StringConcatParameter final {
public:
- GeneratorStoreParameters(int register_count, SuspendFlags flags)
- : register_count_(register_count), suspend_flags_(flags) {}
+ explicit StringConcatParameter(int operand_count)
+ : operand_count_(operand_count) {}
- int register_count() const { return register_count_; }
- SuspendFlags suspend_flags() const { return suspend_flags_; }
- SuspendFlags suspend_type() const {
- return suspend_flags_ & SuspendFlags::kSuspendTypeMask;
- }
+ int operand_count() const { return operand_count_; }
private:
- int register_count_;
- SuspendFlags suspend_flags_;
+ uint32_t const operand_count_;
};
-bool operator==(GeneratorStoreParameters const&,
- GeneratorStoreParameters const&);
-bool operator!=(GeneratorStoreParameters const&,
- GeneratorStoreParameters const&);
+bool operator==(StringConcatParameter const&, StringConcatParameter const&);
+bool operator!=(StringConcatParameter const&, StringConcatParameter const&);
-size_t hash_value(GeneratorStoreParameters const&);
+size_t hash_value(StringConcatParameter const&);
-std::ostream& operator<<(std::ostream&, GeneratorStoreParameters const&);
+std::ostream& operator<<(std::ostream&, StringConcatParameter const&);
-const GeneratorStoreParameters& GeneratorStoreParametersOf(const Operator* op);
+StringConcatParameter const& StringConcatParameterOf(Operator const*);
BinaryOperationHint BinaryOperationHintOf(const Operator* op);
@@ -684,6 +669,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ToNumber();
const Operator* ToObject();
const Operator* ToString();
+ const Operator* ToPrimitiveToString();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
@@ -702,13 +688,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
int literal_flags, int literal_index);
- const Operator* CallForwardVarargs(size_t arity, uint32_t start_index,
- TailCallMode tail_call_mode);
+ const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Call(
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny);
+ const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(uint32_t arity);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
@@ -718,6 +703,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Construct(uint32_t arity,
CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
+ const Operator* ConstructWithArrayLike(CallFrequency frequency);
const Operator* ConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -757,6 +743,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ClassOf();
const Operator* TypeOf();
+ const Operator* HasInPrototypeChain();
const Operator* InstanceOf();
const Operator* OrdinaryHasInstance();
@@ -766,12 +753,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadMessage();
const Operator* StoreMessage();
+ const Operator* StringConcat(int operand_count);
+
// Used to implement Ignition's SuspendGenerator bytecode.
- const Operator* GeneratorStore(int register_count,
- SuspendFlags suspend_flags);
+ const Operator* GeneratorStore(int register_count);
- // Used to implement Ignition's ResumeGenerator bytecode.
+ // Used to implement Ignition's RestoreGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
+ // Used to implement Ignition's RestoreGeneratorRegisters bytecode.
const Operator* GeneratorRestoreRegister(int index);
const Operator* StackCheck();
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 7c70b1ea11..3398a33036 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -25,11 +25,15 @@ bool BinaryOperationHintToNumberOperationHint(
case BinaryOperationHint::kSigned32:
*number_hint = NumberOperationHint::kSigned32;
return true;
+ case BinaryOperationHint::kNumber:
+ *number_hint = NumberOperationHint::kNumber;
+ return true;
case BinaryOperationHint::kNumberOrOddball:
*number_hint = NumberOperationHint::kNumberOrOddball;
return true;
case BinaryOperationHint::kAny:
case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kNonEmptyString:
case BinaryOperationHint::kString:
break;
}
@@ -82,6 +86,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
+ case CompareOperationHint::kSymbol:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -117,7 +122,6 @@ class JSSpeculativeBinopBuilder final {
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
@@ -138,7 +142,6 @@ class JSSpeculativeBinopBuilder final {
break;
}
UNREACHABLE();
- return nullptr;
}
Node* BuildSpeculativeOperation(const Operator* op) {
@@ -254,6 +257,53 @@ Reduction JSTypeHintLowering::ReduceToNumberOperation(Node* input, Node* effect,
return Reduction();
}
+Reduction JSTypeHintLowering::ReduceToPrimitiveToStringOperation(
+ Node* input, Node* effect, Node* control, FeedbackSlot slot) const {
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector(), slot);
+ BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
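+  // When the feedback guarantees a (non-empty) string input, a simple string
+  // check suffices instead of a full ToPrimitive plus ToString conversion.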
+ if (hint == BinaryOperationHint::kNonEmptyString) {
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->simplified()->CheckNonEmptyString(), input, effect, control);
+ return Reduction(node);
+ } else if (hint == BinaryOperationHint::kString) {
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->simplified()->CheckString(), input, effect, control);
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceCallOperation(const Operator* op,
+ Node* const* args,
+ int arg_count, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSCall, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ CallICNexus nexus(feedback_vector(), slot);
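+  // Insert a soft deoptimization if there is insufficient type feedback for
+  // this call site, so that feedback can be gathered before optimizing.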
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceConstructOperation(
+ const Operator* op, Node* const* args, int arg_count, Node* effect,
+ Node* control, FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSConstruct, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ CallICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
Reduction JSTypeHintLowering::ReduceLoadNamedOperation(
const Operator* op, Node* obj, Node* effect, Node* control,
FeedbackSlot slot) const {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 7bd237814d..50779c9f3c 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -59,6 +59,21 @@ class JSTypeHintLowering {
Reduction ReduceToNumberOperation(Node* value, Node* effect, Node* control,
FeedbackSlot slot) const;
+ // Potential reduction to ToPrimitiveToString operations
+ Reduction ReduceToPrimitiveToStringOperation(Node* value, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
+
+ // Potential reduction of call operations.
+ Reduction ReduceCallOperation(const Operator* op, Node* const* args,
+ int arg_count, Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
+ // Potential reduction of construct operations.
+ Reduction ReduceConstructOperation(const Operator* op, Node* const* args,
+ int arg_count, Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
// Potential reduction of property access operations.
Reduction ReduceLoadNamedOperation(const Operator* op, Node* obj,
Node* effect, Node* control,
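
The two reductions added above do not lower the call or construct node themselves; they only consult the CallIC feedback for the slot and, when no feedback has been collected yet, fall back to a soft deoptimization so execution returns to the interpreter to gather feedback. A minimal standalone sketch of that gating decision follows (simplified enums and return type, not V8's actual API):

#include <iostream>
#include <optional>

// Simplified stand-ins for the feedback states a CallIC can report.
enum class CallFeedback { kNone, kMonomorphic, kMegamorphic };

// Models the shape of ReduceCallOperation: with no feedback yet, prefer a
// soft deopt over compiling a generic (and likely soon-invalid) call.
std::optional<const char*> ReduceCall(CallFeedback feedback) {
  if (feedback == CallFeedback::kNone) {
    return "DeoptimizeReason::kInsufficientTypeFeedbackForCall";
  }
  return std::nullopt;  // No reduction; later passes specialize the call.
}

int main() {
  auto r = ReduceCall(CallFeedback::kNone);
  std::cout << (r ? *r : "no change") << "\n";
}
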
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 64838a1f83..243a80a645 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -22,6 +22,29 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+bool WillCreateConsString(HeapObjectMatcher left, HeapObjectMatcher right) {
+ if (right.HasValue() && right.Value()->IsString()) {
+ Handle<String> right_string = Handle<String>::cast(right.Value());
+ if (right_string->length() >= ConsString::kMinLength) return true;
+ }
+ if (left.HasValue() && left.Value()->IsString()) {
+ Handle<String> left_string = Handle<String>::cast(left.Value());
+ if (left_string->length() >= ConsString::kMinLength) {
+ // The invariant for ConsString requires the left hand side to be
+ // a sequential or external string if the right hand side is the
+ // empty string. Since we don't know anything about the right hand
+ // side here, we must ensure that the left hand side satisfies the
+ // constraints independent of the right hand side.
+ return left_string->IsSeqString() || left_string->IsExternalString();
+ }
+ }
+ return false;
+}
+
+} // namespace
+
// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
// dependencies during lowering of a binop and contains numerous helper
@@ -47,6 +70,7 @@ class JSBinopReduction final {
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
+ case CompareOperationHint::kSymbol:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -85,6 +109,16 @@ class JSBinopReduction final {
return false;
}
+ bool IsSymbolCompareOperation() {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kSymbol) &&
+ BothInputsMaybe(Type::Symbol());
+ }
+ return false;
+ }
+
// Check if a string addition will definitely result in creating a ConsString,
// i.e. if the combined length of the resulting string exceeds the ConsString
// minimum length.
@@ -95,21 +129,7 @@ class JSBinopReduction final {
((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
HeapObjectBinopMatcher m(node_);
- if (m.right().HasValue() && m.right().Value()->IsString()) {
- Handle<String> right_string = Handle<String>::cast(m.right().Value());
- if (right_string->length() >= ConsString::kMinLength) return true;
- }
- if (m.left().HasValue() && m.left().Value()->IsString()) {
- Handle<String> left_string = Handle<String>::cast(m.left().Value());
- if (left_string->length() >= ConsString::kMinLength) {
- // The invariant for ConsString requires the left hand side to be
- // a sequential or external string if the right hand side is the
- // empty string. Since we don't know anything about the right hand
- // side here, we must ensure that the left hand side satisfy the
- // constraints independent of the right hand side.
- return left_string->IsSeqString() || left_string->IsExternalString();
- }
- }
+ return WillCreateConsString(m.left(), m.right());
}
return false;
}
@@ -137,6 +157,24 @@ class JSBinopReduction final {
}
}
+ // Checks that both inputs are Symbol, and if we don't know
+ // statically that one side is already a Symbol, insert a
+ // CheckSymbol node.
+ void CheckInputsToSymbol() {
+ if (!left_type()->Is(Type::Symbol())) {
+ Node* left_input = graph()->NewNode(simplified()->CheckSymbol(), left(),
+ effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+ if (!right_type()->Is(Type::Symbol())) {
+ Node* right_input = graph()->NewNode(simplified()->CheckSymbol(), right(),
+ effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
// Checks that both inputs are String, and if we don't know
// statically that one side is already a String, insert a
// CheckString node.
@@ -307,7 +345,6 @@ class JSBinopReduction final {
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* NumberOpFromSpeculativeNumberOp() {
@@ -332,7 +369,6 @@ class JSBinopReduction final {
break;
}
UNREACHABLE();
- return nullptr;
}
bool LeftInputIs(Type* t) { return left_type()->Is(t); }
@@ -488,11 +524,9 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- empty_string_type_(
- Type::HeapConstant(factory()->empty_string(), graph()->zone())),
pointer_comparable_type_(
Type::Union(Type::Oddball(),
- Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
+ Type::Union(Type::SymbolOrReceiver(), Type::EmptyString(),
graph()->zone()),
graph()->zone())),
type_cache_(TypeCache::Get()) {
@@ -506,7 +540,8 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
JSBinopReduction r(this, node);
NumberOperationHint hint = NumberOperationHintOf(node->op());
- if (hint == NumberOperationHint::kNumberOrOddball &&
+ if ((hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::PlainPrimitive()) &&
r.NeitherInputCanBe(Type::StringOrReceiver())) {
// SpeculativeNumberAdd(x:-string, y:-string) =>
@@ -540,12 +575,12 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (r.LeftInputIs(empty_string_type_)) {
+ if (r.LeftInputIs(Type::EmptyString())) {
Node* value = effect = graph()->NewNode(simplified()->CheckString(),
r.right(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
- } else if (r.RightInputIs(empty_string_type_)) {
+ } else if (r.RightInputIs(Type::EmptyString())) {
Node* value = effect = graph()->NewNode(simplified()->CheckString(),
r.left(), effect, control);
ReplaceWithValue(node, value, effect, control);
@@ -594,7 +629,8 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
JSBinopReduction r(this, node);
NumberOperationHint hint = NumberOperationHintOf(node->op());
- if (hint == NumberOperationHint::kNumberOrOddball &&
+ if ((hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::NumberOrOddball())) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
@@ -651,26 +687,9 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
second_type = NodeProperties::GetType(second);
}
- // Determine the {first} length.
- HeapObjectBinopMatcher m(node);
- Node* first_length =
- (m.left().HasValue() && m.left().Value()->IsString())
- ? jsgraph()->Constant(
- Handle<String>::cast(m.left().Value())->length())
- : effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- first, effect, control);
-
- // Determine the {second} length.
- Node* second_length =
- (m.right().HasValue() && m.right().Value()->IsString())
- ? jsgraph()->Constant(
- Handle<String>::cast(m.right().Value())->length())
- : effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- second, effect, control);
-
// Compute the resulting length.
+ Node* first_length = BuildGetStringLength(first, &effect, control);
+ Node* second_length = BuildGetStringLength(second, &effect, control);
Node* length =
graph()->NewNode(simplified()->NumberAdd(), first_length, second_length);
@@ -689,35 +708,175 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
} else {
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
{
- // Throw a RangeError in case of overflow.
- Node* vfalse = efalse = if_false = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
- context, frame_state, efalse, if_false);
-
- // Update potential {IfException} uses of {node} to point to the
- // %ThrowInvalidStringLength runtime call node instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse);
- NodeProperties::ReplaceEffectInput(on_exception, efalse);
- if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
- Revisit(on_exception);
- }
-
- // The above %ThrowInvalidStringLength runtime call is an unconditional
- // throw, making it impossible to return a successful completion in this
- // case. We simply connect the successful completion to the graph end.
- if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), if_false);
- Revisit(graph()->end());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ BuildThrowStringRangeError(node, context, frame_state, effect, if_false);
}
control = graph()->NewNode(common()->IfTrue(), branch);
}
+ Node* result = effect =
+ BuildCreateConsString(first, second, length, effect, control);
+ ReplaceWithValue(node, result, effect, control);
+ return Replace(result);
+}
+
+namespace {
+
+// Check if a string concatenation will definitely result in creating a
+// ConsString for all operands, i.e. if the combined length of the first two
+// operands exceeds the ConsString minimum length and we never concatenate the
+// empty string.
+bool ShouldConcatenateAsConsStrings(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStringConcat, node->opcode());
+ DCHECK_GE(StringConcatParameterOf(node->op()).operand_count(), 3);
+
+ // Check that the concatenation of the first two strings results in a cons
+ // string.
+ HeapObjectMatcher first_matcher(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher second_matcher(NodeProperties::GetValueInput(node, 1));
+ if (!WillCreateConsString(first_matcher, second_matcher)) return false;
+
+ // Now check that all other RHSs of the ConsStrings will be non-empty.
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+ for (int i = 2; i < operand_count; ++i) {
+ Node* operand = NodeProperties::GetValueInput(node, i);
+ DCHECK(NodeProperties::GetType(operand)->Is(Type::String()));
+ if (!NodeProperties::GetType(operand)->Is(Type::NonEmptyString())) {
+ return false;
+ }
+ }
+
+ // If all these constraints hold, the result will definitely be a ConsString.
+ return true;
+}
+
+} // namespace
+
+Reduction JSTypedLowering::ReduceJSStringConcat(Node* node) {
+ if (ShouldConcatenateAsConsStrings(node)) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+
+ // Set up string overflow check dependencies.
+ NodeVector overflow_controls(graph()->zone());
+ NodeVector overflow_effects(graph()->zone());
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ // Add a code dependency on the string length overflow protector.
+ dependencies()->AssumePropertyCell(factory()->string_length_protector());
+ }
+
+ // Get the first operand and its length.
+ Node* current_result = NodeProperties::GetValueInput(node, 0);
+ Node* current_length =
+ BuildGetStringLength(current_result, &effect, control);
+
+ for (int i = 1; i < operand_count; ++i) {
+ bool last_operand = i == operand_count - 1;
+ // Get the next operand and its length.
+ Node* current_operand = NodeProperties::GetValueInput(node, i);
+ HeapObjectMatcher m(current_operand);
+ Node* operand_length =
+ BuildGetStringLength(current_operand, &effect, control);
+
+      // Update the current length and check that it doesn't overflow.
+ current_length = graph()->NewNode(simplified()->NumberAdd(),
+ current_length, operand_length);
+ Node* check = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ current_length,
+ jsgraph()->Constant(String::kMaxLength));
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ // We can just deoptimize if the {check} fails. Besides generating a
+ // shorter code sequence than the version below, this has the additional
+ // benefit of not holding on to the lazy {frame_state} and thus
+ // potentially reduces the number of live ranges and allows for more
+ // truncations.
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ } else {
+ // Otherwise insert a branch to the runtime call which throws on
+ // overflow.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+ overflow_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ overflow_effects.push_back(effect);
+
+ // Build the string overflow throwing code if we have checked all the
+ // lengths.
+ if (last_operand) {
+ // Merge control and effect of overflow checks.
+ int merge_count = operand_count - 1;
+ DCHECK_EQ(overflow_controls.size(), static_cast<size_t>(merge_count));
+ DCHECK_EQ(overflow_effects.size(), static_cast<size_t>(merge_count));
+
+ Node* if_false =
+ graph()->NewNode(common()->Merge(merge_count), merge_count,
+ &overflow_controls.front());
+ overflow_effects.push_back(if_false);
+ Node* efalse =
+ graph()->NewNode(common()->EffectPhi(merge_count),
+ merge_count + 1, &overflow_effects.front());
+
+ // And throw the range error.
+ BuildThrowStringRangeError(node, context, frame_state, efalse,
+ if_false);
+ }
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+ current_result = effect = BuildCreateConsString(
+ current_result, current_operand, current_length, effect, control);
+ }
+ ReplaceWithValue(node, current_result, effect, control);
+ return Replace(current_result);
+ }
+ return NoChange();
+}
+
+Node* JSTypedLowering::BuildGetStringLength(Node* value, Node** effect,
+ Node* control) {
+ HeapObjectMatcher m(value);
+ Node* length =
+ (m.HasValue() && m.Value()->IsString())
+ ? jsgraph()->Constant(Handle<String>::cast(m.Value())->length())
+ : (*effect) = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()),
+ value, *effect, control);
+ return length;
+}
+
+void JSTypedLowering::BuildThrowStringRangeError(Node* node, Node* context,
+ Node* frame_state,
+ Node* effect, Node* control) {
+ // Throw a RangeError in case of overflow.
+ Node* value = effect = control = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), context,
+ frame_state, effect, control);
+
+ // Update potential {IfException} uses of {node} to point to the
+ // %ThrowInvalidStringLength runtime call node instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, value);
+ NodeProperties::ReplaceEffectInput(on_exception, effect);
+ control = graph()->NewNode(common()->IfSuccess(), value);
+ Revisit(on_exception);
+ }
+
+ // The above %ThrowInvalidStringLength runtime call is an unconditional
+ // throw, making it impossible to return a successful completion in this
+ // case. We simply connect the successful completion to the graph end.
+ control = graph()->NewNode(common()->Throw(), effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
+}
+Node* JSTypedLowering::BuildCreateConsString(Node* first, Node* second,
+ Node* length, Node* effect,
+ Node* control) {
// Figure out the map for the resulting ConsString.
// TODO(turbofan): We currently just use the cons_string_map here for
// the sake of simplicity; we could also try to be smarter here and
@@ -746,13 +905,8 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
simplified()->StoreField(AccessBuilder::ForConsStringSecond()), value,
second, effect, control);
- // Morph the {node} into a {FinishRegion}.
- ReplaceWithValue(node, node, node, control);
- node->ReplaceInput(0, value);
- node->ReplaceInput(1, effect);
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->FinishRegion());
- return Changed(node);
+ // Return the {FinishRegion} node.
+ return graph()->NewNode(common()->FinishRegion(), value, effect);
}
Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
@@ -761,7 +915,7 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
r.BothInputsAre(Type::Unsigned32())) {
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp());
}
- return Changed(node);
+ return NoChange();
}
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
@@ -896,6 +1050,9 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
+ } else if (r.IsSymbolCompareOperation()) {
+ r.CheckInputsToSymbol();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
return NoChange();
}
@@ -953,6 +1110,9 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
+ } else if (r.IsSymbolCompareOperation()) {
+ r.CheckInputsToSymbol();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
return NoChange();
}
@@ -1128,6 +1288,7 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
}
Reduction JSTypedLowering::ReduceJSToString(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToString, node->opcode());
// Try to reduce the input first.
Node* const input = node->InputAt(0);
Reduction reduction = ReduceJSToStringInput(input);
@@ -1138,6 +1299,23 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSToPrimitiveToString(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToPrimitiveToString, node->opcode());
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Primitive())) {
+ // If node is already a primitive, then reduce to JSToString and try to
+ // reduce that further.
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ Reduction reduction = ReduceJSToString(node);
+ if (reduction.Changed()) {
+ return reduction;
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1151,13 +1329,6 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
return Replace(receiver);
}
- // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
- if (receiver_type->Maybe(Type::NullOrUndefined()) &&
- NodeProperties::IsExceptionalCall(node)) {
- // ToObject throws for null or undefined inputs.
- return NoChange();
- }
-
// Check whether {receiver} is a spec object.
Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
Node* branch =
@@ -1172,7 +1343,7 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
Node* rfalse;
{
// Convert {receiver} using the ToObjectStub.
- Callable callable = CodeFactory::ToObject(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
@@ -1181,6 +1352,18 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
receiver, context, frame_state, efalse, if_false);
}
+ // Update potential {IfException} uses of {node} to point to the above
+ // ToObject stub call node instead. Note that the stub can only throw on
+ // receivers that can be null or undefined.
+ Node* on_exception = nullptr;
+ if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+ NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, if_false);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse);
+ if_false = graph()->NewNode(common()->IfSuccess(), if_false);
+ Revisit(on_exception);
+ }
+
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
@@ -1339,6 +1522,146 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSHasInPrototypeChain(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSHasInPrototypeChain, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ Node* prototype = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // If {value} cannot be a receiver, then it cannot have {prototype} in
+  // its prototype chain (all Primitive values have a null prototype).
+ if (value_type->Is(Type::Primitive())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Loop through the {value}s prototype chain looking for the {prototype}.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), value, value, loop);
+ NodeProperties::SetType(vloop, Type::NonInternal());
+
+ // Load the {value} map and instance type.
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ Node* value_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ effect, control);
+
+ // Check if the {value} is a special receiver, because for special
+ // receivers, i.e. proxies or API values that need access checks,
+ // we have to use the %HasInPrototypeChain runtime function instead.
+ Node* check1 = graph()->NewNode(
+ simplified()->NumberLessThanOrEqual(), value_instance_type,
+ jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch1);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = effect;
+ Node* vtrue1;
+
+ // Check if the {value} is not a receiver at all.
+ Node* check10 =
+ graph()->NewNode(simplified()->NumberLessThan(), value_instance_type,
+ jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
+ Node* branch10 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check10, if_true1);
+
+ // A primitive value cannot match the {prototype} we're looking for.
+ if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
+ vtrue1 = jsgraph()->FalseConstant();
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
+ Node* efalse1 = etrue1;
+ Node* vfalse1;
+ {
+ // Slow path, need to call the %HasInPrototypeChain runtime function.
+ vfalse1 = efalse1 = if_false1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain), value,
+ prototype, context, frame_state, efalse1, if_false1);
+
+ // Replace any potential {IfException} uses of {node} to catch
+ // exceptions from this %HasInPrototypeChain runtime call instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse1);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ Revisit(on_exception);
+ }
+ }
+
+ // Load the {value} prototype.
+ Node* value_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), value_map,
+ effect, control);
+
+ // Check if we reached the end of {value}s prototype chain.
+ Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(),
+ value_prototype, jsgraph()->NullConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = effect;
+ Node* vtrue2 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch2);
+
+ // Check if we reached the {prototype}.
+ Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
+ value_prototype, prototype);
+ Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
+
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* etrue3 = effect;
+ Node* vtrue3 = jsgraph()->TrueConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch3);
+
+ // Close the loop.
+ vloop->ReplaceInput(1, value_prototype);
+ eloop->ReplaceInput(1, effect);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(5), if_true0, if_true1, if_true2,
+ if_true3, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
+ etrue3, efalse1, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vtrue1);
+ node->ReplaceInput(2, vtrue2);
+ node->ReplaceInput(3, vtrue3);
+ node->ReplaceInput(4, vfalse1);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 5));
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
@@ -1534,7 +1857,7 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
{
// Convert {receiver} using the ToObjectStub. The call does not require a
// frame-state in this case, because neither null nor undefined is passed.
- Callable callable = CodeFactory::ToObject(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, node->op()->properties());
@@ -1592,7 +1915,7 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
{
// Convert {receiver} using the ToObjectStub. The call does not require a
// frame-state in this case, because neither null nor undefined is passed.
- Callable callable = CodeFactory::ToObject(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, node->op()->properties());
@@ -1840,10 +2163,6 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
if (target_type->Is(Type::Function())) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
-
// Patch {node} to an indirect call via CallFunctionForwardVarargs.
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
@@ -1912,10 +2231,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
-
Node* new_target = jsgraph()->UndefinedConstant();
Node* argument_count = jsgraph()->Constant(arity);
if (NeedsArgumentAdaptorFrame(shared, arity)) {
@@ -1951,10 +2266,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
if (target_type->Is(Type::Function())) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
-
// Patch {node} to an indirect call via the CallFunction builtin.
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
@@ -1970,9 +2281,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Maybe we did at least learn something about the {receiver}.
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
- node,
- javascript()->Call(p.arity(), p.frequency(), p.feedback(), convert_mode,
- p.tail_call_mode()));
+ node, javascript()->Call(p.arity(), p.frequency(), p.feedback(),
+ convert_mode));
return Changed(node);
}
@@ -2032,7 +2342,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
{
// Filter the {key} to check if it's still a valid property of the
// {receiver} (does the ToName conversion implicitly).
- Callable const callable = CodeFactory::ForInFilter(isolate());
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kForInFilter);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState);
@@ -2040,7 +2351,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
receiver, context, frame_state, effect, if_false0);
- // Update potential {IfException} uses of {node} to point to the ahove
+ // Update potential {IfException} uses of {node} to point to the above
// ForInFilter stub call node instead.
Node* if_exception = nullptr;
if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
@@ -2095,21 +2406,19 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- const GeneratorStoreParameters& p = GeneratorStoreParametersOf(node->op());
+ int register_count = OpParameter<int>(node);
FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
FieldAccess continuation_field =
AccessBuilder::ForJSGeneratorObjectContinuation();
FieldAccess input_or_debug_pos_field =
- p.suspend_flags() == SuspendFlags::kAsyncGeneratorAwait
- ? AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos()
- : AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
generator, effect, control);
- for (int i = 0; i < p.register_count(); ++i) {
+ for (int i = 0; i < register_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, 3 + i);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
@@ -2195,6 +2504,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
return ReduceNumberBinop(node);
+ case IrOpcode::kJSHasInPrototypeChain:
+ return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSToBoolean:
@@ -2209,6 +2520,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
+ case IrOpcode::kJSToPrimitiveToString:
+ return ReduceJSToPrimitiveToString(node);
+ case IrOpcode::kJSStringConcat:
+ return ReduceJSStringConcat(node);
case IrOpcode::kJSToObject:
return ReduceJSToObject(node);
case IrOpcode::kJSTypeOf:
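
The refactored ConsString logic above hinges on ConsString::kMinLength: a concatenation is lowered to an inline ConsString allocation only when a constant operand already guarantees the combined length reaches that threshold (and, when only the left operand is known, that it is a sequential or external string, per the invariant comment in WillCreateConsString). A minimal standalone sketch of the length rule, with the threshold value assumed here rather than taken from the V8 headers:

#include <iostream>
#include <string>

// Assumed value of ConsString::kMinLength; below this, V8 flattens the
// result into a sequential string instead of building a ConsString.
constexpr size_t kConsStringMinLength = 13;

// Simplified model: a nullptr operand means its value is not statically
// known. The real code additionally checks the left operand's representation.
bool WillCreateConsString(const std::string* left, const std::string* right) {
  if (right && right->size() >= kConsStringMinLength) return true;
  if (left && left->size() >= kConsStringMinLength) return true;
  return false;
}

int main() {
  std::string a = "hello, turbofan!";  // 16 characters, above the threshold
  std::cout << WillCreateConsString(&a, nullptr) << "\n";  // prints 1
}
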
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 0b92a40a5b..b2e2a162ed 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -42,6 +42,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Flags flags, JSGraph* jsgraph, Zone* zone);
~JSTypedLowering() final {}
+ const char* reducer_name() const override { return "JSTypedLowering"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -52,6 +54,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSHasInPrototypeChain(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
@@ -67,6 +70,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSToPrimitiveToString(Node* node);
+ Reduction ReduceJSStringConcat(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
Reduction ReduceJSConstructForwardVarargs(Node* node);
@@ -92,6 +97,13 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
+ // Helpers for ReduceJSCreateConsString and ReduceJSStringConcat.
+ Node* BuildGetStringLength(Node* value, Node** effect, Node* control);
+ void BuildThrowStringRangeError(Node* node, Node* context, Node* frame_state,
+ Node* effect, Node* control);
+ Node* BuildCreateConsString(Node* first, Node* second, Node* length,
+ Node* effect, Node* control);
+
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -105,7 +117,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
CompilationDependencies* dependencies_;
Flags flags_;
JSGraph* jsgraph_;
- Type* empty_string_type_;
Type* shifted_int32_ranges_[4];
Type* pointer_comparable_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 1275f8f6ff..7224288b5a 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -121,7 +121,6 @@ int CallDescriptor::CalculateFixedFrameSize() const {
return TypedFrameConstants::kFixedSlotCount;
}
UNREACHABLE();
- return 0;
}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
@@ -148,9 +147,8 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kAllocateInTargetSpace:
case Runtime::kConvertReceiver:
case Runtime::kCreateIterResultObject:
- case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
- case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kGeneratorGetContinuation:
+ case Runtime::kIncBlockCounter:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
@@ -183,8 +181,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineIsArray:
case Runtime::kInlineIsJSMap:
case Runtime::kInlineIsJSSet:
- case Runtime::kInlineIsJSMapIterator:
- case Runtime::kInlineIsJSSetIterator:
case Runtime::kInlineIsJSWeakMap:
case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
@@ -348,11 +344,11 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties, MachineType return_type,
- size_t return_count) {
+ size_t return_count, Linkage::ContextSpecification context_spec) {
const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int js_parameter_count =
register_parameter_count + stack_parameter_count;
- const int context_count = 1;
+ const int context_count = context_spec == kPassContext ? 1 : 0;
const size_t parameter_count =
static_cast<size_t>(js_parameter_count + context_count);
@@ -384,7 +380,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
}
}
// Add context.
- locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
+ if (context_count) {
+ locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
+ }
// The target for stub calls is a code object.
MachineType target_type = MachineType::AnyTagged();
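
The new ContextSpecification argument only changes how many implicit parameters the stub call descriptor declares: descriptors built with kNoContext drop the context register parameter. A small standalone sketch of that parameter-count bookkeeping (simplified, not the real Linkage API):

#include <iostream>

// Mirrors the enum added to Linkage below.
enum ContextSpecification { kNoContext, kPassContext };

// Models the parameter_count computation in GetStubCallDescriptor.
int TotalParameterCount(int register_params, int stack_params,
                        ContextSpecification spec) {
  int context_count = (spec == kPassContext) ? 1 : 0;
  return register_params + stack_params + context_count;
}

int main() {
  std::cout << TotalParameterCount(3, 0, kPassContext) << "\n";  // 4
  std::cout << TotalParameterCount(3, 0, kNoContext) << "\n";    // 3
}
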
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index b515aca2da..82be5c7434 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -340,6 +340,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
// Call[BytecodeDispatch] address, arg 1, arg 2, [...]
class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
public:
+ enum ContextSpecification { kNoContext, kPassContext };
+
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
@@ -365,7 +367,8 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties = Operator::kNoProperties,
MachineType return_type = MachineType::AnyTagged(),
- size_t return_count = 1);
+ size_t return_count = 1,
+ ContextSpecification context_spec = kPassContext);
static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
diff --git a/deps/v8/src/compiler/liveness-analyzer.cc b/deps/v8/src/compiler/liveness-analyzer.cc
deleted file mode 100644
index 0cf13332f4..0000000000
--- a/deps/v8/src/compiler/liveness-analyzer.cc
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/adapters.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/liveness-analyzer.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/state-values-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-LivenessAnalyzer::LivenessAnalyzer(size_t local_count, bool has_accumulator,
- Zone* zone)
- : zone_(zone),
- blocks_(zone),
- local_count_(local_count),
- has_accumulator_(has_accumulator),
- queue_(zone) {}
-
-void LivenessAnalyzer::Print(std::ostream& os) {
- for (auto block : blocks_) {
- block->Print(os);
- os << std::endl;
- }
-}
-
-
-LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock() {
- LivenessAnalyzerBlock* result =
- new (zone()->New(sizeof(LivenessAnalyzerBlock))) LivenessAnalyzerBlock(
- blocks_.size(), local_count_, has_accumulator_, zone());
- blocks_.push_back(result);
- return result;
-}
-
-
-LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock(
- LivenessAnalyzerBlock* predecessor) {
- LivenessAnalyzerBlock* result = NewBlock();
- result->AddPredecessor(predecessor);
- return result;
-}
-
-
-void LivenessAnalyzer::Queue(LivenessAnalyzerBlock* block) {
- if (!block->IsQueued()) {
- block->SetQueued();
- queue_.push(block);
- }
-}
-
-
-void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
- if (local_count_ == 0 && !has_accumulator_) {
- // No variables => nothing to do.
- return;
- }
-
- // Put all blocks into the queue.
- DCHECK(queue_.empty());
- for (auto block : blocks_) {
- Queue(block);
- }
-
- // Compute the fix-point.
- BitVector working_area(
- static_cast<int>(local_count_) + (has_accumulator_ ? 1 : 0), zone_);
- while (!queue_.empty()) {
- LivenessAnalyzerBlock* block = queue_.front();
- queue_.pop();
- block->Process(&working_area, nullptr);
-
- for (auto i = block->pred_begin(); i != block->pred_end(); i++) {
- if ((*i)->UpdateLive(&working_area)) {
- Queue(*i);
- }
- }
- }
-
- // Update the frame states according to the liveness.
- for (auto block : blocks_) {
- block->Process(&working_area, replacer);
- }
-}
-
-LivenessAnalyzerBlock::LivenessAnalyzerBlock(size_t id, size_t local_count,
- bool has_accumulator, Zone* zone)
- : entries_(zone),
- predecessors_(zone),
- live_(static_cast<int>(local_count) + (has_accumulator ? 1 : 0), zone),
- queued_(false),
- has_accumulator_(has_accumulator),
- id_(id) {}
-
-void LivenessAnalyzerBlock::Process(BitVector* result,
- NonLiveFrameStateSlotReplacer* replacer) {
- queued_ = false;
-
- // Copy the bitvector to the target bit vector.
- result->CopyFrom(live_);
-
- for (auto entry : base::Reversed(entries_)) {
- switch (entry.kind()) {
- case Entry::kLookup:
- result->Add(entry.var());
- break;
- case Entry::kBind:
- result->Remove(entry.var());
- break;
- case Entry::kCheckpoint:
- if (replacer != nullptr) {
- replacer->ClearNonLiveFrameStateSlots(entry.node(), result);
- }
- break;
- }
- }
-}
-
-
-bool LivenessAnalyzerBlock::UpdateLive(BitVector* working_area) {
- return live_.UnionIsChanged(*working_area);
-}
-
-
-void NonLiveFrameStateSlotReplacer::ClearNonLiveFrameStateSlots(
- Node* frame_state, BitVector* liveness) {
- DCHECK_EQ(liveness->length(), permanently_live_.length());
-
- DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
- Node* locals_state = frame_state->InputAt(1);
- DCHECK_EQ(locals_state->opcode(), IrOpcode::kStateValues);
- int count = liveness->length() - (has_accumulator_ ? 1 : 0);
- DCHECK_EQ(count, static_cast<int>(StateValuesAccess(locals_state).size()));
- for (int i = 0; i < count; i++) {
- if (!liveness->Contains(i) && !permanently_live_.Contains(i)) {
- Node* new_values = ClearNonLiveStateValues(locals_state, liveness);
- frame_state->ReplaceInput(1, new_values);
- break;
- }
- }
-
- if (has_accumulator_) {
- DCHECK_EQ(frame_state->InputAt(2)->opcode(), IrOpcode::kStateValues);
- DCHECK_EQ(
- static_cast<int>(StateValuesAccess(frame_state->InputAt(2)).size()), 1);
- int index = liveness->length() - 1;
- if (!liveness->Contains(index) && !permanently_live_.Contains(index)) {
- Node* new_value =
- state_values_cache()->GetNodeForValues(&replacement_node_, 1);
- frame_state->ReplaceInput(2, new_value);
- }
- }
-}
-
-
-Node* NonLiveFrameStateSlotReplacer::ClearNonLiveStateValues(
- Node* values, BitVector* liveness) {
- DCHECK(inputs_buffer_.empty());
-
- int var = 0;
- for (Node* value_node : values->inputs()) {
- // Make sure this isn't a state value tree
- DCHECK(value_node->opcode() != IrOpcode::kStateValues);
-
- // Index of the next variable is its furure index in the inputs buffer,
- // i.e., the buffer's size.
- bool live = liveness->Contains(var) || permanently_live_.Contains(var);
- inputs_buffer_.push_back(live ? value_node : replacement_node_);
-
- var++;
- }
-
- Node* result = state_values_cache()->GetNodeForValues(
- inputs_buffer_.empty() ? nullptr : &(inputs_buffer_.front()),
- inputs_buffer_.size());
- inputs_buffer_.clear();
- return result;
-}
-
-
-void LivenessAnalyzerBlock::Print(std::ostream& os) {
- os << "Block " << id();
- bool first = true;
- for (LivenessAnalyzerBlock* pred : predecessors_) {
- if (!first) {
- os << ", ";
- } else {
- os << "; predecessors: ";
- first = false;
- }
- os << pred->id();
- }
- os << std::endl;
-
- for (auto entry : entries_) {
- os << " ";
- switch (entry.kind()) {
- case Entry::kLookup:
- if (has_accumulator_ && entry.var() == live_.length() - 1) {
- os << "- Lookup accumulator" << std::endl;
- } else {
- os << "- Lookup " << entry.var() << std::endl;
- }
- break;
- case Entry::kBind:
- if (has_accumulator_ && entry.var() == live_.length() - 1) {
- os << "- Bind accumulator" << std::endl;
- } else {
- os << "- Bind " << entry.var() << std::endl;
- }
- break;
- case Entry::kCheckpoint:
- os << "- Checkpoint " << entry.node()->id() << std::endl;
- break;
- }
- }
-
- if (live_.length() > 0) {
- os << " Live set: ";
- for (int i = 0; i < live_.length(); i++) {
- os << (live_.Contains(i) ? "L" : ".");
- }
- os << std::endl;
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/liveness-analyzer.h b/deps/v8/src/compiler/liveness-analyzer.h
deleted file mode 100644
index 63fc52c125..0000000000
--- a/deps/v8/src/compiler/liveness-analyzer.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_LIVENESS_ANAYZER_H_
-#define V8_COMPILER_LIVENESS_ANAYZER_H_
-
-#include "src/bit-vector.h"
-#include "src/compiler/node.h"
-#include "src/globals.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class LivenessAnalyzerBlock;
-class Node;
-class StateValuesCache;
-
-class NonLiveFrameStateSlotReplacer {
- public:
- void ClearNonLiveFrameStateSlots(Node* frame_state, BitVector* liveness);
- NonLiveFrameStateSlotReplacer(StateValuesCache* state_values_cache,
- Node* replacement, size_t local_count,
- bool has_accumulator, Zone* local_zone)
- : replacement_node_(replacement),
- state_values_cache_(state_values_cache),
- local_zone_(local_zone),
- permanently_live_(
- static_cast<int>(local_count) + (has_accumulator ? 1 : 0),
- local_zone),
- inputs_buffer_(local_zone),
- has_accumulator_(has_accumulator) {}
-
- // TODO(leszeks): Not used by bytecode, remove once AST graph builder is gone.
- void MarkPermanentlyLive(int var) { permanently_live_.Add(var); }
-
- private:
- Node* ClearNonLiveStateValues(Node* frame_state, BitVector* liveness);
-
- StateValuesCache* state_values_cache() { return state_values_cache_; }
- Zone* local_zone() { return local_zone_; }
-
- // Node that replaces dead values.
- Node* replacement_node_;
- // Reference to state values cache so that we can create state values
- // nodes.
- StateValuesCache* state_values_cache_;
-
- Zone* local_zone_;
- BitVector permanently_live_;
- NodeVector inputs_buffer_;
-
- bool has_accumulator_;
-};
-
-class V8_EXPORT_PRIVATE LivenessAnalyzer {
- public:
- LivenessAnalyzer(size_t local_count, bool has_accumulator, Zone* zone);
-
- LivenessAnalyzerBlock* NewBlock();
- LivenessAnalyzerBlock* NewBlock(LivenessAnalyzerBlock* predecessor);
-
- void Run(NonLiveFrameStateSlotReplacer* relaxer);
-
- Zone* zone() { return zone_; }
-
- void Print(std::ostream& os);
-
- size_t local_count() { return local_count_; }
-
- private:
- void Queue(LivenessAnalyzerBlock* block);
-
- Zone* zone_;
- ZoneDeque<LivenessAnalyzerBlock*> blocks_;
- size_t local_count_;
-
- // TODO(leszeks): Always true for bytecode, remove once AST graph builder is
- // gone.
- bool has_accumulator_;
-
- ZoneQueue<LivenessAnalyzerBlock*> queue_;
-};
-
-
-class LivenessAnalyzerBlock {
- public:
- friend class LivenessAnalyzer;
-
- void Lookup(int var) { entries_.push_back(Entry(Entry::kLookup, var)); }
- void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
- void LookupAccumulator() {
- DCHECK(has_accumulator_);
- // The last entry is the accumulator entry.
- entries_.push_back(Entry(Entry::kLookup, live_.length() - 1));
- }
- void BindAccumulator() {
- DCHECK(has_accumulator_);
- // The last entry is the accumulator entry.
- entries_.push_back(Entry(Entry::kBind, live_.length() - 1));
- }
-
- void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
- void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
- LivenessAnalyzerBlock* GetPredecessor() {
- DCHECK(predecessors_.size() == 1);
- return predecessors_[0];
- }
-
- private:
- class Entry {
- public:
- enum Kind { kBind, kLookup, kCheckpoint };
-
- Kind kind() const { return kind_; }
- Node* node() const {
- DCHECK(kind() == kCheckpoint);
- return node_;
- }
- int var() const {
- DCHECK(kind() != kCheckpoint);
- return var_;
- }
-
- explicit Entry(Node* node) : kind_(kCheckpoint), var_(-1), node_(node) {}
- Entry(Kind kind, int var) : kind_(kind), var_(var), node_(nullptr) {
- DCHECK(kind != kCheckpoint);
- }
-
- private:
- Kind kind_;
- int var_;
- Node* node_;
- };
-
- LivenessAnalyzerBlock(size_t id, size_t local_count, bool has_accumulator,
- Zone* zone);
- void Process(BitVector* result, NonLiveFrameStateSlotReplacer* relaxer);
- bool UpdateLive(BitVector* working_area);
-
- void SetQueued() { queued_ = true; }
- bool IsQueued() { return queued_; }
-
- ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_begin() {
- return predecessors_.begin();
- }
- ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_end() {
- return predecessors_.end();
- }
-
- size_t id() { return id_; }
- void Print(std::ostream& os);
-
- ZoneDeque<Entry> entries_;
- ZoneDeque<LivenessAnalyzerBlock*> predecessors_;
-
- BitVector live_;
- bool queued_;
- bool has_accumulator_;
-
- size_t id_;
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_AST_GRAPH_BUILDER_H_
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index b4a5b717e6..775da82587 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -114,8 +114,12 @@ Reduction LoadElimination::Reduce(Node* node) {
return ReduceLoadElement(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
+ case IrOpcode::kTransitionAndStoreElement:
+ return ReduceTransitionAndStoreElement(node);
case IrOpcode::kStoreTypedElement:
return ReduceStoreTypedElement(node);
+ case IrOpcode::kLookupHashStorageIndex:
+ return ReduceLookupHashStorageIndex(node);
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kDead:
@@ -305,6 +309,37 @@ void LoadElimination::AbstractElements::Print() const {
}
}
+Node* LoadElimination::AbstractHashIndexes::Lookup(Node* table,
+ Node* key) const {
+ if (entry_.table == nullptr) return nullptr;
+ if (MustAlias(table, entry_.table) && MustAlias(key, entry_.key)) {
+ return entry_.index;
+ }
+ return nullptr;
+}
+
+bool LoadElimination::AbstractHashIndexes::Equals(
+ AbstractHashIndexes const* that) const {
+ return entry_.table == that->entry_.table && entry_.key == that->entry_.key &&
+ entry_.index == that->entry_.index;
+}
+
+LoadElimination::AbstractHashIndexes const*
+LoadElimination::AbstractHashIndexes::Merge(AbstractHashIndexes const* that,
+ Zone* zone) const {
+ if (this->Equals(that)) return this;
+ return nullptr;
+}
+
+void LoadElimination::AbstractHashIndexes::Print() const {
+ if (entry_.table) {
+ PrintF(" #%d:%s @ #%d:%s -> #%d:%s\n", entry_.table->id(),
+ entry_.table->op()->mnemonic(), entry_.key->id(),
+ entry_.key->op()->mnemonic(), entry_.index->id(),
+ entry_.index->op()->mnemonic());
+ }
+}
+
Node* LoadElimination::AbstractField::Lookup(Node* object) const {
for (auto pair : info_for_node_) {
if (MustAlias(object, pair.first)) return pair.second;
@@ -434,6 +469,13 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
if (this->maps_) {
this->maps_ = that->maps_ ? that->maps_->Merge(this->maps_, zone) : nullptr;
}
+
+ // Merge the information about hash maps.
+ if (this->hash_indexes_) {
+ this->hash_indexes_ = that->hash_indexes_ ? that->hash_indexes_->Merge(
+ this->hash_indexes_, zone)
+ : nullptr;
+ }
}
Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
@@ -504,6 +546,26 @@ LoadElimination::AbstractState::AddElement(Node* object, Node* index,
return that;
}
+Node* LoadElimination::AbstractState::LookupHashIndex(Node* table,
+ Node* key) const {
+ if (this->hash_indexes_) {
+ return this->hash_indexes_->Lookup(table, key);
+ }
+ return nullptr;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::AddHashIndex(Node* table, Node* key,
+ Node* index, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->hash_indexes_) {
+ that->hash_indexes_ = that->hash_indexes_->Extend(table, key, index, zone);
+ } else {
+ that->hash_indexes_ = new (zone) AbstractHashIndexes(table, key, index);
+ }
+ return that;
+}
+
LoadElimination::AbstractState const*
LoadElimination::AbstractState::KillElement(Node* object, Node* index,
Zone* zone) const {
@@ -724,6 +786,30 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
return UpdateState(node, state);
}
+Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Handle<Map> double_map(DoubleMapParameterOf(node->op()));
+ Handle<Map> fast_map(FastMapParameterOf(node->op()));
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ // We need to add the double and fast maps to the set of possible maps for
+ // this object, because we don't know which of those we'll transition to.
+ // Additionally, we should kill all alias information.
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps)) {
+ object_maps.insert(double_map, zone());
+ object_maps.insert(fast_map, zone());
+ state = state->KillMaps(object, zone());
+ state = state->AddMaps(object, object_maps, zone());
+ }
+ // Kill the elements as well.
+ state =
+ state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ return UpdateState(node, state);
+}
+
Reduction LoadElimination::ReduceLoadField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
@@ -785,7 +871,7 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
if (new_value_type->IsHeapConstant()) {
// Record the new {object} map information.
ZoneHandleSet<Map> object_maps(
- Handle<Map>::cast(new_value_type->AsHeapConstant()->Value()));
+ bit_cast<Handle<Map>>(new_value_type->AsHeapConstant()->Value()));
state = state->AddMaps(object, object_maps, zone());
}
} else {
@@ -819,9 +905,6 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
ElementAccess const& access = ElementAccessOf(node->op());
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
UNREACHABLE();
break;
@@ -879,9 +962,6 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
// Only record the new value if the store doesn't have an implicit truncation.
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
UNREACHABLE();
break;
@@ -911,6 +991,25 @@ Reduction LoadElimination::ReduceStoreTypedElement(Node* node) {
return UpdateState(node, state);
}
+Reduction LoadElimination::ReduceLookupHashStorageIndex(Node* node) {
+ Node* table = node->InputAt(0);
+ Node* key = node->InputAt(1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ if (Node* replacement = state->LookupHashIndex(table, key)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
+ }
+ state = state->AddHashIndex(table, key, node, zone());
+ return UpdateState(node, state);
+}
+
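ReduceLookupHashStorageIndex is effectively common-subexpression elimination for hash-table index computations: the first lookup of a given (table, key) pair on an effect path is recorded in the abstract state, and any later lookup of the same pair is replaced by the earlier node. A self-contained sketch of the same caching idea with ordinary STL containers (illustrative only; the real implementation lives in the zone-allocated AbstractHashIndexes class added in the header below):

    #include <map>
    #include <utility>

    struct Node { int id; };  // stand-in for a compiler graph node

    // Per-effect-path cache: (table, key) -> node that already computed the
    // hash-storage index for that pair.
    class HashIndexCache {
     public:
      Node* Lookup(Node* table, Node* key) const {
        auto it = entries_.find(std::make_pair(table, key));
        return it == entries_.end() ? nullptr : it->second;
      }
      void Add(Node* table, Node* key, Node* index_node) {
        entries_[std::make_pair(table, key)] = index_node;
      }

     private:
      std::map<std::pair<Node*, Node*>, Node*> entries_;
    };

    // Mirrors the reduction: reuse an earlier result if one exists on this
    // path, otherwise remember the current node as the canonical computation.
    Node* ReduceLookup(HashIndexCache* cache, Node* table, Node* key,
                       Node* node) {
      if (Node* hit = cache->Lookup(table, key)) return hit;  // Replace(hit)
      cache->Add(table, key, node);
      return node;  // NoChange / UpdateState
    }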
Reduction LoadElimination::ReduceEffectPhi(Node* node) {
Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
Node* const control = NodeProperties::GetControlInput(node);
@@ -1037,6 +1136,15 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
}
break;
}
+ case IrOpcode::kTransitionAndStoreElement: {
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ // Invalidate what we know about the {object}s map.
+ state = state->KillMaps(object, zone());
+ // Kill the elements as well.
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ break;
+ }
case IrOpcode::kStoreField: {
FieldAccess const& access = FieldAccessOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
@@ -1092,9 +1200,6 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
UNREACHABLE();
break;
case MachineRepresentation::kWord32:
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 5d09aa5124..dc65a12e11 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -32,6 +32,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
: AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
~LoadElimination() final {}
+ const char* reducer_name() const override { return "LoadElimination"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -123,6 +125,46 @@ class V8_EXPORT_PRIVATE LoadElimination final
size_t next_index_ = 0;
};
+ // Abstract state to approximate the current state of a hash map along the
+ // effect paths through the graph.
+ class AbstractHashIndexes final : public ZoneObject {
+ public:
+ AbstractHashIndexes() {}
+
+ AbstractHashIndexes(Node* table, Node* key, Node* index)
+ : AbstractHashIndexes() {
+ entry_ = Entry(table, key, index);
+ }
+
+ AbstractHashIndexes const* Extend(Node* table, Node* key, Node* index,
+ Zone* zone) const {
+ // Currently, we only hold one entry, so we just create a new
+ // state with that single entry.
+ AbstractHashIndexes* that =
+ new (zone) AbstractHashIndexes(table, key, index);
+ return that;
+ }
+ Node* Lookup(Node* table, Node* key) const;
+ bool Equals(AbstractHashIndexes const* that) const;
+ AbstractHashIndexes const* Merge(AbstractHashIndexes const* that,
+ Zone* zone) const;
+
+ void Print() const;
+
+ private:
+ struct Entry {
+ Entry() {}
+ Entry(Node* table, Node* key, Node* index)
+ : table(table), key(key), index(index) {}
+
+ Node* table = nullptr;
+ Node* key = nullptr;
+ Node* index = nullptr;
+ };
+
+ Entry entry_;
+ };
+
// Abstract state to approximate the current state of a certain field along
// the effect paths through the graph.
class AbstractField final : public ZoneObject {
@@ -240,6 +282,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
Zone* zone) const;
Node* LookupElement(Node* object, Node* index,
MachineRepresentation representation) const;
+ AbstractState const* AddHashIndex(Node* table, Node* key, Node* index,
+ Zone* zone) const;
+ Node* LookupHashIndex(Node* table, Node* key) const;
AbstractState const* AddCheck(Node* node, Zone* zone) const;
Node* LookupCheck(Node* node) const;
@@ -251,6 +296,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractElements const* elements_ = nullptr;
AbstractField const* fields_[kMaxTrackedFields];
AbstractMaps const* maps_ = nullptr;
+ AbstractHashIndexes const* hash_indexes_ = nullptr;
};
class AbstractStateForEffectNodes final : public ZoneObject {
@@ -274,7 +320,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
+ Reduction ReduceTransitionAndStoreElement(Node* node);
Reduction ReduceStoreTypedElement(Node* node);
+ Reduction ReduceLookupHashStorageIndex(Node* node);
Reduction ReduceEffectPhi(Node* node);
Reduction ReduceStart(Node* node);
Reduction ReduceOtherNode(Node* node);
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index fb3e1e753b..084d4ce06a 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -123,7 +123,6 @@ class LoopTree : public ZoneObject {
if (node->opcode() == IrOpcode::kLoop) return node;
}
UNREACHABLE();
- return nullptr;
}
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 6ac7a163e1..32123b3440 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -794,9 +794,6 @@ class MachineRepresentationChecker {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index a50f0dcb1b..383f2799fe 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -800,8 +800,8 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
int32_t const divisor = m.right().Value();
Node* const dividend = m.left().node();
Node* quotient = dividend;
- if (base::bits::IsPowerOfTwo32(Abs(divisor))) {
- uint32_t const shift = WhichPowerOf2Abs(divisor);
+ if (base::bits::IsPowerOfTwo(Abs(divisor))) {
+ uint32_t const shift = WhichPowerOf2(Abs(divisor));
DCHECK_NE(0u, shift);
if (shift > 1) {
quotient = Word32Sar(quotient, 31);
@@ -840,7 +840,7 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
- if (base::bits::IsPowerOfTwo32(divisor)) { // x / 2^n => x >> n
+ if (base::bits::IsPowerOfTwo(divisor)) { // x / 2^n => x >> n
node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32Shr());
@@ -866,8 +866,8 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
}
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
- int32_t const divisor = Abs(m.right().Value());
- if (base::bits::IsPowerOfTwo32(divisor)) {
+ uint32_t const divisor = Abs(m.right().Value());
+ if (base::bits::IsPowerOfTwo(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
Diamond d(graph(), common(),
@@ -903,7 +903,7 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
- if (base::bits::IsPowerOfTwo32(divisor)) { // x % 2^n => x & 2^n-1
+ if (base::bits::IsPowerOfTwo(divisor)) { // x % 2^n => x & 2^n-1
node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32And());
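All four reductions in this hunk rest on the usual power-of-two identities: for an unsigned dividend x and divisor 2^n, x / 2^n equals x >> n and x % 2^n equals x & (2^n - 1); the signed Int32Div/Int32Mod cases additionally need the bias and sign fixups shown above because division truncates toward zero. The unsigned identities can be checked in isolation (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t x = 0xDEADBEEFu;
      for (unsigned n = 1; n < 32; ++n) {
        const uint32_t pow2 = uint32_t{1} << n;
        assert(x / pow2 == (x >> n));          // x / 2^n  =>  x >> n
        assert(x % pow2 == (x & (pow2 - 1)));  // x % 2^n  =>  x & (2^n - 1)
      }
      return 0;
    }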
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 593f7f2d22..278db5324d 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -28,6 +28,8 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
bool allow_signalling_nan = true);
~MachineOperatorReducer();
+ const char* reducer_name() const override { return "MachineOperatorReducer"; }
+
Reduction Reduce(Node* node) override;
private:
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 96f7dc1a91..b137543e00 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -270,15 +270,15 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I32x4MaxS, Operator::kCommutative, 2, 0, 1) \
V(I32x4Eq, Operator::kCommutative, 2, 0, 1) \
V(I32x4Ne, Operator::kCommutative, 2, 0, 1) \
- V(I32x4LtS, Operator::kNoProperties, 2, 0, 1) \
- V(I32x4LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GeS, Operator::kNoProperties, 2, 0, 1) \
V(I32x4UConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4UConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
V(I32x4UConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \
V(I32x4MinU, Operator::kCommutative, 2, 0, 1) \
V(I32x4MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I32x4LtU, Operator::kNoProperties, 2, 0, 1) \
- V(I32x4LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GeU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -294,8 +294,8 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I16x8MaxS, Operator::kCommutative, 2, 0, 1) \
V(I16x8Eq, Operator::kCommutative, 2, 0, 1) \
V(I16x8Ne, Operator::kCommutative, 2, 0, 1) \
- V(I16x8LtS, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GeS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8UConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
@@ -303,8 +303,8 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8MinU, Operator::kCommutative, 2, 0, 1) \
V(I16x8MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I16x8LtU, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GeU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \
V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
@@ -317,15 +317,15 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I8x16MaxS, Operator::kCommutative, 2, 0, 1) \
V(I8x16Eq, Operator::kCommutative, 2, 0, 1) \
V(I8x16Ne, Operator::kCommutative, 2, 0, 1) \
- V(I8x16LtS, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GeS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1) \
V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16MinU, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I8x16LtU, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GeU, Operator::kNoProperties, 2, 0, 1) \
V(S128Load, Operator::kNoProperties, 2, 0, 1) \
V(S128Store, Operator::kNoProperties, 3, 0, 1) \
V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
@@ -333,28 +333,11 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(S128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
- V(S32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(S16x8Select, Operator::kNoProperties, 3, 0, 1) \
- V(S8x16Select, Operator::kNoProperties, 3, 0, 1) \
- V(S1x4Zero, Operator::kNoProperties, 0, 0, 1) \
- V(S1x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x4Not, Operator::kNoProperties, 1, 0, 1) \
+ V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x8Zero, Operator::kNoProperties, 0, 0, 1) \
- V(S1x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x8Not, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x16Zero, Operator::kNoProperties, 0, 0, 1) \
- V(S1x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x16Not, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)
@@ -710,7 +693,6 @@ const Operator* MachineOperatorBuilder::UnalignedLoad(
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::UnalignedStore(
@@ -722,14 +704,10 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
#define PURE(Name, properties, value_input_count, control_input_count, \
@@ -759,7 +737,6 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
@@ -770,7 +747,6 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
@@ -810,14 +786,10 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::ProtectedStore(
@@ -830,14 +802,10 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
@@ -865,7 +833,6 @@ const Operator* MachineOperatorBuilder::CheckedLoad(
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
@@ -878,14 +845,10 @@ const Operator* MachineOperatorBuilder::CheckedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
@@ -896,7 +859,6 @@ const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
@@ -907,7 +869,6 @@ const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
@@ -918,7 +879,6 @@ const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
@@ -929,7 +889,6 @@ const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
@@ -940,7 +899,6 @@ const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
ATOMIC_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
@@ -951,7 +909,6 @@ const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
ATOMIC_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
@@ -962,7 +919,6 @@ const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
ATOMIC_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
@@ -973,7 +929,6 @@ const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
ATOMIC_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
@@ -984,7 +939,6 @@ const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
ATOMIC_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
- return nullptr;
}
#define SIMD_LANE_OPS(Type, lane_count) \
@@ -1027,23 +981,8 @@ SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
#undef SIMD_SHIFT_OPS
-const Operator* MachineOperatorBuilder::S32x4Shuffle(uint8_t shuffle[16]) {
- uint8_t* array = zone_->NewArray<uint8_t>(4);
- memcpy(array, shuffle, 4);
- return new (zone_)
- Operator1<uint8_t*>(IrOpcode::kS32x4Shuffle, Operator::kPure, "Shuffle",
- 2, 0, 0, 1, 0, 0, array);
-}
-
-const Operator* MachineOperatorBuilder::S16x8Shuffle(uint8_t shuffle[16]) {
- uint8_t* array = zone_->NewArray<uint8_t>(8);
- memcpy(array, shuffle, 8);
- return new (zone_)
- Operator1<uint8_t*>(IrOpcode::kS16x8Shuffle, Operator::kPure, "Shuffle",
- 2, 0, 0, 1, 0, 0, array);
-}
-
-const Operator* MachineOperatorBuilder::S8x16Shuffle(uint8_t shuffle[16]) {
+const Operator* MachineOperatorBuilder::S8x16Shuffle(
+ const uint8_t shuffle[16]) {
uint8_t* array = zone_->NewArray<uint8_t>(16);
memcpy(array, shuffle, 16);
return new (zone_)
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 82d40a09e3..457c598de1 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -494,8 +494,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4MaxS();
const Operator* I32x4Eq();
const Operator* I32x4Ne();
- const Operator* I32x4LtS();
- const Operator* I32x4LeS();
+ const Operator* I32x4GtS();
+ const Operator* I32x4GeS();
const Operator* I32x4UConvertF32x4();
const Operator* I32x4UConvertI16x8Low();
@@ -503,8 +503,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4ShrU(int32_t);
const Operator* I32x4MinU();
const Operator* I32x4MaxU();
- const Operator* I32x4LtU();
- const Operator* I32x4LeU();
+ const Operator* I32x4GtU();
+ const Operator* I32x4GeU();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLane(int32_t);
@@ -525,8 +525,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8MaxS();
const Operator* I16x8Eq();
const Operator* I16x8Ne();
- const Operator* I16x8LtS();
- const Operator* I16x8LeS();
+ const Operator* I16x8GtS();
+ const Operator* I16x8GeS();
const Operator* I16x8UConvertI8x16Low();
const Operator* I16x8UConvertI8x16High();
@@ -536,8 +536,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8SubSaturateU();
const Operator* I16x8MinU();
const Operator* I16x8MaxU();
- const Operator* I16x8LtU();
- const Operator* I16x8LeU();
+ const Operator* I16x8GtU();
+ const Operator* I16x8GeU();
const Operator* I8x16Splat();
const Operator* I8x16ExtractLane(int32_t);
@@ -555,8 +555,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16MaxS();
const Operator* I8x16Eq();
const Operator* I8x16Ne();
- const Operator* I8x16LtS();
- const Operator* I8x16LeS();
+ const Operator* I8x16GtS();
+ const Operator* I8x16GeS();
const Operator* I8x16ShrU(int32_t);
const Operator* I8x16UConvertI16x8();
@@ -564,8 +564,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16SubSaturateU();
const Operator* I8x16MinU();
const Operator* I8x16MaxU();
- const Operator* I8x16LtU();
- const Operator* I8x16LeU();
+ const Operator* I8x16GtU();
+ const Operator* I8x16GeU();
const Operator* S128Load();
const Operator* S128Store();
@@ -575,35 +575,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Or();
const Operator* S128Xor();
const Operator* S128Not();
+ const Operator* S128Select();
+
+ const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* S32x4Shuffle(uint8_t shuffle[16]);
- const Operator* S32x4Select();
- const Operator* S16x8Shuffle(uint8_t shuffle[16]);
- const Operator* S16x8Select();
- const Operator* S8x16Shuffle(uint8_t shuffle[16]);
- const Operator* S8x16Select();
-
- const Operator* S1x4Zero();
- const Operator* S1x4And();
- const Operator* S1x4Or();
- const Operator* S1x4Xor();
- const Operator* S1x4Not();
const Operator* S1x4AnyTrue();
const Operator* S1x4AllTrue();
-
- const Operator* S1x8Zero();
- const Operator* S1x8And();
- const Operator* S1x8Or();
- const Operator* S1x8Xor();
- const Operator* S1x8Not();
const Operator* S1x8AnyTrue();
const Operator* S1x8AllTrue();
-
- const Operator* S1x16Zero();
- const Operator* S1x16And();
- const Operator* S1x16Or();
- const Operator* S1x16Xor();
- const Operator* S1x16Not();
const Operator* S1x16AnyTrue();
const Operator* S1x16AllTrue();
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 5055735ba6..e87f210264 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -14,8 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
@@ -80,11 +79,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
@@ -96,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(zero_reg);
}
Operand InputOperand(size_t index) {
@@ -120,7 +116,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
UNREACHABLE();
- return MemOperand(no_reg);
}
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
@@ -233,7 +228,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -251,10 +247,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ Addu(scratch1_, object_, index_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(ra);
}
@@ -268,15 +264,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
-#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
class ool_name final : public OutOfLineCode { \
public: \
ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
: OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
\
- void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
\
private: \
T const dst_; \
@@ -320,7 +317,6 @@ Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -334,7 +330,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -368,7 +363,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
break;
}
UNREACHABLE();
- return kNoFPUCondition;
}
} // namespace
@@ -518,7 +512,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -530,7 +524,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
@@ -579,7 +573,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -587,10 +581,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
+ tasm->Subu(sp, sp, stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
+ tasm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -599,13 +593,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -619,8 +613,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
@@ -636,8 +629,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
@@ -752,7 +744,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -855,8 +848,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log2);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@@ -938,68 +931,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Clz(i.OutputRegister(), i.InputRegister(0));
break;
case kMipsCtz: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- Label skip_for_zero;
- Label end;
- // Branch if the operand is zero
- __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
- // Find the number of bits before the last bit set to 1.
- __ Subu(reg2, zero_reg, i.InputRegister(0));
- __ And(reg2, reg2, i.InputRegister(0));
- __ clz(reg2, reg2);
- // Get the number of bits after the last bit set to 1.
- __ li(reg1, 0x1F);
- __ Subu(i.OutputRegister(), reg1, reg2);
- __ Branch(&end);
- __ bind(&skip_for_zero);
- // If the operand is zero, return word length as the result.
- __ li(i.OutputRegister(), 0x20);
- __ bind(&end);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ if (IsMipsArchVariant(kMips32r6)) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ __ rotr(dst, src, 16);
+ __ wsbh(dst, dst);
+ __ bitswap(dst, dst);
+ __ Clz(dst, dst);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ __ Addu(kScratchReg, src, -1);
+ __ Xor(dst, kScratchReg, src);
+ __ And(dst, dst, kScratchReg);
+ // Count number of leading zeroes.
+ __ Clz(dst, dst);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ __ li(kScratchReg, 32);
+ __ Subu(dst, kScratchReg, dst);
+ }
} break;
case kMipsPopcnt: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- uint32_t m1 = 0x55555555;
- uint32_t m2 = 0x33333333;
- uint32_t m4 = 0x0f0f0f0f;
- uint32_t m8 = 0x00ff00ff;
- uint32_t m16 = 0x0000ffff;
-
- // Put count of ones in every 2 bits into those 2 bits.
- __ li(at, m1);
- __ srl(reg1, i.InputRegister(0), 1);
- __ And(reg2, i.InputRegister(0), at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Put count of ones in every 4 bits into those 4 bits.
- __ li(at, m2);
- __ srl(reg2, reg1, 2);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Put count of ones in every 8 bits into those 8 bits.
- __ li(at, m4);
- __ srl(reg2, reg1, 4);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Put count of ones in every 16 bits into those 16 bits.
- __ li(at, m8);
- __ srl(reg2, reg1, 8);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Calculate total number of ones.
- __ li(at, m16);
- __ srl(reg2, reg1, 16);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(i.OutputRegister(), reg1, reg2);
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // An unrolled loop-based algorithm requires 25 instructions.
+ //
+ // For 64-bit quantities, this algorithm gets executed twice, (once
+ // for in_lo, and again for in_hi), but saves a few instructions
+ // because the mask values only have to be loaded once. Using this
+ // algorithm the count for a 64-bit operand can be performed in 29
+ // instructions compared to a loop-based algorithm which requires 47
+ // instructions.
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ __ srl(kScratchReg, src, 1);
+ __ li(kScratchReg2, B0);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Subu(kScratchReg, src, kScratchReg);
+ __ li(kScratchReg2, B1);
+ __ And(dst, kScratchReg, kScratchReg2);
+ __ srl(kScratchReg, kScratchReg, 2);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Addu(kScratchReg, dst, kScratchReg);
+ __ srl(dst, kScratchReg, 4);
+ __ Addu(dst, dst, kScratchReg);
+ __ li(kScratchReg2, B2);
+ __ And(dst, dst, kScratchReg2);
+ __ li(kScratchReg, value);
+ __ Mul(dst, dst, kScratchReg);
+ __ srl(dst, dst, shift);
} break;
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1120,7 +1120,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1206,7 +1206,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1653,24 +1653,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
UNREACHABLE();
break;
case kMipsS128Zero: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
i.OutputSimd128Register());
break;
}
case kMipsI32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMipsI32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMipsI32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -1680,31 +1680,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsI32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
case kMipsF32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
__ FmoveLow(i.OutputSingleRegister(), kScratchReg);
break;
}
case kMipsF32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -1715,213 +1715,211 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsF32x4SConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4UConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMipsI32x4Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMipsI32x4ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMipsI32x4ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMipsI32x4MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsS32x4Select:
- case kMipsS16x8Select:
- case kMipsS8x16Select: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ case kMipsS128Select: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Abs: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMipsF32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMipsF32x4RecipApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4RecipSqrtApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Max: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Min: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Lt: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Le: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4SConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4UConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMipsI16x8Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMipsI16x8ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMipsI16x8ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -1931,146 +1929,146 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsI16x8Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI16x8Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMipsI16x8ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMipsI16x8ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMipsI16x8Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8AddSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8SubSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
- case kMipsI16x8LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI16x8LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMipsI16x8AddSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8SubSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI16x8LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMipsI8x16Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMipsI8x16ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMipsI8x16ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2080,24 +2078,637 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsI8x16Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI8x16Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kMipsI8x16ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
+ case kMipsI8x16Add: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16AddSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Sub: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16SubSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Mul: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MaxS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MinS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Eq: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMipsI8x16GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16ShrU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMipsI8x16AddSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16SubSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MaxU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MinU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsS128And: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS128Or: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS128Xor: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS128Not: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsS1x4AnyTrue:
+ case kMipsS1x8AnyTrue:
+ case kMipsS1x16AnyTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_false;
+
+ __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, 0); // branch delay slot
+ __ li(dst, -1);
+ __ bind(&all_false);
+ break;
+ }
+ case kMipsS1x4AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMipsS1x8AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMipsS1x16AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
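// Note on the cases above: the three AnyTrue opcodes share one case because
// the whole-register test does not depend on element width, while AllTrue
// needs one branch per width. In both patterns the MSA branch always executes
// its delay slot, so the `li` marked "branch delay slot" runs on either path
// and only the fall-through path overwrites dst: AnyTrue yields -1 unless
// every lane is zero, AllTrue yields -1 only if every lane is non-zero.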
+ case kMipsMsaLd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
+ break;
+ }
+ case kMipsMsaSt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
+ break;
+ }
+ case kMipsS32x4InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [5, 1, 4, 0]
+ __ ilvr_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 6, 2]
+ __ ilvl_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 4, 2, 0]
+ __ pckev_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 5, 3, 1]
+ __ pckod_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 2, 4, 0]
+ __ ilvev_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 5, 1]
+ __ ilvod_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int32_t shuffle = i.InputInt32(2);
+
+ if (src0.is(src1)) {
+ // Unary S32x4 shuffles are handled with the shf.w instruction
+ uint32_t i8 = 0;
+ for (int i = 0; i < 4; i++) {
+ int lane = shuffle & 0xff;
+ DCHECK(lane < 4);
+ i8 |= lane << (2 * i);
+ shuffle >>= 8;
+ }
+ __ shf_w(dst, src0, i8);
+ } else {
+ // Binary shuffles use the vshf.w instruction
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
+ __ li(kScratchReg, i.InputInt32(2));
+ __ insert_w(dst, 0, kScratchReg);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(dst, kSimd128RegZero, dst);
+ __ ilvr_h(dst, kSimd128RegZero, dst);
+ __ vshf_w(dst, src1, src0);
+ }
+ break;
+ }
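// Note on the binary path above: the four packed lane indices are inserted as
// word 0 of dst, then widened byte -> halfword -> word by interleaving with a
// zeroed register, so each word lane of dst ends up holding one zero-extended
// index for vshf.w to use as its selection vector over the two sources.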
+ case kMipsS16x8InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [11, 3, 10, 2, 9, 1, 8, 0]
+ __ ilvr_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, 14, 6, 13, 5, 12, 4]
+ __ ilvl_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 12, 10, 8, 6, 4, 2, 0]
+ __ pckev_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 13, 11, 9, 7, 5, 3, 1]
+ __ pckod_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 6, 12, 4, 10, 2, 8, 0]
+ __ ilvev_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, ... 11, 3, 9, 1]
+ __ ilvod_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMipsS16x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 2, 3, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMipsS8x16InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [23, 7, ... 17, 1, 16, 0]
+ __ ilvr_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 25, 9, 24, 8]
+ __ ilvl_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 28, ... 6, 4, 2, 0]
+ __ pckev_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 29, ... 7, 5, 3, 1]
+ __ pckod_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 14, ... 18, 2, 16, 0]
+ __ ilvev_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 19, 3, 17, 1]
+ __ ilvod_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16Concat: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
+ break;
+ }
+ case kMipsS8x16Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
+ __ li(kScratchReg, i.InputInt32(2));
+ __ insert_w(dst, 0, kScratchReg);
+ __ li(kScratchReg, i.InputInt32(3));
+ __ insert_w(dst, 1, kScratchReg);
+ __ li(kScratchReg, i.InputInt32(4));
+ __ insert_w(dst, 2, kScratchReg);
+ __ li(kScratchReg, i.InputInt32(5));
+ __ insert_w(dst, 3, kScratchReg);
+ __ vshf_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x8Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+ // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
+ // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
+ // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
+ __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
+ __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
+ break;
+ }
+ case kMipsS8x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMipsS8x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMipsI32x4SConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMipsI32x4SConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMipsI32x4UConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4UConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
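// Note on the widening conversions above: the signed variants interleave the
// source with itself, duplicating each halfword across a full word, and then
// sign-extend with a shift-left/arithmetic-shift-right pair by 16; the
// unsigned variants interleave with a zeroed register instead, which
// zero-extends in a single step.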
+ case kMipsI16x8SConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMipsI16x8SConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMipsI16x8SConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_w(kSimd128ScratchReg, src0, 15);
+ __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI16x8UConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_w(kSimd128ScratchReg, src0, 15);
+ __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI16x8UConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8UConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16SConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_h(kSimd128ScratchReg, src0, 7);
+ __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI8x16UConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_h(kSimd128ScratchReg, src0, 7);
+ __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
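// Note on the narrowing conversions above: each input is first clamped to the
// destination range with sat_s/sat_u, then pckev keeps the low halfword (or
// byte) of every element, placing src0's results in the low half of dst and
// src1's in the high half.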
+ case kMipsF32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
+ __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
+ __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
+ __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_d(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI16x8AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_w(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
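// Note on the AddHoriz cases above: for floats, swapping adjacent lanes with
// shf.w 0xB1 and adding leaves every lane holding its pair's sum, and pckev.w
// then keeps one copy per pair; the integer cases use hadd to sum adjacent
// lanes into double-width elements and pckev to take the low half of each
// sum, again with src0's results in the low half of dst and src1's in the
// high half.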
}
return kSuccess;
} // NOLINT(readability/fn_size)
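The 8-bit shf.df immediates used repeatedly above (0x1B, 0xB1) encode a
permutation of four elements, two bits per destination slot, applied per group
of four for the byte and halfword forms. A minimal standalone sketch of that
encoding (the helper name is illustrative, not from the V8 sources):

#include <cassert>
#include <cstdint>

// Destination slot i takes source element (imm >> (2 * i)) & 3.
constexpr uint8_t PackShfImm(int e0, int e1, int e2, int e3) {
  return static_cast<uint8_t>(e0 | (e1 << 2) | (e2 << 4) | (e3 << 6));
}

int main() {
  // Full 4-element reverse, written "0 1 2 3" high-to-low in the comments.
  assert(PackShfImm(3, 2, 1, 0) == 0x1B);
  // Swap adjacent pairs, written "2 3 0 1" high-to-low in the comments.
  assert(PackShfImm(1, 0, 3, 2) == 0xB1);
  return 0;
}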
@@ -2134,11 +2745,11 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
-void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ masm->
+#define __ tasm->
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
@@ -2227,14 +2838,14 @@ void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ masm()->
+#define __ tasm()->
}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -2277,14 +2888,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ tasm()->isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(tasm()->isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2303,7 +2914,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
bool frame_elided = !frame_access_state()->has_frame();
auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
// Assembles boolean materializations after an instruction.
@@ -2325,7 +2936,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
if (instr->InputAt(1)->IsImmediate() &&
- base::bits::IsPowerOfTwo32(i.InputOperand(1).immediate())) {
+ base::bits::IsPowerOfTwo(i.InputOperand(1).immediate())) {
uint16_t pos =
base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
__ Ext(result, i.InputRegister(0), pos, 1);
@@ -2504,7 +3115,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2563,7 +3174,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
@@ -2672,13 +3283,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
UNREACHABLE();
break;
case Constant::kFloat64:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
@@ -2717,7 +3328,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- __ Move(dst, src.ToFloat64());
+ __ Move(dst, src.ToFloat64().value());
if (destination->IsFPStackSlot()) {
__ Sdc1(dst, g.ToMemOperand(destination));
}
@@ -2875,11 +3486,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block trampoline pool emission for the duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index f80fae9340..3a2a873e48 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -138,6 +138,7 @@ namespace compiler {
V(MipsI32x4ExtractLane) \
V(MipsI32x4ReplaceLane) \
V(MipsI32x4Add) \
+ V(MipsI32x4AddHoriz) \
V(MipsI32x4Sub) \
V(MipsF32x4Splat) \
V(MipsF32x4ExtractLane) \
@@ -154,12 +155,12 @@ namespace compiler {
V(MipsI32x4ShrU) \
V(MipsI32x4MaxU) \
V(MipsI32x4MinU) \
- V(MipsS32x4Select) \
V(MipsF32x4Abs) \
V(MipsF32x4Neg) \
V(MipsF32x4RecipApprox) \
V(MipsF32x4RecipSqrtApprox) \
V(MipsF32x4Add) \
+ V(MipsF32x4AddHoriz) \
V(MipsF32x4Sub) \
V(MipsF32x4Mul) \
V(MipsF32x4Max) \
@@ -171,10 +172,10 @@ namespace compiler {
V(MipsI32x4SConvertF32x4) \
V(MipsI32x4UConvertF32x4) \
V(MipsI32x4Neg) \
- V(MipsI32x4LtS) \
- V(MipsI32x4LeS) \
- V(MipsI32x4LtU) \
- V(MipsI32x4LeU) \
+ V(MipsI32x4GtS) \
+ V(MipsI32x4GeS) \
+ V(MipsI32x4GtU) \
+ V(MipsI32x4GeU) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLane) \
V(MipsI16x8ReplaceLane) \
@@ -184,6 +185,7 @@ namespace compiler {
V(MipsI16x8ShrU) \
V(MipsI16x8Add) \
V(MipsI16x8AddSaturateS) \
+ V(MipsI16x8AddHoriz) \
V(MipsI16x8Sub) \
V(MipsI16x8SubSaturateS) \
V(MipsI16x8Mul) \
@@ -191,22 +193,89 @@ namespace compiler {
V(MipsI16x8MinS) \
V(MipsI16x8Eq) \
V(MipsI16x8Ne) \
- V(MipsI16x8LtS) \
- V(MipsI16x8LeS) \
+ V(MipsI16x8GtS) \
+ V(MipsI16x8GeS) \
V(MipsI16x8AddSaturateU) \
V(MipsI16x8SubSaturateU) \
V(MipsI16x8MaxU) \
V(MipsI16x8MinU) \
- V(MipsI16x8LtU) \
- V(MipsI16x8LeU) \
+ V(MipsI16x8GtU) \
+ V(MipsI16x8GeU) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLane) \
V(MipsI8x16ReplaceLane) \
V(MipsI8x16Neg) \
V(MipsI8x16Shl) \
V(MipsI8x16ShrS) \
- V(MipsS16x8Select) \
- V(MipsS8x16Select)
+ V(MipsI8x16Add) \
+ V(MipsI8x16AddSaturateS) \
+ V(MipsI8x16Sub) \
+ V(MipsI8x16SubSaturateS) \
+ V(MipsI8x16Mul) \
+ V(MipsI8x16MaxS) \
+ V(MipsI8x16MinS) \
+ V(MipsI8x16Eq) \
+ V(MipsI8x16Ne) \
+ V(MipsI8x16GtS) \
+ V(MipsI8x16GeS) \
+ V(MipsI8x16ShrU) \
+ V(MipsI8x16AddSaturateU) \
+ V(MipsI8x16SubSaturateU) \
+ V(MipsI8x16MaxU) \
+ V(MipsI8x16MinU) \
+ V(MipsI8x16GtU) \
+ V(MipsI8x16GeU) \
+ V(MipsS128And) \
+ V(MipsS128Or) \
+ V(MipsS128Xor) \
+ V(MipsS128Not) \
+ V(MipsS128Select) \
+ V(MipsS1x4AnyTrue) \
+ V(MipsS1x4AllTrue) \
+ V(MipsS1x8AnyTrue) \
+ V(MipsS1x8AllTrue) \
+ V(MipsS1x16AnyTrue) \
+ V(MipsS1x16AllTrue) \
+ V(MipsS32x4InterleaveRight) \
+ V(MipsS32x4InterleaveLeft) \
+ V(MipsS32x4PackEven) \
+ V(MipsS32x4PackOdd) \
+ V(MipsS32x4InterleaveEven) \
+ V(MipsS32x4InterleaveOdd) \
+ V(MipsS32x4Shuffle) \
+ V(MipsS16x8InterleaveRight) \
+ V(MipsS16x8InterleaveLeft) \
+ V(MipsS16x8PackEven) \
+ V(MipsS16x8PackOdd) \
+ V(MipsS16x8InterleaveEven) \
+ V(MipsS16x8InterleaveOdd) \
+ V(MipsS16x4Reverse) \
+ V(MipsS16x2Reverse) \
+ V(MipsS8x16InterleaveRight) \
+ V(MipsS8x16InterleaveLeft) \
+ V(MipsS8x16PackEven) \
+ V(MipsS8x16PackOdd) \
+ V(MipsS8x16InterleaveEven) \
+ V(MipsS8x16InterleaveOdd) \
+ V(MipsS8x16Shuffle) \
+ V(MipsS8x16Concat) \
+ V(MipsS8x8Reverse) \
+ V(MipsS8x4Reverse) \
+ V(MipsS8x2Reverse) \
+ V(MipsMsaLd) \
+ V(MipsMsaSt) \
+ V(MipsI32x4SConvertI16x8Low) \
+ V(MipsI32x4SConvertI16x8High) \
+ V(MipsI32x4UConvertI16x8Low) \
+ V(MipsI32x4UConvertI16x8High) \
+ V(MipsI16x8SConvertI8x16Low) \
+ V(MipsI16x8SConvertI8x16High) \
+ V(MipsI16x8SConvertI32x4) \
+ V(MipsI16x8UConvertI32x4) \
+ V(MipsI16x8UConvertI8x16Low) \
+ V(MipsI16x8UConvertI8x16High) \
+ V(MipsI8x16SConvertI16x8) \
+ V(MipsI8x16UConvertI16x8)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1058833a43..9d5a2d95a1 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -294,11 +294,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaLd;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -382,11 +381,10 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsSw;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaSt;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -732,20 +730,20 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value)) {
+ uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (base::bits::IsPowerOfTwo(value)) {
Emit(kMipsShl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (base::bits::IsPowerOfTwo32(value + 1)) {
+ if (base::bits::IsPowerOfTwo(value + 1)) {
InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
@@ -1234,11 +1232,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kMipsUldc1;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaLd;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1287,11 +1284,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsUsw;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaSt;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1340,9 +1336,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1683,6 +1676,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 9 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
@@ -1690,7 +1684,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
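// Note on the new bound above: kMaxTableSwitchValueRange is 2 << 16 == 131072,
// so ArchTableSwitch is only chosen while the jump table stays that small;
// wider value ranges now fall back to ArchLookupSwitch.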
@@ -1941,316 +1936,348 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitI32x4Splat(Node* node) {
- VisitRR(this, kMipsI32x4Splat, node);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
- VisitRRI(this, kMipsI32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsI32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI32x4Add(Node* node) {
- VisitRRR(this, kMipsI32x4Add, node);
-}
-
-void InstructionSelector::VisitI32x4Sub(Node* node) {
- VisitRRR(this, kMipsI32x4Sub, node);
-}
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
+ V(F32x4Abs, kMipsF32x4Abs) \
+ V(F32x4Neg, kMipsF32x4Neg) \
+ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
+ V(I32x4Neg, kMipsI32x4Neg) \
+ V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
+ V(I16x8Neg, kMipsI16x8Neg) \
+ V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
+ V(I8x16Neg, kMipsI8x16Neg) \
+ V(S128Not, kMipsS128Not) \
+ V(S1x4AnyTrue, kMipsS1x4AnyTrue) \
+ V(S1x4AllTrue, kMipsS1x4AllTrue) \
+ V(S1x8AnyTrue, kMipsS1x8AnyTrue) \
+ V(S1x8AllTrue, kMipsS1x8AllTrue) \
+ V(S1x16AnyTrue, kMipsS1x16AnyTrue) \
+ V(S1x16AllTrue, kMipsS1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kMipsF32x4Add) \
+ V(F32x4AddHoriz, kMipsF32x4AddHoriz) \
+ V(F32x4Sub, kMipsF32x4Sub) \
+ V(F32x4Mul, kMipsF32x4Mul) \
+ V(F32x4Max, kMipsF32x4Max) \
+ V(F32x4Min, kMipsF32x4Min) \
+ V(F32x4Eq, kMipsF32x4Eq) \
+ V(F32x4Ne, kMipsF32x4Ne) \
+ V(F32x4Lt, kMipsF32x4Lt) \
+ V(F32x4Le, kMipsF32x4Le) \
+ V(I32x4Add, kMipsI32x4Add) \
+ V(I32x4AddHoriz, kMipsI32x4AddHoriz) \
+ V(I32x4Sub, kMipsI32x4Sub) \
+ V(I32x4Mul, kMipsI32x4Mul) \
+ V(I32x4MaxS, kMipsI32x4MaxS) \
+ V(I32x4MinS, kMipsI32x4MinS) \
+ V(I32x4MaxU, kMipsI32x4MaxU) \
+ V(I32x4MinU, kMipsI32x4MinU) \
+ V(I32x4Eq, kMipsI32x4Eq) \
+ V(I32x4Ne, kMipsI32x4Ne) \
+ V(I32x4GtS, kMipsI32x4GtS) \
+ V(I32x4GeS, kMipsI32x4GeS) \
+ V(I32x4GtU, kMipsI32x4GtU) \
+ V(I32x4GeU, kMipsI32x4GeU) \
+ V(I16x8Add, kMipsI16x8Add) \
+ V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
+ V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
+ V(I16x8AddHoriz, kMipsI16x8AddHoriz) \
+ V(I16x8Sub, kMipsI16x8Sub) \
+ V(I16x8SubSaturateS, kMipsI16x8SubSaturateS) \
+ V(I16x8SubSaturateU, kMipsI16x8SubSaturateU) \
+ V(I16x8Mul, kMipsI16x8Mul) \
+ V(I16x8MaxS, kMipsI16x8MaxS) \
+ V(I16x8MinS, kMipsI16x8MinS) \
+ V(I16x8MaxU, kMipsI16x8MaxU) \
+ V(I16x8MinU, kMipsI16x8MinU) \
+ V(I16x8Eq, kMipsI16x8Eq) \
+ V(I16x8Ne, kMipsI16x8Ne) \
+ V(I16x8GtS, kMipsI16x8GtS) \
+ V(I16x8GeS, kMipsI16x8GeS) \
+ V(I16x8GtU, kMipsI16x8GtU) \
+ V(I16x8GeU, kMipsI16x8GeU) \
+ V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
+ V(I8x16Add, kMipsI8x16Add) \
+ V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
+ V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
+ V(I8x16Sub, kMipsI8x16Sub) \
+ V(I8x16SubSaturateS, kMipsI8x16SubSaturateS) \
+ V(I8x16SubSaturateU, kMipsI8x16SubSaturateU) \
+ V(I8x16Mul, kMipsI8x16Mul) \
+ V(I8x16MaxS, kMipsI8x16MaxS) \
+ V(I8x16MinS, kMipsI8x16MinS) \
+ V(I8x16MaxU, kMipsI8x16MaxU) \
+ V(I8x16MinU, kMipsI8x16MinU) \
+ V(I8x16Eq, kMipsI8x16Eq) \
+ V(I8x16Ne, kMipsI8x16Ne) \
+ V(I8x16GtS, kMipsI8x16GtS) \
+ V(I8x16GeS, kMipsI8x16GeS) \
+ V(I8x16GtU, kMipsI8x16GtU) \
+ V(I8x16GeU, kMipsI8x16GeU) \
+ V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
+ V(S128And, kMipsS128And) \
+ V(S128Or, kMipsS128Or) \
+ V(S128Xor, kMipsS128Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
}
-void InstructionSelector::VisitS1x4Zero(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x8Zero(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x16Zero(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitF32x4Splat(Node* node) {
- VisitRR(this, kMipsF32x4Splat, node);
-}
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
- VisitRRI(this, kMipsF32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsF32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
- VisitRR(this, kMipsF32x4SConvertI32x4, node);
-}
-
-void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- VisitRR(this, kMipsF32x4UConvertI32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Mul(Node* node) {
- VisitRRR(this, kMipsI32x4Mul, node);
-}
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) {
- VisitRRR(this, kMipsI32x4MaxS, node);
-}
-
-void InstructionSelector::VisitI32x4MinS(Node* node) {
- VisitRRR(this, kMipsI32x4MinS, node);
-}
-
-void InstructionSelector::VisitI32x4Eq(Node* node) {
- VisitRRR(this, kMipsI32x4Eq, node);
-}
-
-void InstructionSelector::VisitI32x4Ne(Node* node) {
- VisitRRR(this, kMipsI32x4Ne, node);
-}
-
-void InstructionSelector::VisitI32x4Shl(Node* node) {
- VisitRRI(this, kMipsI32x4Shl, node);
-}
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) {
- VisitRRI(this, kMipsI32x4ShrS, node);
-}
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) {
- VisitRRI(this, kMipsI32x4ShrU, node);
-}
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) {
- VisitRRR(this, kMipsI32x4MaxU, node);
-}
-
-void InstructionSelector::VisitI32x4MinU(Node* node) {
- VisitRRR(this, kMipsI32x4MinU, node);
-}
-
-void InstructionSelector::VisitS32x4Select(Node* node) {
- VisitRRRR(this, kMipsS32x4Select, node);
-}
-
-void InstructionSelector::VisitF32x4Abs(Node* node) {
- VisitRR(this, kMipsF32x4Abs, node);
-}
-
-void InstructionSelector::VisitF32x4Neg(Node* node) {
- VisitRR(this, kMipsF32x4Neg, node);
-}
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
- VisitRR(this, kMipsF32x4RecipApprox, node);
-}
-
-void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
- VisitRR(this, kMipsF32x4RecipSqrtApprox, node);
-}
-
-void InstructionSelector::VisitF32x4Add(Node* node) {
- VisitRRR(this, kMipsF32x4Add, node);
-}
-
-void InstructionSelector::VisitF32x4Sub(Node* node) {
- VisitRRR(this, kMipsF32x4Sub, node);
-}
-
-void InstructionSelector::VisitF32x4Mul(Node* node) {
- VisitRRR(this, kMipsF32x4Mul, node);
-}
-
-void InstructionSelector::VisitF32x4Max(Node* node) {
- VisitRRR(this, kMipsF32x4Max, node);
-}
-
-void InstructionSelector::VisitF32x4Min(Node* node) {
- VisitRRR(this, kMipsF32x4Min, node);
-}
-
-void InstructionSelector::VisitF32x4Eq(Node* node) {
- VisitRRR(this, kMipsF32x4Eq, node);
-}
-
-void InstructionSelector::VisitF32x4Ne(Node* node) {
- VisitRRR(this, kMipsF32x4Ne, node);
-}
-
-void InstructionSelector::VisitF32x4Lt(Node* node) {
- VisitRRR(this, kMipsF32x4Lt, node);
-}
-
-void InstructionSelector::VisitF32x4Le(Node* node) {
- VisitRRR(this, kMipsF32x4Le, node);
-}
-
-void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRR(this, kMipsI32x4SConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
- VisitRR(this, kMipsI32x4UConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Neg(Node* node) {
- VisitRR(this, kMipsI32x4Neg, node);
-}
-
-void InstructionSelector::VisitI32x4LtS(Node* node) {
- VisitRRR(this, kMipsI32x4LtS, node);
-}
-
-void InstructionSelector::VisitI32x4LeS(Node* node) {
- VisitRRR(this, kMipsI32x4LeS, node);
-}
-
-void InstructionSelector::VisitI32x4LtU(Node* node) {
- VisitRRR(this, kMipsI32x4LtU, node);
-}
-
-void InstructionSelector::VisitI32x4LeU(Node* node) {
- VisitRRR(this, kMipsI32x4LeU, node);
-}
-
-void InstructionSelector::VisitI16x8Splat(Node* node) {
- VisitRR(this, kMipsI16x8Splat, node);
-}
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
- VisitRRI(this, kMipsI16x8ExtractLane, node);
-}
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsI16x8ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI16x8Neg(Node* node) {
- VisitRR(this, kMipsI16x8Neg, node);
-}
-
-void InstructionSelector::VisitI16x8Shl(Node* node) {
- VisitRRI(this, kMipsI16x8Shl, node);
-}
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) {
- VisitRRI(this, kMipsI16x8ShrS, node);
-}
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) {
- VisitRRI(this, kMipsI16x8ShrU, node);
-}
-
-void InstructionSelector::VisitI16x8Add(Node* node) {
- VisitRRR(this, kMipsI16x8Add, node);
-}
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- VisitRRR(this, kMipsI16x8AddSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) {
- VisitRRR(this, kMipsI16x8Sub, node);
-}
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- VisitRRR(this, kMipsI16x8SubSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Mul(Node* node) {
- VisitRRR(this, kMipsI16x8Mul, node);
-}
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) {
- VisitRRR(this, kMipsI16x8MaxS, node);
-}
-
-void InstructionSelector::VisitI16x8MinS(Node* node) {
- VisitRRR(this, kMipsI16x8MinS, node);
-}
-
-void InstructionSelector::VisitI16x8Eq(Node* node) {
- VisitRRR(this, kMipsI16x8Eq, node);
-}
-
-void InstructionSelector::VisitI16x8Ne(Node* node) {
- VisitRRR(this, kMipsI16x8Ne, node);
-}
-
-void InstructionSelector::VisitI16x8LtS(Node* node) {
- VisitRRR(this, kMipsI16x8LtS, node);
-}
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kMips##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
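// For reference, SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) expands to four visitors of
// this shape, one per type in the list, replacing the hand-written versions
// deleted in this hunk:
//
//   void InstructionSelector::VisitF32x4Splat(Node* node) {
//     VisitRR(this, kMipsF32x4Splat, node);
//   }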
-void InstructionSelector::VisitI16x8LeS(Node* node) {
- VisitRRR(this, kMipsI16x8LeS, node);
-}
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kMips##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- VisitRRR(this, kMipsI16x8AddSaturateU, node);
-}
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kMips##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- VisitRRR(this, kMipsI16x8SubSaturateU, node);
-}
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
-void InstructionSelector::VisitI16x8MaxU(Node* node) {
- VisitRRR(this, kMipsI16x8MaxU, node);
-}
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kMips##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
-void InstructionSelector::VisitI16x8MinU(Node* node) {
- VisitRRR(this, kMipsI16x8MinU, node);
-}
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
-void InstructionSelector::VisitI16x8LtU(Node* node) {
- VisitRRR(this, kMipsI16x8LtU, node);
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kMipsS128Select, node);
}
-void InstructionSelector::VisitI16x8LeU(Node* node) {
- VisitRRR(this, kMipsI16x8LeU, node);
-}
+namespace {
-void InstructionSelector::VisitI8x16Splat(Node* node) {
- VisitRR(this, kMipsI8x16Splat, node);
+// Tries to match an 8x16 byte shuffle to an equivalent 32x4 word shuffle.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ static const int kLanes = 4;
+ static const int kLaneSize = 4;
+ for (int i = 0; i < kLanes; ++i) {
+ if (shuffle[i * kLaneSize] % kLaneSize != 0) return false;
+ for (int j = 1; j < kLaneSize; ++j) {
+ if (shuffle[i * kLaneSize + j] - shuffle[i * kLaneSize + j - 1] != 1)
+ return false;
+ }
+ shuffle32x4[i] = shuffle[i * kLaneSize] / kLaneSize;
+ }
+ return true;
}
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
- VisitRRI(this, kMipsI8x16ExtractLane, node);
+// Tries to match a byte shuffle to a concatenate (sldi) operation.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
+ uint8_t start = shuffle[0];
+ for (int i = 1; i < kSimd128Size - start; ++i) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = kSimd128Size;
+ for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *offset = start;
+ return true;
}
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsI8x16ReplaceLane, node);
-}
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
-void InstructionSelector::VisitI8x16Neg(Node* node) {
- VisitRR(this, kMipsI8x16Neg, node);
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kMipsS32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMipsS32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kMipsS32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kMipsS32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kMipsS32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMipsS32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kMipsS16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kMipsS16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kMipsS16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kMipsS16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kMipsS16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kMipsS16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kMipsS8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kMipsS8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kMipsS8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kMipsS8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kMipsS8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kMipsS8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
}
-void InstructionSelector::VisitI8x16Shl(Node* node) {
- VisitRRI(this, kMipsI8x16Shl, node);
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ static const int kUnaryShuffleMask = kSimd128Size - 1;
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = 0xff;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = kUnaryShuffleMask;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < kSimd128Size; i++) {
+ if (shuffle[i] < kSimd128Size) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = kUnaryShuffleMask;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = kUnaryShuffleMask;
+ }
+ }
+ return mask;
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- VisitRRI(this, kMipsI8x16ShrS, node);
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; --i) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
}
-void InstructionSelector::VisitS16x8Select(Node* node) {
- VisitRRRR(this, kMipsS16x8Select, node);
-}
+} // namespace
-void InstructionSelector::VisitS8x16Select(Node* node) {
- VisitRRRR(this, kMipsS8x16Select, node);
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ uint8_t offset;
+ MipsOperandGenerator g(this);
+ if (TryMatchConcat(shuffle, mask, &offset)) {
+ Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(offset));
+ return;
+ }
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ return;
+ }
+ Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
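The new VisitS8x16Shuffle tries the lowerings in order: the
architecture-specific table, concat, a 32x4 word shuffle, and finally the
generic byte shuffle. A minimal standalone sketch with illustrative shuffles
(example values, not taken from the V8 tests):

#include <cassert>
#include <cstdint>

// Same little-endian packing as Pack4Lanes above.
int32_t Pack4LanesRef(const uint8_t* shuffle, uint8_t mask) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i] & mask;
  }
  return result;
}

int main() {
  // {0, 2, 4, ..., 30} is in arch_shuffles -> a single kMipsS8x16PackEven.
  // {4, 5, ..., 19} is consecutive bytes of the two concatenated sources ->
  // kMipsS8x16Concat with offset 4.
  // {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11} moves whole words of one
  // source (a unary shuffle, so CanonicalizeShuffle returns mask 0x0f);
  // TryMatch32x4Shuffle reduces it to {1, 0, 3, 2} and kMipsS32x4Shuffle
  // gets one packed immediate:
  const uint8_t shuffle32x4[4] = {1, 0, 3, 2};
  assert(Pack4LanesRef(shuffle32x4, 0x0f) == 0x02030001);
  // Anything else becomes kMipsS8x16Shuffle with four such immediates, which
  // the code generator reassembles via insert_w before vshf.b.
  return 0;
}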
// static
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index f4fb71d989..b9957732dc 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -14,8 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
@@ -81,11 +80,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kInt64:
return Operand(constant.ToInt64());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
@@ -96,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(zero_reg);
}
Operand InputOperand(size_t index) {
@@ -120,7 +116,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
UNREACHABLE();
- return MemOperand(no_reg);
}
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
@@ -233,7 +228,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -251,10 +247,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ Daddu(scratch1_, object_, index_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(ra);
}
@@ -268,15 +264,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
-#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
class ool_name final : public OutOfLineCode { \
public: \
ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
: OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
\
- void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
\
private: \
T const dst_; \
@@ -320,7 +317,6 @@ Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -334,7 +330,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -348,7 +343,6 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -382,30 +376,29 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
break;
}
UNREACHABLE();
- return kNoFPUCondition;
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
- __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
- Operand(zero_reg)); \
- } else { \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
- } \
+#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
+ do { \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
+ __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
+ Operand(zero_reg)); \
+ } else { \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
+ } \
} while (0)
-#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
- __ Or(kScratchReg, zero_reg, Operand(offset)); \
- __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
- __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
- } else { \
- __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
- } \
+#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
+ do { \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
+ __ Or(kScratchReg, zero_reg, Operand(offset)); \
+ __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
+ __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
+ } else { \
+ __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
+ } \
} while (0)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
@@ -553,27 +546,29 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ sync(); \
} while (0)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
- __ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
- /* Move the result in the double result register. */ \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(tasm()->isolate()), 0, \
+ 2); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 1, kScratchReg); \
- __ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
- /* Move the result in the double result register. */ \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(tasm()->isolate()), 0, \
+ 1); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -616,7 +611,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -624,10 +619,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
+ tasm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
+ tasm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -636,13 +631,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -656,8 +651,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
@@ -674,8 +668,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
@@ -790,7 +783,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -893,8 +887,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@@ -1064,140 +1058,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dclz(i.OutputRegister(), i.InputRegister(0));
break;
case kMips64Ctz: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- Label skip_for_zero;
- Label end;
- // Branch if the operand is zero
- __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
- // Find the number of bits before the last bit set to 1.
- __ Subu(reg2, zero_reg, i.InputRegister(0));
- __ And(reg2, reg2, i.InputRegister(0));
- __ clz(reg2, reg2);
- // Get the number of bits after the last bit set to 1.
- __ li(reg1, 0x1F);
- __ Subu(i.OutputRegister(), reg1, reg2);
- __ Branch(&end);
- __ bind(&skip_for_zero);
- // If the operand is zero, return word length as the result.
- __ li(i.OutputRegister(), 0x20);
- __ bind(&end);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ __ rotr(dst, src, 16);
+ __ wsbh(dst, dst);
+ __ bitswap(dst, dst);
+ __ Clz(dst, dst);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ __ Daddu(kScratchReg, src, -1);
+ __ Xor(dst, kScratchReg, src);
+ __ And(dst, dst, kScratchReg);
+ // Count number of leading zeroes.
+ __ Clz(dst, dst);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ __ li(kScratchReg, 32);
+ __ Subu(dst, kScratchReg, dst);
+ }
} break;
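The pre-r6 path above relies on the classic (x - 1) trick rather than a bit-reversal: subtracting one turns the trailing zeroes into ones, the Xor/And pair isolates exactly that run, and a leading-zero count recovers its length. A scalar sketch of the 32-bit sequence, with an illustrative clz helper:

#include <cstdint>

static int CountLeadingZeros32(uint32_t v) {
  int n = 0;
  for (uint32_t mask = 0x80000000u; mask != 0 && (v & mask) == 0; mask >>= 1) n++;
  return n;  // 32 when v == 0
}

// Mirrors the Daddu(-1), Xor, And, Clz, li(32), Subu sequence above.
static int CountTrailingZeros32(uint32_t src) {
  uint32_t t = src - 1;                // trailing zeroes become trailing ones
  uint32_t d = (t ^ src) & t;          // keep only that run of ones
  return 32 - CountLeadingZeros32(d);  // yields 32 when src == 0
}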
case kMips64Dctz: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- Label skip_for_zero;
- Label end;
- // Branch if the operand is zero
- __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
- // Find the number of bits before the last bit set to 1.
- __ Dsubu(reg2, zero_reg, i.InputRegister(0));
- __ And(reg2, reg2, i.InputRegister(0));
- __ dclz(reg2, reg2);
- // Get the number of bits after the last bit set to 1.
- __ li(reg1, 0x3F);
- __ Subu(i.OutputRegister(), reg1, reg2);
- __ Branch(&end);
- __ bind(&skip_for_zero);
- // If the operand is zero, return word length as the result.
- __ li(i.OutputRegister(), 0x40);
- __ bind(&end);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ __ dsbh(dst, src);
+ __ dshd(dst, dst);
+ __ dbitswap(dst, dst);
+ __ dclz(dst, dst);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ __ Daddu(kScratchReg, src, -1);
+ __ Xor(dst, kScratchReg, src);
+ __ And(dst, dst, kScratchReg);
+ // Count number of leading zeroes.
+ __ dclz(dst, dst);
+ // Subtract number of leading zeroes from 64 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ __ li(kScratchReg, 64);
+ __ Dsubu(dst, kScratchReg, dst);
+ }
} break;
case kMips64Popcnt: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- uint32_t m1 = 0x55555555;
- uint32_t m2 = 0x33333333;
- uint32_t m4 = 0x0f0f0f0f;
- uint32_t m8 = 0x00ff00ff;
- uint32_t m16 = 0x0000ffff;
-
- // Put count of ones in every 2 bits into those 2 bits.
- __ li(at, m1);
- __ dsrl(reg1, i.InputRegister(0), 1);
- __ And(reg2, i.InputRegister(0), at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 4 bits into those 4 bits.
- __ li(at, m2);
- __ dsrl(reg2, reg1, 2);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 8 bits into those 8 bits.
- __ li(at, m4);
- __ dsrl(reg2, reg1, 4);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 16 bits into those 16 bits.
- __ li(at, m8);
- __ dsrl(reg2, reg1, 8);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Calculate total number of ones.
- __ li(at, m16);
- __ dsrl(reg2, reg1, 16);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(i.OutputRegister(), reg1, reg2);
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // A(n unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For a 64-bit operand this can be performed in 24 instructions compared
+    // to a(n unrolled) loop-based algorithm, which requires 38 instructions.
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ __ srl(kScratchReg, src, 1);
+ __ li(kScratchReg2, B0);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Subu(kScratchReg, src, kScratchReg);
+ __ li(kScratchReg2, B1);
+ __ And(dst, kScratchReg, kScratchReg2);
+ __ srl(kScratchReg, kScratchReg, 2);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Addu(kScratchReg, dst, kScratchReg);
+ __ srl(dst, kScratchReg, 4);
+ __ Addu(dst, dst, kScratchReg);
+ __ li(kScratchReg2, B2);
+ __ And(dst, dst, kScratchReg2);
+ __ li(kScratchReg, value);
+ __ Mul(dst, dst, kScratchReg);
+ __ srl(dst, dst, shift);
} break;
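Written as ordinary C++, the 32-bit sequence above is the parallel bit count from the Stanford page cited in the comment: each step halves the number of partial sums, and the final multiply gathers the per-byte counts into the top byte:

#include <cstdint>

static uint32_t PopCount32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // 2-bit partial sums
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit partial sums
  v = (v + (v >> 4)) & 0x0f0f0f0fu;                  // 8-bit partial sums
  return (v * 0x01010101u) >> 24;                    // collect into top byte
}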
case kMips64Dpopcnt: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- uint64_t m1 = 0x5555555555555555;
- uint64_t m2 = 0x3333333333333333;
- uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
- uint64_t m8 = 0x00ff00ff00ff00ff;
- uint64_t m16 = 0x0000ffff0000ffff;
- uint64_t m32 = 0x00000000ffffffff;
-
- // Put count of ones in every 2 bits into those 2 bits.
- __ li(at, m1);
- __ dsrl(reg1, i.InputRegister(0), 1);
- __ and_(reg2, i.InputRegister(0), at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 4 bits into those 4 bits.
- __ li(at, m2);
- __ dsrl(reg2, reg1, 2);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 8 bits into those 8 bits.
- __ li(at, m4);
- __ dsrl(reg2, reg1, 4);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 16 bits into those 16 bits.
- __ li(at, m8);
- __ dsrl(reg2, reg1, 8);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 32 bits into those 32 bits.
- __ li(at, m16);
- __ dsrl(reg2, reg1, 16);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Calculate total number of ones.
- __ li(at, m32);
- __ dsrl32(reg2, reg1, 0);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(i.OutputRegister(), reg1, reg2);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ uint64_t B2 = 0x0f0f0f0f0f0f0f0fl; // (T)~(T)0/255*15
+ uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
+      uint64_t shift = 24;  // (sizeof(T) - 1) * BITS_PER_BYTE - 32; dsrl32 adds 32
+ __ dsrl(kScratchReg, src, 1);
+ __ li(kScratchReg2, B0);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Dsubu(kScratchReg, src, kScratchReg);
+ __ li(kScratchReg2, B1);
+ __ And(dst, kScratchReg, kScratchReg2);
+ __ dsrl(kScratchReg, kScratchReg, 2);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Daddu(kScratchReg, dst, kScratchReg);
+ __ dsrl(dst, kScratchReg, 4);
+ __ Daddu(dst, dst, kScratchReg);
+ __ li(kScratchReg2, B2);
+ __ And(dst, dst, kScratchReg2);
+ __ li(kScratchReg, value);
+ __ Dmul(dst, dst, kScratchReg);
+ __ dsrl32(dst, dst, shift);
} break;
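The 64-bit variant is the same recipe with widened masks; note that the final dsrl32 with an immediate of 24 shifts by 32 + 24 = 56 bits, i.e. (sizeof(T) - 1) * BITS_PER_BYTE for a 64-bit T. Scalar form:

#include <cstdint>

static uint64_t PopCount64(uint64_t v) {
  v = v - ((v >> 1) & 0x5555555555555555ull);
  v = (v & 0x3333333333333333ull) + ((v >> 2) & 0x3333333333333333ull);
  v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0full;
  return (v * 0x0101010101010101ull) >> 56;  // dsrl32(dst, dst, 24) == >> 56
}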
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1341,13 +1321,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64ModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
// TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(tasm()->isolate()), 0,
+ 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputSingleRegister());
break;
@@ -1382,26 +1363,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMips64MaddS:
- __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2),
- kScratchDoubleReg);
- break;
- case kMips64MaddD:
- __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2),
- kScratchDoubleReg);
- break;
- case kMips64MsubS:
- __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2),
- kScratchDoubleReg);
- break;
- case kMips64MsubD:
- __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2),
- kScratchDoubleReg);
- break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1414,12 +1375,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64ModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(tasm()->isolate()), 0,
+ 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
@@ -1974,24 +1936,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kMips64S128Zero: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
i.OutputSimd128Register());
break;
}
case kMips64I32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2001,31 +1963,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
case kMips64F32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
__ FmoveLow(i.OutputSingleRegister(), kScratchReg);
break;
}
case kMips64F32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2036,213 +1998,211 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4SConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4UConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMips64I32x4Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMips64I32x4ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMips64I32x4ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMips64I32x4MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64S32x4Select:
- case kMips64S16x8Select:
- case kMips64S8x16Select: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ case kMips64S128Select: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Abs: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4RecipApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4RecipSqrtApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Max: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Min: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Lt: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Le: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4SConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4UConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
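There is no dedicated greater-than compare used here: the Gt/Ge cases above (and the I16x8/I8x16 ones further down) swap the two inputs and reuse the less-than/less-equal instructions, relying on the identities

    a >  b  <=>  b <  a   (clt_*_w(dst, input1, input0))
    a >= b  <=>  b <= a   (cle_*_w(dst, input1, input0))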
case kMips64I16x8Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I16x8ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I16x8ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2252,146 +2212,146 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMips64I16x8ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMips64I16x8ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMips64I16x8Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8AddSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8SubSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
- case kMips64I16x8LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I16x8LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMips64I16x8AddSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8SubSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I16x8LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMips64I8x16Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I8x16ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I8x16ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2401,24 +2361,636 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I8x16Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kMips64I8x16ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
+ case kMips64I8x16Add: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16AddSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Sub: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16SubSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Mul: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MaxS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MinS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Eq: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMips64I8x16GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16ShrU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMips64I8x16AddSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16SubSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MaxU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MinU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64S128And: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S128Or: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S128Xor: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S128Not: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64S1x4AnyTrue:
+ case kMips64S1x8AnyTrue:
+ case kMips64S1x16AnyTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_false;
+ __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, 0); // branch delay slot
+ __ li(dst, -1);
+ __ bind(&all_false);
+ break;
+ }
+ case kMips64S1x4AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMips64S1x8AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMips64S1x16AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
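All of these AnyTrue/AllTrue cases use the same branch-delay-slot idiom: the first li sits in the delay slot and always executes, the second is skipped when the branch is taken, so the register ends up holding 0 or -1 without an extra jump. The lane-wise meaning, sketched in scalar form (lane width elided; purely illustrative):

#include <cstddef>
#include <cstdint>

// Result convention matching the li sequences above: -1 for "true", 0 otherwise.
static int32_t AnyTrue(const uint8_t* lanes, size_t n) {
  for (size_t i = 0; i < n; i++) {
    if (lanes[i] != 0) return -1;  // at least one lane is non-zero
  }
  return 0;  // all lanes zero: the branch is taken, dst keeps the delay-slot 0
}

static int32_t AllTrue(const uint8_t* lanes, size_t n) {
  for (size_t i = 0; i < n; i++) {
    if (lanes[i] == 0) return 0;   // some lane is zero
  }
  return -1;  // all lanes non-zero: the branch is taken, dst keeps the delay-slot -1
}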
+ case kMips64MsaLd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
+ break;
+ }
+ case kMips64MsaSt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
+ break;
+ }
+ case kMips64S32x4InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [5, 1, 4, 0]
+ __ ilvr_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 6, 2]
+ __ ilvl_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 4, 2, 0]
+ __ pckev_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 5, 3, 1]
+ __ pckod_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 2, 4, 0]
+ __ ilvev_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 5, 1]
+ __ ilvod_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int32_t shuffle = i.InputInt32(2);
+
+ if (src0.is(src1)) {
+ // Unary S32x4 shuffles are handled with shf.w instruction
+ uint32_t i8 = 0;
+ for (int i = 0; i < 4; i++) {
+ int lane = shuffle & 0xff;
+ DCHECK(lane < 4);
+ i8 |= lane << (2 * i);
+ shuffle >>= 8;
+ }
+ __ shf_w(dst, src0, i8);
+ } else {
+ // For binary shuffles use vshf.w instruction
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
+ __ li(kScratchReg, i.InputInt32(2));
+ __ insert_w(dst, 0, kScratchReg);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(dst, kSimd128RegZero, dst);
+ __ ilvr_h(dst, kSimd128RegZero, dst);
+ __ vshf_w(dst, src1, src0);
+ }
+ break;
+ }
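For the unary fast path above, each byte of the 32-bit shuffle constant holds one destination lane's source index (lane 0 in the least-significant byte), and the loop packs those indices into the 2-bit-per-lane shf.w immediate. A full reversal [3, 2, 1, 0] packs to 0x1B, the same immediate the reverse cases below use directly:

#include <cassert>
#include <cstdint>

// Same packing as the loop above: four 0..3 selectors, two bits per lane.
static uint32_t PackShfImmediate(uint32_t shuffle) {
  uint32_t imm = 0;
  for (int i = 0; i < 4; i++) {
    uint32_t lane = shuffle & 0xff;
    assert(lane < 4);
    imm |= lane << (2 * i);
    shuffle >>= 8;
  }
  return imm;
}
// PackShfImmediate(0x00010203) == 0x1B  (00011011: reverse the four lanes)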
+ case kMips64S16x8InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [11, 3, 10, 2, 9, 1, 8, 0]
+ __ ilvr_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, 14, 6, 13, 5, 12, 4]
+ __ ilvl_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 12, 10, 8, 6, 4, 2, 0]
+ __ pckev_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 13, 11, 9, 7, 5, 3, 1]
+ __ pckod_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 6, 12, 4, 10, 2, 8, 0]
+ __ ilvev_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, ... 11, 3, 9, 1]
+ __ ilvod_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMips64S16x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMips64S8x16InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [23, 7, ... 17, 1, 16, 0]
+ __ ilvr_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 25, 9, 24, 8]
+ __ ilvl_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 28, ... 6, 4, 2, 0]
+ __ pckev_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 29, ... 7, 5, 3, 1]
+ __ pckod_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 14, ... 18, 2, 16, 0]
+ __ ilvev_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 19, 3, 17, 1]
+ __ ilvod_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16Concat: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
+ break;
+ }
+ case kMips64S8x16Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
+ int64_t control_low =
+ static_cast<int64_t>(i.InputInt32(3)) << 32 | i.InputInt32(2);
+ int64_t control_hi =
+ static_cast<int64_t>(i.InputInt32(5)) << 32 | i.InputInt32(4);
+ __ li(kScratchReg, control_low);
+ __ insert_d(dst, 0, kScratchReg);
+ __ li(kScratchReg, control_hi);
+ __ insert_d(dst, 1, kScratchReg);
+ __ vshf_b(dst, src1, src0);
+ break;
+ }
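The general byte shuffle receives its sixteen lane selectors as four 32-bit instruction immediates and stitches them back into the two 64-bit control words that insert_d loads into the destination before vshf.b. A sketch of that repacking (parameter names are illustrative):

#include <cstdint>

// imm[0..3] stand for i.InputInt32(2) .. i.InputInt32(5).
static void PackVshfControl(const uint32_t imm[4], uint64_t* control_low,
                            uint64_t* control_hi) {
  *control_low = (static_cast<uint64_t>(imm[1]) << 32) | imm[0];
  *control_hi = (static_cast<uint64_t>(imm[3]) << 32) | imm[2];
}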
+ case kMips64S8x8Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+ // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
+ // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
+ // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
+ __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
+ __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
+ break;
+ }
+ case kMips64S8x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMips64S8x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMips64I32x4SConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMips64I32x4SConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMips64I32x4UConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4UConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
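The widening conversions above have no single sign-extend instruction to lean on: ilvr_h/ilvl_h moves each of the low (or high) eight halfwords into its own 32-bit lane, the slli_w/srai_w pair then sign-extends the signed variants, and the unsigned variants instead interleave with a zeroed register, which is a zero-extension. The I16x8-from-I8x16 cases below repeat the same pattern with bytes and 8-bit shifts. Per-lane effect of the shift pair, in scalar form (assumes arithmetic right shift, as on the targets here):

#include <cstdint>

// Per-lane model of slli_w(dst, lane, 16) followed by srai_w(dst, dst, 16).
static int32_t SignExtend16To32(uint16_t half) {
  uint32_t widened = static_cast<uint32_t>(half) << 16;  // slli_w by 16
  return static_cast<int32_t>(widened) >> 16;            // srai_w by 16 (arithmetic)
}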
+ case kMips64I16x8SConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMips64I16x8SConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMips64I16x8SConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_w(kSimd128ScratchReg, src0, 15);
+ __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I16x8UConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_w(kSimd128ScratchReg, src0, 15);
+ __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
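Both narrowing packs above first saturate every 32-bit lane into 16-bit range (sat_s_w / sat_u_w with an immediate of 15, i.e. keep 16 bits) and then pckev_h keeps the low halfword of each lane, interleaving the two inputs into one result; the I8x16 packs below do the same with an immediate of 7. Lane-wise scalar model of the saturation step:

#include <algorithm>
#include <cstdint>

static int16_t SaturateS32ToS16(int32_t v) {    // sat_s_w(.., 15)
  return static_cast<int16_t>(std::min(32767, std::max(-32768, v)));
}

static uint16_t SaturateU32ToU16(uint32_t v) {  // sat_u_w(.., 15)
  return static_cast<uint16_t>(std::min<uint32_t>(v, 65535u));
}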
+ case kMips64I16x8UConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I16x8UConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16SConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_h(kSimd128ScratchReg, src0, 7);
+ __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I8x16UConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_h(kSimd128ScratchReg, src0, 7);
+ __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64F32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
+ __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
+ __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
+ __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_d(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I16x8AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_w(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
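The three AddHoriz cases all follow the same shape: form per-input pairwise sums (shf.w 0xB1 plus fadd for floats, hadd for the integer widths) and then pckev packs the partial sums of both inputs into one register. The intended lane result, written out for the f32x4 case (a scalar model, assuming the usual pairwise-add lane order):

// out = [a0+a1, a2+a3, b0+b1, b2+b3]
static void F32x4AddHoriz(const float a[4], const float b[4], float out[4]) {
  out[0] = a[0] + a[1];
  out[1] = a[2] + a[3];
  out[2] = b[0] + b[1];
  out[3] = b[2] + b[3];
}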
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2455,11 +3027,11 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
-void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ masm->
+#define __ tasm->
MipsOperandConverter i(gen, instr);
Condition cc = kNoCondition;
@@ -2554,7 +3126,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ masm()->
+#define __ tasm()->
}
// Assembles branches after an instruction.
@@ -2562,7 +3134,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -2603,14 +3175,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ tasm()->isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(tasm()->isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2628,7 +3200,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
bool frame_elided = !frame_access_state()->has_frame();
auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
// Assembles boolean materializations after an instruction.
@@ -2650,7 +3222,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
if (instr->InputAt(1)->IsImmediate() &&
- base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
+ base::bits::IsPowerOfTwo(i.InputOperand(1).immediate())) {
uint16_t pos =
base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
__ Dext(result, i.InputRegister(0), pos, 1);
@@ -2838,9 +3410,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ tasm()->isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2895,7 +3467,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
if (shrink_slots > 0) {
@@ -3005,7 +3577,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
@@ -3016,7 +3588,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat64:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
@@ -3055,7 +3627,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- __ Move(dst, src.ToFloat64());
+ __ Move(dst, src.ToFloat64().value());
if (destination->IsFPStackSlot()) {
__ Sdc1(dst, g.ToMemOperand(destination));
}
@@ -3166,11 +3738,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block trampoline pool emission for duration of padding.

v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 02cd4d5852..1b420d3819 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -85,10 +85,6 @@ namespace compiler {
V(Mips64SqrtD) \
V(Mips64MaxD) \
V(Mips64MinD) \
- V(Mips64MaddS) \
- V(Mips64MaddD) \
- V(Mips64MsubS) \
- V(Mips64MsubD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
@@ -172,6 +168,7 @@ namespace compiler {
V(Mips64I32x4ExtractLane) \
V(Mips64I32x4ReplaceLane) \
V(Mips64I32x4Add) \
+ V(Mips64I32x4AddHoriz) \
V(Mips64I32x4Sub) \
V(Mips64F32x4Splat) \
V(Mips64F32x4ExtractLane) \
@@ -188,12 +185,12 @@ namespace compiler {
V(Mips64I32x4ShrU) \
V(Mips64I32x4MaxU) \
V(Mips64I32x4MinU) \
- V(Mips64S32x4Select) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4Add) \
+ V(Mips64F32x4AddHoriz) \
V(Mips64F32x4Sub) \
V(Mips64F32x4Mul) \
V(Mips64F32x4Max) \
@@ -205,10 +202,10 @@ namespace compiler {
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
- V(Mips64I32x4LtS) \
- V(Mips64I32x4LeS) \
- V(Mips64I32x4LtU) \
- V(Mips64I32x4LeU) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLane) \
V(Mips64I16x8ReplaceLane) \
@@ -218,6 +215,7 @@ namespace compiler {
V(Mips64I16x8ShrU) \
V(Mips64I16x8Add) \
V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8AddHoriz) \
V(Mips64I16x8Sub) \
V(Mips64I16x8SubSaturateS) \
V(Mips64I16x8Mul) \
@@ -225,22 +223,89 @@ namespace compiler {
V(Mips64I16x8MinS) \
V(Mips64I16x8Eq) \
V(Mips64I16x8Ne) \
- V(Mips64I16x8LtS) \
- V(Mips64I16x8LeS) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
V(Mips64I16x8AddSaturateU) \
V(Mips64I16x8SubSaturateU) \
V(Mips64I16x8MaxU) \
V(Mips64I16x8MinU) \
- V(Mips64I16x8LtU) \
- V(Mips64I16x8LeU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLane) \
V(Mips64I8x16ReplaceLane) \
V(Mips64I8x16Neg) \
V(Mips64I8x16Shl) \
V(Mips64I8x16ShrS) \
- V(Mips64S16x8Select) \
- V(Mips64S8x16Select)
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSaturateS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSaturateS) \
+ V(Mips64I8x16Mul) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSaturateU) \
+ V(Mips64I8x16SubSaturateU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S1x4AnyTrue) \
+ V(Mips64S1x4AllTrue) \
+ V(Mips64S1x8AnyTrue) \
+ V(Mips64S1x8AllTrue) \
+ V(Mips64S1x16AnyTrue) \
+ V(Mips64S1x16AllTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64S8x16Shuffle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index b4664d036a..1f26d5992b 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -411,10 +411,9 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaLd;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -491,10 +490,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Sd;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaSt;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -981,20 +979,20 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value)) {
+ uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (base::bits::IsPowerOfTwo32(value + 1)) {
+ if (base::bits::IsPowerOfTwo(value + 1)) {
InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
@@ -1038,21 +1036,21 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = static_cast<int32_t>(m.right().Value());
- if (base::bits::IsPowerOfTwo32(value)) {
+ uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
// Dlsa macro will handle the shifting value out of bound cases.
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (base::bits::IsPowerOfTwo32(value + 1)) {
+ if (base::bits::IsPowerOfTwo(value + 1)) {
InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
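
Both multiply-selection hunks only switch the helper to the templated IsPowerOfTwo and widen the constant to an unsigned type, but the strength-reduction identities they rely on are worth spelling out: a constant multiplier of 2^k becomes a shift, 2^k + 1 becomes a shift-and-add (the Lsa/Dlsa pattern), and 2^k - 1 becomes a shift followed by a subtract. A minimal standalone check of those identities:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 12345;
  // x * 2^k  ==  x << k
  assert(x * 8u == (x << 3));
  // x * (2^k + 1)  ==  (x << k) + x   (the Lsa/Dlsa shift-and-add form)
  assert(x * 9u == (x << 3) + x);
  // x * (2^k - 1)  ==  (x << k) - x
  assert(x * 7u == (x << 3) - x);
  return 0;
}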
@@ -1496,84 +1494,28 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Add.S(Mul.S(x, y), z):
- Float32BinopMatcher mleft(m.left().node());
- Emit(kMips64MaddS, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- // For Add.S(x, Mul.S(y, z)):
- Float32BinopMatcher mright(m.right().node());
- Emit(kMips64MaddS, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- }
+ // Optimization with Madd.S(z, x, y) is intentionally removed.
+ // See explanation for madd_s in assembler-mips64.cc.
VisitRRR(this, kMips64AddS, node);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Add.D(Mul.D(x, y), z):
- Float64BinopMatcher mleft(m.left().node());
- Emit(kMips64MaddD, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- // For Add.D(x, Mul.D(y, z)):
- Float64BinopMatcher mright(m.right().node());
- Emit(kMips64MaddD, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- }
+ // Optimization with Madd.D(z, x, y) is intentionally removed.
+ // See explanation for madd_d in assembler-mips64.cc.
VisitRRR(this, kMips64AddD, node);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
- Float32BinopMatcher mleft(m.left().node());
- Emit(kMips64MsubS, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- }
+ // Optimization with Msub.S(z, x, y) is intentionally removed.
+ // See explanation for madd_s in assembler-mips64.cc.
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
- Float64BinopMatcher mleft(m.left().node());
- Emit(kMips64MsubD, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- }
+ // Optimization with Msub.D(z, x, y) is intentionally removed.
+ // See explanation for madd_d in assembler-mips64.cc.
VisitRRR(this, kMips64SubD, node);
}
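
The four hunks above drop the Madd/Msub selection for float add/sub and defer to a comment in assembler-mips64.cc for the rationale. One property that commonly motivates removing such patterns, shown here only as a standalone illustration and not as the actual V8 rationale, is that a fused multiply-add rounds once while mul-then-add rounds twice, so the two forms can legitimately disagree:

#include <cmath>
#include <cstdio>

int main() {
  // x*y is just below 1.0; rounded to float it becomes exactly 1.0,
  // so the split form cancels to 0 while the fused form keeps -2^-46.
  float x = 1.0f + 0x1p-23f;
  float y = 1.0f - 0x1p-23f;
  float z = -1.0f;
  float fused = std::fmaf(x, y, z);  // single rounding
  float split = x * y + z;           // two roundings
  std::printf("fused=%a split=%a\n", fused, split);  // they differ
  return 0;
}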
@@ -1785,10 +1727,9 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Uld;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaLd;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1838,10 +1779,9 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Usd;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaSt;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1892,9 +1832,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1956,9 +1893,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -2387,6 +2321,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 10 + 2 * sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
@@ -2394,7 +2329,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
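
The switch-lowering hunks add an upper bound on the value range (2 << 16) on top of the existing space/time comparison between a jump table and a cascade of compares. A small standalone model of that heuristic as it appears in the diff; the lookup time term is not visible in the hunk and is assumed here to be linear in the case count, and the min_value check is omitted for brevity:

#include <cstddef>
#include <cstdio>

// Mirrors the comparison in VisitSwitch: prefer a table switch when its
// space cost plus 3x its time cost beats the lookup switch and the value
// range stays within bounds.
bool PreferTableSwitch(size_t case_count, size_t value_range) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 10 + 2 * value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 2 + 2 * case_count;
  size_t lookup_time_cost = case_count;  // assumed, not shown in the hunk
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  std::printf("%d\n", PreferTableSwitch(8, 8));     // dense: table switch
  std::printf("%d\n", PreferTableSwitch(4, 1000));  // sparse: lookup switch
  return 0;
}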
@@ -2693,316 +2629,353 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitI32x4Splat(Node* node) {
- VisitRR(this, kMips64I32x4Splat, node);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
- VisitRRI(this, kMips64I32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64I32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI32x4Add(Node* node) {
- VisitRRR(this, kMips64I32x4Add, node);
-}
-
-void InstructionSelector::VisitI32x4Sub(Node* node) {
- VisitRRR(this, kMips64I32x4Sub, node);
-}
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
+ V(F32x4Abs, kMips64F32x4Abs) \
+ V(F32x4Neg, kMips64F32x4Neg) \
+ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
+ V(I32x4Neg, kMips64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
+ V(I16x8Neg, kMips64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
+ V(I8x16Neg, kMips64I8x16Neg) \
+ V(S128Not, kMips64S128Not) \
+ V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
+ V(S1x4AllTrue, kMips64S1x4AllTrue) \
+ V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
+ V(S1x8AllTrue, kMips64S1x8AllTrue) \
+ V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
+ V(S1x16AllTrue, kMips64S1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kMips64F32x4Add) \
+ V(F32x4AddHoriz, kMips64F32x4AddHoriz) \
+ V(F32x4Sub, kMips64F32x4Sub) \
+ V(F32x4Mul, kMips64F32x4Mul) \
+ V(F32x4Max, kMips64F32x4Max) \
+ V(F32x4Min, kMips64F32x4Min) \
+ V(F32x4Eq, kMips64F32x4Eq) \
+ V(F32x4Ne, kMips64F32x4Ne) \
+ V(F32x4Lt, kMips64F32x4Lt) \
+ V(F32x4Le, kMips64F32x4Le) \
+ V(I32x4Add, kMips64I32x4Add) \
+ V(I32x4AddHoriz, kMips64I32x4AddHoriz) \
+ V(I32x4Sub, kMips64I32x4Sub) \
+ V(I32x4Mul, kMips64I32x4Mul) \
+ V(I32x4MaxS, kMips64I32x4MaxS) \
+ V(I32x4MinS, kMips64I32x4MinS) \
+ V(I32x4MaxU, kMips64I32x4MaxU) \
+ V(I32x4MinU, kMips64I32x4MinU) \
+ V(I32x4Eq, kMips64I32x4Eq) \
+ V(I32x4Ne, kMips64I32x4Ne) \
+ V(I32x4GtS, kMips64I32x4GtS) \
+ V(I32x4GeS, kMips64I32x4GeS) \
+ V(I32x4GtU, kMips64I32x4GtU) \
+ V(I32x4GeU, kMips64I32x4GeU) \
+ V(I16x8Add, kMips64I16x8Add) \
+ V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
+ V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
+ V(I16x8AddHoriz, kMips64I16x8AddHoriz) \
+ V(I16x8Sub, kMips64I16x8Sub) \
+ V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
+ V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
+ V(I16x8Mul, kMips64I16x8Mul) \
+ V(I16x8MaxS, kMips64I16x8MaxS) \
+ V(I16x8MinS, kMips64I16x8MinS) \
+ V(I16x8MaxU, kMips64I16x8MaxU) \
+ V(I16x8MinU, kMips64I16x8MinU) \
+ V(I16x8Eq, kMips64I16x8Eq) \
+ V(I16x8Ne, kMips64I16x8Ne) \
+ V(I16x8GtS, kMips64I16x8GtS) \
+ V(I16x8GeS, kMips64I16x8GeS) \
+ V(I16x8GtU, kMips64I16x8GtU) \
+ V(I16x8GeU, kMips64I16x8GeU) \
+ V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
+ V(I8x16Add, kMips64I8x16Add) \
+ V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
+ V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
+ V(I8x16Sub, kMips64I8x16Sub) \
+ V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
+ V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
+ V(I8x16Mul, kMips64I8x16Mul) \
+ V(I8x16MaxS, kMips64I8x16MaxS) \
+ V(I8x16MinS, kMips64I8x16MinS) \
+ V(I8x16MaxU, kMips64I8x16MaxU) \
+ V(I8x16MinU, kMips64I8x16MinU) \
+ V(I8x16Eq, kMips64I8x16Eq) \
+ V(I8x16Ne, kMips64I8x16Ne) \
+ V(I8x16GtS, kMips64I8x16GtS) \
+ V(I8x16GeS, kMips64I8x16GeS) \
+ V(I8x16GtU, kMips64I8x16GtU) \
+ V(I8x16GeU, kMips64I8x16GeU) \
+ V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
+ V(S128And, kMips64S128And) \
+ V(S128Or, kMips64S128Or) \
+ V(S128Xor, kMips64S128Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
}
-void InstructionSelector::VisitS1x4Zero(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x8Zero(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x16Zero(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitF32x4Splat(Node* node) {
- VisitRR(this, kMips64F32x4Splat, node);
-}
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
- VisitRRI(this, kMips64F32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64F32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
- VisitRR(this, kMips64F32x4SConvertI32x4, node);
-}
-
-void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- VisitRR(this, kMips64F32x4UConvertI32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Mul(Node* node) {
- VisitRRR(this, kMips64I32x4Mul, node);
-}
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) {
- VisitRRR(this, kMips64I32x4MaxS, node);
-}
-
-void InstructionSelector::VisitI32x4MinS(Node* node) {
- VisitRRR(this, kMips64I32x4MinS, node);
-}
-
-void InstructionSelector::VisitI32x4Eq(Node* node) {
- VisitRRR(this, kMips64I32x4Eq, node);
-}
-
-void InstructionSelector::VisitI32x4Ne(Node* node) {
- VisitRRR(this, kMips64I32x4Ne, node);
-}
-
-void InstructionSelector::VisitI32x4Shl(Node* node) {
- VisitRRI(this, kMips64I32x4Shl, node);
-}
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) {
- VisitRRI(this, kMips64I32x4ShrS, node);
-}
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) {
- VisitRRI(this, kMips64I32x4ShrU, node);
-}
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) {
- VisitRRR(this, kMips64I32x4MaxU, node);
-}
-
-void InstructionSelector::VisitI32x4MinU(Node* node) {
- VisitRRR(this, kMips64I32x4MinU, node);
-}
-
-void InstructionSelector::VisitS32x4Select(Node* node) {
- VisitRRRR(this, kMips64S32x4Select, node);
-}
-
-void InstructionSelector::VisitF32x4Abs(Node* node) {
- VisitRR(this, kMips64F32x4Abs, node);
-}
-
-void InstructionSelector::VisitF32x4Neg(Node* node) {
- VisitRR(this, kMips64F32x4Neg, node);
-}
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
- VisitRR(this, kMips64F32x4RecipApprox, node);
-}
-
-void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
- VisitRR(this, kMips64F32x4RecipSqrtApprox, node);
-}
-
-void InstructionSelector::VisitF32x4Add(Node* node) {
- VisitRRR(this, kMips64F32x4Add, node);
-}
-
-void InstructionSelector::VisitF32x4Sub(Node* node) {
- VisitRRR(this, kMips64F32x4Sub, node);
-}
-
-void InstructionSelector::VisitF32x4Mul(Node* node) {
- VisitRRR(this, kMips64F32x4Mul, node);
-}
-
-void InstructionSelector::VisitF32x4Max(Node* node) {
- VisitRRR(this, kMips64F32x4Max, node);
-}
-
-void InstructionSelector::VisitF32x4Min(Node* node) {
- VisitRRR(this, kMips64F32x4Min, node);
-}
-
-void InstructionSelector::VisitF32x4Eq(Node* node) {
- VisitRRR(this, kMips64F32x4Eq, node);
-}
-
-void InstructionSelector::VisitF32x4Ne(Node* node) {
- VisitRRR(this, kMips64F32x4Ne, node);
-}
-
-void InstructionSelector::VisitF32x4Lt(Node* node) {
- VisitRRR(this, kMips64F32x4Lt, node);
-}
-
-void InstructionSelector::VisitF32x4Le(Node* node) {
- VisitRRR(this, kMips64F32x4Le, node);
-}
-
-void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRR(this, kMips64I32x4SConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
- VisitRR(this, kMips64I32x4UConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Neg(Node* node) {
- VisitRR(this, kMips64I32x4Neg, node);
-}
-
-void InstructionSelector::VisitI32x4LtS(Node* node) {
- VisitRRR(this, kMips64I32x4LtS, node);
-}
-
-void InstructionSelector::VisitI32x4LeS(Node* node) {
- VisitRRR(this, kMips64I32x4LeS, node);
-}
-
-void InstructionSelector::VisitI32x4LtU(Node* node) {
- VisitRRR(this, kMips64I32x4LtU, node);
-}
-
-void InstructionSelector::VisitI32x4LeU(Node* node) {
- VisitRRR(this, kMips64I32x4LeU, node);
-}
-
-void InstructionSelector::VisitI16x8Splat(Node* node) {
- VisitRR(this, kMips64I16x8Splat, node);
-}
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
- VisitRRI(this, kMips64I16x8ExtractLane, node);
-}
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64I16x8ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI16x8Neg(Node* node) {
- VisitRR(this, kMips64I16x8Neg, node);
-}
-
-void InstructionSelector::VisitI16x8Shl(Node* node) {
- VisitRRI(this, kMips64I16x8Shl, node);
-}
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) {
- VisitRRI(this, kMips64I16x8ShrS, node);
-}
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) {
- VisitRRI(this, kMips64I16x8ShrU, node);
-}
-
-void InstructionSelector::VisitI16x8Add(Node* node) {
- VisitRRR(this, kMips64I16x8Add, node);
-}
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- VisitRRR(this, kMips64I16x8AddSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) {
- VisitRRR(this, kMips64I16x8Sub, node);
-}
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- VisitRRR(this, kMips64I16x8SubSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Mul(Node* node) {
- VisitRRR(this, kMips64I16x8Mul, node);
-}
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) {
- VisitRRR(this, kMips64I16x8MaxS, node);
-}
-
-void InstructionSelector::VisitI16x8MinS(Node* node) {
- VisitRRR(this, kMips64I16x8MinS, node);
-}
-
-void InstructionSelector::VisitI16x8Eq(Node* node) {
- VisitRRR(this, kMips64I16x8Eq, node);
-}
-
-void InstructionSelector::VisitI16x8Ne(Node* node) {
- VisitRRR(this, kMips64I16x8Ne, node);
-}
-
-void InstructionSelector::VisitI16x8LtS(Node* node) {
- VisitRRR(this, kMips64I16x8LtS, node);
-}
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kMips64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
-void InstructionSelector::VisitI16x8LeS(Node* node) {
- VisitRRR(this, kMips64I16x8LeS, node);
-}
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kMips64##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- VisitRRR(this, kMips64I16x8AddSaturateU, node);
-}
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- VisitRRR(this, kMips64I16x8SubSaturateU, node);
-}
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
-void InstructionSelector::VisitI16x8MaxU(Node* node) {
- VisitRRR(this, kMips64I16x8MaxU, node);
-}
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kMips64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
-void InstructionSelector::VisitI16x8MinU(Node* node) {
- VisitRRR(this, kMips64I16x8MinU, node);
-}
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
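
The rewrite above replaces dozens of hand-written Visit* methods with X-macro lists (SIMD_TYPE_LIST, SIMD_UNOP_LIST, SIMD_SHIFT_OP_LIST, SIMD_BINOP_LIST) and per-shape visitor macros that are expanded once and then #undef'd. Isolated from V8, the pattern looks like this minimal sketch:

#include <cstdio>

// An X-macro list: each entry names an operation and a mnemonic.
#define BINOP_LIST(V) \
  V(Add, "add")       \
  V(Sub, "sub")       \
  V(Mul, "mul")

// Expanding the list once generates one function per entry...
#define DEFINE_VISIT(Name, mnemonic)            \
  void Visit##Name(int a, int b) {              \
    std::printf("%s %d, %d\n", mnemonic, a, b); \
  }
BINOP_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT

int main() {
  // ...so VisitAdd, VisitSub and VisitMul all exist without being written
  // out by hand, mirroring SIMD_VISIT_BINOP / SIMD_BINOP_LIST in the diff.
  VisitAdd(1, 2);
  VisitMul(3, 4);
  return 0;
}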
-void InstructionSelector::VisitI16x8LtU(Node* node) {
- VisitRRR(this, kMips64I16x8LtU, node);
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kMips64S128Select, node);
}
-void InstructionSelector::VisitI16x8LeU(Node* node) {
- VisitRRR(this, kMips64I16x8LeU, node);
-}
+namespace {
-void InstructionSelector::VisitI8x16Splat(Node* node) {
- VisitRR(this, kMips64I8x16Splat, node);
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ static const int kLanes = 4;
+ static const int kLaneSize = 4;
+ for (int i = 0; i < kLanes; ++i) {
+ if (shuffle[i * kLaneSize] % kLaneSize != 0) return false;
+ for (int j = 1; j < kLaneSize; ++j) {
+ if (shuffle[i * kLaneSize + j] - shuffle[i * kLaneSize + j - 1] != 1)
+ return false;
+ }
+ shuffle32x4[i] = shuffle[i * kLaneSize] / kLaneSize;
+ }
+ return true;
}
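
TryMatch32x4Shuffle above recognizes byte shuffles in which every group of four byte indices is a word-aligned, consecutive run, and rewrites them as a four-lane word shuffle. A standalone copy of that matcher with a usage example; the constant 4x4 layout stands in for kSimd128Size:

#include <cassert>
#include <cstdint>

// Same logic as TryMatch32x4Shuffle in the diff: every 4-byte group must
// start on a word boundary and be consecutive; the group start / 4 gives
// the word-lane index.
bool Match32x4(const uint8_t* shuffle, uint8_t* shuffle32x4) {
  for (int i = 0; i < 4; ++i) {
    if (shuffle[i * 4] % 4 != 0) return false;
    for (int j = 1; j < 4; ++j) {
      if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
    }
    shuffle32x4[i] = shuffle[i * 4] / 4;
  }
  return true;
}

int main() {
  // Swap the two words in each half: bytes {4..7, 0..3, 12..15, 8..11}.
  const uint8_t bytes[16] = {4, 5, 6, 7, 0,  1,  2,  3,
                             12, 13, 14, 15, 8, 9, 10, 11};
  uint8_t words[4];
  assert(Match32x4(bytes, words));
  assert(words[0] == 1 && words[1] == 0 && words[2] == 3 && words[3] == 2);
  return 0;
}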
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
- VisitRRI(this, kMips64I8x16ExtractLane, node);
+// Tries to match byte shuffle to concatenate (sldi) operation.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
+ uint8_t start = shuffle[0];
+ for (int i = 1; i < kSimd128Size - start; ++i) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = kSimd128Size;
+ for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *offset = start;
+ return true;
}
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64I8x16ReplaceLane, node);
-}
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
-void InstructionSelector::VisitI8x16Neg(Node* node) {
- VisitRR(this, kMips64I8x16Neg, node);
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kMips64S32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMips64S32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kMips64S32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kMips64S32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kMips64S32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMips64S32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kMips64S16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kMips64S16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kMips64S16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kMips64S16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kMips64S16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kMips64S16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kMips64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kMips64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kMips64S8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kMips64S8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kMips64S8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kMips64S8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kMips64S8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kMips64S8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kMips64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kMips64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kMips64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
}
-void InstructionSelector::VisitI8x16Shl(Node* node) {
- VisitRRI(this, kMips64I8x16Shl, node);
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ static const int kUnaryShuffleMask = kSimd128Size - 1;
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = 0xff;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = kUnaryShuffleMask;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < kSimd128Size; i++) {
+ if (shuffle[i] < kSimd128Size) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = kUnaryShuffleMask;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = kUnaryShuffleMask;
+ }
+ }
+ return mask;
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- VisitRRI(this, kMips64I8x16ShrS, node);
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; --i) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
}
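
Pack4Lanes above folds four (masked) lane indices into a single immediate, least-significant byte first, which is how the generic S32x4Shuffle / S8x16Shuffle emissions below pass the shuffle pattern to the code generator. A quick standalone check of the packing order:

#include <cassert>
#include <cstdint>

// Same packing as Pack4Lanes in the diff: shuffle[0] ends up in the low
// byte of the result, shuffle[3] in the high byte.
int32_t Pack4(const uint8_t* shuffle, uint8_t mask) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i] & mask;
  }
  return result;
}

int main() {
  const uint8_t lanes[4] = {0, 1, 2, 3};
  assert(Pack4(lanes, 0xff) == 0x03020100);
  return 0;
}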
-void InstructionSelector::VisitS16x8Select(Node* node) {
- VisitRRRR(this, kMips64S16x8Select, node);
-}
+} // namespace
-void InstructionSelector::VisitS8x16Select(Node* node) {
- VisitRRRR(this, kMips64S8x16Select, node);
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ uint8_t offset;
+ Mips64OperandGenerator g(this);
+ if (TryMatchConcat(shuffle, mask, &offset)) {
+ Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(offset));
+ return;
+ }
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ return;
+ }
+ Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
// static
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index b62a8ccb4f..f4ef3273e6 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -105,7 +105,7 @@ class OperandSet {
}
static bool HasMixedFPReps(int reps) {
- return reps && !base::bits::IsPowerOfTwo32(reps);
+ return reps && !base::bits::IsPowerOfTwo(reps);
}
ZoneVector<InstructionOperand>* set_;
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 550317d248..d1eecfe9fd 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -175,8 +175,7 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
return false;
}
Double value = Double(this->Value());
- return !value.IsInfinite() &&
- base::bits::IsPowerOfTwo64(value.Significand());
+ return !value.IsInfinite() && base::bits::IsPowerOfTwo(value.Significand());
}
};
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 02ab2ce044..55755649bc 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -138,7 +138,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
enum InferReceiverMapsResult {
kNoReceiverMaps, // No receiver maps inferred.
kReliableReceiverMaps, // Receiver maps can be trusted.
- kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
+ kUnreliableReceiverMaps // Receiver maps might have changed (side-effect),
+ // but instance type is reliable.
};
static InferReceiverMapsResult InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index ce152b1512..c829a39e37 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -106,6 +106,7 @@
JS_COMPARE_BINOP_LIST(V) \
JS_BITWISE_BINOP_LIST(V) \
JS_ARITH_BINOP_LIST(V) \
+ V(JSHasInPrototypeChain) \
V(JSInstanceOf) \
V(JSOrdinaryHasInstance)
@@ -116,7 +117,8 @@
V(JSToName) \
V(JSToNumber) \
V(JSToObject) \
- V(JSToString)
+ V(JSToString) \
+ V(JSToPrimitiveToString)
#define JS_OTHER_UNOP_LIST(V) \
V(JSClassOf) \
@@ -161,9 +163,11 @@
#define JS_OTHER_OP_LIST(V) \
V(JSConstructForwardVarargs) \
V(JSConstruct) \
+ V(JSConstructWithArrayLike) \
V(JSConstructWithSpread) \
V(JSCallForwardVarargs) \
V(JSCall) \
+ V(JSCallWithArrayLike) \
V(JSCallWithSpread) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
@@ -177,6 +181,7 @@
V(JSGeneratorRestoreContinuation) \
V(JSGeneratorRestoreRegister) \
V(JSStackCheck) \
+ V(JSStringConcat) \
V(JSDebugger)
#define JS_OP_LIST(V) \
@@ -311,9 +316,12 @@
V(BooleanNot) \
V(StringCharAt) \
V(StringCharCodeAt) \
+ V(SeqStringCharCodeAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
V(StringIndexOf) \
+ V(StringToLowerCaseIntl) \
+ V(StringToUpperCaseIntl) \
V(CheckBounds) \
V(CheckIf) \
V(CheckMaps) \
@@ -321,10 +329,13 @@
V(CheckInternalizedString) \
V(CheckReceiver) \
V(CheckString) \
+ V(CheckSeqString) \
+ V(CheckNonEmptyString) \
+ V(CheckSymbol) \
V(CheckSmi) \
V(CheckHeapObject) \
V(CheckFloat64Hole) \
- V(CheckTaggedHole) \
+ V(CheckNotTaggedHole) \
V(ConvertTaggedHoleToUndefined) \
V(Allocate) \
V(LoadField) \
@@ -335,6 +346,7 @@
V(StoreBuffer) \
V(StoreElement) \
V(StoreTypedElement) \
+ V(TransitionAndStoreElement) \
V(ObjectIsDetectableCallable) \
V(ObjectIsNaN) \
V(ObjectIsNonCallable) \
@@ -350,7 +362,9 @@
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
- V(TransitionElementsKind)
+ V(TransitionElementsKind) \
+ V(LookupHashStorageIndex) \
+ V(LoadHashMapValue)
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
@@ -693,31 +707,12 @@
V(S128And) \
V(S128Or) \
V(S128Xor) \
- V(S32x4Shuffle) \
- V(S32x4Select) \
- V(S16x8Shuffle) \
- V(S16x8Select) \
+ V(S128Select) \
V(S8x16Shuffle) \
- V(S8x16Select) \
- V(S1x4Zero) \
- V(S1x4And) \
- V(S1x4Or) \
- V(S1x4Xor) \
- V(S1x4Not) \
V(S1x4AnyTrue) \
V(S1x4AllTrue) \
- V(S1x8Zero) \
- V(S1x8And) \
- V(S1x8Or) \
- V(S1x8Xor) \
- V(S1x8Not) \
V(S1x8AnyTrue) \
V(S1x8AllTrue) \
- V(S1x16Zero) \
- V(S1x16And) \
- V(S1x16Or) \
- V(S1x16Xor) \
- V(S1x16Not) \
V(S1x16AnyTrue) \
V(S1x16AllTrue)
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 35b24d8531..5a956dd9af 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -62,6 +62,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSLessThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSHasInPrototypeChain:
case IrOpcode::kJSInstanceOf:
case IrOpcode::kJSOrdinaryHasInstance:
@@ -94,16 +95,20 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
// Call operations
case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithArrayLike:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSCallForwardVarargs:
case IrOpcode::kJSCall:
+ case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
// Misc operations
+ case IrOpcode::kJSStringConcat:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index e43cd5cdb0..2da48ca887 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -24,20 +24,16 @@ V8_INLINE N CheckRange(size_t val) {
} // namespace
-
-// static
-STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
-
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out)
- : opcode_(opcode),
+ : mnemonic_(mnemonic),
+ opcode_(opcode),
properties_(properties),
- mnemonic_(mnemonic),
value_in_(CheckRange<uint32_t>(value_in)),
effect_in_(CheckRange<uint16_t>(effect_in)),
control_in_(CheckRange<uint16_t>(control_in)),
- value_out_(CheckRange<uint16_t>(value_out)),
+ value_out_(CheckRange<uint32_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
control_out_(CheckRange<uint32_t>(control_out)) {}
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index dea94f0906..99e8461c86 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -95,9 +95,6 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Properties properties() const { return properties_; }
- // TODO(bmeurer): Use bit fields below?
- static const size_t kMaxControlOutputCount = (1u << 16) - 1;
-
// TODO(titzer): convert return values here to size_t.
int ValueInputCount() const { return value_in_; }
int EffectInputCount() const { return effect_in_; }
@@ -136,13 +133,13 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const;
private:
+ const char* mnemonic_;
Opcode opcode_;
Properties properties_;
- const char* mnemonic_;
uint32_t value_in_;
uint16_t effect_in_;
uint16_t control_in_;
- uint16_t value_out_;
+ uint32_t value_out_;
uint8_t effect_out_;
uint32_t control_out_;
diff --git a/deps/v8/src/compiler/osr.h b/deps/v8/src/compiler/osr.h
index 1f562c56bf..075a9774a7 100644
--- a/deps/v8/src/compiler/osr.h
+++ b/deps/v8/src/compiler/osr.h
@@ -92,10 +92,6 @@ class Linkage;
class OsrHelper {
public:
explicit OsrHelper(CompilationInfo* info);
- // Only for testing.
- OsrHelper(size_t parameter_count, size_t stack_slot_count)
- : parameter_count_(parameter_count),
- stack_slot_count_(stack_slot_count) {}
// Deconstructs the artificial {OsrNormalEntry} and rewrites the graph so
// that only the path corresponding to {OsrLoopEntry} remains.
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 2b6ffe418c..99ef25f457 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -8,6 +8,8 @@
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-stats.h"
#include "src/isolate.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index bc8fd0cbe9..0bb242716f 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -9,6 +9,7 @@
#include <sstream>
#include "src/base/adapters.h"
+#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -17,6 +18,7 @@
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/check-elimination.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
@@ -62,7 +64,6 @@
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
-#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
@@ -73,8 +74,8 @@
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/type-info.h"
#include "src/utils.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -95,6 +96,8 @@ class PipelineData {
graph_zone_(graph_zone_scope_.zone()),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
@@ -112,7 +115,7 @@ class PipelineData {
is_asm_ = info->shared_info()->asm_function();
}
- // For WASM compile entry point.
+ // For WebAssembly compile entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
@@ -132,6 +135,8 @@ class PipelineData {
jsgraph_(jsgraph),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
protected_instructions_(protected_instructions) {
@@ -152,6 +157,8 @@ class PipelineData {
schedule_(schedule),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
is_asm_ = false;
@@ -167,6 +174,8 @@ class PipelineData {
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(sequence->zone()),
sequence_(sequence),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
is_asm_ =
@@ -178,6 +187,7 @@ class PipelineData {
code_generator_ = nullptr;
DeleteRegisterAllocationZone();
DeleteInstructionZone();
+ DeleteCodegenZone();
DeleteGraphZone();
}
@@ -185,6 +195,7 @@ class PipelineData {
CompilationInfo* info() const { return info_; }
ZoneStats* zone_stats() const { return zone_stats_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
+ OsrHelper* osr_helper() { return &(*osr_helper_); }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
@@ -231,6 +242,7 @@ class PipelineData {
void reset_schedule() { schedule_ = nullptr; }
Zone* instruction_zone() const { return instruction_zone_; }
+ Zone* codegen_zone() const { return codegen_zone_; }
InstructionSequence* sequence() const { return sequence_; }
Frame* frame() const { return frame_; }
@@ -276,6 +288,12 @@ class PipelineData {
instruction_zone_scope_.Destroy();
instruction_zone_ = nullptr;
sequence_ = nullptr;
+ }
+
+ void DeleteCodegenZone() {
+ if (codegen_zone_ == nullptr) return;
+ codegen_zone_scope_.Destroy();
+ codegen_zone_ = nullptr;
frame_ = nullptr;
}
@@ -307,7 +325,7 @@ class PipelineData {
if (descriptor != nullptr) {
fixed_frame_size = descriptor->CalculateFixedFrameSize();
}
- frame_ = new (instruction_zone()) Frame(fixed_frame_size);
+ frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
@@ -318,9 +336,21 @@ class PipelineData {
sequence(), debug_name());
}
+ void InitializeOsrHelper() {
+ DCHECK(!osr_helper_.has_value());
+ osr_helper_.emplace(info());
+ }
+
+ void set_start_source_position(int position) {
+ DCHECK_EQ(start_source_position_, kNoSourcePosition);
+ start_source_position_ = position;
+ }
+
void InitializeCodeGenerator(Linkage* linkage) {
DCHECK_NULL(code_generator_);
- code_generator_ = new CodeGenerator(frame(), linkage, sequence(), info());
+ code_generator_ =
+ new CodeGenerator(codegen_zone(), frame(), linkage, sequence(), info(),
+ osr_helper_, start_source_position_);
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -347,6 +377,8 @@ class PipelineData {
bool compilation_failed_ = false;
bool verify_graph_ = false;
bool is_asm_ = false;
+ int start_source_position_ = kNoSourcePosition;
+ base::Optional<OsrHelper> osr_helper_;
Handle<Code> code_ = Handle<Code>::null();
CodeGenerator* code_generator_ = nullptr;
@@ -365,15 +397,21 @@ class PipelineData {
Schedule* schedule_ = nullptr;
// All objects in the following group of fields are allocated in
- // instruction_zone_. They are all set to nullptr when the instruction_zone_
+ // instruction_zone_. They are all set to nullptr when the instruction_zone_
// is destroyed.
ZoneStats::Scope instruction_zone_scope_;
Zone* instruction_zone_;
InstructionSequence* sequence_ = nullptr;
+
+ // All objects in the following group of fields are allocated in
+ // codegen_zone_. They are all set to nullptr when the codegen_zone_
+ // is destroyed.
+ ZoneStats::Scope codegen_zone_scope_;
+ Zone* codegen_zone_;
Frame* frame_ = nullptr;
// All objects in the following group of fields are allocated in
- // register_allocation_zone_. They are all set to nullptr when the zone is
+ // register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZoneStats::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
@@ -469,6 +507,8 @@ class SourcePositionWrapper final : public Reducer {
: reducer_(reducer), table_(table) {}
~SourcePositionWrapper() final {}
+ const char* reducer_name() const override { return reducer_->reducer_name(); }
+
Reduction Reduce(Node* node) final {
SourcePosition const pos = table_->GetSourcePosition(node);
SourcePositionTable::Scope position(table_, pos);
@@ -576,6 +616,9 @@ class PipelineCompilationJob final : public CompilationJob {
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
+ // Registers weak object to optimized code dependencies.
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+
private:
std::unique_ptr<ParseInfo> parse_info_;
ZoneStats zone_stats_;
@@ -602,9 +645,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
info()->MarkAsLoopPeelingEnabled();
}
}
- if (info()->is_optimizing_from_bytecode() ||
- !info()->shared_info()->asm_function()) {
+ if (info()->is_optimizing_from_bytecode()) {
info()->MarkAsDeoptimizationEnabled();
+ if (FLAG_turbo_inlining) {
+ info()->MarkAsInliningEnabled();
+ }
if (FLAG_inline_accessors) {
info()->MarkAsAccessorInliningEnabled();
}
@@ -612,13 +657,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
isolate()->heap()->one_closure_cell_map()) {
info()->MarkAsFunctionContextSpecializing();
}
- }
- if (!info()->is_optimizing_from_bytecode()) {
- if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
- } else if (FLAG_turbo_inlining) {
info()->MarkAsInliningEnabled();
}
+ data_.set_start_source_position(info()->shared_info()->start_position());
+
linkage_ = new (info()->zone())
Linkage(Linkage::ComputeIncoming(info()->zone(), info()));
@@ -627,6 +670,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
return AbortOptimization(kGraphBuildingFailed);
}
+ if (info()->is_osr()) data_.InitializeOsrHelper();
+
// Make sure that we have generated the maximal number of deopt entries.
// This is in order to avoid triggering the generation of deopt entries later
// during code assembly.
@@ -637,11 +682,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+ pipeline_.AssembleCode(linkage_);
return SUCCEEDED;
}
PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
- pipeline_.AssembleCode(linkage_);
Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
@@ -658,13 +703,70 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
return SUCCEEDED;
}
+namespace {
+
+void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
+ Handle<Code> code) {
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ Heap* heap = isolate->heap();
+ if (heap->InNewSpace(*object)) {
+ heap->AddWeakNewSpaceObjectToCodeDependency(object, cell);
+ } else {
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep =
+ DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+ }
+}
+
+} // namespace
+
+void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
+ Handle<Code> code) {
+ DCHECK(code->is_optimized_code());
+ std::vector<Handle<Map>> maps;
+ std::vector<Handle<HeapObject>> objects;
+ {
+ DisallowHeapAllocation no_gc;
+ int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
+ objects.push_back(handle(it.rinfo()->target_cell(), isolate()));
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ code->IsWeakObjectInOptimizedCode(
+ it.rinfo()->target_object())) {
+ Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
+ isolate());
+ if (object->IsMap()) {
+ maps.push_back(Handle<Map>::cast(object));
+ } else {
+ objects.push_back(object);
+ }
+ }
+ }
+ }
+ for (Handle<Map> map : maps) {
+ if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
+ isolate()->heap()->AddRetainedMap(map);
+ }
+ Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
+ }
+ for (Handle<HeapObject> object : objects) {
+ AddWeakObjectToCodeDependency(isolate(), object, code);
+ }
+ code->set_can_have_weak_objects(true);
+}
+
class PipelineWasmCompilationJob final : public CompilationJob {
public:
explicit PipelineWasmCompilationJob(
CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
- bool allow_signalling_nan)
+ wasm::ModuleOrigin wasm_origin)
: CompilationJob(info->isolate(), info, "TurboFan",
State::kReadyToExecute),
zone_stats_(info->isolate()->allocator()),
@@ -673,7 +775,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
source_positions, protected_insts),
pipeline_(&data_),
linkage_(descriptor),
- allow_signalling_nan_(allow_signalling_nan) {}
+ wasm_origin_(wasm_origin) {}
protected:
Status PrepareJobImpl() final;
@@ -688,7 +790,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
- bool allow_signalling_nan_;
+ wasm::ModuleOrigin wasm_origin_;
};
PipelineWasmCompilationJob::Status
@@ -706,15 +808,15 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
pipeline_.RunPrintAndVerify("Machine", true);
- if (FLAG_wasm_opt) {
+ if (FLAG_wasm_opt || wasm_origin_ == wasm::ModuleOrigin::kAsmJsOrigin) {
PipelineData* data = &data_;
- PipelineRunScope scope(data, "WASM optimization");
+ PipelineRunScope scope(data, "Wasm optimization");
JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
- MachineOperatorReducer machine_reducer(data->jsgraph(),
- allow_signalling_nan_);
+ MachineOperatorReducer machine_reducer(
+ data->jsgraph(), wasm_origin_ == wasm::ModuleOrigin::kAsmJsOrigin);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -726,6 +828,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
+ pipeline_.AssembleCode(&linkage_);
return SUCCEEDED;
}
@@ -735,7 +838,6 @@ size_t PipelineWasmCompilationJob::AllocatedMemory() const {
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::FinalizeJobImpl() {
- pipeline_.AssembleCode(&linkage_);
pipeline_.FinalizeCode();
return SUCCEEDED;
}
@@ -778,10 +880,8 @@ struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
void Run(PipelineData* data, Zone* temp_zone) {
- bool succeeded = false;
-
if (data->info()->is_optimizing_from_bytecode()) {
- // Bytecode graph builder assumes deoptimziation is enabled.
+ // Bytecode graph builder assumes deoptimization is enabled.
DCHECK(data->info()->is_deoptimization_enabled());
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
if (data->info()->is_bailout_on_uninitialized()) {
@@ -792,16 +892,16 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector()),
data->info()->osr_ast_id(), data->jsgraph(), CallFrequency(1.0f),
data->source_positions(), SourcePosition::kNotInlined, flags);
- succeeded = graph_builder.CreateGraph();
+ graph_builder.CreateGraph();
} else {
+ // AST-based graph builder assumes deoptimization is disabled.
+ DCHECK(!data->info()->is_deoptimization_enabled());
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), CallFrequency(1.0f),
data->loop_assignment(), data->source_positions());
- succeeded = graph_builder.CreateGraph();
- }
-
- if (!succeeded) {
- data->set_compilation_failed();
+ if (!graph_builder.CreateGraph()) {
+ data->set_compilation_failed();
+ }
}
}
};
@@ -841,7 +941,11 @@ struct InliningPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ CheckElimination check_elimination(&graph_reducer, data->jsgraph());
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+ data->info()->is_bailout_on_uninitialized()
+ ? JSCallReducer::kBailoutOnUninitialized
+ : JSCallReducer::kNoFlags,
data->native_context(),
data->info()->dependencies());
JSContextSpecialization context_specialization(
@@ -875,6 +979,7 @@ struct InliningPhase {
: JSIntrinsicLowering::kDeoptimizationDisabled);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
+ AddReducer(data, &graph_reducer, &check_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
@@ -912,6 +1017,7 @@ struct UntyperPhase {
void Run(PipelineData* data, Zone* temp_zone) {
class RemoveTypeReducer final : public Reducer {
public:
+ const char* reducer_name() const override { return "RemoveTypeReducer"; }
Reduction Reduce(Node* node) final {
if (NodeProperties::IsTyped(node)) {
NodeProperties::RemoveType(node);
@@ -943,6 +1049,7 @@ struct OsrDeconstructionPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
+ // TODO(neis): Use data->osr_helper() here once AST graph builder is gone.
OsrHelper osr_helper(data->info());
osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
}
@@ -1062,6 +1169,10 @@ struct ConcurrentOptimizationPrepPhase {
data->jsgraph()->CEntryStubConstant(2);
data->jsgraph()->CEntryStubConstant(3);
+ // TODO(turbofan): Remove this line once the Array constructor code
+ // is a proper builtin and no longer a CodeStub.
+ data->jsgraph()->ArrayConstructorStubConstant();
+
// This is needed for escape analysis.
NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
@@ -1234,13 +1345,11 @@ struct LateOptimizationPhase {
data->common(), data->machine());
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
- TailCallOptimization tco(data->common(), data->graph());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &select_lowering);
- AddReducer(data, &graph_reducer, &tco);
AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
@@ -1815,10 +1924,10 @@ CompilationJob* Pipeline::NewWasmCompilationJob(
CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
- bool allow_signalling_nan) {
- return new PipelineWasmCompilationJob(
- info, jsgraph, descriptor, source_positions, protected_instructions,
- allow_signalling_nan);
+ wasm::ModuleOrigin wasm_origin) {
+ return new PipelineWasmCompilationJob(info, jsgraph, descriptor,
+ source_positions,
+ protected_instructions, wasm_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -1936,6 +2045,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
data->BeginPhaseKind("code generation");
data->InitializeCodeGenerator(linkage);
Run<AssembleCodePhase>();
+ data->DeleteInstructionZone();
}
Handle<Code> PipelineImpl::FinalizeCode() {
@@ -2012,11 +2122,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
#endif
data->InitializeRegisterAllocationData(config, descriptor);
- if (info()->is_osr()) {
- AllowHandleDereference allow_deref;
- OsrHelper osr_helper(info());
- osr_helper.SetupFrame(data->frame());
- }
+ if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
@@ -2048,6 +2154,14 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
+
+ // TODO(chromium:725559): remove this check once
+ // we understand the cause of the bug. We keep just the
+ // check at the end of the allocation.
+ if (verifier != nullptr) {
+ verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
+ }
+
Run<PopulateReferenceMapsPhase>();
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
@@ -2066,7 +2180,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
}
if (verifier != nullptr) {
- verifier->VerifyAssignment();
+ verifier->VerifyAssignment("End of regalloc pipeline.");
verifier->VerifyGapMoves();
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 624ef01ead..8748e3389a 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -22,6 +22,10 @@ namespace trap_handler {
struct ProtectedInstructionData;
} // namespace trap_handler
+namespace wasm {
+enum ModuleOrigin : uint8_t;
+} // namespace wasm
+
namespace compiler {
class CallDescriptor;
@@ -43,7 +47,7 @@ class Pipeline : public AllStatic {
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>*
protected_instructions,
- bool wasm_origin);
+ wasm::ModuleOrigin wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index be10a67f24..fe7d3ab40d 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -9,14 +9,14 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/double.h"
#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
#define kScratchReg r11
@@ -40,7 +40,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return LeaveRC;
}
UNREACHABLE();
- return LeaveRC;
}
bool CompareLogical() const {
@@ -54,7 +53,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return false;
}
UNREACHABLE();
- return false;
}
Operand InputImmediate(size_t index) {
@@ -63,11 +61,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
return Operand(constant.ToInt64());
@@ -78,7 +74,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand::Zero();
}
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
@@ -95,7 +90,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
- return MemOperand(r0);
}
MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
@@ -128,8 +122,8 @@ class OutOfLineLoadNAN32 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
- kScratchReg);
+ __ LoadDoubleLiteral(
+ result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
}
private:
@@ -143,8 +137,8 @@ class OutOfLineLoadNAN64 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
- kScratchReg);
+ __ LoadDoubleLiteral(
+ result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
}
private:
@@ -177,7 +171,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -190,7 +185,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -209,8 +205,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ mflr(scratch1_);
__ Push(scratch1_);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ addi(scratch1_, object_, Operand(offset_immediate_));
} else {
@@ -218,10 +212,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, offset_);
}
if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- __ CallStub(&stub);
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
} else {
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
}
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
@@ -239,6 +237,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
@@ -293,7 +292,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
} // namespace
@@ -431,28 +429,27 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
i.OutputRCBit()); \
} while (0)
-
-#define ASSEMBLE_FLOAT_MODULO() \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
- __ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
- 0, 2); \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
@@ -462,12 +459,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
@@ -845,20 +842,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void FlushPendingPushRegisters(MacroAssembler* masm,
+void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- masm->Push((*pending_pushes)[0]);
+ tasm->Push((*pending_pushes)[0]);
break;
case 2:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -869,18 +866,18 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
-void AddPendingPushRegister(MacroAssembler* masm,
+void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
- FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
- MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -888,15 +885,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -919,20 +916,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- masm(), frame_access_state(),
+ tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ LoadP(ip, g.SlotToMemOperand(source_location.index()));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types is not supported.
@@ -940,15 +937,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
- FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -962,15 +959,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
switch (opcode) {
case kArchCallCodeObject: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
} else {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -991,9 +987,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
@@ -1009,7 +1004,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -1122,7 +1117,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -1383,8 +1379,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
LeaveOE, i.OutputRCBit());
} else {
- __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ if (is_int16(i.InputImmediate(1).immediate())) {
+ __ subi(i.OutputRegister(), i.InputRegister(0),
+ i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ } else {
+ __ mov(kScratchReg, i.InputImmediate(1));
+ __ sub(i.OutputRegister(), i.InputRegister(0), kScratchReg, LeaveOE,
+ i.OutputRCBit());
+ }
}
#if V8_TARGET_ARCH_PPC64
}
@@ -1556,8 +1559,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3);
break;
}
@@ -2079,14 +2082,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2219,12 +2222,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2296,7 +2299,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
@@ -2424,12 +2427,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
@@ -2455,31 +2456,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- double value;
+ Double value;
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// casting double precision snan to single precision
// converts it to qnan on ia32/x64
if (src.type() == Constant::kFloat32) {
- int32_t val = src.ToFloat32AsInt();
+ uint32_t val = src.ToFloat32AsInt();
if ((val & 0x7f800000) == 0x7f800000) {
- int64_t dval = static_cast<int64_t>(val);
+ uint64_t dval = static_cast<uint64_t>(val);
dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
- value = bit_cast<double, int64_t>(dval);
+ value = Double(dval);
} else {
- value = src.ToFloat32();
+ value = Double(static_cast<double>(src.ToFloat32()));
}
} else {
- int64_t val = src.ToFloat64AsInt();
- if ((val & 0x7f80000000000000) == 0x7f80000000000000) {
- value = bit_cast<double, int64_t>(val);
- } else {
- value = src.ToFloat64();
- }
+ value = Double(src.ToFloat64());
}
#else
- value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
+ value = src.type() == Constant::kFloat32
+ ? Double(static_cast<double>(src.ToFloat32()))
+ : Double(src.ToFloat64());
#endif
__ LoadDoubleLiteral(dst, value, kScratchReg);
if (destination->IsFPStackSlot()) {
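For reference, a minimal standalone sketch (not part of the patch, plain C++) of the float32-to-float64 NaN bit expansion used in the hunk above: the sign and top exponent bit move to bits 63..62, bit 30 is replicated into bits 61..60 to widen the exponent field, and the remaining 31 bits shift left by 29. The input 0x7fc00000 is only an example payload, chosen because it is the canonical float32 quiet NaN.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t val = 0x7fc00000u;                 // float32 quiet NaN (example)
  assert((val & 0x7f800000) == 0x7f800000);   // exponent all ones: NaN/Inf
  uint64_t dval = static_cast<uint64_t>(val);
  // Same shifts as in the hunk above.
  dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
         ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
  std::printf("%016llx\n", static_cast<unsigned long long>(dval));
  assert(dval == 0x7ff8000000000000ull);      // canonical float64 quiet NaN
  return 0;
}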
@@ -2611,11 +2608,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index 640a7e439a..2b491f1b80 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -141,7 +141,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index ea88e81a05..ff7cde50fd 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -224,9 +224,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -336,9 +333,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -403,9 +397,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -454,9 +445,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1544,7 +1532,6 @@ static bool CompareLogical(FlagsContinuation* cont) {
return false;
}
UNREACHABLE();
- return false;
}
@@ -1837,6 +1824,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -1844,7 +1832,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
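As an aside, a self-contained sketch (not part of the patch) of the table-vs-lookup switch heuristic shown in the hunk above, including the new kMaxTableSwitchValueRange cap. The lookup_time_cost term is defined outside the hunk, so it is taken as a parameter here, and the sample values in main() are illustrative only.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>

bool UseTableSwitch(size_t case_count, size_t value_range, int32_t min_value,
                    size_t lookup_time_cost) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;  // 131072
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  // A dense 10-case switch easily favors a jump table...
  std::cout << UseTableSwitch(10, 10, 0, 10) << "\n";       // 1
  // ...while a very sparse one (range > 2 << 16) now falls back to lookup.
  std::cout << UseTableSwitch(10, 1 << 20, 0, 10) << "\n";  // 0
  return 0;
}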
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
new file mode 100644
index 0000000000..417f541bca
--- /dev/null
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -0,0 +1,271 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/property-access-builder.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/lookup.h"
+
+#include "src/field-index-inl.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph* PropertyAccessBuilder::graph() const { return jsgraph()->graph(); }
+
+Isolate* PropertyAccessBuilder::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* PropertyAccessBuilder::common() const {
+ return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* PropertyAccessBuilder::simplified() const {
+ return jsgraph()->simplified();
+}
+
+namespace {
+
+bool HasOnlyNumberMaps(MapHandles const& maps) {
+ for (auto map : maps) {
+ if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
+ }
+ return true;
+}
+
+bool HasOnlyStringMaps(MapHandles const& maps) {
+ for (auto map : maps) {
+ if (!map->IsStringMap()) return false;
+ }
+ return true;
+}
+
+bool HasOnlySequentialStringMaps(MapHandles const& maps) {
+ for (auto map : maps) {
+ if (!map->IsStringMap()) return false;
+ if (!StringShape(map->instance_type()).IsSequential()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+bool PropertyAccessBuilder::TryBuildStringCheck(MapHandles const& maps,
+ Node** receiver, Node** effect,
+ Node* control) {
+ if (HasOnlyStringMaps(maps)) {
+ if (HasOnlySequentialStringMaps(maps)) {
+ *receiver = *effect = graph()->NewNode(simplified()->CheckSeqString(),
+ *receiver, *effect, control);
+ } else {
+      // Monomorphic string access (ignoring the fact that there are multiple
+ // String maps).
+ *receiver = *effect = graph()->NewNode(simplified()->CheckString(),
+ *receiver, *effect, control);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool PropertyAccessBuilder::TryBuildNumberCheck(MapHandles const& maps,
+ Node** receiver, Node** effect,
+ Node* control) {
+ if (HasOnlyNumberMaps(maps)) {
+ // Monomorphic number access (we also deal with Smis here).
+ *receiver = *effect = graph()->NewNode(simplified()->CheckNumber(),
+ *receiver, *effect, control);
+ return true;
+ }
+ return false;
+}
+
+Node* PropertyAccessBuilder::BuildCheckHeapObject(Node* receiver, Node** effect,
+ Node* control) {
+ switch (receiver->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSTypeOf: {
+ return receiver;
+ }
+ default: {
+ return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, *effect, control);
+ }
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+void PropertyAccessBuilder::BuildCheckMaps(
+ Node* receiver, Node** effect, Node* control,
+ std::vector<Handle<Map>> const& receiver_maps) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ Handle<Map> receiver_map(m.Value()->map(), isolate());
+ if (receiver_map->is_stable()) {
+ for (Handle<Map> map : receiver_maps) {
+ if (map.is_identical_to(receiver_map)) {
+ dependencies()->AssumeMapStable(receiver_map);
+ return;
+ }
+ }
+ }
+ }
+ ZoneHandleSet<Map> maps;
+ CheckMapsFlags flags = CheckMapsFlag::kNone;
+ for (Handle<Map> map : receiver_maps) {
+ maps.insert(map, graph()->zone());
+ if (map->is_migration_target()) {
+ flags |= CheckMapsFlag::kTryMigrateInstance;
+ }
+ }
+ *effect = graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+ *effect, control);
+}
+
+void PropertyAccessBuilder::AssumePrototypesStable(
+ Handle<Context> native_context,
+ std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
+ // Determine actual holder and perform prototype chain checks.
+ for (auto map : receiver_maps) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context)
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), holder->GetIsolate());
+ }
+ dependencies()->AssumePrototypeMapsStable(map, holder);
+ }
+}
+
+Node* PropertyAccessBuilder::ResolveHolder(
+ PropertyAccessInfo const& access_info, Node* receiver) {
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ return jsgraph()->Constant(holder);
+ }
+ return receiver;
+}
+
+Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
+ Handle<Name> name, PropertyAccessInfo const& access_info, Node* receiver) {
+ // Optimize immutable property loads.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value()->IsJSObject()) {
+ // TODO(ishell): Use something simpler like
+ //
+ // Handle<Object> value =
+ // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
+ // Representation::Tagged(), field_index);
+ //
+ // here, once we have the immutable bit in the access_info.
+
+ // TODO(turbofan): Given that we already have the field_index here, we
+ // might be smarter in the future and not rely on the LookupIterator,
+ // but for now let's just do what Crankshaft does.
+ LookupIterator it(m.Value(), name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.state() == LookupIterator::DATA) {
+      bool is_readonly_non_configurable =
+          it.IsReadOnly() && !it.IsConfigurable();
+      if (is_readonly_non_configurable ||
+          (FLAG_track_constant_fields && access_info.IsDataConstantField())) {
+        Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+        if (!is_readonly_non_configurable) {
+ // It's necessary to add dependency on the map that introduced
+ // the field.
+ DCHECK(access_info.IsDataConstantField());
+ DCHECK(!it.is_dictionary_holder());
+ Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+ dependencies()->AssumeFieldOwner(field_owner_map);
+ }
+ return value;
+ }
+ }
+ }
+ return nullptr;
+}
+
+Node* PropertyAccessBuilder::BuildLoadDataField(
+ Handle<Name> name, PropertyAccessInfo const& access_info, Node* receiver,
+ Node** effect, Node** control) {
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ receiver = ResolveHolder(access_info, receiver);
+ if (Node* value =
+ TryBuildLoadConstantDataField(name, access_info, receiver)) {
+ return value;
+ }
+
+ FieldIndex const field_index = access_info.field_index();
+ Type* const field_type = access_info.field_type();
+ MachineRepresentation const field_representation =
+ access_info.field_representation();
+ Node* storage = receiver;
+ if (!field_index.is_inobject()) {
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+ storage, *effect, *control);
+ }
+ FieldAccess field_access = {
+ kTaggedBase,
+ field_index.offset(),
+ name,
+ MaybeHandle<Map>(),
+ field_type,
+ MachineType::TypeForRepresentation(field_representation),
+ kFullWriteBarrier};
+ if (field_representation == MachineRepresentation::kFloat64) {
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_index.offset(),
+ name,
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(storage_access), storage, *effect, *control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ }
+ } else if (field_representation == MachineRepresentation::kTaggedPointer) {
+ // Remember the map of the field value, if its map is stable. This is
+ // used by the LoadElimination to eliminate map checks on the result.
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ if (field_map->is_stable()) {
+ dependencies()->AssumeMapStable(field_map);
+ field_access.map = field_map;
+ }
+ }
+ }
+ Node* value = *effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, *effect, *control);
+ return value;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
new file mode 100644
index 0000000000..2774423b4c
--- /dev/null
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -0,0 +1,80 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
+#define V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
+
+#include <vector>
+
+#include "src/handles.h"
+#include "src/objects/map.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationDependencies;
+
+namespace compiler {
+
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class Node;
+class PropertyAccessInfo;
+class SimplifiedOperatorBuilder;
+
+class PropertyAccessBuilder {
+ public:
+ PropertyAccessBuilder(JSGraph* jsgraph, CompilationDependencies* dependencies)
+ : jsgraph_(jsgraph), dependencies_(dependencies) {}
+
+ // Builds the appropriate string check if the maps are only string
+ // maps.
+ bool TryBuildStringCheck(MapHandles const& maps, Node** receiver,
+ Node** effect, Node* control);
+ // Builds a number check if all maps are number maps.
+ bool TryBuildNumberCheck(MapHandles const& maps, Node** receiver,
+ Node** effect, Node* control);
+
+ Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
+ void BuildCheckMaps(Node* receiver, Node** effect, Node* control,
+ std::vector<Handle<Map>> const& receiver_maps);
+
+ // Adds stability dependencies on all prototypes of every class in
+ // {receiver_type} up to (and including) the {holder}.
+ void AssumePrototypesStable(Handle<Context> native_context,
+ std::vector<Handle<Map>> const& receiver_maps,
+ Handle<JSObject> holder);
+
+ // Builds the actual load for data-field and data-constant-field
+ // properties (without heap-object or map checks).
+ Node* BuildLoadDataField(Handle<Name> name,
+ PropertyAccessInfo const& access_info,
+ Node* receiver, Node** effect, Node** control);
+
+ private:
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Graph* graph() const;
+ Isolate* isolate() const;
+ CommonOperatorBuilder* common() const;
+ SimplifiedOperatorBuilder* simplified() const;
+
+ Node* TryBuildLoadConstantDataField(Handle<Name> name,
+ PropertyAccessInfo const& access_info,
+ Node* receiver);
+ // Returns a node with the holder for the property access described by
+ // {access_info}.
+ Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
+
+ JSGraph* jsgraph_;
+ CompilationDependencies* dependencies_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 671aafe381..6134f934c7 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -405,9 +405,17 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
}
RawMachineLabel::~RawMachineLabel() {
- // If this DCHECK fails, it means that the label has been bound but it's not
- // used, or the opposite. This would cause the register allocator to crash.
- DCHECK_EQ(bound_, used_);
+#if DEBUG
+ if (bound_ == used_) return;
+ std::stringstream str;
+ if (bound_) {
+ str << "A label has been bound but it's not used."
+ << "\n# label: " << *block_;
+ } else {
+ str << "A label has been used but it's not bound.";
+ }
+ FATAL(str.str().c_str());
+#endif // DEBUG
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 38feb8b751..666cdd4f58 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -27,7 +27,9 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckReceiver:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
- case IrOpcode::kCheckTaggedHole:
+ case IrOpcode::kCheckSeqString:
+ case IrOpcode::kCheckNonEmptyString:
+ case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedInt32Add:
case IrOpcode::kCheckedInt32Sub:
@@ -123,9 +125,11 @@ namespace {
bool IsCompatibleCheck(Node const* a, Node const* b) {
if (a->op() != b->op()) {
- if (a->opcode() == IrOpcode::kCheckInternalizedString &&
- b->opcode() == IrOpcode::kCheckString) {
- // CheckInternalizedString(node) implies CheckString(node)
+ if (b->opcode() == IrOpcode::kCheckString &&
+ (a->opcode() == IrOpcode::kCheckInternalizedString ||
+ a->opcode() == IrOpcode::kCheckSeqString ||
+ a->opcode() == IrOpcode::kCheckNonEmptyString)) {
+ // Check[Internalized,Seq,NonEmpty]String(node) implies CheckString(node)
} else {
return false;
}
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 786c9608df..05094a388e 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -16,6 +16,8 @@ class RedundancyElimination final : public AdvancedReducer {
RedundancyElimination(Editor* editor, Zone* zone);
~RedundancyElimination() final;
+ const char* reducer_name() const override { return "RedundancyElimination"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index d589a9d371..d4614cd6f1 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -18,7 +18,6 @@ size_t OperandCount(const Instruction* instr) {
return instr->InputCount() + instr->OutputCount() + instr->TempCount();
}
-
void VerifyEmptyGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
@@ -28,8 +27,7 @@ void VerifyEmptyGaps(const Instruction* instr) {
}
}
-
-void VerifyAllocatedGaps(const Instruction* instr) {
+void VerifyAllocatedGaps(const Instruction* instr, const char* caller_info) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
@@ -38,8 +36,10 @@ void VerifyAllocatedGaps(const Instruction* instr) {
if (moves == nullptr) continue;
for (const MoveOperands* move : *moves) {
if (move->IsRedundant()) continue;
- CHECK(move->source().IsAllocated() || move->source().IsConstant());
- CHECK(move->destination().IsAllocated());
+ CHECK_WITH_MSG(
+ move->source().IsAllocated() || move->source().IsConstant(),
+ caller_info);
+ CHECK_WITH_MSG(move->destination().IsAllocated(), caller_info);
}
}
}
@@ -114,13 +114,14 @@ void RegisterAllocatorVerifier::VerifyOutput(
constraint.virtual_register_);
}
-void RegisterAllocatorVerifier::VerifyAssignment() {
+void RegisterAllocatorVerifier::VerifyAssignment(const char* caller_info) {
+ caller_info_ = caller_info;
CHECK(sequence()->instructions().size() == constraints()->size());
auto instr_it = sequence()->begin();
for (const auto& instr_constraint : *constraints()) {
const Instruction* instr = instr_constraint.instruction_;
// All gaps should be totally allocated at this point.
- VerifyAllocatedGaps(instr);
+ VerifyAllocatedGaps(instr, caller_info_);
const size_t operand_count = instr_constraint.operand_constaints_size_;
const OperandConstraint* op_constraints =
instr_constraint.operand_constraints_;
@@ -211,12 +212,12 @@ void RegisterAllocatorVerifier::CheckConstraint(
const InstructionOperand* op, const OperandConstraint* constraint) {
switch (constraint->type_) {
case kConstant:
- CHECK(op->IsConstant());
+ CHECK_WITH_MSG(op->IsConstant(), caller_info_);
CHECK_EQ(ConstantOperand::cast(op)->virtual_register(),
constraint->value_);
return;
case kImmediate: {
- CHECK(op->IsImmediate());
+ CHECK_WITH_MSG(op->IsImmediate(), caller_info_);
const ImmediateOperand* imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE
? imm->inline_value()
@@ -225,40 +226,40 @@ void RegisterAllocatorVerifier::CheckConstraint(
return;
}
case kRegister:
- CHECK(op->IsRegister());
+ CHECK_WITH_MSG(op->IsRegister(), caller_info_);
return;
case kFPRegister:
- CHECK(op->IsFPRegister());
+ CHECK_WITH_MSG(op->IsFPRegister(), caller_info_);
return;
case kExplicit:
- CHECK(op->IsExplicit());
+ CHECK_WITH_MSG(op->IsExplicit(), caller_info_);
return;
case kFixedRegister:
case kRegisterAndSlot:
- CHECK(op->IsRegister());
+ CHECK_WITH_MSG(op->IsRegister(), caller_info_);
CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
case kFixedFPRegister:
- CHECK(op->IsFPRegister());
+ CHECK_WITH_MSG(op->IsFPRegister(), caller_info_);
CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
case kFixedSlot:
- CHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ CHECK_WITH_MSG(op->IsStackSlot() || op->IsFPStackSlot(), caller_info_);
CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
return;
case kSlot:
- CHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ CHECK_WITH_MSG(op->IsStackSlot() || op->IsFPStackSlot(), caller_info_);
CHECK_EQ(ElementSizeLog2Of(LocationOperand::cast(op)->representation()),
constraint->value_);
return;
case kNone:
- CHECK(op->IsRegister() || op->IsStackSlot());
+ CHECK_WITH_MSG(op->IsRegister() || op->IsStackSlot(), caller_info_);
return;
case kNoneFP:
- CHECK(op->IsFPRegister() || op->IsFPStackSlot());
+ CHECK_WITH_MSG(op->IsFPRegister() || op->IsFPStackSlot(), caller_info_);
return;
case kSameAsFirst:
- CHECK(false);
+ CHECK_WITH_MSG(false, caller_info_);
return;
}
}
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 989589e6fb..bc2de1a9f1 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -167,7 +167,7 @@ class RegisterAllocatorVerifier final : public ZoneObject {
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence);
- void VerifyAssignment();
+ void VerifyAssignment(const char* caller_info);
void VerifyGapMoves();
private:
@@ -257,6 +257,8 @@ class RegisterAllocatorVerifier final : public ZoneObject {
Constraints constraints_;
ZoneMap<RpoNumber, BlockAssessments*> assessments_;
ZoneMap<RpoNumber, DelayedAssessments*> outstanding_assessments_;
+ // TODO(chromium:725559): remove after we understand this bug's root cause.
+ const char* caller_info_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
};
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index f9c076d951..f5d43761d2 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -88,15 +88,10 @@ int GetByteWidth(MachineRepresentation rep) {
return kDoubleSize;
case MachineRepresentation::kSimd128:
return kSimd128Size;
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
- return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return 0;
}
} // namespace
@@ -320,7 +315,6 @@ bool UsePosition::HintRegister(int* register_code) const {
}
}
UNREACHABLE();
- return false;
}
@@ -344,7 +338,6 @@ UsePositionHintType UsePosition::HintTypeForOperand(
break;
}
UNREACHABLE();
- return UsePositionHintType::kNone;
}
void UsePosition::SetHint(UsePosition* use_pos) {
@@ -1780,7 +1773,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
- cur_input->set_virtual_register(second_output->virtual_register());
+ *cur_input =
+ UnallocatedOperand(*cur_input, second_output->virtual_register());
MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
input_copy, *cur_input);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
@@ -3576,6 +3570,7 @@ void OperandAssigner::CommitAssignment() {
for (LiveRange* range = top_range; range != nullptr;
range = range->next()) {
InstructionOperand assigned = range->GetAssignedOperand();
+ DCHECK(!assigned.IsUnallocated());
range->ConvertUsesToOperand(assigned, spill_operand);
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 7698a90387..308bdfe3a3 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -5,6 +5,7 @@
#ifndef V8_REGISTER_ALLOCATOR_H_
#define V8_REGISTER_ALLOCATOR_H_
+#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/instruction.h"
#include "src/globals.h"
@@ -159,8 +160,8 @@ class LifetimePosition final {
static const int kHalfStep = 2;
static const int kStep = 2 * kHalfStep;
- // Code relies on kStep and kHalfStep being a power of two.
- STATIC_ASSERT(IS_POWER_OF_TWO(kHalfStep));
+ static_assert(base::bits::IsPowerOfTwo(kHalfStep),
+ "Code relies on kStep and kHalfStep being a power of two");
explicit LifetimePosition(int value) : value_(value) {}
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index f15df671cf..eee75bdd6f 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -42,7 +42,6 @@ const char* Truncation::description() const {
}
}
UNREACHABLE();
- return nullptr;
}
@@ -114,7 +113,6 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return rep2 == TruncationKind::kAny;
}
UNREACHABLE();
- return false;
}
// static
@@ -196,14 +194,10 @@ Node* RepresentationChanger::GetRepresentationFor(
DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetWord64RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
return node;
}
UNREACHABLE();
- return nullptr;
}
Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
@@ -677,22 +671,11 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
- } else if (output_rep == MachineRepresentation::kTaggedSigned) {
- if (output_type->Is(Type::Signed32())) {
+ } else if (IsAnyTagged(output_rep)) {
+ if (output_rep == MachineRepresentation::kTaggedSigned &&
+ output_type->Is(Type::SignedSmall())) {
op = simplified()->ChangeTaggedSignedToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- if (use_info.type_check() != TypeCheckKind::kNone) {
- op = simplified()->CheckedTruncateTaggedToWord32();
- } else {
- op = simplified()->TruncateTaggedToWord32();
- }
- } else {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kWord32);
- }
- } else if (output_rep == MachineRepresentation::kTagged ||
- output_rep == MachineRepresentation::kTaggedPointer) {
- if (output_type->Is(Type::Signed32())) {
+ } else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedTaggedSignedToInt32();
@@ -706,8 +689,12 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (use_info.truncation().IsUsedAsWord32()) {
if (output_type->Is(Type::NumberOrOddball())) {
op = simplified()->TruncateTaggedToWord32();
- } else if (use_info.type_check() != TypeCheckKind::kNone) {
- op = simplified()->CheckedTruncateTaggedToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kNumber) {
+ op = simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumber);
+ } else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
+ op = simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumberOrOddball);
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -729,8 +716,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
- } else {
- DCHECK_EQ(TypeCheckKind::kNumberOrOddball, use_info.type_check());
+ } else if (use_info.type_check() == TypeCheckKind::kNumber ||
+ use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
return node;
}
} else if (output_rep == MachineRepresentation::kWord8 ||
@@ -876,7 +863,6 @@ const Operator* RepresentationChanger::Int32OperatorFor(
return machine()->Int32LessThanOrEqual();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -893,7 +879,6 @@ const Operator* RepresentationChanger::Int32OverflowOperatorFor(
return simplified()->CheckedInt32Mod();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -911,7 +896,6 @@ const Operator* RepresentationChanger::TaggedSignedOperatorFor(
: machine()->Word64Equal();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -946,7 +930,6 @@ const Operator* RepresentationChanger::Uint32OperatorFor(
return machine()->Int32Mul();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -959,7 +942,6 @@ const Operator* RepresentationChanger::Uint32OverflowOperatorFor(
return simplified()->CheckedUint32Mod();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -1052,7 +1034,6 @@ const Operator* RepresentationChanger::Float64OperatorFor(
return machine()->Float64SilenceNaN();
default:
UNREACHABLE();
- return nullptr;
}
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index b4f3366d42..bd86cd34db 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -132,7 +132,6 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "HeapObject";
}
UNREACHABLE();
- return os;
}
// The {UseInfo} class is used to describe a use of an input of a node.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index f46740c9ae..4470b544fe 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
+#define __ tasm()->
#define kScratchReg ip
@@ -48,7 +48,6 @@ class S390OperandConverter final : public InstructionOperandConverter {
return false;
}
UNREACHABLE();
- return false;
}
Operand InputImmediate(size_t index) {
@@ -57,11 +56,9 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
return Operand(constant.ToInt64());
@@ -72,7 +69,6 @@ class S390OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand::Zero();
}
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
@@ -96,7 +92,6 @@ class S390OperandConverter final : public InstructionOperandConverter {
InputInt32(index + 2));
}
UNREACHABLE();
- return MemOperand(r0);
}
MemOperand MemoryOperand(AddressingMode* mode = NULL,
@@ -211,7 +206,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -224,7 +220,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -242,15 +239,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ AddP(scratch1_, object_, Operand(offset_immediate_));
} else {
DCHECK_EQ(0, offset_immediate_);
__ AddP(scratch1_, object_, offset_);
}
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
@@ -266,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
@@ -335,7 +333,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
#define GET_MEMOPERAND32(ret, fi) \
@@ -467,7 +464,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
static int nullInstr() {
UNREACHABLE();
- return -1;
}
template <int numOfOperand, class RType, class MType, class IType>
@@ -481,7 +477,6 @@ static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
return i();
} else {
UNREACHABLE();
- return -1;
}
}
@@ -626,26 +621,26 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ LoadlW(i.OutputRegister(), r0); \
} while (0)
-#define ASSEMBLE_FLOAT_MODULO() \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
- __ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
- 0, 2); \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
@@ -654,12 +649,12 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
@@ -1055,20 +1050,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void FlushPendingPushRegisters(MacroAssembler* masm,
+void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- masm->Push((*pending_pushes)[0]);
+ tasm->Push((*pending_pushes)[0]);
break;
case 2:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -1079,17 +1074,17 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
-void AddPendingPushRegister(MacroAssembler* masm,
+void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
- FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
- MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -1097,15 +1092,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -1128,20 +1123,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- masm(), frame_access_state(),
+ tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ LoadP(ip, g.SlotToMemOperand(source_location.index()));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types is not supported.
@@ -1149,15 +1144,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
- FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -1180,8 +1175,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
} else {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1201,9 +1195,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1315,7 +1308,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -1781,8 +1775,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3);
break;
}
@@ -2445,7 +2439,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
FlagsCondition condition = branch->condition;
Condition cond = FlagsConditionToCondition(condition, op);
- if (op == kS390_CmpDouble) {
+ if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
// check for unordered if necessary
// Branching to flabel/tlabel according to what's expected by tests
if (cond == le || cond == eq || cond == lt) {
@@ -2496,14 +2490,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2526,14 +2520,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ArchOpcode op = instr->arch_opcode();
Condition cond = FlagsConditionToCondition(condition, op);
- if (op == kS390_CmpDouble) {
+ if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
// check for unordered if necessary
- if (cond == le) {
+ if (cond == le || cond == eq || cond == lt) {
__ bunordered(&end);
- // Unnecessary for eq/lt since only FU bit will be set.
- } else if (cond == gt) {
+ } else if (cond == gt || cond == ne || cond == ge) {
__ bunordered(tlabel);
- // Unnecessary for ne/ge since only FU bit will be set.
}
}
__ b(cond, tlabel);
@@ -2608,12 +2600,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2674,7 +2666,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
@@ -2796,12 +2788,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif // V8_TARGET_ARCH_S390X
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
@@ -2827,8 +2817,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
+ double value = (src.type() == Constant::kFloat32)
+ ? src.ToFloat32()
+ : src.ToFloat64().value();
if (src.type() == Constant::kFloat32) {
__ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
} else {
@@ -2962,7 +2953,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % 2);
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 352e63af07..350f84b4bd 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -174,7 +174,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index f4e8ea13d2..e839d8cb1c 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -307,9 +307,6 @@ ArchOpcode SelectLoadOpcode(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
default:
UNREACHABLE();
@@ -820,9 +817,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -887,9 +881,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -937,9 +928,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1322,7 +1310,7 @@ bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
- base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
+ base::bits::IsPowerOfTwo(g.GetImmediate(right))) {
int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
bool doZeroExt = DoZeroExtForResult(node);
bool canEliminateZeroExt = ProduceWord32Result(left);
@@ -1720,7 +1708,6 @@ static bool CompareLogical(FlagsContinuation* cont) {
return false;
}
UNREACHABLE();
- return false;
}
namespace {
@@ -2220,6 +2207,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2227,7 +2215,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 3660553041..59c684f9bd 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -139,7 +139,6 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
return os << "throw";
}
UNREACHABLE();
- return os;
}
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 76889a69cb..ed74489149 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -940,10 +940,8 @@ class SpecialRPONumberer : public ZoneObject {
size_t num_loops, ZoneVector<Backedge>* backedges) {
// Extend existing loop membership vectors.
for (LoopInfo& loop : loops_) {
- BitVector* new_members = new (zone_)
- BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
- new_members->CopyFrom(*loop.members);
- loop.members = new_members;
+ loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
+ zone_);
}
// Extend loop information vector.
diff --git a/deps/v8/src/compiler/select-lowering.h b/deps/v8/src/compiler/select-lowering.h
index b882a3125f..b66f69f986 100644
--- a/deps/v8/src/compiler/select-lowering.h
+++ b/deps/v8/src/compiler/select-lowering.h
@@ -22,6 +22,8 @@ class SelectLowering final : public Reducer {
SelectLowering(Graph* graph, CommonOperatorBuilder* common);
~SelectLowering();
+ const char* reducer_name() const override { return "SelectLowering"; }
+
Reduction Reduce(Node* node) override;
private:
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 6cf88d33cf..1604f020e6 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -9,6 +9,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
@@ -93,6 +94,16 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4LtS) \
+ V(I32x4LeS) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4LtU) \
+ V(I32x4LeU) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
@@ -112,7 +123,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Min) \
V(F32x4Max)
-#define FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(V) \
+#define FOREACH_FLOAT32X4_TO_INT32X4OPCODE(V) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
@@ -120,18 +131,6 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Gt) \
V(F32x4Ge)
-#define FOREACH_INT32X4_TO_SIMD1X4OPCODE(V) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4LtS) \
- V(I32x4LeS) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4LtU) \
- V(I32x4LeU) \
- V(I32x4GtU) \
- V(I32x4GeU)
-
#define FOREACH_INT16X8_OPCODE(V) \
V(I16x8Splat) \
V(I16x8ExtractLane) \
@@ -150,7 +149,13 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
- V(I16x8MaxU)
+ V(I16x8MaxU) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8LtS) \
+ V(I16x8LeS) \
+ V(I16x8LtU) \
+ V(I16x8LeU)
#define FOREACH_INT8X16_OPCODE(V) \
V(I8x16Splat) \
@@ -170,35 +175,27 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16AddSaturateU) \
V(I8x16SubSaturateU) \
V(I8x16MinU) \
- V(I8x16MaxU)
-
-#define FOREACH_INT16X8_TO_SIMD1X8OPCODE(V) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8LtS) \
- V(I16x8LeS) \
- V(I16x8LtU) \
- V(I16x8LeU)
-
-#define FOREACH_INT8X16_TO_SIMD1X16OPCODE(V) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16LtS) \
- V(I8x16LeS) \
- V(I8x16LtU) \
+ V(I8x16MaxU) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16LtS) \
+ V(I8x16LeS) \
+ V(I8x16LtU) \
V(I8x16LeU)
-#define FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(V) \
- V(Float32x4, Float32) \
- V(Int32x4, Int32) \
- V(Int16x8, Int16) \
- V(Int8x16, Int8)
-
-#define FOREACH_SIMD_TYPE_TO_MACHINE_REP(V) \
- V(Float32x4, Float32) \
- V(Int32x4, Word32) \
- V(Int16x8, Word16) \
- V(Int8x16, Word8)
+MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
+ switch (simdType) {
+ case SimdType::kFloat32x4:
+ return MachineType::Float32();
+ case SimdType::kInt32x4:
+ return MachineType::Int32();
+ case SimdType::kInt16x8:
+ return MachineType::Int16();
+ case SimdType::kInt8x16:
+ return MachineType::Int8();
+ }
+ return MachineType::None();
+}
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
@@ -214,55 +211,33 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
- FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
- FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kSimd1x4;
+ FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_INT16X8_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt16x8;
break;
}
- FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kSimd1x8;
- break;
- }
FOREACH_INT8X16_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt8x16;
break;
}
- FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kSimd1x16;
- break;
- }
default: {
switch (output->opcode()) {
- FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kF32x4SConvertI32x4:
case IrOpcode::kF32x4UConvertI32x4: {
replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
- FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
+ FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT)
case IrOpcode::kI32x4SConvertF32x4:
case IrOpcode::kI32x4UConvertF32x4: {
replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
- case IrOpcode::kS32x4Select: {
- replacements_[node->id()].type = SimdType::kSimd1x4;
- break;
- }
- FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- }
- FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt8x16;
- break;
- }
- case IrOpcode::kS16x8Select: {
- replacements_[node->id()].type = SimdType::kSimd1x8;
+ case IrOpcode::kS128Select: {
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
default: {
@@ -310,12 +285,11 @@ static int GetReturnCountAfterLowering(
int SimdScalarLowering::NumLanes(SimdType type) {
int num_lanes = 0;
- if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4 ||
- type == SimdType::kSimd1x4) {
+ if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
num_lanes = kNumLanes32;
- } else if (type == SimdType::kInt16x8 || type == SimdType::kSimd1x8) {
+ } else if (type == SimdType::kInt16x8) {
num_lanes = kNumLanes16;
- } else if (type == SimdType::kInt8x16 || type == SimdType::kSimd1x16) {
+ } else if (type == SimdType::kInt8x16) {
num_lanes = kNumLanes8;
} else {
UNREACHABLE();
@@ -415,18 +389,42 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
}
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op, bool invert_inputs) {
+ const Operator* op) {
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
+void SimdScalarLowering::LowerCompareOp(Node* node, SimdType input_rep_type,
+ const Operator* op,
+ bool invert_inputs) {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* cmp_result = nullptr;
if (invert_inputs) {
- rep_node[i] = graph()->NewNode(op, rep_right[i], rep_left[i]);
+ cmp_result = graph()->NewNode(op, rep_right[i], rep_left[i]);
} else {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ cmp_result = graph()->NewNode(op, rep_left[i], rep_right[i]);
}
+ Diamond d_cmp(graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), cmp_result,
+ jsgraph_->Int32Constant(0)));
+ MachineRepresentation rep =
+ (input_rep_type == SimdType::kFloat32x4)
+ ? MachineRepresentation::kWord32
+ : MachineTypeFrom(input_rep_type).representation();
+ rep_node[i] =
+ d_cmp.Phi(rep, jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(-1));
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -682,8 +680,12 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
- rep_node[i] = d.Phi(MachineRepresentation::kWord32,
- jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(1));
+ MachineRepresentation rep =
+ (input_rep_type == SimdType::kFloat32x4)
+ ? MachineRepresentation::kWord32
+ : MachineTypeFrom(input_rep_type).representation();
+ rep_node[i] =
+ d.Phi(rep, jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(-1));
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -737,17 +739,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
-#define LOAD_CASE(sType, mType) \
- case SimdType::k##sType: \
- load_op = machine()->Load(MachineType::mType()); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(LOAD_CASE)
- default:
- UNREACHABLE();
- }
-#undef LOAD_CASE
+ load_op = machine()->Load(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
@@ -755,17 +747,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
UnalignedLoadRepresentationOf(node->op()).representation();
const Operator* load_op;
-#define UNALIGNED_LOAD_CASE(sType, mType) \
- case SimdType::k##sType: \
- load_op = machine()->UnalignedLoad(MachineType::mType()); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(UNALIGNED_LOAD_CASE)
- default:
- UNREACHABLE();
- }
-#undef UNALIGHNED_LOAD_CASE
+ load_op = machine()->UnalignedLoad(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
@@ -775,35 +757,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
-#define STORE_CASE(sType, mType) \
- case SimdType::k##sType: \
- store_op = machine()->Store(StoreRepresentation( \
- MachineRepresentation::k##mType, write_barrier_kind)); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(STORE_CASE)
- default:
- UNREACHABLE();
- }
-#undef STORE_CASE
+ store_op = machine()->Store(StoreRepresentation(
+ MachineTypeFrom(rep_type).representation(), write_barrier_kind));
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
-#define UNALIGNED_STORE_CASE(sType, mType) \
- case SimdType::k##sType: \
- store_op = machine()->UnalignedStore(MachineRepresentation::k##mType); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(UNALIGNED_STORE_CASE)
- default:
- UNREACHABLE();
- }
-#undef UNALIGNED_STORE_CASE
+ store_op =
+ machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
@@ -816,7 +779,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kCall: {
- // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ // TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
@@ -824,8 +787,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
descriptor->GetReturnType(0) == MachineType::Simd128())) {
// We have to adjust the call descriptor.
const Operator* op =
- common()->Call(wasm::ModuleEnv::GetI32WasmCallDescriptorForSimd(
- zone(), descriptor));
+ common()->Call(GetI32WasmCallDescriptorForSimd(zone(), descriptor));
NodeProperties::ChangeOp(node, op);
}
if (descriptor->ReturnCount() == 1 &&
@@ -1050,10 +1012,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
-#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
- case IrOpcode::simd_op: { \
- LowerBinaryOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
- break; \
+#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
+ case IrOpcode::simd_op: { \
+ LowerCompareOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
+ break; \
}
COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
@@ -1104,13 +1066,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerNotEqual(node, SimdType::kInt8x16, machine()->Word32Equal());
break;
}
- case IrOpcode::kS32x4Select:
- case IrOpcode::kS16x8Select:
- case IrOpcode::kS8x16Select: {
+ case IrOpcode::kS128Select: {
DCHECK(node->InputCount() == 3);
- DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4 ||
- ReplacementType(node->InputAt(0)) == SimdType::kSimd1x8 ||
- ReplacementType(node->InputAt(0)) == SimdType::kSimd1x16);
+ DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kInt32x4 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kInt16x8 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kInt8x16);
Node** boolean_input = GetReplacements(node->InputAt(0));
Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
@@ -1119,18 +1079,8 @@ void SimdScalarLowering::LowerNode(Node* node) {
Diamond d(graph(), common(),
graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
jsgraph_->Int32Constant(0)));
-#define SELECT_CASE(sType, mType) \
- case SimdType::k##sType: \
- rep_node[i] = \
- d.Phi(MachineRepresentation::k##mType, rep_right[1], rep_left[0]); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(SELECT_CASE)
- default:
- UNREACHABLE();
- }
-#undef SELECT_CASE
+ rep_node[i] = d.Phi(MachineTypeFrom(rep_type).representation(),
+ rep_right[1], rep_left[0]);
}
ReplaceNode(node, rep_node, num_lanes);
break;
@@ -1264,19 +1214,9 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
}
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
-#define PHI_CASE(sType, mType) \
- case SimdType::k##sType: \
- rep_nodes[i] = graph()->NewNode( \
- common()->Phi(MachineRepresentation::k##mType, value_count), \
- value_count + 1, inputs_rep[i], false); \
- break;
-
- switch (type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(PHI_CASE)
- default:
- UNREACHABLE();
- }
-#undef PHI_CASE
+ rep_nodes[i] = graph()->NewNode(
+ common()->Phi(MachineTypeFrom(type).representation(), value_count),
+ value_count + 1, inputs_rep[i], false);
}
ReplaceNode(phi, rep_nodes, num_lanes);
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 09c78dc983..f7f276cd5e 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -28,15 +28,7 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t {
- kFloat32x4,
- kInt32x4,
- kInt16x8,
- kInt8x16,
- kSimd1x4,
- kSimd1x8,
- kSimd1x16
- };
+ enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 };
#if defined(V8_TARGET_BIG_ENDIAN)
static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
@@ -81,8 +73,9 @@ class SimdScalarLowering {
const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
- void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
- bool invert_inputs = false);
+ void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op);
+ void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
+ bool invert_inputs = false);
Node* FixUpperBits(Node* input, int32_t shift);
void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
const Operator* op);
@@ -96,6 +89,7 @@ class SimdScalarLowering {
void LowerShiftOp(Node* node, SimdType type);
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
+ MachineType MachineTypeFrom(SimdType simdType);
JSGraph* const jsgraph_;
NodeMarker<State> state_;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 33fe9095ce..19578fc7ac 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -84,7 +84,6 @@ MachineRepresentation MachineRepresentationFromArrayType(
return MachineRepresentation::kFloat64;
}
UNREACHABLE();
- return MachineRepresentation::kNone;
}
UseInfo CheckedUseInfoAsWord32FromHint(
@@ -101,7 +100,6 @@ UseInfo CheckedUseInfoAsWord32FromHint(
return UseInfo::CheckedNumberOrOddballAsWord32();
}
UNREACHABLE();
- return UseInfo::None();
}
UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
@@ -117,7 +115,6 @@ UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
return UseInfo::CheckedNumberOrOddballAsFloat64();
}
UNREACHABLE();
- return UseInfo::None();
}
UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
@@ -139,14 +136,10 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kBit:
return UseInfo::Bool();
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return UseInfo::None();
}
@@ -999,6 +992,18 @@ class RepresentationSelector {
}
}
+ void VisitCheck(Node* node, Type* type, SimplifiedLowering* lowering) {
+ if (InputIs(node, type)) {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
+
void VisitCall(Node* node, SimplifiedLowering* lowering) {
const CallDescriptor* desc = CallDescriptorOf(node->op());
int params = static_cast<int>(desc->ParameterCount());
@@ -1564,8 +1569,7 @@ class RepresentationSelector {
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
} else {
- DCHECK_EQ(MachineRepresentation::kNone,
- input_info->representation());
+ DCHECK(!TypeOf(node->InputAt(0))->IsInhabited());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else {
@@ -2332,9 +2336,18 @@ class RepresentationSelector {
return;
}
case IrOpcode::kStringCharCodeAt: {
- // TODO(turbofan): Allow builtins to return untagged values.
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
+ Type* string_type = TypeOf(node->InputAt(0));
+ if (string_type->Is(Type::SeqString())) {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ NodeProperties::ChangeOp(node, simplified()->SeqStringCharCodeAt());
+ }
+ } else {
+ // TODO(turbofan): Allow builtins to return untagged values.
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ }
return;
}
case IrOpcode::kStringFromCharCode: {
@@ -2354,7 +2367,12 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedSigned);
return;
}
-
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl: {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kCheckBounds: {
Type* index_type = TypeOf(node->InputAt(0));
Type* length_type = TypeOf(node->InputAt(1));
@@ -2397,14 +2415,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckInternalizedString: {
- if (InputIs(node, Type::InternalizedString())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
+ VisitCheck(node, Type::InternalizedString(), lowering);
return;
}
case IrOpcode::kCheckNumber: {
@@ -2417,14 +2428,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckReceiver: {
- if (InputIs(node, Type::Receiver())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
+ VisitCheck(node, Type::Receiver(), lowering);
return;
}
case IrOpcode::kCheckSmi: {
@@ -2440,17 +2444,21 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckString: {
- if (InputIs(node, Type::String())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
+ VisitCheck(node, Type::String(), lowering);
+ return;
+ }
+ case IrOpcode::kCheckSeqString: {
+ VisitCheck(node, Type::SeqString(), lowering);
+ return;
+ }
+ case IrOpcode::kCheckNonEmptyString: {
+ VisitCheck(node, Type::NonEmptyString(), lowering);
+ return;
+ }
+ case IrOpcode::kCheckSymbol: {
+ VisitCheck(node, Type::Symbol(), lowering);
return;
}
-
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
@@ -2467,14 +2475,24 @@ class RepresentationSelector {
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
- NodeInfo* input_info = GetInfo(node->InputAt(1));
+ Node* value_node = node->InputAt(1);
+ NodeInfo* input_info = GetInfo(value_node);
+ MachineRepresentation field_representation =
+ access.machine_type.representation();
+
+ // Make sure we convert to Smi if possible. This should help write
+ // barrier elimination.
+ if (field_representation == MachineRepresentation::kTagged &&
+ TypeOf(value_node)->Is(Type::SignedSmall())) {
+ field_representation = MachineRepresentation::kTaggedSigned;
+ }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
- access.base_is_tagged, access.machine_type.representation(),
- access.offset, access.type, input_info->representation(),
- node->InputAt(1));
+ access.base_is_tagged, field_representation, access.offset,
+ access.type, input_info->representation(), value_node);
+
ProcessInput(node, 0, UseInfoForBasePointer(access));
- ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
- access.machine_type.representation()));
+ ProcessInput(node, 1,
+ TruncatingUseInfoFromRepresentation(field_representation));
ProcessRemainingInputs(node, 2);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) {
@@ -2543,15 +2561,25 @@ class RepresentationSelector {
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
- NodeInfo* input_info = GetInfo(node->InputAt(2));
+ Node* value_node = node->InputAt(2);
+ NodeInfo* input_info = GetInfo(value_node);
+ MachineRepresentation element_representation =
+ access.machine_type.representation();
+
+ // Make sure we convert to Smi if possible. This should help write
+ // barrier elimination.
+ if (element_representation == MachineRepresentation::kTagged &&
+ TypeOf(value_node)->Is(Type::SignedSmall())) {
+ element_representation = MachineRepresentation::kTaggedSigned;
+ }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
- access.base_is_tagged, access.machine_type.representation(),
- access.type, input_info->representation(), node->InputAt(2));
+ access.base_is_tagged, element_representation, access.type,
+ input_info->representation(), value_node);
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(
- access.machine_type.representation())); // value
+ element_representation)); // value
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) {
@@ -2563,6 +2591,14 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kTransitionAndStoreElement: {
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // array
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // value
+ ProcessRemainingInputs(node, 3);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kLoadTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
@@ -2752,7 +2788,7 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kCheckTaggedHole: {
+ case IrOpcode::kCheckNotTaggedHole: {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
return;
}
@@ -2839,6 +2875,16 @@ class RepresentationSelector {
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
+ case IrOpcode::kLookupHashStorageIndex:
+ VisitInputs(node);
+ return SetOutput(node, MachineRepresentation::kTaggedSigned);
+
+ case IrOpcode::kLoadHashMapValue:
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // table
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessRemainingInputs(node, 2);
+ return SetOutput(node, MachineRepresentation::kTagged);
+
// Operators with all inputs tagged and no or tagged output have uniform
// handling.
case IrOpcode::kEnd:
@@ -2873,6 +2919,7 @@ class RepresentationSelector {
case IrOpcode::kJSToName:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
VisitInputs(node);
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
@@ -3596,7 +3643,8 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
void SimplifiedLowering::DoStringToNumber(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::StringToNumber(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
@@ -3702,7 +3750,7 @@ void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
Node* SimplifiedLowering::ToNumberCode() {
if (!to_number_code_.is_set()) {
- Callable callable = CodeFactory::ToNumber(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
to_number_code_.set(jsgraph()->HeapConstant(callable.code()));
}
return to_number_code_.get();
@@ -3710,7 +3758,7 @@ Node* SimplifiedLowering::ToNumberCode() {
Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
- Callable callable = CodeFactory::ToNumber(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 266cb236ba..39c467d1bc 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -29,6 +29,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
~SimplifiedOperatorReducer() final;
+ const char* reducer_name() const override {
+ return "SimplifiedOperatorReducer";
+ }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 476f423749..29e466925f 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -9,6 +9,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
#include "src/objects/map.h"
+#include "src/objects/name.h"
namespace v8 {
namespace internal {
@@ -26,7 +27,6 @@ std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
return os << "tagged base";
}
UNREACHABLE();
- return os;
}
@@ -51,7 +51,6 @@ MachineType BufferAccess::machine_type() const {
return MachineType::Float64();
}
UNREACHABLE();
- return MachineType::None();
}
@@ -77,7 +76,6 @@ std::ostream& operator<<(std::ostream& os, BufferAccess access) {
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return os;
}
@@ -205,7 +203,6 @@ std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
return os << "never-return-hole";
}
UNREACHABLE();
- return os;
}
CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
@@ -233,7 +230,6 @@ std::ostream& operator<<(std::ostream& os, CheckForMinusZeroMode mode) {
return os << "dont-check-for-minus-zero";
}
UNREACHABLE();
- return os;
}
std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
@@ -286,11 +282,11 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
return os << "NumberOrOddball";
}
UNREACHABLE();
- return os;
}
CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kCheckedTaggedToFloat64, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64 ||
+ op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
return OpParameter<CheckTaggedInputMode>(op);
}
@@ -345,7 +341,6 @@ std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
<< " to " << Brief(*transition.target());
}
UNREACHABLE();
- return os;
}
ElementsTransition const& ElementsTransitionOf(const Operator* op) {
@@ -353,6 +348,55 @@ ElementsTransition const& ElementsTransitionOf(const Operator* op) {
return OpParameter<ElementsTransition>(op);
}
+namespace {
+
+// Parameters for the TransitionAndStoreElement opcode.
+class TransitionAndStoreElementParameters final {
+ public:
+ TransitionAndStoreElementParameters(Handle<Map> double_map,
+ Handle<Map> fast_map);
+
+ Handle<Map> double_map() const { return double_map_; }
+ Handle<Map> fast_map() const { return fast_map_; }
+
+ private:
+ Handle<Map> const double_map_;
+ Handle<Map> const fast_map_;
+};
+
+TransitionAndStoreElementParameters::TransitionAndStoreElementParameters(
+ Handle<Map> double_map, Handle<Map> fast_map)
+ : double_map_(double_map), fast_map_(fast_map) {}
+
+bool operator==(TransitionAndStoreElementParameters const& lhs,
+ TransitionAndStoreElementParameters const& rhs) {
+ return lhs.fast_map().address() == rhs.fast_map().address() &&
+ lhs.double_map().address() == rhs.double_map().address();
+}
+
+size_t hash_value(TransitionAndStoreElementParameters parameters) {
+ return base::hash_combine(parameters.fast_map().address(),
+ parameters.double_map().address());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ TransitionAndStoreElementParameters parameters) {
+ return os << "fast-map" << Brief(*parameters.fast_map()) << " double-map"
+ << Brief(*parameters.double_map());
+}
+
+} // namespace
+
+Handle<Map> DoubleMapParameterOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kTransitionAndStoreElement);
+ return OpParameter<TransitionAndStoreElementParameters>(op).double_map();
+}
+
+Handle<Map> FastMapParameterOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kTransitionAndStoreElement);
+ return OpParameter<TransitionAndStoreElementParameters>(op).fast_map();
+}
+
std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
@@ -365,7 +409,6 @@ std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
return os << "NumberOrOddball";
}
UNREACHABLE();
- return os;
}
size_t hash_value(NumberOperationHint hint) {
@@ -480,8 +523,11 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
@@ -521,29 +567,31 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual)
-#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
- V(CheckHeapObject, 1, 1) \
- V(CheckIf, 1, 0) \
- V(CheckInternalizedString, 1, 1) \
- V(CheckNumber, 1, 1) \
- V(CheckReceiver, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
- V(CheckTaggedHole, 1, 1) \
- V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
- V(CheckedInt32Div, 2, 1) \
- V(CheckedInt32Mod, 2, 1) \
- V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
- V(CheckedInt32ToTaggedSigned, 1, 1) \
- V(CheckedTaggedSignedToInt32, 1, 1) \
- V(CheckedTaggedToTaggedSigned, 1, 1) \
- V(CheckedTaggedToTaggedPointer, 1, 1) \
- V(CheckedTruncateTaggedToWord32, 1, 1)
+#define CHECKED_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckHeapObject, 1, 1) \
+ V(CheckIf, 1, 0) \
+ V(CheckInternalizedString, 1, 1) \
+ V(CheckNumber, 1, 1) \
+ V(CheckReceiver, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
+ V(CheckSeqString, 1, 1) \
+ V(CheckNonEmptyString, 1, 1) \
+ V(CheckSymbol, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
+ V(CheckedUint32Mod, 2, 1) \
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedInt32ToTaggedSigned, 1, 1) \
+ V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedSigned, 1, 1) \
+ V(CheckedTaggedToTaggedPointer, 1, 1)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -586,6 +634,20 @@ struct SimplifiedOperatorGlobalCache final {
};
ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
+ struct LookupHashStorageIndexOperator final : public Operator {
+ LookupHashStorageIndexOperator()
+ : Operator(IrOpcode::kLookupHashStorageIndex, Operator::kEliminatable,
+ "LookupHashStorageIndex", 2, 1, 1, 1, 1, 0) {}
+ };
+ LookupHashStorageIndexOperator kLookupHashStorageIndex;
+
+ struct LoadHashMapValueOperator final : public Operator {
+ LoadHashMapValueOperator()
+ : Operator(IrOpcode::kLoadHashMapValue, Operator::kEliminatable,
+ "LoadHashMapValue", 2, 1, 1, 1, 1, 0) {}
+ };
+ LoadHashMapValueOperator kLoadHashMapValue;
+
struct ArgumentsFrameOperator final : public Operator {
ArgumentsFrameOperator()
: Operator(IrOpcode::kArgumentsFrame, Operator::kPure, "ArgumentsFrame",
@@ -670,6 +732,20 @@ struct SimplifiedOperatorGlobalCache final {
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrOddball>
kCheckedTaggedToFloat64NumberOrOddballOperator;
+ template <CheckTaggedInputMode kMode>
+ struct CheckedTruncateTaggedToWord32Operator final
+ : public Operator1<CheckTaggedInputMode> {
+ CheckedTruncateTaggedToWord32Operator()
+ : Operator1<CheckTaggedInputMode>(
+ IrOpcode::kCheckedTruncateTaggedToWord32,
+ Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber>
+ kCheckedTruncateTaggedToWord32NumberOperator;
+ CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumberOrOddball>
+ kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+
template <CheckFloat64HoleMode kMode>
struct CheckFloat64HoleNaNOperator final
: public Operator1<CheckFloat64HoleMode> {
@@ -768,6 +844,8 @@ PURE_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
GET_FROM_CACHE(ArgumentsFrame)
+GET_FROM_CACHE(LookupHashStorageIndex)
+GET_FROM_CACHE(LoadHashMapValue)
GET_FROM_CACHE(NewUnmappedArgumentsElements)
#undef GET_FROM_CACHE
@@ -780,7 +858,6 @@ const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
@@ -792,7 +869,6 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
return &cache_.kCheckedInt32MulDontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
@@ -804,7 +880,6 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
@@ -816,7 +891,6 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
@@ -828,7 +902,17 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
}
UNREACHABLE();
- return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode mode) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ }
+ UNREACHABLE();
}
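A minimal usage sketch of the now mode-parameterized truncation, assuming a SimplifiedOperatorBuilder* named simplified and a NumberOperationHint named hint are in scope (both names are assumptions, not part of this patch):

    // Pick the checked truncation flavor from the speculation hint; oddballs
    // (undefined, null, booleans) are only tolerated when the hint allows them.
    const Operator* op =
        hint == NumberOperationHint::kNumberOrOddball
            ? simplified->CheckedTruncateTaggedToWord32(
                  CheckTaggedInputMode::kNumberOrOddball)
            : simplified->CheckedTruncateTaggedToWord32(
                  CheckTaggedInputMode::kNumber);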
const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
@@ -851,7 +935,6 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
@@ -867,7 +950,6 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
@@ -957,7 +1039,6 @@ const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
#undef LOAD_BUFFER
}
UNREACHABLE();
- return nullptr;
}
@@ -970,7 +1051,6 @@ const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
#undef STORE_BUFFER
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
@@ -982,7 +1062,6 @@ const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
return &cache_.kStringFromCodePointOperatorUTF32;
}
UNREACHABLE();
- return nullptr;
}
#define SPECULATIVE_NUMBER_BINOP(Name) \
@@ -1023,6 +1102,15 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
+const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
+ Handle<Map> double_map, Handle<Map> fast_map) {
+ TransitionAndStoreElementParameters parameters(double_map, fast_map);
+ return new (zone()) Operator1<TransitionAndStoreElementParameters>(
+ IrOpcode::kTransitionAndStoreElement,
+ Operator::kNoDeopt | Operator::kNoThrow, "TransitionAndStoreElement", 3,
+ 1, 1, 0, 1, 0, parameters);
+}
+
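A minimal sketch of building and inspecting the new node, assuming graph, simplified, double_map, fast_map, object, index, value, effect and control are in scope (all assumed names, not part of this patch):

    // Three value inputs (object, index, value) plus one effect and one
    // control input, and no value output, as declared above.
    Node* store = graph->NewNode(
        simplified->TransitionAndStoreElement(double_map, fast_map),
        object, index, value, effect, control);
    // Later phases can recover the embedded maps from the operator.
    Handle<Map> stored_double_map = DoubleMapParameterOf(store->op());
    Handle<Map> stored_fast_map = FastMapParameterOf(store->op());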
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index ac53bfc72e..f2739acef3 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -228,6 +228,10 @@ std::ostream& operator<<(std::ostream&, ElementsTransition);
ElementsTransition const& ElementsTransitionOf(const Operator* op)
WARN_UNUSED_RESULT;
+// Parameters for TransitionAndStoreElement.
+Handle<Map> DoubleMapParameterOf(const Operator* op);
+Handle<Map> FastMapParameterOf(const Operator* op);
+
// A hint for speculative number operations.
enum class NumberOperationHint : uint8_t {
kSignedSmall, // Inputs were always Smi so far, output was in Smi range.
@@ -378,9 +382,15 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringLessThanOrEqual();
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
+ const Operator* SeqStringCharCodeAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
+ const Operator* StringToLowerCaseIntl();
+ const Operator* StringToUpperCaseIntl();
+
+ const Operator* LookupHashStorageIndex();
+ const Operator* LoadHashMapValue();
const Operator* SpeculativeToNumber(NumberOperationHint hint);
@@ -414,6 +424,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckNumber();
const Operator* CheckSmi();
const Operator* CheckString();
+ const Operator* CheckSeqString();
+ const Operator* CheckNonEmptyString();
+ const Operator* CheckSymbol();
const Operator* CheckReceiver();
const Operator* CheckedInt32Add();
@@ -432,10 +445,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
const Operator* CheckedTaggedToTaggedSigned();
const Operator* CheckedTaggedToTaggedPointer();
- const Operator* CheckedTruncateTaggedToWord32();
+ const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode);
const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
- const Operator* CheckTaggedHole();
+ const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
const Operator* ObjectIsDetectableCallable();
@@ -484,6 +497,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
+ // store-element [base + index], value, only with fast arrays.
+ const Operator* TransitionAndStoreElement(Handle<Map> double_map,
+ Handle<Map> fast_map);
+
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 196cb0d608..71aa2110bb 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -329,7 +329,6 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
}
}
UNREACHABLE();
- return UnobservablesSet::Unvisited();
}
bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
diff --git a/deps/v8/src/compiler/tail-call-optimization.cc b/deps/v8/src/compiler/tail-call-optimization.cc
deleted file mode 100644
index 51299f8c66..0000000000
--- a/deps/v8/src/compiler/tail-call-optimization.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/tail-call-optimization.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-Reduction TailCallOptimization::Reduce(Node* node) {
- if (node->opcode() != IrOpcode::kReturn) return NoChange();
- // The value which is returned must be the result of a potential tail call,
- // there must be no try/catch/finally around the Call, and there must be no
- // other effect or control between the Call and the Return nodes.
- Node* const call = NodeProperties::GetValueInput(node, 1);
- if (call->opcode() == IrOpcode::kCall &&
- CallDescriptorOf(call->op())->SupportsTailCalls() &&
- NodeProperties::GetEffectInput(node) == call &&
- NodeProperties::GetControlInput(node) == call &&
- !NodeProperties::IsExceptionalCall(call) && call->UseCount() == 3) {
- // Ensure that no additional arguments are being popped other than those in
- // the CallDescriptor, otherwise the tail call transformation is invalid.
- DCHECK_EQ(0, Int32Matcher(NodeProperties::GetValueInput(node, 0)).Value());
- // Furthermore, the Return node value, effect, and control depends
- // directly on the Call, no other uses of the Call node exist.
- //
- // The input graph looks as follows:
-
- // Value1 ... ValueN Effect Control
- // ^ ^ ^ ^
- // | | | |
- // | +--+ +-+ |
- // +----------+ | | +------+
- // \ | | /
- // Call[Descriptor]
- // ^ ^ ^
- // Int32(0) <-+ | | |
- // \ | | |
- // Return
- // ^
- // |
-
- // The resulting graph looks like this:
-
- // Value1 ... ValueN Effect Control
- // ^ ^ ^ ^
- // | | | |
- // | +--+ +-+ |
- // +----------+ | | +------+
- // \ | | /
- // TailCall[Descriptor]
- // ^
- // |
-
- DCHECK_EQ(4, node->InputCount());
- node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
- node->ReplaceInput(1, NodeProperties::GetControlInput(call));
- node->RemoveInput(3);
- node->RemoveInput(2);
- for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
- node->InsertInput(graph()->zone(), index,
- NodeProperties::GetValueInput(call, index));
- }
- NodeProperties::ChangeOp(node,
- common()->TailCall(CallDescriptorOf(call->op())));
- return Changed(node);
- }
- return NoChange();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/tail-call-optimization.h b/deps/v8/src/compiler/tail-call-optimization.h
deleted file mode 100644
index d693f3694c..0000000000
--- a/deps/v8/src/compiler/tail-call-optimization.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
-#define V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
-
-#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class Graph;
-
-
-// Performs tail call optimization by replacing certain combinations of Return
-// and Call nodes with a single TailCall.
-class V8_EXPORT_PRIVATE TailCallOptimization final : public Reducer {
- public:
- TailCallOptimization(CommonOperatorBuilder* common, Graph* graph)
- : common_(common), graph_(graph) {}
-
- Reduction Reduce(Node* node) final;
-
- private:
- CommonOperatorBuilder* common() const { return common_; }
- Graph* graph() const { return graph_; }
-
- CommonOperatorBuilder* const common_;
- Graph* const graph_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 3d9801bc10..5ac9072174 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -7,6 +7,7 @@
#include "src/compiler/types.h"
#include "src/date.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index b95e22a2e5..8be08630b5 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -76,12 +76,18 @@ Reduction TypedOptimization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckHeapObject:
return ReduceCheckHeapObject(node);
+ case IrOpcode::kCheckNotTaggedHole:
+ return ReduceCheckNotTaggedHole(node);
case IrOpcode::kCheckMaps:
return ReduceCheckMaps(node);
case IrOpcode::kCheckNumber:
return ReduceCheckNumber(node);
case IrOpcode::kCheckString:
return ReduceCheckString(node);
+ case IrOpcode::kCheckSeqString:
+ return ReduceCheckSeqString(node);
+ case IrOpcode::kCheckNonEmptyString:
+ return ReduceCheckNonEmptyString(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kNumberCeil:
@@ -128,6 +134,16 @@ Reduction TypedOptimization::ReduceCheckHeapObject(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceCheckNotTaggedHole(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (!input_type->Maybe(Type::Hole())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
// The CheckMaps(o, ...map...) can be eliminated if map is stable,
// o has type Constant(object) and map == object->map, and either
@@ -174,6 +190,26 @@ Reduction TypedOptimization::ReduceCheckString(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceCheckSeqString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::SeqString())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceCheckNonEmptyString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::NonEmptyString())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
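The string check reductions above all follow one shape; a hypothetical helper (not present in the patch) captures the pattern:

    // Hypothetical helper: when the input is already statically known to
    // satisfy the checked type, the check is redundant and is replaced by its
    // input.
    Reduction TypedOptimization::ReduceCheckAgainst(Node* node, Type* expected) {
      Node* const input = NodeProperties::GetValueInput(node, 0);
      Type* const input_type = NodeProperties::GetType(input);
      if (input_type->Is(expected)) {
        ReplaceWithValue(node, input);
        return Replace(input);
      }
      return NoChange();
    }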
Reduction TypedOptimization::ReduceLoadField(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Type* const object_type = NodeProperties::GetType(object);
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index c441daf222..cd4085c3fc 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -39,6 +39,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Flags flags, JSGraph* jsgraph);
~TypedOptimization();
+ const char* reducer_name() const override { return "TypedOptimization"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -46,6 +48,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckNumber(Node* node);
Reduction ReduceCheckString(Node* node);
+ Reduction ReduceCheckSeqString(Node* node);
+ Reduction ReduceCheckNonEmptyString(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceNumberFloor(Node* node);
Reduction ReduceNumberRoundop(Node* node);
@@ -54,6 +58,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceReferenceEqual(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceSpeculativeToNumber(Node* node);
+ Reduction ReduceCheckNotTaggedHole(Node* node);
CompilationDependencies* dependencies() const { return dependencies_; }
Factory* factory() const;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index f92d507dfb..94b6e5a922 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -43,7 +43,7 @@ Typer::Typer(Isolate* isolate, Flags flags, Graph* graph)
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
- singleton_empty_string_ = Type::HeapConstant(factory->empty_string(), zone);
+ singleton_empty_string_ = Type::NewConstant(factory->empty_string(), zone);
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
falsish_ = Type::Union(
@@ -73,6 +73,8 @@ class Typer::Visitor : public Reducer {
induction_vars_(induction_vars),
weakened_nodes_(typer->zone()) {}
+ const char* reducer_name() const override { return "Typer"; }
+
Reduction Reduce(Node* node) override {
if (node->op()->ValueOutputCount() == 0) return NoChange();
switch (node->opcode()) {
@@ -207,7 +209,6 @@ class Typer::Visitor : public Reducer {
break;
}
UNREACHABLE();
- return nullptr;
}
Type* TypeConstant(Handle<Object> value);
@@ -271,6 +272,7 @@ class Typer::Visitor : public Reducer {
static Type* ToNumber(Type*, Typer*);
static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
+ static Type* ToPrimitiveToString(Type*, Typer*);
#define DECLARE_METHOD(Name) \
static Type* Name(Type* type, Typer* t) { \
return t->operation_typer_.Name(type); \
@@ -505,6 +507,15 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
return Type::String();
}
+// static
+Type* Typer::Visitor::ToPrimitiveToString(Type* type, Typer* t) {
+ // ES6 section 7.1.1 ToPrimitive( argument, "default" ) followed by
+ // ES6 section 7.1.12 ToString ( argument )
+ type = ToPrimitive(type, t);
+ if (type->Is(Type::String())) return type;
+ return Type::String();
+}
+
// Type checks.
Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
@@ -609,37 +620,30 @@ Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeRetain(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeInt32Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeInt64Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeNumberConstant(Node* node) {
@@ -784,19 +788,16 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
Type* Typer::Visitor::TypeEffectPhi(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeLoopExit(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeLoopExitValue(Node* node) { return Operand(node, 0); }
Type* Typer::Visitor::TypeLoopExitEffect(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeEnsureWritableFastElements(Node* node) {
@@ -809,17 +810,14 @@ Type* Typer::Visitor::TypeMaybeGrowFastElements(Node* node) {
Type* Typer::Visitor::TypeTransitionElementsKind(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeCheckpoint(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeBeginRegion(Node* node) {
UNREACHABLE();
- return nullptr;
}
@@ -1020,6 +1018,9 @@ Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
return NumberShiftRightLogical(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
+// JS string concatenation.
+
+Type* Typer::Visitor::TypeJSStringConcat(Node* node) { return Type::String(); }
// JS arithmetic operators.
@@ -1096,6 +1097,10 @@ Type* Typer::Visitor::TypeJSToString(Node* node) {
return TypeUnaryOp(node, ToString);
}
+Type* Typer::Visitor::TypeJSToPrimitiveToString(Node* node) {
+ return TypeUnaryOp(node, ToPrimitiveToString);
+}
+
// JS object operators.
@@ -1111,7 +1116,6 @@ Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
return Type::OtherObject();
}
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSCreateArray(Node* node) { return Type::Array(); }
@@ -1244,29 +1248,24 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
Type* Typer::Visitor::TypeJSStoreProperty(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreNamed(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreNamedOwn(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
@@ -1277,6 +1276,11 @@ Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
// JS instanceof operator.
+Type* Typer::Visitor::JSHasInPrototypeChainTyper(Type* lhs, Type* rhs,
+ Typer* t) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::JSInstanceOfTyper(Type* lhs, Type* rhs, Typer* t) {
return Type::Boolean();
}
@@ -1309,7 +1313,6 @@ Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
UNREACHABLE();
- return nullptr;
}
@@ -1341,6 +1344,10 @@ Type* Typer::Visitor::TypeJSConstructForwardVarargs(Node* node) {
Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
+Type* Typer::Visitor::TypeJSConstructWithArrayLike(Node* node) {
+ return Type::Receiver();
+}
+
Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
return Type::Receiver();
}
@@ -1475,6 +1482,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kTypedArrayKeys:
case kTypedArrayValues:
case kArrayIteratorNext:
+ case kMapIteratorNext:
+ case kSetIteratorNext:
return Type::OtherObject();
// Array functions.
@@ -1519,6 +1528,7 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kObjectCreate:
return Type::OtherObject();
case kObjectHasOwnProperty:
+ case kObjectIsPrototypeOf:
return Type::Boolean();
case kObjectToString:
return Type::String();
@@ -1534,6 +1544,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
return Type::String();
// Function functions.
+ case kFunctionBind:
+ return Type::BoundFunction();
case kFunctionHasInstance:
return Type::Boolean();
@@ -1565,7 +1577,6 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
// Set functions.
case kSetAdd:
case kSetEntries:
- case kSetKeys:
case kSetValues:
return Type::OtherObject();
case kSetClear:
@@ -1606,6 +1617,10 @@ Type* Typer::Visitor::TypeJSCall(Node* node) {
return TypeUnaryOp(node, JSCallTyper);
}
+Type* Typer::Visitor::TypeJSCallWithArrayLike(Node* node) {
+ return TypeUnaryOp(node, JSCallTyper);
+}
+
Type* Typer::Visitor::TypeJSCallWithSpread(Node* node) {
return TypeUnaryOp(node, JSCallTyper);
}
@@ -1675,19 +1690,16 @@ Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSLoadModule(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSStoreModule(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
@@ -1791,10 +1803,18 @@ Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
+Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) { UNREACHABLE(); }
+
+Type* Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) { UNREACHABLE(); }
+
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
+Type* Typer::Visitor::TypeSeqStringCharCodeAt(Node* node) {
+ return typer_->cache_.kUint16;
+}
+
Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
return TypeUnaryOp(node, StringFromCharCodeTyper);
}
@@ -1828,7 +1848,6 @@ Type* Typer::Visitor::TypeCheckHeapObject(Node* node) {
Type* Typer::Visitor::TypeCheckIf(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
@@ -1838,7 +1857,6 @@ Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
Type* Typer::Visitor::TypeCheckMaps(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeCheckNumber(Node* node) {
@@ -1860,11 +1878,26 @@ Type* Typer::Visitor::TypeCheckString(Node* node) {
return Type::Intersect(arg, Type::String(), zone());
}
+Type* Typer::Visitor::TypeCheckSeqString(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::SeqString(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckNonEmptyString(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::NonEmptyString(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckSymbol(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::Symbol(), zone());
+}
+
Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
return typer_->operation_typer_.CheckFloat64Hole(Operand(node, 0));
}
-Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
+Type* Typer::Visitor::TypeCheckNotTaggedHole(Node* node) {
Type* type = Operand(node, 0);
type = Type::Intersect(type, Type::NonInternal(), zone());
return type;
@@ -1897,7 +1930,6 @@ Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return nullptr;
}
@@ -1914,29 +1946,28 @@ Type* Typer::Visitor::TypeLoadTypedElement(Node* node) {
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeStoreField(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeStoreBuffer(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeStoreElement(Node* node) {
UNREACHABLE();
- return nullptr;
+}
+
+Type* Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
+ UNREACHABLE();
}
Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
@@ -1993,6 +2024,14 @@ Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeLookupHashStorageIndex(Node* node) {
+ return Type::SignedSmall();
+}
+
+Type* Typer::Visitor::TypeLoadHashMapValue(Node* node) {
+ return Type::NonInternal();
+}
+
// Heap constants.
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index ef2d3a0ef6..73510d7db0 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -80,7 +80,6 @@ double Type::Min() {
if (this->IsOtherNumberConstant())
return this->AsOtherNumberConstant()->Value();
UNREACHABLE();
- return 0;
}
double Type::Max() {
@@ -97,7 +96,6 @@ double Type::Max() {
if (this->IsOtherNumberConstant())
return this->AsOtherNumberConstant()->Value();
UNREACHABLE();
- return 0;
}
// -----------------------------------------------------------------------------
@@ -142,14 +140,11 @@ Type::bitset BitsetType::Lub(Type* type) {
if (type->IsRange()) return type->AsRange()->Lub();
if (type->IsTuple()) return kOtherInternal;
UNREACHABLE();
- return kNone;
}
Type::bitset BitsetType::Lub(i::Map* map) {
DisallowHeapAllocation no_allocation;
switch (map->instance_type()) {
- case STRING_TYPE:
- case ONE_BYTE_STRING_TYPE:
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
case THIN_STRING_TYPE:
@@ -162,16 +157,20 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SHORT_EXTERNAL_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kOtherString;
- case INTERNALIZED_STRING_TYPE:
- case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ return kOtherNonSeqString;
+ case STRING_TYPE:
+ case ONE_BYTE_STRING_TYPE:
+ return kOtherSeqString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kInternalizedString;
+ return kInternalizedNonSeqString;
+ case INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ return kInternalizedSeqString;
case SYMBOL_TYPE:
return kSymbol;
case ODDBALL_TYPE: {
@@ -223,8 +222,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
@@ -268,6 +270,10 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_SET_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_TABLE_TYPE:
DCHECK(!map->is_callable());
DCHECK(!map->is_undetectable());
return kOtherObject;
@@ -292,6 +298,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
+ case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
@@ -321,20 +328,17 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case WEAK_CELL_TYPE:
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
- case PADDING_TYPE_1:
- case PADDING_TYPE_2:
- case PADDING_TYPE_3:
- case PADDING_TYPE_4:
+ case PREPARSED_SCOPE_DATA_TYPE:
UNREACHABLE();
- return kNone;
}
UNREACHABLE();
- return kNone;
}
Type::bitset BitsetType::Lub(i::Object* value) {
@@ -342,7 +346,11 @@ Type::bitset BitsetType::Lub(i::Object* value) {
if (value->IsNumber()) {
return Lub(value->Number());
}
- return Lub(i::HeapObject::cast(value)->map());
+ i::HeapObject* heap_value = i::HeapObject::cast(value);
+ if (value == heap_value->GetHeap()->empty_string()) {
+ return kEmptyString;
+ }
+ return Lub(heap_value->map()) & ~kEmptyString;
}
Type::bitset BitsetType::Lub(double value) {
@@ -466,6 +474,8 @@ HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
: TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
DCHECK(!object->IsHeapNumber());
DCHECK_IMPLIES(object->IsString(), object->IsInternalizedString());
+ DCHECK_IMPLIES(object->IsString(),
+ i::Handle<i::String>::cast(object)->length() != 0);
}
// -----------------------------------------------------------------------------
@@ -499,7 +509,6 @@ bool Type::SimplyEquals(Type* that) {
return true;
}
UNREACHABLE();
- return false;
}
// Check if [this] <= [that].
@@ -841,8 +850,13 @@ Type* Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
return Range(v, v, zone);
} else if (value->IsHeapNumber()) {
return NewConstant(value->Number(), zone);
- } else if (value->IsString() && !value->IsInternalizedString()) {
- return Type::OtherString();
+ } else if (value->IsString()) {
+ i::Isolate* isolate = i::Handle<i::HeapObject>::cast(value)->GetIsolate();
+ if (!value->IsInternalizedString()) {
+ return Type::OtherString();
+ } else if (*value == isolate->heap()->empty_string()) {
+ return Type::EmptyString();
+ }
}
return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
}
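A small illustration of how the amended classification behaves, assuming a Factory* factory and a Zone* zone as used elsewhere in the compiler; the non-internalized handle is hypothetical:

    Type* a = Type::NewConstant(factory->empty_string(), zone);
    // a is Type::EmptyString(), matching the Typer's singleton_empty_string_.
    Type* b = Type::NewConstant(non_internalized_string, zone);  // hypothetical handle
    // b is Type::OtherString(), since the string is not internalized.
    Type* c = Type::NewConstant(factory->InternalizeUtf8String("x"), zone);
    // c is a HeapConstant type for the internalized, non-empty string "x".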
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 452ac7658e..18df2758f2 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -105,28 +105,31 @@ namespace compiler {
V(OtherNumber, 1u << 4) \
#define PROPER_BITSET_TYPE_LIST(V) \
- V(None, 0u) \
- V(Negative31, 1u << 5) \
- V(Null, 1u << 6) \
- V(Undefined, 1u << 7) \
- V(Boolean, 1u << 8) \
- V(Unsigned30, 1u << 9) \
- V(MinusZero, 1u << 10) \
- V(NaN, 1u << 11) \
- V(Symbol, 1u << 12) \
- V(InternalizedString, 1u << 13) \
- V(OtherString, 1u << 14) \
- V(OtherCallable, 1u << 15) \
- V(OtherObject, 1u << 16) \
- V(OtherUndetectable, 1u << 17) \
- V(CallableProxy, 1u << 18) \
- V(OtherProxy, 1u << 19) \
- V(Function, 1u << 20) \
- V(BoundFunction, 1u << 21) \
- V(Hole, 1u << 22) \
- V(OtherInternal, 1u << 23) \
- V(ExternalPointer, 1u << 24) \
- V(Array, 1u << 25) \
+ V(None, 0u) \
+ V(Negative31, 1u << 5) \
+ V(Null, 1u << 6) \
+ V(Undefined, 1u << 7) \
+ V(Boolean, 1u << 8) \
+ V(Unsigned30, 1u << 9) \
+ V(MinusZero, 1u << 10) \
+ V(NaN, 1u << 11) \
+ V(Symbol, 1u << 12) \
+ V(EmptyString, 1u << 13) \
+ V(InternalizedNonEmptySeqString, 1u << 14) \
+ V(InternalizedNonSeqString, 1u << 15) \
+ V(OtherNonSeqString, 1u << 16) \
+ V(OtherSeqString, 1u << 17) \
+ V(OtherCallable, 1u << 18) \
+ V(OtherObject, 1u << 19) \
+ V(OtherUndetectable, 1u << 20) \
+ V(CallableProxy, 1u << 21) \
+ V(OtherProxy, 1u << 22) \
+ V(Function, 1u << 23) \
+ V(BoundFunction, 1u << 24) \
+ V(Hole, 1u << 25) \
+ V(OtherInternal, 1u << 26) \
+ V(ExternalPointer, 1u << 27) \
+ V(Array, 1u << 28) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -146,7 +149,17 @@ namespace compiler {
V(OrderedNumber, kPlainNumber | kMinusZero) \
V(MinusZeroOrNaN, kMinusZero | kNaN) \
V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
+ V(InternalizedSeqString, kEmptyString | \
+ kInternalizedNonEmptySeqString) \
+ V(InternalizedString, kInternalizedSeqString | \
+ kInternalizedNonSeqString) \
+ V(OtherString, kOtherNonSeqString | kOtherSeqString) \
+ V(SeqString, kInternalizedSeqString | kOtherSeqString) \
+ V(NonSeqString, kInternalizedNonSeqString | \
+ kOtherNonSeqString) \
+ V(NonEmptyString, kInternalizedNonEmptySeqString | \
+ kInternalizedNonSeqString | kOtherString) \

+ V(String, kNonEmptyString | kEmptyString) \
V(UniqueName, kSymbol | kInternalizedString) \
V(Name, kSymbol | kString) \
V(InternalizedStringOrNull, kInternalizedString | kNull) \
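A sketch only, checking that the finer string bits above still tile the old unions (the empty string is folded into the internalized sequential strings); this assumes, as in the surrounding macro, that the k-constants are compile-time bitset values:

    static_assert((BitsetType::kInternalizedString | BitsetType::kOtherString) ==
                      BitsetType::kString,
                  "internalized plus other strings cover String");
    static_assert((BitsetType::kSeqString | BitsetType::kNonSeqString) ==
                      BitsetType::kString,
                  "sequential plus non-sequential strings cover String");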
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
index 521ce59f20..44195468c3 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -19,6 +19,8 @@ class V8_EXPORT_PRIVATE ValueNumberingReducer final
explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
~ValueNumberingReducer();
+ const char* reducer_name() const override { return "ValueNumberingReducer"; }
+
Reduction Reduce(Node* node) override;
private:
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a1310ed22f..dbb05460a2 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -543,6 +543,16 @@ void Verifier::Visitor::Check(Node* node) {
// Type is 32 bit integral.
CheckTypeIs(node, Type::Integral32());
break;
+
+ case IrOpcode::kJSStringConcat:
+ // Type is string and all inputs are strings.
+ CheckTypeIs(node, Type::String());
+ for (int i = 0; i < StringConcatParameterOf(node->op()).operand_count();
+ i++) {
+ CheckValueInputIs(node, i, Type::String());
+ }
+ break;
+
case IrOpcode::kJSAdd:
// Type is Number or String.
CheckTypeIs(node, Type::NumberOrString());
@@ -575,6 +585,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
// Type is String.
CheckTypeIs(node, Type::String());
break;
@@ -657,6 +668,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSHasInPrototypeChain:
case IrOpcode::kJSInstanceOf:
case IrOpcode::kJSOrdinaryHasInstance:
// Type is Boolean.
@@ -702,6 +714,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithArrayLike:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
@@ -709,6 +722,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSCallForwardVarargs:
case IrOpcode::kJSCall:
+ case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
case IrOpcode::kJSCallRuntime:
// Type can be anything.
@@ -952,6 +966,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
+ case IrOpcode::kSeqStringCharCodeAt:
+ // (SeqString, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::SeqString());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
case IrOpcode::kStringFromCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
@@ -969,6 +989,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 2, Type::SignedSmall());
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kReferenceEqual:
// (Unique, Any) -> Boolean and
@@ -989,6 +1014,13 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kLookupHashStorageIndex:
+ CheckTypeIs(node, Type::SignedSmall());
+ break;
+ case IrOpcode::kLoadHashMapValue:
+ CheckValueInputIs(node, 2, Type::SignedSmall());
+ CheckTypeIs(node, Type::SignedSmall());
+ break;
case IrOpcode::kArgumentsLength:
CheckValueInputIs(node, 0, Type::ExternalPointer());
CheckTypeIs(node, TypeCache::Get().kArgumentsLengthType);
@@ -1178,6 +1210,17 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kCheckSeqString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::SeqString());
+ break;
+ case IrOpcode::kCheckNonEmptyString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::NonEmptyString());
+ break;
+ case IrOpcode::kCheckSymbol:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Symbol());
+ break;
case IrOpcode::kCheckedInt32Add:
case IrOpcode::kCheckedInt32Sub:
@@ -1202,7 +1245,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::NumberOrHole());
CheckTypeIs(node, Type::NumberOrUndefined());
break;
- case IrOpcode::kCheckTaggedHole:
+ case IrOpcode::kCheckNotTaggedHole:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::NonInternal());
break;
@@ -1243,6 +1286,9 @@ void Verifier::Visitor::Check(Node* node) {
// CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
CheckNotTyped(node);
break;
+ case IrOpcode::kTransitionAndStoreElement:
+ CheckNotTyped(node);
+ break;
case IrOpcode::kStoreTypedElement:
CheckNotTyped(node);
break;
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 56c8f6cbef..2b01c290c7 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -7,11 +7,11 @@
#include <memory>
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -45,16 +45,15 @@
#define WASM_64 0
#endif
+#define FATAL_UNSUPPORTED_OPCODE(opcode) \
+ V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", (opcode), \
+ wasm::WasmOpcodes::OpcodeName(opcode));
+
namespace v8 {
namespace internal {
namespace compiler {
namespace {
-const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
- V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
- wasm::WasmOpcodes::OpcodeName(opcode));
- return nullptr;
-}
void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
Graph* g = jsgraph->graph();
@@ -65,97 +64,6 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
-Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
- Node* centry_stub_node, Node** effect_ptr,
- Node* control) {
- // TODO(eholk): generate code to modify the thread-local storage directly,
- // rather than calling the runtime.
- if (!trap_handler::UseTrapHandler()) {
- return control;
- }
-
- const Runtime::FunctionId f =
- new_value ? Runtime::kSetThreadInWasm : Runtime::kClearThreadInWasm;
- const Runtime::Function* fun = Runtime::FunctionForId(f);
- DCHECK_EQ(0, fun->nargs);
- const CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
- // CEntryStubConstant nodes have to be created and cached in the main
- // thread. At the moment this is only done for CEntryStubConstant(1).
- DCHECK_EQ(1, fun->result_size);
- Node* inputs[] = {centry_stub_node,
- jsgraph->ExternalConstant(
- ExternalReference(f, jsgraph->isolate())), // ref
- jsgraph->Int32Constant(fun->nargs), // arity
- jsgraph->NoContextConstant(),
- *effect_ptr,
- control};
-
- Node* node = jsgraph->graph()->NewNode(jsgraph->common()->Call(desc),
- arraysize(inputs), inputs);
- *effect_ptr = node;
- return node;
-}
-
-// Only call this function for code which is not reused across instantiations,
-// as we do not patch the embedded context.
-Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
- Node* centry_stub_node, Node* context,
- Node** parameters, int parameter_count,
- Node** effect_ptr, Node** control) {
- // Setting and clearing the thread-in-wasm flag should not be done as a normal
- // runtime call.
- DCHECK_NE(f, Runtime::kSetThreadInWasm);
- DCHECK_NE(f, Runtime::kClearThreadInWasm);
- // We're leaving Wasm code, so clear the flag.
- *control = BuildModifyThreadInWasmFlag(false, jsgraph, centry_stub_node,
- effect_ptr, *control);
-
- const Runtime::Function* fun = Runtime::FunctionForId(f);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
- // CEntryStubConstant nodes have to be created and cached in the main
- // thread. At the moment this is only done for CEntryStubConstant(1).
- DCHECK_EQ(1, fun->result_size);
- // At the moment we only allow 3 parameters. If more parameters are needed,
- // increase this constant accordingly.
- static const int kMaxParams = 3;
- DCHECK_GE(kMaxParams, parameter_count);
- Node* inputs[kMaxParams + 6];
- int count = 0;
- inputs[count++] = centry_stub_node;
- for (int i = 0; i < parameter_count; i++) {
- inputs[count++] = parameters[i];
- }
- inputs[count++] = jsgraph->ExternalConstant(
- ExternalReference(f, jsgraph->isolate())); // ref
- inputs[count++] = jsgraph->Int32Constant(fun->nargs); // arity
- inputs[count++] = context; // context
- inputs[count++] = *effect_ptr;
- inputs[count++] = *control;
-
- Node* node =
- jsgraph->graph()->NewNode(jsgraph->common()->Call(desc), count, inputs);
- *effect_ptr = node;
-
- // Restore the thread-in-wasm flag, since we have returned to Wasm.
- *control = BuildModifyThreadInWasmFlag(true, jsgraph, centry_stub_node,
- effect_ptr, *control);
-
- return node;
-}
-
-Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
- Node* centry_stub_node, Node** parameters,
- int parameter_count, Node** effect_ptr,
- Node** control) {
- return BuildCallToRuntimeWithContext(f, jsgraph, centry_stub_node,
- jsgraph->NoContextConstant(), parameters,
- parameter_count, effect_ptr, control);
-}
-
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
@@ -296,6 +204,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
jsgraph()->ExternalConstant(
ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
jsgraph()->IntPtrConstant(0), *effect, *control);
+ *effect = limit;
Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
Node* check =
@@ -303,29 +212,49 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
stack_check.Chain(*control);
- Node* effect_true = *effect;
Handle<Code> code = jsgraph()->isolate()->builtins()->WasmStackGuard();
CallInterfaceDescriptor idesc =
WasmRuntimeCallDescriptor(jsgraph()->isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties);
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1, Linkage::kNoContext);
Node* stub_code = jsgraph()->HeapConstant(code);
- Node* context = jsgraph()->NoContextConstant();
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- context, *effect, stack_check.if_false);
+ *effect, stack_check.if_false);
SetSourcePosition(call, position);
- Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), effect_true,
+ Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), *effect,
call, stack_check.merge);
*control = stack_check.merge;
*effect = ephi;
}
+void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
+ if (!needs_stack_check_) return;
+
+ Node* start = graph()->start();
+ // Place a stack check which uses a dummy node as control and effect.
+ Node* dummy = graph()->NewNode(jsgraph()->common()->Dead());
+ Node* control = dummy;
+ Node* effect = dummy;
+ // The function-prologue stack check is associated with position 0, which
+ // is never a position of any instruction in the function.
+ StackCheck(0, &effect, &control);
+
+ // In testing, no stack checks were emitted. Nothing to rewire then.
+ if (effect == dummy) return;
+
+ // Now patch all control uses of {start} to use {control} and all effect uses
+ // to use {effect} instead. Then rewire the dummy node to use start instead.
+ NodeProperties::ReplaceUses(start, start, effect, control);
+ NodeProperties::ReplaceUses(dummy, nullptr, start, start);
+}
+
Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
wasm::WasmCodePosition position) {
const Operator* op;
@@ -590,7 +519,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
case wasm::kExprF64AsmjsStoreMem:
return BuildAsmjsStoreMem(MachineType::Float64(), left, right);
default:
- op = UnsupportedOpcode(opcode);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
return graph()->NewNode(op, left, right);
}
@@ -851,7 +780,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
case wasm::kExprF64AsmjsLoadMem:
return BuildAsmjsLoadMem(MachineType::Float64(), input);
default:
- op = UnsupportedOpcode(opcode);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
return graph()->NewNode(op, input);
}
@@ -916,7 +845,6 @@ Builtins::Name GetBuiltinIdForTrap(bool in_cctest, wasm::TrapReason reason) {
#undef TRAPREASON_TO_MESSAGE
default:
UNREACHABLE();
- return Builtins::builtin_count;
}
}
} // namespace
@@ -1081,8 +1009,157 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
return false;
}
-Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
- wasm::ValueType wasmtype) {
+Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
+ MachineType memtype,
+ wasm::ValueType wasmtype) {
+ Node* result;
+ Node* value = node;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ int valueSizeInBytes = 1 << ElementSizeLog2Of(wasmtype);
+ int valueSizeInBits = 8 * valueSizeInBytes;
+ bool isFloat = false;
+
+ switch (wasmtype) {
+ case wasm::kWasmF64:
+ value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ isFloat = true;
+ case wasm::kWasmI64:
+ result = jsgraph()->Int64Constant(0);
+ break;
+ case wasm::kWasmF32:
+ value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ isFloat = true;
+ case wasm::kWasmI32:
+ result = jsgraph()->Int32Constant(0);
+ break;
+ case wasm::kWasmS128:
+ DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ if (memtype.representation() == MachineRepresentation::kWord8) {
+ // No need to change endianness for byte size, return original node
+ return node;
+ }
+ if (wasmtype == wasm::kWasmI64 &&
+ memtype.representation() < MachineRepresentation::kWord64) {
+ // In case we store lower part of WasmI64 expression, we can truncate
+ // upper 32bits
+ value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ valueSizeInBytes = 1 << ElementSizeLog2Of(wasm::kWasmI32);
+ valueSizeInBits = 8 * valueSizeInBytes;
+ if (memtype.representation() == MachineRepresentation::kWord16) {
+ value =
+ graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
+ }
+ } else if (wasmtype == wasm::kWasmI32 &&
+ memtype.representation() == MachineRepresentation::kWord16) {
+ value =
+ graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
+ }
+
+ int i;
+ uint32_t shiftCount;
+
+ if (ReverseBytesSupported(m, valueSizeInBytes)) {
+ switch (valueSizeInBytes) {
+ case 4:
+ result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ break;
+ case 8:
+ result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ break;
+ case 16: {
+ Node* byte_reversed_lanes[4];
+ for (int lane = 0; lane < 4; lane++) {
+ byte_reversed_lanes[lane] = graph()->NewNode(
+ m->Word32ReverseBytes().op(),
+ graph()->NewNode(jsgraph()->machine()->I32x4ExtractLane(lane),
+ value));
+ }
+
+ // This is making a copy of the value.
+ result =
+ graph()->NewNode(jsgraph()->machine()->S128And(), value, value);
+
+ for (int lane = 0; lane < 4; lane++) {
+ result =
+ graph()->NewNode(jsgraph()->machine()->I32x4ReplaceLane(3 - lane),
+ result, byte_reversed_lanes[lane]);
+ }
+
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ for (i = 0, shiftCount = valueSizeInBits - 8; i < valueSizeInBits / 2;
+ i += 8, shiftCount -= 16) {
+ Node* shiftLower;
+ Node* shiftHigher;
+ Node* lowerByte;
+ Node* higherByte;
+
+ DCHECK(shiftCount > 0);
+ DCHECK((shiftCount + 8) % 16 == 0);
+
+ if (valueSizeInBits > 32) {
+ shiftLower = graph()->NewNode(m->Word64Shl(), value,
+ jsgraph()->Int64Constant(shiftCount));
+ shiftHigher = graph()->NewNode(m->Word64Shr(), value,
+ jsgraph()->Int64Constant(shiftCount));
+ lowerByte = graph()->NewNode(
+ m->Word64And(), shiftLower,
+ jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = graph()->NewNode(
+ m->Word64And(), shiftHigher,
+ jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = graph()->NewNode(m->Word64Or(), result, lowerByte);
+ result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ } else {
+ shiftLower = graph()->NewNode(m->Word32Shl(), value,
+ jsgraph()->Int32Constant(shiftCount));
+ shiftHigher = graph()->NewNode(m->Word32Shr(), value,
+ jsgraph()->Int32Constant(shiftCount));
+ lowerByte = graph()->NewNode(
+ m->Word32And(), shiftLower,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = graph()->NewNode(
+ m->Word32And(), shiftHigher,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = graph()->NewNode(m->Word32Or(), result, lowerByte);
+ result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ }
+ }
+ }
+
+ if (isFloat) {
+ switch (wasmtype) {
+ case wasm::kWasmF64:
+ result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ break;
+ case wasm::kWasmF32:
+ result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ return result;
+}
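For readers following the shift/mask fallback above, a self-contained sketch of the same byte-reversal idea on a plain uint32_t (illustration only, not V8 code):

    #include <cstdint>

    // Reverse the byte order of a 32-bit value with shifts and masks, pairing
    // the lowest byte with the highest and then the two middle bytes; this
    // mirrors the pairing the graph-building loop performs with
    // Word32Shl/Word32Shr and Word32And/Word32Or.
    uint32_t ReverseBytes32(uint32_t value) {
      uint32_t result = 0;
      result |= (value << 24) & 0xFF000000u;  // byte 0 -> byte 3
      result |= (value << 8) & 0x00FF0000u;   // byte 1 -> byte 2
      result |= (value >> 8) & 0x0000FF00u;   // byte 2 -> byte 1
      result |= (value >> 24) & 0x000000FFu;  // byte 3 -> byte 0
      return result;
    }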
+
+Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
+ MachineType memtype,
+ wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1711,11 +1788,7 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
}
Node* WasmGraphBuilder::GrowMemory(Node* input) {
- // GrowMemory will not be called from asm.js, hence we cannot be in
- // lazy-compilation mode, hence the instance will be set.
- DCHECK_NOT_NULL(module_);
- DCHECK_NOT_NULL(module_->instance);
-
+ SetNeedsStackCheck();
Diamond check_input_range(
graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
@@ -1726,9 +1799,9 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
Node* parameters[] = {BuildChangeUint32ToSmi(input)};
Node* old_effect = *effect_;
- Node* call = BuildCallToRuntime(
- Runtime::kWasmGrowMemory, jsgraph(), centry_stub_node_, parameters,
- arraysize(parameters), effect_, &check_input_range.if_true);
+ *control_ = check_input_range.if_true;
+ Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, parameters,
+ arraysize(parameters));
Node* result = BuildChangeSmiToInt32(call);
@@ -1741,6 +1814,7 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
}
Node* WasmGraphBuilder::Throw(Node* input) {
+ SetNeedsStackCheck();
MachineOperatorBuilder* machine = jsgraph()->machine();
// Pass the thrown value as two SMIs:
@@ -1758,18 +1832,17 @@ Node* WasmGraphBuilder::Throw(Node* input) {
graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
Node* parameters[] = {lower, upper}; // thrown value
- return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), centry_stub_node_,
- parameters, arraysize(parameters), effect_,
- control_);
+ return BuildCallToRuntime(Runtime::kWasmThrow, parameters,
+ arraysize(parameters));
}
Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
+ SetNeedsStackCheck();
CommonOperatorBuilder* common = jsgraph()->common();
Node* parameters[] = {input}; // caught value
Node* value = BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue,
- jsgraph(), centry_stub_node_, parameters,
- arraysize(parameters), effect_, control_);
+ parameters, arraysize(parameters));
Node* is_smi;
Node* is_heap;
@@ -2127,8 +2200,6 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
Node* call = BuildCCall(sig_builder.Build(), args);
- // TODO(wasm): This can get simpler if we have a specialized runtime call to
- // throw WASM exceptions by trap code instead of by string.
ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
@@ -2163,6 +2234,7 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position) {
+ SetNeedsStackCheck();
const size_t params = sig->parameter_count();
const size_t extra = 2; // effect and control inputs.
const size_t count = 1 + params + extra;
@@ -2174,8 +2246,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
args[params + 1] = *effect_;
args[params + 2] = *control_;
- CallDescriptor* descriptor =
- wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ CallDescriptor* descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
const Operator* op = jsgraph()->common()->Call(descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
SetSourcePosition(call, position);
@@ -2414,12 +2485,12 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
return jsgraph()->UndefinedConstant();
default:
UNREACHABLE();
- return nullptr;
}
}
Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
- Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ Callable callable =
+ Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoProperties);
@@ -2515,7 +2586,6 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
break;
default:
UNREACHABLE();
- return nullptr;
}
return num;
}
@@ -2619,16 +2689,14 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
graph()->start());
// Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(true);
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the context of the calling javascript function
// (passed as a parameter), such that the generated code is context
// independent.
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- centry_stub_node_, context, nullptr, 0,
- effect_, control_);
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ nullptr, 0);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
@@ -2640,8 +2708,8 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// We only need a dummy call descriptor.
wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
- CallDescriptor* desc = wasm::ModuleEnv::GetWasmCallDescriptor(
- jsgraph()->zone(), dummy_sig_builder.Build());
+ CallDescriptor* desc =
+ GetWasmCallDescriptor(jsgraph()->zone(), dummy_sig_builder.Build());
*effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
Return(jsgraph()->UndefinedConstant());
return;
@@ -2650,7 +2718,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
int pos = 0;
args[pos++] = HeapConstant(wasm_code);
- // Convert JS parameters to WASM numbers.
+ // Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
Node* param = Param(i + 1);
Node* wasm_param = FromJS(param, context, sig->GetParam(i));
@@ -2660,16 +2728,14 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
args[pos++] = *effect_;
args[pos++] = *control_;
- // Call the WASM code.
- CallDescriptor* desc =
- wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ // Call the wasm code.
+ CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig);
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
*effect_ = call;
// Clear the ThreadInWasmFlag
- BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(false);
Node* retval = call;
Node* jsval = ToJS(
@@ -2679,7 +2745,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
wasm::FunctionSig* sig) {
- // Convert WASM numbers to JS values.
+ // Convert wasm numbers to JS values.
int param_index = 0;
for (int i = 0; i < param_count; ++i) {
Node* param = Param(param_index++);
@@ -2706,9 +2772,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
// regenerated at instantiation time.
Node* context =
jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- centry_stub_node_, context, nullptr, 0,
- effect_, control_);
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ nullptr, 0);
// We don't need to return a value here, as the runtime call will not return
// anyway (the c entry stub will trigger stack unwinding).
ReturnVoid();
@@ -2719,8 +2784,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* call = nullptr;
- BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(false);
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
@@ -2740,7 +2804,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
desc = Linkage::GetJSCallDescriptor(
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
- // Convert WASM numbers to JS values.
+ // Convert wasm numbers to JS values.
pos = AddParameterNodes(args, pos, wasm_count, sig);
args[pos++] = jsgraph()->UndefinedConstant(); // new target
@@ -2767,7 +2831,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
callable.descriptor(), wasm_count + 1,
CallDescriptor::kNoFlags);
- // Convert WASM numbers to JS values.
+ // Convert wasm numbers to JS values.
pos = AddParameterNodes(args, pos, wasm_count, sig);
// The native_context is sufficient here, because all kind of callables
@@ -2785,8 +2849,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*effect_ = call;
SetSourcePosition(call, 0);
- BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(true);
// Convert the return value back.
Node* val = sig->return_count() == 0
@@ -2823,10 +2886,11 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
// Get a stack slot for the arguments.
- Node* arg_buffer = args_size_bytes == 0 && return_size_bytes == 0
- ? jsgraph()->IntPtrConstant(0)
- : graph()->NewNode(jsgraph()->machine()->StackSlot(
- std::max(args_size_bytes, return_size_bytes)));
+ Node* arg_buffer =
+ args_size_bytes == 0 && return_size_bytes == 0
+ ? jsgraph()->IntPtrConstant(0)
+ : graph()->NewNode(jsgraph()->machine()->StackSlot(
+ std::max(args_size_bytes, return_size_bytes), 8));
// Now store all our arguments to the buffer.
int param_index = 0;
@@ -2836,26 +2900,23 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
Node* param = Param(param_index++);
if (Int64Lowering::IsI64AsTwoParameters(jsgraph()->machine(),
sig->GetParam(i))) {
- StoreRepresentation store_rep(wasm::kWasmI32,
- WriteBarrierKind::kNoWriteBarrier);
- *effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(offset + kInt64LowerHalfMemoryOffset),
- param, *effect_, *control_);
+ int lower_half_offset = offset + kInt64LowerHalfMemoryOffset;
+ int upper_half_offset = offset + kInt64UpperHalfMemoryOffset;
+
+ *effect_ = graph()->NewNode(
+ GetSafeStoreOperator(lower_half_offset, wasm::kWasmI32), arg_buffer,
+ Int32Constant(lower_half_offset), param, *effect_, *control_);
param = Param(param_index++);
- *effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(offset + kInt64UpperHalfMemoryOffset),
- param, *effect_, *control_);
+ *effect_ = graph()->NewNode(
+ GetSafeStoreOperator(upper_half_offset, wasm::kWasmI32), arg_buffer,
+ Int32Constant(upper_half_offset), param, *effect_, *control_);
offset += 8;
} else {
MachineRepresentation param_rep = sig->GetParam(i);
- StoreRepresentation store_rep(param_rep,
- WriteBarrierKind::kNoWriteBarrier);
*effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ graph()->NewNode(GetSafeStoreOperator(offset, param_rep), arg_buffer,
Int32Constant(offset), param, *effect_, *control_);
offset += 1 << ElementSizeLog2Of(param_rep);
}
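
The hunk above spills wasm parameters into the interpreter entry's argument buffer: an i64 that arrives as two i32 halves is stored at lower/upper half offsets, and every other parameter advances the offset by its element size. A standalone sketch of that offset bookkeeping; the Val enum and the half offsets are illustrative stand-ins (little-endian layout assumed), not V8 types:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for wasm value kinds; sizes mirror i32/i64/f32/f64.
enum class Val { I32, I64, F32, F64 };

int SizeOf(Val v) { return (v == Val::I64 || v == Val::F64) ? 8 : 4; }

int main() {
  // Example signature (i32, i64, f64), lowered for a 32-bit target where an
  // i64 parameter arrives as two 32-bit halves.
  std::vector<Val> params = {Val::I32, Val::I64, Val::F64};
  const int kInt64LowerHalfMemoryOffset = 0;  // little-endian assumption
  const int kInt64UpperHalfMemoryOffset = 4;

  int offset = 0;
  for (Val p : params) {
    if (p == Val::I64) {
      std::printf("i64 lower half at %d, upper half at %d\n",
                  offset + kInt64LowerHalfMemoryOffset,
                  offset + kInt64UpperHalfMemoryOffset);
      offset += 8;
    } else {
      std::printf("param stored at offset %d (size %d)\n", offset, SizeOf(p));
      offset += SizeOf(p);
    }
  }
  std::printf("argument buffer needs %d bytes\n", offset);
}
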
@@ -2871,8 +2932,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
jsgraph()->SmiConstant(function_index), // function index
arg_buffer, // argument buffer
};
- BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), centry_stub_node_,
- parameters, arraysize(parameters), effect_, control_);
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters,
+ arraysize(parameters));
// Read back the return value.
if (sig->return_count() == 0) {
@@ -2911,17 +2972,16 @@ Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
return mem_buffer_;
} else {
return jsgraph()->RelocatableIntPtrConstant(
- mem_start + offset, RelocInfo::WASM_MEMORY_REFERENCE);
+ static_cast<uintptr_t>(mem_start + offset),
+ RelocInfo::WASM_MEMORY_REFERENCE);
}
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
- // CurrentMemoryPages will not be called from asm.js, hence we cannot be in
- // lazy-compilation mode, hence the instance will be set.
- DCHECK_EQ(wasm::kWasmOrigin, module_->module->get_origin());
- Node* call =
- BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), centry_stub_node_,
- nullptr, 0, effect_, control_);
+ // CurrentMemoryPages can not be called from asm.js.
+ DCHECK_EQ(wasm::kWasmOrigin, module_->module->origin());
+ SetNeedsStackCheck();
+ Node* call = BuildCallToRuntime(Runtime::kWasmMemorySize, nullptr, 0);
Node* result = BuildChangeSmiToInt32(call);
return result;
}
@@ -2954,6 +3014,91 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
}
}
+Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
+ // TODO(eholk): generate code to modify the thread-local storage directly,
+ // rather than calling the runtime.
+ if (!trap_handler::UseTrapHandler()) {
+ return *control_;
+ }
+
+ const Runtime::FunctionId f =
+ new_value ? Runtime::kSetThreadInWasm : Runtime::kClearThreadInWasm;
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ DCHECK_EQ(0, fun->nargs);
+ const CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
+ Node* inputs[] = {centry_stub_node_,
+ jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(fun->nargs), // arity
+ jsgraph()->NoContextConstant(),
+ *effect_,
+ *control_};
+
+ Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
+ arraysize(inputs), inputs);
+ *effect_ = node;
+ return node;
+}
+
+// Only call this function for code which is not reused across instantiations,
+// as we do not patch the embedded context.
+Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
+ Node* context,
+ Node** parameters,
+ int parameter_count) {
+ // Setting and clearing the thread-in-wasm flag should not be done as a normal
+ // runtime call.
+ DCHECK_NE(f, Runtime::kSetThreadInWasm);
+ DCHECK_NE(f, Runtime::kClearThreadInWasm);
+ // We're leaving Wasm code, so clear the flag.
+ *control_ = BuildModifyThreadInWasmFlag(false);
+
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
+ // At the moment we only allow 3 parameters. If more parameters are needed,
+ // increase this constant accordingly.
+ static const int kMaxParams = 3;
+ DCHECK_GE(kMaxParams, parameter_count);
+ Node* inputs[kMaxParams + 6];
+ int count = 0;
+ inputs[count++] = centry_stub_node_;
+ for (int i = 0; i < parameter_count; i++) {
+ inputs[count++] = parameters[i];
+ }
+ inputs[count++] = jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())); // ref
+ inputs[count++] = jsgraph()->Int32Constant(fun->nargs); // arity
+ inputs[count++] = context; // context
+ inputs[count++] = *effect_;
+ inputs[count++] = *control_;
+
+ Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
+ count, inputs);
+ *effect_ = node;
+
+ // Restore the thread-in-wasm flag, since we have returned to Wasm.
+ *control_ = BuildModifyThreadInWasmFlag(true);
+
+ return node;
+}
+
+Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
+ Node** parameters,
+ int parameter_count) {
+ return BuildCallToRuntimeWithContext(f, jsgraph()->NoContextConstant(),
+ parameters, parameter_count);
+}
+
Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
MachineType mem_type =
wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
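
The helpers added above give every runtime call the same input layout: the CEntry stub, up to kMaxParams parameters, the external reference for the runtime function, the arity, the context, then effect and control, i.e. parameter_count + 6 inputs in total. A small self-contained sketch of that ordering, with strings standing in for TurboFan nodes:

#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

// Illustrative only: models the input ordering documented in
// BuildCallToRuntimeWithContext above, with strings in place of graph nodes.
std::vector<std::string> BuildRuntimeCallInputs(
    const std::vector<std::string>& parameters) {
  const int kMaxParams = 3;
  assert(parameters.size() <= static_cast<size_t>(kMaxParams));
  std::vector<std::string> inputs;
  inputs.push_back("centry_stub");
  for (const std::string& p : parameters) inputs.push_back(p);
  inputs.push_back("runtime_function_ref");  // ExternalConstant
  inputs.push_back("arity");                 // Int32Constant(fun->nargs)
  inputs.push_back("context");
  inputs.push_back("effect");
  inputs.push_back("control");
  assert(inputs.size() == parameters.size() + 6);
  return inputs;
}

int main() {
  // Two parameters (e.g. the two SMI halves a throw passes) plus six fixed inputs.
  auto inputs = BuildRuntimeCallInputs({"lower_half", "upper_half"});
  std::printf("%zu inputs total\n", inputs.size());  // prints "8 inputs total"
}
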
@@ -3042,13 +3187,25 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
+const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
+ wasm::ValueType type) {
+ int alignment = offset % (1 << ElementSizeLog2Of(type));
+ if (alignment == 0 || jsgraph()->machine()->UnalignedStoreSupported(
+ MachineType::TypeForRepresentation(type), 0)) {
+ StoreRepresentation rep(type, WriteBarrierKind::kNoWriteBarrier);
+ return jsgraph()->machine()->Store(rep);
+ }
+ UnalignedStoreRepresentation rep(type);
+ return jsgraph()->machine()->UnalignedStore(rep);
+}
+
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
- // WASM semantics throw on OOB. Introduce explicit bounds check.
+ // Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
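
GetSafeStoreOperator above emits a plain store only when the buffer offset is naturally aligned for the stored type (or the machine supports unaligned stores for it) and otherwise falls back to UnalignedStore. The alignment test is just offset modulo element size; a minimal standalone illustration:

#include <cstdio>

// Mirrors the check in GetSafeStoreOperator: an offset is "safely aligned"
// for a type when offset % (1 << ElementSizeLog2Of(type)) == 0.
bool IsNaturallyAligned(int offset, int element_size_log2) {
  return offset % (1 << element_size_log2) == 0;
}

int main() {
  // An i32 (element size log2 = 2) at offset 12 is aligned, an f64
  // (element size log2 = 3) at offset 12 is not, so the builder would emit
  // machine()->UnalignedStore() for the latter.
  std::printf("offset 12, i32: %s\n",
              IsNaturallyAligned(12, 2) ? "aligned" : "unaligned");
  std::printf("offset 12, f64: %s\n",
              IsNaturallyAligned(12, 3) ? "aligned" : "unaligned");
}
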
@@ -3075,7 +3232,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
*effect_ = load;
#if defined(V8_TARGET_BIG_ENDIAN)
- load = BuildChangeEndianness(load, memtype, type);
+ load = BuildChangeEndiannessLoad(load, memtype, type);
#endif
if (type == wasm::kWasmI64 &&
@@ -3094,19 +3251,19 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
return load;
}
-
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment, Node* val,
- wasm::WasmCodePosition position) {
+ wasm::WasmCodePosition position,
+ wasm::ValueType type) {
Node* store;
- // WASM semantics throw on OOB. Introduce explicit bounds check.
+ // Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
#if defined(V8_TARGET_BIG_ENDIAN)
- val = BuildChangeEndianness(val, memtype);
+ val = BuildChangeEndiannessStore(val, memtype, type);
#endif
if (memtype.representation() == MachineRepresentation::kWord8 ||
@@ -3193,23 +3350,7 @@ Node* WasmGraphBuilder::S128Zero() {
return graph()->NewNode(jsgraph()->machine()->S128Zero());
}
-Node* WasmGraphBuilder::S1x4Zero() {
- has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->S1x4Zero());
-}
-
-Node* WasmGraphBuilder::S1x8Zero() {
- has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->S1x8Zero());
-}
-
-Node* WasmGraphBuilder::S1x16Zero() {
- has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->S1x16Zero());
-}
-
-Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
- const NodeVector& inputs) {
+Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4Splat:
@@ -3307,17 +3448,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I32x4Ne(), inputs[0],
inputs[1]);
case wasm::kExprI32x4LtS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtS(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI32x4LeS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeS(), inputs[0],
- inputs[1]);
- case wasm::kExprI32x4GtS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtS(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeS(), inputs[1],
inputs[0]);
+ case wasm::kExprI32x4GtS:
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4GeS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeS(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4UConvertI16x8Low:
return graph()->NewNode(jsgraph()->machine()->I32x4UConvertI16x8Low(),
inputs[0]);
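
The comparison rewrites in this and the following I16x8/I8x16 hunks all use one trick: only Gt/Ge machine operators are kept, and Lt/Le are lowered by swapping the operands, relying on a < b being the same as b > a (and a <= b the same as b >= a) per lane, for signed and unsigned values alike. A scalar check of that identity:

#include <cassert>
#include <cstdint>

int main() {
  // Lane-wise, the graph builder now emits Gt/Ge with swapped inputs for
  // Lt/Le; the identity holds for both signed and unsigned lanes.
  for (int32_t a = -2; a <= 2; ++a) {
    for (int32_t b = -2; b <= 2; ++b) {
      assert((a < b) == (b > a));     // kExprI32x4LtS -> I32x4GtS(b, a)
      assert((a <= b) == (b >= a));   // kExprI32x4LeS -> I32x4GeS(b, a)
      uint32_t ua = static_cast<uint32_t>(a), ub = static_cast<uint32_t>(b);
      assert((ua < ub) == (ub > ua));    // kExprI32x4LtU -> I32x4GtU(b, a)
      assert((ua <= ub) == (ub >= ua));  // kExprI32x4LeU -> I32x4GeU(b, a)
    }
  }
  return 0;
}
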
@@ -3331,17 +3472,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I32x4MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI32x4LtU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtU(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI32x4LeU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeU(), inputs[0],
- inputs[1]);
- case wasm::kExprI32x4GtU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtU(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeU(), inputs[1],
inputs[0]);
+ case wasm::kExprI32x4GtU:
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4GeU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeU(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(jsgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -3386,17 +3527,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I16x8Ne(), inputs[0],
inputs[1]);
case wasm::kExprI16x8LtS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtS(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8LeS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeS(), inputs[0],
- inputs[1]);
- case wasm::kExprI16x8GtS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtS(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeS(), inputs[1],
inputs[0]);
+ case wasm::kExprI16x8GtS:
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8GeS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeS(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8UConvertI8x16Low:
return graph()->NewNode(jsgraph()->machine()->I16x8UConvertI8x16Low(),
inputs[0]);
@@ -3419,17 +3560,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I16x8MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI16x8LtU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtU(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8LeU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeU(), inputs[0],
- inputs[1]);
- case wasm::kExprI16x8GtU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtU(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeU(), inputs[1],
inputs[0]);
+ case wasm::kExprI16x8GtU:
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8GeU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeU(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16Splat:
return graph()->NewNode(jsgraph()->machine()->I8x16Splat(), inputs[0]);
case wasm::kExprI8x16Neg:
@@ -3465,17 +3606,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I8x16Ne(), inputs[0],
inputs[1]);
case wasm::kExprI8x16LtS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtS(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16LeS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeS(), inputs[0],
- inputs[1]);
- case wasm::kExprI8x16GtS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtS(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeS(), inputs[1],
inputs[0]);
+ case wasm::kExprI8x16GtS:
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16GeS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeS(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16UConvertI16x8:
return graph()->NewNode(jsgraph()->machine()->I8x16UConvertI16x8(),
inputs[0], inputs[1]);
@@ -3492,17 +3633,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I8x16MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI8x16LtU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtU(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16LeU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeU(), inputs[0],
- inputs[1]);
- case wasm::kExprI8x16GtU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtU(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeU(), inputs[1],
inputs[0]);
+ case wasm::kExprI8x16GtU:
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16GeU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeU(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprS128And:
return graph()->NewNode(jsgraph()->machine()->S128And(), inputs[0],
inputs[1]);
@@ -3514,67 +3655,28 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
inputs[1]);
case wasm::kExprS128Not:
return graph()->NewNode(jsgraph()->machine()->S128Not(), inputs[0]);
- case wasm::kExprS32x4Select:
- return graph()->NewNode(jsgraph()->machine()->S32x4Select(), inputs[0],
- inputs[1], inputs[2]);
- case wasm::kExprS16x8Select:
- return graph()->NewNode(jsgraph()->machine()->S16x8Select(), inputs[0],
- inputs[1], inputs[2]);
- case wasm::kExprS8x16Select:
- return graph()->NewNode(jsgraph()->machine()->S8x16Select(), inputs[0],
+ case wasm::kExprS128Select:
+ return graph()->NewNode(jsgraph()->machine()->S128Select(), inputs[0],
inputs[1], inputs[2]);
- case wasm::kExprS1x4And:
- return graph()->NewNode(jsgraph()->machine()->S1x4And(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x4Or:
- return graph()->NewNode(jsgraph()->machine()->S1x4Or(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x4Xor:
- return graph()->NewNode(jsgraph()->machine()->S1x4Xor(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x4Not:
- return graph()->NewNode(jsgraph()->machine()->S1x4Not(), inputs[0]);
case wasm::kExprS1x4AnyTrue:
return graph()->NewNode(jsgraph()->machine()->S1x4AnyTrue(), inputs[0]);
case wasm::kExprS1x4AllTrue:
return graph()->NewNode(jsgraph()->machine()->S1x4AllTrue(), inputs[0]);
- case wasm::kExprS1x8And:
- return graph()->NewNode(jsgraph()->machine()->S1x8And(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x8Or:
- return graph()->NewNode(jsgraph()->machine()->S1x8Or(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x8Xor:
- return graph()->NewNode(jsgraph()->machine()->S1x8Xor(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x8Not:
- return graph()->NewNode(jsgraph()->machine()->S1x8Not(), inputs[0]);
case wasm::kExprS1x8AnyTrue:
return graph()->NewNode(jsgraph()->machine()->S1x8AnyTrue(), inputs[0]);
case wasm::kExprS1x8AllTrue:
return graph()->NewNode(jsgraph()->machine()->S1x8AllTrue(), inputs[0]);
- case wasm::kExprS1x16And:
- return graph()->NewNode(jsgraph()->machine()->S1x16And(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x16Or:
- return graph()->NewNode(jsgraph()->machine()->S1x16Or(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x16Xor:
- return graph()->NewNode(jsgraph()->machine()->S1x16Xor(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x16Not:
- return graph()->NewNode(jsgraph()->machine()->S1x16Not(), inputs[0]);
case wasm::kExprS1x16AnyTrue:
return graph()->NewNode(jsgraph()->machine()->S1x16AnyTrue(), inputs[0]);
case wasm::kExprS1x16AllTrue:
return graph()->NewNode(jsgraph()->machine()->S1x16AllTrue(), inputs[0]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
}
Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
- const NodeVector& inputs) {
+ Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4ExtractLane:
@@ -3602,12 +3704,12 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
return graph()->NewNode(jsgraph()->machine()->I8x16ReplaceLane(lane),
inputs[0], inputs[1]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
}
Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
- const NodeVector& inputs) {
+ Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprI32x4Shl:
@@ -3635,27 +3737,15 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
return graph()->NewNode(jsgraph()->machine()->I8x16ShrU(shift),
inputs[0]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
}
-Node* WasmGraphBuilder::SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
- const NodeVector& inputs) {
+Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
+ Node* const* inputs) {
has_simd_ = true;
- switch (lanes) {
- case 4:
- return graph()->NewNode(jsgraph()->machine()->S32x4Shuffle(shuffle),
- inputs[0], inputs[1]);
- case 8:
- return graph()->NewNode(jsgraph()->machine()->S16x8Shuffle(shuffle),
- inputs[0], inputs[1]);
- case 16:
- return graph()->NewNode(jsgraph()->machine()->S8x16Shuffle(shuffle),
- inputs[0], inputs[1]);
- default:
- UNREACHABLE();
- return nullptr;
- }
+ return graph()->NewNode(jsgraph()->machine()->S8x16Shuffle(shuffle),
+ inputs[0], inputs[1]);
}
static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
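
Simd8x16ShuffleOp above replaces the three lane-width shuffle variants with a single byte-level S8x16Shuffle; a 4-lane or 8-lane shuffle can always be expressed as a 16-byte shuffle by expanding each lane index into its byte indices. A standalone sketch of that expansion (little-endian byte order assumed; not V8 code):

#include <array>
#include <cstdint>
#include <cstdio>

// Expand a 4-lane (32-bit) shuffle pattern into the equivalent 16-byte
// pattern that a single S8x16Shuffle can consume.
std::array<uint8_t, 16> ExpandS32x4Shuffle(const std::array<uint8_t, 4>& lanes) {
  std::array<uint8_t, 16> bytes{};
  for (int lane = 0; lane < 4; ++lane) {
    for (int b = 0; b < 4; ++b) {
      bytes[lane * 4 + b] = static_cast<uint8_t>(lanes[lane] * 4 + b);
    }
  }
  return bytes;
}

int main() {
  // Swapping the two 64-bit halves of a 128-bit value: lanes {2, 3, 0, 1}.
  auto bytes = ExpandS32x4Shuffle({2, 3, 0, 1});
  for (uint8_t b : bytes) std::printf("%d ", b);
  std::printf("\n");  // 8 9 10 11 12 13 14 15 0 1 2 3 4 5 6 7
}
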
@@ -3672,7 +3762,7 @@ static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<String> name_str =
isolate->factory()->NewStringFromAsciiChecked(buffer.start());
Handle<String> script_str =
- isolate->factory()->NewStringFromAsciiChecked("(WASM)");
+ isolate->factory()->NewStringFromAsciiChecked("(wasm)");
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
@@ -3793,10 +3883,9 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
}
// Schedule and compile to machine code.
- CallDescriptor* incoming =
- wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
if (machine.Is32()) {
- incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
bool debugging =
@@ -3854,7 +3943,10 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -3875,10 +3967,9 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
}
// Schedule and compile to machine code.
- CallDescriptor* incoming =
- wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
if (machine.Is32()) {
- incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
EmbeddedVector<char, 32> debug_name;
@@ -3981,135 +4072,152 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function,
- bool is_sync)
+ Handle<Code> centry_stub)
: WasmCompilationUnit(
isolate, &module_env->module_env,
wasm::FunctionBody{
- function->sig, module_env->wire_bytes.start(),
- module_env->wire_bytes.start() + function->code_start_offset,
- module_env->wire_bytes.start() + function->code_end_offset},
+ function->sig, function->code.offset(),
+ module_env->wire_bytes.start() + function->code.offset(),
+ module_env->wire_bytes.start() + function->code.end_offset()},
module_env->wire_bytes.GetNameOrNull(function), function->func_index,
- is_sync) {}
+ centry_stub) {}
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleEnv* module_env,
wasm::FunctionBody body,
wasm::WasmName name, int index,
- bool is_sync)
+ Handle<Code> centry_stub)
: isolate_(isolate),
module_env_(module_env),
func_body_(body),
func_name_(name),
- is_sync_(is_sync),
- centry_stub_(CEntryStub(isolate, 1).GetCode()),
+ counters_(isolate->counters()),
+ centry_stub_(centry_stub),
+ func_index_(index) {}
+
+WasmCompilationUnit::WasmCompilationUnit(
+ Isolate* isolate, wasm::ModuleBytesEnv* module_env,
+ const wasm::WasmFunction* function, Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters)
+ : WasmCompilationUnit(
+ isolate, &module_env->module_env,
+ wasm::FunctionBody{
+ function->sig, function->code.offset(),
+ module_env->wire_bytes.start() + function->code.offset(),
+ module_env->wire_bytes.start() + function->code.end_offset()},
+ module_env->wire_bytes.GetNameOrNull(function), function->func_index,
+ centry_stub, async_counters) {}
+
+WasmCompilationUnit::WasmCompilationUnit(
+ Isolate* isolate, wasm::ModuleEnv* module_env, wasm::FunctionBody body,
+ wasm::WasmName name, int index, Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters)
+ : isolate_(isolate),
+ module_env_(module_env),
+ func_body_(body),
+ func_name_(name),
+ counters_(async_counters.get()),
+ centry_stub_(centry_stub),
func_index_(index) {}
void WasmCompilationUnit::ExecuteCompilation() {
- if (is_sync_) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- HistogramTimerScope wasm_compile_function_time_scope(
- isolate_->counters()->wasm_compile_function_time());
- ExecuteCompilationInternal();
- }
- ExecuteCompilationInternal();
- // Record the memory cost this unit places on the system until
- // it is finalized. That may be "0" in error cases.
- if (job_) {
- size_t cost = job_->AllocatedMemory();
- set_memory_cost(cost);
- }
-}
+ TimedHistogramScope wasm_compile_function_time_scope(
+ counters()->wasm_compile_function_time());
-void WasmCompilationUnit::ExecuteCompilationInternal() {
if (FLAG_trace_wasm_compiler) {
if (func_name_.start() != nullptr) {
- PrintF("Compiling WASM function %d:'%.*s'\n\n", func_index(),
+ PrintF("Compiling wasm function %d:'%.*s'\n\n", func_index(),
func_name_.length(), func_name_.start());
} else {
- PrintF("Compiling WASM function %d:<unnamed>\n\n", func_index());
+ PrintF("Compiling wasm function %d:<unnamed>\n\n", func_index());
}
}
double decode_ms = 0;
size_t node_count = 0;
- Zone graph_zone(isolate_->allocator(), ZONE_NAME);
- jsgraph_ = new (&graph_zone) JSGraph(
- isolate_, new (&graph_zone) Graph(&graph_zone),
- new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
- new (&graph_zone) MachineOperatorBuilder(
- &graph_zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()));
- SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
+ // Scope for the {graph_zone}.
+ {
+ Zone graph_zone(isolate_->allocator(), ZONE_NAME);
+ jsgraph_ = new (&graph_zone) JSGraph(
+ isolate_, new (&graph_zone) Graph(&graph_zone),
+ new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
+ new (&graph_zone) MachineOperatorBuilder(
+ &graph_zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
+ SourcePositionTable* source_positions =
+ BuildGraphForWasmFunction(&decode_ms);
- if (graph_construction_result_.failed()) {
- ok_ = false;
- return;
- }
+ if (graph_construction_result_.failed()) {
+ ok_ = false;
+ return;
+ }
- base::ElapsedTimer pipeline_timer;
- if (FLAG_trace_wasm_decode_time) {
- node_count = jsgraph_->graph()->NodeCount();
- pipeline_timer.Start();
- }
+ base::ElapsedTimer pipeline_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ node_count = jsgraph_->graph()->NodeCount();
+ pipeline_timer.Start();
+ }
- compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
+ compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
- // Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
- compilation_zone_.get(), func_body_.sig);
- if (jsgraph_->machine()->Is32()) {
- descriptor = module_env_->GetI32WasmCallDescriptor(compilation_zone_.get(),
- descriptor);
- }
- info_.reset(new CompilationInfo(
- GetDebugName(compilation_zone_.get(), func_name_, func_index_), isolate_,
- compilation_zone_.get(), Code::ComputeFlags(Code::WASM_FUNCTION)));
- ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
- compilation_zone_.get());
-
- job_.reset(Pipeline::NewWasmCompilationJob(
- info_.get(), jsgraph_, descriptor, source_positions,
- &protected_instructions, !module_env_->module->is_wasm()));
- ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
- // TODO(bradnelson): Improve histogram handling of size_t.
- if (is_sync_)
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- isolate_->counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor =
+ GetWasmCallDescriptor(compilation_zone_.get(), func_body_.sig);
+ if (jsgraph_->machine()->Is32()) {
+ descriptor =
+ GetI32WasmCallDescriptor(compilation_zone_.get(), descriptor);
+ }
+ info_.reset(new CompilationInfo(
+ GetDebugName(compilation_zone_.get(), func_name_, func_index_),
+ isolate_, compilation_zone_.get(),
+ Code::ComputeFlags(Code::WASM_FUNCTION)));
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
+ compilation_zone_.get());
+
+ job_.reset(Pipeline::NewWasmCompilationJob(
+ info_.get(), jsgraph_, descriptor, source_positions,
+ &protected_instructions, module_env_->module->origin()));
+ ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(jsgraph_->graph()->zone()->allocation_size()));
- if (FLAG_trace_wasm_decode_time) {
- double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
- "%0.3f ms pipeline\n",
- static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
- node_count, pipeline_ms);
+ if (FLAG_trace_wasm_decode_time) {
+ double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
+ "%0.3f ms pipeline\n",
+ static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
+ node_count, pipeline_ms);
+ }
+ // The graph zone is about to get out of scope. Avoid invalid references.
+ jsgraph_ = nullptr;
}
- // The graph zone is about to get out of scope. Avoid invalid references.
- jsgraph_ = nullptr;
+
+ // Record the memory cost this unit places on the system until
+ // it is finalized.
+ size_t cost = job_->AllocatedMemory();
+ set_memory_cost(cost);
}
-Handle<Code> WasmCompilationUnit::FinishCompilation(
+MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
wasm::ErrorThrower* thrower) {
if (!ok_) {
if (graph_construction_result_.failed()) {
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
if (func_name_.start() == nullptr) {
- SNPrintF(buffer,
- "Compiling WASM function #%d:%.*s failed:", func_index_,
- func_name_.length(), func_name_.start());
+ SNPrintF(buffer, "Compiling wasm function #%d failed", func_index_);
} else {
- SNPrintF(buffer, "Compiling WASM function #%d failed:", func_index_);
+ SNPrintF(buffer, "Compiling wasm function #%d:%.*s failed", func_index_,
+ func_name_.length(), func_name_.start());
}
thrower->CompileFailed(buffer.start(), graph_construction_result_);
}
- return Handle<Code>::null();
+ return {};
}
base::ElapsedTimer codegen_timer;
if (FLAG_trace_wasm_decode_time) {
@@ -4139,10 +4247,11 @@ Handle<Code> WasmCompilationUnit::FinishCompilation(
}
// static
-Handle<Code> WasmCompilationUnit::CompileWasmFunction(
+MaybeHandle<Code> WasmCompilationUnit::CompileWasmFunction(
wasm::ErrorThrower* thrower, Isolate* isolate,
wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function) {
- WasmCompilationUnit unit(isolate, module_env, function);
+ WasmCompilationUnit unit(isolate, module_env, function,
+ CEntryStub(isolate, 1).GetCode());
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
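
The WasmCompilationUnit changes above drop the is_sync_ flag in favor of a Counters* plus overloads taking std::shared_ptr<Counters>, so a unit handed to a background job shares ownership of the counters instead of reaching into the isolate. A minimal sketch of that ownership pattern; the Counters struct here is a stand-in, not V8's class:

#include <cstdio>
#include <memory>
#include <thread>

struct Counters {  // stand-in for v8::internal::Counters
  void AddSample(int bytes) { std::printf("peak memory sample: %d\n", bytes); }
};

class CompilationUnit {
 public:
  // Foreground constructor: raw pointer, lifetime guaranteed by the isolate.
  explicit CompilationUnit(Counters* counters) : counters_(counters) {}
  // Background constructor: shared ownership keeps the counters alive while
  // the unit runs on a worker thread.
  explicit CompilationUnit(std::shared_ptr<Counters> async_counters)
      : async_counters_(std::move(async_counters)),
        counters_(async_counters_.get()) {}

  void ExecuteCompilation() { counters_->AddSample(4096); }

 private:
  std::shared_ptr<Counters> async_counters_;
  Counters* counters_;
};

int main() {
  auto counters = std::make_shared<Counters>();
  CompilationUnit unit(counters);
  std::thread worker([&] { unit.ExecuteCompilation(); });
  worker.join();
}
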
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index f356f624d7..bf763d4499 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -31,7 +31,7 @@ class SourcePositionTable;
} // namespace compiler
namespace wasm {
-// Forward declarations for some WASM data structures.
+// Forward declarations for some wasm data structures.
struct ModuleBytesEnv;
struct ModuleEnv;
struct WasmFunction;
@@ -47,23 +47,33 @@ typedef compiler::JSGraph TFGraph;
namespace compiler {
class WasmCompilationUnit final {
public:
+ // Use the following constructors if you know you are running on the
+ // foreground thread.
WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function, bool is_sync = true);
+ const wasm::WasmFunction* function,
+ Handle<Code> centry_stub);
WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
wasm::FunctionBody body, wasm::WasmName name, int index,
- bool is_sync = true);
+ Handle<Code> centry_stub);
+ // Use the following constructors if the compilation may run on a background
+ // thread.
+ WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
+ const wasm::WasmFunction* function,
+ Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters);
+ WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
+ wasm::FunctionBody body, wasm::WasmName name, int index,
+ Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters);
int func_index() const { return func_index_; }
- void ReopenCentryStub() { centry_stub_ = handle(*centry_stub_, isolate_); }
- void InitializeHandles();
void ExecuteCompilation();
- Handle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
+ MaybeHandle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
- static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
- Isolate* isolate,
- wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function);
+ static MaybeHandle<Code> CompileWasmFunction(
+ wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function);
void set_memory_cost(size_t memory_cost) { memory_cost_ = memory_cost; }
size_t memory_cost() const { return memory_cost_; }
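
FinishCompilation now returns MaybeHandle<Code> rather than a possibly-null Handle<Code>, so failure is an empty maybe-handle the caller must check rather than a null sentinel. That is roughly the shape of returning std::optional; a sketch of the calling pattern under that analogy, not V8's handle types:

#include <cstdio>
#include <optional>
#include <string>

// Analogy only: MaybeHandle<Code> plays the role std::optional plays here.
std::optional<std::string> FinishCompilation(bool ok) {
  if (!ok) return std::nullopt;  // previously: return Handle<Code>::null();
  return std::string("generated code");
}

int main() {
  if (auto code = FinishCompilation(true)) {
    std::printf("compiled: %s\n", code->c_str());
  } else {
    std::printf("compilation failed\n");
  }
}
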
@@ -75,7 +85,7 @@ class WasmCompilationUnit final {
wasm::ModuleEnv* module_env_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
- bool is_sync_;
+ Counters* counters_;
// The graph zone is deallocated at the end of ExecuteCompilation by virtue of
// it being zone allocated.
JSGraph* jsgraph_ = nullptr;
@@ -90,12 +100,13 @@ class WasmCompilationUnit final {
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
bool ok_ = true;
size_t memory_cost_ = 0;
- void ExecuteCompilationInternal();
+
+ Counters* counters() { return counters_; }
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
-// Wraps a JS function, producing a code object that can be called from WASM.
+// Wraps a JS function, producing a code object that can be called from wasm.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
Handle<String> module_name,
@@ -113,9 +124,8 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
wasm::FunctionSig* sig,
Handle<WasmInstanceObject> instance);
-// Abstracts details of building TurboFan graph nodes for WASM to separate
-// the WASM decoder from the internal details of TurboFan.
-class WasmTrapHelper;
+// Abstracts details of building TurboFan graph nodes for wasm to separate
+// the wasm decoder from the internal details of TurboFan.
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
@@ -168,6 +178,8 @@ class WasmGraphBuilder {
void StackCheck(wasm::WasmCodePosition position, Node** effect = nullptr,
Node** control = nullptr);
+ void PatchInStackCheckIfNeeded();
+
//-----------------------------------------------------------------------
// Operations that read and/or write {control} and {effect}.
//-----------------------------------------------------------------------
@@ -224,10 +236,9 @@ class WasmGraphBuilder {
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineType type, Node* index, uint32_t offset,
- uint32_t alignment, Node* val,
- wasm::WasmCodePosition position);
-
+ Node* StoreMem(MachineType memtype, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val, wasm::WasmCodePosition position,
+ wasm::ValueType type = wasm::kWasmStmt);
static void PrintDebugName(Node* node);
Node* Control() { return *control_; }
@@ -250,16 +261,14 @@ class WasmGraphBuilder {
Node* S1x8Zero();
Node* S1x16Zero();
- Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
+ Node* SimdOp(wasm::WasmOpcode opcode, Node* const* inputs);
- Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
- const NodeVector& inputs);
+ Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, Node* const* inputs);
Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
- const NodeVector& inputs);
+ Node* const* inputs);
- Node* SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
- const NodeVector& inputs);
+ Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);
bool has_simd() const { return has_simd_; }
@@ -267,7 +276,6 @@ class WasmGraphBuilder {
private:
static const int kDefaultBufferSize = 16;
- friend class WasmTrapHelper;
Zone* zone_;
JSGraph* jsgraph_;
@@ -284,6 +292,7 @@ class WasmGraphBuilder {
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
+ bool needs_stack_check_ = false;
wasm::FunctionSig* sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
@@ -299,9 +308,11 @@ class WasmGraphBuilder {
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
-
- Node* BuildChangeEndianness(Node* node, MachineType type,
- wasm::ValueType wasmtype = wasm::kWasmStmt);
+ const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
+ Node* BuildChangeEndiannessStore(Node* node, MachineType type,
+ wasm::ValueType wasmtype = wasm::kWasmStmt);
+ Node* BuildChangeEndiannessLoad(Node* node, MachineType type,
+ wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
@@ -409,7 +420,29 @@ class WasmGraphBuilder {
int AddParameterNodes(Node** args, int pos, int param_count,
wasm::FunctionSig* sig);
+
+ void SetNeedsStackCheck() { needs_stack_check_ = true; }
+
+ //-----------------------------------------------------------------------
+ // Operations involving the CEntryStub, a dependency we want to remove
+ // to get off the GC heap.
+ //-----------------------------------------------------------------------
+ Node* BuildCallToRuntime(Runtime::FunctionId f, Node** parameters,
+ int parameter_count);
+
+ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, Node* context,
+ Node** parameters, int parameter_count);
+
+ Node* BuildModifyThreadInWasmFlag(bool new_value);
};
+
+V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(Zone* zone,
+ wasm::FunctionSig* sig);
+V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
+ Zone* zone, CallDescriptor* descriptor);
+V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
+ Zone* zone, CallDescriptor* descriptor);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index c739be5399..e5130fb63a 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -8,38 +8,33 @@
#include "src/objects-inl.h"
#include "src/register-configuration.h"
-#include "src/wasm/wasm-module.h"
-
#include "src/compiler/linkage.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
-// TODO(titzer): this should not be in the WASM namespace.
-namespace wasm {
+namespace compiler {
-using compiler::LocationSignature;
-using compiler::CallDescriptor;
-using compiler::LinkageLocation;
+using wasm::ValueType;
namespace {
MachineType MachineTypeFor(ValueType type) {
switch (type) {
- case kWasmI32:
+ case wasm::kWasmI32:
return MachineType::Int32();
- case kWasmI64:
+ case wasm::kWasmI64:
return MachineType::Int64();
- case kWasmF64:
+ case wasm::kWasmF64:
return MachineType::Float64();
- case kWasmF32:
+ case wasm::kWasmF32:
return MachineType::Float32();
- case kWasmS128:
+ case wasm::kWasmS128:
return MachineType::Simd128();
default:
UNREACHABLE();
- return MachineType::AnyTagged();
}
}
@@ -74,14 +69,6 @@ LinkageLocation stackloc(int i, MachineType type) {
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
-#elif V8_TARGET_ARCH_X87
-// ===========================================================================
-// == x87 ====================================================================
-// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
-#define GP_RETURN_REGISTERS eax, edx
-#define FP_RETURN_REGISTERS stX_0
-
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
@@ -183,7 +170,7 @@ struct Allocator {
// Allocate floats using a double register, but modify the code to
// reflect how ARM FP registers alias.
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- if (type == kWasmF32) {
+ if (type == wasm::kWasmF32) {
int float_reg_code = reg.code() * 2;
DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
return regloc(DoubleRegister::from_code(float_reg_code),
@@ -208,10 +195,11 @@ struct Allocator {
}
}
bool IsFloatingPoint(ValueType type) {
- return type == kWasmF32 || type == kWasmF64;
+ return type == wasm::kWasmF32 || type == wasm::kWasmF64;
}
int Words(ValueType type) {
- if (kPointerSize < 8 && (type == kWasmI64 || type == kWasmF64)) {
+ if (kPointerSize < 8 &&
+ (type == wasm::kWasmI64 || type == wasm::kWasmF64)) {
return 2;
}
return 1;
@@ -276,8 +264,7 @@ static base::LazyInstance<Allocator, ReturnRegistersCreateTrait>::type
return_registers = LAZY_INSTANCE_INITIALIZER;
// General code uses the above configuration data.
-CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
- FunctionSig* fsig) {
+CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count());
@@ -302,7 +289,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
const RegList kCalleeSaveRegisters = 0;
const RegList kCalleeSaveFPRegisters = 0;
- // The target for WASM calls is always a code object.
+ // The target for wasm calls is always a code object.
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
@@ -380,20 +367,20 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
descriptor->debug_name());
}
-CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
- Zone* zone, CallDescriptor* descriptor) {
+CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
+ CallDescriptor* descriptor) {
return ReplaceTypeInCallDescriptorWith(zone, descriptor, 2,
MachineType::Int64(),
MachineRepresentation::kWord32);
}
-CallDescriptor* ModuleEnv::GetI32WasmCallDescriptorForSimd(
- Zone* zone, CallDescriptor* descriptor) {
+CallDescriptor* GetI32WasmCallDescriptorForSimd(Zone* zone,
+ CallDescriptor* descriptor) {
return ReplaceTypeInCallDescriptorWith(zone, descriptor, 4,
MachineType::Simd128(),
MachineRepresentation::kWord32);
}
-} // namespace wasm
+} // namespace compiler
} // namespace internal
} // namespace v8
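
On 32-bit targets the linkage code above counts i64 and f64 values as two words, and GetI32WasmCallDescriptor rewrites each Int64 location into a pair of Word32 locations. The split itself is plain low/high word extraction; a standalone illustration (the order of the halves is a target-specific detail):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// How a 64-bit wasm value occupies two 32-bit parameter words on a
// 32-bit target.
void SplitI64(int64_t value, uint32_t* low, uint32_t* high) {
  *low = static_cast<uint32_t>(value & 0xFFFFFFFF);
  *high = static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
}

int main() {
  uint32_t low, high;
  SplitI64(INT64_C(0x1122334455667788), &low, &high);
  std::printf("low = 0x%08" PRIX32 ", high = 0x%08" PRIX32 "\n", low, high);
  // prints low = 0x55667788, high = 0x11223344
}
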
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 86c547f460..9e9be09ecb 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -20,7 +20,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
+#define __ tasm()->
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
@@ -41,7 +41,7 @@ class X64OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kFloat64) {
- DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
+ DCHECK_EQ(0, constant.ToFloat64().AsUint64());
return Immediate(0);
}
if (RelocInfo::IsWasmReference(constant.rmode())) {
@@ -141,10 +141,8 @@ class X64OperandConverter : public InstructionOperandConverter {
}
case kMode_None:
UNREACHABLE();
- return Operand(no_reg, 0);
}
UNREACHABLE();
- return Operand(no_reg, 0);
}
Operand MemoryOperand(size_t first_input = 0) {
@@ -207,14 +205,15 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
: OutOfLineCode(gen),
result_(result),
input_(input),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
- __ SlowTruncateToI(result_, rsp, 0);
+ __ SlowTruncateToIDelayed(zone_, result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
@@ -224,6 +223,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
Register const result_;
XMMRegister const input_;
UnwindingInfoWriter* const unwinding_info_writer_;
+ Zone* zone_;
};
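
The out-of-line code changes here and below (SlowTruncateToIDelayed, CallStubDelayed, CallRuntimeDelayed) share one idea: rather than resolving a stub against the isolate while assembling, the request is allocated in the compilation zone and only materialized into a code object later, so code generation itself needs no heap access. A rough standalone analogy of recording deferred stub requests and resolving them afterwards; this is not V8's API:

#include <cstdio>
#include <string>
#include <vector>

// Analogy only: "assembly" records deferred stub requests; a later fixup
// pass, run where heap access is allowed, resolves them.
struct DeferredCall {
  std::string stub_name;
  unsigned patch_offset;
};

int main() {
  std::vector<DeferredCall> deferred;
  // During (possibly background) code generation: record, do not resolve.
  deferred.push_back({"RecordWriteStub", 0x40});
  deferred.push_back({"MathPowStub", 0x9c});
  // Later, on the main thread: resolve each request to an actual code object.
  for (const DeferredCall& call : deferred) {
    std::printf("patching offset 0x%x with %s\n", call.patch_offset,
                call.stub_name.c_str());
  }
}
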
@@ -238,7 +238,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -252,10 +253,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ leap(scratch1_, operand_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
}
private:
@@ -265,6 +266,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ Zone* zone_;
};
class WasmOutOfLineTrap final : public OutOfLineCode {
@@ -294,7 +296,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
// with AssembleArchTrap.
__ Push(Smi::FromInt(position_));
__ Move(rsi, Smi::kZero);
- __ CallRuntime(Runtime::kThrowWasmError);
+ __ CallRuntimeDelayed(gen_->zone(), Runtime::kThrowWasmError);
ReferenceMap* reference_map =
new (gen_->code()->zone()) ReferenceMap(gen_->code()->zone());
@@ -451,7 +453,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
- CpuFeatureScope avx_scope(masm(), AVX); \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
if (instr->InputAt(1)->IsFPRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -696,18 +698,18 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- __ PrepareCallCFunction(2); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 2); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ __ PrepareCallCFunction(2); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
} while (false)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- __ PrepareCallCFunction(1); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 1); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ __ PrepareCallCFunction(1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 1); \
} while (false)
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
@@ -794,7 +796,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@@ -812,13 +814,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -832,7 +834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -851,7 +853,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -1063,8 +1065,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Pow: {
// TODO(bmeurer): Improve integration of the stub.
__ Movsd(xmm2, xmm0);
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Movsd(xmm0, xmm3);
break;
}
@@ -1287,7 +1289,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Cvtss2sd);
break;
case kSSEFloat32Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1344,7 +1346,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// The following 2 instruction implicitly use rax.
__ fnstsw_ax();
if (CpuFeatures::IsSupported(SAHF)) {
- CpuFeatureScope sahf_scope(masm(), SAHF);
+ CpuFeatureScope sahf_scope(tasm(), SAHF);
__ sahf();
} else {
__ shrl(rax, Immediate(8));
@@ -1494,7 +1496,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1763,7 +1765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kAVXFloat32Cmp: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) {
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1787,7 +1789,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat64Cmp: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) {
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1812,7 +1814,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -1826,7 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -1840,7 +1842,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -1854,7 +1856,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -2007,7 +2009,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movdqu: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
__ pc_offset());
if (instr->HasOutput()) {
@@ -2174,12 +2176,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ExtractLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I32x4ReplaceLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2188,6 +2190,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I32x4Neg: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
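+ // Note (added comment): when dst aliases src, negate in place; psignd with
+ // an all-ones mask flips the sign of every lane. Otherwise compute 0 - src.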
+ if (dst.is(src)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psignd(dst, kScratchDoubleReg);
+ } else {
+ __ pxor(dst, dst);
+ __ psubd(dst, src);
+ }
+ break;
+ }
case kX64I32x4Shl: {
__ pslld(i.OutputSimd128Register(), i.InputInt8(1));
break;
@@ -2201,7 +2216,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4AddHoriz: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
__ phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2210,17 +2225,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Mul: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MinS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2234,20 +2249,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
+ case kX64I32x4GtS: {
+ __ pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4GeS: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
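+ // Note (added comment): dst >= src (signed) holds exactly when
+ // min(dst, src) == src.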
+ __ pminsd(dst, src);
+ __ pcmpeqd(dst, src);
+ break;
+ }
case kX64I32x4ShrU: {
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I32x4MinU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I32x4GtU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
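+ // Note (added comment): there is no unsigned pcmpgtd, so compute
+ // NOT(max(dst, src) == src); max(dst, src) == src exactly when
+ // dst <= src (unsigned).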
+ __ pmaxud(dst, src);
+ __ pcmpeqd(dst, src);
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I32x4GeU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
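+ // Note (added comment): dst >= src (unsigned) holds exactly when
+ // min(dst, src) == src.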
+ __ pminud(dst, src);
+ __ pcmpeqd(dst, src);
+ break;
+ }
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ xorps(dst, dst);
@@ -2262,14 +2307,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8ExtractLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxwl(dst, dst);
break;
}
case kX64I16x8ReplaceLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2278,6 +2323,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I16x8Neg: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst.is(src)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psignw(dst, kScratchDoubleReg);
+ } else {
+ __ pxor(dst, dst);
+ __ psubw(dst, src);
+ }
+ break;
+ }
case kX64I16x8Shl: {
__ psllw(i.OutputSimd128Register(), i.InputInt8(1));
break;
@@ -2295,7 +2353,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8AddHoriz: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
__ phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2308,17 +2366,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8Mul: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MinS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2332,6 +2390,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
+ case kX64I16x8GtS: {
+ __ pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8GeS: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminsw(dst, src);
+ __ pcmpeqw(dst, src);
+ break;
+ }
case kX64I16x8ShrU: {
__ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
break;
@@ -2345,17 +2415,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8MinU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I16x8GtU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pmaxuw(dst, src);
+ __ pcmpeqw(dst, src);
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I16x8GeU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminuw(dst, src);
+ __ pcmpeqw(dst, src);
+ break;
+ }
case kX64I8x16Splat: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
__ xorps(kScratchDoubleReg, kScratchDoubleReg);
@@ -2363,14 +2451,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16ExtractLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxbl(dst, dst);
break;
}
case kX64I8x16ReplaceLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2379,6 +2467,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I8x16Neg: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst.is(src)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psignb(dst, kScratchDoubleReg);
+ } else {
+ __ pxor(dst, dst);
+ __ psubb(dst, src);
+ }
+ break;
+ }
case kX64I8x16Add: {
__ paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2396,12 +2497,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16MinS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MaxS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2415,6 +2516,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
+ case kX64I8x16GtS: {
+ __ pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16GeS: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminsb(dst, src);
+ __ pcmpeqb(dst, src);
+ break;
+ }
case kX64I8x16AddSaturateU: {
__ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2424,15 +2537,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16MinU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MaxU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I8x16GtU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pmaxub(dst, src);
+ __ pcmpeqb(dst, src);
+ __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I8x16GeU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminub(dst, src);
+ __ pcmpeqb(dst, src);
+ break;
+ }
case kX64S128And: {
__ pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2447,8 +2578,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S128Not: {
XMMRegister dst = i.OutputSimd128Register();
- __ pcmpeqd(dst, dst);
- __ pxor(dst, i.InputSimd128Register(1));
+ XMMRegister src = i.InputSimd128Register(0);
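+ // Note (added comment): pcmpeqd(dst, dst) produces the all-ones mask, so
+ // save src first when dst aliases it; NOT(x) is then x XOR all-ones.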
+ if (dst.is(src)) {
+ __ movaps(kScratchDoubleReg, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, src);
+ }
break;
}
case kX64S128Select: {
@@ -2632,7 +2771,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return no_condition;
}
} // namespace
@@ -2690,22 +2828,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- __ ud2();
- }
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
}
}
@@ -2788,9 +2924,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2862,11 +2998,41 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+ shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime to throw the stack
+ // overflow exception.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ __ Move(kScratchRegister,
+ ExternalReference::address_of_real_stack_limit(__ isolate()));
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ addq(kScratchRegister, Immediate(shrink_slots * kPointerSize));
+ __ cmpq(rsp, kScratchRegister);
+ __ j(above_equal, &done);
+ }
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ __ Move(rsi, Smi::kZero);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ bind(&done);
+ }
__ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
@@ -3023,12 +3189,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ MoveNumber(dst, src.ToFloat32());
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ MoveNumber(dst, src.ToFloat64().value());
break;
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
@@ -3062,7 +3226,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
+ uint64_t src_const = src.ToFloat64().AsUint64();
if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 959a7d2d03..9c268ededf 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -147,6 +147,7 @@ namespace compiler {
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
+ V(X64I32x4Neg) \
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
@@ -157,12 +158,17 @@ namespace compiler {
V(X64I32x4MaxS) \
V(X64I32x4Eq) \
V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLane) \
V(X64I16x8ReplaceLane) \
+ V(X64I16x8Neg) \
V(X64I16x8Shl) \
V(X64I16x8ShrS) \
V(X64I16x8Add) \
@@ -175,14 +181,19 @@ namespace compiler {
V(X64I16x8MaxS) \
V(X64I16x8Eq) \
V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
V(X64I16x8ShrU) \
V(X64I16x8AddSaturateU) \
V(X64I16x8SubSaturateU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
V(X64I8x16Splat) \
V(X64I8x16ExtractLane) \
V(X64I8x16ReplaceLane) \
+ V(X64I8x16Neg) \
V(X64I8x16Add) \
V(X64I8x16AddSaturateS) \
V(X64I8x16Sub) \
@@ -191,10 +202,14 @@ namespace compiler {
V(X64I8x16MaxS) \
V(X64I8x16Eq) \
V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
V(X64I8x16AddSaturateU) \
V(X64I8x16SubSaturateU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
V(X64S128And) \
V(X64S128Or) \
V(X64S128Xor) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 0f4c37f033..c5ef1e5a7a 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -126,6 +126,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
+ case kX64I32x4Neg:
case kX64I32x4Shl:
case kX64I32x4ShrS:
case kX64I32x4Add:
@@ -136,12 +137,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4MaxS:
case kX64I32x4Eq:
case kX64I32x4Ne:
+ case kX64I32x4GtS:
+ case kX64I32x4GeS:
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
+ case kX64I32x4GtU:
+ case kX64I32x4GeU:
case kX64I16x8Splat:
case kX64I16x8ExtractLane:
case kX64I16x8ReplaceLane:
+ case kX64I16x8Neg:
case kX64I16x8Shl:
case kX64I16x8ShrS:
case kX64I16x8Add:
@@ -154,14 +160,19 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8MaxS:
case kX64I16x8Eq:
case kX64I16x8Ne:
+ case kX64I16x8GtS:
+ case kX64I16x8GeS:
case kX64I16x8ShrU:
case kX64I16x8AddSaturateU:
case kX64I16x8SubSaturateU:
case kX64I16x8MinU:
case kX64I16x8MaxU:
+ case kX64I16x8GtU:
+ case kX64I16x8GeU:
case kX64I8x16Splat:
case kX64I8x16ExtractLane:
case kX64I8x16ReplaceLane:
+ case kX64I8x16Neg:
case kX64I8x16Add:
case kX64I8x16AddSaturateS:
case kX64I8x16Sub:
@@ -170,10 +181,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16MaxS:
case kX64I8x16Eq:
case kX64I8x16Ne:
+ case kX64I8x16GtS:
+ case kX64I8x16GeS:
case kX64I8x16AddSaturateU:
case kX64I8x16SubSaturateU:
case kX64I8x16MinU:
case kX64I8x16MaxU:
+ case kX64I8x16GtU:
+ case kX64I8x16GeU:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
@@ -189,8 +204,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Udiv:
case kX64Udiv32:
return (instr->addressing_mode() == kMode_None)
- ? kMayNeedDeoptCheck
- : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+ ? kMayNeedDeoptOrTrapCheck
+ : kMayNeedDeoptOrTrapCheck | kIsLoadOperation | kHasSideEffect;
case kX64Movsxbl:
case kX64Movzxbl:
@@ -239,7 +254,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 3f4e2b3b1c..6ac2f428e0 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -233,9 +233,6 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
case MachineRepresentation::kSimd128: // Fall through.
opcode = kX64Movdqu;
break;
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
break;
@@ -270,15 +267,10 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kSimd128: // Fall through.
return kX64Movdqu;
break;
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return kArchNop;
}
UNREACHABLE();
- return kArchNop;
}
} // namespace
@@ -434,9 +426,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
@@ -492,9 +481,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
@@ -2054,6 +2040,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
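+ // Note (added comment): presumably a cap on jump-table size; value ranges
+ // above this fall back to a lookup switch below.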
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2061,7 +2048,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = g.TempRegister();
if (sw.min_value) {
// The leal automatically zero extends, so result is a valid 64-bit index.
@@ -2462,12 +2450,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(16x8) \
V(8x16)
-#define SIMD_ZERO_OP_LIST(V) \
- V(S128Zero) \
- V(S1x4Zero) \
- V(S1x8Zero) \
- V(S1x16Zero)
-
#define SIMD_BINOP_LIST(V) \
V(I32x4Add) \
V(I32x4AddHoriz) \
@@ -2477,8 +2459,12 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MaxS) \
V(I32x4Eq) \
V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
V(I32x4MinU) \
V(I32x4MaxU) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
V(I16x8AddHoriz) \
@@ -2489,10 +2475,14 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8MaxS) \
V(I16x8Eq) \
V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
V(I16x8MaxU) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
V(I8x16Add) \
V(I8x16AddSaturateS) \
V(I8x16Sub) \
@@ -2501,15 +2491,23 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MaxS) \
V(I8x16Eq) \
V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
V(I8x16AddSaturateU) \
V(I8x16SubSaturateU) \
V(I8x16MinU) \
V(I8x16MaxU) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
V(S128And) \
V(S128Or) \
V(S128Xor)
-#define SIMD_UNOP_LIST(V) V(S128Not)
+#define SIMD_UNOP_LIST(V) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
+ V(I8x16Neg) \
+ V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
@@ -2519,6 +2517,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8ShrS) \
V(I16x8ShrU)
+void InstructionSelector::VisitS128Zero(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+}
+
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
X64OperandGenerator g(this); \
@@ -2549,14 +2552,6 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE
-#define SIMD_VISIT_ZERO_OP(Name) \
- void InstructionSelector::Visit##Name(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
- }
-SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
-#undef SIMD_VISIT_ZERO_OP
-
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2585,15 +2580,12 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
-#define SIMD_VISIT_SELECT_OP(format) \
- void InstructionSelector::VisitS##format##Select(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64S128Select, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
- }
-SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
-#undef SIMD_VISIT_SELECT_OP
+void InstructionSelector::VisitS128Select(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64S128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
diff --git a/deps/v8/src/compiler/x87/OWNERS b/deps/v8/src/compiler/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/compiler/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
deleted file mode 100644
index 32f1019cd2..0000000000
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ /dev/null
@@ -1,2772 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/code-generator.h"
-
-#include "src/compilation-info.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/osr.h"
-#include "src/frames.h"
-#include "src/x87/assembler-x87.h"
-#include "src/x87/frames-x87.h"
-#include "src/x87/macro-assembler-x87.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define __ masm()->
-
-
-// Adds X87 specific methods for decoding operands.
-class X87OperandConverter : public InstructionOperandConverter {
- public:
- X87OperandConverter(CodeGenerator* gen, Instruction* instr)
- : InstructionOperandConverter(gen, instr) {}
-
- Operand InputOperand(size_t index, int extra = 0) {
- return ToOperand(instr_->InputAt(index), extra);
- }
-
- Immediate InputImmediate(size_t index) {
- return ToImmediate(instr_->InputAt(index));
- }
-
- Operand OutputOperand() { return ToOperand(instr_->Output()); }
-
- Operand ToOperand(InstructionOperand* op, int extra = 0) {
- if (op->IsRegister()) {
- DCHECK(extra == 0);
- return Operand(ToRegister(op));
- }
- DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
- return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
- }
-
- Operand SlotToOperand(int slot, int extra = 0) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
- return Operand(offset.from_stack_pointer() ? esp : ebp,
- offset.offset() + extra);
- }
-
- Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsFPStackSlot());
- return ToOperand(op, kPointerSize);
- }
-
- Immediate ToImmediate(InstructionOperand* operand) {
- Constant constant = ToConstant(operand);
- if (constant.type() == Constant::kInt32 &&
- RelocInfo::IsWasmReference(constant.rmode())) {
- return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
- constant.rmode());
- }
- switch (constant.type()) {
- case Constant::kInt32:
- return Immediate(constant.ToInt32());
- case Constant::kFloat32:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
- case Constant::kFloat64:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
- case Constant::kExternalReference:
- return Immediate(constant.ToExternalReference());
- case Constant::kHeapObject:
- return Immediate(constant.ToHeapObject());
- case Constant::kInt64:
- break;
- case Constant::kRpoNumber:
- return Immediate::CodeRelativeOffset(ToLabel(operand));
- }
- UNREACHABLE();
- return Immediate(-1);
- }
-
- static size_t NextOffset(size_t* offset) {
- size_t i = *offset;
- (*offset)++;
- return i;
- }
-
- static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
- STATIC_ASSERT(0 == static_cast<int>(times_1));
- STATIC_ASSERT(1 == static_cast<int>(times_2));
- STATIC_ASSERT(2 == static_cast<int>(times_4));
- STATIC_ASSERT(3 == static_cast<int>(times_8));
- int scale = static_cast<int>(mode - one);
- DCHECK(scale >= 0 && scale < 4);
- return static_cast<ScaleFactor>(scale);
- }
-
- Operand MemoryOperand(size_t* offset) {
- AddressingMode mode = AddressingModeField::decode(instr_->opcode());
- switch (mode) {
- case kMode_MR: {
- Register base = InputRegister(NextOffset(offset));
- int32_t disp = 0;
- return Operand(base, disp);
- }
- case kMode_MRI: {
- Register base = InputRegister(NextOffset(offset));
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(base, ctant.ToInt32(), ctant.rmode());
- }
- case kMode_MR1:
- case kMode_MR2:
- case kMode_MR4:
- case kMode_MR8: {
- Register base = InputRegister(NextOffset(offset));
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_MR1, mode);
- int32_t disp = 0;
- return Operand(base, index, scale, disp);
- }
- case kMode_MR1I:
- case kMode_MR2I:
- case kMode_MR4I:
- case kMode_MR8I: {
- Register base = InputRegister(NextOffset(offset));
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
- }
- case kMode_M1:
- case kMode_M2:
- case kMode_M4:
- case kMode_M8: {
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_M1, mode);
- int32_t disp = 0;
- return Operand(index, scale, disp);
- }
- case kMode_M1I:
- case kMode_M2I:
- case kMode_M4I:
- case kMode_M8I: {
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
- }
- case kMode_MI: {
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(ctant.ToInt32(), ctant.rmode());
- }
- case kMode_None:
- UNREACHABLE();
- return Operand(no_reg, 0);
- }
- UNREACHABLE();
- return Operand(no_reg, 0);
- }
-
- Operand MemoryOperand(size_t first_input = 0) {
- return MemoryOperand(&first_input);
- }
-};
-
-
-namespace {
-
-bool HasImmediateInput(Instruction* instr, size_t index) {
- return instr->InputAt(index)->IsImmediate();
-}
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xor_(result_, result_); }
-
- private:
- Register const result_;
-};
-
-class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat32NaN(CodeGenerator* gen, X87Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- DCHECK(result_.code() == 0);
- USE(result_);
- __ fstp(0);
- __ push(Immediate(0xffc00000));
- __ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
- }
-
- private:
- X87Register const result_;
-};
-
-class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat64NaN(CodeGenerator* gen, X87Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- DCHECK(result_.code() == 0);
- USE(result_);
- __ fstp(0);
- __ push(Immediate(0xfff80000));
- __ push(Immediate(0x00000000));
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- }
-
- private:
- X87Register const result_;
-};
-
-class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
- public:
- OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
- X87Register input)
- : OutOfLineCode(gen), result_(result), input_(input) {}
-
- void Generate() final {
- UNIMPLEMENTED();
- USE(result_);
- USE(input_);
- }
-
- private:
- Register const result_;
- X87Register const input_;
-};
-
-
-class OutOfLineRecordWrite final : public OutOfLineCode {
- public:
- OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
- Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
- : OutOfLineCode(gen),
- object_(object),
- operand_(operand),
- value_(value),
- scratch0_(scratch0),
- scratch1_(scratch1),
- mode_(mode) {}
-
- void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, zero,
- exit());
- RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
- __ lea(scratch1_, operand_);
- __ CallStub(&stub);
- }
-
- private:
- Register const object_;
- Operand const operand_;
- Register const value_;
- Register const scratch0_;
- Register const scratch1_;
- RecordWriteMode const mode_;
-};
-
-} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto offset = i.InputRegister(0); \
- DCHECK(result.code() == 0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
- __ j(above_equal, ool->entry()); \
- __ fstp(0); \
- __ asm_instr(i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- DCHECK(i.InputDoubleRegister(2).code() == 0); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3)); \
- __ bind(&done); \
- } while (false)
-
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- if (instr->InputAt(2)->IsRegister()) { \
- __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
- } else { \
- __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
- } \
- __ bind(&done); \
- } while (false)
-
-#define ASSEMBLE_COMPARE(asm_instr) \
- do { \
- if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
- size_t index = 0; \
- Operand left = i.MemoryOperand(&index); \
- if (HasImmediateInput(instr, index)) { \
- __ asm_instr(left, i.InputImmediate(index)); \
- } else { \
- __ asm_instr(left, i.InputRegister(index)); \
- } \
- } else { \
- if (HasImmediateInput(instr, 1)) { \
- if (instr->InputAt(0)->IsRegister()) { \
- __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
- } else { \
- __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
- } \
- } else { \
- if (instr->InputAt(1)->IsRegister()) { \
- __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
- } else { \
- __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
- } \
- } \
- } \
- } while (0)
-
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- /* Saves the esp into ebx */ \
- __ push(ebx); \
- __ mov(ebx, esp); \
- /* Pass one double as argument on the stack. */ \
- __ PrepareCallCFunction(4, eax); \
- __ fstp(0); \
- /* Load first operand from original stack */ \
- __ fld_d(MemOperand(ebx, 4 + kDoubleSize)); \
- /* Put first operand into stack for function call */ \
- __ fstp_d(Operand(esp, 0 * kDoubleSize)); \
- /* Load second operand from original stack */ \
- __ fld_d(MemOperand(ebx, 4)); \
- /* Put second operand into stack for function call */ \
- __ fstp_d(Operand(esp, 1 * kDoubleSize)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 4); \
- /* Restore the ebx */ \
- __ pop(ebx); \
- /* Return value is in st(0) on x87. */ \
- __ lea(esp, Operand(esp, 2 * kDoubleSize)); \
- } while (false)
-
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- /* Saves the esp into ebx */ \
- __ push(ebx); \
- __ mov(ebx, esp); \
- /* Pass one double as argument on the stack. */ \
- __ PrepareCallCFunction(2, eax); \
- __ fstp(0); \
- /* Load operand from original stack */ \
- __ fld_d(MemOperand(ebx, 4)); \
- /* Put operand into stack for function call */ \
- __ fstp_d(Operand(esp, 0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 2); \
- /* Restore the ebx */ \
- __ pop(ebx); \
- /* Return value is in st(0) on x87. */ \
- __ lea(esp, Operand(esp, kDoubleSize)); \
- } while (false)
-
-void CodeGenerator::AssembleDeconstructFrame() {
- __ mov(esp, ebp);
- __ pop(ebp);
-}
-
-void CodeGenerator::AssemblePrepareTailCall() {
- if (frame_access_state()->has_frame()) {
- __ mov(ebp, MemOperand(ebp, 0));
- }
- frame_access_state()->SetFrameAccessToSP();
-}
-
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register, Register,
- Register) {
- // There are not enough temp registers left on ia32 for a call instruction
- // so we pick some scratch registers and save/restore them manually here.
- int scratch_count = 3;
- Register scratch1 = ebx;
- Register scratch2 = ecx;
- Register scratch3 = edx;
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &done, Label::kNear);
-
- __ push(scratch1);
- __ push(scratch2);
- __ push(scratch3);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, scratch_count);
- __ pop(scratch3);
- __ pop(scratch2);
- __ pop(scratch1);
-
- __ bind(&done);
-}
-
-namespace {
-
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
- FrameAccessState* state,
- int new_slot_above_sp,
- bool allow_shrinkage = true) {
- int current_sp_offset = state->GetSPToFPSlotCount() +
- StandardFrameConstants::kFixedSlotCountAboveFp;
- int stack_slot_delta = new_slot_above_sp - current_sp_offset;
- if (stack_slot_delta > 0) {
- masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
- state->IncreaseSPDelta(stack_slot_delta);
- } else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
- state->IncreaseSPDelta(stack_slot_delta);
- }
-}
-
-} // namespace
-
-void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
- CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
- ZoneVector<MoveOperands*> pushes(zone());
- GetPushCompatibleMoves(instr, flags, &pushes);
-
- if (!pushes.empty() &&
- (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
- X87OperandConverter g(this, instr);
- for (auto move : pushes) {
- LocationOperand destination_location(
- LocationOperand::cast(move->destination()));
- InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
- destination_location.index());
- if (source.IsStackSlot()) {
- LocationOperand source_location(LocationOperand::cast(source));
- __ push(g.SlotToOperand(source_location.index()));
- } else if (source.IsRegister()) {
- LocationOperand source_location(LocationOperand::cast(source));
- __ push(source_location.GetRegister());
- } else if (source.IsImmediate()) {
- __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
- } else {
- // Pushes of non-scalar data types is not supported.
- UNIMPLEMENTED();
- }
- frame_access_state()->IncreaseSPDelta(1);
- move->Eliminate();
- }
- }
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
- first_unused_stack_slot, false);
-}
-
-void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
- first_unused_stack_slot);
-}
-
-// Assembles an instruction after register allocation, producing machine code.
-CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
- Instruction* instr) {
- X87OperandConverter i(this, instr);
- InstructionCode opcode = instr->opcode();
- ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
-
- switch (arch_opcode) {
- case kArchCallCodeObject: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- EnsureSpaceForLazyDeopt();
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
- }
- RecordCallPosition(instr);
- bool double_result =
- instr->HasOutput() && instr->Output()->IsFPRegister();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- } else {
- __ fld1();
- }
- frame_access_state()->ClearSPDelta();
- break;
- }
- case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- no_reg, no_reg, no_reg);
- }
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ jmp(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
- }
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
- case kArchTailCallAddress: {
- CHECK(!HasImmediateInput(instr, 0));
- Register reg = i.InputRegister(0);
- __ jmp(reg);
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
- case kArchCallJSFunction: {
- EnsureSpaceForLazyDeopt();
- Register func = i.InputRegister(0);
- if (FLAG_debug_code) {
- // Check the function's context matches the context argument.
- __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- RecordCallPosition(instr);
- bool double_result =
- instr->HasOutput() && instr->Output()->IsFPRegister();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- } else {
- __ fld1();
- }
- frame_access_state()->ClearSPDelta();
- break;
- }
- case kArchTailCallJSFunctionFromJSFunction: {
- Register func = i.InputRegister(0);
- if (FLAG_debug_code) {
- // Check the function's context matches the context argument.
- __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
- no_reg, no_reg);
- __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
- case kArchPrepareCallCFunction: {
- // Frame alignment requires using FP-relative frame addressing.
- frame_access_state()->SetFrameAccessToFP();
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
- break;
- }
- case kArchPrepareTailCall:
- AssemblePrepareTailCall();
- break;
- case kArchCallCFunction: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- int const num_parameters = MiscField::decode(instr->opcode());
- if (HasImmediateInput(instr, 0)) {
- ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
- } else {
- Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
- }
- bool double_result =
- instr->HasOutput() && instr->Output()->IsFPRegister();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- } else {
- __ fld1();
- }
- frame_access_state()->SetFrameAccessToDefault();
- frame_access_state()->ClearSPDelta();
- break;
- }
- case kArchJmp:
- AssembleArchJump(i.InputRpo(0));
- break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
- case kArchTableSwitch:
- AssembleArchTableSwitch(instr);
- break;
- case kArchComment: {
- Address comment_string = i.InputExternalReference(0).address();
- __ RecordComment(reinterpret_cast<const char*>(comment_string));
- break;
- }
- case kArchDebugBreak:
- __ int3();
- break;
- case kArchNop:
- case kArchThrowTerminator:
- // don't emit code for nops.
- break;
- case kArchDeoptimize: {
- int deopt_state_id =
- BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- int double_register_param_count = 0;
- int x87_layout = 0;
- for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsFPRegister()) {
- double_register_param_count++;
- }
- }
- // Currently we use only one X87 register. If double_register_param_count
- // is bigger than 1, it means duplicated double register is added to input
- // of this instruction.
- if (double_register_param_count > 0) {
- x87_layout = (0 << 3) | 1;
- }
- // The layout of x87 register stack is loaded on the top of FPU register
- // stack for deoptimization.
- __ push(Immediate(x87_layout));
- __ fild_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kPointerSize));
-
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
- if (result != kSuccess) return result;
- break;
- }
- case kArchRet:
- AssembleReturn(instr->InputAt(0));
- break;
- case kArchFramePointer:
- __ mov(i.OutputRegister(), ebp);
- break;
- case kArchStackPointer:
- __ mov(i.OutputRegister(), esp);
- break;
- case kArchParentFramePointer:
- if (frame_access_state()->has_frame()) {
- __ mov(i.OutputRegister(), Operand(ebp, 0));
- } else {
- __ mov(i.OutputRegister(), ebp);
- }
- break;
- case kArchTruncateDoubleToI: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_d(i.InputOperand(0));
- }
- __ TruncateX87TOSToI(i.OutputRegister());
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
- Register object = i.InputRegister(0);
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- Register value = i.InputRegister(index);
- Register scratch0 = i.TempRegister(0);
- Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
- scratch0, scratch1, mode);
- __ mov(operand, value);
- __ CheckPageFlag(object, scratch0,
- MemoryChunk::kPointersFromHereAreInterestingMask,
- not_zero, ool->entry());
- __ bind(ool->exit());
- break;
- }
- case kArchStackSlot: {
- FrameOffset offset =
- frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base;
- if (offset.from_stack_pointer()) {
- base = esp;
- } else {
- base = ebp;
- }
- __ lea(i.OutputRegister(), Operand(base, offset.offset()));
- break;
- }
- case kIeee754Float64Acos:
- ASSEMBLE_IEEE754_UNOP(acos);
- break;
- case kIeee754Float64Acosh:
- ASSEMBLE_IEEE754_UNOP(acosh);
- break;
- case kIeee754Float64Asin:
- ASSEMBLE_IEEE754_UNOP(asin);
- break;
- case kIeee754Float64Asinh:
- ASSEMBLE_IEEE754_UNOP(asinh);
- break;
- case kIeee754Float64Atan:
- ASSEMBLE_IEEE754_UNOP(atan);
- break;
- case kIeee754Float64Atanh:
- ASSEMBLE_IEEE754_UNOP(atanh);
- break;
- case kIeee754Float64Atan2:
- ASSEMBLE_IEEE754_BINOP(atan2);
- break;
- case kIeee754Float64Cbrt:
- ASSEMBLE_IEEE754_UNOP(cbrt);
- break;
- case kIeee754Float64Cos:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(cos);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Cosh:
- ASSEMBLE_IEEE754_UNOP(cosh);
- break;
- case kIeee754Float64Expm1:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(expm1);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Exp:
- ASSEMBLE_IEEE754_UNOP(exp);
- break;
- case kIeee754Float64Log:
- ASSEMBLE_IEEE754_UNOP(log);
- break;
- case kIeee754Float64Log1p:
- ASSEMBLE_IEEE754_UNOP(log1p);
- break;
- case kIeee754Float64Log2:
- ASSEMBLE_IEEE754_UNOP(log2);
- break;
- case kIeee754Float64Log10:
- ASSEMBLE_IEEE754_UNOP(log10);
- break;
- case kIeee754Float64Pow: {
- // Keep the x87 FPU stack empty before calling stub code
- __ fstp(0);
- // Call the MathStub and put return value in stX_0
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- /* Return value is in st(0) on x87. */
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kIeee754Float64Sin:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(sin);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Sinh:
- ASSEMBLE_IEEE754_UNOP(sinh);
- break;
- case kIeee754Float64Tan:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(tan);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Tanh:
- ASSEMBLE_IEEE754_UNOP(tanh);
- break;
- case kX87Add:
- if (HasImmediateInput(instr, 1)) {
- __ add(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ add(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87And:
- if (HasImmediateInput(instr, 1)) {
- __ and_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ and_(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Cmp:
- ASSEMBLE_COMPARE(cmp);
- break;
- case kX87Cmp16:
- ASSEMBLE_COMPARE(cmpw);
- break;
- case kX87Cmp8:
- ASSEMBLE_COMPARE(cmpb);
- break;
- case kX87Test:
- ASSEMBLE_COMPARE(test);
- break;
- case kX87Test16:
- ASSEMBLE_COMPARE(test_w);
- break;
- case kX87Test8:
- ASSEMBLE_COMPARE(test_b);
- break;
- case kX87Imul:
- if (HasImmediateInput(instr, 1)) {
- __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
- } else {
- __ imul(i.OutputRegister(), i.InputOperand(1));
- }
- break;
- case kX87ImulHigh:
- __ imul(i.InputRegister(1));
- break;
- case kX87UmulHigh:
- __ mul(i.InputRegister(1));
- break;
- case kX87Idiv:
- __ cdq();
- __ idiv(i.InputOperand(1));
- break;
- case kX87Udiv:
- __ Move(edx, Immediate(0));
- __ div(i.InputOperand(1));
- break;
- case kX87Not:
- __ not_(i.OutputOperand());
- break;
- case kX87Neg:
- __ neg(i.OutputOperand());
- break;
- case kX87Or:
- if (HasImmediateInput(instr, 1)) {
- __ or_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ or_(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Xor:
- if (HasImmediateInput(instr, 1)) {
- __ xor_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ xor_(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Sub:
- if (HasImmediateInput(instr, 1)) {
- __ sub(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ sub(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Shl:
- if (HasImmediateInput(instr, 1)) {
- __ shl(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ shl_cl(i.OutputOperand());
- }
- break;
- case kX87Shr:
- if (HasImmediateInput(instr, 1)) {
- __ shr(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ shr_cl(i.OutputOperand());
- }
- break;
- case kX87Sar:
- if (HasImmediateInput(instr, 1)) {
- __ sar(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ sar_cl(i.OutputOperand());
- }
- break;
- case kX87AddPair: {
- // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
- // i.InputRegister(1) ... left high word.
- // i.InputRegister(2) ... right low word.
- // i.InputRegister(3) ... right high word.
- bool use_temp = false;
- if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
- i.OutputRegister(0).code() == i.InputRegister(3).code()) {
- // We cannot write to the output register directly, because it would
- // overwrite an input for adc. We have to use the temp register.
- use_temp = true;
- __ Move(i.TempRegister(0), i.InputRegister(0));
- __ add(i.TempRegister(0), i.InputRegister(2));
- } else {
- __ add(i.OutputRegister(0), i.InputRegister(2));
- }
- if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
- __ Move(i.OutputRegister(1), i.InputRegister(1));
- }
- __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
- if (use_temp) {
- __ Move(i.OutputRegister(0), i.TempRegister(0));
- }
- break;
- }
- case kX87SubPair: {
- // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
- // i.InputRegister(1) ... left high word.
- // i.InputRegister(2) ... right low word.
- // i.InputRegister(3) ... right high word.
- bool use_temp = false;
- if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
- i.OutputRegister(0).code() == i.InputRegister(3).code()) {
- // We cannot write to the output register directly, because it would
- // overwrite an input for sbb. We have to use the temp register.
- use_temp = true;
- __ Move(i.TempRegister(0), i.InputRegister(0));
- __ sub(i.TempRegister(0), i.InputRegister(2));
- } else {
- __ sub(i.OutputRegister(0), i.InputRegister(2));
- }
- if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
- __ Move(i.OutputRegister(1), i.InputRegister(1));
- }
- __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
- if (use_temp) {
- __ Move(i.OutputRegister(0), i.TempRegister(0));
- }
- break;
- }
- case kX87MulPair: {
- __ imul(i.OutputRegister(1), i.InputOperand(0));
- __ mov(i.TempRegister(0), i.InputOperand(1));
- __ imul(i.TempRegister(0), i.InputOperand(2));
- __ add(i.OutputRegister(1), i.TempRegister(0));
- __ mov(i.OutputRegister(0), i.InputOperand(0));
- // Multiply the low words; mul leaves the 64-bit product in edx:eax.
- __ mul(i.InputRegister(2));
- __ add(i.OutputRegister(1), i.TempRegister(0));
-
- break;
- }
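For readers tracing the pair multiply, the arithmetic it emits can be restated in portable C++. This is an illustrative sketch only (the function and parameter names are invented here, and it is not part of V8): the low 64 bits of a 64x64-bit product come from one widening multiply of the low words plus two truncating cross multiplies.

#include <cstdint>

// Sketch of the math behind kX87MulPair: out_hi:out_lo = low 64 bits of
// (a_hi:a_lo) * (b_hi:b_lo). The mul instruction supplies low_product,
// the two imul instructions supply the cross terms.
void MulPairSketch(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                   uint32_t* out_lo, uint32_t* out_hi) {
  uint64_t low_product = static_cast<uint64_t>(a_lo) * b_lo;
  *out_lo = static_cast<uint32_t>(low_product);
  *out_hi = static_cast<uint32_t>(low_product >> 32) + a_lo * b_hi + a_hi * b_lo;
}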
- case kX87ShlPair:
- if (HasImmediateInput(instr, 2)) {
- __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
- } else {
- // Shift has been loaded into CL by the register allocator.
- __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
- }
- break;
- case kX87ShrPair:
- if (HasImmediateInput(instr, 2)) {
- __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
- } else {
- // Shift has been loaded into CL by the register allocator.
- __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
- }
- break;
- case kX87SarPair:
- if (HasImmediateInput(instr, 2)) {
- __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
- } else {
- // Shift has been loaded into CL by the register allocator.
- __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
- }
- break;
- case kX87Ror:
- if (HasImmediateInput(instr, 1)) {
- __ ror(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ ror_cl(i.OutputOperand());
- }
- break;
- case kX87Lzcnt:
- __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
- break;
- case kX87Popcnt:
- __ Popcnt(i.OutputRegister(), i.InputOperand(0));
- break;
- case kX87LoadFloat64Constant: {
- InstructionOperand* source = instr->InputAt(0);
- InstructionOperand* destination = instr->Output();
- DCHECK(source->IsConstant());
- X87OperandConverter g(this, nullptr);
- Constant src_constant = g.ToConstant(source);
-
- DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ mov(MemOperand(esp, 0), Immediate(lower));
- __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- UNREACHABLE();
- }
- break;
- }
- case kX87Float32Cmp: {
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ FCmp();
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float32Add: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, kFloatSize));
- __ faddp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float32Sub: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fsubp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float32Mul: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fmulp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float32Div: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fdivp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
-
- case kX87Float32Sqrt: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fsqrt();
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87Float32Abs: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fabs();
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87Float32Neg: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fchs();
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87Float32Round: {
- RoundingMode mode =
- static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- // Set the correct rounding mode in the x87 control register.
- __ X87SetRC((mode << 10));
-
- if (!instr->InputAt(0)->IsFPRegister()) {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(i.InputOperand(0));
- }
- __ frndint();
- __ X87SetRC(0x0000);
- break;
- }
- case kX87Float64Add: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ faddp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Sub: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fsub_d(MemOperand(esp, 0));
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Mul: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fmul_d(MemOperand(esp, 0));
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Div: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fdiv_d(MemOperand(esp, 0));
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Mod: {
- FrameScope frame_scope(&masm_, StackFrame::MANUAL);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ mov(eax, esp);
- __ PrepareCallCFunction(4, eax);
- __ fstp(0);
- __ fld_d(MemOperand(eax, 0));
- __ fstp_d(Operand(esp, 1 * kDoubleSize));
- __ fld_d(MemOperand(eax, kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 4);
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float32Max: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(below, &done_compare, Label::kNear);
- __ j(above, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kFloatSize));
- __ fld(1);
- __ fstp_s(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 0));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kFloatSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float64Max: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fld_d(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(below, &done_compare, Label::kNear);
- __ j(above, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fld(1);
- __ fstp_d(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 4));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float32Min: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(above, &done_compare, Label::kNear);
- __ j(below, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kFloatSize));
- __ fld(0);
- __ fstp_s(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 0));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kFloatSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float64Min: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fld_d(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(above, &done_compare, Label::kNear);
- __ j(below, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fld(0);
- __ fstp_d(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 4));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float64Abs: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fabs();
- __ lea(esp, Operand(esp, kDoubleSize));
- break;
- }
- case kX87Float64Neg: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fchs();
- __ lea(esp, Operand(esp, kDoubleSize));
- break;
- }
- case kX87Int32ToFloat32: {
- InstructionOperand* input = instr->InputAt(0);
- DCHECK(input->IsRegister() || input->IsStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (input->IsRegister()) {
- Register input_reg = i.InputRegister(0);
- __ push(input_reg);
- __ fild_s(Operand(esp, 0));
- __ pop(input_reg);
- } else {
- __ fild_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Uint32ToFloat32: {
- InstructionOperand* input = instr->InputAt(0);
- DCHECK(input->IsRegister() || input->IsStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- Label msb_set_src;
- Label jmp_return;
- // Put the input integer into eax (temporarily).
- __ push(eax);
- if (input->IsRegister())
- __ mov(eax, i.InputRegister(0));
- else
- __ mov(eax, i.InputOperand(0));
-
- __ test(eax, eax);
- __ j(sign, &msb_set_src, Label::kNear);
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
-
- __ jmp(&jmp_return, Label::kNear);
- __ bind(&msb_set_src);
- // Need another temp reg
- __ push(ebx);
- __ mov(ebx, eax);
- __ shr(eax, 1);
- // Recover the least significant bit to avoid rounding errors.
- __ and_(ebx, Immediate(1));
- __ or_(eax, ebx);
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
- __ fld(0);
- __ faddp();
- // Restore ebx.
- __ pop(ebx);
- __ bind(&jmp_return);
- // Restore eax.
- __ pop(eax);
- break;
- }
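The halve-and-double trick above (shift right by one, OR the dropped bit back in as a sticky bit, convert, then add the result to itself) can be restated as a small standalone sketch. This is illustrative only and not V8 code; portable C++ could simply cast, but the sketch mirrors the signed-only fild_s path the deleted backend had to work with.

#include <cstdint>

float Uint32ToFloat32Sketch(uint32_t value) {
  if (static_cast<int32_t>(value) >= 0) {
    // MSB clear: a signed conversion (the fild_s path) already gives the right answer.
    return static_cast<float>(static_cast<int32_t>(value));
  }
  // MSB set: convert value/2 with the dropped low bit folded back in so the
  // rounding decision is unchanged, then double the result exactly.
  uint32_t half = (value >> 1) | (value & 1);
  float f = static_cast<float>(static_cast<int32_t>(half));
  return f + f;
}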
- case kX87Int32ToFloat64: {
- InstructionOperand* input = instr->InputAt(0);
- DCHECK(input->IsRegister() || input->IsStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (input->IsRegister()) {
- Register input_reg = i.InputRegister(0);
- __ push(input_reg);
- __ fild_s(Operand(esp, 0));
- __ pop(input_reg);
- } else {
- __ fild_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Float32ToFloat64: {
- InstructionOperand* input = instr->InputAt(0);
- if (input->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Uint32ToFloat64: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ LoadUint32NoSSE2(i.InputRegister(0));
- break;
- }
- case kX87Float32ToInt32: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_s(i.InputOperand(0));
- }
- __ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kX87Float32ToUint32: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_s(i.InputOperand(0));
- }
- Label success;
- __ TruncateX87TOSToI(i.OutputRegister(0));
- __ test(i.OutputRegister(0), i.OutputRegister(0));
- __ j(positive, &success);
- // Need to preserve the input float32 data.
- __ fld(0);
- __ push(Immediate(INT32_MIN));
- __ fild_s(Operand(esp, 0));
- __ lea(esp, Operand(esp, kPointerSize));
- __ faddp();
- __ TruncateX87TOSToI(i.OutputRegister(0));
- __ or_(i.OutputRegister(0), Immediate(0x80000000));
- // Only keep the input float32 data on the x87 stack when returning.
- __ fstp(0);
- __ bind(&success);
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
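The unsigned truncation above relies on biasing: if the plain signed truncation comes out negative, the value is reduced by 2^31 (by adding float(INT32_MIN)), truncated again, and the top bit is set in the integer result. Below is a hedged, portable restatement, not V8 code; a range check replaces the sign test so the sketch avoids out-of-range casts, and it assumes 0 <= value < 2^32.

#include <cstdint>

uint32_t Float32ToUint32Sketch(float value) {
  // Sketch assumes 0 <= value < 2^32.
  if (value < 2147483648.0f) {
    // In int32 range: the ordinary truncating conversion suffices.
    return static_cast<uint32_t>(static_cast<int32_t>(value));
  }
  // Out of int32 range: bias down by 2^31, truncate, then restore the bias
  // by setting the top bit of the integer result.
  int32_t biased = static_cast<int32_t>(value - 2147483648.0f);
  return static_cast<uint32_t>(biased) | 0x80000000u;
}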
- case kX87Float64ToInt32: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_d(i.InputOperand(0));
- }
- __ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kX87Float64ToFloat32: {
- InstructionOperand* input = instr->InputAt(0);
- if (input->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(i.InputOperand(0));
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- break;
- }
- case kX87Float64ToUint32: {
- __ push_imm32(-2147483648);
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_d(i.InputOperand(0));
- }
- __ fild_s(Operand(esp, 0));
- __ fld(1);
- __ faddp();
- __ TruncateX87TOSToI(i.OutputRegister(0));
- __ add(esp, Immediate(kInt32Size));
- __ add(i.OutputRegister(), Immediate(0x80000000));
- __ fstp(0);
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kX87Float64ExtractHighWord32: {
- if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
- }
- break;
- }
- case kX87Float64ExtractLowWord32: {
- if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ mov(i.OutputRegister(), MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- __ mov(i.OutputRegister(), i.InputOperand(0));
- }
- break;
- }
- case kX87Float64InsertHighWord32: {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(MemOperand(esp, 0));
- __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- break;
- }
- case kX87Float64InsertLowWord32: {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(MemOperand(esp, 0));
- __ mov(MemOperand(esp, 0), i.InputRegister(1));
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- break;
- }
- case kX87Float64Sqrt: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fsqrt();
- __ lea(esp, Operand(esp, kDoubleSize));
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Round: {
- RoundingMode mode =
- static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- // Set the correct rounding mode in the x87 control register.
- __ X87SetRC((mode << 10));
-
- if (!instr->InputAt(0)->IsFPRegister()) {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(i.InputOperand(0));
- }
- __ frndint();
- __ X87SetRC(0x0000);
- break;
- }
- case kX87Float64Cmp: {
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fld_d(MemOperand(esp, 0));
- __ FCmp();
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float64SilenceNaN: {
- Label end, return_qnan;
- __ fstp(0);
- __ push(ebx);
- // Load the high word of the hole NaN (an sNaN) into ebx.
- __ mov(ebx, MemOperand(esp, 2 * kInt32Size));
- __ cmp(ebx, Immediate(kHoleNanUpper32));
- // Check whether the input is the hole NaN (an sNaN).
- __ j(equal, &return_qnan, Label::kNear);
- // If the input isn't the hole NaN, just load it and return.
- __ fld_d(MemOperand(esp, 1 * kInt32Size));
- __ jmp(&end);
- __ bind(&return_qnan);
- // If the input is the hole NaN, return a quiet NaN instead.
- __ push(Immediate(0xffffffff));
- __ push(Immediate(0xfff7ffff));
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ bind(&end);
- __ pop(ebx);
- // Clear stack.
- __ lea(esp, Operand(esp, 1 * kDoubleSize));
- break;
- }
- case kX87Movsxbl:
- __ movsx_b(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movzxbl:
- __ movzx_b(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movb: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ mov_b(operand, i.InputInt8(index));
- } else {
- __ mov_b(operand, i.InputRegister(index));
- }
- break;
- }
- case kX87Movsxwl:
- __ movsx_w(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movzxwl:
- __ movzx_w(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movw: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ mov_w(operand, i.InputInt16(index));
- } else {
- __ mov_w(operand, i.InputRegister(index));
- }
- break;
- }
- case kX87Movl:
- if (instr->HasOutput()) {
- __ mov(i.OutputRegister(), i.MemoryOperand());
- } else {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ mov(operand, i.InputImmediate(index));
- } else {
- __ mov(operand, i.InputRegister(index));
- }
- }
- break;
- case kX87Movsd: {
- if (instr->HasOutput()) {
- X87Register output = i.OutputDoubleRegister();
- USE(output);
- DCHECK(output.code() == 0);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(i.MemoryOperand());
- } else {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ fst_d(operand);
- }
- break;
- }
- case kX87Movss: {
- if (instr->HasOutput()) {
- X87Register output = i.OutputDoubleRegister();
- USE(output);
- DCHECK(output.code() == 0);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(i.MemoryOperand());
- } else {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ fst_s(operand);
- }
- break;
- }
- case kX87BitcastFI: {
- __ mov(i.OutputRegister(), MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87BitcastIF: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (instr->InputAt(0)->IsRegister()) {
- __ lea(esp, Operand(esp, -kFloatSize));
- __ mov(MemOperand(esp, 0), i.InputRegister(0));
- __ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
- } else {
- __ fld_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Lea: {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- // Shorten "lea" to "add", "sub" or "shl" if the register allocation
- // and addressing mode just happen to work out. The "add"/"sub" forms
- // in these cases are faster, based on measurements.
- if (mode == kMode_MI) {
- __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
- } else if (i.InputRegister(0).is(i.OutputRegister())) {
- if (mode == kMode_MRI) {
- int32_t constant_summand = i.InputInt32(1);
- if (constant_summand > 0) {
- __ add(i.OutputRegister(), Immediate(constant_summand));
- } else if (constant_summand < 0) {
- __ sub(i.OutputRegister(), Immediate(-constant_summand));
- }
- } else if (mode == kMode_MR1) {
- if (i.InputRegister(1).is(i.OutputRegister())) {
- __ shl(i.OutputRegister(), 1);
- } else {
- __ add(i.OutputRegister(), i.InputRegister(1));
- }
- } else if (mode == kMode_M2) {
- __ shl(i.OutputRegister(), 1);
- } else if (mode == kMode_M4) {
- __ shl(i.OutputRegister(), 2);
- } else if (mode == kMode_M8) {
- __ shl(i.OutputRegister(), 3);
- } else {
- __ lea(i.OutputRegister(), i.MemoryOperand());
- }
- } else if (mode == kMode_MR1 &&
- i.InputRegister(1).is(i.OutputRegister())) {
- __ add(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ lea(i.OutputRegister(), i.MemoryOperand());
- }
- break;
- }
- case kX87Push:
- if (instr->InputAt(0)->IsFPRegister()) {
- auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kFloatSize));
- __ fst_s(Operand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
- } else {
- DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(Operand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- }
- } else if (instr->InputAt(0)->IsFPStackSlot()) {
- auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kFloatSize));
- __ fld_s(i.InputOperand(0));
- __ fstp_s(MemOperand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
- } else {
- DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
- __ sub(esp, Immediate(kDoubleSize));
- __ fld_d(i.InputOperand(0));
- __ fstp_d(MemOperand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- }
- } else if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
- frame_access_state()->IncreaseSPDelta(1);
- } else {
- __ push(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
- }
- break;
- case kX87Poke: {
- int const slot = MiscField::decode(instr->opcode());
- if (HasImmediateInput(instr, 0)) {
- __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
- } else {
- __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
- }
- break;
- }
- case kX87Xchgb: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg_b(i.InputRegister(index), operand);
- break;
- }
- case kX87Xchgw: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg_w(i.InputRegister(index), operand);
- break;
- }
- case kX87Xchgl: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg(i.InputRegister(index), operand);
- break;
- }
- case kX87PushFloat32:
- __ lea(esp, Operand(esp, -kFloatSize));
- if (instr->InputAt(0)->IsFPStackSlot()) {
- __ fld_s(i.InputOperand(0));
- __ fstp_s(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsFPRegister()) {
- __ fst_s(MemOperand(esp, 0));
- } else {
- UNREACHABLE();
- }
- break;
- case kX87PushFloat64:
- __ lea(esp, Operand(esp, -kDoubleSize));
- if (instr->InputAt(0)->IsFPStackSlot()) {
- __ fld_d(i.InputOperand(0));
- __ fstp_d(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsFPRegister()) {
- __ fst_d(MemOperand(esp, 0));
- } else {
- UNREACHABLE();
- }
- break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s, OutOfLineLoadFloat32NaN);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d, OutOfLineLoadFloat64NaN);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
- break;
- case kX87StackCheck: {
- ExternalReference const stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- break;
- }
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
- UNREACHABLE(); // Won't be generated by instruction selector.
- break;
- }
- return kSuccess;
-} // NOLINT(readability/fn_size)
-
-static Condition FlagsConditionToCondition(FlagsCondition condition) {
- switch (condition) {
- case kUnorderedEqual:
- case kEqual:
- return equal;
- break;
- case kUnorderedNotEqual:
- case kNotEqual:
- return not_equal;
- break;
- case kSignedLessThan:
- return less;
- break;
- case kSignedGreaterThanOrEqual:
- return greater_equal;
- break;
- case kSignedLessThanOrEqual:
- return less_equal;
- break;
- case kSignedGreaterThan:
- return greater;
- break;
- case kUnsignedLessThan:
- return below;
- break;
- case kUnsignedGreaterThanOrEqual:
- return above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- return below_equal;
- break;
- case kUnsignedGreaterThan:
- return above;
- break;
- case kOverflow:
- return overflow;
- break;
- case kNotOverflow:
- return no_overflow;
- break;
- default:
- UNREACHABLE();
- return no_condition;
- break;
- }
-}
-
-// Assembles a branch after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
-
- Label done;
- Label tlabel_tmp;
- Label flabel_tmp;
- Label* tlabel = &tlabel_tmp;
- Label* flabel = &flabel_tmp;
-
- Label* tlabel_dst = branch->true_label;
- Label* flabel_dst = branch->false_label;
-
- if (branch->condition == kUnorderedEqual) {
- __ j(parity_even, flabel, flabel_distance);
- } else if (branch->condition == kUnorderedNotEqual) {
- __ j(parity_even, tlabel);
- }
- __ j(FlagsConditionToCondition(branch->condition), tlabel);
-
- // Add a jump if not falling through to the next block.
- if (!branch->fallthru) __ jmp(flabel);
-
- __ jmp(&done);
- __ bind(&tlabel_tmp);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- if (mode == kFlags_deoptimize) {
- int double_register_param_count = 0;
- int x87_layout = 0;
- for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsFPRegister()) {
- double_register_param_count++;
- }
- }
- // Currently we use only one x87 register. If double_register_param_count
- // is greater than 1, a duplicated double register has been added to the
- // inputs of this instruction.
- if (double_register_param_count > 0) {
- x87_layout = (0 << 3) | 1;
- }
- // The layout of the x87 register stack is pushed onto the top of the FPU
- // register stack for deoptimization.
- __ push(Immediate(x87_layout));
- __ fild_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kPointerSize));
- }
- __ jmp(tlabel_dst);
- __ bind(&flabel_tmp);
- __ jmp(flabel_dst);
- __ bind(&done);
-}
-
-
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
-}
-
-void CodeGenerator::AssembleArchTrap(Instruction* instr,
- FlagsCondition condition) {
- class OutOfLineTrap final : public OutOfLineCode {
- public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
-
- void Generate() final {
- X87OperandConverter i(gen_, instr_);
-
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
- GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- __ set_has_frame(old_has_frame);
- }
- if (FLAG_debug_code) {
- __ ud2();
- }
- }
-
- private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
- // We cannot test calls to the runtime in cctest/test-run-wasm.
- // Therefore we emit a call to C here instead of a call to the runtime.
- __ PrepareCallCFunction(0, esi);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
- } else {
- __ Move(esi, isolate()->native_context());
- gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
- }
- }
-
- bool frame_elided_;
- Instruction* instr_;
- CodeGenerator* gen_;
- };
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
- Label* tlabel = ool->entry();
- Label end;
- if (condition == kUnorderedEqual) {
- __ j(parity_even, &end);
- } else if (condition == kUnorderedNotEqual) {
- __ j(parity_even, tlabel);
- }
- __ j(FlagsConditionToCondition(condition), tlabel);
- __ bind(&end);
-}
-
-// Assembles boolean materializations after an instruction.
-void CodeGenerator::AssembleArchBoolean(Instruction* instr,
- FlagsCondition condition) {
- X87OperandConverter i(this, instr);
- Label done;
-
- // Materialize a full 32-bit 1 or 0 value. The result register is always the
- // last output of the instruction.
- Label check;
- DCHECK_NE(0u, instr->OutputCount());
- Register reg = i.OutputRegister(instr->OutputCount() - 1);
- if (condition == kUnorderedEqual) {
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- } else if (condition == kUnorderedNotEqual) {
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- }
- Condition cc = FlagsConditionToCondition(condition);
-
- __ bind(&check);
- if (reg.is_byte_register()) {
- // setcc for byte registers (al, bl, cl, dl).
- __ setcc(cc, reg);
- __ movzx_b(reg, reg);
- } else {
- // Emit a branch to set a register to either 1 or 0.
- Label set;
- __ j(cc, &set, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- __ bind(&set);
- __ mov(reg, Immediate(1));
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- X87OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ cmp(input, Immediate(i.InputInt32(index + 0)));
- __ j(equal, GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
-
-void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
- X87OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- size_t const case_count = instr->InputCount() - 2;
- Label** cases = zone()->NewArray<Label*>(case_count);
- for (size_t index = 0; index < case_count; ++index) {
- cases[index] = GetLabel(i.InputRpo(index + 2));
- }
- Label* const table = AddJumpTable(cases, case_count);
- __ cmp(input, Immediate(case_count));
- __ j(above_equal, GetLabel(i.InputRpo(1)));
- __ jmp(Operand::JumpTable(input, times_4, table));
-}
-
-CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, SourcePosition pos) {
- DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
- Deoptimizer::BailoutType bailout_type =
- deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
- : Deoptimizer::EAGER;
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
- if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
- __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
- return kSuccess;
-}
-
-
-// The calling convention for JSFunctions on X87 passes arguments on the
-// stack and the JSFunction and context in EDI and ESI, respectively; thus
-// the steps of the call look as follows:
-
-// --{ before the call instruction }--------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// --{ push arguments and setup ESI, EDI }--------------------------------------
-// | args + receiver | caller frame |
-// ^ esp ^ ebp
-// [edi = JSFunction, esi = context]
-
-// --{ call [edi + kCodeEntryOffset] }------------------------------------------
-// | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// =={ prologue of called function }============================================
-// --{ push ebp }---------------------------------------------------------------
-// | FP | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// --{ mov ebp, esp }-----------------------------------------------------------
-// | FP | RET | args + receiver | caller frame |
-// ^ ebp,esp
-
-// --{ push esi }---------------------------------------------------------------
-// | CTX | FP | RET | args + receiver | caller frame |
-// ^esp ^ ebp
-
-// --{ push edi }---------------------------------------------------------------
-// | FNC | CTX | FP | RET | args + receiver | caller frame |
-// ^esp ^ ebp
-
-// --{ subi esp, #N }-----------------------------------------------------------
-// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
-// ^esp ^ ebp
-
-// =={ body of called function }================================================
-
-// =={ epilogue of called function }============================================
-// --{ mov esp, ebp }-----------------------------------------------------------
-// | FP | RET | args + receiver | caller frame |
-// ^ esp,ebp
-
-// --{ pop ebp }-----------------------------------------------------------
-// | | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// --{ ret #A+1 }-----------------------------------------------------------
-// | | caller frame |
-// ^ esp ^ ebp
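Reading the prologue diagram above, the ebp-relative layout it implies can be written down directly. The constants below are illustrative only; they restate the picture and are not taken from V8's frame-constants headers.

// After "push ebp; mov ebp, esp" in the prologue:
//   [ebp + 0]  saved caller frame pointer (FP)
//   [ebp + 4]  return address (RET)
//   [ebp + 8]  start of the args + receiver area, growing towards higher addresses
constexpr int kSavedFramePointerOffset = 0;
constexpr int kReturnAddressOffset = 4;
constexpr int kArgumentsAreaOffset = 8;  // kPointerSize == 4 on this target

constexpr int ArgumentSlotOffset(int slot) {  // slot 0 is the lowest-addressed slot
  return kArgumentsAreaOffset + slot * 4;
}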
-
-
-// Runtime function calls are accomplished by doing a stub call to the
-// CEntryStub (a real code object). On x87 this passes arguments on the
-// stack, the number of arguments in EAX, the address of the runtime function
-// in EBX, and the context in ESI.
-
-// --{ before the call instruction }--------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
-// | args + receiver | caller frame |
-// ^ esp ^ ebp
-// [eax = #args, ebx = runtime function, esi = context]
-
-// --{ call #CEntryStub }-------------------------------------------------------
-// | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// =={ body of runtime function }===============================================
-
-// --{ runtime returns }--------------------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// Other custom linkages (e.g. for calling directly into and out of C++) may
-// need to save callee-saved registers on the stack, which is done in the
-// function prologue of generated code.
-
-// --{ before the call instruction }--------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// --{ set up arguments in registers on stack }---------------------------------
-// | args | caller frame |
-// ^ esp ^ ebp
-// [r0 = arg0, r1 = arg1, ...]
-
-// --{ call code }--------------------------------------------------------------
-// | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// =={ prologue of called function }============================================
-// --{ push ebp }---------------------------------------------------------------
-// | FP | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// --{ mov ebp, esp }-----------------------------------------------------------
-// | FP | RET | args | caller frame |
-// ^ ebp,esp
-
-// --{ save registers }---------------------------------------------------------
-// | regs | FP | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// --{ subi esp, #N }-----------------------------------------------------------
-// | callee frame | regs | FP | RET | args | caller frame |
-// ^esp ^ ebp
-
-// =={ body of called function }================================================
-
-// =={ epilogue of called function }============================================
-// --{ restore registers }------------------------------------------------------
-// | regs | FP | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// --{ mov esp, ebp }-----------------------------------------------------------
-// | FP | RET | args | caller frame |
-// ^ esp,ebp
-
-// --{ pop ebp }----------------------------------------------------------------
-// | RET | args | caller frame |
-// ^ esp ^ ebp
-
-void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- DCHECK(!info()->is_osr());
- int pushed = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- ++pushed;
- }
- frame->AllocateSavedCalleeRegisterSlots(pushed);
- }
-
- // Initialize FPU state.
- __ fninit();
- __ fld1();
-}
-
-void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
- __ push(ebp);
- __ mov(ebp, esp);
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue());
- if (descriptor->PushArgumentCount()) {
- __ push(kJavaScriptCallArgCountRegister);
- }
- } else {
- __ StubPrologue(info()->GetOutputStackFrameType());
- }
- }
-
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
-
- if (info()->is_osr()) {
- // TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
-
- // Unoptimized code jumps directly to this entrypoint while the unoptimized
- // frame is still on the stack. Optimized code uses OSR values directly from
- // the unoptimized frame. Thus, all that needs to be done is to allocate the
- // remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
- osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
-
- // Initialize FPU state.
- __ fninit();
- __ fld1();
- }
-
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (shrink_slots > 0) {
- __ sub(esp, Immediate(shrink_slots * kPointerSize));
- }
-
- if (saves != 0) { // Save callee-saved registers.
- DCHECK(!info()->is_osr());
- int pushed = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ push(Register::from_code(i));
- ++pushed;
- }
- }
-}
-
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-
- // Clear the FPU stack only if there is no return value on the stack.
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- bool clear_stack = true;
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- MachineRepresentation rep = descriptor->GetReturnType(i).representation();
- LinkageLocation loc = descriptor->GetReturnLocation(i);
- if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
- clear_stack = false;
- break;
- }
- }
- if (clear_stack) __ fstp(0);
-
- const RegList saves = descriptor->CalleeSavedRegisters();
- // Restore registers.
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ pop(Register::from_code(i));
- }
- }
-
- // Might need ecx for scratch if pop_size is too big or if there is a variable
- // pop count.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
- X87OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
- AssembleDeconstructFrame();
- } else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now if they always have the same
- // number of return args.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- return;
- } else {
- __ bind(&return_label_);
- AssembleDeconstructFrame();
- }
- } else {
- AssembleDeconstructFrame();
- }
- }
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
- __ Ret(static_cast<int>(pop_size), ecx);
- } else {
- Register pop_reg = g.ToRegister(pop);
- Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
- __ pop(scratch_reg);
- __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
- __ jmp(scratch_reg);
- }
-}
-
-void CodeGenerator::FinishCode() {}
-
-void CodeGenerator::AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) {
- X87OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- Operand dst = g.ToOperand(destination);
- __ mov(dst, src);
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = g.ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mov(dst, src);
- } else {
- Operand dst = g.ToOperand(destination);
- __ push(src);
- __ pop(dst);
- }
- } else if (source->IsConstant()) {
- Constant src_constant = g.ToConstant(source);
- if (src_constant.type() == Constant::kHeapObject) {
- Handle<HeapObject> src = src_constant.ToHeapObject();
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ LoadHeapObject(dst, src);
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = g.ToOperand(destination);
- AllowDeferredHandleDereference embedding_raw_address;
- if (isolate()->heap()->InNewSpace(*src)) {
- __ PushHeapObject(src);
- __ pop(dst);
- } else {
- __ mov(dst, src);
- }
- }
- } else if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (destination->IsStackSlot()) {
- Operand dst = g.ToOperand(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (src_constant.type() == Constant::kFloat32) {
- // TODO(turbofan): Can we do better here?
- uint32_t src = src_constant.ToFloat32AsInt();
- if (destination->IsFPRegister()) {
- __ sub(esp, Immediate(kInt32Size));
- __ mov(MemOperand(esp, 0), Immediate(src));
- // Always push only one value onto the x87 stack.
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kInt32Size));
- } else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- __ Move(dst, Immediate(src));
- }
- } else {
- DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = src_constant.ToFloat64AsInt();
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ mov(MemOperand(esp, 0), Immediate(lower));
- __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
- // Always push only one value onto the x87 stack.
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.HighOperand(destination);
- __ Move(dst0, Immediate(lower));
- __ Move(dst1, Immediate(upper));
- }
- }
- } else if (source->IsFPRegister()) {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fst_s(dst);
- break;
- case MachineRepresentation::kFloat64:
- __ fst_d(dst);
- break;
- default:
- UNREACHABLE();
- }
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- Operand src = g.ToOperand(source);
- auto allocated = AllocatedOperand::cast(*source);
- if (destination->IsFPRegister()) {
- // Always push only one value onto the x87 stack.
- __ fstp(0);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(src);
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(src);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- Operand dst = g.ToOperand(destination);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(src);
- __ fstp_s(dst);
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(src);
- __ fstp_d(dst);
- break;
- default:
- UNREACHABLE();
- }
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
-void CodeGenerator::AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) {
- X87OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = g.ToRegister(source);
- Register dst = g.ToRegister(destination);
- __ xchg(dst, src);
- } else if (source->IsRegister() && destination->IsStackSlot()) {
- // Register-memory.
- __ xchg(g.ToRegister(source), g.ToOperand(destination));
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory.
- Operand dst1 = g.ToOperand(destination);
- __ push(dst1);
- frame_access_state()->IncreaseSPDelta(1);
- Operand src1 = g.ToOperand(source);
- __ push(src1);
- Operand dst2 = g.ToOperand(destination);
- __ pop(dst2);
- frame_access_state()->IncreaseSPDelta(-1);
- Operand src2 = g.ToOperand(source);
- __ pop(src2);
- } else if (source->IsFPRegister() && destination->IsFPRegister()) {
- UNREACHABLE();
- } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(g.ToOperand(destination));
- __ fxch();
- __ fstp_s(g.ToOperand(destination));
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(g.ToOperand(destination));
- __ fxch();
- __ fstp_d(g.ToOperand(destination));
- break;
- default:
- UNREACHABLE();
- }
- } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
- auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(g.ToOperand(source));
- __ fld_s(g.ToOperand(destination));
- __ fstp_s(g.ToOperand(source));
- __ fstp_s(g.ToOperand(destination));
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(g.ToOperand(source));
- __ fld_d(g.ToOperand(destination));
- __ fstp_d(g.ToOperand(source));
- __ fstp_d(g.ToOperand(destination));
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-}
-
-
-void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
- for (size_t index = 0; index < target_count; ++index) {
- __ dd(targets[index]);
- }
-}
-
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
- if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
- return;
- }
-
- int space_needed = Deoptimizer::patch_size();
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
-}
-
-#undef __
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
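The kX87Float32Round and kX87Float64Round cases above temporarily program the x87 rounding-control bits, run frndint, and then reset the control bits to round-to-nearest. A standalone analogue using the standard C++ <cfenv> facilities is sketched below; it is illustrative only (it is not how V8 does it, and it restores the caller's previous mode rather than resetting to the default).

#include <cfenv>
#include <cmath>
#include <cstdio>

double RoundWithMode(double value, int rounding_mode) {
  const int previous_mode = std::fegetround();
  std::fesetround(rounding_mode);               // analogue of X87SetRC(mode << 10)
  const double result = std::nearbyint(value);  // analogue of frndint
  std::fesetround(previous_mode);               // analogue of restoring the control bits
  return result;
}

int main() {
  std::printf("%.1f\n", RoundWithMode(2.5, FE_DOWNWARD));   // 2.0
  std::printf("%.1f\n", RoundWithMode(2.5, FE_TONEAREST));  // 2.0 (ties to even)
  std::printf("%.1f\n", RoundWithMode(2.5, FE_UPWARD));     // 3.0
}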
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
deleted file mode 100644
index 5f527fd43f..0000000000
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
-#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
-
-#include "src/compiler/instruction.h"
-#include "src/compiler/instruction-codes.h"
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// X87-specific opcodes that specify which assembly sequence to emit.
-// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X87Add) \
- V(X87And) \
- V(X87Cmp) \
- V(X87Cmp16) \
- V(X87Cmp8) \
- V(X87Test) \
- V(X87Test16) \
- V(X87Test8) \
- V(X87Or) \
- V(X87Xor) \
- V(X87Sub) \
- V(X87Imul) \
- V(X87ImulHigh) \
- V(X87UmulHigh) \
- V(X87Idiv) \
- V(X87Udiv) \
- V(X87Not) \
- V(X87Neg) \
- V(X87Shl) \
- V(X87Shr) \
- V(X87Sar) \
- V(X87AddPair) \
- V(X87SubPair) \
- V(X87MulPair) \
- V(X87ShlPair) \
- V(X87ShrPair) \
- V(X87SarPair) \
- V(X87Ror) \
- V(X87Lzcnt) \
- V(X87Popcnt) \
- V(X87Float32Cmp) \
- V(X87Float32Add) \
- V(X87Float32Sub) \
- V(X87Float32Mul) \
- V(X87Float32Div) \
- V(X87Float32Abs) \
- V(X87Float32Neg) \
- V(X87Float32Sqrt) \
- V(X87Float32Round) \
- V(X87LoadFloat64Constant) \
- V(X87Float64Add) \
- V(X87Float64Sub) \
- V(X87Float64Mul) \
- V(X87Float64Div) \
- V(X87Float64Mod) \
- V(X87Float32Max) \
- V(X87Float64Max) \
- V(X87Float32Min) \
- V(X87Float64Min) \
- V(X87Float64Abs) \
- V(X87Float64Neg) \
- V(X87Int32ToFloat32) \
- V(X87Uint32ToFloat32) \
- V(X87Int32ToFloat64) \
- V(X87Float32ToFloat64) \
- V(X87Uint32ToFloat64) \
- V(X87Float64ToInt32) \
- V(X87Float32ToInt32) \
- V(X87Float32ToUint32) \
- V(X87Float64ToFloat32) \
- V(X87Float64ToUint32) \
- V(X87Float64ExtractHighWord32) \
- V(X87Float64ExtractLowWord32) \
- V(X87Float64InsertHighWord32) \
- V(X87Float64InsertLowWord32) \
- V(X87Float64Sqrt) \
- V(X87Float64Round) \
- V(X87Float64Cmp) \
- V(X87Float64SilenceNaN) \
- V(X87Movsxbl) \
- V(X87Movzxbl) \
- V(X87Movb) \
- V(X87Movsxwl) \
- V(X87Movzxwl) \
- V(X87Movw) \
- V(X87Movl) \
- V(X87Movss) \
- V(X87Movsd) \
- V(X87Lea) \
- V(X87BitcastFI) \
- V(X87BitcastIF) \
- V(X87Push) \
- V(X87PushFloat64) \
- V(X87PushFloat32) \
- V(X87Poke) \
- V(X87StackCheck) \
- V(X87Xchgb) \
- V(X87Xchgw) \
- V(X87Xchgl)
-
-// Addressing modes represent the "shape" of inputs to an instruction.
-// Many instructions support multiple addressing modes. Addressing modes
-// are encoded into the InstructionCode of the instruction and tell the
-// code generator after register allocation which assembler method to call.
-//
-// We use the following local notation for addressing modes:
-//
-// M = memory operand
-// R = base register
-// N = index register * N for N in {1, 2, 4, 8}
-// I = immediate displacement (int32_t)
-
-#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MR) /* [%r1 ] */ \
- V(MRI) /* [%r1 + K] */ \
- V(MR1) /* [%r1 + %r2*1 ] */ \
- V(MR2) /* [%r1 + %r2*2 ] */ \
- V(MR4) /* [%r1 + %r2*4 ] */ \
- V(MR8) /* [%r1 + %r2*8 ] */ \
- V(MR1I) /* [%r1 + %r2*1 + K] */ \
- V(MR2I) /* [%r1 + %r2*2 + K] */ \
- V(MR4I) /* [%r1 + %r2*4 + K] */ \
- V(MR8I) /* [%r1 + %r2*8 + K] */ \
- V(M1) /* [ %r2*1 ] */ \
- V(M2) /* [ %r2*2 ] */ \
- V(M4) /* [ %r2*4 ] */ \
- V(M8) /* [ %r2*8 ] */ \
- V(M1I) /* [ %r2*1 + K] */ \
- V(M2I) /* [ %r2*2 + K] */ \
- V(M4I) /* [ %r2*4 + K] */ \
- V(M8I) /* [ %r2*8 + K] */ \
- V(MI) /* [ K] */
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
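The addressing-mode notation listed above is shorthand for a single effective-address computation. A minimal sketch of that arithmetic in plain C++ follows; the names are invented here for illustration and are not part of V8.

#include <cstdint>

// kMode_MR4I, i.e. [%r1 + %r2*4 + K], with base r1, index r2, scale 4 and
// displacement K. The other modes drop one or more of these terms.
uint32_t EffectiveAddress(uint32_t base, uint32_t index, uint32_t scale,
                          int32_t displacement) {
  return base + index * scale + static_cast<uint32_t>(displacement);
}

// Example: [%r1 + %r2*4 + K] with r1 = 0x1000, r2 = 3, K = 8 gives 0x1000 + 12 + 8.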
diff --git a/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc b/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc
deleted file mode 100644
index af86a87ad7..0000000000
--- a/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/instruction-scheduler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-bool InstructionScheduler::SchedulerSupported() { return false; }
-
-
-int InstructionScheduler::GetTargetInstructionFlags(
- const Instruction* instr) const {
- UNIMPLEMENTED();
-}
-
-
-int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
- UNIMPLEMENTED();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
deleted file mode 100644
index b5594b8894..0000000000
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ /dev/null
@@ -1,1881 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/adapters.h"
-#include "src/compiler/instruction-selector-impl.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Adds X87-specific methods for generating operands.
-class X87OperandGenerator final : public OperandGenerator {
- public:
- explicit X87OperandGenerator(InstructionSelector* selector)
- : OperandGenerator(selector) {}
-
- InstructionOperand UseByteRegister(Node* node) {
- // TODO(titzer): encode byte register use constraints.
- return UseFixed(node, edx);
- }
-
- InstructionOperand DefineAsByteRegister(Node* node) {
- // TODO(titzer): encode byte register def constraints.
- return DefineAsRegister(node);
- }
-
- bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
- int effect_level) {
- if (input->opcode() != IrOpcode::kLoad ||
- !selector()->CanCover(node, input)) {
- return false;
- }
- if (effect_level != selector()->GetEffectLevel(input)) {
- return false;
- }
- MachineRepresentation rep =
- LoadRepresentationOf(input->op()).representation();
- switch (opcode) {
- case kX87Cmp:
- case kX87Test:
- return rep == MachineRepresentation::kWord32 ||
- rep == MachineRepresentation::kTagged;
- case kX87Cmp16:
- case kX87Test16:
- return rep == MachineRepresentation::kWord16;
- case kX87Cmp8:
- case kX87Test8:
- return rep == MachineRepresentation::kWord8;
- default:
- break;
- }
- return false;
- }
-
- InstructionOperand CreateImmediate(int imm) {
- return sequence()->AddImmediate(Constant(imm));
- }
-
- bool CanBeImmediate(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kExternalConstant:
- case IrOpcode::kRelocatableInt32Constant:
- case IrOpcode::kRelocatableInt64Constant:
- return true;
- case IrOpcode::kHeapConstant: {
-// TODO(bmeurer): We must not dereference handles concurrently. If we
-// really have to do this here, then we need to find a way to put this
-// information on the HeapConstant node already.
-#if 0
- // Constants in new space cannot be used as immediates in V8 because
- // the GC does not scan code objects when collecting the new generation.
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- Isolate* isolate = value->GetIsolate();
- return !isolate->heap()->InNewSpace(*value);
-#endif
- }
- default:
- return false;
- }
- }
-
- AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
- Node* displacement_node,
- DisplacementMode displacement_mode,
- InstructionOperand inputs[],
- size_t* input_count) {
- AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == nullptr)
- ? 0
- : OpParameter<int32_t>(displacement_node);
- if (displacement_mode == kNegativeDisplacement) {
- displacement = -displacement;
- }
- if (base != nullptr) {
- if (base->opcode() == IrOpcode::kInt32Constant) {
- displacement += OpParameter<int32_t>(base);
- base = nullptr;
- }
- }
- if (base != nullptr) {
- inputs[(*input_count)++] = UseRegister(base);
- if (index != nullptr) {
- DCHECK(scale >= 0 && scale <= 3);
- inputs[(*input_count)++] = UseRegister(index);
- if (displacement != 0) {
- inputs[(*input_count)++] = TempImmediate(displacement);
- static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
- kMode_MR4I, kMode_MR8I};
- mode = kMRnI_modes[scale];
- } else {
- static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
- kMode_MR4, kMode_MR8};
- mode = kMRn_modes[scale];
- }
- } else {
- if (displacement == 0) {
- mode = kMode_MR;
- } else {
- inputs[(*input_count)++] = TempImmediate(displacement);
- mode = kMode_MRI;
- }
- }
- } else {
- DCHECK(scale >= 0 && scale <= 3);
- if (index != nullptr) {
- inputs[(*input_count)++] = UseRegister(index);
- if (displacement != 0) {
- inputs[(*input_count)++] = TempImmediate(displacement);
- static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
- kMode_M4I, kMode_M8I};
- mode = kMnI_modes[scale];
- } else {
- static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
- kMode_M4, kMode_M8};
- mode = kMn_modes[scale];
- }
- } else {
- inputs[(*input_count)++] = TempImmediate(displacement);
- return kMode_MI;
- }
- }
- return mode;
- }
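-
- // Illustrative example of the routine above: with base %r1, index %r2,
- // scale 2 and a non-zero displacement K, the inputs emitted are
- // {UseRegister(base), UseRegister(index), TempImmediate(K)} and the mode
- // returned is kMode_MR4I, i.e. the [%r1 + %r2*4 + K] shape from
- // instruction-codes-x87.h.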
-
- AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
- InstructionOperand inputs[],
- size_t* input_count) {
- BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
- DCHECK(m.matches());
- if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
- return GenerateMemoryOperandInputs(
- m.index(), m.scale(), m.base(), m.displacement(),
- m.displacement_mode(), inputs, input_count);
- } else {
- inputs[(*input_count)++] = UseRegister(node->InputAt(0));
- inputs[(*input_count)++] = UseRegister(node->InputAt(1));
- return kMode_MR1;
- }
- }
-
- bool CanBeBetterLeftOperand(Node* node) const {
- return !selector()->IsLive(node);
- }
-};
-
-void InstructionSelector::VisitStackSlot(Node* node) {
- StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
- OperandGenerator g(this);
-
- Emit(kArchStackSlot, g.DefineAsRegister(node),
- sequence()->AddImmediate(Constant(slot)), 0, nullptr);
-}
-
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kFloat32:
- opcode = kX87Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kX87Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kX87Movsxbl : kX87Movzxbl;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kX87Movl;
- break;
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
-
- X87OperandGenerator g(this);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
-}
-
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitStore(Node* node) {
- X87OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
- WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineRepresentation rep = store_rep.representation();
-
- if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK(CanBeTaggedPointer(rep));
- AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
- RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
- switch (write_barrier_kind) {
- case kNoWriteBarrier:
- UNREACHABLE();
- break;
- case kMapWriteBarrier:
- record_write_mode = RecordWriteMode::kValueIsMap;
- break;
- case kPointerWriteBarrier:
- record_write_mode = RecordWriteMode::kValueIsPointer;
- break;
- case kFullWriteBarrier:
- record_write_mode = RecordWriteMode::kValueIsAny;
- break;
- }
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t const temp_count = arraysize(temps);
- InstructionCode code = kArchStoreWithWriteBarrier;
- code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
- } else {
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kX87Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kX87Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kX87Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX87Movw;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kX87Movl;
- break;
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
- }
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code =
- opcode | AddressingModeField::encode(addressing_mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
- }
-}
-
-void InstructionSelector::VisitProtectedStore(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
-
-// The architecture supports unaligned access, so VisitLoad is used instead.
-void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
-
-// The architecture supports unaligned access, so VisitStore is used instead.
-void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- X87OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- offset_operand, g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
- }
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- X87OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value)
- : ((rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit)
- ? g.UseByteRegister(value)
- : g.UseRegister(value));
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, value_operand, offset_operand,
- g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
- offset_operand, length_operand, value_operand, g.UseRegister(buffer),
- offset_operand);
- }
-}
-
-namespace {
-
-// Shared routine for multiple binary operations.
-void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- InstructionOperand inputs[4];
- size_t input_count = 0;
- InstructionOperand outputs[2];
- size_t output_count = 0;
-
- // TODO(turbofan): match complex addressing modes.
- if (left == right) {
- // If both inputs refer to the same operand, enforce allocating a register
- // for both of them to ensure that we don't end up generating code like
- // this:
- //
- // mov eax, [ebp-0x10]
- // add eax, [ebp-0x10]
- // jo label
- InstructionOperand const input = g.UseRegister(left);
- inputs[input_count++] = input;
- inputs[input_count++] = input;
- } else if (g.CanBeImmediate(right)) {
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.UseImmediate(right);
- } else {
- if (node->op()->HasProperty(Operator::kCommutative) &&
- g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.Use(right);
- }
-
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
- outputs[output_count++] = g.DefineSameAsFirst(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
-
- DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
- DCHECK_GE(arraysize(inputs), input_count);
- DCHECK_GE(arraysize(outputs), output_count);
-
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
-}
-
-
-// Shared routine for multiple binary operations.
-void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode) {
- FlagsContinuation cont;
- VisitBinop(selector, node, opcode, &cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kX87And);
-}
-
-
-void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kX87Or);
-}
-
-
-void InstructionSelector::VisitWord32Xor(Node* node) {
- X87OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.right().Is(-1)) {
- Emit(kX87Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
- } else {
- VisitBinop(this, node, kX87Xor);
- }
-}
-
-
-// Shared routine for multiple shift operations.
-static inline void VisitShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- if (g.CanBeImmediate(right)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(right));
- } else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseFixed(right, ecx));
- }
-}
-
-
-namespace {
-
-void VisitMulHigh(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister(eax)};
- selector->Emit(
- opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
-}
-
-
-void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister(edx)};
- selector->Emit(opcode, g.DefineAsFixed(node, eax),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
-}
-
-
-void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister(eax)};
- selector->Emit(opcode, g.DefineAsFixed(node, edx),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
-}
-
-void EmitLea(InstructionSelector* selector, Node* result, Node* index,
- int scale, Node* base, Node* displacement,
- DisplacementMode displacement_mode) {
- X87OperandGenerator g(selector);
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GenerateMemoryOperandInputs(index, scale, base, displacement,
- displacement_mode, inputs, &input_count);
-
- DCHECK_NE(0u, input_count);
- DCHECK_GE(arraysize(inputs), input_count);
-
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(result);
-
- InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
-
- selector->Emit(opcode, 1, outputs, input_count, inputs);
-}
-
-} // namespace
-
-
-void InstructionSelector::VisitWord32Shl(Node* node) {
- Int32ScaleMatcher m(node, true);
- if (m.matches()) {
- Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
- return;
- }
- VisitShift(this, node, kX87Shl);
-}
-
-
-void InstructionSelector::VisitWord32Shr(Node* node) {
- VisitShift(this, node, kX87Shr);
-}
-
-
-void InstructionSelector::VisitWord32Sar(Node* node) {
- VisitShift(this, node, kX87Sar);
-}
-
-void InstructionSelector::VisitInt32PairAdd(Node* node) {
- X87OperandGenerator g(this);
-
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
- g.DefineAsRegister(projection1)};
-
- InstructionOperand temps[] = {g.TempRegister()};
-
- Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
- } else {
- // The high word of the result is not used, so we emit the standard 32 bit
- // instruction.
- Emit(kX87Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.Use(node->InputAt(2)));
- }
-}
-
-void InstructionSelector::VisitInt32PairSub(Node* node) {
- X87OperandGenerator g(this);
-
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
- g.DefineAsRegister(projection1)};
-
- InstructionOperand temps[] = {g.TempRegister()};
-
- Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
- } else {
- // The high word of the result is not used, so we emit the standard 32 bit
- // instruction.
- Emit(kX87Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.Use(node->InputAt(2)));
- }
-}
-
-void InstructionSelector::VisitInt32PairMul(Node* node) {
- X87OperandGenerator g(this);
-
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
- // register and one mov instruction.
- InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
- g.UseUnique(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseFixed(node->InputAt(3), ecx)};
-
- InstructionOperand outputs[] = {g.DefineAsFixed(node, eax),
- g.DefineAsFixed(projection1, ecx)};
-
- InstructionOperand temps[] = {g.TempRegister(edx)};
-
- Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
- } else {
- // The high word of the result is not used, so we emit the standard 32 bit
- // instruction.
- Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.Use(node->InputAt(2)));
- }
-}
-
-void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
- Node* node) {
- X87OperandGenerator g(selector);
-
- Node* shift = node->InputAt(2);
- InstructionOperand shift_operand;
- if (g.CanBeImmediate(shift)) {
- shift_operand = g.UseImmediate(shift);
- } else {
- shift_operand = g.UseFixed(shift, ecx);
- }
- InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
- g.UseFixed(node->InputAt(1), edx),
- shift_operand};
-
- InstructionOperand outputs[2];
- InstructionOperand temps[1];
- int32_t output_count = 0;
- int32_t temp_count = 0;
- outputs[output_count++] = g.DefineAsFixed(node, eax);
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- outputs[output_count++] = g.DefineAsFixed(projection1, edx);
- } else {
- temps[temp_count++] = g.TempRegister(edx);
- }
-
- selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
-}
-
-void InstructionSelector::VisitWord32PairShl(Node* node) {
- VisitWord32PairShift(this, kX87ShlPair, node);
-}
-
-void InstructionSelector::VisitWord32PairShr(Node* node) {
- VisitWord32PairShift(this, kX87ShrPair, node);
-}
-
-void InstructionSelector::VisitWord32PairSar(Node* node) {
- VisitWord32PairShift(this, kX87SarPair, node);
-}
-
-void InstructionSelector::VisitWord32Ror(Node* node) {
- VisitShift(this, node, kX87Ror);
-}
-
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitInt32Add(Node* node) {
- X87OperandGenerator g(this);
-
- // Try to match the Add to a lea pattern
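- // (Illustrative: Int32Add(x, Word32Shl(y, 2)) matches with base x, index y
- // and scale 2, so it is emitted as a single kX87Lea with mode kMode_MR4.)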
- BaseWithIndexAndDisplacement32Matcher m(node);
- if (m.matches() &&
- (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode = g.GenerateMemoryOperandInputs(
- m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
- inputs, &input_count);
-
- DCHECK_NE(0u, input_count);
- DCHECK_GE(arraysize(inputs), input_count);
-
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
-
- InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
- Emit(opcode, 1, outputs, input_count, inputs);
- return;
- }
-
- // No lea pattern match, use add
- VisitBinop(this, node, kX87Add);
-}
-
-
-void InstructionSelector::VisitInt32Sub(Node* node) {
- X87OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().Is(0)) {
- Emit(kX87Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
- } else {
- VisitBinop(this, node, kX87Sub);
- }
-}
-
-
-void InstructionSelector::VisitInt32Mul(Node* node) {
- Int32ScaleMatcher m(node, true);
- if (m.matches()) {
- Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
- return;
- }
- X87OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- if (g.CanBeImmediate(right)) {
- Emit(kX87Imul, g.DefineAsRegister(node), g.Use(left),
- g.UseImmediate(right));
- } else {
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
- }
-}
-
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
- VisitMulHigh(this, node, kX87ImulHigh);
-}
-
-
-void InstructionSelector::VisitUint32MulHigh(Node* node) {
- VisitMulHigh(this, node, kX87UmulHigh);
-}
-
-
-void InstructionSelector::VisitInt32Div(Node* node) {
- VisitDiv(this, node, kX87Idiv);
-}
-
-
-void InstructionSelector::VisitUint32Div(Node* node) {
- VisitDiv(this, node, kX87Udiv);
-}
-
-
-void InstructionSelector::VisitInt32Mod(Node* node) {
- VisitMod(this, node, kX87Idiv);
-}
-
-
-void InstructionSelector::VisitUint32Mod(Node* node) {
- VisitMod(this, node, kX87Udiv);
-}
-
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32ToFloat64, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Int32ToFloat32, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Uint32ToFloat32, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Uint32ToFloat64, g.DefineAsFixed(node, stX_0),
- g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToFloat32, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87BitcastIF, g.DefineAsFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
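-// Note on the float arithmetic visitors below (illustrative summary): both
-// operands are first pushed onto the x87 FPU stack with kX87PushFloat32 /
-// kX87PushFloat64, and the arithmetic opcode leaves its result in st(0),
-// which is why each result node is defined as fixed to stX_0.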
-void InstructionSelector::VisitFloat32Add(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Mod(Node* node) {
- X87OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempRegister(eax)};
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
-}
-
-void InstructionSelector::VisitFloat32Max(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundDown),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundDown),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundToZero),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- UNREACHABLE();
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundToNearest),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundToNearest),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
- InstructionCode opcode) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
-}
-
-void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
- InstructionCode opcode) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
-}
-
-void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
- Node* node) {
- X87OperandGenerator g(this);
-
- // Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- InstructionOperand temps[] = {g.TempRegister()};
- size_t const temp_count = arraysize(temps);
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
- 0, nullptr, 0, nullptr, temp_count, temps);
-
- // Poke any stack arguments.
- for (size_t n = 0; n < arguments->size(); ++n) {
- PushParameter input = (*arguments)[n];
- if (input.node()) {
- int const slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
- Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
- }
- }
- } else {
- // Push any stack arguments.
- for (PushParameter input : base::Reversed(*arguments)) {
- // TODO(titzer): handle pushing double parameters.
- if (input.node() == nullptr) continue;
- InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- Emit(kX87Push, g.NoOutput(), value);
- }
- }
-}
-
-
-bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
-
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
-
-namespace {
-
-void VisitCompareWithMemoryOperand(InstructionSelector* selector,
- InstructionCode opcode, Node* left,
- InstructionOperand right,
- FlagsContinuation* cont) {
- DCHECK(left->opcode() == IrOpcode::kLoad);
- X87OperandGenerator g(selector);
- size_t input_count = 0;
- InstructionOperand inputs[6];
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
- opcode |= AddressingModeField::encode(addressing_mode);
- opcode = cont->Encode(opcode);
- inputs[input_count++] = right;
-
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- InstructionOperand output = g.DefineAsRegister(cont->result());
- selector->Emit(opcode, 1, &output, input_count, inputs);
- } else {
- DCHECK(cont->IsTrap());
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- }
-}
-
-// Shared routine for multiple compare operations.
-void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand left, InstructionOperand right,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
-}
-
-
-// Shared routine for multiple compare operations.
-void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- Node* left, Node* right, FlagsContinuation* cont,
- bool commutative) {
- X87OperandGenerator g(selector);
- if (commutative && g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
-}
-
-MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
- if (hint_node->opcode() == IrOpcode::kLoad) {
- MachineType hint = LoadRepresentationOf(hint_node->op());
- if (node->opcode() == IrOpcode::kInt32Constant ||
- node->opcode() == IrOpcode::kInt64Constant) {
- int64_t constant = node->opcode() == IrOpcode::kInt32Constant
- ? OpParameter<int32_t>(node)
- : OpParameter<int64_t>(node);
- if (hint == MachineType::Int8()) {
- if (constant >= std::numeric_limits<int8_t>::min() &&
- constant <= std::numeric_limits<int8_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Uint8()) {
- if (constant >= std::numeric_limits<uint8_t>::min() &&
- constant <= std::numeric_limits<uint8_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Int16()) {
- if (constant >= std::numeric_limits<int16_t>::min() &&
- constant <= std::numeric_limits<int16_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Uint16()) {
- if (constant >= std::numeric_limits<uint16_t>::min() &&
- constant <= std::numeric_limits<uint16_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Int32()) {
- return hint;
- } else if (hint == MachineType::Uint32()) {
- if (constant >= 0) return hint;
- }
- }
- }
- return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
- : MachineType::None();
-}
-
-// Tries to match the size of the given opcode to that of the operands, if
-// possible.
-InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
- Node* right, FlagsContinuation* cont) {
- // TODO(epertoso): we can probably get some size information out of phi nodes.
- // If the load representations don't match, both operands will be
- // zero/sign-extended to 32 bits.
- MachineType left_type = MachineTypeForNarrow(left, right);
- MachineType right_type = MachineTypeForNarrow(right, left);
- if (left_type == right_type) {
- switch (left_type.representation()) {
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8: {
- if (opcode == kX87Test) return kX87Test8;
- if (opcode == kX87Cmp) {
- if (left_type.semantic() == MachineSemantic::kUint32) {
- cont->OverwriteUnsignedIfSigned();
- } else {
- CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
- }
- return kX87Cmp8;
- }
- break;
- }
- case MachineRepresentation::kWord16:
- if (opcode == kX87Test) return kX87Test16;
- if (opcode == kX87Cmp) {
- if (left_type.semantic() == MachineSemantic::kUint32) {
- cont->OverwriteUnsignedIfSigned();
- } else {
- CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
- }
- return kX87Cmp16;
- }
- break;
- default:
- break;
- }
- }
- return opcode;
-}
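-
-// Illustrative example for the narrowing above: comparing an Int8 load
-// against the constant 5 yields MachineType::Int8() for both operands, so a
-// kX87Cmp is narrowed to kX87Cmp8.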
-
-// Shared routine for multiple float32 compare operations (inputs commuted).
-void VisitFloat32Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
- g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->kind(), cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(cont->Encode(kX87Float32Cmp),
- g.DefineAsByteRegister(cont->result()));
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
- g.UseImmediate(cont->trap_id()));
- }
-}
-
-
-// Shared routine for multiple float64 compare operations (inputs commuted).
-void VisitFloat64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
- g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->kind(), cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(cont->Encode(kX87Float64Cmp),
- g.DefineAsByteRegister(cont->result()));
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
- g.UseImmediate(cont->trap_id()));
- }
-}
-
-// Shared routine for multiple word compare operations.
-void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- InstructionCode narrowed_opcode =
- TryNarrowOpcodeSize(opcode, left, right, cont);
-
- int effect_level = selector->GetEffectLevel(node);
- if (cont->IsBranch()) {
- effect_level = selector->GetEffectLevel(
- cont->true_block()->PredecessorAt(0)->control_input());
- }
-
- // If one of the two inputs is an immediate, make sure it's on the right, or
- // if one of the two inputs is a memory operand, make sure it's on the left.
- if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
- (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
- !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
- // Match immediates on right side of comparison.
- if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
- return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
- g.UseImmediate(right), cont);
- }
- return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
- cont);
- }
-
- // Match memory operands on left side of comparison.
- if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
- bool needs_byte_register =
- narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
- return VisitCompareWithMemoryOperand(
- selector, narrowed_opcode, left,
- needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
- cont);
- }
-
- if (g.CanBeBetterLeftOperand(right)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
- return VisitCompare(selector, opcode, left, right, cont,
- node->op()->HasProperty(Operator::kCommutative));
-}
-
-void VisitWordCompare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
- LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
- ExternalReference js_stack_limit =
- ExternalReference::address_of_stack_limit(selector->isolate());
- if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
- // Compare(Load(js_stack_limit), LoadStackPointer)
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- InstructionCode opcode = cont->Encode(kX87StackCheck);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
- selector->Emit(opcode, g.DefineAsRegister(cont->result()));
- }
- return;
- }
- }
- VisitWordCompare(selector, node, kX87Cmp, cont);
-}
-
-
-// Shared routine for word comparison with zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
- Int32BinopMatcher m(value);
- if (!m.right().Is(0)) break;
-
- user = value;
- value = m.left().node();
- cont->Negate();
- }
-
- if (selector->CanCover(user, value)) {
- switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
- cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kInt32LessThan:
- cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kInt32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kUint32LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kUint32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kFloat32Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(selector, value, cont);
- case IrOpcode::kFloat32LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(selector, value, cont);
- case IrOpcode::kFloat32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
- case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kProjection:
- // Check if this is the overflow output projection of an
- // <Operation>WithOverflow node.
- if (ProjectionIndexOf(value->op()) == 1u) {
- // We cannot combine the <Operation>WithOverflow with this branch
- // unless the 0th projection (the use of the actual value of the
- // <Operation>) is either nullptr, which means there is no use of the
- // actual value, or was already defined, which means it is scheduled
- // *AFTER* this branch.
- Node* const node = value->InputAt(0);
- Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
- switch (node->opcode()) {
- case IrOpcode::kInt32AddWithOverflow:
- cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX87Add, cont);
- case IrOpcode::kInt32SubWithOverflow:
- cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX87Sub, cont);
- case IrOpcode::kInt32MulWithOverflow:
- cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX87Imul, cont);
- default:
- break;
- }
- }
- }
- break;
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kX87Test, cont);
- default:
- break;
- }
- }
-
- // Continuation could not be combined with a compare, emit compare against 0.
- X87OperandGenerator g(selector);
- VisitCompare(selector, kX87Cmp, g.Use(value), g.TempImmediate(0), cont);
-}
-
-} // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
- X87OperandGenerator g(this);
- InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
-
- // Emit either ArchTableSwitch or ArchLookupSwitch.
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
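- // Worked example (illustrative): with sw.value_range == 10 and
- // sw.case_count == 8, the table variant costs 14 + 3 * 3 = 23 versus
- // 19 + 3 * 8 = 43 for the lookup variant, so (given a suitable min_value)
- // a jump table is emitted.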
- if (sw.case_count > 4 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kX87Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
- }
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
- }
-
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
-}
-
-
-void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
- }
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop(this, node, kX87Add, &cont);
- }
- FlagsContinuation cont;
- VisitBinop(this, node, kX87Add, &cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop(this, node, kX87Sub, &cont);
- }
- FlagsContinuation cont;
- VisitBinop(this, node, kX87Sub, &cont);
-}
-
-void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop(this, node, kX87Imul, &cont);
- }
- FlagsContinuation cont;
- VisitBinop(this, node, kX87Imul, &cont);
-}
-
-void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
- VisitFloat32Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
- VisitFloat32Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
- VisitFloat32Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
- VisitFloat64Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
- VisitFloat64Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
- VisitFloat64Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ExtractLowWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ExtractHighWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
- X87OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- Emit(kX87Float64InsertLowWord32, g.UseFixed(node, stX_0), g.UseRegister(left),
- g.UseRegister(right));
-}
-
-
-void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
- X87OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- Emit(kX87Float64InsertHighWord32, g.UseFixed(node, stX_0),
- g.UseRegister(left), g.UseRegister(right));
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64SilenceNaN, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitAtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
-}
-
-void InstructionSelector::VisitAtomicStore(Node* node) {
- X87OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kX87Xchgb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX87Xchgw;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX87Xchgl;
- break;
- default:
- UNREACHABLE();
- break;
- }
- AddressingMode addressing_mode;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
-}
-
-void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
- UNREACHABLE();
-}
-
-void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
- UNREACHABLE();
-}
-
-// static
-MachineOperatorBuilder::Flags
-InstructionSelector::SupportedMachineOperatorFlags() {
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kWord32ShiftIsSafe;
- if (CpuFeatures::IsSupported(POPCNT)) {
- flags |= MachineOperatorBuilder::kWord32Popcnt;
- }
-
- flags |= MachineOperatorBuilder::kFloat32RoundDown |
- MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat32RoundUp |
- MachineOperatorBuilder::kFloat64RoundUp |
- MachineOperatorBuilder::kFloat32RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kFloat64RoundTiesEven;
- return flags;
-}
-
-// static
-MachineOperatorBuilder::AlignmentRequirements
-InstructionSelector::AlignmentRequirements() {
- return MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
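Before its removal, the x87 selector's VisitSwitch (above) picked between a jump table and a compare chain with a simple space/time cost comparison; the following is a purely illustrative worked instance of that check with invented case_count/value_range numbers (the real code additionally requires sw.min_value > INT32_MIN).

// Worked instance of the table-vs-lookup cost check from VisitSwitch above.
// case_count and value_range are made-up values, used only for illustration.
#include <cstdio>

int main() {
  const int case_count = 7, value_range = 10;
  const int table_space_cost = 4 + value_range;      // 14
  const int table_time_cost = 3;                     // one indirect jump
  const int lookup_space_cost = 3 + 2 * case_count;  // 17
  const int lookup_time_cost = case_count;           // linear compare chain
  const bool use_table =
      case_count > 4 &&
      table_space_cost + 3 * table_time_cost <=      // 14 + 9  = 23
          lookup_space_cost + 3 * lookup_time_cost;  // 17 + 21 = 38
  std::printf("emit table switch: %s\n", use_table ? "yes" : "no");  // yes
}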
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 8eb2750e1a..6ad2abea5f 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -11,6 +11,7 @@
#include "src/objects/dictionary.h"
#include "src/objects/map-inl.h"
#include "src/objects/regexp-match-info.h"
+#include "src/objects/shared-function-info-inl.h"
namespace v8 {
namespace internal {
@@ -22,11 +23,7 @@ ScriptContextTable* ScriptContextTable::cast(Object* context) {
return reinterpret_cast<ScriptContextTable*>(context);
}
-
-int ScriptContextTable::used() const {
- return Smi::cast(get(kUsedSlot))->value();
-}
-
+int ScriptContextTable::used() const { return Smi::ToInt(get(kUsedSlot)); }
void ScriptContextTable::set_used(int used) {
set(kUsedSlot, Smi::FromInt(used));
@@ -132,10 +129,6 @@ bool Context::IsScriptContext() {
return map == map->GetHeap()->script_context_map();
}
-bool Context::OSROptimizedCodeCacheIsCleared() {
- return osr_code_table() == GetHeap()->empty_fixed_array();
-}
-
bool Context::HasSameSecurityTokenAs(Context* that) {
return this->native_context()->security_token() ==
that->native_context()->security_token();
@@ -158,6 +151,72 @@ bool Context::HasSameSecurityTokenAs(Context* that) {
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
#undef NATIVE_CONTEXT_FIELD_ACCESSORS
+#define CHECK_FOLLOWS2(v1, v2) STATIC_ASSERT((v1 + 1) == (v2))
+#define CHECK_FOLLOWS4(v1, v2, v3, v4) \
+ CHECK_FOLLOWS2(v1, v2); \
+ CHECK_FOLLOWS2(v2, v3); \
+ CHECK_FOLLOWS2(v3, v4)
+
+int Context::FunctionMapIndex(LanguageMode language_mode, FunctionKind kind,
+ bool has_shared_name, bool needs_home_object) {
+ if (IsClassConstructor(kind)) {
+ // Like the strict function map, but with no 'name' accessor. 'name'
+ // needs to be the last property and it is added during instantiation,
+    // in case a static property with the same name exists.
+ return CLASS_FUNCTION_MAP_INDEX;
+ }
+
+ int base = 0;
+ if (IsGeneratorFunction(kind)) {
+ CHECK_FOLLOWS4(GENERATOR_FUNCTION_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
+ GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+ CHECK_FOLLOWS4(
+ ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
+ ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+
+ base = IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX
+ : GENERATOR_FUNCTION_MAP_INDEX;
+
+ } else if (IsAsyncFunction(kind)) {
+ CHECK_FOLLOWS4(ASYNC_FUNCTION_MAP_INDEX, ASYNC_FUNCTION_WITH_NAME_MAP_INDEX,
+ ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX,
+ ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+
+ base = ASYNC_FUNCTION_MAP_INDEX;
+
+ } else if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
+ IsAccessorFunction(kind)) {
+ DCHECK_IMPLIES(IsArrowFunction(kind), !needs_home_object);
+ CHECK_FOLLOWS4(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ METHOD_WITH_NAME_MAP_INDEX,
+ METHOD_WITH_HOME_OBJECT_MAP_INDEX,
+ METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX);
+
+ base = STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+
+ } else {
+ DCHECK(!needs_home_object);
+ CHECK_FOLLOWS2(SLOPPY_FUNCTION_MAP_INDEX,
+ SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX);
+ CHECK_FOLLOWS2(STRICT_FUNCTION_MAP_INDEX,
+ STRICT_FUNCTION_WITH_NAME_MAP_INDEX);
+
+ base = is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+ : SLOPPY_FUNCTION_MAP_INDEX;
+ }
+ int offset = static_cast<int>(!has_shared_name) |
+ (static_cast<int>(needs_home_object) << 1);
+ DCHECK_EQ(0, offset & ~3);
+
+ return base + offset;
+}
+
+#undef CHECK_FOLLOWS2
+#undef CHECK_FOLLOWS4
} // namespace internal
} // namespace v8
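The replacement FunctionMapIndex above selects a map by adding a two-bit offset to a per-kind base slot; below is a minimal standalone sketch of just that encoding, where the base constant is a placeholder rather than a real native-context slot.

// Sketch of the two-bit offset packing used by Context::FunctionMapIndex.
// kStrictBase is a stand-in; the real code uses the *_FUNCTION_MAP_INDEX slots.
#include <cassert>

constexpr int kStrictBase = 100;  // hypothetical base slot

constexpr int FunctionMapOffset(bool has_shared_name, bool needs_home_object) {
  // Bit 0 is set when the closure has no statically known name (selecting the
  // "...WITH_NAME..." variant); bit 1 is set when it needs a home object.
  return static_cast<int>(!has_shared_name) |
         (static_cast<int>(needs_home_object) << 1);
}

int main() {
  assert(kStrictBase + FunctionMapOffset(true, false) == kStrictBase);       // base map
  assert(kStrictBase + FunctionMapOffset(false, false) == kStrictBase + 1);  // ...WITH_NAME
  assert(kStrictBase + FunctionMapOffset(true, true) == kStrictBase + 2);    // ...WITH_HOME_OBJECT
  assert(kStrictBase + FunctionMapOffset(false, true) == kStrictBase + 3);   // ...WITH_NAME_AND_HOME_OBJECT
}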
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index a4795af0f2..725e55e72f 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -4,6 +4,7 @@
#include "src/contexts.h"
+#include "src/ast/modules.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
@@ -199,7 +200,6 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
int* index, PropertyAttributes* attributes,
InitializationFlag* init_flag,
VariableMode* variable_mode) {
- DCHECK(!IsModuleContext());
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
@@ -305,7 +305,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// 2. Check the context proper if it has slots.
if (context->IsFunctionContext() || context->IsBlockContext() ||
- context->IsScriptContext() || context->IsEvalContext()) {
+ context->IsScriptContext() || context->IsEvalContext() ||
+ context->IsModuleContext()) {
// Use serialized scope information of functions and blocks to search
// for the context index.
Handle<ScopeInfo> scope_info(context->scope_info());
@@ -346,6 +347,27 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
}
+ // Lookup variable in module imports and exports.
+ if (context->IsModuleContext()) {
+ VariableMode mode;
+ InitializationFlag flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int cell_index =
+ scope_info->ModuleIndex(name, &mode, &flag, &maybe_assigned_flag);
+ if (cell_index != 0) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found in module imports or exports\n");
+ }
+ *index = cell_index;
+ *variable_mode = mode;
+ *init_flag = flag;
+ *attributes = ModuleDescriptor::GetCellIndexKind(cell_index) ==
+ ModuleDescriptor::kExport
+ ? GetAttributesForMode(mode)
+ : READ_ONLY;
+ return handle(context->module(), isolate);
+ }
+ }
} else if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (String::Equals(name, handle(context->catch_name()))) {
@@ -398,9 +420,11 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
do {
context = Handle<Context>(context->previous(), isolate);
// If we come across a whitelist context, and the name is not
- // whitelisted, then only consider with, script or native contexts.
+ // whitelisted, then only consider with, script, module or native
+ // contexts.
} while (failed_whitelist && !context->IsScriptContext() &&
- !context->IsNativeContext() && !context->IsWithContext());
+ !context->IsNativeContext() && !context->IsWithContext() &&
+ !context->IsModuleContext());
}
} while (follow_context_chain);
@@ -410,171 +434,10 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
return Handle<Object>::null();
}
-static const int kSharedOffset = 0;
-static const int kCachedCodeOffset = 1;
-static const int kOsrAstIdOffset = 2;
-static const int kEntryLength = 3;
-static const int kInitialLength = kEntryLength;
-
-int Context::SearchOSROptimizedCodeCacheEntry(SharedFunctionInfo* shared,
- BailoutId osr_ast_id) {
- DisallowHeapAllocation no_gc;
- DCHECK(this->IsNativeContext());
- DCHECK(!osr_ast_id.IsNone());
- if (!OSROptimizedCodeCacheIsCleared()) {
- FixedArray* osr_code_table = this->osr_code_table();
- int length = osr_code_table->length();
- Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
- for (int i = 0; i < length; i += kEntryLength) {
- if (WeakCell::cast(osr_code_table->get(i + kSharedOffset))->value() ==
- shared &&
- osr_code_table->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
- return i;
- }
- }
- }
- return -1;
-}
-
-Code* Context::SearchOSROptimizedCodeCache(SharedFunctionInfo* shared,
- BailoutId osr_ast_id) {
- DCHECK(this->IsNativeContext());
- int entry = SearchOSROptimizedCodeCacheEntry(shared, osr_ast_id);
- if (entry != -1) {
- FixedArray* code_map = osr_code_table();
- DCHECK_LE(entry + kEntryLength, code_map->length());
- WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
- return cell->cleared() ? nullptr : Code::cast(cell->value());
- }
- return nullptr;
-}
-
-void Context::AddToOSROptimizedCodeCache(Handle<Context> native_context,
- Handle<SharedFunctionInfo> shared,
- Handle<Code> code,
- BailoutId osr_ast_id) {
- DCHECK(native_context->IsNativeContext());
- DCHECK(!osr_ast_id.IsNone());
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- Isolate* isolate = native_context->GetIsolate();
- if (isolate->serializer_enabled()) return;
-
- STATIC_ASSERT(kEntryLength == 3);
- Handle<FixedArray> new_code_map;
- int entry;
-
- if (native_context->OSROptimizedCodeCacheIsCleared()) {
- new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
- entry = 0;
- } else {
- Handle<FixedArray> old_code_map(native_context->osr_code_table(), isolate);
- entry =
- native_context->SearchOSROptimizedCodeCacheEntry(*shared, osr_ast_id);
- if (entry >= 0) {
- // Just set the code of the entry.
- Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
- old_code_map->set(entry + kCachedCodeOffset, *code_cell);
- return;
- }
-
- // Can we reuse an entry?
- DCHECK(entry < 0);
- int length = old_code_map->length();
- for (int i = 0; i < length; i += kEntryLength) {
- if (WeakCell::cast(old_code_map->get(i + kSharedOffset))->cleared()) {
- new_code_map = old_code_map;
- entry = i;
- break;
- }
- }
-
- if (entry < 0) {
- // Copy old optimized code map and append one new entry.
- new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
- old_code_map, kEntryLength, TENURED);
- entry = old_code_map->length();
- }
- }
-
- Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
- Handle<WeakCell> shared_cell = isolate->factory()->NewWeakCell(shared);
-
- new_code_map->set(entry + kSharedOffset, *shared_cell);
- new_code_map->set(entry + kCachedCodeOffset, *code_cell);
- new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
-
-#ifdef DEBUG
- for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
- WeakCell* cell = WeakCell::cast(new_code_map->get(i + kSharedOffset));
- DCHECK(cell->cleared() || cell->value()->IsSharedFunctionInfo());
- cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
- DCHECK(cell->cleared() ||
- (cell->value()->IsCode() &&
- Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
- DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
- }
-#endif
-
- FixedArray* old_code_map = native_context->osr_code_table();
- if (old_code_map != *new_code_map) {
- native_context->set_osr_code_table(*new_code_map);
- }
-}
-
-void Context::EvictFromOSROptimizedCodeCache(Code* optimized_code,
- const char* reason) {
- DCHECK(IsNativeContext());
- DisallowHeapAllocation no_gc;
- if (OSROptimizedCodeCacheIsCleared()) return;
-
- Heap* heap = GetHeap();
- FixedArray* code_map = osr_code_table();
- int dst = 0;
- int length = code_map->length();
- for (int src = 0; src < length; src += kEntryLength) {
- if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
- optimized_code) {
- BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
- if (FLAG_trace_opt) {
- PrintF(
- "[evicting entry from native context optimizing code map (%s) for ",
- reason);
- ShortPrint();
- DCHECK(!osr.IsNone());
- PrintF(" (osr ast id %d)]\n", osr.ToInt());
- }
- // Evict the src entry by not copying it to the dst entry.
- continue;
- }
- // Keep the src entry by copying it to the dst entry.
- if (dst != src) {
- code_map->set(dst + kSharedOffset, code_map->get(src + kSharedOffset));
- code_map->set(dst + kCachedCodeOffset,
- code_map->get(src + kCachedCodeOffset));
- code_map->set(dst + kOsrAstIdOffset,
- code_map->get(src + kOsrAstIdOffset));
- }
- dst += kEntryLength;
- }
- if (dst != length) {
- // Always trim even when array is cleared because of heap verifier.
- heap->RightTrimFixedArray(code_map, length - dst);
- if (code_map->length() == 0) {
- ClearOSROptimizedCodeCache();
- }
- }
-}
-
-void Context::ClearOSROptimizedCodeCache() {
- DCHECK(IsNativeContext());
- FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
- set_osr_code_table(empty_fixed_array);
-}
-
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
- Isolate* isolate = GetIsolate();
#ifdef ENABLE_SLOW_DCHECKS
+ Isolate* isolate = GetIsolate();
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined(isolate)) {
@@ -596,15 +459,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
CHECK(found);
#endif
- // If the function link field is already used then the function was
- // enqueued as a code flushing candidate and we remove it now.
- if (!function->next_function_link()->IsUndefined(isolate)) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictCandidate(function);
- }
-
- DCHECK(function->next_function_link()->IsUndefined(isolate));
-
+ DCHECK(function->next_function_link()->IsUndefined(GetIsolate()));
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST),
UPDATE_WEAK_WRITE_BARRIER);
set(OPTIMIZED_FUNCTIONS_LIST, function, UPDATE_WEAK_WRITE_BARRIER);
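In the module-context branch added to Lookup above, exported bindings keep the attributes implied by their variable mode while imported bindings are always read-only; the tiny sketch below mirrors only that decision, with CellKind and the helper as stand-ins (the real classification lives in ModuleDescriptor::GetCellIndexKind).

// Illustrative only: mirrors the attribute choice for module cells.
#include <cassert>

enum class CellKind { kExport, kImport };
enum Attributes { NONE = 0, READ_ONLY = 1 };

Attributes AttributesForModuleCell(CellKind kind, Attributes mode_attributes) {
  // Exports keep what the variable mode implies (e.g. a const export stays
  // read-only); imports can never be assigned through the importing module.
  return kind == CellKind::kExport ? mode_attributes : READ_ONLY;
}

int main() {
  assert(AttributesForModuleCell(CellKind::kExport, NONE) == NONE);
  assert(AttributesForModuleCell(CellKind::kImport, NONE) == READ_ONLY);
}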
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 8377139edd..bf3ef32556 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -20,8 +20,6 @@ enum ContextLookupFlags {
DONT_FOLLOW_CHAINS = 0,
FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN,
- LEXICAL_TEST =
- FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT,
};
@@ -87,44 +85,44 @@ enum ContextLookupFlags {
V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
- V(ARRAY_POP_INDEX, JSFunction, array_pop) \
- V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
- V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
- V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
- V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
- V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
- V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
- V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(ERROR_TO_STRING, JSFunction, error_to_string) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
- V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
- V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
- V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
- V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_compile_error_function) \
- V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
+ V(ARRAY_POP_INDEX, JSFunction, array_pop) \
+ V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
+ V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
+ V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
+ V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
+ V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
+ V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
+ V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
+ V(MAP_DELETE_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_INDEX, JSFunction, map_has) \
+ V(MAP_SET_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V) \
@@ -219,6 +217,7 @@ enum ContextLookupFlags {
async_generator_await_reject_shared_fun) \
V(ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
async_generator_await_resolve_shared_fun) \
+ V(ATOMICS_OBJECT, JSObject, atomics_object) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
bound_function_with_constructor_map) \
@@ -255,6 +254,7 @@ enum ContextLookupFlags {
V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_array_iterator_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(INITIAL_ERROR_PROTOTYPE_INDEX, JSObject, initial_error_prototype) \
V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
V(INITIAL_ASYNC_GENERATOR_PROTOTYPE_INDEX, JSObject, \
initial_async_generator_prototype) \
@@ -272,16 +272,16 @@ enum ContextLookupFlags {
V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
V(INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX, JSFunction, \
intl_v8_break_iterator_function) \
- V(JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX, Map, \
+ V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_smi_elements_map_index) \
- V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
+ V(JS_ARRAY_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_holey_smi_elements_map_index) \
- V(JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, Map, js_array_fast_elements_map_index) \
- V(JS_ARRAY_FAST_HOLEY_ELEMENTS_MAP_INDEX, Map, \
+ V(JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX, Map, js_array_fast_elements_map_index) \
+ V(JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_holey_elements_map_index) \
- V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ V(JS_ARRAY_PACKED_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_double_elements_map_index) \
- V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_holey_double_elements_map_index) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
@@ -291,7 +291,9 @@ enum ContextLookupFlags {
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
- V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
+ V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \
+ V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \
+ V(MAP_VALUE_ITERATOR_MAP_INDEX, Map, map_value_iterator_map) \
V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index) \
V(MATH_RANDOM_CACHE_INDEX, Object, math_random_cache) \
V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
@@ -301,11 +303,9 @@ enum ContextLookupFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
- V(OSR_CODE_TABLE_INDEX, FixedArray, osr_code_table) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
- V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
promise_get_capabilities_executor_shared_fun) \
@@ -320,6 +320,8 @@ enum ContextLookupFlags {
promise_value_thunk_finally_shared_fun) \
V(PROMISE_THROWER_FINALLY_SHARED_FUN, SharedFunctionInfo, \
promise_thrower_finally_shared_fun) \
+ V(PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
+ promise_all_resolve_element_shared_fun) \
V(PROMISE_PROTOTYPE_MAP_INDEX, Map, promise_prototype_map) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -332,50 +334,72 @@ enum ContextLookupFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
- V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
+ V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
+ V(SET_KEY_VALUE_ITERATOR_MAP_INDEX, Map, set_key_value_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
- V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
- V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- sloppy_function_without_prototype_map) \
- V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
- sloppy_function_with_readonly_prototype_map) \
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
+ V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \
slow_object_with_null_prototype_map) \
V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \
slow_object_with_object_prototype_map) \
V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
slow_template_instantiations_cache) \
- V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
- V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map) \
+ /* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
+ /* must remain together. */ \
+ V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
+ V(SLOPPY_FUNCTION_WITH_NAME_MAP_INDEX, Map, sloppy_function_with_name_map) \
+ V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_without_prototype_map) \
+ V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_with_readonly_prototype_map) \
V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
+ V(STRICT_FUNCTION_WITH_NAME_MAP_INDEX, Map, strict_function_with_name_map) \
+ V(STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
+ strict_function_with_readonly_prototype_map) \
V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_function_without_prototype_map) \
+ V(METHOD_WITH_NAME_MAP_INDEX, Map, method_with_name_map) \
+ V(METHOD_WITH_HOME_OBJECT_MAP_INDEX, Map, method_with_home_object_map) \
+ V(METHOD_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ method_with_name_and_home_object_map) \
+ V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map) \
+ V(ASYNC_FUNCTION_WITH_NAME_MAP_INDEX, Map, async_function_with_name_map) \
+ V(ASYNC_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
+ async_function_with_home_object_map) \
+ V(ASYNC_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ async_function_with_name_and_home_object_map) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
+ V(GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \
+ generator_function_with_name_map) \
+ V(GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
+ generator_function_with_home_object_map) \
+ V(GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ generator_function_with_name_and_home_object_map) \
V(ASYNC_GENERATOR_FUNCTION_MAP_INDEX, Map, async_generator_function_map) \
+ V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_MAP_INDEX, Map, \
+ async_generator_function_with_name_map) \
+ V(ASYNC_GENERATOR_FUNCTION_WITH_HOME_OBJECT_MAP_INDEX, Map, \
+ async_generator_function_with_home_object_map) \
+ V(ASYNC_GENERATOR_FUNCTION_WITH_NAME_AND_HOME_OBJECT_MAP_INDEX, Map, \
+ async_generator_function_with_name_and_home_object_map) \
V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(NATIVE_FUNCTION_MAP_INDEX, Map, native_function_map) \
- V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
- V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym) \
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
- V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
- V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym) \
V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
- V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
- V(EXPORTS_CONTAINER, Object, exports_container) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
@@ -517,7 +541,7 @@ class Context: public FixedArray {
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
- FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX,
+ FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX,
MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
// This slot holds the thrown value in catch contexts.
@@ -528,6 +552,11 @@ class Context: public FixedArray {
WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
};
+ // A region of native context entries containing maps for functions created
+ // by Builtins::kFastNewClosure.
+ static const int FIRST_FUNCTION_MAP_INDEX = SLOPPY_FUNCTION_MAP_INDEX;
+ static const int LAST_FUNCTION_MAP_INDEX = CLASS_FUNCTION_MAP_INDEX;
+
void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
@@ -590,26 +619,6 @@ class Context: public FixedArray {
inline bool HasSameSecurityTokenAs(Context* that);
- // Removes a specific optimized code object from the optimized code map.
- // In case of non-OSR the code reference is cleared from the cache entry but
- // the entry itself is left in the map in order to proceed sharing literals.
- void EvictFromOSROptimizedCodeCache(Code* optimized_code, const char* reason);
-
- // Clear optimized code map.
- void ClearOSROptimizedCodeCache();
-
- // A native context keeps track of all osrd optimized functions.
- inline bool OSROptimizedCodeCacheIsCleared();
- Code* SearchOSROptimizedCodeCache(SharedFunctionInfo* shared,
- BailoutId osr_ast_id);
- int SearchOSROptimizedCodeCacheEntry(SharedFunctionInfo* shared,
- BailoutId osr_ast_id);
-
- static void AddToOSROptimizedCodeCache(Handle<Context> native_context,
- Handle<SharedFunctionInfo> shared,
- Handle<Code> code,
- BailoutId osr_ast_id);
-
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -651,7 +660,11 @@ class Context: public FixedArray {
// of with, or as a property of the global object. *index is -1 and
// *attributes is not ABSENT.
//
- // 3) result.is_null():
+ // 3) result->IsModule():
+ // The binding was found in module imports or exports.
+  //    *attributes is never ABSENT. Imports are READ_ONLY.
+ //
+ // 4) result.is_null():
// There was no binding found, *index is always -1 and *attributes is
// always ABSENT.
Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
@@ -664,32 +677,9 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
- // Note: Must be kept in sync with the FastNewClosure builtin.
- if (IsGeneratorFunction(kind)) {
- return IsAsyncFunction(kind) ? ASYNC_GENERATOR_FUNCTION_MAP_INDEX
- : GENERATOR_FUNCTION_MAP_INDEX;
- }
-
- if (IsAsyncFunction(kind)) {
- return ASYNC_FUNCTION_MAP_INDEX;
- }
-
- if (IsClassConstructor(kind)) {
- // Like the strict function map, but with no 'name' accessor. 'name'
- // needs to be the last property and it is added during instantiation,
- // in case a static property with the same name exists"
- return CLASS_FUNCTION_MAP_INDEX;
- }
-
- if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
- IsAccessorFunction(kind)) {
- return STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
- }
-
- return is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
- : SLOPPY_FUNCTION_MAP_INDEX;
- }
+ static inline int FunctionMapIndex(LanguageMode language_mode,
+ FunctionKind kind, bool has_shared_name,
+ bool needs_home_object);
static int ArrayMapIndex(ElementsKind elements_kind) {
DCHECK(IsFastElementsKind(elements_kind));
@@ -700,13 +690,11 @@ class Context: public FixedArray {
static const int kNotFound = -1;
// GC support.
- typedef FixedBodyDescriptor<
- kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
+ typedef FixedBodyDescriptor<kHeaderSize, kSize, kSize> BodyDescriptor;
typedef FixedBodyDescriptor<
- kHeaderSize,
- kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
- kSize> MarkCompactBodyDescriptor;
+ kHeaderSize, kHeaderSize + FIRST_WEAK_SLOT * kPointerSize, kSize>
+ BodyDescriptorWeak;
private:
#ifdef DEBUG
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index c4753ebc93..fedeb4e9a8 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -153,18 +153,18 @@ bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value) {
}
int32_t NumberToInt32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
+ if (number->IsSmi()) return Smi::ToInt(number);
return DoubleToInt32(number->Number());
}
uint32_t NumberToUint32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
+ if (number->IsSmi()) return Smi::ToInt(number);
return DoubleToUint32(number->Number());
}
uint32_t PositiveNumberToUint32(Object* number) {
if (number->IsSmi()) {
- int value = Smi::cast(number)->value();
+ int value = Smi::ToInt(number);
if (value <= 0) return 0;
return value;
}
@@ -178,7 +178,7 @@ uint32_t PositiveNumberToUint32(Object* number) {
}
int64_t NumberToInt64(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
+ if (number->IsSmi()) return Smi::ToInt(number);
return static_cast<int64_t>(number->Number());
}
@@ -186,7 +186,7 @@ bool TryNumberToSize(Object* number, size_t* result) {
// Do not create handles in this function! Don't use SealHandleScope because
// the function can be used concurrently.
if (number->IsSmi()) {
- int value = Smi::cast(number)->value();
+ int value = Smi::ToInt(number);
DCHECK(static_cast<unsigned>(Smi::kMaxValue) <=
std::numeric_limits<size_t>::max());
if (value >= 0) {
@@ -424,7 +424,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
- if (base::bits::IsPowerOfTwo32(radix)) {
+ if (base::bits::IsPowerOfTwo(radix)) {
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index d26274c220..bf79395609 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -20,9 +20,11 @@
#include "src/strtod.h"
#include "src/utils.h"
-#ifndef _STLP_VENDOR_CSTD
+#if defined(_STLP_VENDOR_CSTD)
// STLPort doesn't import fpclassify into the std namespace.
-using std::fpclassify;
+#define FPCLASSIFY_NAMESPACE
+#else
+#define FPCLASSIFY_NAMESPACE std
#endif
namespace v8 {
@@ -122,7 +124,7 @@ double StringToInt(UnicodeCache* unicode_cache,
const char* DoubleToCString(double v, Vector<char> buffer) {
- switch (fpclassify(v)) {
+ switch (FPCLASSIFY_NAMESPACE::fpclassify(v)) {
case FP_NAN: return "NaN";
case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
case FP_ZERO: return "0";
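The new macro works because qualifying a call through an empty macro degenerates to a global-namespace call (::fpclassify on STLPort); here is a minimal sketch of the non-STLPort configuration, assuming nothing beyond the standard <cmath> header.

// Minimal sketch of the FPCLASSIFY_NAMESPACE dispatch (non-STLPort branch).
#include <cmath>
#include <cstdio>
#include <limits>

#define FPCLASSIFY_NAMESPACE std  // STLPort builds define this to nothing

int main() {
  double v = std::numeric_limits<double>::infinity();
  std::printf("%s\n",
              FPCLASSIFY_NAMESPACE::fpclassify(v) == FP_INFINITE ? "Infinity"
                                                                 : "other");
}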
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index ce77806cdc..66e66bd517 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -7,6 +7,8 @@
#include "src/counters.h"
+#include "src/isolate.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index d1a1a44c9f..a06c91f3f7 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -15,53 +15,96 @@
namespace v8 {
namespace internal {
-StatsTable::StatsTable()
+StatsTable::StatsTable(Counters* counters)
: lookup_function_(NULL),
create_histogram_function_(NULL),
add_histogram_sample_function_(NULL) {}
+void StatsTable::SetCounterFunction(CounterLookupCallback f) {
+ lookup_function_ = f;
+}
-int* StatsCounter::FindLocationInStatsTable() const {
- return isolate_->stats_table()->FindLocation(name_);
+int* StatsCounterBase::FindLocationInStatsTable() const {
+ return counters_->FindLocation(name_);
}
+StatsCounterThreadSafe::StatsCounterThreadSafe(Counters* counters,
+ const char* name)
+ : StatsCounterBase(counters, name) {}
-void Histogram::AddSample(int sample) {
- if (Enabled()) {
- isolate()->stats_table()->AddHistogramSample(histogram_, sample);
+void StatsCounterThreadSafe::Set(int Value) {
+ if (ptr_) {
+ base::LockGuard<base::Mutex> Guard(&mutex_);
+ SetLoc(ptr_, Value);
}
}
-void* Histogram::CreateHistogram() const {
- return isolate()->stats_table()->
- CreateHistogram(name_, min_, max_, num_buckets_);
+void StatsCounterThreadSafe::Increment() {
+ if (ptr_) {
+ base::LockGuard<base::Mutex> Guard(&mutex_);
+ IncrementLoc(ptr_);
+ }
+}
+
+void StatsCounterThreadSafe::Increment(int value) {
+ if (ptr_) {
+ base::LockGuard<base::Mutex> Guard(&mutex_);
+ IncrementLoc(ptr_, value);
+ }
+}
+
+void StatsCounterThreadSafe::Decrement() {
+ if (ptr_) {
+ base::LockGuard<base::Mutex> Guard(&mutex_);
+ DecrementLoc(ptr_);
+ }
}
+void StatsCounterThreadSafe::Decrement(int value) {
+ if (ptr_) {
+ base::LockGuard<base::Mutex> Guard(&mutex_);
+ DecrementLoc(ptr_, value);
+ }
+}
-// Start the timer.
-void HistogramTimer::Start() {
+void Histogram::AddSample(int sample) {
if (Enabled()) {
- timer_.Start();
+ counters_->AddHistogramSample(histogram_, sample);
}
- Logger::CallEventLogger(isolate(), name(), Logger::START, true);
}
+void* Histogram::CreateHistogram() const {
+ return counters_->CreateHistogram(name_, min_, max_, num_buckets_);
+}
+
+void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
+ if (Enabled()) timer->Start();
+ if (isolate) Logger::CallEventLogger(isolate, name(), Logger::START, true);
+}
-// Stop the timer and record the results.
-void HistogramTimer::Stop() {
+void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) {
- int64_t sample = resolution_ == MICROSECOND
- ? timer_.Elapsed().InMicroseconds()
- : timer_.Elapsed().InMilliseconds();
// Compute the delta between start and stop, in microseconds.
+ int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
+ ? timer->Elapsed().InMicroseconds()
+ : timer->Elapsed().InMilliseconds();
+ timer->Stop();
AddSample(static_cast<int>(sample));
- timer_.Stop();
}
- Logger::CallEventLogger(isolate(), name(), Logger::END, true);
+ if (isolate != nullptr) {
+ Logger::CallEventLogger(isolate, name(), Logger::END, true);
+ }
}
-
-Counters::Counters(Isolate* isolate) {
+Counters::Counters(Isolate* isolate)
+ : isolate_(isolate),
+ stats_table_(this),
+// clang-format off
+#define SC(name, caption) name##_(this, "c:" #caption),
+ STATS_COUNTER_TS_LIST(SC)
+#undef SC
+      // clang-format on
+ runtime_call_stats_() {
static const struct {
Histogram Counters::*member;
const char* caption;
@@ -77,23 +120,41 @@ Counters::Counters(Isolate* isolate) {
for (const auto& histogram : kHistograms) {
this->*histogram.member =
Histogram(histogram.caption, histogram.min, histogram.max,
- histogram.num_buckets, isolate);
+ histogram.num_buckets, this);
}
+ const int DefaultTimedHistogramNumBuckets = 50;
+
static const struct {
HistogramTimer Counters::*member;
const char* caption;
int max;
- HistogramTimer::Resolution res;
+ HistogramTimerResolution res;
} kHistogramTimers[] = {
#define HT(name, caption, max, res) \
- {&Counters::name##_, #caption, max, HistogramTimer::res},
+ {&Counters::name##_, #caption, max, HistogramTimerResolution::res},
HISTOGRAM_TIMER_LIST(HT)
#undef HT
};
for (const auto& timer : kHistogramTimers) {
- this->*timer.member =
- HistogramTimer(timer.caption, 0, timer.max, timer.res, 50, isolate);
+ this->*timer.member = HistogramTimer(timer.caption, 0, timer.max, timer.res,
+ DefaultTimedHistogramNumBuckets, this);
+ }
+
+ static const struct {
+ TimedHistogram Counters::*member;
+ const char* caption;
+ int max;
+ HistogramTimerResolution res;
+ } kTimedHistograms[] = {
+#define HT(name, caption, max, res) \
+ {&Counters::name##_, #caption, max, HistogramTimerResolution::res},
+ TIMED_HISTOGRAM_LIST(HT)
+#undef HT
+ };
+ for (const auto& timer : kTimedHistograms) {
+ this->*timer.member = TimedHistogram(timer.caption, 0, timer.max, timer.res,
+ DefaultTimedHistogramNumBuckets, this);
}
static const struct {
@@ -105,8 +166,8 @@ Counters::Counters(Isolate* isolate) {
#undef AHT
};
for (const auto& aht : kAggregatableHistogramTimers) {
- this->*aht.member =
- AggregatableHistogramTimer(aht.caption, 0, 10000000, 50, isolate);
+ this->*aht.member = AggregatableHistogramTimer(
+ aht.caption, 0, 10000000, DefaultTimedHistogramNumBuckets, this);
}
static const struct {
@@ -118,8 +179,7 @@ Counters::Counters(Isolate* isolate) {
#undef HP
};
for (const auto& percentage : kHistogramPercentages) {
- this->*percentage.member =
- Histogram(percentage.caption, 0, 101, 100, isolate);
+ this->*percentage.member = Histogram(percentage.caption, 0, 101, 100, this);
}
// Exponential histogram assigns bucket limits to points
@@ -138,7 +198,7 @@ Counters::Counters(Isolate* isolate) {
};
for (const auto& histogram : kLegacyMemoryHistograms) {
this->*histogram.member =
- Histogram(histogram.caption, 1000, 500000, 50, isolate);
+ Histogram(histogram.caption, 1000, 500000, 50, this);
}
// For n = 100, low = 4000, high = 2000000: the factor = 1.06.
@@ -154,7 +214,7 @@ Counters::Counters(Isolate* isolate) {
};
for (const auto& histogram : kMemoryHistograms) {
this->*histogram.member =
- Histogram(histogram.caption, 4000, 2000000, 100, isolate);
+ Histogram(histogram.caption, 4000, 2000000, 100, this);
this->*histogram.aggregated =
AggregatedMemoryHistogram<Histogram>(&(this->*histogram.member));
}
@@ -196,17 +256,22 @@ Counters::Counters(Isolate* isolate) {
};
// clang-format on
for (const auto& counter : kStatsCounters) {
- this->*counter.member = StatsCounter(isolate, counter.caption);
+ this->*counter.member = StatsCounter(this, counter.caption);
}
}
+void Counters::ResetCounterFunction(CounterLookupCallback f) {
+ stats_table_.SetCounterFunction(f);
-void Counters::ResetCounters() {
#define SC(name, caption) name##_.Reset();
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name, caption) name##_.Reset();
+ STATS_COUNTER_TS_LIST(SC)
+#undef SC
+
#define SC(name) \
count_of_##name##_.Reset(); \
size_of_##name##_.Reset();
@@ -232,8 +297,9 @@ void Counters::ResetCounters() {
#undef SC
}
+void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
+ stats_table_.SetCreateHistogramFunction(f);
-void Counters::ResetHistograms() {
#define HR(name, caption, min, max, num_buckets) name##_.Reset();
HISTOGRAM_RANGE_LIST(HR)
#undef HR
@@ -242,6 +308,10 @@ void Counters::ResetHistograms() {
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HT(name, caption, max, res) name##_.Reset();
+ TIMED_HISTOGRAM_LIST(HT)
+#undef HT
+
#define AHT(name, caption) name##_.Reset();
AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
#undef AHT
@@ -256,29 +326,6 @@ void Counters::ResetHistograms() {
#undef HM
}
-void Counters::InitializeHistograms() {
-#define HR(name, caption, min, max, num_buckets) name##_.Enabled();
- HISTOGRAM_RANGE_LIST(HR)
-#undef HR
-
-#define HT(name, caption, max, res) name##_.Enabled();
- HISTOGRAM_TIMER_LIST(HT)
-#undef HT
-
-#define AHT(name, caption) name##_.Enabled();
- AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
-#undef AHT
-
-#define HP(name, caption) name##_.Enabled();
- HISTOGRAM_PERCENTAGE_LIST(HP)
-#undef HP
-
-#define HM(name, caption) name##_.Enabled();
- HISTOGRAM_LEGACY_MEMORY_LIST(HM)
- HISTOGRAM_MEMORY_LIST(HM)
-#undef HM
-}
-
class RuntimeCallStatEntries {
public:
void Print(std::ostream& os) {
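Each of the new StatsCounterThreadSafe mutators above follows the same shape: do nothing when the counter has no backing slot, otherwise mutate it under the counter's mutex. A stripped-down, self-contained sketch of that shape, using plain std::mutex instead of base::Mutex and illustrative names:

// Illustrative guard-then-mutate pattern, as in StatsCounterThreadSafe.
#include <mutex>

class ThreadSafeCounterSketch {
 public:
  explicit ThreadSafeCounterSketch(int* location) : ptr_(location) {}

  void Increment(int value) {
    if (ptr_ == nullptr) return;  // counter never registered: cheap no-op
    std::lock_guard<std::mutex> guard(mutex_);
    *ptr_ += value;
  }

 private:
  int* ptr_;          // slot handed out by the embedder's stats table
  std::mutex mutex_;  // serializes concurrent writers
};

int main() {
  int slot = 0;
  ThreadSafeCounterSketch counter(&slot);
  counter.Increment(3);  // slot == 3
}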
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index bb917cc518..810eaf2ba7 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -11,7 +11,6 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
#include "src/globals.h"
-#include "src/isolate.h"
#include "src/objects.h"
#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
@@ -25,22 +24,22 @@ namespace internal {
// counters for monitoring. Counters can be looked up and
// manipulated by name.
+class Counters;
+
class StatsTable {
public:
- // Register an application-defined function where
- // counters can be looked up.
- void SetCounterFunction(CounterLookupCallback f) {
- lookup_function_ = f;
- }
+ // Register an application-defined function for recording
+ // subsequent counter statistics.
+ void SetCounterFunction(CounterLookupCallback f);
- // Register an application-defined function to create
- // a histogram for passing to the AddHistogramSample function
+ // Register an application-defined function to create histograms for
+ // recording subsequent histogram samples.
void SetCreateHistogramFunction(CreateHistogramCallback f) {
create_histogram_function_ = f;
}
// Register an application-defined function to add a sample
- // to a histogram created with CreateHistogram function
+ // to a histogram created with CreateHistogram function.
void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
add_histogram_sample_function_ = f;
}
@@ -81,17 +80,37 @@ class StatsTable {
}
private:
- StatsTable();
+ friend class Counters;
+
+ explicit StatsTable(Counters* counters);
CounterLookupCallback lookup_function_;
CreateHistogramCallback create_histogram_function_;
AddHistogramSampleCallback add_histogram_sample_function_;
- friend class Isolate;
-
DISALLOW_COPY_AND_ASSIGN(StatsTable);
};
+// Base class for stats counters.
+class StatsCounterBase {
+ protected:
+ Counters* counters_;
+ const char* name_;
+ int* ptr_;
+
+ StatsCounterBase() {}
+ StatsCounterBase(Counters* counters, const char* name)
+ : counters_(counters), name_(name), ptr_(nullptr) {}
+
+ void SetLoc(int* loc, int value) { *loc = value; }
+ void IncrementLoc(int* loc) { (*loc)++; }
+ void IncrementLoc(int* loc, int value) { (*loc) += value; }
+ void DecrementLoc(int* loc) { (*loc)--; }
+ void DecrementLoc(int* loc, int value) { (*loc) -= value; }
+
+ int* FindLocationInStatsTable() const;
+};
+
// StatsCounters are dynamically created values which can be tracked in
// the StatsTable. They are designed to be lightweight to create and
// easy to use.
@@ -99,40 +118,31 @@ class StatsTable {
// Internally, a counter represents a value in a row of a StatsTable.
// The row has a 32bit value for each process/thread in the table and also
// a name (stored in the table metadata). Since the storage location can be
-// thread-specific, this class cannot be shared across threads.
-class StatsCounter {
+// thread-specific, this class cannot be shared across threads. Note: This
+// class is not thread safe.
+class StatsCounter : public StatsCounterBase {
public:
- StatsCounter() { }
- explicit StatsCounter(Isolate* isolate, const char* name)
- : isolate_(isolate), name_(name), ptr_(NULL), lookup_done_(false) { }
-
// Sets the counter to a specific value.
void Set(int value) {
- int* loc = GetPtr();
- if (loc) *loc = value;
+ if (int* loc = GetPtr()) SetLoc(loc, value);
}
// Increments the counter.
void Increment() {
- int* loc = GetPtr();
- if (loc) (*loc)++;
+ if (int* loc = GetPtr()) IncrementLoc(loc);
}
void Increment(int value) {
- int* loc = GetPtr();
- if (loc)
- (*loc) += value;
+ if (int* loc = GetPtr()) IncrementLoc(loc, value);
}
// Decrements the counter.
void Decrement() {
- int* loc = GetPtr();
- if (loc) (*loc)--;
+ if (int* loc = GetPtr()) DecrementLoc(loc);
}
void Decrement(int value) {
- int* loc = GetPtr();
- if (loc) (*loc) -= value;
+ if (int* loc = GetPtr()) DecrementLoc(loc, value);
}
// Is this counter enabled?
@@ -150,10 +160,16 @@ class StatsCounter {
return loc;
}
+ private:
+ friend class Counters;
+
+ StatsCounter() {}
+ StatsCounter(Counters* counters, const char* name)
+ : StatsCounterBase(counters, name), lookup_done_(false) {}
+
// Reset the cached internal pointer.
void Reset() { lookup_done_ = false; }
- protected:
// Returns the cached address of this counter location.
int* GetPtr() {
if (lookup_done_) return ptr_;
@@ -162,61 +178,65 @@ class StatsCounter {
return ptr_;
}
+ bool lookup_done_;
+};
+
+// Thread safe version of StatsCounter.
+class StatsCounterThreadSafe : public StatsCounterBase {
+ public:
+ void Set(int Value);
+ void Increment();
+ void Increment(int value);
+ void Decrement();
+ void Decrement(int value);
+ bool Enabled() { return ptr_ != NULL; }
+ int* GetInternalPointer() {
+ DCHECK(ptr_ != NULL);
+ return ptr_;
+ }
+
private:
- int* FindLocationInStatsTable() const;
+ friend class Counters;
- Isolate* isolate_;
- const char* name_;
- int* ptr_;
- bool lookup_done_;
+ StatsCounterThreadSafe(Counters* counters, const char* name);
+ void Reset() { ptr_ = FindLocationInStatsTable(); }
+
+ base::Mutex mutex_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StatsCounterThreadSafe);
};
-// A Histogram represents a dynamically created histogram in the StatsTable.
-// It will be registered with the histogram system on first use.
+// A Histogram represents a dynamically created histogram in the
+// StatsTable. Note: This class is thread safe.
class Histogram {
public:
- Histogram() { }
- Histogram(const char* name,
- int min,
- int max,
- int num_buckets,
- Isolate* isolate)
- : name_(name),
- min_(min),
- max_(max),
- num_buckets_(num_buckets),
- histogram_(NULL),
- lookup_done_(false),
- isolate_(isolate) { }
-
// Add a single sample to this histogram.
void AddSample(int sample);
// Returns true if this histogram is enabled.
- bool Enabled() {
- return GetHistogram() != NULL;
- }
-
- // Reset the cached internal pointer.
- void Reset() {
- lookup_done_ = false;
- }
+ bool Enabled() { return histogram_ != nullptr; }
const char* name() { return name_; }
protected:
- // Returns the handle to the histogram.
- void* GetHistogram() {
- if (!lookup_done_) {
- lookup_done_ = true;
- histogram_ = CreateHistogram();
- }
- return histogram_;
- }
+ Histogram() {}
+ Histogram(const char* name, int min, int max, int num_buckets,
+ Counters* counters)
+ : name_(name),
+ min_(min),
+ max_(max),
+ num_buckets_(num_buckets),
+ histogram_(nullptr),
+ counters_(counters) {}
+
+ Counters* counters() const { return counters_; }
- Isolate* isolate() const { return isolate_; }
+ // Reset the cached internal pointer.
+ void Reset() { histogram_ = CreateHistogram(); }
private:
+ friend class Counters;
+
void* CreateHistogram() const;
const char* name_;
@@ -224,29 +244,65 @@ class Histogram {
int max_;
int num_buckets_;
void* histogram_;
- bool lookup_done_;
- Isolate* isolate_;
+ Counters* counters_;
};
-// A HistogramTimer allows distributions of results to be created.
-class HistogramTimer : public Histogram {
+enum class HistogramTimerResolution { MILLISECOND, MICROSECOND };
+
+// A thread safe histogram timer. It also allows distributions of
+// nested timed results.
+class TimedHistogram : public Histogram {
public:
- enum Resolution {
- MILLISECOND,
- MICROSECOND
- };
+ // Start the timer. Log if isolate non-null.
+ void Start(base::ElapsedTimer* timer, Isolate* isolate);
- HistogramTimer() {}
- HistogramTimer(const char* name, int min, int max, Resolution resolution,
- int num_buckets, Isolate* isolate)
- : Histogram(name, min, max, num_buckets, isolate),
+ // Stop the timer and record the results. Log if isolate non-null.
+ void Stop(base::ElapsedTimer* timer, Isolate* isolate);
+
+ protected:
+ friend class Counters;
+ HistogramTimerResolution resolution_;
+
+ TimedHistogram() {}
+ TimedHistogram(const char* name, int min, int max,
+ HistogramTimerResolution resolution, int num_buckets,
+ Counters* counters)
+ : Histogram(name, min, max, num_buckets, counters),
resolution_(resolution) {}
+ void AddTimeSample();
+};
- // Start the timer.
- void Start();
+// Helper class for scoping a TimedHistogram.
+class TimedHistogramScope {
+ public:
+ explicit TimedHistogramScope(TimedHistogram* histogram,
+ Isolate* isolate = nullptr)
+ : histogram_(histogram), isolate_(isolate) {
+ histogram_->Start(&timer_, isolate);
+ }
+ ~TimedHistogramScope() { histogram_->Stop(&timer_, isolate_); }
- // Stop the timer and record the results.
- void Stop();
+ private:
+ base::ElapsedTimer timer_;
+ TimedHistogram* histogram_;
+ Isolate* isolate_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
+};
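+
+// Illustrative usage sketch, not part of this patch (assumes a Counters*
+// `counters` and an Isolate* `isolate` in scope):
+//   TimedHistogramScope scope(counters->wasm_compile_wasm_module_time(),
+//                             isolate);
+//   // ...timed work here; the sample is recorded when `scope` is destroyed.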
+
+// A HistogramTimer allows distributions of non-nested timed results
+// to be created. WARNING: This class is not thread safe and can only
+// be run on the foreground thread.
+class HistogramTimer : public TimedHistogram {
+ public:
+ // Note: public for testing purposes only.
+ HistogramTimer(const char* name, int min, int max,
+ HistogramTimerResolution resolution, int num_buckets,
+ Counters* counters)
+ : TimedHistogram(name, min, max, resolution, num_buckets, counters) {}
+
+ inline void Start();
+ inline void Stop();
// Returns true if the timer is running.
bool Running() {
@@ -259,8 +315,11 @@ class HistogramTimer : public Histogram {
#endif
private:
+ friend class Counters;
+
base::ElapsedTimer timer_;
- Resolution resolution_;
+
+ HistogramTimer() {}
};
// Helper class for scoping a HistogramTimer.
@@ -303,7 +362,6 @@ class HistogramTimerScope BASE_EMBEDDED {
#endif
};
-
// A histogram timer that can aggregate events within a larger scope.
//
// Intended use of this timer is to have an outer (aggregating) and an inner
@@ -319,11 +377,6 @@ class HistogramTimerScope BASE_EMBEDDED {
// events to be timed.
class AggregatableHistogramTimer : public Histogram {
public:
- AggregatableHistogramTimer() {}
- AggregatableHistogramTimer(const char* name, int min, int max,
- int num_buckets, Isolate* isolate)
- : Histogram(name, min, max, num_buckets, isolate) {}
-
// Start/stop the "outer" scope.
void Start() { time_ = base::TimeDelta(); }
void Stop() { AddSample(static_cast<int>(time_.InMicroseconds())); }
@@ -332,6 +385,13 @@ class AggregatableHistogramTimer : public Histogram {
void Add(base::TimeDelta other) { time_ += other; }
private:
+ friend class Counters;
+
+ AggregatableHistogramTimer() {}
+ AggregatableHistogramTimer(const char* name, int min, int max,
+ int num_buckets, Counters* counters)
+ : Histogram(name, min, max, num_buckets, counters) {}
+
base::TimeDelta time_;
};
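+
+// Rough outer/inner sketch, illustrative only: the outer (aggregating) scope
+// calls Start() and Stop() on this timer, while each inner timed region
+// reports its own elapsed base::TimeDelta through Add().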
@@ -378,14 +438,7 @@ class AggregatedHistogramTimerScope {
template <typename Histogram>
class AggregatedMemoryHistogram {
public:
- AggregatedMemoryHistogram()
- : is_initialized_(false),
- start_ms_(0.0),
- last_ms_(0.0),
- aggregate_value_(0.0),
- last_value_(0.0),
- backing_histogram_(NULL) {}
-
+ // Note: public for testing purposes only.
explicit AggregatedMemoryHistogram(Histogram* backing_histogram)
: AggregatedMemoryHistogram() {
backing_histogram_ = backing_histogram;
@@ -403,7 +456,17 @@ class AggregatedMemoryHistogram {
void AddSample(double current_ms, double current_value);
private:
+ friend class Counters;
+
+ AggregatedMemoryHistogram()
+ : is_initialized_(false),
+ start_ms_(0.0),
+ last_ms_(0.0),
+ aggregate_value_(0.0),
+ last_value_(0.0),
+ backing_histogram_(NULL) {}
double Aggregate(double current_ms, double current_value);
+
bool is_initialized_;
double start_ms_;
double last_ms_;
@@ -500,14 +563,14 @@ class RuntimeCallCounter final {
void Add(base::TimeDelta delta) { time_ += delta.InMicroseconds(); }
private:
+ friend class RuntimeCallStats;
+
RuntimeCallCounter() {}
const char* name_;
int64_t count_;
// Stored as int64_t so that its initialization can be deferred.
int64_t time_;
-
- friend class RuntimeCallStats;
};
// RuntimeCallTimer is used to keep track of the stack of currently active
@@ -581,10 +644,8 @@ class RuntimeCallTimer final {
V(Message_GetLineNumber) \
V(Message_GetSourceLine) \
V(Message_GetStartColumn) \
- V(Module_FinishDynamicImportSuccess) \
- V(Module_FinishDynamicImportFailure) \
V(Module_Evaluate) \
- V(Module_Instantiate) \
+ V(Module_InstantiateModule) \
V(NumberObject_New) \
V(NumberObject_NumberValue) \
V(Object_CallAsConstructor) \
@@ -604,14 +665,12 @@ class RuntimeCallTimer final {
V(Object_GetRealNamedPropertyAttributes) \
V(Object_GetRealNamedPropertyAttributesInPrototypeChain) \
V(Object_GetRealNamedPropertyInPrototypeChain) \
+ V(Object_Has) \
V(Object_HasOwnProperty) \
V(Object_HasRealIndexedProperty) \
V(Object_HasRealNamedCallbackProperty) \
V(Object_HasRealNamedProperty) \
- V(Object_Int32Value) \
- V(Object_IntegerValue) \
V(Object_New) \
- V(Object_NumberValue) \
V(Object_ObjectProtoToString) \
V(Object_Set) \
V(Object_SetAccessor) \
@@ -628,7 +687,6 @@ class RuntimeCallTimer final {
V(Object_ToObject) \
V(Object_ToString) \
V(Object_ToUint32) \
- V(Object_Uint32Value) \
V(Persistent_New) \
V(Private_New) \
V(Promise_Catch) \
@@ -636,6 +694,7 @@ class RuntimeCallTimer final {
V(Promise_HasRejectHandler) \
V(Promise_Resolver_New) \
V(Promise_Resolver_Resolve) \
+ V(Promise_Resolver_Reject) \
V(Promise_Result) \
V(Promise_Status) \
V(Promise_Then) \
@@ -680,7 +739,11 @@ class RuntimeCallTimer final {
V(UnboundScript_GetSourceMappingURL) \
V(UnboundScript_GetSourceURL) \
V(Value_InstanceOf) \
+ V(Value_IntegerValue) \
+ V(Value_Int32Value) \
+ V(Value_NumberValue) \
V(Value_TypeOf) \
+ V(Value_Uint32Value) \
V(ValueDeserializer_ReadHeader) \
V(ValueDeserializer_ReadValue) \
V(ValueSerializer_WriteValue)
@@ -941,7 +1004,27 @@ class RuntimeCallTimerScope {
100000, 51) \
HR(array_buffer_big_allocations, V8.ArrayBufferLargeAllocations, 0, 4096, \
13) \
- HR(array_buffer_new_size_failures, V8.ArrayBufferNewSizeFailures, 0, 4096, 13)
+ HR(array_buffer_new_size_failures, V8.ArrayBufferNewSizeFailures, 0, 4096, \
+ 13) \
+ HR(shared_array_allocations, V8.SharedArrayAllocationSizes, 0, 4096, 13) \
+ HR(wasm_asm_function_size_bytes, V8.WasmFunctionSizeBytes.asm, 1, GB, 51) \
+ HR(wasm_wasm_function_size_bytes, V8.WasmFunctionSizeBytes.wasm, 1, GB, 51) \
+ HR(wasm_asm_module_size_bytes, V8.WasmModuleSizeBytes.asm, 1, GB, 51) \
+ HR(wasm_wasm_module_size_bytes, V8.WasmModuleSizeBytes.wasm, 1, GB, 51) \
+ HR(wasm_asm_min_mem_pages_count, V8.WasmMinMemPagesCount.asm, 1, 2 << 16, \
+ 51) \
+ HR(wasm_wasm_min_mem_pages_count, V8.WasmMinMemPagesCount.wasm, 1, 2 << 16, \
+ 51) \
+ HR(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm, 1, 2 << 16, \
+ 51) \
+ HR(wasm_decode_asm_module_peak_memory_bytes, \
+ V8.WasmDecodeModulePeakMemoryBytes.asm, 1, GB, 51) \
+ HR(wasm_decode_wasm_module_peak_memory_bytes, \
+ V8.WasmDecodeModulePeakMemoryBytes.wasm, 1, GB, 51) \
+ HR(asm_wasm_translation_peak_memory_bytes, \
+ V8.AsmWasmTranslationPeakMemoryBytes, 1, GB, 51) \
+ HR(wasm_compile_function_peak_memory_bytes, \
+ V8.WasmCompileFunctionPeakMemoryBytes, 1, GB, 51)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
@@ -972,29 +1055,31 @@ class RuntimeCallTimerScope {
 /* Total JavaScript execution time (including callbacks and runtime calls) */ \
HT(execute, V8.Execute, 1000000, MICROSECOND) \
/* Asm/Wasm */ \
- HT(wasm_instantiate_asm_module_time, \
- V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND) \
- HT(wasm_instantiate_wasm_module_time, \
- V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
- HT(wasm_decode_asm_module_time, V8.WasmDecodeModuleMicroSeconds.asm, \
- 1000000, MICROSECOND) \
- HT(wasm_decode_wasm_module_time, V8.WasmDecodeModuleMicroSeconds.wasm, \
- 1000000, MICROSECOND) \
- HT(wasm_decode_asm_function_time, V8.WasmDecodeFunctionMicroSeconds.asm, \
- 1000000, MICROSECOND) \
- HT(wasm_decode_wasm_function_time, V8.WasmDecodeFunctionMicroSeconds.wasm, \
- 1000000, MICROSECOND) \
- HT(wasm_compile_asm_module_time, V8.WasmCompileModuleMicroSeconds.asm, \
- 10000000, MICROSECOND) \
- HT(wasm_compile_wasm_module_time, V8.WasmCompileModuleMicroSeconds.wasm, \
- 10000000, MICROSECOND) \
- HT(wasm_compile_function_time, V8.WasmCompileFunctionMicroSeconds, 1000000, \
- MICROSECOND) \
HT(asm_wasm_translation_time, V8.AsmWasmTranslationMicroSeconds, 1000000, \
MICROSECOND) \
HT(wasm_lazy_compilation_time, V8.WasmLazyCompilationMicroSeconds, 1000000, \
MICROSECOND)
+#define TIMED_HISTOGRAM_LIST(HT) \
+ HT(wasm_decode_asm_module_time, V8.WasmDecodeModuleMicroSeconds.asm, \
+ 1000000, MICROSECOND) \
+ HT(wasm_decode_wasm_module_time, V8.WasmDecodeModuleMicroSeconds.wasm, \
+ 1000000, MICROSECOND) \
+ HT(wasm_decode_asm_function_time, V8.WasmDecodeFunctionMicroSeconds.asm, \
+ 1000000, MICROSECOND) \
+ HT(wasm_decode_wasm_function_time, V8.WasmDecodeFunctionMicroSeconds.wasm, \
+ 1000000, MICROSECOND) \
+ HT(wasm_compile_asm_module_time, V8.WasmCompileModuleMicroSeconds.asm, \
+ 10000000, MICROSECOND) \
+ HT(wasm_compile_wasm_module_time, V8.WasmCompileModuleMicroSeconds.wasm, \
+ 10000000, MICROSECOND) \
+ HT(wasm_compile_function_time, V8.WasmCompileFunctionMicroSeconds, 1000000, \
+ MICROSECOND) \
+ HT(wasm_instantiate_wasm_module_time, \
+ V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
+ HT(wasm_instantiate_asm_module_time, \
+ V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND)
+
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
@@ -1013,6 +1098,7 @@ class RuntimeCallTimerScope {
HP(heap_fraction_map_space, V8.MemoryHeapFractionMapSpace) \
HP(heap_fraction_lo_space, V8.MemoryHeapFractionLoSpace)
+// Note: These use Histogram with options (min=1000, max=500000, buckets=50).
#define HISTOGRAM_LEGACY_MEMORY_LIST(HM) \
HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
HM(heap_sample_total_used, V8.MemoryHeapSampleTotalUsed) \
@@ -1020,25 +1106,11 @@ class RuntimeCallTimerScope {
HM(heap_sample_code_space_committed, V8.MemoryHeapSampleCodeSpaceCommitted) \
HM(heap_sample_maximum_committed, V8.MemoryHeapSampleMaximumCommitted)
-#define HISTOGRAM_MEMORY_LIST(HM) \
- HM(memory_heap_committed, V8.MemoryHeapCommitted) \
- HM(memory_heap_used, V8.MemoryHeapUsed) \
- /* Asm/Wasm */ \
- HM(wasm_decode_asm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.asm) \
- HM(wasm_decode_wasm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.wasm) \
- HM(wasm_compile_function_peak_memory_bytes, \
- V8.WasmCompileFunctionPeakMemoryBytes) \
- HM(wasm_asm_min_mem_pages_count, V8.WasmMinMemPagesCount.asm) \
- HM(wasm_wasm_min_mem_pages_count, V8.WasmMinMemPagesCount.wasm) \
- HM(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm) \
- HM(wasm_asm_function_size_bytes, V8.WasmFunctionSizeBytes.asm) \
- HM(wasm_wasm_function_size_bytes, V8.WasmFunctionSizeBytes.wasm) \
- HM(wasm_asm_module_size_bytes, V8.WasmModuleSizeBytes.asm) \
- HM(wasm_wasm_module_size_bytes, V8.WasmModuleSizeBytes.wasm) \
- HM(asm_wasm_translation_peak_memory_bytes, \
- V8.AsmWasmTranslationPeakMemoryBytes)
+// Note: These define both Histogram and AggregatedMemoryHistogram<Histogram>
+// histograms with options (min=4000, max=2000000, buckets=100).
+#define HISTOGRAM_MEMORY_LIST(HM) \
+ HM(memory_heap_committed, V8.MemoryHeapCommitted) \
+ HM(memory_heap_used, V8.MemoryHeapUsed)
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
@@ -1112,7 +1184,6 @@ class RuntimeCallTimerScope {
SC(ic_keyed_call_miss, V8.ICKeyedCallMiss) \
SC(ic_store_miss, V8.ICStoreMiss) \
SC(ic_keyed_store_miss, V8.ICKeyedStoreMiss) \
- SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
SC(cow_arrays_converted, V8.COWArraysConverted) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
@@ -1167,14 +1238,35 @@ class RuntimeCallTimerScope {
/* Total code size (including metadata) of baseline code or bytecode. */ \
SC(total_baseline_code_size, V8.TotalBaselineCodeSize) \
/* Total count of functions compiled using the baseline compiler. */ \
- SC(total_baseline_compile_count, V8.TotalBaselineCompileCount) \
- SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
- SC(wasm_reloc_size, V8.WasmRelocBytes) \
+ SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
+
+#define STATS_COUNTER_TS_LIST(SC) \
+ SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
+ SC(wasm_reloc_size, V8.WasmRelocBytes) \
SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions)
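+
+// Explanatory note, not part of the original patch: each STATS_COUNTER_TS_LIST
+// entry is expanded twice in Counters below, once into a
+// StatsCounterThreadSafe member (name##_) and once into an accessor returning
+// its address, mirroring STATS_COUNTER_LIST_1/2.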
// This file contains all the v8 counters that are in use.
-class Counters {
+class Counters : public std::enable_shared_from_this<Counters> {
public:
+ explicit Counters(Isolate* isolate);
+
+ // Register an application-defined function for recording
+ // subsequent counter statistics. Note: Must be called on the main
+ // thread.
+ void ResetCounterFunction(CounterLookupCallback f);
+
+ // Register an application-defined function to create histograms for
+ // recording subsequent histogram samples. Note: Must be called on
+ // the main thread.
+ void ResetCreateHistogramFunction(CreateHistogramCallback f);
+
+ // Register an application-defined function to add a sample
+ // to a histogram. Will be used in all subsequent sample additions.
+ // Note: Must be called on the main thread.
+ void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
+ stats_table_.SetAddHistogramSampleFunction(f);
+ }
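+
+ // Hypothetical wiring sketch (names are illustrative, not from this patch):
+ // an embedder holding a Counters instance could route statistics to its own
+ // backend, on the main thread, via
+ //   counters->ResetCounterFunction(&MyStatsBackend::LookupCounter);
+ //   counters->ResetCreateHistogramFunction(&MyStatsBackend::CreateHistogram);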
+
#define HR(name, caption, min, max, num_buckets) \
Histogram* name() { return &name##_; }
HISTOGRAM_RANGE_LIST(HR)
@@ -1185,6 +1277,11 @@ class Counters {
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HT(name, caption, max, res) \
+ TimedHistogram* name() { return &name##_; }
+ TIMED_HISTOGRAM_LIST(HT)
+#undef HT
+
#define AHT(name, caption) \
AggregatableHistogramTimer* name() { return &name##_; }
AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
@@ -1214,6 +1311,11 @@ class Counters {
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name, caption) \
+ StatsCounterThreadSafe* name() { return &name##_; }
+ STATS_COUNTER_TS_LIST(SC)
+#undef SC
+
#define SC(name) \
StatsCounter* count_of_##name() { return &count_of_##name##_; } \
StatsCounter* size_of_##name() { return &size_of_##name##_; }
@@ -1244,9 +1346,11 @@ class Counters {
CODE_AGE_LIST_COMPLETE(SC)
#undef SC
+ // clang-format off
enum Id {
#define RATE_ID(name, caption, max, res) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
+ TIMED_HISTOGRAM_LIST(RATE_ID)
#undef RATE_ID
#define AGGREGATABLE_ID(name, caption) k_##name,
AGGREGATABLE_HISTOGRAM_TIMER_LIST(AGGREGATABLE_ID)
@@ -1261,6 +1365,7 @@ class Counters {
#define COUNTER_ID(name, caption) k_##name,
STATS_COUNTER_LIST_1(COUNTER_ID)
STATS_COUNTER_LIST_2(COUNTER_ID)
+ STATS_COUNTER_TS_LIST(COUNTER_ID)
#undef COUNTER_ID
#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
INSTANCE_TYPE_LIST(COUNTER_ID)
@@ -1279,14 +1384,33 @@ class Counters {
#undef COUNTER_ID
stats_counter_count
};
-
- void ResetCounters();
- void ResetHistograms();
- void InitializeHistograms();
+ // clang-format on
RuntimeCallStats* runtime_call_stats() { return &runtime_call_stats_; }
private:
+ friend class StatsTable;
+ friend class StatsCounterBase;
+ friend class Histogram;
+ friend class HistogramTimer;
+
+ Isolate* isolate_;
+ StatsTable stats_table_;
+
+ int* FindLocation(const char* name) {
+ return stats_table_.FindLocation(name);
+ }
+
+ void* CreateHistogram(const char* name, int min, int max, size_t buckets) {
+ return stats_table_.CreateHistogram(name, min, max, buckets);
+ }
+
+ void AddHistogramSample(void* histogram, int sample) {
+ stats_table_.AddHistogramSample(histogram, sample);
+ }
+
+ Isolate* isolate() { return isolate_; }
+
#define HR(name, caption, min, max, num_buckets) Histogram name##_;
HISTOGRAM_RANGE_LIST(HR)
#undef HR
@@ -1295,6 +1419,10 @@ class Counters {
HISTOGRAM_TIMER_LIST(HT)
#undef HT
+#define HT(name, caption, max, res) TimedHistogram name##_;
+ TIMED_HISTOGRAM_LIST(HT)
+#undef HT
+
#define AHT(name, caption) \
AggregatableHistogramTimer name##_;
AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
@@ -1322,6 +1450,10 @@ class Counters {
STATS_COUNTER_LIST_2(SC)
#undef SC
+#define SC(name, caption) StatsCounterThreadSafe name##_;
+ STATS_COUNTER_TS_LIST(SC)
+#undef SC
+
#define SC(name) \
StatsCounter size_of_##name##_; \
StatsCounter count_of_##name##_;
@@ -1348,13 +1480,17 @@ class Counters {
RuntimeCallStats runtime_call_stats_;
- friend class Isolate;
-
- explicit Counters(Isolate* isolate);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
+void HistogramTimer::Start() {
+ TimedHistogram::Start(&timer_, counters()->isolate());
+}
+
+void HistogramTimer::Stop() {
+ TimedHistogram::Stop(&timer_, counters()->isolate());
+}
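+
+// Foreground-thread timers are normally driven through HistogramTimerScope
+// (declared above) rather than by calling Start()/Stop() directly. A rough,
+// non-authoritative sketch, assuming the scope simply wraps the timer pointer:
+//   HistogramTimerScope timer_scope(counters->execute());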
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/OWNERS b/deps/v8/src/crankshaft/OWNERS
deleted file mode 100644
index 2918dddc4c..0000000000
--- a/deps/v8/src/crankshaft/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-set noparent
-
-bmeurer@chromium.org
-danno@chromium.org
-jarin@chromium.org
-jkummerow@chromium.org
-verwaest@chromium.org
diff --git a/deps/v8/src/crankshaft/arm/OWNERS b/deps/v8/src/crankshaft/arm/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/crankshaft/arm/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
deleted file mode 100644
index 104953d6c1..0000000000
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ /dev/null
@@ -1,2397 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/arm/lithium-arm.h"
-
-#include <sstream>
-
-#include "src/crankshaft/arm/lithium-codegen-arm.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "shl-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip a slot if for a double-width slot.
- if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateResultInstruction<1>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If instruction does not have side-effects lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach environment to this call even if
- // call sequence can not deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LUnallocated* LChunkBuilder::TempDoubleRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = UseFixedDouble(instr->right(), d1);
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, r1);
- LOperand* right_operand = UseFixed(right, r0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment, if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, The register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new(zone()) LArgumentsLength(value));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- LOperand* receiver = UseFixed(instr->receiver(), r0);
- LOperand* length = UseFixed(instr->length(), r2);
- LOperand* elements = UseFixed(instr->elements(), r3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = Use(instr->argument(i));
- AddInstruction(new(zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, cp);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), cp);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
- descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r1);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathCos:
- return DoMathCos(instr);
- case kMathSin:
- return DoMathSin(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempDoubleRegister();
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- Representation r = instr->value()->representation();
- LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
- ? NULL
- : UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LMathAbs(context, input));
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new(zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathPowHalf* result = new(zone()) LMathPowHalf(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
- dividend, divisor));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp =
- CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- (!CpuFeatures::IsSupported(SUDIV) ||
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) ||
- (!instr->IsMathFloorOfDiv() &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp =
- CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor, temp));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- (!CpuFeatures::IsSupported(SUDIV) ||
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
- dividend, divisor));
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp =
- CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
- LOperand* temp2 =
- CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
- LInstruction* result = DefineAsRegister(new(zone()) LModI(
- dividend, divisor, temp, temp2));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- HValue* left = instr->BetterLeftOperand();
- HValue* right = instr->BetterRightOperand();
- LOperand* left_op;
- LOperand* right_op;
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-
- int32_t constant_value = 0;
- if (right->IsConstant()) {
- HConstant* constant = HConstant::cast(right);
- constant_value = constant->Integer32Value();
- // Constants -1, 0 and 1 can be optimized if the result can overflow.
- // For other constants, it can be optimized only without overflow.
- if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
- left_op = UseRegisterAtStart(left);
- right_op = UseConstant(right);
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- LMulI* mul = new(zone()) LMulI(left_op, right_op);
- if (right_op->IsConstantOperand()
- ? ((can_overflow && constant_value == -1) ||
- (bailout_on_minus_zero && constant_value <= 0))
- : (can_overflow || bailout_on_minus_zero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- if (instr->HasOneUse() && (instr->uses().value()->IsAdd() ||
- instr->uses().value()->IsSub())) {
- HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
-
- if (use->IsAdd() && instr == use->left()) {
- // This mul is the lhs of an add. The add and mul will be folded into a
- // multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsSub()) {
- // This mul is the rhs of a sub. The sub and mul will be folded into a
- // multiply-sub in DoSub.
- return NULL;
- }
- }
-
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- if (instr->left()->IsConstant()) {
- // If lhs is constant, do reverse subtraction instead.
- return DoRSub(instr);
- }
-
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
- return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
- }
-
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- // Note: The lhs of the subtraction becomes the rhs of the
- // reverse-subtraction.
- LOperand* left = UseRegisterAtStart(instr->right());
- LOperand* right = UseOrConstantAtStart(instr->left());
- LRSubI* rsb = new(zone()) LRSubI(left, right);
- LInstruction* result = DefineAsRegister(rsb);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
- LOperand* minuend_op = UseRegisterAtStart(minuend);
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
-
- return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
- multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- return result;
- } else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
- }
-
- if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
- DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
-
- return DoArithmeticD(Token::ADD, instr);
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right =
- exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), d1)
- : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d2),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return new (zone()) LClassOfTestAndBranch(value, TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- return new(zone()) LSeqStringSetChar(context, string, index, value);
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseRegisterOrConstantAtStart(instr->length())
- : UseRegisterAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempDoubleRegister();
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve d1 explicitly.
- LClampTToUint8* result =
- new(zone()) LClampTToUint8(reg, TempDoubleRegister());
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), cp)
- : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
- parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseRegister(instr->elements());
- } else {
- DCHECK(instr->representation().IsSmiOrTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK(
- (instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- DCHECK(instr->value()->representation().IsSmiOrTagged());
- if (needs_write_barrier) {
- object = UseTempRegister(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- }
-
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- }
-
- DCHECK(
- (instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK(instr->elements()->representation().IsExternal());
- LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, r0);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- return MarkAsCall(
- DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- if (instr->IsAllocationFolded()) {
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseRegisterOrConstantAtStart(instr->length());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), r3);
- LTypeof* result = new (zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
deleted file mode 100644
index fede1c1bda..0000000000
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ /dev/null
@@ -1,2491 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
-#define V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadRoot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathCos) \
- V(MathSin) \
- V(MathExp) \
- V(MathFloor) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(MultiplySubD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(RSubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- }
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator support.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const override { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 2> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplySubD(LOperand* minuend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = minuend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* minuend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloor(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRound(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathPowHalf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LRSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LRSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- const CallInterfaceDescriptor descriptor() { return descriptor_; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- bool NeedsCanonicalization() {
- if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
- hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
- return false;
- }
- return hydrogen()->NeedsCanonicalization();
- }
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value = NULL) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* context,
- LOperand* size,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
- LInstruction* DoRSub(HSub* instr);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LUnallocated* TempDoubleRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach an environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
deleted file mode 100644
index 8d3924db7d..0000000000
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ /dev/null
@@ -1,5393 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/arm/lithium-codegen-arm.h"
-
-#include "src/assembler-inl.h"
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ vstr(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ vldr(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- // r1: Callee's JS function.
- // cp: Callee's context.
- // pp: Callee's constant pool pointer (if enabled)
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- __ push(r0);
- __ push(r1);
- __ add(r0, sp, Operand(slots * kPointerSize));
- __ mov(r1, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ sub(r0, r0, Operand(kPointerSize));
- __ str(r1, MemOperand(r0, 2 * kPointerSize));
- __ cmp(r0, sp);
- __ b(ne, &loop);
- __ pop(r1);
- __ pop(r0);
- } else {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info()->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is in r1.
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(r1);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
- Operand(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r1);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ mov(cp, r0);
- __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ str(r0, target);
- // Update the write barrier. This clobbers r3 and r0.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp,
- target.offset(),
- r0,
- r3,
- GetLinkRegisterState(),
- kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, r0, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ sub(sp, sp, Operand(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ PushCommonFrame(scratch0());
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- __ PopCommonFrame(scratch0());
- frame_is_built_ = false;
- }
- __ jmp(code->exit());
- }
- }
-
- // Force constant pool emission at the end of the deferred code to make
- // sure that no constant pools are emitted after.
- masm()->CheckConstPool(true, false);
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 24-bit signed
- // immediate of a branch instruction.
- // To simplify, we consider the code size from the first instruction to the
- // end of the jump table. We also don't consider the pc load delta.
- // Each entry in the jump table generates one instruction and inlines one
- // 32-bit data word after it.
- if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- jump_table_.length() * 7)) {
- Abort(kGeneratedCodeIsTooLarge);
- }
-
- if (jump_table_.length() > 0) {
- Label needs_frame, call_deopt_entry;
-
- Comment(";;; -------------------- Jump table --------------------");
- Address base = jump_table_[0].address;
-
- Register entry_offset = scratch0();
-
- int length = jump_table_.length();
- for (int i = 0; i < length; i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
-
- DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
-
- // Second-level deopt table entries are contiguous and small, so instead
- // of loading the full, absolute address of each one, load an immediate
- // offset which will be added to the base address later.
- __ mov(entry_offset, Operand(entry - base));
-
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- __ PushCommonFrame();
- __ bl(&needs_frame);
- } else {
- __ bl(&call_deopt_entry);
- }
- masm()->CheckConstPool(false, false);
- }
-
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ push(ip);
- DCHECK(info()->IsStub());
- }
-
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
-
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
-
- // Add the base address to the offset previously loaded in entry_offset.
- __ add(entry_offset, entry_offset,
- Operand(ExternalReference::ForDeoptEntry(base)));
- __ bx(entry_offset);
- }
-
- // Force constant pool emission at the end of the deopt jump table to make
- // sure that no constant pools are emitted after.
- masm()->CheckConstPool(true, false);
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int code) const {
- return Register::from_code(code);
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
- return DwVfpRegister::from_code(code);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- AllowDeferredHandleDereference get_number;
- DCHECK(literal->IsNumber());
- __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
- } else {
- DCHECK(r.IsSmiOrTagged());
- __ Move(scratch, literal);
- }
- return scratch;
- } else if (op->IsStackSlot()) {
- __ ldr(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- DCHECK(literal->IsNumber());
- __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
- __ vmov(flt_scratch, ip);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort(kUnsupportedDoubleImmediate);
- } else if (r.IsTagged()) {
- Abort(kUnsupportedTaggedImmediate);
- }
- } else if (op->IsStackSlot()) {
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(op));
- MemOperand mem_op = ToMemOperand(op);
- __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<int32_t>(Smi::FromInt(value));
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- DCHECK(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand::Zero();
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand::Zero();
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize;
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()));
- } else {
- // Without an eager stack frame, retrieve the parameter relative to the
- // stack pointer.
- return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- } else {
- // Without an eager stack frame, retrieve the parameter relative to the
- // stack pointer.
- return MemOperand(
- sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
- int size = masm()->CallSize(code, mode);
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
- }
- return size;
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode) {
- DCHECK(instr != NULL);
- // Block literal pool emission to ensure nop indicating no inlined smi code
- // is in the correct position.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode, false);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Move(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ ldr(cp, ToMemOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Register scratch = scratch0();
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
-
- // Store the condition on the stack if necessary
- if (condition != al) {
- __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
- __ mov(scratch, Operand(1), LeaveCC, condition);
- __ push(scratch);
- }
-
- __ push(r1);
- __ mov(scratch, Operand(count));
- __ ldr(r1, MemOperand(scratch));
- __ sub(r1, r1, Operand(1), SetCC);
- __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
- __ str(r1, MemOperand(scratch));
- __ pop(r1);
-
- if (condition != al) {
- // Clean up the stack before the deoptimizer call
- __ pop(scratch);
- }
-
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
-
- // 'Restore' the condition in a slightly hacky way. (It would be better
- // to use 'msr' and 'mrs' instructions here, but they are not supported by
- // our ARM simulator).
- if (condition != al) {
- condition = ne;
- __ cmp(scratch, Operand::Zero());
- }
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- __ stop("trap_on_deopt", condition);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to handle condition, build frame, or
- // restore caller doubles.
- if (condition == al && frame_is_built_ &&
- !info()->saves_caller_doubles()) {
- DeoptComment(deopt_info);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
- // We often have several deopts to the same entry; reuse the last
- // jump table entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- __ b(condition, &jump_table_.last().label);
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ cmp(dividend, Operand::Zero());
- __ b(pl, &dividend_is_not_negative);
- // Note that this is correct even for kMinInt operands.
- __ rsb(dividend, dividend, Operand::Zero());
- __ and_(dividend, dividend, Operand(mask));
- __ rsb(dividend, dividend, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- __ b(&done);
- }
-
- __ bind(&dividend_is_not_negative);
- __ and_(dividend, dividend, Operand(mask));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- __ mov(ip, Operand(Abs(divisor)));
- __ smull(result, ip, result, ip);
- __ sub(result, dividend, result, SetCC);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ b(ne, &remainder_not_zero);
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm(), SUDIV);
-
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- Label done;
- // Check for x % 0, sdiv might signal an exception. We have to deopt in this
- // case because we can't return a NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
- // want. We have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ cmp(left_reg, Operand(kMinInt));
- __ b(ne, &no_overflow_possible);
- __ cmp(right_reg, Operand(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ b(ne, &no_overflow_possible);
- __ mov(result_reg, Operand::Zero());
- __ jmp(&done);
- }
- __ bind(&no_overflow_possible);
- }
-
- // For 'r3 = r1 % r2' we can have the following ARM code:
- // sdiv r3, r1, r2
- // mls r3, r3, r2, r1
-
- __ sdiv(result_reg, left_reg, right_reg);
- __ Mls(result_reg, result_reg, right_reg, left_reg);
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(result_reg, Operand::Zero());
- __ b(ne, &done);
- __ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
-
- } else {
- // General case, without any SDIV support.
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
- Register scratch = scratch0();
- DCHECK(!scratch.is(left_reg));
- DCHECK(!scratch.is(right_reg));
- DCHECK(!scratch.is(result_reg));
- DwVfpRegister dividend = ToDoubleRegister(instr->temp());
- DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
- DCHECK(!divisor.is(dividend));
- LowDwVfpRegister quotient = double_scratch0();
- DCHECK(!quotient.is(dividend));
- DCHECK(!quotient.is(divisor));
-
- Label done;
- // Check for x % 0, we have to deopt in this case because we can't return a
- // NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- __ Move(result_reg, left_reg);
- // Load the arguments in VFP registers. The divisor value is preloaded
- // before. Be careful that 'right_reg' is only live on entry.
- // TODO(svenpanne) The last comment seems to be wrong nowadays.
- __ vmov(double_scratch0().low(), left_reg);
- __ vcvt_f64_s32(dividend, double_scratch0().low());
- __ vmov(double_scratch0().low(), right_reg);
- __ vcvt_f64_s32(divisor, double_scratch0().low());
-
- // We do not care about the sign of the divisor. Note that we still handle
- // the kMinInt % -1 case correctly, though.
- __ vabs(divisor, divisor);
- // Compute the quotient and round it to a 32bit integer.
- __ vdiv(quotient, dividend, divisor);
- __ vcvt_s32_f64(quotient.low(), quotient);
- __ vcvt_f64_s32(quotient, quotient.low());
-
- // Compute the remainder in result.
- __ vmul(double_scratch0(), divisor, quotient);
- __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
- __ vmov(scratch, double_scratch0().low());
- __ sub(result_reg, left_reg, scratch, SetCC);
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(ne, &done);
- __ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ rsb(result, dividend, Operand(0));
- return;
- }
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (shift == 0) {
- __ mov(result, dividend);
- } else if (shift == 1) {
- __ add(result, dividend, Operand(dividend, LSR, 31));
- } else {
- __ mov(result, Operand(dividend, ASR, 31));
- __ add(result, dividend, Operand(result, LSR, 32 - shift));
- }
- if (shift > 0) __ mov(result, Operand(result, ASR, shift));
- if (divisor < 0) __ rsb(result, result, Operand(0));
-}
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ mov(ip, Operand(divisor));
- __ smull(scratch0(), ip, result, ip);
- __ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register result = ToRegister(instr->result());
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive;
- if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- // Do the test only if it hasn't been done above.
- __ cmp(divisor, Operand::Zero());
- }
- __ b(pl, &positive);
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- __ bind(&positive);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) &&
- (!CpuFeatures::IsSupported(SUDIV) ||
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- // We don't need to check for overflow when truncating with sdiv
- // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
- __ cmp(dividend, Operand(kMinInt));
- __ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm(), SUDIV);
- __ sdiv(result, dividend, divisor);
- } else {
- DoubleRegister vleft = ToDoubleRegister(instr->temp());
- DoubleRegister vright = double_scratch0();
- __ vmov(double_scratch0().low(), dividend);
- __ vcvt_f64_s32(vleft, double_scratch0().low());
- __ vmov(double_scratch0().low(), divisor);
- __ vcvt_f64_s32(vright, double_scratch0().low());
- __ vdiv(vleft, vleft, vright); // vleft now contains the result.
- __ vcvt_s32_f64(double_scratch0().low(), vleft);
- __ vmov(result, double_scratch0().low());
- }
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- // Compute remainder and deopt if it's not zero.
- Register remainder = scratch0();
- __ Mls(remainder, result, divisor, dividend);
- __ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DwVfpRegister addend = ToDoubleRegister(instr->addend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- DCHECK(addend.is(ToDoubleRegister(instr->result())));
-
- __ vmla(addend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
- DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- DCHECK(minuend.is(ToDoubleRegister(instr->result())));
-
- __ vmls(minuend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- Register result = ToRegister(instr->result());
- int32_t divisor = instr->divisor();
-
- // If the divisor is 1, return the dividend.
- if (divisor == 1) {
- __ Move(result, dividend);
- return;
- }
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ mov(result, Operand(dividend, ASR, shift));
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ mov(result, Operand(result, ASR, shift));
- return;
- }
-
- __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
- __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: we need no dynamic check for the dividend, and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp());
- DCHECK(!temp.is(dividend) && !temp.is(result));
- Label needs_adjustment, done;
- __ cmp(dividend, Operand::Zero());
- __ b(divisor > 0 ? lt : gt, &needs_adjustment);
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- __ jmp(&done);
- __ bind(&needs_adjustment);
- __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
- __ TruncatingDiv(result, temp, Abs(divisor));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- __ sub(result, result, Operand(1));
- __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register left = ToRegister(instr->dividend());
- Register right = ToRegister(instr->divisor());
- Register result = ToRegister(instr->result());
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive;
- if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- // Do the test only if it hasn't been done above.
- __ cmp(right, Operand::Zero());
- }
- __ b(pl, &positive);
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- __ bind(&positive);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) &&
- (!CpuFeatures::IsSupported(SUDIV) ||
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- // We don't need to check for overflow when truncating with sdiv
- // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
- __ cmp(left, Operand(kMinInt));
- __ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm(), SUDIV);
- __ sdiv(result, left, right);
- } else {
- DoubleRegister vleft = ToDoubleRegister(instr->temp());
- DoubleRegister vright = double_scratch0();
- __ vmov(double_scratch0().low(), left);
- __ vcvt_f64_s32(vleft, double_scratch0().low());
- __ vmov(double_scratch0().low(), right);
- __ vcvt_f64_s32(vright, double_scratch0().low());
- __ vdiv(vleft, vleft, vright); // vleft now contains the result.
- __ vcvt_s32_f64(double_scratch0().low(), vleft);
- __ vmov(result, double_scratch0().low());
- }
-
- Label done;
- Register remainder = scratch0();
- __ Mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ b(eq, &done);
- __ eor(remainder, remainder, Operand(right));
- __ add(result, result, Operand(remainder, ASR, 31));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (right_op->IsConstantOperand()) {
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a zero constant is handled separately below.
- // If the constant is negative and left is zero, the result should be -0.
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- switch (constant) {
- case -1:
- if (overflow) {
- __ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ rsb(result, left, Operand::Zero());
- }
- break;
- case 0:
- if (bailout_on_minus_zero) {
- // If left is strictly negative and the constant is zero, the
- // result is -0. Deoptimize if required, otherwise return 0.
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
- }
- __ mov(result, Operand::Zero());
- break;
- case 1:
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (base::bits::IsPowerOfTwo32(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand::Zero());
- } else {
- // Generate standard code.
- __ mov(ip, Operand(constant));
- __ mul(result, left, ip);
- }
- }
-
- } else {
- DCHECK(right_op->IsRegister());
- Register right = ToRegister(right_op);
-
- if (overflow) {
- Register scratch = scratch0();
- // scratch:result = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ smull(result, scratch, result, right);
- } else {
- __ smull(result, scratch, left, right);
- }
- __ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ mul(result, result, right);
- } else {
- __ mul(result, left, right);
- }
- }
-
- if (bailout_on_minus_zero) {
- Label done;
- __ teq(left, Operand(right));
- __ b(pl, &done);
- // Bail out if the result is minus zero.
- __ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- DCHECK(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot()) {
- right = Operand(EmitLoadRegister(right_op, ip));
- } else {
- DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(result, left, right);
- break;
- case Token::BIT_OR:
- __ orr(result, left, right);
- break;
- case Token::BIT_XOR:
- if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
- __ mvn(result, Operand(left));
- } else {
- __ eor(result, left, right);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (right_op->IsRegister()) {
- // Mask the right_op operand.
- __ and_(scratch, ToRegister(right_op), Operand(0x1F));
- switch (instr->op()) {
- case Token::ROR:
- __ mov(result, Operand(left, ROR, scratch));
- break;
- case Token::SAR:
- __ mov(result, Operand(left, ASR, scratch));
- break;
- case Token::SHR:
- if (instr->can_deopt()) {
- __ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr, DeoptimizeReason::kNegativeValue);
- } else {
- __ mov(result, Operand(left, LSR, scratch));
- }
- break;
- case Token::SHL:
- __ mov(result, Operand(left, LSL, scratch));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ROR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ASR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, LSR, shift_count));
- } else {
- if (instr->can_deopt()) {
- __ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- if (instr->hydrogen_value()->representation().IsSmi() &&
- instr->can_deopt()) {
- if (shift_count != 1) {
- __ mov(result, Operand(left, LSL, shift_count - 1));
- __ SmiTag(result, result, SetCC);
- } else {
- __ SmiTag(result, left, SetCC);
- }
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ mov(result, Operand(left, LSL, shift_count));
- }
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoRSubI(LRSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- DwVfpRegister result = ToDoubleRegister(instr->result());
-#if V8_HOST_ARCH_IA32
- // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
- // builds.
- uint64_t bits = instr->bits();
- if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
- V8_UINT64_C(0x7FF0000000000000)) {
- uint32_t lo = static_cast<uint32_t>(bits);
- uint32_t hi = static_cast<uint32_t>(bits >> 32);
- __ mov(ip, Operand(lo));
- __ mov(scratch0(), Operand(hi));
- __ vmov(result, ip, scratch0());
- return;
- }
-#endif
- double v = instr->value();
- __ Vmov(result, v, scratch0());
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ Move(ToRegister(instr->result()), object);
-}
-
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
- Register scratch = scratch0();
- DCHECK(!scratch.is(string));
- DCHECK(!scratch.is(ToRegister(index)));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ add(scratch, string, Operand(ToRegister(index)));
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
- }
- return FieldMemOperand(scratch, SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- __ and_(scratch, scratch,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ ldrb(result, operand);
- } else {
- __ ldrh(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
-
- if (FLAG_debug_code) {
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ strb(value, operand);
- } else {
- __ strh(value, operand);
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- Register left_reg = ToRegister(left);
- Operand right_op = (right->IsRegister() || right->IsConstantOperand())
- ? ToOperand(right)
- : Operand(EmitLoadRegister(right, ip));
- Register result_reg = ToRegister(instr->result());
- __ cmp(left_reg, right_op);
- __ Move(result_reg, left_reg, condition);
- __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- DwVfpRegister left_reg = ToDoubleRegister(left);
- DwVfpRegister right_reg = ToDoubleRegister(right);
- DwVfpRegister result_reg = ToDoubleRegister(instr->result());
- Label result_is_nan, return_left, return_right, check_zero, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- if (operation == HMathMinMax::kMathMin) {
- __ b(mi, &return_left);
- __ b(gt, &return_right);
- } else {
- __ b(mi, &return_right);
- __ b(gt, &return_left);
- }
- __ b(vs, &result_is_nan);
- // Left equals right => check for -0.
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
- __ b(ne, &done); // left == right != 0.
- } else {
- __ b(ne, &return_left); // left == right != 0.
- }
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- // We could use a single 'vorr' instruction here if we had NEON support.
- // The algorithm is: -((-L) + (-R)), which in case of L and R being
- // different registers is most efficiently expressed as -((-L) - R).
- __ vneg(left_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ vadd(result_reg, left_reg, right_reg);
- } else {
- __ vsub(result_reg, left_reg, right_reg);
- }
- __ vneg(result_reg, result_reg);
- } else {
- // Since we operate on +0 and/or -0, vadd and vand have the same effect;
- // the decision for vadd is easy because vand is a NEON instruction.
- __ vadd(result_reg, left_reg, right_reg);
- }
- __ b(&done);
-
- __ bind(&result_is_nan);
- __ vadd(result_reg, left_reg, right_reg);
- __ b(&done);
-
- __ bind(&return_right);
- __ Move(result_reg, right_reg);
- if (!left_reg.is(result_reg)) {
- __ b(&done);
- }
-
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DwVfpRegister left = ToDoubleRegister(instr->left());
- DwVfpRegister right = ToDoubleRegister(instr->right());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ vadd(result, left, right);
- break;
- case Token::SUB:
- __ vsub(result, left, right);
- break;
- case Token::MUL:
- __ vmul(result, left, right);
- break;
- case Token::DIV:
- __ vdiv(result, left, right);
- break;
- case Token::MOD: {
- __ PrepareCallCFunction(0, 2, scratch0());
- __ MovToFloatParameters(left, right);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- // Move the result in the double result register.
- __ MovFromFloatResult(result);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r1));
- DCHECK(ToRegister(instr->right()).is(r0));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- // Block literal pool emission to ensure nop indicating no inlined smi code
- // is in the correct position.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
- if (right_block == left_block || condition == al) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ b(condition, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ b(condition, chunk_->GetAssemblyLabel(left_block));
- __ b(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) {
- int true_block = instr->TrueDestination(chunk_);
- __ b(condition, chunk_->GetAssemblyLabel(true_block));
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
- int false_block = instr->FalseDestination(chunk_);
- __ b(condition, chunk_->GetAssemblyLabel(false_block));
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ stop("LBreak");
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32() || r.IsSmi()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ cmp(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- DwVfpRegister reg = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- __ VFPCompareAndSetFlags(reg, 0.0);
- __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
- EmitBranch(instr, ne);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- __ cmp(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, al);
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- DwVfpRegister dbl_scratch = double_scratch0();
- __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
- __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
- EmitBranch(instr, ne);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand::Zero());
- EmitBranch(instr, ne);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ b(eq, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // Boolean -> its value.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ b(eq, instr->TrueLabel(chunk_));
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ b(eq, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ b(eq, instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ cmp(reg, Operand::Zero());
- __ b(eq, instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ SmiTst(reg);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
- }
-
- const Register map = scratch0();
- if (expected & ToBooleanHint::kNeedsMap) {
- __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, instr->FalseLabel(chunk_));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string);
- __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand::Zero());
- __ b(ne, instr->TrueLabel(chunk_));
- __ b(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CompareInstanceType(map, ip, SYMBOL_TYPE);
- __ b(eq, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- DwVfpRegister dbl_scratch = double_scratch0();
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
- __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
- __ cmp(r0, r0, vs); // NaN -> false.
- __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
- __ b(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cond = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Compare left and right operands as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ b(vs, instr->FalseLabel(chunk_));
- } else {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
- } else {
- __ cmp(ToRegister(left), Operand(value));
- }
- } else if (left->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
- } else {
- __ cmp(ToRegister(right), Operand(value));
- }
- // We commuted the operands, so commute the condition.
- cond = CommuteCondition(cond);
- } else {
- __ cmp(ToRegister(left), ToRegister(right));
- }
- }
- EmitBranch(instr, cond);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- __ cmp(left, Operand(right));
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ mov(ip, Operand(factory()->the_hole_value()));
- __ cmp(input_reg, ip);
- EmitBranch(instr, eq);
- return;
- }
-
- DwVfpRegister input_reg = ToDoubleRegister(instr->object());
- __ VFPCompareAndSetFlags(input_reg, input_reg);
- EmitFalseBranch(instr, vc);
-
- Register scratch = scratch0();
- __ VmovHigh(scratch, input_reg);
- __ cmp(scratch, Operand(kHoleNanUpper32));
- EmitBranch(instr, eq);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ SmiTst(input_reg);
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ tst(temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(instr, ne);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r1));
- DCHECK(ToRegister(instr->right()).is(r0));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ b(hs, is_true);
- } else {
- __ b(hs, is_false);
- }
-
- // Check if the constructor in the map is a function.
- Register instance_type = ip;
- __ GetMapConstructor(temp, temp, temp2, instance_type);
-
- // Objects with a non-function constructor have class 'Object'.
- __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
- if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
- __ b(ne, is_true);
- } else {
- __ b(ne, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(temp,
- FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, Operand(class_name));
- // End with the answer in flags.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, eq);
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(temp, Operand(instr->map()));
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = scratch0();
- Register const object_instance_type = ip;
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ SmiTst(object);
- EmitFalseBranch(instr, eq);
- }
-
- // Loop through the {object}'s prototype chain looking for the {prototype}.
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ ldrb(object_instance_type,
- FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
- // Deoptimize for proxies.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
-
- __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- EmitFalseBranch(instr, eq);
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, eq);
- __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ b(&loop);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals that no smi code was inlined.
- __ cmp(r0, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kTrueValueRootIndex,
- condition);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kFalseValueRootIndex,
- NegateCondition(condition));
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0. We're leaving the code
- // managed by the register allocator and tearing down the frame, so it's
- // safe to write to the context register.
- __ push(r0);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
- if (NeedsEagerFrame()) {
- masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
- }
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (sp_delta != 0) {
- __ add(sp, sp, Operand(sp_delta));
- }
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi.
- __ SmiUntag(reg);
- __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
- }
-
- __ Jump(lr);
- }
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- } else {
- __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- } else {
- __ b(ne, &skip_assignment);
- }
- }
-
- __ str(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch,
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = MemOperand(object, offset);
- __ Load(result, operand, access.representation());
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vldr(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
- MemOperand operand = FieldMemOperand(object, offset);
- __ Load(result, operand, access.representation());
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ b(ne, &done);
-
- // Get the prototype from the initial map.
- __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them; add one more.
- if (instr->length()->IsConstantOperand()) {
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int index = (const_length - const_index) + 1;
- __ ldr(result, MemOperand(arguments, index * kPointerSize));
- } else {
- Register index = ToRegister(instr->index());
- __ rsb(result, index, Operand(const_length + 1));
- __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
- }
- } else if (instr->index()->IsConstantOperand()) {
- Register length = ToRegister(instr->length());
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int loc = const_index - 1;
- if (loc != 0) {
- __ sub(result, length, Operand(loc));
- __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
- } else {
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
- }
- } else {
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- __ sub(result, length, index);
- __ add(result, result, Operand(1));
- __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
- __ add(scratch0(), external_pointer, operand);
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ vldr(double_scratch0().low(), scratch0(), base_offset);
- __ vcvt_f64_f32(result, double_scratch0().low());
- } else { // i.e. elements_kind == FLOAT64_ELEMENTS
- __ vldr(result, scratch0(), base_offset);
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size, base_offset);
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ ldrsb(result, mem_operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ ldrb(result, mem_operand);
- break;
- case INT16_ELEMENTS:
- __ ldrsh(result, mem_operand);
- break;
- case UINT16_ELEMENTS:
- __ ldrh(result, mem_operand);
- break;
- case INT32_ELEMENTS:
- __ ldr(result, mem_operand);
- break;
- case UINT32_ELEMENTS:
- __ ldr(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-
- int base_offset = instr->base_offset();
- if (key_is_constant) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- base_offset += constant_key * kDoubleSize;
- }
- __ add(scratch, elements, Operand(base_offset));
-
- if (!key_is_constant) {
- key = ToRegister(instr->key());
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ add(scratch, scratch, Operand(key, LSL, shift_size));
- }
-
- __ vldr(result, scratch, 0);
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- Register key = ToRegister(instr->key());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- }
- __ ldr(result, MemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ SmiTst(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- __ b(ne, &done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- __ cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int base_offset) {
- if (key_is_constant) {
- return MemOperand(base, (constant_key << element_size) + base_offset);
- }
-
- if (base_offset == 0) {
- if (shift_size >= 0) {
- return MemOperand(base, key, LSL, shift_size);
- } else {
- DCHECK_EQ(-1, shift_size);
- return MemOperand(base, key, LSR, 1);
- }
- }
-
- if (shift_size >= 0) {
- __ add(scratch0(), base, Operand(key, LSL, shift_size));
- return MemOperand(scratch0(), base_offset);
- } else {
- DCHECK_EQ(-1, shift_size);
- __ add(scratch0(), base, Operand(key, ASR, 1));
- return MemOperand(scratch0(), base_offset);
- }
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ sub(result, sp, Operand(2 * kPointerSize));
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch,
- CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(result,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ mov(result, fp, LeaveCC, ne);
- __ mov(result, scratch, LeaveCC, eq);
- } else {
- __ mov(result, fp);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If there is no arguments adaptor frame, the number of arguments is fixed.
- __ cmp(fp, elem);
- __ mov(result, Operand(scope()->num_parameters()));
- __ b(eq, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, result_in_receiver;
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions.
- __ ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- __ tst(scratch, Operand(mask));
- __ b(ne, &result_in_receiver);
-
- // Do not transform the receiver to object for builtins.
- __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &result_in_receiver);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_object);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_object);
-
- // Deoptimize if the receiver is not a JS object.
- __ SmiTst(receiver);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ b(&result_in_receiver);
- __ bind(&global_object);
- __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
-
- if (result.is(receiver)) {
- __ bind(&result_in_receiver);
- } else {
- Label result_ok;
- __ b(&result_ok);
- __ bind(&result_in_receiver);
- __ mov(result, receiver);
- __ bind(&result_ok);
- }
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DCHECK(receiver.is(r0)); // Used for parameter count.
- DCHECK(function.is(r1)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(r0));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ mov(receiver, length);
- // The arguments are at a one-pointer-size offset from elements.
- __ add(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ cmp(length, Operand::Zero());
- __ b(eq, &invoke);
- __ bind(&loop);
- __ ldr(scratch, MemOperand(elements, length, LSL, 2));
- __ push(scratch);
- __ sub(length, length, Operand(1), SetCC);
- __ b(ne, &loop);
-
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(r0);
- // It is safe to use r3, r4 and r5 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) r3 (new.target) will be initialized below.
- PrepareForTailCall(actual, r3, r4, r5);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver which is r0, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- Register argument_reg = EmitLoadRegister(argument, ip);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- DCHECK(result.is(cp));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- __ Move(scratch0(), instr->hydrogen()->declarations());
- __ push(scratch0());
- __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- __ push(scratch0());
- __ Move(scratch0(), instr->hydrogen()->feedback_vector());
- __ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = r1;
-
- LPointerMap* pointers = instr->pointer_map();
-
- if (can_invoke_directly) {
- // Change context.
- __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ mov(r0, Operand(arity));
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- if (is_tail_call) {
- __ Jump(ip);
- } else {
- __ Call(ip);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- DCHECK(instr->context() != NULL);
- DCHECK(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ tst(exponent, Operand(HeapNumber::kSignMask));
- // Move the input to the result if necessary.
- __ Move(result, input);
- __ b(eq, &done);
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r1) ? r0 : r1;
- Register tmp2 = input.is(r2) ? r0 : r2;
- Register tmp3 = input.is(r3) ? r0 : r3;
- Register tmp4 = input.is(r4) ? r0 : r4;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
- // Set the pointer to the new heap number in tmp1.
- if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
- __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ cmp(input, Operand::Zero());
- __ Move(result, input, pl);
- // We can make rsb conditional because the previous cmp instruction
- // will clear the V (overflow) flag and rsb won't set this flag
- // if input is positive.
- __ rsb(result, input, Operand::Zero(), SetCC, mi);
- // Deoptimize on overflow.
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vabs(result, input);
- } else if (r.IsSmiOrInteger32()) {
- EmitIntegerMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register input_high = scratch0();
- Label done, exact;
-
- __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- __ bind(&exact);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
- DwVfpRegister input_plus_dot_five = double_scratch1;
- Register input_high = scratch0();
- DwVfpRegister dot_five = double_scratch0();
- Label convert, done;
-
- __ Vmov(dot_five, 0.5, scratch0());
- __ vabs(double_scratch1, input);
- __ VFPCompareAndSetFlags(double_scratch1, dot_five);
- // If input is in [-0.5, -0], the result is -0.
- // If input is in [+0, +0.5[, the result is +0.
- // If the input is +0.5, the result is 1.
- __ b(hi, &convert); // Out of [-0.5, +0.5].
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ VmovHigh(input_high, input);
- __ cmp(input_high, Operand::Zero());
- // [-0.5, -0].
- DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
- }
- __ VFPCompareAndSetFlags(input, dot_five);
- __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
- // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
- // flag kBailoutOnMinusZero.
- __ mov(result, Operand::Zero(), LeaveCC, ne);
- __ b(&done);
-
- __ bind(&convert);
- __ vadd(input_plus_dot_five, input, dot_five);
- // Reuse dot_five (double_scratch0) as we no longer need this value.
- __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
- &done, &done);
- DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- DwVfpRegister output_reg = ToDoubleRegister(instr->result());
- LowDwVfpRegister scratch = double_scratch0();
- __ vcvt_f32_f64(scratch.low(), input_reg);
- __ vcvt_f64_f32(output_reg, scratch.low());
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vsqrt(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- DwVfpRegister temp = double_scratch0();
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done;
- __ vmov(temp, -V8_INFINITY, scratch0());
- __ VFPCompareAndSetFlags(input, temp);
- __ vneg(result, temp, eq);
- __ b(&done, eq);
-
- // Add +0 to convert -0 to +0.
- __ vadd(result, input, kDoubleRegZero);
- __ vsqrt(result, result);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d1));
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(d0));
- DCHECK(ToDoubleRegister(instr->result()).is(d2));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- DCHECK(!r6.is(tagged_exponent));
- __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ clz(result, input);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ cmp(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load the caller's formal parameter count.
- __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r1));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use r3, r4 and r5 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) r3 (new.target) will be initialized below.
- PrepareForTailCall(actual, r3, r4, r5);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(r1, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(r0));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ Jump(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- // Make sure we don't emit any additional entries in the constant pool
- // before the call to ensure that the CallCodeSize() calculated the
- // correct number of instructions for the constant pool load.
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
- __ Jump(target);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- PlatformInterfaceDescriptor* call_descriptor =
- instr->descriptor().platform_specific_descriptor();
- if (call_descriptor != NULL) {
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
- call_descriptor->storage_mode());
- } else {
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
- }
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- // Make sure we don't emit any additional entries in the constant pool
- // before the call to ensure that the CallCodeSize() calculated the
- // correct number of instructions for the constant pool load.
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
- __ Call(target);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- __ mov(r0, Operand(instr->arity()));
- __ Move(r2, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
- // We might need a change here: look at the first argument.
- __ ldr(r5, MemOperand(sp, 0));
- __ cmp(r5, Operand::Zero());
- __ b(eq, &packed_case);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ jmp(&done);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(code_object,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ add(result, base, Operand(ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ add(result, base, offset);
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- Register scratch = scratch0();
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = MemOperand(object, offset);
- __ Store(value, operand, representation);
- return;
- }
-
- __ AssertNotSmi(object);
-
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
- if (representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- DwVfpRegister value = ToDoubleRegister(instr->value());
- __ vstr(value, FieldMemOperand(object, offset));
- return;
- }
-
- if (instr->hydrogen()->has_transition()) {
- Handle<Map> transition = instr->hydrogen()->transition_map();
- AddDeprecationDependency(transition);
- __ mov(scratch, Operand(transition));
- __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object,
- scratch,
- temp,
- GetLinkRegisterState(),
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register value = ToRegister(instr->value());
- if (access.IsInobject()) {
- MemOperand operand = FieldMemOperand(object, offset);
- __ Store(value, operand, representation);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- scratch,
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
- } else {
- __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- MemOperand operand = FieldMemOperand(scratch, offset);
- __ Store(value, operand, representation);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(scratch,
- offset,
- value,
- object,
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
- if (instr->index()->IsConstantOperand()) {
- Operand index = ToOperand(instr->index());
- Register length = ToRegister(instr->length());
- __ cmp(length, index);
- cc = CommuteCondition(cc);
- } else {
- Register index = ToRegister(instr->index());
- Operand length = ToOperand(instr->length());
- __ cmp(index, length);
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ b(NegateCondition(cc), &done);
- __ stop("eliminated bounds check failed");
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- Register address = scratch0();
- DwVfpRegister value(ToDoubleRegister(instr->value()));
- if (key_is_constant) {
- if (constant_key != 0) {
- __ add(address, external_pointer,
- Operand(constant_key << element_size_shift));
- } else {
- address = external_pointer;
- }
- } else {
- __ add(address, external_pointer, Operand(key, LSL, shift_size));
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), address, base_offset);
- } else { // Storing doubles, not floats.
- __ vstr(value, address, base_offset);
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- base_offset);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- __ strb(value, mem_operand);
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ strh(value, mem_operand);
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ str(value, mem_operand);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DwVfpRegister double_scratch = double_scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int base_offset = instr->base_offset();
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- if (key_is_constant) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- __ add(scratch, elements,
- Operand((constant_key << element_size_shift) + base_offset));
- } else {
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ add(scratch, elements, Operand(base_offset));
- __ add(scratch, scratch,
- Operand(ToRegister(instr->key()), LSL, shift_size));
- }
-
- if (instr->NeedsCanonicalization()) {
- // Force a canonical NaN.
- __ VFPCanonicalizeNaN(double_scratch, value);
- __ vstr(double_scratch, scratch, 0);
- } else {
- __ vstr(value, scratch, 0);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
-    // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- }
- __ str(value, MemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset));
- __ RecordWrite(elements,
- key,
- value,
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed,
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = r0;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ jmp(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ cmp(ToRegister(current_capacity), Operand(constant_key));
- __ b(le, deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ cmp(ToRegister(key), Operand(constant_capacity));
- __ b(ge, deferred->entry());
- } else {
- __ cmp(ToRegister(key), ToRegister(current_capacity));
- __ b(ge, deferred->entry());
- }
-
- if (instr->elements()->IsRegister()) {
- __ Move(result, ToRegister(instr->elements()));
- } else {
- __ ldr(result, ToMemOperand(instr->elements()));
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = r0;
- __ mov(result, Operand::Zero());
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ Move(result, ToRegister(instr->object()));
- } else {
- __ ldr(result, ToMemOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ mov(r3, Operand(Smi::FromInt(int_key)));
- } else {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- Label is_smi;
- __ SmiTag(r3, ToRegister(key), SetCC);
- // Deopt if the key is outside Smi range. The stub expects Smi and would
- // bump the elements into dictionary mode (and trigger a deopt) anyways.
- __ b(vc, &is_smi);
- __ PopSafepointRegisters();
- DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_smi);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ SmiTst(result);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(from_map));
- __ b(ne, &not_applicable);
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, Operand(to_map));
- __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteForMap(object_reg,
- new_map_reg,
- scratch,
- GetLinkRegisterState(),
- kDontSaveFPRegs);
- } else {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(object_reg.is(r0));
- PushSafepointRegistersScope scope(this);
- __ Move(r1, to_map);
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r1));
- DCHECK(ToRegister(instr->right()).is(r0));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ mov(scratch, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
- instr->context());
- __ AssertSmi(r0);
- __ SmiUntag(r0);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- DCHECK(!char_code.is(result));
-
- __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
- __ b(hi, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
- __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result, ip);
- __ b(eq, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- DCHECK(output->IsDoubleRegister());
- SwVfpRegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ ldr(scratch, ToMemOperand(input));
- __ vmov(single_scratch, scratch);
- } else {
- __ vmov(single_scratch, ToRegister(input));
- }
- __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- SwVfpRegister flt_scratch = double_scratch0().low();
- __ vmov(flt_scratch, ToRegister(input));
- __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2(),
- SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTag(dst, src, SetCC);
- __ b(vs, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2(),
- UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(input, Operand(Smi::kMaxValue));
- __ b(hi, deferred->entry());
- __ SmiTag(result, input);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness) {
- Label done, slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- Register tmp1 = scratch0();
- Register tmp2 = ToRegister(temp1);
- Register tmp3 = ToRegister(temp2);
- LowDwVfpRegister dbl_scratch = double_scratch0();
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ eor(src, src, Operand(0x80000000));
- }
- __ vmov(dbl_scratch.low(), src);
- __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
- } else {
- __ vmov(dbl_scratch.low(), src);
- __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
- __ b(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ mov(dst, Operand::Zero());
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!dst.is(cp)) {
- __ mov(cp, Operand::Zero());
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r0, dst);
- }
-
- // Done. Put the value in dbl_scratch into the value of the allocated heap
- // number.
- __ bind(&done);
- __ vstr(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ vstr(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(cp)) {
- __ mov(cp, Operand::Zero());
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- }
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- __ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ SmiTag(output, input);
- }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr, DeoptimizeReason::kNotASmi);
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- DwVfpRegister result_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Register scratch = scratch0();
- SwVfpRegister flt_scratch = double_scratch0().low();
- DCHECK(!result_reg.is(double_scratch0()));
- Label convert, load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (can_convert_undefined_to_nan) {
- __ b(ne, &convert);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- }
- // load heap number
- __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
- if (deoptimize_on_minus_zero) {
- __ VmovLow(scratch, result_reg);
- __ cmp(scratch, Operand::Zero());
- __ b(ne, &done);
- __ VmovHigh(scratch, result_reg);
- __ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done);
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
- __ jmp(&done);
- }
- } else {
- __ SmiUntag(scratch, input_reg);
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ vmov(flt_scratch, scratch);
- __ vcvt_f64_s32(result_reg, flt_scratch);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- LowDwVfpRegister double_scratch = double_scratch0();
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
-
- DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // The input was optimistically untagged; revert it.
- // The carry flag is set when we reach this deferred code as we just executed
- // SmiUntag(heap_object, SetCC)
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ adc(scratch2, input_reg, Operand(input_reg));
-
- // Heap number map check.
- __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, Operand(ip));
-
- if (instr->truncating()) {
- Label truncate;
- __ b(eq, &truncate);
- __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ bind(&truncate);
- __ TruncateHeapNumberToI(input_reg, scratch2);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
- __ sub(ip, scratch2, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand::Zero());
- __ b(ne, &done);
- __ VmovHigh(scratch1, double_scratch2);
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- DCHECK(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DwVfpRegister result_reg = ToDoubleRegister(result);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagD(instr, input_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DwVfpRegister double_input = ToDoubleRegister(instr->value());
- LowDwVfpRegister double_scratch = double_scratch0();
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
-    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ cmp(result_reg, Operand::Zero());
- __ b(ne, &done);
- __ VmovHigh(scratch1, double_input);
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DwVfpRegister double_input = ToDoubleRegister(instr->value());
- LowDwVfpRegister double_scratch = double_scratch0();
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
-    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ cmp(result_reg, Operand::Zero());
- __ b(ne, &done);
- __ VmovHigh(scratch1, double_input);
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
- __ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = scratch0();
-
- __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
- __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmp(scratch, Operand(first));
-
-    // If there is only one type in the interval, check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr,
- DeoptimizeReason::kWrongInstanceType);
- } else {
- __ and_(scratch, scratch, Operand(mask));
- __ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ mov(ip, Operand(cell));
- __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
- __ cmp(reg, ip);
- } else {
- __ cmp(reg, Operand(object));
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Label deopt, done;
-  // If the map is not deprecated, the migration attempt does not make sense.
- __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldr(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
- __ tst(scratch0(), Operand(Map::Deprecated::kMask));
- __ b(eq, &deopt);
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ mov(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r0, scratch0());
- }
- __ tst(scratch0(), Operand(kSmiTagMask));
- __ b(ne, &done);
-
- __ bind(&deopt);
- DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- Register map_reg = scratch0();
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
-
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(map_reg, map, &success);
- __ b(eq, &success);
- }
-
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(map_reg, map, &success);
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ b(ne, deferred->entry());
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
-
- // Check for heap number
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(factory()->heap_number_map()));
- __ b(eq, &heap_number);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ mov(result_reg, Operand::Zero());
- __ jmp(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
- __ jmp(&done);
-
- // smi
- __ bind(&is_smi);
- __ ClampUint8(result_reg, result_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ mov(scratch, Operand(size - kHeapObjectTag));
- } else {
- __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
- }
- __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- Label loop;
- __ bind(&loop);
- __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
- __ str(scratch2, MemOperand(result, scratch));
- __ b(ge, &loop);
- }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand(Smi::kZero));
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(size);
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size >= 0 && size <= Smi::kMaxValue) {
- __ Push(Smi::FromInt(size));
- } else {
- // We should never get here at runtime => abort
- __ stop("invalid allocation size");
- return;
- }
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Push(Smi::FromInt(flags));
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(r0, result);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-    // If the allocation folding dominator allocation triggered a GC, the
-    // allocation happened in the runtime. We have to reset the top pointer to
-    // virtually undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- Register top_address = scratch0();
- __ sub(r0, r0, Operand(kHeapObjectTag));
- __ mov(top_address, Operand(allocation_top));
- __ str(r0, MemOperand(top_address));
- __ add(r0, r0, Operand(kHeapObjectTag));
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->value()).is(r3));
- DCHECK(ToRegister(instr->result()).is(r0));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ mov(r0, Operand(isolate()->factory()->number_string()));
- __ jmp(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
- instr->FalseLabel(chunk_),
- input,
- instr->type_literal());
- if (final_branch_condition != kNoCondition) {
- EmitBranch(instr, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
- final_branch_condition = lt;
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, false_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = ne;
-
- } else if (String::Equals(type_name, factory->function_string())) {
- __ JumpIfSmi(input, false_label);
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ and_(scratch, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ cmp(scratch, Operand(1 << Map::kIsCallable));
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->object_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, true_label);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, false_label);
- // Check for callable or undetectable objects => false.
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- final_branch_condition = eq;
-
- } else {
- __ b(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block literal pool emission for duration of padding.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
-
- DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &done);
- Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- masm()->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(masm());
- predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(cp));
- CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Operand(Smi::kZero));
- __ b(ne, &load_cache);
- __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ ldr(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object);
- __ Push(index);
- __ mov(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- result_(result),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register result_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, result, object, index);
-
- Label out_of_object, done;
-
- __ tst(index, Operand(Smi::FromInt(1)));
- __ b(ne, deferred->entry());
- __ mov(index, Operand(index, ASR, 1));
-
- __ cmp(index, Operand::Zero());
- __ b(lt, &out_of_object);
-
- __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
- __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ b(&done);
-
- __ bind(&out_of_object);
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-  // Index is equal to the negated out-of-object property index plus 1.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
- __ ldr(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
deleted file mode 100644
index 77094e55af..0000000000
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
-
-#include "src/ast/scopes.h"
-#include "src/crankshaft/arm/lithium-arm.h"
-#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- LinkRegisterStatus GetLinkRegisterState() const {
- return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DwVfpRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch);
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int base_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- Register scratch0() { return r9; }
- LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
-
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode);
-
- void CallCode(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
-
- void CallCodeGeneric(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in r1.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason);
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- DwVfpRegister ToDoubleRegister(int index) const;
-
- MemOperand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition condition);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition condition);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- DwVfpRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- codegen_->masm_->PushSafepointRegisters();
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc
deleted file mode 100644
index daf439f53c..0000000000
--- a/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
-#include "src/assembler-inl.h"
-#include "src/crankshaft/arm/lithium-codegen-arm.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// We use the root register to spill a value while breaking a cycle in parallel
-// moves. We don't need access to roots while resolving the move list, and using
-// the root register has two advantages:
-// - It is not in the Crankshaft allocatable register list, so it can't interfere
-// with any of the moves we are resolving.
-// - We don't need to push it on the stack, as we can reload it with its value
-// once we have resolved a cycle.
-#define kSavedValueRegister kRootRegister
-
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
- saved_destination_(NULL), need_to_restore_root_(false) { }
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
-      root_index_ = i;  // Any cycle is found by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- if (need_to_restore_root_) {
- DCHECK(kSavedValueRegister.is(kRootRegister));
- __ InitializeRootRegister();
- need_to_restore_root_ = false;
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
-  // We can only find a cycle, when doing a depth-first traversal of moves,
-  // by encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
-      // If there is a blocking, pending move, it must be moves_[root_index_],
-      // and all other moves with the same source as moves_[root_index_] are
-      // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-void LGapResolver::BreakCycle(int index) {
-  // We save the source of that move in a register and remember its
-  // destination. Then we mark this move as resolved so the cycle is
- // broken and we can perform the other moves.
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
- DCHECK(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- need_to_restore_root_ = true;
- __ mov(kSavedValueRegister, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- need_to_restore_root_ = true;
- __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- DCHECK(in_cycle_);
- DCHECK(saved_destination_ != NULL);
-
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
- } else if (saved_destination_->IsStackSlot()) {
- __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- DCHECK(destination->IsStackSlot());
- __ str(source_register, cgen_->ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ ldr(cgen_->ToRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
- // Therefore we can't use ip. It is OK if the read from the source
- // destroys ip, since that happens before the value is read.
- __ vldr(kScratchDoubleReg.low(), source_operand);
- __ vstr(kScratchDoubleReg.low(), destination_operand);
- } else {
- __ ldr(ip, source_operand);
- __ str(ip, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
- } else {
- __ Move(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DwVfpRegister result = cgen_->ToDoubleRegister(destination);
- double v = cgen_->ToDouble(constant_source);
- __ Vmov(result, v, ip);
- } else {
- DCHECK(destination->IsStackSlot());
- DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
- need_to_restore_root_ = true;
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ mov(kSavedValueRegister,
- Operand(cgen_->ToRepresentation(constant_source, r)));
- } else {
- __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
- }
- __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ vmov(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ vstr(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kScratchDoubleReg was used to break the cycle.
- __ vpush(kScratchDoubleReg);
- __ vldr(kScratchDoubleReg, source_operand);
- __ vstr(kScratchDoubleReg, destination_operand);
- __ vpop(kScratchDoubleReg);
- } else {
- __ vldr(kScratchDoubleReg, source_operand);
- __ vstr(kScratchDoubleReg, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} // namespace internal
-} // namespace v8
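The cycle-breaking scheme described in the comments of the deleted gap resolver can be illustrated with a small stand-alone sketch: registers are modelled as plain integers, and Move, Resolver, BreakCycle and RestoreValue below are illustrative stand-ins rather than the V8 classes. As in the deleted code, the point is only the depth-first traversal plus a single spill to break a cycle; constants and load/store details are omitted.

#include <cstdio>
#include <vector>

// Each move copies the value in register `src` into register `dst`.
struct Move {
  int src;
  int dst;
  bool eliminated = false;
  bool pending = false;
};

struct Resolver {
  std::vector<Move> moves;
  std::vector<int>& regs;   // simulated register file
  size_t root = 0;          // move at which the current traversal started
  bool in_cycle = false;
  int saved_value = 0;
  int saved_dst = -1;

  explicit Resolver(std::vector<int>& r) : regs(r) {}

  // A move blocks `dst` if it still needs to read from `dst`.
  bool Blocks(const Move& m, int dst) const {
    return !m.eliminated && m.src == dst;
  }

  void EmitMove(size_t i) {
    regs[moves[i].dst] = regs[moves[i].src];
    moves[i].eliminated = true;
  }

  // Spill the value this move would clobber and drop the move; the spilled
  // value is written to its destination once the rest of the cycle is done.
  void BreakCycle(size_t i) {
    in_cycle = true;
    saved_value = regs[moves[i].src];
    saved_dst = moves[i].dst;
    moves[i].eliminated = true;
  }

  void RestoreValue() {
    regs[saved_dst] = saved_value;
    in_cycle = false;
  }

  void PerformMove(size_t i) {
    moves[i].pending = true;
    int dst = moves[i].dst;
    // Recursively perform every unperformed, unpending move that reads `dst`.
    for (size_t j = 0; j < moves.size(); ++j) {
      if (!moves[j].pending && Blocks(moves[j], dst)) PerformMove(j);
    }
    moves[i].pending = false;
    // If the root move still reads `dst` and is pending, we found a cycle.
    if (Blocks(moves[root], dst) && moves[root].pending) {
      BreakCycle(i);
      return;
    }
    EmitMove(i);
  }

  void Resolve() {
    for (size_t i = 0; i < moves.size(); ++i) {
      if (moves[i].eliminated) continue;
      root = i;
      PerformMove(i);
      if (in_cycle) RestoreValue();
    }
  }
};

int main() {
  std::vector<int> regs = {0, 10, 20};   // r0, r1, r2
  Resolver resolver(regs);
  resolver.moves = {{2, 1}, {1, 2}};     // r1 <- r2 and r2 <- r1 in parallel
  resolver.Resolve();
  std::printf("r1=%d r2=%d\n", regs[1], regs[2]);  // prints r1=20 r2=10
  return 0;
}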
diff --git a/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h b/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h
deleted file mode 100644
index 59413c5772..0000000000
--- a/deps/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#define V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-
- // We use the root register as a scratch in a few places. When that happens,
- // this flag is set to indicate that it needs to be restored.
- bool need_to_restore_root_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/deps/v8/src/crankshaft/arm64/OWNERS b/deps/v8/src/crankshaft/arm64/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/crankshaft/arm64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h
deleted file mode 100644
index c8299ec07e..0000000000
--- a/deps/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
-#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
-
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/crankshaft/arm64/delayed-masm-arm64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-DelayedMasm::DelayedMasm(LCodeGen* owner, MacroAssembler* masm,
- const Register& scratch_register)
- : cgen_(owner),
- masm_(masm),
- scratch_register_(scratch_register),
- scratch_register_used_(false),
- pending_(kNone),
- saved_value_(0) {
-#ifdef DEBUG
- pending_register_ = no_reg;
- pending_value_ = 0;
- pending_pc_ = 0;
- scratch_register_acquired_ = false;
-#endif
-}
-
-void DelayedMasm::EndDelayedUse() {
- EmitPending();
- DCHECK(!scratch_register_acquired_);
- ResetSavedValue();
-}
-
-
-void DelayedMasm::Mov(const Register& rd,
- const Operand& operand,
- DiscardMoveMode discard_mode) {
- EmitPending();
- DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
- __ Mov(rd, operand, discard_mode);
-}
-
-
-void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
- EmitPending();
- __ Fmov(fd, fn);
-}
-
-
-void DelayedMasm::Fmov(FPRegister fd, double imm) {
- EmitPending();
- __ Fmov(fd, imm);
-}
-
-
-void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
- EmitPending();
- DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
- __ LoadObject(result, object);
-}
-
-void DelayedMasm::InitializeRootRegister() { masm_->InitializeRootRegister(); }
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
diff --git a/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc
deleted file mode 100644
index c6a03939b9..0000000000
--- a/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/crankshaft/arm64/delayed-masm-arm64.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
- DCHECK((src->IsStackSlot() && dst->IsStackSlot()) ||
- (src->IsDoubleStackSlot() && dst->IsDoubleStackSlot()));
- MemOperand src_operand = cgen_->ToMemOperand(src);
- MemOperand dst_operand = cgen_->ToMemOperand(dst);
- if (pending_ == kStackSlotMove) {
- DCHECK(pending_pc_ == masm_->pc_offset());
- UseScratchRegisterScope scope(masm_);
- DoubleRegister temp1 = scope.AcquireD();
- DoubleRegister temp2 = scope.AcquireD();
- switch (MemOperand::AreConsistentForPair(pending_address_src_,
- src_operand)) {
- case MemOperand::kNotPair:
- __ Ldr(temp1, pending_address_src_);
- __ Ldr(temp2, src_operand);
- break;
- case MemOperand::kPairAB:
- __ Ldp(temp1, temp2, pending_address_src_);
- break;
- case MemOperand::kPairBA:
- __ Ldp(temp2, temp1, src_operand);
- break;
- }
- switch (MemOperand::AreConsistentForPair(pending_address_dst_,
- dst_operand)) {
- case MemOperand::kNotPair:
- __ Str(temp1, pending_address_dst_);
- __ Str(temp2, dst_operand);
- break;
- case MemOperand::kPairAB:
- __ Stp(temp1, temp2, pending_address_dst_);
- break;
- case MemOperand::kPairBA:
- __ Stp(temp2, temp1, dst_operand);
- break;
- }
- ResetPending();
- return;
- }
-
- EmitPending();
- pending_ = kStackSlotMove;
- pending_address_src_ = src_operand;
- pending_address_dst_ = dst_operand;
-#ifdef DEBUG
- pending_pc_ = masm_->pc_offset();
-#endif
-}
-
-
-void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
- DCHECK(!scratch_register_acquired_);
- if ((pending_ == kStoreConstant) && (value == pending_value_)) {
- MemOperand::PairResult result =
- MemOperand::AreConsistentForPair(pending_address_dst_, operand);
- if (result != MemOperand::kNotPair) {
- const MemOperand& dst =
- (result == MemOperand::kPairAB) ?
- pending_address_dst_ :
- operand;
- DCHECK(pending_pc_ == masm_->pc_offset());
- if (pending_value_ == 0) {
- __ Stp(xzr, xzr, dst);
- } else {
- SetSavedValue(pending_value_);
- __ Stp(ScratchRegister(), ScratchRegister(), dst);
- }
- ResetPending();
- return;
- }
- }
-
- EmitPending();
- pending_ = kStoreConstant;
- pending_address_dst_ = operand;
- pending_value_ = value;
-#ifdef DEBUG
- pending_pc_ = masm_->pc_offset();
-#endif
-}
-
-
-void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
- if ((pending_ == kLoad) &&
- pending_register_.IsSameSizeAndType(rd)) {
- switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) {
- case MemOperand::kNotPair:
- break;
- case MemOperand::kPairAB:
- DCHECK(pending_pc_ == masm_->pc_offset());
- DCHECK(!IsScratchRegister(pending_register_) ||
- scratch_register_acquired_);
- DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
- __ Ldp(pending_register_, rd, pending_address_src_);
- ResetPending();
- return;
- case MemOperand::kPairBA:
- DCHECK(pending_pc_ == masm_->pc_offset());
- DCHECK(!IsScratchRegister(pending_register_) ||
- scratch_register_acquired_);
- DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
- __ Ldp(rd, pending_register_, operand);
- ResetPending();
- return;
- }
- }
-
- EmitPending();
- pending_ = kLoad;
- pending_register_ = rd;
- pending_address_src_ = operand;
-#ifdef DEBUG
- pending_pc_ = masm_->pc_offset();
-#endif
-}
-
-
-void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
- if ((pending_ == kStore) &&
- pending_register_.IsSameSizeAndType(rd)) {
- switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) {
- case MemOperand::kNotPair:
- break;
- case MemOperand::kPairAB:
- DCHECK(pending_pc_ == masm_->pc_offset());
- __ Stp(pending_register_, rd, pending_address_dst_);
- ResetPending();
- return;
- case MemOperand::kPairBA:
- DCHECK(pending_pc_ == masm_->pc_offset());
- __ Stp(rd, pending_register_, operand);
- ResetPending();
- return;
- }
- }
-
- EmitPending();
- pending_ = kStore;
- pending_register_ = rd;
- pending_address_dst_ = operand;
-#ifdef DEBUG
- pending_pc_ = masm_->pc_offset();
-#endif
-}
-
-
-void DelayedMasm::EmitPending() {
- DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
- switch (pending_) {
- case kNone:
- return;
- case kStoreConstant:
- if (pending_value_ == 0) {
- __ Str(xzr, pending_address_dst_);
- } else {
- SetSavedValue(pending_value_);
- __ Str(ScratchRegister(), pending_address_dst_);
- }
- break;
- case kLoad:
- DCHECK(!IsScratchRegister(pending_register_) ||
- scratch_register_acquired_);
- __ Ldr(pending_register_, pending_address_src_);
- break;
- case kStore:
- __ Str(pending_register_, pending_address_dst_);
- break;
- case kStackSlotMove: {
- UseScratchRegisterScope scope(masm_);
- DoubleRegister temp = scope.AcquireD();
- __ Ldr(temp, pending_address_src_);
- __ Str(temp, pending_address_dst_);
- break;
- }
- }
- ResetPending();
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h b/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h
deleted file mode 100644
index 2dd36b725e..0000000000
--- a/deps/v8/src/crankshaft/arm64/delayed-masm-arm64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
-#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-
-// This class delays the generation of some instructions. This way, we have a
-// chance to merge two instructions into one (using a load/store pair).
-// Each instruction must either:
-// - merge with the pending instruction and generate just one instruction, or
-// - emit the pending instruction and then generate the instruction (or set the
-//   pending instruction).
-class DelayedMasm BASE_EMBEDDED {
- public:
- inline DelayedMasm(LCodeGen* owner, MacroAssembler* masm,
- const Register& scratch_register);
-
- ~DelayedMasm() {
- DCHECK(!scratch_register_acquired_);
- DCHECK(!scratch_register_used_);
- DCHECK(!pending());
- }
- inline void EndDelayedUse();
-
- const Register& ScratchRegister() {
- scratch_register_used_ = true;
- return scratch_register_;
- }
- bool IsScratchRegister(const CPURegister& reg) {
- return reg.Is(scratch_register_);
- }
- bool scratch_register_used() const { return scratch_register_used_; }
- void reset_scratch_register_used() { scratch_register_used_ = false; }
- // Acquire/Release scratch register for use outside this class.
- void AcquireScratchRegister() {
- EmitPending();
- ResetSavedValue();
-#ifdef DEBUG
- DCHECK(!scratch_register_acquired_);
- scratch_register_acquired_ = true;
-#endif
- }
- void ReleaseScratchRegister() {
-#ifdef DEBUG
- DCHECK(scratch_register_acquired_);
- scratch_register_acquired_ = false;
-#endif
- }
- bool pending() { return pending_ != kNone; }
-
- // Extra layer over the macro-assembler instructions (which emits the
- // potential pending instruction).
- inline void Mov(const Register& rd,
- const Operand& operand,
- DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
- inline void Fmov(FPRegister fd, FPRegister fn);
- inline void Fmov(FPRegister fd, double imm);
- inline void LoadObject(Register result, Handle<Object> object);
-  // Instructions which try to merge with the pending instruction.
- void StackSlotMove(LOperand* src, LOperand* dst);
- // StoreConstant can only be used if the scratch register is not acquired.
- void StoreConstant(uint64_t value, const MemOperand& operand);
- void Load(const CPURegister& rd, const MemOperand& operand);
- void Store(const CPURegister& rd, const MemOperand& operand);
- // Emit the potential pending instruction.
- void EmitPending();
- // Reset the pending state.
- void ResetPending() {
- pending_ = kNone;
-#ifdef DEBUG
- pending_register_ = no_reg;
- MemOperand tmp;
- pending_address_src_ = tmp;
- pending_address_dst_ = tmp;
- pending_value_ = 0;
- pending_pc_ = 0;
-#endif
- }
- inline void InitializeRootRegister();
-
- private:
- // Set the saved value and load the ScratchRegister with it.
- void SetSavedValue(uint64_t saved_value) {
- DCHECK(saved_value != 0);
- if (saved_value_ != saved_value) {
- masm_->Mov(ScratchRegister(), saved_value);
- saved_value_ = saved_value;
- }
- }
- // Reset the saved value (i.e. the value of ScratchRegister is no longer
- // known).
- void ResetSavedValue() {
- saved_value_ = 0;
- }
-
- LCodeGen* cgen_;
- MacroAssembler* masm_;
-
- // Register used to store a constant.
- Register scratch_register_;
- bool scratch_register_used_;
-
- // Sometimes we store or load two values in two contiguous stack slots.
- // In this case, we try to use the ldp/stp instructions to reduce code size.
-  // To be able to do that, instead of generating the instructions directly,
-  // we record in the following fields that an instruction needs to be
-  // generated. Then, when the next instruction arrives, if it can be paired
-  // with the pending one for stp/ldp, we generate ldp/stp. Otherwise, we
-  // generate the pending instruction and record the new instruction (which
-  // becomes pending).
-
- // Enumeration of instructions which can be pending.
- enum Pending {
- kNone,
- kStoreConstant,
- kLoad, kStore,
- kStackSlotMove
- };
- // The pending instruction.
- Pending pending_;
- // For kLoad, kStore: register which must be loaded/stored.
- CPURegister pending_register_;
- // For kLoad, kStackSlotMove: address of the load.
- MemOperand pending_address_src_;
- // For kStoreConstant, kStore, kStackSlotMove: address of the store.
- MemOperand pending_address_dst_;
- // For kStoreConstant: value to be stored.
- uint64_t pending_value_;
-  // Value held in the ScratchRegister if saved_value_ is not 0.
- // For 0, we use xzr.
- uint64_t saved_value_;
-#ifdef DEBUG
- // Address where the pending instruction must be generated. It's only used to
- // check that nothing else has been generated since we set the pending
- // instruction.
- int pending_pc_;
- // If true, the scratch register has been acquired outside this class. The
- // scratch register can no longer be used for constants.
- bool scratch_register_acquired_;
-#endif
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
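The merging behaviour documented in DelayedMasm (holding one access pending and emitting a single stp when the next access is pairable) can be sketched with a toy emitter. The strings printed below are pseudo-assembly, only the ascending store-pair case is handled, and DelayedEmitter is an illustrative stand-in, not a V8 API.

#include <cstdio>
#include <optional>
#include <string>

struct PendingStore {
  std::string reg;
  int slot;  // stack slot index; slot i lives at offset 8 * i
};

class DelayedEmitter {
 public:
  // Buffer the store; if it targets the slot right after the pending one,
  // emit a single store-pair instead of two stores.
  void Store(const std::string& reg, int slot) {
    if (pending_ && pending_->slot + 1 == slot) {
      std::printf("stp %s, %s, [sp, #%d]\n", pending_->reg.c_str(),
                  reg.c_str(), 8 * pending_->slot);
      pending_.reset();
      return;
    }
    EmitPending();
    pending_ = PendingStore{reg, slot};
  }

  // Flush the buffered store, if any; must be called before anything that
  // could observe memory or clobber the buffered register.
  void EmitPending() {
    if (!pending_) return;
    std::printf("str %s, [sp, #%d]\n", pending_->reg.c_str(),
                8 * pending_->slot);
    pending_.reset();
  }

 private:
  std::optional<PendingStore> pending_;
};

int main() {
  DelayedEmitter masm;
  masm.Store("x0", 0);
  masm.Store("x1", 1);  // pairs with the previous store: stp x0, x1, [sp, #0]
  masm.Store("x2", 5);  // nothing pending to pair with; becomes pending
  masm.EmitPending();   // flushes the last store: str x2, [sp, #40]
  return 0;
}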
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
deleted file mode 100644
index a62940f20a..0000000000
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ /dev/null
@@ -1,2493 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/arm64/lithium-arm64.h"
-
-#include <sstream>
-
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
-    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access();
- stream->Add(os.str().c_str());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("%p -> %p", *original_map(), *transitioned_map());
-}
-
-
-template<int T>
-void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "shl-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
- DoubleRegister fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant() ? UseConstant(value) : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
-}
-
-
-LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? UseConstant(value)
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateResultInstruction<1>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
-  // If the instruction does not have side effects, lazy deoptimization
-  // after the call will try to deoptimize to the point before the call.
-  // Thus we still need to attach an environment to this call even if
-  // the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LUnallocated* LChunkBuilder::TempDoubleRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-int LPlatformChunk::GetNextSpillIndex() { return current_frame_slots_++; }
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex();
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info_, graph_);
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- // TODO(all): GetNextSpillIndex just increments a field. It has no other
- // side effects, so we should get rid of this loop.
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex();
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- DoBasicBlock(blocks->at(i));
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
- DCHECK(is_building());
- current_block_ = block;
-
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
-
-    // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
- (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
-
- // Translate hydrogen instructions to lithium ones for the current block.
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while ((current != NULL) && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // the it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
-
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right = UseFixedDouble(instr->right(), d1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
- (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
- (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
- (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
- (op == Token::BIT_XOR));
- HValue* left = instr->left();
- HValue* right = instr->right();
-
- // TODO(jbramley): Once we've implemented smi support for all arithmetic
- // operations, these assertions should check IsTagged().
- DCHECK(instr->representation().IsSmiOrTagged());
- DCHECK(left->representation().IsSmiOrTagged());
- DCHECK(right->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, x1);
- LOperand* right_operand = UseFixed(right, x0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = NULL;
- LOperand* length = NULL;
- LOperand* index = NULL;
-
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- args = UseRegisterAtStart(instr->arguments());
- length = UseConstant(instr->length());
- index = UseConstant(instr->index());
- } else {
- args = UseRegister(instr->arguments());
- length = UseRegisterAtStart(instr->length());
- index = UseRegisterOrConstantAtStart(instr->index());
- }
-
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
- if (shifted_operation != NULL) {
- return shifted_operation;
- }
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right =
- UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- LInstruction* result = instr->representation().IsSmi() ?
- DefineAsRegister(new(zone()) LAddS(left, right)) :
- DefineAsRegister(new(zone()) LAddI(left, right));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return DefineAsRegister(new(zone()) LAddE(left, right));
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- DCHECK(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- if (instr->IsAllocationFolded()) {
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
- LAllocate* result =
- new (zone()) LAllocate(context, size, temp1, temp2, temp3);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), x1);
- LOperand* receiver = UseFixed(instr->receiver(), x0);
- LOperand* length = UseFixed(instr->length(), x2);
- LOperand* elements = UseFixed(instr->elements(), x3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
- return DefineAsRegister(new(zone()) LArgumentsElements(temp));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LArgumentsLength(value));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
- if (shifted_operation != NULL) {
- return shifted_operation;
- }
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right =
- UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- return instr->representation().IsSmi() ?
- DefineAsRegister(new(zone()) LBitS(left, right)) :
- DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- // V8 expects a label to be generated for each basic block.
- // This is used in some places like LAllocator::IsBlockBoundary
- // in lithium-allocator.cc
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseRegisterOrConstantAtStart(instr->length())
- : UseRegisterAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
-
- if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
- // These representations have simple checks that cannot deoptimize.
- return new(zone()) LBranch(UseRegister(value), NULL, NULL);
- } else {
- DCHECK(r.IsTagged());
- if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
- type.IsHeapNumber()) {
- // These types have simple checks that cannot deoptimize.
- return new(zone()) LBranch(UseRegister(value), NULL, NULL);
- }
-
- if (type.IsString()) {
- // This type cannot deoptimize, but needs a scratch register.
- return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
- }
-
- ToBooleanHints expected = instr->expected_input_types();
- bool needs_temps = (expected & ToBooleanHint::kNeedsMap) ||
- expected == ToBooleanHint::kNone;
- LOperand* temp1 = needs_temps ? TempRegister() : NULL;
- LOperand* temp2 = needs_temps ? TempRegister() : NULL;
-
- if (expected == ToBooleanHint::kAny || expected == ToBooleanHint::kNone) {
- // The generic case cannot deoptimize because it already supports every
- // possible input type.
- DCHECK(needs_temps);
- return new(zone()) LBranch(UseRegister(value), temp1, temp2);
- } else {
- return AssignEnvironment(
- new(zone()) LBranch(UseRegister(value), temp1, temp2));
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), cp);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- if (i < descriptor.GetParameterCount()) {
- int argc = descriptor.GetParameterCount() - i;
- AddInstruction(new (zone()) LPreparePushArguments(argc), instr);
- LPushArguments* push_args = new (zone()) LPushArguments(zone());
- for (; i < descriptor.GetParameterCount(); i++) {
- if (push_args->ShouldSplitPush()) {
- AddInstruction(push_args, instr);
- push_args = new (zone()) LPushArguments(zone());
- }
- op = UseRegisterAtStart(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- push_args->AddArgument(op);
- }
- AddInstruction(push_args, instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
- ops,
- zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The call to ArrayConstructCode will expect the constructor to be in x1.
- LOperand* constructor = UseFixed(instr->constructor(), x1);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LOperand* temp = TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32()
- ? NULL : TempDoubleRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- DCHECK(to.IsSmi() || to.IsInteger32());
- if (instr->CanTruncateToInt32()) {
- LOperand* value = UseRegister(val);
- return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
- } else {
- LOperand* value = UseRegister(val);
- LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
- return AssignEnvironment(DefineAsRegister(result));
- }
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegister(val);
- LNumberTagU* result =
- new(zone()) LNumberTagU(value, TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
- (kMaxInt == Smi::kMaxValue));
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiTag(value));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(UseRegisterAtStart(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value, temp));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LClampTToUint8(reg,
- TempDoubleRegister())));
- }
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone())
- LClassOfTestAndBranch(value, TempRegister(), TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- LOperand* left = UseConstant(instr->left());
- LOperand* right = UseConstant(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), x1);
- LOperand* right = UseFixed(instr->right(), x0);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- if (instr->representation().IsTagged()) {
- return new(zone()) LCmpHoleAndBranchT(value);
- } else {
- LOperand* temp = TempRegister();
- return new(zone()) LCmpHoleAndBranchD(value, temp);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, cp);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
- ? NULL : TempRegister();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
- dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
- ? NULL : TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
- if (!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if ((instr->arguments_var() != NULL) &&
- instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(
- HForceRepresentation* instr) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LOperand* scratch1 = TempRegister();
- LOperand* scratch2 = TempRegister();
- LHasInPrototypeChainAndBranch* result = new (zone())
- LHasInPrototypeChainAndBranch(object, prototype, scratch1, scratch2);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
- LOperand* function = UseFixed(instr->function(), x1);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer =
- current_block_->last_environment()->DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- LOperand* function = UseRegister(instr->function());
- LOperand* temp = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(function, temp)));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* elements = UseRegister(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
-
- if (!instr->is_fixed_typed_array()) {
- if (instr->representation().IsDouble()) {
- LOperand* temp = (!instr->key()->IsConstant() ||
- instr->RequiresHoleCheck())
- ? TempRegister()
- : NULL;
- LInstruction* result = DefineAsRegister(
- new (zone()) LLoadKeyedFixedDouble(elements, key, temp));
- if (instr->RequiresHoleCheck()) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(instr->representation().IsSmiOrTagged() ||
- instr->representation().IsInteger32());
- LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
- LInstruction* result =
- DefineAsRegister(new (zone()) LLoadKeyedFixed(elements, key, temp));
- if (instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED &&
- info()->IsStub())) {
- result = AssignEnvironment(result);
- }
- return result;
- }
- } else {
- DCHECK((instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
-
- LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- LInstruction* result = DefineAsRegister(new (zone()) LLoadKeyedExternal(
- elements, key, backing_store_owner, temp));
- if (elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32)) {
- result = AssignEnvironment(result);
- }
- return result;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(object));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* remainder = TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LFlooringDivI(dividend, divisor, remainder));
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
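The flooring-division lowerings above exist because JavaScript's Math.floor(a / b) rounds toward negative infinity, while plain integer division truncates toward zero. A minimal standalone C++ sketch of that difference (not part of the deleted file; FlooringDiv is an illustrative helper, not a V8 function):

  #include <cassert>

  // C++ integer division truncates toward zero; a flooring division must
  // round toward negative infinity, so the results differ exactly when the
  // operands have opposite signs and there is a non-zero remainder.
  int FlooringDiv(int a, int b) {
    int q = a / b;
    if ((a % b != 0) && ((a < 0) != (b < 0))) --q;
    return q;
  }

  int main() {
    assert(-7 / 2 == -3);              // truncating division
    assert(FlooringDiv(-7, 2) == -4);  // what a flooring divide must produce
    assert(FlooringDiv(7, 2) == 3);    // same as truncation when signs agree
    return 0;
  }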
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
- dividend, divisor, temp));
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-
- HValue* least_const = instr->BetterLeftOperand();
- HValue* most_const = instr->BetterRightOperand();
-
- // LMulConstI can handle a subset of constants:
- // With support for overflow detection:
- // -1, 0, 1, 2
- // 2^n, -(2^n)
- // Without support for overflow detection:
- // 2^n + 1, -(2^n - 1)
- if (most_const->IsConstant()) {
- int32_t constant = HConstant::cast(most_const)->Integer32Value();
- bool small_constant = (constant >= -1) && (constant <= 2);
- bool end_range_constant = (constant <= -kMaxInt) || (constant == kMaxInt);
- int32_t constant_abs = Abs(constant);
-
- if (!end_range_constant &&
- (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) ||
- (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) ||
- base::bits::IsPowerOfTwo32(constant_abs - 1))))) {
- LConstantOperand* right = UseConstant(most_const);
- bool need_register =
- base::bits::IsPowerOfTwo32(constant_abs) && !small_constant;
- LOperand* left = need_register ? UseRegister(least_const)
- : UseRegisterAtStart(least_const);
- LInstruction* result =
- DefineAsRegister(new(zone()) LMulConstIS(left, right));
- if ((bailout_on_minus_zero && constant <= 0) ||
- (can_overflow && constant != 1 &&
- base::bits::IsPowerOfTwo32(constant_abs))) {
- result = AssignEnvironment(result);
- }
- return result;
- }
- }
-
- // LMulI/S can handle all cases, but it requires that a register is
- // allocated for the second operand.
- LOperand* left = UseRegisterAtStart(least_const);
- LOperand* right = UseRegisterAtStart(most_const);
- LInstruction* result = instr->representation().IsSmi()
- ? DefineAsRegister(new(zone()) LMulS(left, right))
- : DefineAsRegister(new(zone()) LMulI(left, right));
- if ((bailout_on_minus_zero && least_const != most_const) || can_overflow) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
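As a side note to the LMulConstI comment above: those constant shapes are the ones that reduce to a single shift, or to a shift folded into an add/sub (the shifted-operand forms ARM64 provides). A minimal standalone C++ sketch of the reduction (not part of the deleted file; the helper names are illustrative only):

  #include <cassert>
  #include <cstdint>

  static int32_t MulByPow2(int32_t x, int n)          { return x << n; }        // x * 2^n
  static int32_t MulByPow2Plus1(int32_t x, int n)     { return x + (x << n); }  // x * (2^n + 1)
  static int32_t MulByNegPow2Minus1(int32_t x, int n) { return x - (x << n); }  // x * -(2^n - 1)

  int main() {
    assert(MulByPow2(7, 3) == 7 * 8);            // 2^3
    assert(MulByPow2Plus1(7, 2) == 7 * 5);       // 2^2 + 1
    assert(MulByNegPow2Minus1(7, 2) == 7 * -3);  // -(2^2 - 1)
    return 0;
  }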
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk_->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
-  // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d0);
- LOperand* right;
- if (exponent_type.IsInteger32()) {
- right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent());
- } else if (exponent_type.IsDouble()) {
- right = UseFixedDouble(instr->right(), d1);
- } else {
- right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- }
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d0),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- AddInstruction(new(zone()) LPreparePushArguments(argc), instr);
-
- LPushArguments* push_args = new(zone()) LPushArguments(zone());
-
- for (int i = 0; i < argc; ++i) {
- if (push_args->ShouldSplitPush()) {
- AddInstruction(push_args, instr);
- push_args = new(zone()) LPushArguments(zone());
- }
- push_args->AddArgument(UseRegister(instr->argument(i)));
- }
-
- return push_args;
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), cp)
- : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
- parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* temp = TempRegister();
- LSeqStringGetChar* result =
- new(zone()) LSeqStringGetChar(string, index, temp);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegister(instr->index())
- : UseRegisterOrConstant(instr->index());
- LOperand* value = UseRegister(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- LOperand* temp = TempRegister();
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(context, string, index, value, temp);
- return DefineAsRegister(result);
-}
-
-
-HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
- HValue** left) {
- if (!val->representation().IsInteger32()) return NULL;
- if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
-
- HBinaryOperation* hinstr = HBinaryOperation::cast(val);
- HValue* hleft = hinstr->left();
- HValue* hright = hinstr->right();
- DCHECK(hleft->representation().Equals(hinstr->representation()));
- DCHECK(hright->representation().Equals(hinstr->representation()));
-
- if (hleft == hright) return NULL;
-
- if ((hright->IsConstant() &&
- LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
- (hinstr->IsCommutative() && hleft->IsConstant() &&
- LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
- // The constant operand will likely fit in the immediate field. We are
- // better off with
- // lsl x8, x9, #imm
- // add x0, x8, #imm2
- // than with
- // mov x16, #imm2
- // add x0, x16, x9 LSL #imm
- return NULL;
- }
-
- HBitwiseBinaryOperation* shift = NULL;
- // TODO(aleram): We will miss situations where a shift operation is used by
-  // different instructions as both a left and a right operand.
- if (hright->IsBitwiseBinaryShift() &&
- HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
- shift = HBitwiseBinaryOperation::cast(hright);
- if (left != NULL) {
- *left = hleft;
- }
- } else if (hinstr->IsCommutative() &&
- hleft->IsBitwiseBinaryShift() &&
- HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
- shift = HBitwiseBinaryOperation::cast(hleft);
- if (left != NULL) {
- *left = hright;
- }
- } else {
- return NULL;
- }
-
- if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
- // Shifts right by zero can deoptimize.
- return NULL;
- }
-
- return shift;
-}
-
-
-bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
- if (!shift->representation().IsInteger32()) {
- return false;
- }
- for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
- if (shift != CanTransformToShiftedOp(it.value())) {
- return false;
- }
- }
- return true;
-}
-
-
-LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
- HBinaryOperation* instr) {
- HValue* left;
- HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
-
- if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
- return DoShiftedBinaryOp(instr, left, shift);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoShiftedBinaryOp(
- HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
- DCHECK(hshift->IsBitwiseBinaryShift());
- DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
-
- LTemplateResultInstruction<1>* res;
- LOperand* left = UseRegisterAtStart(hleft);
- LOperand* right = UseRegisterAtStart(hshift->left());
- LOperand* shift_amount = UseConstant(hshift->right());
- Shift shift_op;
- switch (hshift->opcode()) {
- case HValue::kShl: shift_op = LSL; break;
- case HValue::kShr: shift_op = LSR; break;
- case HValue::kSar: shift_op = ASR; break;
- default: UNREACHABLE(); shift_op = NO_SHIFT;
- }
-
- if (hinstr->IsBitwise()) {
- res = new(zone()) LBitI(left, right, shift_op, shift_amount);
- } else if (hinstr->IsAdd()) {
- res = new(zone()) LAddI(left, right, shift_op, shift_amount);
- } else {
- DCHECK(hinstr->IsSub());
- res = new(zone()) LSubI(left, right, shift_op, shift_amount);
- }
- if (hinstr->CheckFlag(HValue::kCanOverflow)) {
- AssignEnvironment(res);
- }
- return DefineAsRegister(res);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- return DoArithmeticT(op, instr);
- }
-
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- if (ShiftCanBeOptimizedAway(instr)) {
- return NULL;
- }
-
- LOperand* left = instr->representation().IsSmi()
- ? UseRegister(instr->left())
- : UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
-
- // The only shift that can deoptimize is `left >>> 0`, where left is negative.
- // In these cases, the result is a uint32 that is too large for an int32.
- bool right_can_be_zero = !instr->right()->IsConstant() ||
- (JSShiftAmountFromHConstant(instr->right()) == 0);
- bool can_deopt = false;
- if ((op == Token::SHR) && right_can_be_zero) {
- can_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result;
- if (instr->representation().IsInteger32()) {
- result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt));
- } else {
- DCHECK(instr->representation().IsSmi());
- result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt));
- }
-
- return can_deopt ? AssignEnvironment(result) : result;
-}
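To make the deopt condition above concrete: `left >>> 0` is the one shift whose result may not fit back into an int32, because a negative left reinterpreted as uint32 exceeds kMaxInt. A standalone C++ sketch (not part of the deleted file; UnsignedShiftRight is an illustrative helper):

  #include <cassert>
  #include <cstdint>

  // JS `left >>> n` reinterprets left as uint32 and shifts by n & 31.
  uint32_t UnsignedShiftRight(int32_t left, int n) {
    return static_cast<uint32_t>(left) >> (n & 31);
  }

  int main() {
    assert(UnsignedShiftRight(-1, 0) == 4294967295u);  // > kMaxInt: cannot stay an untagged int32
    assert(UnsignedShiftRight(-1, 1) == 2147483647u);  // fits in int32, no deopt needed
    return 0;
  }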
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
- LOperand* function = UseRegister(instr->function());
- LOperand* code_object = UseRegisterAtStart(instr->code_object());
- LOperand* temp = TempRegister();
- return new(zone()) LStoreCodeEntry(function, code_object, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* temp = TempRegister();
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- // TODO(all): Replace these constraints when RecordWriteStub has been
- // rewritten.
- context = UseRegisterAndClobber(instr->context());
- value = UseRegisterAndClobber(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* temp = NULL;
- LOperand* elements = NULL;
- LOperand* val = NULL;
-
- if (!instr->is_fixed_typed_array() &&
- instr->value()->representation().IsTagged() &&
- instr->NeedsWriteBarrier()) {
- // RecordWrite() will clobber all registers.
- elements = UseRegisterAndClobber(instr->elements());
- val = UseRegisterAndClobber(instr->value());
- temp = TempRegister();
- } else {
- elements = UseRegister(instr->elements());
- val = UseRegister(instr->value());
- temp = instr->key()->IsConstant() ? NULL : TempRegister();
- }
-
- if (instr->is_fixed_typed_array()) {
- DCHECK((instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK(instr->elements()->representation().IsExternal());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone())
- LStoreKeyedExternal(elements, key, val, backing_store_owner, temp);
-
- } else if (instr->value()->representation().IsDouble()) {
- DCHECK(instr->elements()->representation().IsTagged());
- return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
-
- } else {
- DCHECK(instr->elements()->representation().IsTagged());
- DCHECK(instr->value()->representation().IsSmiOrTagged() ||
- instr->value()->representation().IsInteger32());
- return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- // TODO(jbramley): It might be beneficial to allow value to be a constant in
- // some cases. x64 makes use of this with FLAG_track_fields, for example.
-
- LOperand* object = UseRegister(instr->object());
- LOperand* value;
- LOperand* temp0 = NULL;
- LOperand* temp1 = NULL;
-
- if (instr->access().IsExternalMemory() ||
- (!FLAG_unbox_double_fields && instr->field_representation().IsDouble())) {
- value = UseRegister(instr->value());
- } else if (instr->NeedsWriteBarrier()) {
- value = UseRegisterAndClobber(instr->value());
- temp0 = TempRegister();
- temp1 = TempRegister();
- } else if (instr->NeedsWriteBarrierForMap()) {
- value = UseRegister(instr->value());
- temp0 = TempRegister();
- temp1 = TempRegister();
- } else {
- value = UseRegister(instr->value());
- temp0 = TempRegister();
- }
-
- return new(zone()) LStoreNamedField(object, value, temp0, temp1);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), x1);
- LOperand* right = UseFixed(instr->right(), x0);
-
- LStringAdd* result = new(zone()) LStringAdd(context, left, right);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseRegisterAndClobber(instr->string());
- LOperand* index = UseRegisterAndClobber(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), x1);
- LOperand* right = UseFixed(instr->right(), x0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
- if (shifted_operation != NULL) {
- return shifted_operation;
- }
-
- LOperand *left;
- if (instr->left()->IsConstant() &&
- (HConstant::cast(instr->left())->Integer32Value() == 0)) {
- left = UseConstant(instr->left());
- } else {
- left = UseRegisterAtStart(instr->left());
- }
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- LInstruction* result = instr->representation().IsSmi() ?
- DefineAsRegister(new(zone()) LSubS(left, right)) :
- DefineAsRegister(new(zone()) LSubI(left, right));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- if (instr->HasNoUses()) {
- return NULL;
- } else {
- return DefineAsRegister(new(zone()) LThisFunction);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL,
- TempRegister(), TempRegister());
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), x0);
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp1, temp2);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseRegister(instr->object());
- LOperand* elements = UseRegister(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, x0);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), x3);
- LTypeof* result = new (zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- // We only need temp registers in some cases, but we can't dereference the
- // instr->type_literal() handle to test that here.
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- return new(zone()) LTypeofIsAndBranch(
- UseRegister(instr->value()), temp1, temp2);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs: {
- Representation r = instr->representation();
- if (r.IsTagged()) {
- // The tagged case might need to allocate a HeapNumber for the result,
- // so it is handled by a separate LInstruction.
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* temp3 = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3));
- return AssignEnvironment(AssignPointerMap(result));
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result = DefineAsRegister(new(zone()) LMathAbs(input));
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
- }
- }
- case kMathCos: {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- LMathCos* result = new (zone()) LMathCos(input);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- }
- case kMathSin: {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- LMathSin* result = new (zone()) LMathSin(input);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- }
- case kMathExp: {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- LMathExp* result = new (zone()) LMathExp(input);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- }
- case kMathFloor: {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- if (instr->representation().IsInteger32()) {
- LMathFloorI* result = new(zone()) LMathFloorI(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathFloorD* result = new(zone()) LMathFloorD(input);
- return DefineAsRegister(result);
- }
- }
- case kMathLog: {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, d0), instr);
- }
- case kMathPowHalf: {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- return DefineAsRegister(new(zone()) LMathPowHalf(input));
- }
- case kMathRound: {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- if (instr->representation().IsInteger32()) {
- LOperand* temp = TempDoubleRegister();
- LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathRoundD* result = new(zone()) LMathRoundD(input);
- return DefineAsRegister(result);
- }
- }
- case kMathFround: {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
- }
- case kMathSqrt: {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMathSqrt(input));
- }
- case kMathClz32: {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->value()->representation().IsInteger32());
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMathClz32(input));
- }
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk_->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // Assign object to a fixed register different from those already used in
- // LForInPrepareMap.
- LOperand* object = UseFixed(instr->enumerable(), x0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegister(instr->map());
- LOperand* temp = TempRegister();
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- LOperand* index = UseRegisterAndClobber(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegister(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
deleted file mode 100644
index 026f65cb97..0000000000
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ /dev/null
@@ -1,2849 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
-#define V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddE) \
- V(AddI) \
- V(AddS) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BitS) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallWithDescriptor) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckMapValue) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpHoleAndBranchD) \
- V(CmpHoleAndBranchT) \
- V(CmpMapAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpT) \
- V(CompareNumericAndBranch) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToIntOrSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsSmiAndBranch) \
- V(IsStringAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyedExternal) \
- V(LoadKeyedFixed) \
- V(LoadKeyedFixedDouble) \
- V(LoadNamedField) \
- V(LoadRoot) \
- V(MathAbs) \
- V(MathAbsTagged) \
- V(MathClz32) \
- V(MathCos) \
- V(MathSin) \
- V(MathExp) \
- V(MathFloorD) \
- V(MathFloorI) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRoundD) \
- V(MathRoundI) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulConstIS) \
- V(MulI) \
- V(MulS) \
- V(NumberTagD) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PreparePushArguments) \
- V(PushArguments) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(ShiftS) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyedExternal) \
- V(StoreKeyedFixed) \
- V(StoreKeyedFixedDouble) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(SubS) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(TruncateDoubleToIntOrSmi) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(this->hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) { }
-
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int32_t bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return (R != 0) && (result() != NULL); }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
-
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
-
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const override { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments,
- LOperand* length,
- LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LAddE final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddE(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right)
- : shift_(NO_SHIFT), shift_amount_(0) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
- : shift_(shift), shift_amount_(shift_amount) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Shift shift() const { return shift_; }
- LOperand* shift_amount() const { return shift_amount_; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-
- protected:
- Shift shift_;
- LOperand* shift_amount_;
-};
-
-
-class LAddS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 3> {
- public:
- LAllocate(LOperand* context,
- LOperand* size,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LArgumentsElements(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right)
- : shift_(NO_SHIFT), shift_amount_(0) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
- : shift_(shift), shift_amount_(shift_amount) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Shift shift() const { return shift_; }
- LOperand* shift_amount() const { return shift_amount_; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-
- protected:
- Shift shift_;
- LOperand* shift_amount_;
-};
-
-
-class LBitS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LBranch final : public LControlInstruction<1, 2> {
- public:
-  explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp1) {
- inputs_[0] = unclamped;
- temps_[0] = temp1;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpHoleAndBranchD final : public LControlInstruction<1, 1> {
- public:
- explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LCmpHoleAndBranchT final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranchT(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LDoubleToIntOrSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToIntOrSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool tag_result() { return hydrogen()->representation().IsSmi(); }
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
- LOperand* scratch1, LOperand* scratch2) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- temps_[0] = scratch1;
- temps_[1] = scratch2;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
- LOperand* scratch1() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- CallInterfaceDescriptor descriptor() { return descriptor_; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() const { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-template <int T>
-class LLoadKeyed : public LTemplateInstruction<1, 3, T> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- this->inputs_[0] = elements;
- this->inputs_[1] = key;
- this->inputs_[2] = backing_store_owner;
- }
-
- LOperand* elements() { return this->inputs_[0]; }
- LOperand* key() { return this->inputs_[1]; }
- LOperand* backing_store_owner() { return this->inputs_[2]; }
- ElementsKind elements_kind() const {
- return this->hydrogen()->elements_kind();
- }
- bool is_external() const {
- return this->hydrogen()->is_external();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
- uint32_t base_offset() const {
- return this->hydrogen()->base_offset();
- }
- void PrintDataTo(StringStream* stream) override {
- this->elements()->PrintTo(stream);
- stream->Add("[");
- this->key()->PrintTo(stream);
- if (this->base_offset() != 0) {
- stream->Add(" + %d]", this->base_offset());
- } else {
- stream->Add("]");
- }
- }
-
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-};
-
-
-class LLoadKeyedExternal: public LLoadKeyed<1> {
- public:
- LLoadKeyedExternal(LOperand* elements, LOperand* key,
- LOperand* backing_store_owner, LOperand* temp)
- : LLoadKeyed<1>(elements, key, backing_store_owner) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external");
-};
-
-
-class LLoadKeyedFixed: public LLoadKeyed<1> {
- public:
- LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp)
- : LLoadKeyed<1>(elements, key, nullptr) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed");
-};
-
-
-class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
- public:
- LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp)
- : LLoadKeyed<1>(elements, key, nullptr) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double");
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-template<int T>
-class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
- public:
- explicit LUnaryMathOperation(LOperand* value) {
- this->inputs_[0] = value;
- }
-
- LOperand* value() { return this->inputs_[0]; }
- BuiltinFunctionId op() const { return this->hydrogen()->op(); }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathAbs final : public LUnaryMathOperation<0> {
- public:
- explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
-};
-
-
-class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
- public:
- LMathAbsTagged(LOperand* context, LOperand* value,
- LOperand* temp1, LOperand* temp2, LOperand* temp3) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-class LMathCos final : public LUnaryMathOperation<0> {
- public:
- explicit LMathCos(LOperand* value) : LUnaryMathOperation<0>(value) {}
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LUnaryMathOperation<0> {
- public:
- explicit LMathSin(LOperand* value) : LUnaryMathOperation<0>(value) {}
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LUnaryMathOperation<0> {
- public:
- explicit LMathExp(LOperand* value) : LUnaryMathOperation<0>(value) {}
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-// Math.floor with a double result.
-class LMathFloorD final : public LUnaryMathOperation<0> {
- public:
- explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
-};
-
-
-// Math.floor with an integer result.
-class LMathFloorI final : public LUnaryMathOperation<0> {
- public:
- explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMathLog final : public LUnaryMathOperation<0> {
- public:
- explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LUnaryMathOperation<0> {
- public:
- explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LMathPowHalf final : public LUnaryMathOperation<0> {
- public:
- explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-// Math.round with a double result.
-class LMathRoundD final : public LUnaryMathOperation<0> {
- public:
- explicit LMathRoundD(LOperand* value)
- : LUnaryMathOperation<0>(value) {
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
-};
-
-
-// Math.round with an integer result.
-class LMathRoundI final : public LUnaryMathOperation<1> {
- public:
- LMathRoundI(LOperand* value, LOperand* temp1)
- : LUnaryMathOperation<1>(value) {
- temps_[0] = temp1;
- }
-
- LOperand* temp1() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
-};
-
-
-class LMathFround final : public LUnaryMathOperation<0> {
- public:
- explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {}
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathSqrt final : public LUnaryMathOperation<0> {
- public:
- explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LModI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LMulConstIS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulConstIS(LOperand* left, LConstantOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
-
- DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LMulS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-s")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- explicit LNumberTagU(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberUntagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LPreparePushArguments final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LPreparePushArguments(int argc) : argc_(argc) {}
-
- inline int argc() const { return argc_; }
-
- DECLARE_CONCRETE_INSTRUCTION(PreparePushArguments, "prepare-push-arguments")
-
- protected:
- int argc_;
-};
-
-
-class LPushArguments final : public LTemplateResultInstruction<0> {
- public:
- explicit LPushArguments(Zone* zone,
- int capacity = kRecommendedMaxPushedArgs)
- : zone_(zone), inputs_(capacity, zone) {}
-
- LOperand* argument(int i) { return inputs_[i]; }
- int ArgumentCount() const { return inputs_.length(); }
-
- void AddArgument(LOperand* arg) { inputs_.Add(arg, zone_); }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArguments, "push-arguments")
-
- // It is better to limit the number of arguments pushed simultaneously to
- // avoid pressure on the register allocator.
- static const int kRecommendedMaxPushedArgs = 4;
- bool ShouldSplitPush() const {
- return inputs_.length() >= kRecommendedMaxPushedArgs;
- }
-
- protected:
- Zone* zone_;
- ZoneList<LOperand*> inputs_;
-
- private:
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* parameter_count() { return inputs_[2]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 1> {
- public:
- LSeqStringGetChar(LOperand* string,
- LOperand* index,
- LOperand* temp) {
- inputs_[0] = string;
- inputs_[1] = index;
- temps_[0] = temp;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 1> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value,
- LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-template <int T>
-class LStoreKeyed : public LTemplateInstruction<0, 4, T> {
- public:
- LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- this->inputs_[0] = elements;
- this->inputs_[1] = key;
- this->inputs_[2] = value;
- this->inputs_[3] = backing_store_owner;
- }
-
- bool is_external() const { return this->hydrogen()->is_external(); }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
- LOperand* elements() { return this->inputs_[0]; }
- LOperand* key() { return this->inputs_[1]; }
- LOperand* value() { return this->inputs_[2]; }
- LOperand* backing_store_owner() { return this->inputs_[3]; }
- ElementsKind elements_kind() const {
- return this->hydrogen()->elements_kind();
- }
-
- bool NeedsCanonicalization() {
- if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
- hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
- return false;
- }
- return this->hydrogen()->NeedsCanonicalization();
- }
- uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
-
- void PrintDataTo(StringStream* stream) override {
- this->elements()->PrintTo(stream);
- stream->Add("[");
- this->key()->PrintTo(stream);
- if (this->base_offset() != 0) {
- stream->Add(" + %d] <-", this->base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (this->value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- this->value()->PrintTo(stream);
- }
- }
-
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-};
-
-
-class LStoreKeyedExternal final : public LStoreKeyed<1> {
- public:
- LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* backing_store_owner, LOperand* temp)
- : LStoreKeyed<1>(elements, key, value, backing_store_owner) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
-};
-
-
-class LStoreKeyedFixed final : public LStoreKeyed<1> {
- public:
- LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp)
- : LStoreKeyed<1>(elements, key, value, nullptr) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
-};
-
-
-class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
- public:
- LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp)
- : LStoreKeyed<1>(elements, key, value, nullptr) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
- "store-keyed-fixed-double")
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value,
- LOperand* temp0, LOperand* temp1) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp0;
- temps_[1] = temp1;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp0() { return temps_[0]; }
- LOperand* temp1() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
- public:
- explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LShiftS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftS(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object,
- LOperand* temp) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right)
- : shift_(NO_SHIFT), shift_amount_(0) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
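- // Variant used when a preceding shift of the right operand has been merged
- // into this instruction (see CanTransformToShiftedOp in LChunkBuilder).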
- LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
- : shift_(shift), shift_amount_(shift_amount) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Shift shift() const { return shift_; }
- LOperand* shift_amount() const { return shift_amount_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-
- protected:
- Shift shift_;
- LOperand* shift_amount_;
-};
-
-
-class LSubS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* context() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() const { return hydrogen()->from_kind(); }
- ElementsKind to_kind() const { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
- public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = object;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
-};
-
-
-class LTruncateDoubleToIntOrSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
- "truncate-double-to-int-or-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool tag_result() { return hydrogen()->representation().IsSmi(); }
-};
-
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 2> {
- public:
- LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() const { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 1> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
- inputs_[0] = value;
- inputs_[1] = map;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex();
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HBinaryOperation* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- static bool HasMagicNumberForDivision(int32_t divisor);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // The operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
-
- // The operand created by UseRegisterAndClobber is guaranteed to be live until
- // the end of the instruction, and it may also be used as a scratch
- // register by the instruction implementation.
- //
- // This behaves identically to ARM's UseTempRegister. However, it is renamed
- // to discourage its use in ARM64, since in most cases it is better to
- // allocate a temporary register for the Lithium instruction.
- MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
-
- // The operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // A constant operand.
- MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
-
- // An input operand in a register, a stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
-
- // Temporary operand that must be in a double register.
- MUST_USE_RESULT LUnallocated* TempDoubleRegister();
-
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
-
- // Temporary operand that must be in a fixed double register.
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
-
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach an environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LInstruction* AssignPointerMap(LInstruction* instr);
- LInstruction* AssignEnvironment(LInstruction* instr);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
- void DoBasicBlock(HBasicBlock* block);
-
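- // ECMAScript shift operators only use the five least significant bits of
- // the shift count, hence the 0x1f mask.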
- int JSShiftAmountFromHConstant(HValue* constant) {
- return HConstant::cast(constant)->Integer32Value() & 0x1f;
- }
- bool LikelyFitsImmField(HInstruction* instr, int imm) {
- if (instr->IsAdd() || instr->IsSub()) {
- return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
- } else {
- DCHECK(instr->IsBitwise());
- unsigned unused_n, unused_imm_s, unused_imm_r;
- return Assembler::IsImmLogical(imm, kWRegSizeInBits,
- &unused_n, &unused_imm_s, &unused_imm_r);
- }
- }
-
- // Indicates if a sequence of the form
- // lsl x8, x9, #imm
- // add x0, x1, x8
- // can be replaced with:
- // add x0, x1, x9 LSL #imm
- // If this is not possible, the function returns NULL. Otherwise it returns a
- // pointer to the shift instruction that would be optimized away.
- HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
- HValue** left = NULL);
- // Checks if all uses of the shift operation can optimize it away.
- bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
- // Attempts to merge the binary operation and a preceding shift operation,
- // if any, into a single operation. Returns the merged instruction on
- // success, and NULL otherwise.
- LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
- LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
- HValue* left,
- HBitwiseBinaryOperation* shift);
-
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
deleted file mode 100644
index c86971c6ce..0000000000
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ /dev/null
@@ -1,5593 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
-
-#include "src/arm64/frames-arm64.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const { }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
- LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-
- UseScratchRegisterScope temps(codegen_->masm_);
- // Preserve the value of lr which must be saved on the stack (the call to
- // the stub will clobber it).
- Register to_be_pushed_lr =
- temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
- codegen_->masm_->Mov(to_be_pushed_lr, lr);
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
-}
-
-LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-}
-
-#define __ masm()->
-
-// Emit code to branch if the given condition holds.
-// The code generated here doesn't modify the flags and they must have
-// been set by some prior instructions.
-//
-// The EmitInverted function simply inverts the condition.
-class BranchOnCondition : public BranchGenerator {
- public:
- BranchOnCondition(LCodeGen* codegen, Condition cond)
- : BranchGenerator(codegen),
- cond_(cond) { }
-
- virtual void Emit(Label* label) const {
- __ B(cond_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- if (cond_ != al) {
- __ B(NegateCondition(cond_), label);
- }
- }
-
- private:
- Condition cond_;
-};
-
-
-// Emit code to compare lhs and rhs and branch if the condition holds.
-// This uses MacroAssembler's CompareAndBranch function so it will handle
-// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
-//
-// EmitInverted still compares the two operands but inverts the condition.
-class CompareAndBranch : public BranchGenerator {
- public:
- CompareAndBranch(LCodeGen* codegen,
- Condition cond,
- const Register& lhs,
- const Operand& rhs)
- : BranchGenerator(codegen),
- cond_(cond),
- lhs_(lhs),
- rhs_(rhs) { }
-
- virtual void Emit(Label* label) const {
- __ CompareAndBranch(lhs_, rhs_, cond_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
- }
-
- private:
- Condition cond_;
- const Register& lhs_;
- const Operand& rhs_;
-};
-
-
-// Test the input with the given mask and branch if the condition holds.
-// If the condition is 'eq' or 'ne' this will use MacroAssembler's
-// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
-// conversion to Tbz/Tbnz when possible.
-class TestAndBranch : public BranchGenerator {
- public:
- TestAndBranch(LCodeGen* codegen,
- Condition cond,
- const Register& value,
- uint64_t mask)
- : BranchGenerator(codegen),
- cond_(cond),
- value_(value),
- mask_(mask) { }
-
- virtual void Emit(Label* label) const {
- switch (cond_) {
- case eq:
- __ TestAndBranchIfAllClear(value_, mask_, label);
- break;
- case ne:
- __ TestAndBranchIfAnySet(value_, mask_, label);
- break;
- default:
- __ Tst(value_, mask_);
- __ B(cond_, label);
- }
- }
-
- virtual void EmitInverted(Label* label) const {
- // The inverse of "all clear" is "any set" and vice versa.
- switch (cond_) {
- case eq:
- __ TestAndBranchIfAnySet(value_, mask_, label);
- break;
- case ne:
- __ TestAndBranchIfAllClear(value_, mask_, label);
- break;
- default:
- __ Tst(value_, mask_);
- __ B(NegateCondition(cond_), label);
- }
- }
-
- private:
- Condition cond_;
- const Register& value_;
- uint64_t mask_;
-};
-
-
-// Test the input and branch if it is non-zero and not a NaN.
-class BranchIfNonZeroNumber : public BranchGenerator {
- public:
- BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
- const FPRegister& scratch)
- : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
-
- virtual void Emit(Label* label) const {
- __ Fabs(scratch_, value_);
- // Compare with 0.0. Because scratch_ is positive, the result can be one of
- // nZCv (equal), nzCv (greater) or nzCV (unordered).
- __ Fcmp(scratch_, 0.0);
- __ B(gt, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ Fabs(scratch_, value_);
- __ Fcmp(scratch_, 0.0);
- __ B(le, label);
- }
-
- private:
- const FPRegister& value_;
- const FPRegister& scratch_;
-};
-
-
-// Test the input and branch if it is a heap number.
-class BranchIfHeapNumber : public BranchGenerator {
- public:
- BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
- : BranchGenerator(codegen), value_(value) { }
-
- virtual void Emit(Label* label) const {
- __ JumpIfHeapNumber(value_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ JumpIfNotHeapNumber(value_, label);
- }
-
- private:
- const Register& value_;
-};
-
-
-// Test the input and branch if it is the specified root value.
-class BranchIfRoot : public BranchGenerator {
- public:
- BranchIfRoot(LCodeGen* codegen, const Register& value,
- Heap::RootListIndex index)
- : BranchGenerator(codegen), value_(value), index_(index) { }
-
- virtual void Emit(Label* label) const {
- __ JumpIfRoot(value_, index_, label);
- }
-
- virtual void EmitInverted(Label* label) const {
- __ JumpIfNotRoot(value_, index_, label);
- }
-
- private:
- const Register& value_;
- const Heap::RootListIndex index_;
-};
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
-
- Assembler::BlockPoolsScope scope(masm_);
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- if ((code->kind() == Code::BINARY_OP_IC) ||
- (code->kind() == Code::COMPARE_IC)) {
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- InlineSmiCheckInfo::EmitNotInlined(masm());
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(x1));
-
- __ Mov(x0, Operand(instr->arity()));
- __ Mov(x2, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-
- // We might need to create a holey array; look at the first argument.
- __ Peek(x10, 0);
- __ Cbz(x10, &packed_case);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ B(&done);
- __ Bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-
- DCHECK(ToRegister(instr->result()).is(x0));
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Mov(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadHeapObject(cp,
- Handle<HeapObject>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- if (context != nullptr) LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(
- masm(), kind, arguments, deopt_mode);
-
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
-}
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator iterator(doubles);
- int count = 0;
- while (!iterator.Done()) {
- // TODO(all): Is this supposed to save just the callee-saved doubles? It
- // looks like it's saving all of them.
- FPRegister value = FPRegister::from_code(iterator.Current());
- __ Poke(value, count * kDoubleSize);
- iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator iterator(doubles);
- int count = 0;
- while (!iterator.Done()) {
- // TODO(all): Is this supposed to restore just the callee-saved doubles? It
- // looks like it's restoring all of them.
- FPRegister value = FPRegister::from_code(iterator.Current());
- __ Peek(value, count * kDoubleSize);
- iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
- }
-
- DCHECK(__ StackPointer().Is(jssp));
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ StubPrologue(
- StackFrame::STUB,
- GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- __ Claim(slots, kPointerSize);
- }
- }
- frame_is_built_ = true;
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Allocate a local context if needed.
- if (info()->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is in x1.
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ Mov(x10, Operand(info()->scope()->scope_info()));
- __ Push(x1, x10);
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(x1);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
- // Context is returned in x0. It replaces the context passed to us. It's
- // saved on the stack and kept live in cp.
- __ Mov(cp, x0);
- __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- Register value = x0;
- Register scratch = x3;
-
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ Ldr(value, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ Str(value, target);
- // Update the write barrier. This clobbers value and scratch.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
- value, scratch, GetLinkRegisterState(),
- kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ Claim(slots);
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
-
- __ Bind(code->entry());
-
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- __ Push(lr, fp);
- __ Mov(fp, StackFrame::TypeToMarker(StackFrame::STUB));
- __ Push(fp);
- __ Add(fp, __ StackPointer(),
- TypedFrameConstants::kFixedFrameSizeFromFp);
- Comment(";;; Deferred code");
- }
-
- code->Generate();
-
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- __ Pop(xzr, fp, lr);
- frame_is_built_ = false;
- }
-
- __ B(code->exit());
- }
- }
-
- // Force constant pool emission at the end of the deferred code to make
- // sure that no constant pools are emitted after deferred code, because
- // deferred code generation is the last step that generates code. The two
- // following steps will only output data used by Crankshaft.
- masm()->CheckConstPool(true, false);
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- Label needs_frame, call_deopt_entry;
-
- if (jump_table_.length() > 0) {
- Comment(";;; -------------------- Jump table --------------------");
- Address base = jump_table_[0]->address;
-
- UseScratchRegisterScope temps(masm());
- Register entry_offset = temps.AcquireX();
-
- int length = jump_table_.length();
- for (int i = 0; i < length; i++) {
- Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
- __ Bind(&table_entry->label);
-
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
-
- // Second-level deopt table entries are contiguous and small, so instead
- // of loading the full, absolute address of each one, load the base
- // address and add an immediate offset.
- __ Mov(entry_offset, entry - base);
-
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- // Save lr before the Bl; fp will be adjusted in the needs_frame code.
- __ Push(lr, fp);
- // Reuse the existing needs_frame code.
- __ Bl(&needs_frame);
- } else {
- // There is nothing special to do, so just continue to the second-level
- // table.
- __ Bl(&call_deopt_entry);
- }
-
- masm()->CheckConstPool(false, false);
- }
-
- if (needs_frame.is_linked()) {
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
-
- Comment(";;; needs_frame common code");
- UseScratchRegisterScope temps(masm());
- Register stub_marker = temps.AcquireX();
- __ Bind(&needs_frame);
- __ Mov(stub_marker, StackFrame::TypeToMarker(StackFrame::STUB));
- __ Push(cp, stub_marker);
- __ Add(fp, __ StackPointer(), 2 * kPointerSize);
- }
-
- // Generate common code for calling the second-level deopt table.
- __ Bind(&call_deopt_entry);
-
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
-
- Register deopt_entry = temps.AcquireX();
- __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
- RelocInfo::RUNTIME_ENTRY));
- __ Add(deopt_entry, deopt_entry, entry_offset);
- __ Br(deopt_entry);
- }
-
- // Force constant pool emission at the end of the deopt jump table to make
- // sure that no constant pools are emitted after.
- masm()->CheckConstPool(true, false);
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- // We do not know how much data will be emitted for the safepoint table, so
- // force emission of the veneer pool.
- masm()->CheckVeneerPool(true, true);
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type,
- Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- Deoptimizer::BailoutType bailout_type =
- info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
-
- if (override_bailout_type != NULL) {
- bailout_type = *override_bailout_type;
- }
-
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
-
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- }
-
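- // Stress-test deoptimization: decrement a counter kept behind an external
- // reference and force an eager deopt (resetting the counter) when it
- // reaches zero. x0-x2 and NZCV are preserved around the check so the
- // non-deopting path is unaffected.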
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Label not_zero;
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
-
- __ Push(x0, x1, x2);
- __ Mrs(x2, NZCV);
- __ Mov(x0, count);
- __ Ldr(w1, MemOperand(x0));
- __ Subs(x1, x1, 1);
- __ B(gt, &not_zero);
- __ Mov(w1, FLAG_deopt_every_n_times);
- __ Str(w1, MemOperand(x0));
- __ Pop(x2, x1, x0);
- DCHECK(frame_is_built_);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- __ Unreachable();
-
- __ Bind(&not_zero);
- __ Str(w1, MemOperand(x0));
- __ Msr(NZCV, x2);
- __ Pop(x2, x1, x0);
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- Label dont_trap;
- __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
- __ Debug("trap_on_deopt", __LINE__, BREAK);
- __ Bind(&dont_trap);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through the jump table if we need to build a frame or restore caller
- // doubles.
- if (branch_type == always &&
- frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(deopt_info);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry* table_entry =
- new (zone()) Deoptimizer::JumpTableEntry(
- entry, deopt_info, bailout_type, !frame_is_built_);
- // We often have several deopts to the same entry; reuse the last
- // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry->IsEquivalentTo(*jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- __ B(&jump_table_.last()->label, branch_type, reg, bit);
- }
-}
-
-void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
- override_bailout_type);
-}
-
-void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
-}
-
-void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
-}
-
-void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
-}
-
-void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
-}
-
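-// Smis have a zero tag bit (kSmiTag == 0), so "is a smi" corresponds to the
-// tag bit being clear and "is not a smi" to it being set.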
-void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
-}
-
-void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
-}
-
-void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- __ CompareRoot(rt, index);
- DeoptimizeIf(eq, instr, deopt_reason);
-}
-
-void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- __ CompareRoot(rt, index);
- DeoptimizeIf(ne, instr, deopt_reason);
-}
-
-void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- __ TestForMinusZero(input);
- DeoptimizeIf(vs, instr, deopt_reason);
-}
-
-
-void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
- __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-}
-
-void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
-}
-
-void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- intptr_t current_pc = masm()->pc_offset();
-
- if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
- ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK((padding_size % kInstructionSize) == 0);
- InstructionAccurateScope instruction_accurate(
- masm(), padding_size / kInstructionSize);
-
- while (padding_size > 0) {
- __ nop();
- padding_size -= kInstructionSize;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- // TODO(all): support zero register results, as ToRegister32.
- DCHECK((op != NULL) && op->IsRegister());
- return Register::from_code(op->index());
-}
-
-
-Register LCodeGen::ToRegister32(LOperand* op) const {
- DCHECK(op != NULL);
- if (op->IsConstantOperand()) {
- // If this is a constant operand, the result must be the zero register.
- DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
- return wzr;
- } else {
- return ToRegister(op).W();
- }
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK((op != NULL) && op->IsDoubleRegister());
- return DoubleRegister::from_code(op->index());
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- DCHECK(op != NULL);
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- DCHECK(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand(0);
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-Operand LCodeGen::ToOperand32(LOperand* op) {
- DCHECK(op != NULL);
- if (op->IsRegister()) {
- return Operand(ToRegister32(op));
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- return Operand(constant->Integer32Value());
- } else {
- // Other constants not implemented.
- Abort(kToOperand32UnsupportedImmediate);
- }
- }
- // Other cases are not implemented.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-static int64_t ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize;
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
- DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- int fp_offset = FrameSlotToFPOffset(op->index());
-    // Loads and stores have a bigger reach with positive offsets than with
-    // negative ones. We try to access using jssp (positive offset) first,
-    // then fall back to fp (negative offset) if that fails.
- //
- // We can reference a stack slot from jssp only if we know how much we've
- // put on the stack. We don't know this in the following cases:
- // - stack_mode != kCanUseStackPointer: this is the case when deferred
- // code has saved the registers.
- // - saves_caller_doubles(): some double registers have been pushed, jssp
- // references the end of the double registers and not the end of the stack
- // slots.
- // In both of the cases above, we _could_ add the tracking information
- // required so that we can use jssp here, but in practice it isn't worth it.
- if ((stack_mode == kCanUseStackPointer) &&
- !info()->saves_caller_doubles()) {
- int jssp_offset_to_fp =
- (pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize -
- StandardFrameConstants::kFixedFrameSizeAboveFp;
- int jssp_offset = fp_offset + jssp_offset_to_fp;
- if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
- return MemOperand(masm()->StackPointer(), jssp_offset);
- }
- }
- return MemOperand(fp, fp_offset);
- } else {
-    // There is no eager frame; retrieve the parameter relative to the
-    // stack pointer.
- return MemOperand(masm()->StackPointer(),
- ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
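
For reference, the jssp-relative offset computed above is just the slot's fp-relative offset plus the distance from jssp to fp. A rough C++ sketch of that arithmetic, with illustrative constants standing in for the real frame-constant definitions:

  // Sketch: distance from the stack pointer (jssp) to a frame slot, given its
  // fp-relative offset. Assumes the frame has been built and no caller doubles
  // were saved, as checked above; the constants here are illustrative.
  int64_t JsspOffsetForSlot(int fp_offset, int pushed_arguments,
                            int total_frame_slots) {
    const int kPointerSize = 8;                           // ARM64 word size.
    const int kFixedFrameSizeAboveFp = 2 * kPointerSize;  // Saved fp and lr.
    int jssp_to_fp = (pushed_arguments + total_frame_slots) * kPointerSize -
                     kFixedFrameSizeAboveFp;
    return fp_offset + jssp_to_fp;
  }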
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-template <class LI>
-Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
- if (shift_info->shift() == NO_SHIFT) {
- return ToOperand32(right);
- } else {
- return Operand(
- ToRegister32(right),
- shift_info->shift(),
- JSShiftAmountFromLConstant(shift_info->shift_amount()));
- }
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = nv;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchGeneric(InstrType instr,
- const BranchGenerator& branch) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
- } else {
- branch.Emit(chunk_->GetAssemblyLabel(left_block));
- if (right_block != next_block) {
- __ B(chunk_->GetAssemblyLabel(right_block));
- }
- }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
- DCHECK((condition != al) && (condition != nv));
- BranchOnCondition branch(this, condition);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitCompareAndBranch(InstrType instr,
- Condition condition,
- const Register& lhs,
- const Operand& rhs) {
- DCHECK((condition != al) && (condition != nv));
- CompareAndBranch branch(this, condition, lhs, rhs);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitTestAndBranch(InstrType instr,
- Condition condition,
- const Register& value,
- uint64_t mask) {
- DCHECK((condition != al) && (condition != nv));
- TestAndBranch branch(this, condition, value, mask);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
- const FPRegister& value,
- const FPRegister& scratch) {
- BranchIfNonZeroNumber branch(this, value, scratch);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
- const Register& value) {
- BranchIfHeapNumber branch(this, value);
- EmitBranchGeneric(instr, branch);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchIfRoot(InstrType instr,
- const Register& value,
- Heap::RootListIndex index) {
- BranchIfRoot branch(this, value, index);
- EmitBranchGeneric(instr, branch);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) {
- resolver_.Resolve(move);
- }
- }
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
-
-  // The pointer to the arguments array comes from DoArgumentsElements.
-  // It does not point directly to the arguments and there is an offset of
- // two words that we must take into account when accessing an argument.
- // Subtracting the index from length accounts for one, so we add one more.
-
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int index = ToInteger32(LConstantOperand::cast(instr->index()));
- int length = ToInteger32(LConstantOperand::cast(instr->length()));
- int offset = ((length - index) + 1) * kPointerSize;
- __ Ldr(result, MemOperand(arguments, offset));
- } else if (instr->index()->IsConstantOperand()) {
- Register length = ToRegister32(instr->length());
- int index = ToInteger32(LConstantOperand::cast(instr->index()));
- int loc = index - 1;
- if (loc != 0) {
- __ Sub(result.W(), length, loc);
- __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
- } else {
- __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
- }
- } else {
- Register length = ToRegister32(instr->length());
- Operand index = ToOperand32(instr->index());
- __ Sub(result.W(), length, index);
- __ Add(result.W(), result.W(), 1);
- __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
- }
-}
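
The constant-index case above reduces to simple arithmetic: indexing runs from the end (length - index), and one extra word is added for the remaining part of the two-word gap described in the comment. A small illustrative sketch of that offset computation, assuming 8-byte pointers:

  // Sketch: byte offset of argument `index` from the arguments pointer when
  // both index and length are compile-time constants (first branch above).
  int ArgumentByteOffset(int index, int length) {
    const int kPointerSize = 8;
    return ((length - index) + 1) * kPointerSize;
  }
  // Example: argument 0 of a 2-argument frame sits at (2 - 0 + 1) * 8 = 24.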
-
-
-void LCodeGen::DoAddE(LAddE* instr) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = Operand(x0); // Dummy initialization.
- if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
- right = Operand(ToRegister(instr->right()));
- } else if (instr->right()->IsConstantOperand()) {
- right = ToInteger32(LConstantOperand::cast(instr->right()));
- } else {
- right = Operand(ToRegister32(instr->right()), SXTW);
- }
-
- DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
- __ Add(result, left, right);
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32(instr->right(), instr);
-
- if (can_overflow) {
- __ Adds(result, left, right);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Add(result, left, right);
- }
-}
-
-
-void LCodeGen::DoAddS(LAddS* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- if (can_overflow) {
- __ Adds(result, left, right);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Add(result, left, right);
- }
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
-
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
- } else {
- Register size = ToRegister32(instr->size());
- __ Sxtw(size.X(), size);
- __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
- }
-
- __ Bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- Register start = temp1;
- Register end = temp2;
- Register filler = ToRegister(instr->temp3());
-
- __ Sub(start, result, kHeapObjectTag);
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Add(end, start, size);
- } else {
- __ Add(end, start, ToRegister(instr->size()));
- }
- __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(start, end, filler);
- } else {
- DCHECK(instr->temp3() == NULL);
- }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Mov(ToRegister(instr->result()), Smi::kZero);
-
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- // We're in a SafepointRegistersScope so we can use any scratch registers.
- Register size = x0;
- if (instr->size()->IsConstantOperand()) {
- __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
- } else {
- __ SmiTag(size, ToRegister32(instr->size()).X());
- }
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Mov(x10, Smi::FromInt(flags));
- __ Push(size, x10);
-
- CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, nullptr);
- __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-    // If the allocation folding dominator allocation triggered a GC, the
-    // allocation happened in the runtime. We have to reset the top pointer
-    // to virtually undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- Register top_address = x10;
- __ Sub(x0, x0, Operand(kHeapObjectTag));
- __ Mov(top_address, Operand(allocation_top));
- __ Str(x0, MemOperand(top_address));
- __ Add(x0, x0, Operand(kHeapObjectTag));
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- }
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister32(instr->length());
-
- Register elements = ToRegister(instr->elements());
- Register scratch = x5;
- DCHECK(receiver.Is(x0)); // Used for parameter count.
- DCHECK(function.Is(x1)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).Is(x0));
- DCHECK(instr->IsMarkedAsCall());
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ Push(receiver);
- Register argc = receiver;
- receiver = NoReg;
- __ Sxtw(argc, length);
- // The arguments are at a one pointer size offset from elements.
- __ Add(elements, elements, 1 * kPointerSize);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ Cbz(length, &invoke);
- __ Bind(&loop);
- __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
- __ Push(scratch);
- __ Subs(length, length, 1);
- __ B(ne, &loop);
-
- __ Bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(x0);
- // It is safe to use x3, x4 and x5 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) x3 (new.target) will be initialized below.
- PrepareForTailCall(actual, x3, x4, x5);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in argc (receiver) which is x0, as
- // expected by InvokeFunction.
- ParameterCount actual(argc);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- // When we are inside an inlined function, the arguments are the last things
- // that have been pushed on the stack. Therefore the arguments array can be
- // accessed directly from jssp.
-    // However, in the normal case it is accessed via fp, but there are two
-    // words on the stack between fp and the arguments (the saved lr and fp),
-    // and the LAccessArgumentsAt implementation takes that into account.
-    // In the inlined case we need to subtract the size of 2 words from jssp
-    // to get a pointer which works well with LAccessArgumentsAt.
- DCHECK(masm()->StackPointer().Is(jssp));
- __ Sub(result, jssp, 2 * kPointerSize);
- } else if (instr->hydrogen()->arguments_adaptor()) {
- DCHECK(instr->temp() != NULL);
- Register previous_fp = ToRegister(instr->temp());
-
- __ Ldr(previous_fp,
- MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(result, MemOperand(previous_fp,
- CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Cmp(result, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
- __ Csel(result, fp, previous_fp, ne);
- } else {
- __ Mov(result, fp);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister32(instr->result());
- Label done;
-
-  // If there is no arguments adaptor frame, the number of arguments is fixed.
- __ Cmp(fp, elements);
- __ Mov(result, scope()->num_parameters());
- __ B(eq, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(result,
- UntagSmiMemOperand(result.X(),
- ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Argument length is in result register.
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- switch (instr->op()) {
- case Token::ADD: __ Fadd(result, left, right); break;
- case Token::SUB: __ Fsub(result, left, right); break;
- case Token::MUL: __ Fmul(result, left, right); break;
- case Token::DIV: __ Fdiv(result, left, right); break;
- case Token::MOD: {
- // The ECMA-262 remainder operator is the remainder from a truncating
- // (round-towards-zero) division. Note that this differs from IEEE-754.
- //
- // TODO(jbramley): See if it's possible to do this inline, rather than by
- // calling a helper function. With frintz (to produce the intermediate
- // quotient) and fmsub (to calculate the remainder without loss of
- // precision), it should be possible. However, we would need support for
- // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
- // support that yet.
- DCHECK(left.Is(d0));
- DCHECK(right.Is(d1));
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- DCHECK(result.Is(d0));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(x1));
- DCHECK(ToRegister(instr->right()).is(x0));
- DCHECK(ToRegister(instr->result()).is(x0));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32(instr->right(), instr);
-
- switch (instr->op()) {
- case Token::BIT_AND: __ And(result, left, right); break;
- case Token::BIT_OR: __ Orr(result, left, right); break;
- case Token::BIT_XOR: __ Eor(result, left, right); break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoBitS(LBitS* instr) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
-
- switch (instr->op()) {
- case Token::BIT_AND: __ And(result, left, right); break;
- case Token::BIT_OR: __ Orr(result, left, right); break;
- case Token::BIT_XOR: __ Eor(result, left, right); break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
- Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
- DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
- if (instr->index()->IsConstantOperand()) {
- Operand index = ToOperand32(instr->index());
- Register length = ToRegister32(instr->length());
- __ Cmp(length, index);
- cond = CommuteCondition(cond);
- } else {
- Register index = ToRegister32(instr->index());
- Operand length = ToOperand32(instr->length());
- __ Cmp(index, length);
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
- } else {
- DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
-
- if (r.IsInteger32()) {
- DCHECK(!info()->IsStub());
- EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
- } else if (r.IsSmi()) {
- DCHECK(!info()->IsStub());
- STATIC_ASSERT(kSmiTag == 0);
- EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
- } else if (r.IsDouble()) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- EmitBranchIfNonZeroNumber(instr, value, double_scratch());
- } else {
- DCHECK(r.IsTagged());
- Register value = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
-
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ CompareRoot(value, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- EmitCompareAndBranch(instr, ne, value, Smi::kZero);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitGoto(instr->TrueDestination(chunk()));
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- __ Ldr(double_scratch(), FieldMemOperand(value,
- HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- Register temp = ToRegister(instr->temp1());
- __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
- EmitCompareAndBranch(instr, ne, temp, 0);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ JumpIfRoot(
- value, Heap::kUndefinedValueRootIndex, false_label);
- }
-
- if (expected & ToBooleanHint::kBoolean) {
- // Boolean -> its value.
- __ JumpIfRoot(
- value, Heap::kTrueValueRootIndex, true_label);
- __ JumpIfRoot(
- value, Heap::kFalseValueRootIndex, false_label);
- }
-
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ JumpIfRoot(
- value, Heap::kNullValueRootIndex, false_label);
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- DCHECK(Smi::kZero == 0);
- __ Cbz(value, false_label);
- __ JumpIfSmi(value, true_label);
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
- }
-
- Register map = NoReg;
- Register scratch = NoReg;
-
- if (expected & ToBooleanHint::kNeedsMap) {
- DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
- map = ToRegister(instr->temp1());
- scratch = ToRegister(instr->temp2());
-
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(
- scratch, 1 << Map::kIsUndetectable, false_label);
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
- __ B(ge, true_label);
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
- __ B(ge, &not_string);
- __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
- __ Cbz(scratch, false_label);
- __ B(true_label);
- __ Bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
- __ B(eq, true_label);
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- Label not_heap_number;
- __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
-
- __ Ldr(double_scratch(),
- FieldMemOperand(value, HeapNumber::kValueOffset));
- __ Fcmp(double_scratch(), 0.0);
- // If we got a NaN (overflow bit is set), jump to the false branch.
- __ B(vs, false_label);
- __ B(eq, false_label);
- __ B(true_label);
- __ Bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- // The function interface relies on the following register assignments.
- Register function_reg = x1;
- Register arity_reg = x0;
-
- LPointerMap* pointers = instr->pointer_map();
-
- if (FLAG_debug_code) {
- Label is_not_smi;
- // Try to confirm that function_reg (x1) is a tagged pointer.
- __ JumpIfNotSmi(function_reg, &is_not_smi);
- __ Abort(kExpectedFunctionObject);
- __ Bind(&is_not_smi);
- }
-
- if (can_invoke_directly) {
- // Change context.
- __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
- __ Mov(arity_reg, arity);
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- if (is_tail_call) {
- __ Jump(x10);
- } else {
- __ Call(x10);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToRegister(instr->result()).Is(x0));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- // TODO(all): on ARM we use a call descriptor to specify a storage mode
- // but on ARM64 we only have one storage mode so it isn't necessary. Check
- // this understanding is correct.
- __ Jump(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Br(target);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- // TODO(all): on ARM we use a call descriptor to specify a storage mode
- // but on ARM64 we only have one storage mode so it isn't necessary. Check
- // this understanding is correct.
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Call(target);
- }
- generator.AfterCall();
- }
-
- HCallWithDescriptor* hinstr = instr->hydrogen();
- RecordPushedArgumentsDelta(hinstr->argument_delta());
-
-  // An HCallWithDescriptor instruction is translated to zero or more
-  // LPushArguments (they handle parameters passed on the stack) followed by
-  // an LCallWithDescriptor. Each LPushArguments instruction generated records
-  // the number of arguments pushed, so we need to offset them here.
-  // The |argument_delta()| used above "knows" only about JS parameters, while
-  // here we are dealing with the details of a particular calling convention.
- RecordPushedArgumentsDelta(-hinstr->descriptor().GetStackParameterCount());
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Register temp = ToRegister(instr->temp());
- Label deopt, done;
-  // If the map is not deprecated, the migration attempt does not make sense.
- __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Ldr(temp, FieldMemOperand(temp, Map::kBitField3Offset));
- __ Tst(temp, Operand(Map::Deprecated::kMask));
- __ B(eq, &deopt);
-
- {
- PushSafepointRegistersScope scope(this);
- __ Push(object);
- __ Mov(cp, 0);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, temp);
- }
- __ Tst(temp, Operand(kSmiTagMask));
- __ B(ne, &done);
-
- __ bind(&deopt);
- Deoptimize(instr, DeoptimizeReason::kInstanceMigrationFailed);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- virtual void Generate() {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- Register object = ToRegister(instr->value());
- Register map_reg = ToRegister(instr->temp());
-
- __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, object);
- __ Bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(map_reg, map);
- __ B(eq, &success);
- }
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(map_reg, map);
-
- // We didn't match a map.
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ B(ne, deferred->entry());
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ Bind(&success);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi);
- }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- Register value = ToRegister(instr->value());
- DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi);
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- UseScratchRegisterScope temps(masm());
- Register view = ToRegister(instr->view());
- Register scratch = temps.AcquireX();
-
- __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
- __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
-
- __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first, last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ Cmp(scratch, first);
- if (first == last) {
- // If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- } else if (last == LAST_TYPE) {
- // We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- // If we are below the lower bound, set the C flag and clear the Z flag
- // to force a deopt.
- __ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK((tag == 0) || (tag == mask));
- if (tag == 0) {
- DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- DeoptimizeReason::kWrongInstanceType);
- }
- } else {
- if (tag == 0) {
- __ Tst(scratch, mask);
- } else {
- __ And(scratch, scratch, mask);
- __ Cmp(scratch, tag);
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister input = ToDoubleRegister(instr->unclamped());
- Register result = ToRegister32(instr->result());
- __ ClampDoubleToUint8(result, input, double_scratch());
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register input = ToRegister32(instr->unclamped());
- Register result = ToRegister32(instr->result());
- __ ClampInt32ToUint8(result, input);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register input = ToRegister(instr->unclamped());
- Register result = ToRegister32(instr->result());
- Label done;
-
- // Both smi and heap number cases are handled.
- Label is_not_smi;
- __ JumpIfNotSmi(input, &is_not_smi);
- __ SmiUntag(result.X(), input);
- __ ClampInt32ToUint8(result);
- __ B(&done);
-
- __ Bind(&is_not_smi);
-
- // Check for heap number.
- Label is_heap_number;
- __ JumpIfHeapNumber(input, &is_heap_number);
-
-  // Check for undefined. Undefined is converted to zero for clamping
-  // conversions.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- DeoptimizeReason::kNotAHeapNumberUndefined);
- __ Mov(result, 0);
- __ B(&done);
-
- // Heap number case.
- __ Bind(&is_heap_number);
- DoubleRegister dbl_scratch = double_scratch();
- DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
- __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
-
- __ Bind(&done);
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Register input = ToRegister(instr->value());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- __ JumpIfSmi(input, false_label);
-
- Register map = scratch2;
- __ CompareObjectType(input, map, scratch1, FIRST_FUNCTION_TYPE);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ B(hs, true_label);
- } else {
- __ B(hs, false_label);
- }
-
- // Check if the constructor in the map is a function.
- {
- UseScratchRegisterScope temps(masm());
- Register instance_type = temps.AcquireX();
- __ GetMapConstructor(scratch1, map, scratch2, instance_type);
- __ Cmp(instance_type, JS_FUNCTION_TYPE);
- }
- // Objects with a non-function constructor have class 'Object'.
- if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ B(ne, true_label);
- } else {
- __ B(ne, false_label);
- }
-
- // The constructor function is in scratch1. Get its instance class name.
- __ Ldr(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch1, FieldMemOperand(
- scratch1, SharedFunctionInfo::kInstanceClassNameOffset));
-
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
-}
-
-void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- FPRegister object = ToDoubleRegister(instr->object());
- Register temp = ToRegister(instr->temp());
-
- // If we don't have a NaN, we don't have the hole, so branch now to avoid the
- // (relatively expensive) hole-NaN check.
- __ Fcmp(object, object);
- __ B(vc, instr->FalseLabel(chunk_));
-
- // We have a NaN, but is it the hole?
- __ Fmov(temp, object);
- EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
- DCHECK(instr->hydrogen()->representation().IsTagged());
- Register object = ToRegister(instr->object());
-
- EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register value = ToRegister(instr->value());
- Register map = ToRegister(instr->temp());
-
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cond = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
-
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ B(vs, instr->FalseLabel(chunk_));
- EmitBranch(instr, cond);
- } else {
- if (instr->hydrogen_value()->representation().IsInteger32()) {
- if (right->IsConstantOperand()) {
- EmitCompareAndBranch(instr, cond, ToRegister32(left),
- ToOperand32(right));
- } else {
- // Commute the operands and the condition.
- EmitCompareAndBranch(instr, CommuteCondition(cond),
- ToRegister32(right), ToOperand32(left));
- }
- } else {
- DCHECK(instr->hydrogen_value()->representation().IsSmi());
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- EmitCompareAndBranch(instr,
- cond,
- ToRegister(left),
- Operand(Smi::FromInt(value)));
- } else if (left->IsConstantOperand()) {
- // Commute the operands and the condition.
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- EmitCompareAndBranch(instr,
- CommuteCondition(cond),
- ToRegister(right),
- Operand(Smi::FromInt(value)));
- } else {
- EmitCompareAndBranch(instr,
- cond,
- ToRegister(left),
- ToRegister(right));
- }
- }
- }
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- EmitCompareAndBranch(instr, eq, left, right);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
- Condition cond = TokenToCondition(op, false);
-
- DCHECK(ToRegister(instr->left()).Is(x1));
- DCHECK(ToRegister(instr->right()).Is(x0));
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // Signal that we don't inline smi code before this stub.
- InlineSmiCheckInfo::EmitNotInlined(masm());
-
- // Return true or false depending on CompareIC result.
- // This instruction is marked as call. We can clobber any register.
- DCHECK(instr->IsMarkedAsCall());
- __ LoadTrueFalseRoots(x1, x2);
- __ Cmp(x0, 0);
- __ Csel(ToRegister(instr->result()), x1, x2, cond);
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
- if (instr->value() == 0) {
- if (copysign(1.0, instr->value()) == 1.0) {
- __ Fmov(result, fp_zero);
- } else {
- __ Fneg(result, fp_zero);
- }
- } else {
- __ Fmov(result, instr->value());
- }
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ Mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- DCHECK(is_int32(instr->value()));
- // Cast the value here to ensure that the value isn't sign extended by the
- // implicit Operand constructor.
- __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), object);
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- DCHECK(result.is(cp));
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- UseScratchRegisterScope temps(masm());
- Register temp = temps.AcquireX();
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ Mov(temp, Operand(cell));
- __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
- __ Cmp(reg, temp);
- } else {
- __ Cmp(reg, Operand(object));
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
- type = Deoptimizer::LAZY;
- }
-
- Deoptimize(instr, instr->hydrogen()->reason(), &type);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister32(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister32(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kDivisionByZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- // Test dividend for kMinInt by subtracting one (cmp) and checking for
- // overflow.
- __ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ Neg(result, dividend);
- return;
- }
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (shift == 0) {
- __ Mov(result, dividend);
- } else if (shift == 1) {
- __ Add(result, dividend, Operand(dividend, LSR, 31));
- } else {
- __ Mov(result, Operand(dividend, ASR, 31));
- __ Add(result, dividend, Operand(result, LSR, 32 - shift));
- }
- if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
- if (divisor < 0) __ Neg(result, result);
-}
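
The shift sequence above divides by a power of two while rounding toward zero: for negative dividends a bias of 2^shift - 1, derived from the replicated sign bit, is added before the arithmetic shift. A plain C++ sketch of the same computation, illustrative only and assuming arithmetic right shift for signed values:

  // Sketch: signed division by +/-2^shift rounding toward zero, mirroring the
  // Add/Asr/Neg sequence emitted above. Assumes (kMinInt / -1) was ruled out
  // by the overflow deopt above.
  int32_t DivByPowerOf2(int32_t dividend, int shift, bool negative_divisor) {
    int32_t result = dividend;
    if (shift > 0) {
      uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);  // 0 or ~0u
      int32_t bias = static_cast<int32_t>(sign_mask >> (32 - shift));
      result = (dividend + bias) >> shift;
    }
    return negative_divisor ? -result : result;
  }
  // Example: -7 / 4 -> bias 3, (-7 + 3) >> 2 = -1 (truncation toward zero).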
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister32(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister32(instr->result());
- DCHECK(!AreAliased(dividend, result));
-
- if (divisor == 0) {
- Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Neg(result, result);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- Register temp = ToRegister32(instr->temp());
- DCHECK(!AreAliased(dividend, result, temp));
- __ Sxtw(dividend.X(), dividend);
- __ Mov(temp, divisor);
- __ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister32(instr->dividend());
- Register divisor = ToRegister32(instr->divisor());
- Register result = ToRegister32(instr->result());
-
- // Issue the division first, and then check for any deopt cases whilst the
- // result is computed.
- __ Sdiv(result, dividend, divisor);
-
- if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DCHECK(!instr->temp());
- return;
- }
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) as that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(divisor, 0);
-
-    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
-    // zero, i.e. a zero dividend with a negative divisor deopts.
-    // If the divisor >= 0 (pl, the opposite of mi), set the flags to
-    // condition ne so we don't deopt, i.e. a positive divisor doesn't deopt.
- __ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- // Test dividend for kMinInt by subtracting one (cmp) and checking for
- // overflow.
- __ Cmp(dividend, 1);
-    // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
-    // -1. If overflow is clear, set the flags for condition ne, as the
-    // dividend isn't kMinInt, and thus we shouldn't deopt.
- __ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-
- // Compute remainder and deopt if it's not zero.
- Register remainder = ToRegister32(instr->temp());
- __ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision);
-}
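
The Ccmp-based checks above avoid extra branches: a conditional compare either performs the second comparison or forces the flags to a chosen value, so a single conditional deopt covers both conditions. A rough sketch of the minus-zero check's logic, illustrative only:

  // Sketch of Cmp(divisor, 0); Ccmp(dividend, 0, NoFlag, mi);
  // DeoptimizeIf(eq, ...): deopt exactly when the divisor is negative and the
  // dividend is zero, i.e. when the true result would be -0.
  bool ShouldDeoptForMinusZero(int32_t dividend, int32_t divisor) {
    if (divisor < 0) {       // "mi" holds after Cmp(divisor, 0).
      return dividend == 0;  // Second compare runs; eq means deopt.
    }
    return false;            // NoFlag forces "ne", so no deopt.
  }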
-
-
-void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister32(instr->result());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- if (instr->tag_result()) {
- __ SmiTag(result.X());
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-
- RecordPushedArgumentsDelta(instr->hydrogen_value()->argument_delta());
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
-
- __ EnumLengthUntagged(result, map);
- __ Cbnz(result, &load_cache);
-
- __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ B(&done);
-
- __ Bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ Ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Register object = ToRegister(instr->object());
-
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(object.Is(x0));
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(object, x5, x1, x2, x3, x4, &call_runtime);
-
- __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- __ B(&use_cache);
-
- // Get the set of properties to enumerate.
- __ Bind(&call_runtime);
- __ Push(object);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ Bind(&use_cache);
-}
-
-void LCodeGen::EmitGoto(int block) {
- // Do not emit jump if we are emitting a goto to the next block.
- if (!IsNextEmittedBlock(block)) {
- __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-// The HHasInstanceTypeAndBranch instruction is built with an interval of
-// types to test, but is only used in very restricted ways. The only possible
-// kinds of intervals are:
-//  - [ FIRST_TYPE, instr->to() ]
-//  - [ instr->from(), LAST_TYPE ]
-//  - instr->from() == instr->to()
-//
-// These kinds of intervals can be checked with only one compare instruction,
-// provided the correct value and test condition are used.
-//
-// TestType() will return the value to use in the compare instruction and
-// BranchCondition() will return the condition to use depending on the kind
-// of interval actually specified in the instruction.
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK((from == to) || (to == LAST_TYPE));
- return from;
-}
-
-
-// See comment above TestType function for what this function does.
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
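As a rough C++ sketch of the single-comparison interval checks these helpers set up (the enum values here are illustrative, not V8's actual instance types):

#include <cassert>

enum InstanceType { FIRST_TYPE = 0, STRING_TYPE = 5, JS_OBJECT_TYPE = 20, LAST_TYPE = 30 };

// [FIRST_TYPE, to]   ->  type <= to    (a single "ls"-style comparison)
// [from, LAST_TYPE]  ->  type >= from  (a single "hs"-style comparison)
// from == to         ->  type == from  (a single "eq"-style comparison)
bool InRangeFromFirst(InstanceType type, InstanceType to) { return type <= to; }
bool InRangeToLast(InstanceType type, InstanceType from) { return type >= from; }

int main() {
  assert(InRangeFromFirst(STRING_TYPE, JS_OBJECT_TYPE));
  assert(!InRangeToLast(STRING_TYPE, JS_OBJECT_TYPE));
  assert(InRangeToLast(LAST_TYPE, JS_OBJECT_TYPE));
  return 0;
}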
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- __ Add(result, base, ToOperand32(instr->offset()));
- } else {
- __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
- }
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = ToRegister(instr->scratch1());
- Register const object_instance_type = ToRegister(instr->scratch2());
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ JumpIfSmi(object, instr->FalseLabel(chunk_));
- }
-
- // Loop through the {object}'s prototype chain looking for the {prototype}.
- __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ Bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ Ldrb(object_instance_type,
- FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
- // Deoptimize for proxies.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
-
- __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- __ B(eq, instr->FalseLabel(chunk_));
- __ Cmp(object_prototype, prototype);
- __ B(eq, instr->TrueLabel(chunk_));
- __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ B(&loop);
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- Register value = ToRegister32(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Scvtf(result, value);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ Cmp(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ B(ne, &no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ Ldr(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ B(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ Mov(caller_args_count_reg,
- Immediate(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- // The function is required to be in x1.
- DCHECK(ToRegister(instr->function()).is(x1));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use x3, x4 and x5 as scratch registers here given that
- // 1) we are not going to return to the caller function anyway,
- // 2) x3 (new.target) will be initialized below.
- PrepareForTailCall(actual, x3, x4, x5);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(x1, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register val = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register value = ToRegister(instr->value());
- STATIC_ASSERT(kSmiTag == 0);
- EmitTestAndBranch(instr, eq, value, kSmiTagMask);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
-
- EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
-
- // Inherit pushed_arguments_ from the predecessor's argument count.
- if (label->block()->HasPredecessor()) {
- pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
-#ifdef DEBUG
- for (auto p : *label->block()->predecessors()) {
- DCHECK_EQ(p->argument_count(), pushed_arguments_);
- }
-#endif
- }
-
- __ Bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- DeoptimizeReason::kHole);
- } else {
- Label not_the_hole;
- __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Bind(&not_the_hole);
- }
- }
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Get the prototype or initial map from the function.
- __ Ldr(result, FieldMemOperand(function,
- JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, temp, temp, MAP_TYPE);
- __ B(ne, &done);
-
- // Get the prototype from the initial map.
- __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ Bind(&done);
-}
-
-
-MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
- Register key,
- Register base,
- Register scratch,
- bool key_is_smi,
- bool key_is_constant,
- int constant_key,
- ElementsKind elements_kind,
- int base_offset) {
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
-
- if (key_is_constant) {
- int key_offset = constant_key << element_size_shift;
- return MemOperand(base, key_offset + base_offset);
- }
-
- if (key_is_smi) {
- __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
- return MemOperand(scratch, base_offset);
- }
-
- if (base_offset == 0) {
- return MemOperand(base, key, SXTW, element_size_shift);
- }
-
- DCHECK(!AreAliased(scratch, key));
- __ Add(scratch, base, base_offset);
- return MemOperand(scratch, key, SXTW, element_size_shift);
-}
-
-
-void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
- Register ext_ptr = ToRegister(instr->elements());
- Register scratch;
- ElementsKind elements_kind = instr->elements_kind();
-
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- int constant_key = 0;
- if (key_is_constant) {
- DCHECK(instr->temp() == NULL);
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- scratch = ToRegister(instr->temp());
- key = ToRegister(instr->key());
- }
-
- MemOperand mem_op =
- PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
- key_is_constant, constant_key,
- elements_kind,
- instr->base_offset());
-
- if (elements_kind == FLOAT32_ELEMENTS) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Ldr(result.S(), mem_op);
- __ Fcvt(result, result.S());
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Ldr(result, mem_op);
- } else {
- Register result = ToRegister(instr->result());
-
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ Ldrsb(result, mem_op);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ Ldrb(result, mem_op);
- break;
- case INT16_ELEMENTS:
- __ Ldrsh(result, mem_op);
- break;
- case UINT16_ELEMENTS:
- __ Ldrh(result, mem_op);
- break;
- case INT32_ELEMENTS:
- __ Ldrsw(result, mem_op);
- break;
- case UINT32_ELEMENTS:
- __ Ldr(result.W(), mem_op);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- // Deopt if value > 0x80000000.
- __ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind,
- Representation representation,
- int base_offset) {
- STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
- STATIC_ASSERT(kSmiTag == 0);
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
-
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced during
- // bounds check elimination with the index argument to the bounds check, which
- // can be tagged, so that case must be handled here, too.
- if (key_is_tagged) {
- __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
- if (representation.IsInteger32()) {
- DCHECK(elements_kind == FAST_SMI_ELEMENTS);
- // Read or write only the smi payload in the case of fast smi arrays.
- return UntagSmiMemOperand(base, base_offset);
- } else {
- return MemOperand(base, base_offset);
- }
- } else {
- // Sign extend key because it could be a 32-bit negative value or contain
- // garbage in the top 32-bits. The address computation happens in 64-bit.
- DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
- if (representation.IsInteger32()) {
- DCHECK(elements_kind == FAST_SMI_ELEMENTS);
- // Read or write only the smi payload in the case of fast smi arrays.
- __ Add(base, elements, Operand(key, SXTW, element_size_shift));
- return UntagSmiMemOperand(base, base_offset);
- } else {
- __ Add(base, elements, base_offset);
- return MemOperand(base, key, SXTW, element_size_shift);
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
- Register elements = ToRegister(instr->elements());
- DoubleRegister result = ToDoubleRegister(instr->result());
- MemOperand mem_op;
-
- if (instr->key()->IsConstantOperand()) {
- DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
- (instr->temp() == NULL));
-
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- int offset = instr->base_offset() + constant_key * kDoubleSize;
- mem_op = MemOperand(elements, offset);
- } else {
- Register load_base = ToRegister(instr->temp());
- Register key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind(),
- instr->hydrogen()->representation(),
- instr->base_offset());
- }
-
- __ Ldr(result, mem_op);
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Register scratch = ToRegister(instr->temp());
- __ Fmov(scratch, result);
- __ Eor(scratch, scratch, kHoleNanInt64);
- DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole);
- }
-}
-
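The hole check above works on the raw bit pattern because the hole is a specific NaN, and NaNs never compare equal in floating point. A hedged standalone sketch of that bit-comparison technique (the exact hole constant here is assumed for illustration, not taken from the V8 headers):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

const uint64_t kAssumedHoleNanBits = 0xFFF7FFFFFFFFFFFFULL;  // illustrative value only

bool IsTheHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // compare raw bits, not FP values
  return bits == kAssumedHoleNanBits;
}

int main() {
  double hole;
  std::memcpy(&hole, &kAssumedHoleNanBits, sizeof(hole));
  assert(IsTheHole(hole));
  assert(!IsTheHole(1.0));
  assert(!IsTheHole(std::nan("")));  // an ordinary NaN is not the hole
  return 0;
}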
-
-void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- MemOperand mem_op;
-
- Representation representation = instr->hydrogen()->representation();
- if (instr->key()->IsConstantOperand()) {
- DCHECK(instr->temp() == NULL);
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset = instr->base_offset() +
- ToInteger32(const_operand) * kPointerSize;
- if (representation.IsInteger32()) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
- STATIC_ASSERT(kSmiTag == 0);
- mem_op = UntagSmiMemOperand(elements, offset);
- } else {
- mem_op = MemOperand(elements, offset);
- }
- } else {
- Register load_base = ToRegister(instr->temp());
- Register key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-
- mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind(),
- representation, instr->base_offset());
- }
-
- __ Load(result, mem_op, representation);
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi);
- } else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- DeoptimizeReason::kHole);
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- __ B(ne, &done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- __ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- __ Load(result, MemOperand(object, offset), access.representation());
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- DCHECK(access.IsInobject());
- FPRegister result = ToDoubleRegister(instr->result());
- __ Ldr(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- Register source;
- if (access.IsInobject()) {
- source = object;
- } else {
- // Load the properties array, using result as a scratch register.
- __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- source = result;
- }
-
- if (access.representation().IsSmi() &&
- instr->hydrogen()->representation().IsInteger32()) {
- // Read int value directly from upper half of the smi.
- STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
- STATIC_ASSERT(kSmiTag == 0);
- __ Load(result, UntagSmiFieldMemOperand(source, offset),
- Representation::Integer32());
- } else {
- __ Load(result, FieldMemOperand(source, offset), access.representation());
- }
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fabs(result, input);
- } else if (r.IsSmi() || r.IsInteger32()) {
- Register input = r.IsSmi() ? ToRegister(instr->value())
- : ToRegister32(instr->value());
- Register result = r.IsSmi() ? ToRegister(instr->result())
- : ToRegister32(instr->result());
- __ Abs(result, input);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
- Label* exit,
- Label* allocation_entry) {
- // Handle the tricky cases of MathAbsTagged:
- // - HeapNumber inputs.
- // - Negative inputs produce a positive result, so a new HeapNumber is
- // allocated to hold it.
- // - Positive inputs are returned as-is, since there is no need to allocate
- // a new HeapNumber for the result.
- // - The (smi) input -0x80000000 produces +0x80000000, which does not fit
- // in a smi. In this case, the inline code sets the result and jumps directly
- // to the allocation_entry label.
- DCHECK(instr->context() != NULL);
- DCHECK(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Register result_bits = ToRegister(instr->temp3());
- Register result = ToRegister(instr->result());
-
- Label runtime_allocation;
-
- // Deoptimize if the input is not a HeapNumber.
- DeoptimizeIfNotHeapNumber(input, instr);
-
- // If the argument is positive, we can return it as-is, without any need to
- // allocate a new HeapNumber for the result. We have to do this in integer
- // registers (rather than with fabs) because we need to be able to distinguish
- // the two zeroes.
- __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ Mov(result, input);
- __ Tbz(result_bits, kXSignBit, exit);
-
- // Calculate abs(input) by clearing the sign bit.
- __ Bic(result_bits, result_bits, kXSignMask);
-
- // Allocate a new HeapNumber to hold the result.
- // result_bits The bit representation of the (double) result.
- __ Bind(allocation_entry);
- __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
- // The inline (non-deferred) code will store result_bits into result.
- __ B(exit);
-
- __ Bind(&runtime_allocation);
- if (FLAG_debug_code) {
- // Because result is in the pointer map, we need to make sure it has a valid
- // tagged value before we call the runtime. We speculatively set it to the
- // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
- // be valid.
- Label result_ok;
- Register input = ToRegister(instr->value());
- __ JumpIfSmi(result, &result_ok);
- __ Cmp(input, result);
- __ Assert(eq, kUnexpectedValue);
- __ Bind(&result_ok);
- }
-
- { PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(x0, result);
- }
- // The inline (non-deferred) code will store result_bits into result.
-}
-
-
-void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
- // Class for deferred case.
- class DeferredMathAbsTagged: public LDeferredCode {
- public:
- DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTagged(instr_, exit(),
- allocation_entry());
- }
- virtual LInstruction* instr() { return instr_; }
- Label* allocation_entry() { return &allocation; }
- private:
- LMathAbsTagged* instr_;
- Label allocation;
- };
-
- // TODO(jbramley): The early-exit mechanism would skip the new frame handling
- // in GenerateDeferredCode. Tidy this up.
- DCHECK(!NeedsDeferredFrame());
-
- DeferredMathAbsTagged* deferred =
- new(zone()) DeferredMathAbsTagged(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
- instr->hydrogen()->value()->representation().IsSmi());
- Register input = ToRegister(instr->value());
- Register result_bits = ToRegister(instr->temp3());
- Register result = ToRegister(instr->result());
- Label done;
-
- // Handle smis inline.
- // We can treat smis as 64-bit integers, since the (low-order) tag bits will
- // never get set by the negation. This is therefore the same as the Integer32
- // case in DoMathAbs, except that it operates on 64-bit values.
- STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
-
- __ JumpIfNotSmi(input, deferred->entry());
-
- __ Abs(result, input, NULL, &done);
-
- // The result is the magnitude (abs) of the smallest value a smi can
- // represent, encoded as a double.
- __ Mov(result_bits, double_to_rawbits(0x80000000));
- __ B(deferred->allocation_entry());
-
- __ Bind(deferred->exit());
- __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
-
- __ Bind(&done);
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
- DCHECK(ToDoubleRegister(instr->result()).Is(d0));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
- DCHECK(ToDoubleRegister(instr->result()).Is(d0));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
- DCHECK(ToDoubleRegister(instr->result()).Is(d0));
-}
-
-
-void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- __ Frintm(result, input);
-}
-
-
-void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ Fcvtms(result, input);
-
- // Check that the result fits into a 32-bit integer.
- // - The result did not overflow.
- __ Cmp(result, Operand(result, SXTW));
- // - The input was not NaN.
- __ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister32(instr->dividend());
- Register result = ToRegister32(instr->result());
- int32_t divisor = instr->divisor();
-
- // If the divisor is 1, return the dividend.
- if (divisor == 1) {
- __ Mov(result, dividend, kDiscardForSameWReg);
- return;
- }
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ Mov(result, Operand(dividend, ASR, shift));
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- __ Negs(result, dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ Mov(result, Operand(dividend, ASR, shift));
- return;
- }
-
- __ Asr(result, result, shift);
- __ Csel(result, result, kMinInt / divisor, vc);
-}
-
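For the positive-divisor fast path above, the key fact is that an arithmetic right shift computes a flooring (not truncating) quotient for powers of two. A small sketch, assuming the usual arithmetic-shift behaviour for negative signed values:

#include <cassert>

int FlooringDivByPowerOf2(int dividend, int shift) {
  // Arithmetic right shift rounds towards -infinity; C++ '/' truncates.
  return dividend >> shift;
}

int main() {
  assert(FlooringDivByPowerOf2(-7, 1) == -4);  // floor(-3.5)
  assert(-7 / 2 == -3);                        // truncation towards zero
  assert(FlooringDivByPowerOf2(6, 1) == 3);
  return 0;
}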
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister32(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister32(instr->result());
- DCHECK(!AreAliased(dividend, result));
-
- if (divisor == 0) {
- Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Neg(result, result);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister32(instr->temp());
- DCHECK(!AreAliased(temp, dividend, result));
- Label needs_adjustment, done;
- __ Cmp(dividend, 0);
- __ B(divisor > 0 ? lt : gt, &needs_adjustment);
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Neg(result, result);
- __ B(&done);
- __ Bind(&needs_adjustment);
- __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
- __ TruncatingDiv(result, temp, Abs(divisor));
- if (divisor < 0) __ Neg(result, result);
- __ Sub(result, result, Operand(1));
- __ Bind(&done);
-}
-
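The adjustment path above turns a truncating division into a flooring one by nudging the dividend towards zero by one before dividing and then subtracting one. A standalone sketch of the same arithmetic (ignoring overflow, which the real code deopts on):

#include <cassert>

int FlooringDivByConst(int dividend, int divisor) {  // divisor != 0
  if (dividend == 0 || (dividend < 0) == (divisor < 0)) {
    return dividend / divisor;  // same sign: truncation already floors
  }
  int adjusted = dividend + (divisor > 0 ? 1 : -1);
  return adjusted / divisor - 1;
}

int main() {
  assert(FlooringDivByConst(-7, 2) == -4);
  assert(FlooringDivByConst(-8, 2) == -4);  // exact division still works
  assert(FlooringDivByConst(7, -2) == -4);
  assert(FlooringDivByConst(7, 2) == 3);
  return 0;
}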
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- Register dividend = ToRegister32(instr->dividend());
- Register divisor = ToRegister32(instr->divisor());
- Register remainder = ToRegister32(instr->temp());
- Register result = ToRegister32(instr->result());
-
- // This can't cause an exception on ARM, so we can speculatively
- // execute it right away.
- __ Sdiv(result, dividend, divisor);
-
- // Check for x / 0.
- DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // The V flag will be set iff dividend == kMinInt.
- __ Cmp(dividend, 1);
- __ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(divisor, 0);
- __ Ccmp(dividend, 0, ZFlag, mi);
- // "divisor" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- Label done;
- // If both operands have the same sign then we are done.
- __ Eor(remainder, dividend, divisor);
- __ Tbz(remainder, kWSignBit, &done);
-
- // Check if the result needs to be corrected.
- __ Msub(remainder, result, divisor, dividend);
- __ Cbz(remainder, &done);
- __ Sub(result, result, 1);
-
- __ Bind(&done);
-}
-
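Here the correction runs after the division instead: the truncated quotient is one too large exactly when the operands have opposite signs and the remainder is non-zero, which is what the Msub/Cbz sequence checks. A sketch of that equivalence:

#include <cassert>

int FlooringDiv(int dividend, int divisor) {  // divisor != 0, no kMinInt / -1
  int quotient = dividend / divisor;          // truncates towards zero
  int remainder = dividend - quotient * divisor;
  if (remainder != 0 && ((dividend ^ divisor) < 0)) quotient -= 1;
  return quotient;
}

int main() {
  assert(FlooringDiv(-7, 2) == -4);
  assert(FlooringDiv(-8, 2) == -4);  // exact: no correction needed
  assert(FlooringDiv(7, -2) == -4);
  return 0;
}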
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
- DCHECK(ToDoubleRegister(instr->result()).Is(d0));
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister32(instr->value());
- Register result = ToRegister32(instr->result());
- __ Clz(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- Label done;
-
- // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
- // Math.pow(-Infinity, 0.5) == +Infinity
- // Math.pow(-0.0, 0.5) == +0.0
-
- // Catch -infinity inputs first.
- // TODO(jbramley): A constant infinity register would be helpful here.
- __ Fmov(double_scratch(), kFP64NegativeInfinity);
- __ Fcmp(double_scratch(), input);
- __ Fabs(result, input);
- __ B(&done, eq);
-
- // Add +0.0 to convert -0.0 to +0.0.
- __ Fadd(double_scratch(), input, fp_zero);
- __ Fsqrt(result, double_scratch());
-
- __ Bind(&done);
-}
-
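Math.pow(x, 0.5) and a plain square root disagree only for -Infinity (which must map to +Infinity rather than NaN) and for -0.0 (which must map to +0.0). A hedged sketch of the same trick used above, adding +0.0 to normalise the sign of zero:

#include <cassert>
#include <cmath>
#include <limits>

double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) return std::fabs(x);
  return std::sqrt(x + 0.0);  // -0.0 + 0.0 == +0.0, so sqrt returns +0.0
}

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  assert(PowHalf(-inf) == inf);
  assert(std::isnan(std::sqrt(-inf)));   // a plain sqrt would give NaN here
  assert(!std::signbit(PowHalf(-0.0)));  // +0.0, not -0.0
  assert(PowHalf(9.0) == 3.0);
  return 0;
}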
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- Register integer_exponent = MathPowIntegerDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d1));
- DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(!exponent_type.IsInteger32() ||
- ToRegister(instr->right()).is(integer_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(d0));
- DCHECK(ToDoubleRegister(instr->result()).is(d0));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
- __ Bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
- // supports large integer exponents.
- __ Sxtw(integer_exponent, integer_exponent);
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister scratch_d = double_scratch();
-
- DCHECK(!AreAliased(input, result, scratch_d));
-
- Label done;
-
- __ Frinta(result, input);
- __ Fcmp(input, 0.0);
- __ Fccmp(result, input, ZFlag, lt);
- // The result is correct if the input was in [-0, +infinity], or was a
- // negative integral value.
- __ B(eq, &done);
-
- // Here the input is negative, non-integral, with an exponent lower than 52.
- // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
- // case, so we can safely add 0.5.
- __ Fmov(scratch_d, 0.5);
- __ Fadd(result, input, scratch_d);
- __ Frintm(result, result);
- // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
- __ Fabs(result, result);
- __ Fneg(result, result);
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister temp = ToDoubleRegister(instr->temp1());
- DoubleRegister dot_five = double_scratch();
- Register result = ToRegister(instr->result());
- Label done;
-
- // Math.round() rounds to the nearest integer, with ties going towards
- // +infinity. This does not match any IEEE-754 rounding mode.
- // - Infinities and NaNs are propagated unchanged, but cause deopts because
- // they can't be represented as integers.
- // - The sign of the result is the same as the sign of the input. This means
- // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
- // result of -0.0.
-
- // Add 0.5 and round towards -infinity.
- __ Fmov(dot_five, 0.5);
- __ Fadd(temp, input, dot_five);
- __ Fcvtms(result, temp);
-
- // The result is correct if:
- // result is not 0, as the input could be NaN or [-0.5, -0.0].
- // result is not 1, as 0.499...94 will wrongly map to 1.
- // result fits in 32 bits.
- __ Cmp(result, Operand(result.W(), SXTW));
- __ Ccmp(result, 1, ZFlag, eq);
- __ B(hi, &done);
-
- // At this point, we have to handle possible inputs of NaN or numbers in the
- // range [-0.5, 1.5[, or numbers larger than 32 bits.
-
- // Deoptimize if the result > 1, as it must be larger than 32 bits.
- __ Cmp(result, 1);
- DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow);
-
- // Deoptimize for negative inputs, which at this point are only numbers in
- // the range [-0.5, -0.0].
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Deoptimize if the input was NaN.
- __ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN);
-
- // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
- // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
- // else 0; we avoid dealing with 0.499...94 directly.
- __ Cset(result, ge);
- __ Bind(&done);
-}
-
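Conceptually Math.round is "add 0.5 and take the floor", with two awkward inputs the code above special-cases: 0.49999999999999994 (whose sum with 0.5 rounds up to 1.0 in double arithmetic) and the negative near-zero range that must keep a negative sign. A simplified sketch that ignores the -0.0 and NaN handling:

#include <cassert>
#include <cmath>

double JsRound(double x) {  // simplified: no -0.0 or NaN handling
  if (x >= -0.5 && x < 0.5) return 0.0;
  return std::floor(x + 0.5);
}

int main() {
  assert(JsRound(2.5) == 3.0);
  assert(JsRound(-2.5) == -2.0);  // ties go towards +infinity
  assert(JsRound(0.49999999999999994) == 0.0);
  assert(std::floor(0.49999999999999994 + 0.5) == 1.0);  // the naive version is wrong here
  return 0;
}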
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fcvt(result.S(), input);
- __ Fcvt(result, result.S());
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Fsqrt(result, input);
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- HMathMinMax::Operation op = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToOperand32(instr->right());
-
- __ Cmp(left, right);
- __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
- } else if (instr->hydrogen()->representation().IsSmi()) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
-
- __ Cmp(left, right);
- __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
-
- if (op == HMathMinMax::kMathMax) {
- __ Fmax(result, left, right);
- } else {
- DCHECK(op == HMathMinMax::kMathMin);
- __ Fmin(result, left, right);
- }
- }
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister32(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister32(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
- // Note that this is correct even for kMinInt operands.
- __ Neg(dividend, dividend);
- __ And(dividend, dividend, mask);
- __ Negs(dividend, dividend);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- __ B(&done);
- }
-
- __ bind(&dividend_is_not_negative);
- __ And(dividend, dividend, mask);
- __ bind(&done);
-}
-
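JavaScript's % keeps the sign of the dividend, so a power-of-two modulus can mask the magnitude and restore the sign afterwards, as the Neg/And/Negs sequence above does. A sketch that ignores the kMinInt edge cases the real code still handles:

#include <cassert>

int ModByPowerOf2(int dividend, int divisor) {  // |divisor| a power of two, not kMinInt
  int mask = (divisor < 0 ? -divisor : divisor) - 1;
  if (dividend < 0) return -((-dividend) & mask);
  return dividend & mask;
}

int main() {
  assert(ModByPowerOf2(7, 4) == 3);
  assert(ModByPowerOf2(-7, 4) == -3);   // sign follows the dividend
  assert(ModByPowerOf2(-7, -4) == -3);  // sign of the divisor is irrelevant
  assert(-7 % 4 == -3);                 // same as C++ (and JS) semantics here
  return 0;
}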
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister32(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister32(instr->result());
- Register temp = ToRegister32(instr->temp());
- DCHECK(!AreAliased(dividend, result, temp));
-
- if (divisor == 0) {
- Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- __ Sxtw(dividend.X(), dividend);
- __ Mov(temp, Abs(divisor));
- __ Smsubl(result.X(), result, temp, dividend.X());
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- Register dividend = ToRegister32(instr->left());
- Register divisor = ToRegister32(instr->right());
- Register result = ToRegister32(instr->result());
-
- Label done;
- // modulo = dividend - quotient * divisor
- __ Sdiv(result, dividend, divisor);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
- }
- __ Msub(result, result, divisor, dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
- }
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
- DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
- bool is_smi = instr->hydrogen()->representation().IsSmi();
- Register result =
- is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
- Register left =
- is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
- int32_t right = ToInteger32(instr->right());
- DCHECK((right > -kMaxInt) && (right < kMaxInt));
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (bailout_on_minus_zero) {
- if (right < 0) {
- // The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero);
- } else if (right == 0) {
- // The result is -0 if right is zero and left is negative.
- DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero);
- }
- }
-
- switch (right) {
- // Cases which can detect overflow.
- case -1:
- if (can_overflow) {
- // Only 0x80000000 can overflow here.
- __ Negs(result, left);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Neg(result, left);
- }
- break;
- case 0:
- // This case can never overflow.
- __ Mov(result, 0);
- break;
- case 1:
- // This case can never overflow.
- __ Mov(result, left, kDiscardForSameWReg);
- break;
- case 2:
- if (can_overflow) {
- __ Adds(result, left, left);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Add(result, left, left);
- }
- break;
-
- default:
- // Multiplication by constant powers of two (and some related values)
- // can be done efficiently with shifted operands.
- int32_t right_abs = Abs(right);
-
- if (base::bits::IsPowerOfTwo32(right_abs)) {
- int right_log2 = WhichPowerOf2(right_abs);
-
- if (can_overflow) {
- Register scratch = result;
- DCHECK(!AreAliased(scratch, left));
- __ Cls(scratch, left);
- __ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow);
- }
-
- if (right >= 0) {
- // result = left << log2(right)
- __ Lsl(result, left, right_log2);
- } else {
- // result = -left << log2(-right)
- if (can_overflow) {
- __ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Neg(result, Operand(left, LSL, right_log2));
- }
- }
- return;
- }
-
-
- // For the following cases, we could perform a conservative overflow check
- // with CLS as above. However, the few cycles saved are likely not worth
- // the risk of deoptimizing more often than required.
- DCHECK(!can_overflow);
-
- if (right >= 0) {
- if (base::bits::IsPowerOfTwo32(right - 1)) {
- // result = left + left << log2(right - 1)
- __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
- } else if (base::bits::IsPowerOfTwo32(right + 1)) {
- // result = -left + left << log2(right + 1)
- __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
- __ Neg(result, result);
- } else {
- UNREACHABLE();
- }
- } else {
- if (base::bits::IsPowerOfTwo32(-right + 1)) {
- // result = left - left << log2(-right + 1)
- __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
- } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
- // result = -left - left << log2(-right - 1)
- __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
- __ Neg(result, result);
- } else {
- UNREACHABLE();
- }
- }
- }
-}
-
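The constant cases above strength-reduce the multiply into shifts and adds for constants of the form 2^n, 2^n + 1 and 2^n - 1. A sketch of the three shapes (function names are just illustrative):

#include <cassert>

int MulBy16(int x) { return x << 4; }        // 16 is a power of two
int MulBy9(int x)  { return x + (x << 3); }  // 9 - 1 is a power of two
int MulBy7(int x)  { return (x << 3) - x; }  // 7 + 1 is a power of two

int main() {
  assert(MulBy16(3) == 48);
  assert(MulBy9(5) == 45);
  assert(MulBy7(6) == 42);
  return 0;
}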
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Register right = ToRegister32(instr->right());
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (bailout_on_minus_zero && !left.Is(right)) {
- // If one operand is zero and the other is negative, the result is -0.
- // - Set Z (eq) if either left or right, or both, are 0.
- __ Cmp(left, 0);
- __ Ccmp(right, 0, ZFlag, ne);
- // - If so (eq), set N (mi) if left + right is negative.
- // - Otherwise, clear N.
- __ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
- }
-
- if (can_overflow) {
- __ Smull(result.X(), left, right);
- __ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Mul(result, left, right);
- }
-}
-
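The overflow check above performs the multiplication with a 64-bit result (Smull) and then verifies that the value sign-extends from 32 bits. An equivalent standalone sketch:

#include <cassert>
#include <cstdint>

bool MulOverflows32(int32_t a, int32_t b, int32_t* result) {
  int64_t wide = static_cast<int64_t>(a) * b;
  *result = static_cast<int32_t>(wide);           // modular narrowing
  return wide != static_cast<int64_t>(*result);   // true if the product doesn't fit
}

int main() {
  int32_t r;
  assert(!MulOverflows32(46341, 46340, &r));  // 2147441940 fits in int32
  assert(MulOverflows32(46341, 46341, &r));   // 2147488281 does not
  assert(!MulOverflows32(-46341, 46340, &r));
  return 0;
}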
-
-void LCodeGen::DoMulS(LMulS* instr) {
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (bailout_on_minus_zero && !left.Is(right)) {
- // If one operand is zero and the other is negative, the result is -0.
- // - Set Z (eq) if either left or right, or both, are 0.
- __ Cmp(left, 0);
- __ Ccmp(right, 0, ZFlag, ne);
- // - If so (eq), set N (mi) if left + right is negative.
- // - Otherwise, clear N.
- __ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
- }
-
- STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
- if (can_overflow) {
- __ Smulh(result, left, right);
- __ Cmp(result, Operand(result.W(), SXTW));
- __ SmiTag(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- } else {
- if (AreAliased(result, left, right)) {
- // All three registers are the same: half untag the input and then
- // multiply, giving a tagged result.
- STATIC_ASSERT((kSmiShift % 2) == 0);
- __ Asr(result, left, kSmiShift / 2);
- __ Mul(result, result, result);
- } else if (result.Is(left) && !left.Is(right)) {
- // Registers result and left alias, right is distinct: untag left into
- // result, and then multiply by right, giving a tagged result.
- __ SmiUntag(result, left);
- __ Mul(result, result, right);
- } else {
- DCHECK(!left.Is(result));
- // Registers result and right alias, left is distinct, or all registers
- // are distinct: untag right into result, and then multiply by left,
- // giving a tagged result.
- __ SmiUntag(result, right);
- __ Mul(result, left, result);
- }
- }
-}
-
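With 32-bit smis stored in the upper half of a 64-bit word (value << 32), the all-registers-alias case above half-untags the operand (shift right by 16) so that the product comes out already tagged: (a << 16) * (a << 16) == (a * a) << 32. A sketch, assuming the usual arithmetic-shift behaviour and no overflow:

#include <cassert>
#include <cstdint>

int64_t SmiTag(int64_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

int64_t SquareTaggedInPlace(int64_t smi) {
  int64_t half = smi >> 16;  // half-untagged: value << 16
  return half * half;        // (value * value) << 32, i.e. an already-tagged smi
}

int main() {
  assert(SquareTaggedInPlace(SmiTag(6)) == SmiTag(36));
  assert(SquareTaggedInPlace(SmiTag(-7)) == SmiTag(49));
  return 0;
}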
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = ToRegister(instr->result());
- __ Mov(result, 0);
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!result.is(cp)) {
- __ Mov(cp, 0);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
- } else {
- __ B(deferred->entry());
- }
-
- __ Bind(deferred->exit());
- __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- Label slow, convert_and_store;
- Register src = ToRegister32(value);
- Register dst = ToRegister(instr->result());
- Register scratch1 = ToRegister(temp1);
-
- if (FLAG_inline_new) {
- Register scratch2 = ToRegister(temp2);
- __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
- __ B(&convert_and_store);
- }
-
- // Slow case: call the runtime system to do the number allocation.
- __ Bind(&slow);
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ Mov(dst, 0);
- {
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!dst.is(cp)) {
- __ Mov(cp, 0);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, dst);
- }
-
- // Convert number to floating point and store in the newly allocated heap
- // number.
- __ Bind(&convert_and_store);
- DoubleRegister dbl_scratch = double_scratch();
- __ Ucvtf(dbl_scratch, src);
- __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2());
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- Register value = ToRegister32(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ Cmp(value, Smi::kMaxValue);
- __ B(hi, deferred->entry());
- __ SmiTag(result, value.X());
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- DoubleRegister result = ToDoubleRegister(instr->result());
- bool can_convert_undefined_to_nan = instr->truncating();
-
- Label done, load_smi;
-
- // Work out what untag mode we're working with.
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- __ JumpIfSmi(input, &load_smi);
-
- Label convert_undefined;
-
- // Heap number map check.
- if (can_convert_undefined_to_nan) {
- __ JumpIfNotHeapNumber(input, &convert_undefined);
- } else {
- DeoptimizeIfNotHeapNumber(input, instr);
- }
-
- // Load heap number.
- __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
- if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero);
- }
- __ B(&done);
-
- if (can_convert_undefined_to_nan) {
- __ Bind(&convert_undefined);
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- DeoptimizeReason::kNotAHeapNumberUndefined);
-
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ B(&done);
- }
-
- } else {
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- // Fall through to load_smi.
- }
-
- // Smi to double register conversion.
- __ Bind(&load_smi);
- __ SmiUntagToDouble(result, input);
-
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
- __ PushPreamble(instr->argc(), kPointerSize);
-}
-
-
-void LCodeGen::DoPushArguments(LPushArguments* instr) {
- MacroAssembler::PushPopQueue args(masm());
-
- for (int i = 0; i < instr->ArgumentCount(); ++i) {
- LOperand* arg = instr->argument(i);
- if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- return;
- }
- args.Queue(ToRegister(arg));
- }
-
- // The preamble was done by LPreparePushArguments.
- args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
-
- RecordPushedArgumentsDelta(instr->ArgumentCount());
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
- // code managed by the register allocator and tearing down the frame, it's
- // safe to write to the context register.
- __ Push(x0);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
-
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
-
- if (NeedsEagerFrame()) {
- Register stack_pointer = masm()->StackPointer();
- __ Mov(stack_pointer, fp);
- __ Pop(fp, lr);
- }
-
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- __ Drop(parameter_count + 1);
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register parameter_count = ToRegister(instr->parameter_count());
- __ DropBySMI(parameter_count);
- }
- __ Ret();
-}
-
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string,
- Register temp,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
-
- __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
- if (encoding == String::ONE_BYTE_ENCODING) {
- return MemOperand(temp, ToRegister32(index), SXTW);
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- return MemOperand(temp, ToRegister32(index), SXTW, 1);
- }
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- if (FLAG_debug_code) {
- // Even though this lithium instruction comes with a temp register, we
- // can't use it here because we want to use "AtStart" constraints on the
- // inputs and the debug code here needs a scratch register.
- UseScratchRegisterScope temps(masm());
- Register dbg_temp = temps.AcquireX();
-
- __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
-
- __ And(dbg_temp, dbg_temp,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType);
- }
-
- MemOperand operand =
- BuildSeqStringOperand(string, temp, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Ldrb(result, operand);
- } else {
- __ Ldrh(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (FLAG_debug_code) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
- encoding_mask);
- }
- MemOperand operand =
- BuildSeqStringOperand(string, temp, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Strb(value, operand);
- } else {
- __ Strh(value, operand);
- }
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow);
- }
- __ SmiTag(output, input);
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Label done, untag;
-
- if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi);
- }
-
- __ Bind(&untag);
- __ SmiUntag(result, input);
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* right_op = instr->right();
- Register left = ToRegister32(instr->left());
- Register result = ToRegister32(instr->result());
-
- if (right_op->IsRegister()) {
- Register right = ToRegister32(instr->right());
- switch (instr->op()) {
- case Token::ROR: __ Ror(result, left, right); break;
- case Token::SAR: __ Asr(result, left, right); break;
- case Token::SHL: __ Lsl(result, left, right); break;
- case Token::SHR:
- __ Lsr(result, left, right);
- if (instr->can_deopt()) {
- // If `left >>> right` >= 0x80000000, the result is not representable
- // in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- default: UNREACHABLE();
- }
- } else {
- DCHECK(right_op->IsConstantOperand());
- int shift_count = JSShiftAmountFromLConstant(right_op);
- if (shift_count == 0) {
- if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
- }
- __ Mov(result, left, kDiscardForSameWReg);
- } else {
- switch (instr->op()) {
- case Token::ROR: __ Ror(result, left, shift_count); break;
- case Token::SAR: __ Asr(result, left, shift_count); break;
- case Token::SHL: __ Lsl(result, left, shift_count); break;
- case Token::SHR: __ Lsr(result, left, shift_count); break;
- default: UNREACHABLE();
- }
- }
- }
-}
-
-
-void LCodeGen::DoShiftS(LShiftS* instr) {
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- if (right_op->IsRegister()) {
- Register right = ToRegister(instr->right());
-
- // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
- // Since we're handling smis in X registers, we have to extract these bits
- // explicitly.
- __ Ubfx(result, right, kSmiShift, 5);
-
- switch (instr->op()) {
- case Token::ROR: {
- // This is the only case that needs a scratch register. To keep things
- // simple for the other cases, borrow a MacroAssembler scratch register.
- UseScratchRegisterScope temps(masm());
- Register temp = temps.AcquireW();
- __ SmiUntag(temp, left);
- __ Ror(result.W(), temp.W(), result.W());
- __ SmiTag(result);
- break;
- }
- case Token::SAR:
- __ Asr(result, left, result);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Lsl(result, left, result);
- break;
- case Token::SHR:
- __ Lsr(result, left, result);
- __ Bic(result, result, kSmiShiftMask);
- if (instr->can_deopt()) {
- // If `left >>> right` >= 0x80000000, the result is not representable
- // in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- default: UNREACHABLE();
- }
- } else {
- DCHECK(right_op->IsConstantOperand());
- int shift_count = JSShiftAmountFromLConstant(right_op);
- if (shift_count == 0) {
- if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
- }
- __ Mov(result, left);
- } else {
- switch (instr->op()) {
- case Token::ROR:
- __ SmiUntag(result, left);
- __ Ror(result.W(), result.W(), shift_count);
- __ SmiTag(result);
- break;
- case Token::SAR:
- __ Asr(result, left, shift_count);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Lsl(result, left, shift_count);
- break;
- case Token::SHR:
- __ Lsr(result, left, shift_count);
- __ Bic(result, result, kSmiShiftMask);
- break;
- default: UNREACHABLE();
- }
- }
- }
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ Debug("LDebugBreak", 0, BREAK);
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Register scratch1 = x5;
- Register scratch2 = x6;
- DCHECK(instr->IsMarkedAsCall());
-
- // TODO(all): if Mov could handle objects in new space then it could be used
- // here.
- __ LoadHeapObject(scratch1, instr->hydrogen()->declarations());
- __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
- __ Push(scratch1, scratch2);
- __ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector());
- __ Push(scratch1);
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(hs, &done);
-
- PredictableCodeSizeScope predictable(masm_,
- Assembler::kCallSizeWithRelocation);
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- __ Bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
- __ B(lo, deferred_stack_check->entry());
-
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ Bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting the call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- Register temp = ToRegister(instr->temp());
- __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
- __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ Ldr(scratch, target);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
- DeoptimizeReason::kHole);
- } else {
- __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
- }
- }
-
- __ Str(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
- scratch, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed);
- }
- __ Bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
- Register ext_ptr = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch;
- ElementsKind elements_kind = instr->elements_kind();
-
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- DCHECK(instr->temp() == NULL);
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- scratch = ToRegister(instr->temp());
- }
-
- MemOperand dst =
- PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
- key_is_constant, constant_key,
- elements_kind,
- instr->base_offset());
-
- if (elements_kind == FLOAT32_ELEMENTS) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- DoubleRegister dbl_scratch = double_scratch();
- __ Fcvt(dbl_scratch.S(), value);
- __ Str(dbl_scratch.S(), dst);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ Str(value, dst);
- } else {
- Register value = ToRegister(instr->value());
-
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- __ Strb(value, dst);
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ Strh(value, dst);
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ Str(value.W(), dst);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
- Register elements = ToRegister(instr->elements());
- DoubleRegister value = ToDoubleRegister(instr->value());
- MemOperand mem_op;
-
- if (instr->key()->IsConstantOperand()) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xf0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- int offset = instr->base_offset() + constant_key * kDoubleSize;
- mem_op = MemOperand(elements, offset);
- } else {
- Register store_base = ToRegister(instr->temp());
- Register key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind(),
- instr->hydrogen()->representation(),
- instr->base_offset());
- }
-
- if (instr->NeedsCanonicalization()) {
- __ CanonicalizeNaN(double_scratch(), value);
- __ Str(double_scratch(), mem_op);
- } else {
- __ Str(value, mem_op);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register scratch = no_reg;
- Register store_base = no_reg;
- Register key = no_reg;
- MemOperand mem_op;
-
- if (!instr->key()->IsConstantOperand() ||
- instr->hydrogen()->NeedsWriteBarrier()) {
- scratch = ToRegister(instr->temp());
- }
-
- Representation representation = instr->hydrogen()->value()->representation();
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset = instr->base_offset() +
- ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- if (representation.IsInteger32()) {
- DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
- STATIC_ASSERT(kSmiTag == 0);
- mem_op = UntagSmiMemOperand(store_base, offset);
- } else {
- mem_op = MemOperand(store_base, offset);
- }
- } else {
- store_base = scratch;
- key = ToRegister(instr->key());
- bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-
- mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind(),
- representation, instr->base_offset());
- }
-
- __ Store(value, mem_op, representation);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- DCHECK(representation.IsTagged());
- // This assignment may cause element_addr to alias store_base.
- Register element_addr = scratch;
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute the address of the modified element and store it in element_addr.
- __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
- __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
- kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = x0;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ B(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ Cmp(ToRegister(current_capacity), Operand(constant_key));
- __ B(le, deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ Cmp(ToRegister(key), Operand(constant_capacity));
- __ B(ge, deferred->entry());
- } else {
- __ Cmp(ToRegister(key), ToRegister(current_capacity));
- __ B(ge, deferred->entry());
- }
-
- __ Mov(result, ToRegister(instr->elements()));
-
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = x0;
- __ Mov(result, 0);
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- __ Move(result, ToRegister(instr->object()));
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
- } else {
- __ Mov(x3, ToRegister(key));
- __ SmiTag(x3);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- Register value = ToRegister(instr->value());
- __ Store(value, MemOperand(object, offset), representation);
- return;
- }
-
- __ AssertNotSmi(object);
-
- if (!FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- FPRegister value = ToDoubleRegister(instr->value());
- __ Str(value, FieldMemOperand(object, offset));
- return;
- }
-
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsInteger32Constant(LConstantOperand::cast(instr->value())));
-
- if (instr->hydrogen()->has_transition()) {
- Handle<Map> transition = instr->hydrogen()->transition_map();
- AddDeprecationDependency(transition);
- // Store the new map value.
- Register new_map_value = ToRegister(instr->temp0());
- __ Mov(new_map_value, Operand(transition));
- __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object,
- new_map_value,
- ToRegister(instr->temp1()),
- GetLinkRegisterState(),
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register destination;
- if (access.IsInobject()) {
- destination = object;
- } else {
- Register temp0 = ToRegister(instr->temp0());
- __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
- destination = temp0;
- }
-
- if (FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- FPRegister value = ToDoubleRegister(instr->value());
- __ Str(value, FieldMemOperand(object, offset));
- } else if (representation.IsSmi() &&
- instr->hydrogen()->value()->representation().IsInteger32()) {
- DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-#ifdef DEBUG
- Register temp0 = ToRegister(instr->temp0());
- __ Ldr(temp0, FieldMemOperand(destination, offset));
- __ AssertSmi(temp0);
- // If destination aliased temp0, restore it to the address calculated
- // earlier.
- if (destination.Is(temp0)) {
- DCHECK(!access.IsInobject());
- __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
- }
-#endif
- STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
- STATIC_ASSERT(kSmiTag == 0);
- Register value = ToRegister(instr->value());
- __ Store(value, UntagSmiFieldMemOperand(destination, offset),
- Representation::Integer32());
- } else {
- Register value = ToRegister(instr->value());
- __ Store(value, FieldMemOperand(destination, offset), representation);
- }
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register value = ToRegister(instr->value());
- __ RecordWriteField(destination,
- offset,
- value, // Clobbered.
- ToRegister(instr->temp1()), // Clobbered.
- GetLinkRegisterState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).Is(x1));
- DCHECK(ToRegister(instr->right()).Is(x0));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister32(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Mov(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- Register index = ToRegister(instr->index());
- __ SmiTagAndPush(index);
-
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
- instr->context());
- __ AssertSmi(x0);
- __ SmiUntag(x0);
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister32(instr->char_code());
- Register result = ToRegister(instr->result());
-
- __ Cmp(char_code, String::kMaxOneByteCharCode);
- __ B(hi, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(eq, deferred->entry());
- __ Bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Mov(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ SmiTagAndPush(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(x1));
- DCHECK(ToRegister(instr->right()).is(x0));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister32(instr->result());
- Register left = ToRegister32(instr->left());
- Operand right = ToShiftedRightOperand32(instr->right(), instr);
-
- if (can_overflow) {
- __ Subs(result, left, right);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Sub(result, left, right);
- }
-}
-
-
-void LCodeGen::DoSubS(LSubS* instr) {
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- Register result = ToRegister(instr->result());
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- if (can_overflow) {
- __ Subs(result, left, right);
- DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
- } else {
- __ Sub(result, left, right);
- }
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- Register input = ToRegister(value);
- Register scratch1 = ToRegister(temp1);
- DoubleRegister dbl_scratch1 = double_scratch();
-
- Label done;
-
- if (instr->truncating()) {
- UseScratchRegisterScope temps(masm());
- Register output = ToRegister(instr->result());
- Register input_map = temps.AcquireX();
- Register input_instance_type = input_map;
- Label truncate;
- __ CompareObjectType(input, input_map, input_instance_type,
- HEAP_NUMBER_TYPE);
- __ B(eq, &truncate);
- __ Cmp(input_instance_type, ODDBALL_TYPE);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ Bind(&truncate);
- __ TruncateHeapNumberToI(output, input);
- } else {
- Register output = ToRegister32(instr->result());
- DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
-
- DeoptimizeIfNotHeapNumber(input, instr);
-
- // A heap number: load the value and convert it to int32 using a
- // non-truncating function. If the result is out of range, branch to
- // deoptimize.
- __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
- __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Cmp(output, 0);
- __ B(ne, &done);
- __ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero);
- }
- }
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2());
- }
-
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(output, input);
- } else {
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- __ JumpIfNotSmi(input, deferred->entry());
- __ SmiUntag(output, input);
- __ Bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register temp1 = ToRegister(instr->temp1());
- Register new_map = ToRegister(instr->temp2());
- __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
- __ Mov(new_map, Operand(to_map));
- __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
- kDontSaveFPRegs);
- } else {
- {
- UseScratchRegisterScope temps(masm());
- // Use the temp register only in a restricted scope - the codegen checks
- // that we do not use any register across a call.
- __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
- DONT_DO_SMI_CHECK);
- }
- DCHECK(object.is(x0));
- DCHECK(ToRegister(instr->context()).is(cp));
- PushSafepointRegistersScope scope(this);
- __ Mov(x1, Operand(to_map));
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
- __ Bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
- __ Bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ TruncateDoubleToI(result, input);
- if (instr->tag_result()) {
- __ SmiTag(result, result);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->value()).is(x3));
- DCHECK(ToRegister(instr->result()).is(x0));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ Mov(x0, Immediate(isolate()->factory()->number_string()));
- __ B(&end);
- __ Bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ Bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Handle<String> type_name = instr->type_literal();
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Register value = ToRegister(instr->value());
-
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(value, true_label);
-
- int true_block = instr->TrueDestination(chunk_);
- int false_block = instr->FalseDestination(chunk_);
- int next_block = GetNextEmittedBlock();
-
- if (true_block == false_block) {
- EmitGoto(true_block);
- } else if (true_block == next_block) {
- __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
- } else {
- __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
- if (false_block != next_block) {
- __ B(chunk_->GetAssemblyLabel(false_block));
- }
- }
-
- } else if (String::Equals(type_name, factory->string_string())) {
- DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, false_label);
- __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
- EmitBranch(instr, lt);
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, false_label);
- __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
- EmitBranch(instr, eq);
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
- __ CompareRoot(value, Heap::kFalseValueRootIndex);
- EmitBranch(instr, eq);
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- DCHECK(instr->temp1() != NULL);
- Register scratch = ToRegister(instr->temp1());
-
- __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label);
- __ JumpIfSmi(value, false_label);
- // Check for undetectable objects and jump to the true branch in this case.
- __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
-
- } else if (String::Equals(type_name, factory->function_string())) {
- DCHECK(instr->temp1() != NULL);
- Register scratch = ToRegister(instr->temp1());
-
- __ JumpIfSmi(value, false_label);
- __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch, scratch,
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
- EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable);
-
- } else if (String::Equals(type_name, factory->object_string())) {
- DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, false_label);
- __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
- false_label, lt);
- // Check for callable or undetectable objects => false.
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, eq, scratch,
- (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
-
- } else {
- __ B(false_label);
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- Register temp = ToRegister(instr->temp());
- __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Cmp(map, temp);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // If the receiver is null or undefined, we have to pass the global object as
- // a receiver to normal functions. Values have to be passed unchanged to
- // builtins and strict-mode functions.
- Label global_object, done, copy_receiver;
-
- if (!instr->hydrogen()->known_function()) {
- __ Ldr(result, FieldMemOperand(function,
- JSFunction::kSharedFunctionInfoOffset));
-
- // CompilerHints is an int32 field. See objects.h.
- __ Ldr(result.W(),
- FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
-
- // Do not transform the receiver to object for strict mode functions.
- __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);
-
- // Do not transform the receiver to object for builtins.
- __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
- __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
-
- // Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi);
- __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
- __ B(ge, &copy_receiver);
- Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ Bind(&global_object);
- __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
- __ B(&done);
-
- __ Bind(&copy_receiver);
- __ Mov(result, receiver);
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object);
- __ Push(index);
- __ Mov(cp, 0);
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(x0, result);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- result_(result),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register result_;
- Register object_;
- Register index_;
- };
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
-
- __ AssertSmi(index);
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, result, object, index);
-
- Label out_of_object, done;
-
- __ TestAndBranchIfAnySet(
- index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
- __ Mov(index, Operand(index, ASR, 1));
-
- __ Cmp(index, Smi::kZero);
- __ B(lt, &out_of_object);
-
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
-
- __ B(&done);
-
- __ Bind(&out_of_object);
- __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- // Index is equal to the negated out-of-object property index plus 1.
- __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(result, FieldMemOperand(result,
- FixedArray::kHeaderSize - kPointerSize));
- __ Bind(deferred->exit());
- __ Bind(&done);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
deleted file mode 100644
index 7f444738aa..0000000000
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
-
-#include "src/crankshaft/arm64/lithium-arm64.h"
-
-#include "src/ast/scopes.h"
-#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-class BranchGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- pushed_arguments_(0) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- Scope* scope() const { return scope_; }
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- LinkRegisterStatus GetLinkRegisterState() const {
- return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
- }
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- // Support for converting LOperands to assembler types.
- Register ToRegister(LOperand* op) const;
- Register ToRegister32(LOperand* op) const;
- Operand ToOperand(LOperand* op);
- Operand ToOperand32(LOperand* op);
- enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
- MemOperand ToMemOperand(LOperand* op,
- StackMode stack_mode = kCanUseStackPointer) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- template <class LI>
- Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
-
- int JSShiftAmountFromLConstant(LOperand* constant) {
- return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
- }
-
- // TODO(jbramley): Examine these helpers and check that they make sense.
- // IsInteger32Constant returns true for smi constants, for example.
- bool IsInteger32Constant(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
-
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- // Return a double scratch register which can be used locally
- // when generating code for a lithium instruction.
- DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
- Label* exit,
- Label* allocation_entry);
-
- void DoDeferredNumberTagU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2);
- void DoDeferredTaggedToI(LTaggedToI* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void DoGap(LGap* instr);
-
- // Generic version of EmitBranch. It contains some code to avoid emitting a
- // branch to the next emitted basic block where we could just fall through.
- // You shouldn't use it directly; consider one of the helpers such as
- // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch instead.
- template<class InstrType>
- void EmitBranchGeneric(InstrType instr,
- const BranchGenerator& branch);
-
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition condition);
-
- template<class InstrType>
- void EmitCompareAndBranch(InstrType instr,
- Condition condition,
- const Register& lhs,
- const Operand& rhs);
-
- template<class InstrType>
- void EmitTestAndBranch(InstrType instr,
- Condition condition,
- const Register& value,
- uint64_t mask);
-
- template<class InstrType>
- void EmitBranchIfNonZeroNumber(InstrType instr,
- const FPRegister& value,
- const FPRegister& scratch);
-
- template<class InstrType>
- void EmitBranchIfHeapNumber(InstrType instr,
- const Register& value);
-
- template<class InstrType>
- void EmitBranchIfRoot(InstrType instr,
- const Register& value,
- Heap::RootListIndex index);
-
- // Emits optimized code to deep-copy the contents of statically known object
- // graphs (e.g. object literal boilerplate). Expects a pointer to the
- // allocated destination object in the result register, and a pointer to the
- // source object in the source register.
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- Register scratch,
- int* offset,
- AllocationSiteMode mode);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to the
- // true and false labels should be made, to optimize fallthrough.
- Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
- SmiCheck check_needed);
-
- MemOperand BuildSeqStringOperand(Register string,
- Register temp,
- LOperand* index,
- String::Encoding encoding);
- void DeoptimizeBranch(LInstruction* instr, DeoptimizeReason deopt_reason,
- BranchType branch_type, Register reg = NoReg,
- int bit = -1,
- Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType* override_bailout_type = NULL);
- void DeoptimizeIf(Condition cond, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfZero(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfNegative(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfSmi(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, DeoptimizeReason deopt_reason);
- void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr, DeoptimizeReason deopt_reason);
- void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
- void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- DeoptimizeReason deopt_reason);
- void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- DeoptimizeReason deopt_reason);
-
- MemOperand PrepareKeyedExternalArrayOperand(Register key,
- Register base,
- Register scratch,
- bool key_is_smi,
- bool key_is_constant,
- int constant_key,
- ElementsKind elements_kind,
- int base_offset);
- MemOperand PrepareKeyedArrayOperand(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind,
- Representation representation,
- int base_offset);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation steps. Returns true if code generation should continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in x1.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
-
- ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table itself is
- // emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- // The number of arguments pushed onto the stack, either by this block or by a
- // predecessor.
- int pushed_arguments_;
-
- void RecordPushedArgumentsDelta(int delta) {
- pushed_arguments_ += delta;
- DCHECK(pushed_arguments_ >= 0);
- }
-
- int old_position_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen);
-
- ~PushSafepointRegistersScope();
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-
-// This is the abstract class used by EmitBranchGeneric.
-// It is used to emit code for conditional branching. The Emit() function
-// emits code to branch when the condition holds and EmitInverted() emits
- // the branch when the inverted condition holds.
-//
- // For actual examples of conditions, see the concrete implementations in
-// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
-class BranchGenerator BASE_EMBEDDED {
- public:
- explicit BranchGenerator(LCodeGen* codegen)
- : codegen_(codegen) { }
-
- virtual ~BranchGenerator() { }
-
- virtual void Emit(Label* label) const = 0;
- virtual void EmitInverted(Label* label) const = 0;
-
- protected:
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- LCodeGen* codegen_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
deleted file mode 100644
index 37db921b62..0000000000
--- a/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
-#include "src/crankshaft/arm64/delayed-masm-arm64-inl.h"
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM((&masm_))
-
-DelayedGapMasm::DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm)
- : DelayedMasm(owner, masm, root) {
- // We use the root register as an extra scratch register.
- // The root register has two advantages:
- // - It is not in the Crankshaft allocatable register list, so it can't
- // interfere with the allocatable registers.
- // - We don't need to push it on the stack, as we can reload it with its
- // value once we have finished.
-}
-
-DelayedGapMasm::~DelayedGapMasm() {}
-
-void DelayedGapMasm::EndDelayedUse() {
- DelayedMasm::EndDelayedUse();
- if (scratch_register_used()) {
- DCHECK(ScratchRegister().Is(root));
- DCHECK(!pending());
- InitializeRootRegister();
- reset_scratch_register_used();
- }
-}
-
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
- root_index_(0), in_cycle_(false), saved_destination_(NULL) {
-}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- DCHECK(!masm_.pending());
-
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
-
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found when we reach this move again.
- PerformMove(i);
- if (in_cycle_) RestoreValue();
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
-
- if (!move.IsEliminated()) {
- DCHECK(move.source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- __ EndDelayedUse();
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
- LMoveOperands& current_move = moves_[index];
-
- DCHECK(!current_move.IsPending());
- DCHECK(!current_move.IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- DCHECK(current_move.source() != NULL); // Otherwise it will look eliminated.
- LOperand* destination = current_move.destination();
- current_move.set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
-      // If there is a blocking, pending move, it must be moves_[root_index_],
-      // and all other moves with the same source as moves_[root_index_] are
-      // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- current_move.set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-void LGapResolver::BreakCycle(int index) {
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
- DCHECK(!in_cycle_);
-
-  // We save the source of that move in a register and remember its
- // destination. Then we mark this move as resolved so the cycle is
- // broken and we can perform the other moves.
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
-
- if (source->IsRegister()) {
- AcquireSavedValueRegister();
- __ Mov(SavedValueRegister(), cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- AcquireSavedValueRegister();
- __ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
-
- // Mark this move as resolved.
- // This move will be actually performed by moving the saved value to this
- // move's destination in LGapResolver::RestoreValue().
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- DCHECK(in_cycle_);
- DCHECK(saved_destination_ != NULL);
-
- if (saved_destination_->IsRegister()) {
- __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
- ReleaseSavedValueRegister();
- } else if (saved_destination_->IsStackSlot()) {
- __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
- ReleaseSavedValueRegister();
- } else if (saved_destination_->IsDoubleRegister()) {
- __ Fmov(cgen_->ToDoubleRegister(saved_destination_),
- SavedFPValueRegister());
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ Mov(cgen_->ToRegister(destination), source_register);
- } else {
- DCHECK(destination->IsStackSlot());
- __ Store(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ Load(cgen_->ToRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsStackSlot());
- EmitStackSlotMove(index);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ Mov(dst, cgen_->ToSmi(constant_source));
- } else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Mov(dst, cgen_->ToInteger32(constant_source));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DoubleRegister result = cgen_->ToDoubleRegister(destination);
- __ Fmov(result, cgen_->ToDouble(constant_source));
- } else {
- DCHECK(destination->IsStackSlot());
- DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsSmi(constant_source)) {
- Smi* smi = cgen_->ToSmi(constant_source);
- __ StoreConstant(reinterpret_cast<intptr_t>(smi),
- cgen_->ToMemOperand(destination));
- } else if (cgen_->IsInteger32Constant(constant_source)) {
- __ StoreConstant(cgen_->ToInteger32(constant_source),
- cgen_->ToMemOperand(destination));
- } else {
- Handle<Object> handle = cgen_->ToHandle(constant_source);
- AllowDeferredHandleDereference smi_object_check;
- if (handle->IsSmi()) {
- Object* obj = *handle;
- DCHECK(!obj->IsHeapObject());
- __ StoreConstant(reinterpret_cast<intptr_t>(obj),
- cgen_->ToMemOperand(destination));
- } else {
- AcquireSavedValueRegister();
- __ LoadObject(SavedValueRegister(), handle);
- __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
- ReleaseSavedValueRegister();
- }
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ Fmov(cgen_->ToDoubleRegister(destination), src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ Store(src, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand src = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ Load(cgen_->ToDoubleRegister(destination), src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- EmitStackSlotMove(index);
- }
-
- } else {
- UNREACHABLE();
- }
-
- // The move has been emitted, we can eliminate it.
- moves_[index].Eliminate();
-}
-
-} // namespace internal
-} // namespace v8
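A self-contained sketch of the parallel-move resolution scheme the deleted gap resolver implements: depth-first traversal of blocking moves, with a scratch register to break cycles (as in PerformMove()/BreakCycle()/RestoreValue() above). It is illustrative only; registers are plain integers and emitted moves are printed rather than assembled, so none of the real LGapResolver or MacroAssembler types are involved.

#include <cstdio>
#include <vector>

// One move between abstract register slots; dst == -1 marks it "pending",
// i.e. currently on the recursion stack, mirroring the cleared destination
// in the real PerformMove().
struct Move { int src; int dst; bool eliminated; };

struct Resolver {
  std::vector<Move> moves;
  static constexpr int kScratch = 9;  // plays the role of the scratch register

  void EmitMove(int i) {
    std::printf("mov r%d <- r%d\n", moves[i].dst, moves[i].src);
    moves[i].eliminated = true;
  }

  // Perform move `index`, first recursively performing every move that reads
  // this move's destination. A blocker that is itself pending closes a cycle;
  // its input is saved to the scratch register before we clobber it.
  void PerformMove(int index) {
    int destination = moves[index].dst;
    moves[index].dst = -1;  // mark pending

    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (i == index || moves[i].eliminated) continue;
      if (moves[i].src != destination) continue;
      if (moves[i].dst == -1) {
        // Cycle detected: preserve the value this move is about to overwrite
        // and let the pending ancestor read it from the scratch register.
        std::printf("mov r%d <- r%d  ; break cycle via scratch\n", kScratch,
                    destination);
        moves[i].src = kScratch;
      } else {
        PerformMove(i);
      }
    }

    moves[index].dst = destination;  // no longer pending
    EmitMove(index);
  }

  void Resolve() {
    for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
      if (!moves[i].eliminated) PerformMove(i);
    }
  }
};

int main() {
  // Rotate r0 -> r1 -> r2 -> r0: a three-element cycle that needs the scratch.
  Resolver resolver;
  resolver.moves = {{0, 1, false}, {1, 2, false}, {2, 0, false}};
  resolver.Resolve();
  return 0;
}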
diff --git a/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
deleted file mode 100644
index 31dbf30a8a..0000000000
--- a/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#define V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-
-#include "src/crankshaft/arm64/delayed-masm-arm64.h"
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class DelayedGapMasm : public DelayedMasm {
- public:
- DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm);
- ~DelayedGapMasm();
-
- void EndDelayedUse();
-};
-
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Emit a move from one stack slot to another.
- void EmitStackSlotMove(int index) {
- masm_.StackSlotMove(moves_[index].source(), moves_[index].destination());
- }
-
- // Verify the move list before performing moves.
- void Verify();
-
- // Registers used to solve cycles.
- const Register& SavedValueRegister() {
- DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
- masm_.ScratchRegister().code()));
- return masm_.ScratchRegister();
- }
-  // The scratch register is used to break cycles and to store constants.
- // These two methods switch from one mode to the other.
- void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); }
- void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); }
- const FPRegister& SavedFPValueRegister() {
- // We use the Crankshaft floating-point scratch register to break a cycle
- // involving double values as the MacroAssembler will not need it for the
- // operations performed by the gap resolver.
- DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
- crankshaft_fp_scratch.code()));
- return crankshaft_fp_scratch;
- }
-
- LCodeGen* cgen_;
- DelayedGapMasm masm_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
diff --git a/deps/v8/src/crankshaft/compilation-phase.cc b/deps/v8/src/crankshaft/compilation-phase.cc
deleted file mode 100644
index 11300701b0..0000000000
--- a/deps/v8/src/crankshaft/compilation-phase.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/compilation-phase.h"
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
- : name_(name), info_(info), zone_(info->isolate()->allocator(), ZONE_NAME) {
- if (FLAG_hydrogen_stats) {
- info_zone_start_allocation_size_ = info->zone()->allocation_size();
- timer_.Start();
- }
-}
-
-CompilationPhase::~CompilationPhase() {
- if (FLAG_hydrogen_stats) {
- size_t size = zone()->allocation_size();
- size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
- }
-}
-
-bool CompilationPhase::ShouldProduceTraceOutput() const {
- // Trace if the appropriate trace flag is set and the phase name's first
- // character is in the FLAG_trace_phase command line parameter.
- AllowHandleDereference allow_deref;
- bool tracing_on =
- info()->IsStub()
- ? FLAG_trace_hydrogen_stubs
- : (FLAG_trace_hydrogen &&
- info()->shared_info()->PassesFilter(FLAG_trace_hydrogen_filter));
- return (tracing_on &&
- base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) !=
- NULL);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/compilation-phase.h b/deps/v8/src/crankshaft/compilation-phase.h
deleted file mode 100644
index 8d6468d4dc..0000000000
--- a/deps/v8/src/crankshaft/compilation-phase.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_COMPILATION_PHASE_H_
-#define V8_CRANKSHAFT_COMPILATION_PHASE_H_
-
-#include "src/allocation.h"
-#include "src/base/platform/elapsed-timer.h"
-#include "src/compilation-info.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationPhase BASE_EMBEDDED {
- public:
- CompilationPhase(const char* name, CompilationInfo* info);
- ~CompilationPhase();
-
- protected:
- bool ShouldProduceTraceOutput() const;
-
- const char* name() const { return name_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info()->isolate(); }
- Zone* zone() { return &zone_; }
-
- private:
- const char* name_;
- CompilationInfo* info_;
- Zone zone_;
- size_t info_zone_start_allocation_size_;
- base::ElapsedTimer timer_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_COMPILATION_PHASE_H_
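The deleted CompilationPhase is an RAII helper: its constructor snapshots the zone allocation size and starts a timer, and its destructor reports both to the isolate's statistics when --hydrogen-stats is set. A small standalone sketch of the same pattern; SaveTiming and the byte counter are invented for the example, not V8 APIs.

#include <chrono>
#include <cstdio>
#include <string>

// Stand-in for the per-isolate statistics sink used by the real class.
static void SaveTiming(const std::string& name, double ms, size_t bytes) {
  std::printf("%-12s %8.3f ms, %zu bytes\n", name.c_str(), ms, bytes);
}

// RAII pattern: note the current allocation size and start a timer on entry,
// report elapsed time and the allocation delta on exit.
class Phase {
 public:
  Phase(const char* name, size_t* allocated)
      : name_(name),
        allocated_(allocated),
        start_bytes_(*allocated),
        start_(std::chrono::steady_clock::now()) {}
  ~Phase() {
    double elapsed = std::chrono::duration<double, std::milli>(
                         std::chrono::steady_clock::now() - start_).count();
    SaveTiming(name_, elapsed, *allocated_ - start_bytes_);
  }

 private:
  std::string name_;
  size_t* allocated_;
  size_t start_bytes_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  size_t zone_bytes = 0;  // stands in for Zone::allocation_size()
  {
    Phase phase("H_Example", &zone_bytes);
    zone_bytes += 4096;  // pretend the phase allocated something
  }  // timing and allocation delta are reported here
  return 0;
}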
diff --git a/deps/v8/src/crankshaft/hydrogen-alias-analysis.h b/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
deleted file mode 100644
index d06aabc76e..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
-#define V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-enum HAliasing {
- kMustAlias,
- kMayAlias,
- kNoAlias
-};
-
-
-// Defines the interface to alias analysis for the rest of the compiler.
-// A simple implementation can use only local reasoning, but a more powerful
-// analysis might employ points-to analysis.
-class HAliasAnalyzer : public ZoneObject {
- public:
- // Simple alias analysis distinguishes allocations, parameters,
- // and constants using only local reasoning.
- HAliasing Query(HValue* a, HValue* b) {
- // The same SSA value always references the same object.
- if (a == b) return kMustAlias;
-
- if (a->IsAllocate() || a->IsInnerAllocatedObject()) {
- // Two non-identical allocations can never be aliases.
- if (b->IsAllocate()) return kNoAlias;
- if (b->IsInnerAllocatedObject()) return kNoAlias;
- // An allocation can never alias a parameter or a constant.
- if (b->IsParameter()) return kNoAlias;
- if (b->IsConstant()) return kNoAlias;
- }
- if (b->IsAllocate() || b->IsInnerAllocatedObject()) {
- // An allocation can never alias a parameter or a constant.
- if (a->IsParameter()) return kNoAlias;
- if (a->IsConstant()) return kNoAlias;
- }
-
- // Constant objects can be distinguished statically.
- if (a->IsConstant() && b->IsConstant()) {
- return a->Equals(b) ? kMustAlias : kNoAlias;
- }
- return kMayAlias;
- }
-
- // Checks whether the objects referred to by the given instructions may
- // ever be aliases. Note that this is more conservative than checking
- // {Query(a, b) == kMayAlias}, since this method considers kMustAlias
- // objects to also be may-aliasing.
- inline bool MayAlias(HValue* a, HValue* b) {
- return Query(a, b) != kNoAlias;
- }
-
- inline bool MustAlias(HValue* a, HValue* b) {
- return Query(a, b) == kMustAlias;
- }
-
- inline bool NoAlias(HValue* a, HValue* b) {
- return Query(a, b) == kNoAlias;
- }
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-bce.cc b/deps/v8/src/crankshaft/hydrogen-bce.cc
deleted file mode 100644
index 333fafbf13..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-bce.cc
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-bce.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// We try to "factor up" HBoundsCheck instructions towards the root of the
-// dominator tree.
-// For now we handle checks where the index is like "exp + int32value".
-// If in the dominator tree we check "exp + v1" and later (dominated)
-// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
-// v2 > v1 we can use v2 in the 1st check and again remove the second.
-// To do so we keep a dictionary of all checks where the key is the pair
-// "exp, length".
-// The class BoundsCheckKey represents this key.
-class BoundsCheckKey : public ZoneObject {
- public:
- HValue* IndexBase() const { return index_base_; }
- HValue* Length() const { return length_; }
-
- uint32_t Hash() {
- return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
- }
-
- static BoundsCheckKey* Create(Zone* zone,
- HBoundsCheck* check,
- int32_t* offset) {
- if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
-
- HValue* index_base = NULL;
- HConstant* constant = NULL;
- bool is_sub = false;
-
- if (check->index()->IsAdd()) {
- HAdd* index = HAdd::cast(check->index());
- if (index->left()->IsConstant()) {
- constant = HConstant::cast(index->left());
- index_base = index->right();
- } else if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- } else if (check->index()->IsSub()) {
- HSub* index = HSub::cast(check->index());
- is_sub = true;
- if (index->right()->IsConstant()) {
- constant = HConstant::cast(index->right());
- index_base = index->left();
- }
- } else if (check->index()->IsConstant()) {
- index_base = check->block()->graph()->GetConstant0();
- constant = HConstant::cast(check->index());
- }
-
- if (constant != NULL && constant->HasInteger32Value() &&
- constant->Integer32Value() != kMinInt) {
- *offset = is_sub ? - constant->Integer32Value()
- : constant->Integer32Value();
- } else {
- *offset = 0;
- index_base = check->index();
- }
-
- return new(zone) BoundsCheckKey(index_base, check->length());
- }
-
- private:
- BoundsCheckKey(HValue* index_base, HValue* length)
- : index_base_(index_base),
- length_(length) { }
-
- HValue* index_base_;
- HValue* length_;
-
- DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey);
-};
-
-
-// Data about each HBoundsCheck that can be eliminated or moved.
-// It is the "value" in the dictionary indexed by "base-index, length"
-// (the key is BoundsCheckKey).
-// We scan the code with a dominator tree traversal.
-// Traversing the dominator tree we keep a stack (implemented as a singly
-// linked list) of "data" for each basic block that contains a relevant check
-// with the same key (the dictionary holds the head of the list).
-// We also keep all the "data" created for a given basic block in a list, and
-// use it to "clean up" the dictionary when backtracking in the dominator tree
-// traversal.
-// Doing this, each dictionary entry always directly points to the check that
-// is dominating the code being examined now.
-// We also track the current "offset" of the index expression and use it to
-// decide if any check is already "covered" (so it can be removed) or not.
-class BoundsCheckBbData: public ZoneObject {
- public:
- BoundsCheckKey* Key() const { return key_; }
- int32_t LowerOffset() const { return lower_offset_; }
- int32_t UpperOffset() const { return upper_offset_; }
- HBasicBlock* BasicBlock() const { return basic_block_; }
- HBoundsCheck* LowerCheck() const { return lower_check_; }
- HBoundsCheck* UpperCheck() const { return upper_check_; }
- BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
- BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
-
- bool OffsetIsCovered(int32_t offset) const {
- return offset >= LowerOffset() && offset <= UpperOffset();
- }
-
- bool HasSingleCheck() { return lower_check_ == upper_check_; }
-
- void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
- BoundsCheckBbData* data = FatherInDominatorTree();
- while (data != NULL && data->UpperCheck() == check) {
- DCHECK(data->upper_offset_ < offset);
- data->upper_offset_ = offset;
- data = data->FatherInDominatorTree();
- }
- }
-
- void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
- BoundsCheckBbData* data = FatherInDominatorTree();
- while (data != NULL && data->LowerCheck() == check) {
- DCHECK(data->lower_offset_ > offset);
- data->lower_offset_ = offset;
- data = data->FatherInDominatorTree();
- }
- }
-
- // The goal of this method is to modify either upper_offset_ or
-  // lower_offset_ so that new_offset is also covered (the covered
- // range grows).
- //
- // The precondition is that new_check follows UpperCheck() and
- // LowerCheck() in the same basic block, and that new_offset is not
- // covered (otherwise we could simply remove new_check).
- //
- // If HasSingleCheck() is true then new_check is added as "second check"
- // (either upper or lower; note that HasSingleCheck() becomes false).
- // Otherwise one of the current checks is modified so that it also covers
- // new_offset, and new_check is removed.
- void CoverCheck(HBoundsCheck* new_check,
- int32_t new_offset) {
- DCHECK(new_check->index()->representation().IsSmiOrInteger32());
- bool keep_new_check = false;
-
- if (new_offset > upper_offset_) {
- upper_offset_ = new_offset;
- if (HasSingleCheck()) {
- keep_new_check = true;
- upper_check_ = new_check;
- } else {
- TightenCheck(upper_check_, new_check, new_offset);
- UpdateUpperOffsets(upper_check_, upper_offset_);
- }
- } else if (new_offset < lower_offset_) {
- lower_offset_ = new_offset;
- if (HasSingleCheck()) {
- keep_new_check = true;
- lower_check_ = new_check;
- } else {
- TightenCheck(lower_check_, new_check, new_offset);
- UpdateLowerOffsets(lower_check_, lower_offset_);
- }
- } else {
- // Should never have called CoverCheck() in this case.
- UNREACHABLE();
- }
-
- if (!keep_new_check) {
- if (FLAG_trace_bce) {
- base::OS::Print("Eliminating check #%d after tightening\n",
- new_check->id());
- }
- new_check->block()->graph()->isolate()->counters()->
- bounds_checks_eliminated()->Increment();
- new_check->DeleteAndReplaceWith(new_check->ActualValue());
- } else {
- HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
- : lower_check_;
- if (FLAG_trace_bce) {
- base::OS::Print("Moving second check #%d after first check #%d\n",
- new_check->id(), first_check->id());
- }
- // The length is guaranteed to be live at first_check.
- DCHECK(new_check->length() == first_check->length());
- HInstruction* old_position = new_check->next();
- new_check->Unlink();
- new_check->InsertAfter(first_check);
- MoveIndexIfNecessary(new_check->index(), new_check, old_position);
- }
- }
-
- BoundsCheckBbData(BoundsCheckKey* key,
- int32_t lower_offset,
- int32_t upper_offset,
- HBasicBlock* bb,
- HBoundsCheck* lower_check,
- HBoundsCheck* upper_check,
- BoundsCheckBbData* next_in_bb,
- BoundsCheckBbData* father_in_dt)
- : key_(key),
- lower_offset_(lower_offset),
- upper_offset_(upper_offset),
- basic_block_(bb),
- lower_check_(lower_check),
- upper_check_(upper_check),
- next_in_bb_(next_in_bb),
- father_in_dt_(father_in_dt) { }
-
- private:
- BoundsCheckKey* key_;
- int32_t lower_offset_;
- int32_t upper_offset_;
- HBasicBlock* basic_block_;
- HBoundsCheck* lower_check_;
- HBoundsCheck* upper_check_;
- BoundsCheckBbData* next_in_bb_;
- BoundsCheckBbData* father_in_dt_;
-
- void MoveIndexIfNecessary(HValue* index_raw,
- HBoundsCheck* insert_before,
- HInstruction* end_of_scan_range) {
- // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
- // HConstant(offset) or index_base directly.
- // In the latter case, no need to move anything.
- if (index_raw->IsAdd() || index_raw->IsSub()) {
- HArithmeticBinaryOperation* index =
- HArithmeticBinaryOperation::cast(index_raw);
- HValue* left_input = index->left();
- HValue* right_input = index->right();
- HValue* context = index->context();
- bool must_move_index = false;
- bool must_move_left_input = false;
- bool must_move_right_input = false;
- bool must_move_context = false;
- for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
- if (cursor == left_input) must_move_left_input = true;
- if (cursor == right_input) must_move_right_input = true;
- if (cursor == context) must_move_context = true;
- if (cursor == index) must_move_index = true;
- if (cursor->previous() == NULL) {
- cursor = cursor->block()->dominator()->end();
- } else {
- cursor = cursor->previous();
- }
- }
- if (must_move_index) {
- index->Unlink();
- index->InsertBefore(insert_before);
- }
- // The BCE algorithm only selects mergeable bounds checks that share
- // the same "index_base", so we'll only ever have to move constants.
- if (must_move_left_input) {
- HConstant::cast(left_input)->Unlink();
- HConstant::cast(left_input)->InsertBefore(index);
- }
- if (must_move_right_input) {
- HConstant::cast(right_input)->Unlink();
- HConstant::cast(right_input)->InsertBefore(index);
- }
- if (must_move_context) {
- // Contexts are always constants.
- HConstant::cast(context)->Unlink();
- HConstant::cast(context)->InsertBefore(index);
- }
- } else if (index_raw->IsConstant()) {
- HConstant* index = HConstant::cast(index_raw);
- bool must_move = false;
- for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
- if (cursor == index) must_move = true;
- if (cursor->previous() == NULL) {
- cursor = cursor->block()->dominator()->end();
- } else {
- cursor = cursor->previous();
- }
- }
- if (must_move) {
- index->Unlink();
- index->InsertBefore(insert_before);
- }
- }
- }
-
- void TightenCheck(HBoundsCheck* original_check,
- HBoundsCheck* tighter_check,
- int32_t new_offset) {
- DCHECK(original_check->length() == tighter_check->length());
- MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
- original_check->ReplaceAllUsesWith(original_check->index());
- original_check->SetOperandAt(0, tighter_check->index());
- if (FLAG_trace_bce) {
- base::OS::Print("Tightened check #%d with offset %d from #%d\n",
- original_check->id(), new_offset, tighter_check->id());
- }
- }
-
- DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
-};
-
-
-static bool BoundsCheckKeyMatch(void* key1, void* key2) {
- BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
- BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
- return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
-}
-
-BoundsCheckTable::BoundsCheckTable(Zone* zone)
- : CustomMatcherZoneHashMap(BoundsCheckKeyMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)) {}
-
-BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
- Zone* zone) {
- return reinterpret_cast<BoundsCheckBbData**>(
- &(CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
- ZoneAllocationPolicy(zone))
- ->value));
-}
-
-
-void BoundsCheckTable::Insert(BoundsCheckKey* key,
- BoundsCheckBbData* data,
- Zone* zone) {
- CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
- ZoneAllocationPolicy(zone))
- ->value = data;
-}
-
-
-void BoundsCheckTable::Delete(BoundsCheckKey* key) {
- Remove(key, key->Hash());
-}
-
-
-class HBoundsCheckEliminationState {
- public:
- HBasicBlock* block_;
- BoundsCheckBbData* bb_data_list_;
- int index_;
-};
-
-
-// Eliminates checks in bb and recursively in the dominated blocks.
-// Also replaces the results of check instructions with the original value, if
-// the result is used. This is safe now, since we don't do code motion after
-// this point. It enables better register allocation since the value produced
-// by check instructions is really a copy of the original value.
-void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
- HBasicBlock* entry) {
- // Allocate the stack.
- HBoundsCheckEliminationState* stack =
- zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
-
- // Explicitly push the entry block.
- stack[0].block_ = entry;
- stack[0].bb_data_list_ = PreProcessBlock(entry);
- stack[0].index_ = 0;
- int stack_depth = 1;
-
- // Implement depth-first traversal with a stack.
- while (stack_depth > 0) {
- int current = stack_depth - 1;
- HBoundsCheckEliminationState* state = &stack[current];
- const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
-
- if (state->index_ < children->length()) {
- // Recursively visit children blocks.
- HBasicBlock* child = children->at(state->index_++);
- int next = stack_depth++;
- stack[next].block_ = child;
- stack[next].bb_data_list_ = PreProcessBlock(child);
- stack[next].index_ = 0;
- } else {
- // Finished with all children; post process the block.
- PostProcessBlock(state->block_, state->bb_data_list_);
- stack_depth--;
- }
- }
-}
-
-
-BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
- HBasicBlock* bb) {
- BoundsCheckBbData* bb_data_list = NULL;
-
- for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
- HInstruction* i = it.Current();
- if (!i->IsBoundsCheck()) continue;
-
- HBoundsCheck* check = HBoundsCheck::cast(i);
- int32_t offset = 0;
- BoundsCheckKey* key =
- BoundsCheckKey::Create(zone(), check, &offset);
- if (key == NULL) continue;
- BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone());
- BoundsCheckBbData* data = *data_p;
- if (data == NULL) {
- bb_data_list = new(zone()) BoundsCheckBbData(key,
- offset,
- offset,
- bb,
- check,
- check,
- bb_data_list,
- NULL);
- *data_p = bb_data_list;
- if (FLAG_trace_bce) {
- base::OS::Print("Fresh bounds check data for block #%d: [%d]\n",
- bb->block_id(), offset);
- }
- } else if (data->OffsetIsCovered(offset)) {
- bb->graph()->isolate()->counters()->
- bounds_checks_eliminated()->Increment();
- if (FLAG_trace_bce) {
- base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
- check->id(), offset);
- }
- check->DeleteAndReplaceWith(check->ActualValue());
- } else if (data->BasicBlock() == bb) {
- // TODO(jkummerow): I think the following logic would be preferable:
- // if (data->Basicblock() == bb ||
- // graph()->use_optimistic_licm() ||
- // bb->IsLoopSuccessorDominator()) {
- // data->CoverCheck(check, offset)
- // } else {
- // /* add pristine BCBbData like in (data == NULL) case above */
- // }
- // Even better would be: distinguish between read-only dominator-imposed
- // knowledge and modifiable upper/lower checks.
- // What happens currently is that the first bounds check in a dominated
- // block will stay around while any further checks are hoisted out,
- // which doesn't make sense. Investigate/fix this in a future CL.
- data->CoverCheck(check, offset);
- } else if (graph()->use_optimistic_licm() ||
- bb->IsLoopSuccessorDominator()) {
- int32_t new_lower_offset = offset < data->LowerOffset()
- ? offset
- : data->LowerOffset();
- int32_t new_upper_offset = offset > data->UpperOffset()
- ? offset
- : data->UpperOffset();
- bb_data_list = new(zone()) BoundsCheckBbData(key,
- new_lower_offset,
- new_upper_offset,
- bb,
- data->LowerCheck(),
- data->UpperCheck(),
- bb_data_list,
- data);
- if (FLAG_trace_bce) {
- base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
- bb->block_id(), new_lower_offset, new_upper_offset);
- }
- table_.Insert(key, bb_data_list, zone());
- }
- }
-
- return bb_data_list;
-}
-
-
-void HBoundsCheckEliminationPhase::PostProcessBlock(
- HBasicBlock* block, BoundsCheckBbData* data) {
- while (data != NULL) {
- if (data->FatherInDominatorTree()) {
- table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
- } else {
- table_.Delete(data->Key());
- }
- data = data->NextInBasicBlock();
- }
-}
-
-} // namespace internal
-} // namespace v8
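A compact illustration of the offset-coverage idea described in the comments above: checks on the same (index base, length) pair are merged by tracking the covered [lower, upper] offset range, so a later check whose offset is already covered can be dropped, and an uncovered one widens the dominating check instead. This is a toy model over straight-line code, not the Hydrogen pass; the Check struct and the `covered` map are invented for the sketch.

#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

// A bounds check of the form "base + offset < length", the shape that
// BoundsCheckKey factors index expressions into.
struct Check { std::string base; int offset; std::string length; };

int main() {
  // Checks in the order they appear along one dominator chain.
  std::vector<Check> checks = {
      {"i", 0, "len"}, {"i", 4, "len"}, {"i", 2, "len"}, {"j", 1, "len"}};

  // For each (base, length) key, the offset range already guaranteed by a
  // dominating check, mirroring BoundsCheckBbData's lower/upper offsets.
  std::map<std::pair<std::string, std::string>, std::pair<int, int>> covered;

  for (const Check& c : checks) {
    auto key = std::make_pair(c.base, c.length);
    auto it = covered.find(key);
    if (it == covered.end()) {
      covered.emplace(key, std::make_pair(c.offset, c.offset));
      std::printf("keep  check %s+%d < %s\n", c.base.c_str(), c.offset,
                  c.length.c_str());
    } else if (c.offset >= it->second.first && c.offset <= it->second.second) {
      // Already implied by an earlier, dominating check: eliminate it.
      std::printf("drop  check %s+%d < %s (covered)\n", c.base.c_str(),
                  c.offset, c.length.c_str());
    } else {
      // Widen the dominating check so it also covers this offset.
      it->second.first = std::min(it->second.first, c.offset);
      it->second.second = std::max(it->second.second, c.offset);
      std::printf("widen dominating check for %s/%s to [%d, %d]\n",
                  c.base.c_str(), c.length.c_str(), it->second.first,
                  it->second.second);
    }
  }
  return 0;
}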
diff --git a/deps/v8/src/crankshaft/hydrogen-bce.h b/deps/v8/src/crankshaft/hydrogen-bce.h
deleted file mode 100644
index 237fb953f2..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-bce.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_BCE_H_
-#define V8_CRANKSHAFT_HYDROGEN_BCE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class BoundsCheckBbData;
-class BoundsCheckKey;
-class BoundsCheckTable : private CustomMatcherZoneHashMap {
- public:
- explicit BoundsCheckTable(Zone* zone);
-
- INLINE(BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone));
- INLINE(void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone));
- INLINE(void Delete(BoundsCheckKey* key));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BoundsCheckTable);
-};
-
-
-class HBoundsCheckEliminationPhase : public HPhase {
- public:
- explicit HBoundsCheckEliminationPhase(HGraph* graph)
- : HPhase("H_Bounds checks elimination", graph), table_(zone()) { }
-
- void Run() {
- EliminateRedundantBoundsChecks(graph()->entry_block());
- }
-
- private:
- void EliminateRedundantBoundsChecks(HBasicBlock* bb);
- BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
- void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
-
- BoundsCheckTable table_;
-
- DISALLOW_COPY_AND_ASSIGN(HBoundsCheckEliminationPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_BCE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-canonicalize.cc b/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
deleted file mode 100644
index 20e771763f..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-canonicalize.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-canonicalize.h"
-
-#include "src/counters.h"
-#include "src/crankshaft/hydrogen-redundant-phi.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HCanonicalizePhase::Run() {
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- // Before removing no-op instructions, save their semantic value.
- // We must be careful not to set the flag unnecessarily, because GVN
- // cannot identify two instructions when their flag value differs.
- for (int i = 0; i < blocks->length(); ++i) {
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->IsArithmeticBinaryOperation()) {
- if (instr->representation().IsInteger32()) {
- if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
- HInstruction::kTruncatingToInt32)) {
- instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
- }
- } else if (instr->representation().IsSmi()) {
- if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
- HInstruction::kTruncatingToSmi)) {
- instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
- } else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
- HInstruction::kTruncatingToInt32)) {
- // Avoid redundant minus zero check
- instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
- }
- }
- }
- }
- }
-
- // Perform actual Canonicalization pass.
- HRedundantPhiEliminationPhase redundant_phi_eliminator(graph());
- for (int i = 0; i < blocks->length(); ++i) {
- // Eliminate redundant phis in the block first; changes to their inputs
- // might have made them redundant, and eliminating them creates more
- // opportunities for constant folding and strength reduction.
- redundant_phi_eliminator.ProcessBlock(blocks->at(i));
- // Now canonicalize each instruction.
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- HValue* value = instr->Canonicalize();
- if (value != instr) instr->DeleteAndReplaceWith(value);
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-canonicalize.h b/deps/v8/src/crankshaft/hydrogen-canonicalize.h
deleted file mode 100644
index a17557ac8b..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-canonicalize.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
-#define V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HCanonicalizePhase : public HPhase {
- public:
- explicit HCanonicalizePhase(HGraph* graph)
- : HPhase("H_Canonicalize", graph) { }
-
- void Run();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-check-elimination.cc b/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
deleted file mode 100644
index 951628e3bb..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-check-elimination.cc
+++ /dev/null
@@ -1,914 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-check-elimination.h"
-
-#include "src/crankshaft/hydrogen-alias-analysis.h"
-#include "src/crankshaft/hydrogen-flow-engine.h"
-#include "src/objects-inl.h"
-
-#define GLOBAL 1
-
-// Only collect stats in debug mode.
-#if DEBUG
-#define INC_STAT(x) phase_->x++
-#else
-#define INC_STAT(x)
-#endif
-
-// For code de-uglification.
-#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
-
-namespace v8 {
-namespace internal {
-
-typedef const UniqueSet<Map>* MapSet;
-
-struct HCheckTableEntry {
- enum State {
- // We have seen a map check (i.e. an HCheckMaps) for these maps, so we can
- // use this information to eliminate further map checks, elements kind
- // transitions, etc.
- CHECKED,
- // Same as CHECKED, but we also know that these maps are stable.
- CHECKED_STABLE,
- // These maps are stable, but not checked (i.e. we learned this via field
- // type tracking or from a constant, or they were initially CHECKED_STABLE,
- // but became UNCHECKED_STABLE because of an instruction that changes maps
- // or elements kind), and we need a stability check for them in order to use
- // this information for check elimination (which turns them back to
- // CHECKED_STABLE).
- UNCHECKED_STABLE
- };
-
- static const char* State2String(State state) {
- switch (state) {
- case CHECKED: return "checked";
- case CHECKED_STABLE: return "checked stable";
- case UNCHECKED_STABLE: return "unchecked stable";
- }
- UNREACHABLE();
- return NULL;
- }
-
- static State StateMerge(State state1, State state2) {
- if (state1 == state2) return state1;
- if ((state1 == CHECKED && state2 == CHECKED_STABLE) ||
- (state2 == CHECKED && state1 == CHECKED_STABLE)) {
- return CHECKED;
- }
- DCHECK((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) ||
- (state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE));
- return UNCHECKED_STABLE;
- }
-
- HValue* object_; // The object being approximated. NULL => invalid entry.
- HInstruction* check_; // The last check instruction.
- MapSet maps_; // The set of known maps for the object.
- State state_; // The state of this entry.
-};
-
-
-// The main data structure used during check elimination, which stores a
-// set of known maps for each object.
-class HCheckTable : public ZoneObject {
- public:
- static const int kMaxTrackedObjects = 16;
-
- explicit HCheckTable(HCheckEliminationPhase* phase)
- : phase_(phase),
- cursor_(0),
- size_(0) {
- }
-
- // The main processing of instructions.
- HCheckTable* Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kCheckMaps: {
- ReduceCheckMaps(HCheckMaps::cast(instr));
- break;
- }
- case HValue::kLoadNamedField: {
- ReduceLoadNamedField(HLoadNamedField::cast(instr));
- break;
- }
- case HValue::kStoreNamedField: {
- ReduceStoreNamedField(HStoreNamedField::cast(instr));
- break;
- }
- case HValue::kCompareMap: {
- ReduceCompareMap(HCompareMap::cast(instr));
- break;
- }
- case HValue::kCompareObjectEqAndBranch: {
- ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr));
- break;
- }
- case HValue::kIsStringAndBranch: {
- ReduceIsStringAndBranch(HIsStringAndBranch::cast(instr));
- break;
- }
- case HValue::kTransitionElementsKind: {
- ReduceTransitionElementsKind(
- HTransitionElementsKind::cast(instr));
- break;
- }
- case HValue::kCheckHeapObject: {
- ReduceCheckHeapObject(HCheckHeapObject::cast(instr));
- break;
- }
- case HValue::kCheckInstanceType: {
- ReduceCheckInstanceType(HCheckInstanceType::cast(instr));
- break;
- }
- default: {
- // If the instruction changes maps uncontrollably, drop everything.
- if (instr->CheckChangesFlag(kOsrEntries)) {
- Kill();
- break;
- }
- if (instr->CheckChangesFlag(kElementsKind) ||
- instr->CheckChangesFlag(kMaps)) {
- KillUnstableEntries();
- }
- }
- // Improvements possible:
- // - eliminate redundant HCheckSmi instructions
- // - track which values have been HCheckHeapObject'd
- }
-
- return this;
- }
-
- // Support for global analysis with HFlowEngine: Merge given state with
- // the other incoming state.
- static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
- HCheckTable* pred_state, HBasicBlock* pred_block,
- Zone* zone) {
- if (pred_state == NULL || pred_block->IsUnreachable()) {
- return succ_state;
- }
- if (succ_state == NULL) {
- return pred_state->Copy(succ_block, pred_block, zone);
- } else {
- return succ_state->Merge(succ_block, pred_state, pred_block, zone);
- }
- }
-
- // Support for global analysis with HFlowEngine: Given state merged with all
- // the other incoming states, prepare it for use.
- static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
- Zone* zone) {
- if (state == NULL) {
- block->MarkUnreachable();
- } else if (block->IsUnreachable()) {
- state = NULL;
- }
- if (FLAG_trace_check_elimination) {
- PrintF("Processing B%d, checkmaps-table:\n", block->block_id());
- Print(state);
- }
- return state;
- }
-
- private:
- // Copy state to successor block.
- HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
- HCheckTable* copy = new(zone) HCheckTable(phase_);
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* old_entry = &entries_[i];
- DCHECK(old_entry->maps_->size() > 0);
- HCheckTableEntry* new_entry = &copy->entries_[i];
- new_entry->object_ = old_entry->object_;
- new_entry->maps_ = old_entry->maps_;
- new_entry->state_ = old_entry->state_;
- // Keep the check if the existing check's block dominates the successor.
- if (old_entry->check_ != NULL &&
- old_entry->check_->block()->Dominates(succ)) {
- new_entry->check_ = old_entry->check_;
- } else {
- // Leave it NULL till we meet a new check instruction for this object
- // in the control flow.
- new_entry->check_ = NULL;
- }
- }
- copy->cursor_ = cursor_;
- copy->size_ = size_;
-
- // Create entries for succ block's phis.
- if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
- int pred_index = succ->PredecessorIndexOf(from_block);
- for (int phi_index = 0;
- phi_index < succ->phis()->length();
- ++phi_index) {
- HPhi* phi = succ->phis()->at(phi_index);
- HValue* phi_operand = phi->OperandAt(pred_index);
-
- HCheckTableEntry* pred_entry = copy->Find(phi_operand);
- if (pred_entry != NULL) {
- // Create an entry for a phi in the table.
- copy->Insert(phi, NULL, pred_entry->maps_, pred_entry->state_);
- }
- }
- }
-
- // Branch-sensitive analysis for certain comparisons may add more facts
- // to the state for the successor on the true branch.
- bool learned = false;
- if (succ->predecessors()->length() == 1) {
- HControlInstruction* end = succ->predecessors()->at(0)->end();
- bool is_true_branch = end->SuccessorAt(0) == succ;
- if (end->IsCompareMap()) {
- HCompareMap* cmp = HCompareMap::cast(end);
- HValue* object = cmp->value()->ActualValue();
- HCheckTableEntry* entry = copy->Find(object);
- if (is_true_branch) {
- HCheckTableEntry::State state = cmp->map_is_stable()
- ? HCheckTableEntry::CHECKED_STABLE
- : HCheckTableEntry::CHECKED;
- // Learn on the true branch of if(CompareMap(x)).
- if (entry == NULL) {
- copy->Insert(object, cmp, cmp->map(), state);
- } else {
- entry->maps_ = new(zone) UniqueSet<Map>(cmp->map(), zone);
- entry->check_ = cmp;
- entry->state_ = state;
- }
- } else {
- // Learn on the false branch of if(CompareMap(x)).
- if (entry != NULL) {
- EnsureChecked(entry, object, cmp);
- UniqueSet<Map>* maps = entry->maps_->Copy(zone);
- maps->Remove(cmp->map());
- entry->maps_ = maps;
- DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
- }
- }
- learned = true;
- } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
- // Learn on the true branch of if(CmpObjectEq(x, y)).
- HCompareObjectEqAndBranch* cmp =
- HCompareObjectEqAndBranch::cast(end);
- HValue* left = cmp->left()->ActualValue();
- HValue* right = cmp->right()->ActualValue();
- HCheckTableEntry* le = copy->Find(left);
- HCheckTableEntry* re = copy->Find(right);
- if (le == NULL) {
- if (re != NULL) {
- copy->Insert(left, NULL, re->maps_, re->state_);
- }
- } else if (re == NULL) {
- copy->Insert(right, NULL, le->maps_, le->state_);
- } else {
- EnsureChecked(le, cmp->left(), cmp);
- EnsureChecked(re, cmp->right(), cmp);
- le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone);
- le->state_ = re->state_ = HCheckTableEntry::StateMerge(
- le->state_, re->state_);
- DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_);
- DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_);
- }
- learned = true;
- } else if (end->IsIsStringAndBranch()) {
- HIsStringAndBranch* cmp = HIsStringAndBranch::cast(end);
- HValue* object = cmp->value()->ActualValue();
- HCheckTableEntry* entry = copy->Find(object);
- if (is_true_branch) {
- // Learn on the true branch of if(IsString(x)).
- if (entry == NULL) {
- copy->Insert(object, NULL, string_maps(),
- HCheckTableEntry::CHECKED);
- } else {
- EnsureChecked(entry, object, cmp);
- entry->maps_ = entry->maps_->Intersect(string_maps(), zone);
- DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
- }
- } else {
- // Learn on the false branch of if(IsString(x)).
- if (entry != NULL) {
- EnsureChecked(entry, object, cmp);
- entry->maps_ = entry->maps_->Subtract(string_maps(), zone);
- DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
- }
- }
- }
- // Learning on false branches requires storing negative facts.
- }
-
- if (FLAG_trace_check_elimination) {
- PrintF("B%d checkmaps-table %s from B%d:\n",
- succ->block_id(),
- learned ? "learned" : "copied",
- from_block->block_id());
- Print(copy);
- }
-
- return copy;
- }
-
- // Merge this state with the other incoming state.
- HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
- HBasicBlock* pred_block, Zone* zone) {
- if (that->size_ == 0) {
- // If the other state is empty, simply reset.
- size_ = 0;
- cursor_ = 0;
- } else {
- int pred_index = succ->PredecessorIndexOf(pred_block);
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* this_entry = &entries_[i];
- HCheckTableEntry* that_entry;
- if (this_entry->object_->IsPhi() &&
- this_entry->object_->block() == succ) {
- HPhi* phi = HPhi::cast(this_entry->object_);
- HValue* phi_operand = phi->OperandAt(pred_index);
- that_entry = that->Find(phi_operand);
-
- } else {
- that_entry = that->Find(this_entry->object_);
- }
-
- if (that_entry == NULL ||
- (that_entry->state_ == HCheckTableEntry::CHECKED &&
- this_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) ||
- (this_entry->state_ == HCheckTableEntry::CHECKED &&
- that_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE)) {
- this_entry->object_ = NULL;
- compact = true;
- } else {
- this_entry->maps_ =
- this_entry->maps_->Union(that_entry->maps_, zone);
- this_entry->state_ = HCheckTableEntry::StateMerge(
- this_entry->state_, that_entry->state_);
- if (this_entry->check_ != that_entry->check_) {
- this_entry->check_ = NULL;
- }
- DCHECK(this_entry->maps_->size() > 0);
- }
- }
- if (compact) Compact();
- }
-
- if (FLAG_trace_check_elimination) {
- PrintF("B%d checkmaps-table merged with B%d table:\n",
- succ->block_id(), pred_block->block_id());
- Print(this);
- }
- return this;
- }
-
- void ReduceCheckMaps(HCheckMaps* instr) {
- HValue* object = instr->value()->ActualValue();
- HCheckTableEntry* entry = Find(object);
- if (entry != NULL) {
- // entry found;
- HGraph* graph = instr->block()->graph();
- if (entry->maps_->IsSubset(instr->maps())) {
- // The first check is more strict; the second is redundant.
- if (entry->check_ != NULL) {
- DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
- TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
- instr->id(), instr->block()->block_id(), entry->check_->id()));
- instr->DeleteAndReplaceWith(entry->check_);
- INC_STAT(redundant_);
- } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
- DCHECK_NULL(entry->check_);
- TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n",
- instr->id(), instr->block()->block_id()));
- instr->set_maps(entry->maps_->Copy(graph->zone()));
- instr->MarkAsStabilityCheck();
- entry->state_ = HCheckTableEntry::CHECKED_STABLE;
- } else if (!instr->IsStabilityCheck()) {
- TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n",
- instr->id(), instr->block()->block_id()));
- // Mark check as dead but leave it in the graph as a checkpoint for
- // subsequent checks.
- instr->SetFlag(HValue::kIsDead);
- entry->check_ = instr;
- INC_STAT(removed_);
- }
- return;
- }
- MapSet intersection = instr->maps()->Intersect(
- entry->maps_, graph->zone());
- if (intersection->size() == 0) {
- // Intersection is empty; probably megamorphic.
- INC_STAT(empty_);
- entry->object_ = NULL;
- Compact();
- } else {
- // Update set of maps in the entry.
- entry->maps_ = intersection;
- // Update state of the entry.
- if (instr->maps_are_stable() ||
- entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
- entry->state_ = HCheckTableEntry::CHECKED_STABLE;
- }
- if (intersection->size() != instr->maps()->size()) {
- // Narrow set of maps in the second check maps instruction.
- if (entry->check_ != NULL &&
- entry->check_->block() == instr->block() &&
- entry->check_->IsCheckMaps()) {
- // There is a check in the same block so replace it with a more
- // strict check and eliminate the second check entirely.
- HCheckMaps* check = HCheckMaps::cast(entry->check_);
- DCHECK(!check->IsStabilityCheck());
- TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
- check->block()->block_id()));
- // Update map set and ensure that the check is alive.
- check->set_maps(intersection);
- check->ClearFlag(HValue::kIsDead);
- TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
- instr->id(), instr->block()->block_id(), entry->check_->id()));
- instr->DeleteAndReplaceWith(entry->check_);
- } else {
- TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
- instr->block()->block_id()));
- instr->set_maps(intersection);
- entry->check_ = instr->IsStabilityCheck() ? NULL : instr;
- }
-
- if (FLAG_trace_check_elimination) {
- Print(this);
- }
- INC_STAT(narrowed_);
- }
- }
- } else {
- // No entry; insert a new one.
- HCheckTableEntry::State state = instr->maps_are_stable()
- ? HCheckTableEntry::CHECKED_STABLE
- : HCheckTableEntry::CHECKED;
- HCheckMaps* check = instr->IsStabilityCheck() ? NULL : instr;
- Insert(object, check, instr->maps(), state);
- }
- }
-
- void ReduceCheckInstanceType(HCheckInstanceType* instr) {
- HValue* value = instr->value()->ActualValue();
- HCheckTableEntry* entry = Find(value);
- if (entry == NULL) {
- if (instr->check() == HCheckInstanceType::IS_STRING) {
- Insert(value, NULL, string_maps(), HCheckTableEntry::CHECKED);
- }
- return;
- }
- UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(
- entry->maps_->size(), zone());
- for (int i = 0; i < entry->maps_->size(); ++i) {
- InstanceType type;
- Unique<Map> map = entry->maps_->at(i);
- {
- // This is safe, because maps don't move and their instance type does
- // not change.
- AllowHandleDereference allow_deref;
- type = map.handle()->instance_type();
- }
- if (instr->is_interval_check()) {
- InstanceType first_type, last_type;
- instr->GetCheckInterval(&first_type, &last_type);
- if (first_type <= type && type <= last_type) maps->Add(map, zone());
- } else {
- uint8_t mask, tag;
- instr->GetCheckMaskAndTag(&mask, &tag);
- if ((type & mask) == tag) maps->Add(map, zone());
- }
- }
- if (maps->size() == entry->maps_->size()) {
- TRACE(("Removing redundant CheckInstanceType #%d at B%d\n",
- instr->id(), instr->block()->block_id()));
- EnsureChecked(entry, value, instr);
- instr->DeleteAndReplaceWith(value);
- INC_STAT(removed_cit_);
- } else if (maps->size() != 0) {
- entry->maps_ = maps;
- if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
- entry->state_ = HCheckTableEntry::CHECKED_STABLE;
- }
- }
- }
-
- void ReduceLoadNamedField(HLoadNamedField* instr) {
- // Reduce a load of the map field when it is known to be a constant.
- if (!instr->access().IsMap()) {
- // Check if we introduce field maps here.
- MapSet maps = instr->maps();
- if (maps != NULL) {
- DCHECK_NE(0, maps->size());
- Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE);
- }
- return;
- }
-
- HValue* object = instr->object()->ActualValue();
- HCheckTableEntry* entry = Find(object);
- if (entry == NULL || entry->maps_->size() != 1) return; // Not a constant.
-
- EnsureChecked(entry, object, instr);
- Unique<Map> map = entry->maps_->at(0);
- bool map_is_stable = (entry->state_ != HCheckTableEntry::CHECKED);
- HConstant* constant = HConstant::CreateAndInsertBefore(
- instr->block()->graph()->zone(), map, map_is_stable, instr);
- instr->DeleteAndReplaceWith(constant);
- INC_STAT(loads_);
- }
-
- void ReduceCheckHeapObject(HCheckHeapObject* instr) {
- HValue* value = instr->value()->ActualValue();
- if (Find(value) != NULL) {
- // If the object has known maps, it's definitely a heap object.
- instr->DeleteAndReplaceWith(value);
- INC_STAT(removed_cho_);
- }
- }
-
- void ReduceStoreNamedField(HStoreNamedField* instr) {
- HValue* object = instr->object()->ActualValue();
- if (instr->has_transition()) {
- // This store transitions the object to a new map.
- Kill(object);
- HConstant* c_transition = HConstant::cast(instr->transition());
- HCheckTableEntry::State state = c_transition->HasStableMapValue()
- ? HCheckTableEntry::CHECKED_STABLE
- : HCheckTableEntry::CHECKED;
- Insert(object, NULL, c_transition->MapValue(), state);
- } else if (instr->access().IsMap()) {
- // This is a store directly to the map field of the object.
- Kill(object);
- if (!instr->value()->IsConstant()) return;
- HConstant* c_value = HConstant::cast(instr->value());
- HCheckTableEntry::State state = c_value->HasStableMapValue()
- ? HCheckTableEntry::CHECKED_STABLE
- : HCheckTableEntry::CHECKED;
- Insert(object, NULL, c_value->MapValue(), state);
- } else {
- // If the instruction changes maps, it should be handled above.
- CHECK(!instr->CheckChangesFlag(kMaps));
- }
- }
-
- void ReduceCompareMap(HCompareMap* instr) {
- HCheckTableEntry* entry = Find(instr->value()->ActualValue());
- if (entry == NULL) return;
-
- EnsureChecked(entry, instr->value(), instr);
-
- int succ;
- if (entry->maps_->Contains(instr->map())) {
- if (entry->maps_->size() != 1) {
- TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
- "ambiguous set of maps\n", instr->id(), instr->value()->id(),
- instr->block()->block_id()));
- return;
- }
- succ = 0;
- INC_STAT(compares_true_);
- } else {
- succ = 1;
- INC_STAT(compares_false_);
- }
-
- TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
- instr->id(), instr->value()->id(), instr->block()->block_id(),
- succ == 0 ? "true" : "false"));
- instr->set_known_successor_index(succ);
-
- int unreachable_succ = 1 - succ;
- instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
- }
-
- void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) {
- HValue* left = instr->left()->ActualValue();
- HCheckTableEntry* le = Find(left);
- if (le == NULL) return;
- HValue* right = instr->right()->ActualValue();
- HCheckTableEntry* re = Find(right);
- if (re == NULL) return;
-
- EnsureChecked(le, left, instr);
- EnsureChecked(re, right, instr);
-
- // TODO(bmeurer): Add a predicate here instead of computing the intersection
- MapSet intersection = le->maps_->Intersect(re->maps_, zone());
- if (intersection->size() > 0) return;
-
- TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n",
- instr->id(), instr->block()->block_id()));
- int succ = 1;
- instr->set_known_successor_index(succ);
-
- int unreachable_succ = 1 - succ;
- instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
- }
-
- void ReduceIsStringAndBranch(HIsStringAndBranch* instr) {
- HValue* value = instr->value()->ActualValue();
- HCheckTableEntry* entry = Find(value);
- if (entry == NULL) return;
- EnsureChecked(entry, value, instr);
- int succ;
- if (entry->maps_->IsSubset(string_maps())) {
- TRACE(("Marking redundant IsStringAndBranch #%d at B%d as true\n",
- instr->id(), instr->block()->block_id()));
- succ = 0;
- } else {
- MapSet intersection = entry->maps_->Intersect(string_maps(), zone());
- if (intersection->size() > 0) return;
- TRACE(("Marking redundant IsStringAndBranch #%d at B%d as false\n",
- instr->id(), instr->block()->block_id()));
- succ = 1;
- }
- instr->set_known_successor_index(succ);
- int unreachable_succ = 1 - succ;
- instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
- }
-
- void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
- HValue* object = instr->object()->ActualValue();
- HCheckTableEntry* entry = Find(object);
- // Can only learn more about an object that already has a known set of maps.
- if (entry == NULL) {
- Kill(object);
- return;
- }
- EnsureChecked(entry, object, instr);
- if (entry->maps_->Contains(instr->original_map())) {
- // If the object has the original map, it will be transitioned.
- UniqueSet<Map>* maps = entry->maps_->Copy(zone());
- maps->Remove(instr->original_map());
- maps->Add(instr->transitioned_map(), zone());
- HCheckTableEntry::State state =
- (entry->state_ == HCheckTableEntry::CHECKED_STABLE &&
- instr->map_is_stable())
- ? HCheckTableEntry::CHECKED_STABLE
- : HCheckTableEntry::CHECKED;
- Kill(object);
- Insert(object, NULL, maps, state);
- } else {
- // Object does not have the given map, thus the transition is redundant.
- instr->DeleteAndReplaceWith(object);
- INC_STAT(transitions_);
- }
- }
-
- void EnsureChecked(HCheckTableEntry* entry,
- HValue* value,
- HInstruction* instr) {
- if (entry->state_ != HCheckTableEntry::UNCHECKED_STABLE) return;
- HGraph* graph = instr->block()->graph();
- HCheckMaps* check = HCheckMaps::CreateAndInsertBefore(
- graph->zone(), value, entry->maps_->Copy(graph->zone()), true, instr);
- check->MarkAsStabilityCheck();
- entry->state_ = HCheckTableEntry::CHECKED_STABLE;
- entry->check_ = NULL;
- }
-
- // Kill everything in the table.
- void Kill() {
- size_ = 0;
- cursor_ = 0;
- }
-
- // Kill all unstable entries in the table.
- void KillUnstableEntries() {
- bool compact = false;
- for (int i = 0; i < size_; ++i) {
- HCheckTableEntry* entry = &entries_[i];
- DCHECK_NOT_NULL(entry->object_);
- if (entry->state_ == HCheckTableEntry::CHECKED) {
- entry->object_ = NULL;
- compact = true;
- } else {
- // All checked stable entries become unchecked stable.
- entry->state_ = HCheckTableEntry::UNCHECKED_STABLE;
- entry->check_ = NULL;
- }
- }
- if (compact) Compact();
- }
-
- // Kill everything in the table that may alias {object}.
- void Kill(HValue* object) {
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* entry = &entries_[i];
- DCHECK_NOT_NULL(entry->object_);
- if (phase_->aliasing_->MayAlias(entry->object_, object)) {
- entry->object_ = NULL;
- compact = true;
- }
- }
- if (compact) Compact();
- DCHECK_NULL(Find(object));
- }
-
- void Compact() {
- // First, compact the array in place.
- int max = size_, dest = 0, old_cursor = cursor_;
- for (int i = 0; i < max; i++) {
- if (entries_[i].object_ != NULL) {
- if (dest != i) entries_[dest] = entries_[i];
- dest++;
- } else {
- if (i < old_cursor) cursor_--;
- size_--;
- }
- }
- DCHECK(size_ == dest);
- DCHECK(cursor_ <= size_);
-
- // Preserve the age of the entries by moving the older entries to the end.
- if (cursor_ == size_) return; // Cursor already points at end.
- if (cursor_ != 0) {
- // | L = oldest | R = newest | |
- // ^ cursor ^ size ^ MAX
- HCheckTableEntry tmp_entries[kMaxTrackedObjects];
- int L = cursor_;
- int R = size_ - cursor_;
-
- MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
- MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
- MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
- }
-
- cursor_ = size_; // Move cursor to end.
- }
-
- static void Print(HCheckTable* table) {
- if (table == NULL) {
- PrintF(" unreachable\n");
- return;
- }
-
- for (int i = 0; i < table->size_; i++) {
- HCheckTableEntry* entry = &table->entries_[i];
- DCHECK(entry->object_ != NULL);
- PrintF(" checkmaps-table @%d: %s #%d ", i,
- entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
- if (entry->check_ != NULL) {
- PrintF("check #%d ", entry->check_->id());
- }
- MapSet list = entry->maps_;
- PrintF("%d %s maps { ", list->size(),
- HCheckTableEntry::State2String(entry->state_));
- for (int j = 0; j < list->size(); j++) {
- if (j > 0) PrintF(", ");
- PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
- }
- PrintF(" }\n");
- }
- }
-
- HCheckTableEntry* Find(HValue* object) {
- for (int i = size_ - 1; i >= 0; i--) {
- // Search from most-recently-inserted to least-recently-inserted.
- HCheckTableEntry* entry = &entries_[i];
- DCHECK(entry->object_ != NULL);
- if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry;
- }
- return NULL;
- }
-
- void Insert(HValue* object,
- HInstruction* check,
- Unique<Map> map,
- HCheckTableEntry::State state) {
- Insert(object, check, new(zone()) UniqueSet<Map>(map, zone()), state);
- }
-
- void Insert(HValue* object,
- HInstruction* check,
- MapSet maps,
- HCheckTableEntry::State state) {
- DCHECK(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL);
- HCheckTableEntry* entry = &entries_[cursor_++];
- entry->object_ = object;
- entry->check_ = check;
- entry->maps_ = maps;
- entry->state_ = state;
- // If the table becomes full, wrap around and overwrite older entries.
- if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
- if (size_ < kMaxTrackedObjects) size_++;
- }
-
- Zone* zone() const { return phase_->zone(); }
- MapSet string_maps() const { return phase_->string_maps(); }
-
- friend class HCheckMapsEffects;
- friend class HCheckEliminationPhase;
-
- HCheckEliminationPhase* phase_;
- HCheckTableEntry entries_[kMaxTrackedObjects];
- int16_t cursor_; // Must be <= kMaxTrackedObjects
- int16_t size_; // Must be <= kMaxTrackedObjects
- STATIC_ASSERT(kMaxTrackedObjects < (1 << 15));
-};
-
-
-// Collects instructions that can cause effects that invalidate information
-// needed for check elimination.
-class HCheckMapsEffects : public ZoneObject {
- public:
- explicit HCheckMapsEffects(Zone* zone) : objects_(0, zone) { }
-
- // Effects are _not_ disabled.
- inline bool Disabled() const { return false; }
-
- // Process a possibly side-effecting instruction.
- void Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kStoreNamedField: {
- HStoreNamedField* store = HStoreNamedField::cast(instr);
- if (store->access().IsMap() || store->has_transition()) {
- objects_.Add(store->object(), zone);
- }
- break;
- }
- case HValue::kTransitionElementsKind: {
- objects_.Add(HTransitionElementsKind::cast(instr)->object(), zone);
- break;
- }
- default: {
- flags_.Add(instr->ChangesFlags());
- break;
- }
- }
- }
-
- // Apply these effects to the given check elimination table.
- void Apply(HCheckTable* table) {
- if (flags_.Contains(kOsrEntries)) {
- // Uncontrollable map modifications; kill everything.
- table->Kill();
- return;
- }
-
- // Kill all unstable entries.
- if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
- table->KillUnstableEntries();
- }
-
- // Kill maps for each object contained in these effects.
- for (int i = 0; i < objects_.length(); ++i) {
- table->Kill(objects_[i]->ActualValue());
- }
- }
-
- // Union these effects with the other effects.
- void Union(HCheckMapsEffects* that, Zone* zone) {
- flags_.Add(that->flags_);
- for (int i = 0; i < that->objects_.length(); ++i) {
- objects_.Add(that->objects_[i], zone);
- }
- }
-
- private:
- ZoneList<HValue*> objects_;
- GVNFlagSet flags_;
-};
-
-
-// The main routine of the analysis phase. Use the HFlowEngine for either a
-// local or a global analysis.
-void HCheckEliminationPhase::Run() {
- HFlowEngine<HCheckTable, HCheckMapsEffects> engine(graph(), zone());
- HCheckTable* table = new(zone()) HCheckTable(this);
-
- if (GLOBAL) {
- // Perform a global analysis.
- engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
- } else {
- // Perform only local analysis.
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- table->Kill();
- engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
- }
- }
-
- if (FLAG_trace_check_elimination) PrintStats();
-}
-
-
-// Are we eliminated yet?
-void HCheckEliminationPhase::PrintStats() {
-#if DEBUG
- #define PRINT_STAT(x) if (x##_ > 0) PrintF(" %-16s = %2d\n", #x, x##_)
-#else
- #define PRINT_STAT(x)
-#endif
- PRINT_STAT(redundant);
- PRINT_STAT(removed);
- PRINT_STAT(removed_cho);
- PRINT_STAT(removed_cit);
- PRINT_STAT(narrowed);
- PRINT_STAT(loads);
- PRINT_STAT(empty);
- PRINT_STAT(compares_true);
- PRINT_STAT(compares_false);
- PRINT_STAT(transitions);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-check-elimination.h b/deps/v8/src/crankshaft/hydrogen-check-elimination.h
deleted file mode 100644
index d6339df34c..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-check-elimination.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
-#define V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/hydrogen-alias-analysis.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Remove CheckMaps instructions through flow- and branch-sensitive analysis.
-class HCheckEliminationPhase : public HPhase {
- public:
- explicit HCheckEliminationPhase(HGraph* graph)
- : HPhase("H_Check Elimination", graph), aliasing_(),
- string_maps_(kStringMapsSize, zone()) {
- // Compute the set of string maps.
- #define ADD_STRING_MAP(type, size, name, Name) \
- string_maps_.Add(Unique<Map>::CreateImmovable( \
- graph->isolate()->factory()->name##_map()), zone());
- STRING_TYPE_LIST(ADD_STRING_MAP)
- #undef ADD_STRING_MAP
- DCHECK_EQ(kStringMapsSize, string_maps_.size());
-#ifdef DEBUG
- redundant_ = 0;
- removed_ = 0;
- removed_cho_ = 0;
- removed_cit_ = 0;
- narrowed_ = 0;
- loads_ = 0;
- empty_ = 0;
- compares_true_ = 0;
- compares_false_ = 0;
- transitions_ = 0;
-#endif
- }
-
- void Run();
-
- friend class HCheckTable;
-
- private:
- const UniqueSet<Map>* string_maps() const { return &string_maps_; }
-
- void PrintStats();
-
- HAliasAnalyzer* aliasing_;
- #define COUNT(type, size, name, Name) + 1
- static const int kStringMapsSize = 0 STRING_TYPE_LIST(COUNT);
- #undef COUNT
- UniqueSet<Map> string_maps_;
-#ifdef DEBUG
- int redundant_;
- int removed_;
- int removed_cho_;
- int removed_cit_;
- int narrowed_;
- int loads_;
- int empty_;
- int compares_true_;
- int compares_false_;
- int transitions_;
-#endif
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-dce.cc b/deps/v8/src/crankshaft/hydrogen-dce.cc
deleted file mode 100644
index 60b41cda76..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-dce.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-dce.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HDeadCodeEliminationPhase::MarkLive(
- HValue* instr, ZoneList<HValue*>* worklist) {
- if (instr->CheckFlag(HValue::kIsLive)) return; // Already live.
-
- if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr);
-
- // Transitively mark all inputs of live instructions live.
- worklist->Add(instr, zone());
- while (!worklist->is_empty()) {
- HValue* instr = worklist->RemoveLast();
- instr->SetFlag(HValue::kIsLive);
- for (int i = 0; i < instr->OperandCount(); ++i) {
- HValue* input = instr->OperandAt(i);
- if (!input->CheckFlag(HValue::kIsLive)) {
- input->SetFlag(HValue::kIsLive);
- worklist->Add(input, zone());
- if (FLAG_trace_dead_code_elimination) PrintLive(instr, input);
- }
- }
- }
-}
-
-
-void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
- AllowHandleDereference allow_deref;
- OFStream os(stdout);
- os << "[MarkLive ";
- if (ref != NULL) {
- os << *ref;
- } else {
- os << "root";
- }
- os << " -> " << *instr << "]" << std::endl;
-}
-
-
-void HDeadCodeEliminationPhase::MarkLiveInstructions() {
- ZoneList<HValue*> worklist(10, zone());
-
- // Transitively mark all live instructions, starting from roots.
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->CannotBeEliminated()) MarkLive(instr, &worklist);
- }
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- if (phi->CannotBeEliminated()) MarkLive(phi, &worklist);
- }
- }
-
- DCHECK(worklist.is_empty()); // Should have processed everything.
-}
-
-
-void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
- ZoneList<HPhi*> worklist(graph()->blocks()->length(), zone());
-
- // Remove any instruction not marked kIsLive.
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (!instr->CheckFlag(HValue::kIsLive)) {
- // Instruction has not been marked live, so remove it.
- instr->DeleteAndReplaceWith(NULL);
- } else {
- // Clear the liveness flag to leave the graph clean for the next DCE.
- instr->ClearFlag(HValue::kIsLive);
- }
- }
- // Collect phis that are dead and remove them in the next pass.
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- if (!phi->CheckFlag(HValue::kIsLive)) {
- worklist.Add(phi, zone());
- } else {
- phi->ClearFlag(HValue::kIsLive);
- }
- }
- }
-
- // Process phis separately to avoid simultaneously mutating the phi list.
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- HBasicBlock* block = phi->block();
- phi->DeleteAndReplaceWith(NULL);
- if (phi->HasMergedIndex()) {
- block->RecordDeletedPhi(phi->merged_index());
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-dce.h b/deps/v8/src/crankshaft/hydrogen-dce.h
deleted file mode 100644
index f620a3cfa8..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-dce.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_DCE_H_
-#define V8_CRANKSHAFT_HYDROGEN_DCE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HDeadCodeEliminationPhase : public HPhase {
- public:
- explicit HDeadCodeEliminationPhase(HGraph* graph)
- : HPhase("H_Dead code elimination", graph) { }
-
- void Run() {
- MarkLiveInstructions();
- RemoveDeadInstructions();
- }
-
- private:
- void MarkLive(HValue* instr, ZoneList<HValue*>* worklist);
- void PrintLive(HValue* ref, HValue* instr);
- void MarkLiveInstructions();
- void RemoveDeadInstructions();
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_DCE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-dehoist.cc b/deps/v8/src/crankshaft/hydrogen-dehoist.cc
deleted file mode 100644
index 0fccecc4d3..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-dehoist.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-dehoist.h"
-
-#include "src/base/safe_math.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
- HValue* index = array_operation->GetKey()->ActualValue();
- if (!index->representation().IsSmiOrInteger32()) return;
- if (!index->IsAdd() && !index->IsSub()) return;
-
- HConstant* constant;
- HValue* subexpression;
- HBinaryOperation* binary_operation = HBinaryOperation::cast(index);
- if (binary_operation->left()->IsConstant() && index->IsAdd()) {
- subexpression = binary_operation->right();
- constant = HConstant::cast(binary_operation->left());
- } else if (binary_operation->right()->IsConstant()) {
- subexpression = binary_operation->left();
- constant = HConstant::cast(binary_operation->right());
- } else {
- return;
- }
-
- if (!constant->HasInteger32Value()) return;
- v8::base::internal::CheckedNumeric<int32_t> checked_value =
- constant->Integer32Value();
- int32_t sign = binary_operation->IsSub() ? -1 : 1;
- checked_value = checked_value * sign;
-
- // Multiply value by elements size, bailing out on overflow.
- int32_t elements_kind_size =
- 1 << ElementsKindToShiftSize(array_operation->elements_kind());
- checked_value = checked_value * elements_kind_size;
- if (!checked_value.IsValid()) return;
- int32_t value = checked_value.ValueOrDie();
- if (value < 0) return;
-
- // Ensure that the array operation can add value to existing base offset
- // without overflowing.
- if (!array_operation->TryIncreaseBaseOffset(value)) return;
-
- array_operation->SetKey(subexpression);
- if (binary_operation->HasNoUses()) {
- binary_operation->DeleteAndReplaceWith(NULL);
- }
-
- array_operation->SetDehoisted(true);
-}
-
-
-void HDehoistIndexComputationsPhase::Run() {
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- for (int i = 0; i < blocks->length(); ++i) {
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->IsLoadKeyed()) {
- DehoistArrayIndex(HLoadKeyed::cast(instr));
- } else if (instr->IsStoreKeyed()) {
- DehoistArrayIndex(HStoreKeyed::cast(instr));
- }
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-dehoist.h b/deps/v8/src/crankshaft/hydrogen-dehoist.h
deleted file mode 100644
index d68f62cf7b..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-dehoist.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
-#define V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HDehoistIndexComputationsPhase : public HPhase {
- public:
- explicit HDehoistIndexComputationsPhase(HGraph* graph)
- : HPhase("H_Dehoist index computations", graph) { }
-
- void Run();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
deleted file mode 100644
index 89b2b7aede..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#include "src/crankshaft/hydrogen-environment-liveness.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase(
- HGraph* graph)
- : HPhase("H_Environment liveness analysis", graph),
- block_count_(graph->blocks()->length()),
- maximum_environment_size_(graph->maximum_environment_size()),
- live_at_block_start_(block_count_, zone()),
- first_simulate_(block_count_, zone()),
- first_simulate_invalid_for_index_(block_count_, zone()),
- markers_(maximum_environment_size_, zone()),
- collect_markers_(true),
- last_simulate_(NULL),
- went_live_since_last_simulate_(maximum_environment_size_, zone()) {
- DCHECK(maximum_environment_size_ > 0);
- for (int i = 0; i < block_count_; ++i) {
- live_at_block_start_.Add(
- new(zone()) BitVector(maximum_environment_size_, zone()), zone());
- first_simulate_.Add(NULL, zone());
- first_simulate_invalid_for_index_.Add(
- new(zone()) BitVector(maximum_environment_size_, zone()), zone());
- }
-}
-
-
-void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
- int index, HSimulate* simulate) {
- int operand_index = simulate->ToOperandIndex(index);
- if (operand_index == -1) {
- simulate->AddAssignedValue(index, graph()->GetConstantOptimizedOut());
- } else {
- simulate->SetOperandAt(operand_index, graph()->GetConstantOptimizedOut());
- }
-}
-
-
-void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
- HBasicBlock* block, BitVector* live) {
- // When a value is live in successor A but dead in B, we must
- // explicitly zap it in B.
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- HBasicBlock* successor = it.Current();
- int successor_id = successor->block_id();
- BitVector* live_in_successor = live_at_block_start_[successor_id];
- if (live_in_successor->Equals(*live)) continue;
- for (int i = 0; i < live->length(); ++i) {
- if (!live->Contains(i)) continue;
- if (live_in_successor->Contains(i)) continue;
- if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) {
- continue;
- }
- HSimulate* simulate = first_simulate_.at(successor_id);
- if (simulate == NULL) continue;
- DCHECK(VerifyClosures(simulate->closure(),
- block->last_environment()->closure()));
- ZapEnvironmentSlot(i, simulate);
- }
- }
-}
-
-
-void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
- HEnvironmentMarker* marker) {
- if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
- HSimulate* simulate = marker->next_simulate();
- if (simulate != NULL) {
- DCHECK(VerifyClosures(simulate->closure(), marker->closure()));
- ZapEnvironmentSlot(marker->index(), simulate);
- }
-}
-
-
-void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd(
- HBasicBlock* block,
- BitVector* live) {
- // Liveness at the end of each block: union of liveness in successors.
- live->Clear();
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- live->Union(*live_at_block_start_[it.Current()->block_id()]);
- }
-}
-
-
-void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
- HInstruction* instr,
- BitVector* live) {
- switch (instr->opcode()) {
- case HValue::kEnvironmentMarker: {
- HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr);
- int index = marker->index();
- if (!live->Contains(index)) {
- marker->SetFlag(HValue::kEndsLiveRange);
- } else {
- marker->ClearFlag(HValue::kEndsLiveRange);
- }
- if (!went_live_since_last_simulate_.Contains(index)) {
- marker->set_next_simulate(last_simulate_);
- }
- if (marker->kind() == HEnvironmentMarker::LOOKUP) {
- live->Add(index);
- } else {
- DCHECK(marker->kind() == HEnvironmentMarker::BIND);
- live->Remove(index);
- went_live_since_last_simulate_.Add(index);
- }
- if (collect_markers_) {
- // Populate |markers_| list during the first pass.
- markers_.Add(marker, zone());
- }
- break;
- }
- case HValue::kLeaveInlined:
- // No environment values are live at the end of an inlined section.
- live->Clear();
- last_simulate_ = NULL;
-
- // The following DCHECKs guard the assumption used in case
- // kEnterInlined below:
- DCHECK(instr->next()->IsSimulate());
- DCHECK(instr->next()->next()->IsGoto());
-
- break;
- case HValue::kEnterInlined: {
- // Those environment values are live that are live at any return
- // target block. Here we make use of the fact that the end of an
- // inline sequence always looks like this: HLeaveInlined, HSimulate,
- // HGoto (to return_target block), with no environment lookups in
- // between (see DCHECKs above).
- HEnterInlined* enter = HEnterInlined::cast(instr);
- live->Clear();
- for (int i = 0; i < enter->return_targets()->length(); ++i) {
- int return_id = enter->return_targets()->at(i)->block_id();
- live->Union(*live_at_block_start_[return_id]);
- }
- last_simulate_ = NULL;
- break;
- }
- case HValue::kSimulate:
- last_simulate_ = HSimulate::cast(instr);
- went_live_since_last_simulate_.Clear();
- break;
- default:
- break;
- }
-}
-
-
-void HEnvironmentLivenessAnalysisPhase::Run() {
- DCHECK(maximum_environment_size_ > 0);
-
- // Main iteration. Compute liveness of environment slots, and store it
- // for each block until it doesn't change any more. For efficiency, visit
- // blocks in reverse order and walk backwards through each block. We
- // need several iterations to propagate liveness through nested loops.
- BitVector live(maximum_environment_size_, zone());
- BitVector worklist(block_count_, zone());
- for (int i = 0; i < block_count_; ++i) {
- worklist.Add(i);
- }
- while (!worklist.IsEmpty()) {
- for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
- if (!worklist.Contains(block_id)) {
- continue;
- }
- worklist.Remove(block_id);
- last_simulate_ = NULL;
-
- HBasicBlock* block = graph()->blocks()->at(block_id);
- UpdateLivenessAtBlockEnd(block, &live);
-
- for (HInstruction* instr = block->end(); instr != NULL;
- instr = instr->previous()) {
- UpdateLivenessAtInstruction(instr, &live);
- }
-
- // Reached the start of the block, do necessary bookkeeping:
- // store computed information for this block and add predecessors
- // to the work list as necessary.
- first_simulate_.Set(block_id, last_simulate_);
- first_simulate_invalid_for_index_[block_id]->CopyFrom(
- went_live_since_last_simulate_);
- if (live_at_block_start_[block_id]->UnionIsChanged(live)) {
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- worklist.Add(block->predecessors()->at(i)->block_id());
- }
- if (block->IsInlineReturnTarget()) {
- worklist.Add(block->inlined_entry_block()->block_id());
- }
- }
- }
- // Only collect bind/lookup instructions during the first pass.
- collect_markers_ = false;
- }
-
- // Analysis finished. Zap dead environment slots.
- for (int i = 0; i < markers_.length(); ++i) {
- ZapEnvironmentSlotsForInstruction(markers_[i]);
- }
- for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = graph()->blocks()->at(block_id);
- UpdateLivenessAtBlockEnd(block, &live);
- ZapEnvironmentSlotsInSuccessors(block, &live);
- }
-
- // Finally, remove the HEnvironment{Bind,Lookup} markers.
- for (int i = 0; i < markers_.length(); ++i) {
- markers_[i]->DeleteAndReplaceWith(NULL);
- }
-}
-
-
-#ifdef DEBUG
-bool HEnvironmentLivenessAnalysisPhase::VerifyClosures(
- Handle<JSFunction> a, Handle<JSFunction> b) {
- base::LockGuard<base::Mutex> guard(isolate()->heap()->relocation_mutex());
- AllowHandleDereference for_verification;
- return a.is_identical_to(b);
-}
-#endif
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-environment-liveness.h b/deps/v8/src/crankshaft/hydrogen-environment-liveness.h
deleted file mode 100644
index d9e156b7e9..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-environment-liveness.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-#define V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Trims live ranges of environment slots by doing explicit liveness analysis.
-// Values in the environment are kept alive by every subsequent LInstruction
-// that is assigned an LEnvironment, which creates register pressure and
-// unnecessary spill slot moves. Therefore it is beneficial to trim the
-// live ranges of environment slots by zapping them with a constant after
-// the last lookup that refers to them.
-// Slots are identified by their index and only affected if whitelisted in
-// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
-class HEnvironmentLivenessAnalysisPhase : public HPhase {
- public:
- explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph);
-
- void Run();
-
- private:
- void ZapEnvironmentSlot(int index, HSimulate* simulate);
- void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live);
- void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
- void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
- void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
-#ifdef DEBUG
- bool VerifyClosures(Handle<JSFunction> a, Handle<JSFunction> b);
-#endif
-
- int block_count_;
-
- // Largest number of local variables in any environment in the graph
- // (including inlined environments).
- int maximum_environment_size_;
-
- // Per-block data. All these lists are indexed by block_id.
- ZoneList<BitVector*> live_at_block_start_;
- ZoneList<HSimulate*> first_simulate_;
- ZoneList<BitVector*> first_simulate_invalid_for_index_;
-
- // List of all HEnvironmentMarker instructions for quick iteration/deletion.
- // It is populated during the first pass over the graph, controlled by
- // |collect_markers_|.
- ZoneList<HEnvironmentMarker*> markers_;
- bool collect_markers_;
-
- // Keeps track of the last simulate seen, as well as the environment slots
- // for which a new live range has started since (so they must not be zapped
- // in that simulate when the end of another live range of theirs is found).
- HSimulate* last_simulate_;
- BitVector went_live_since_last_simulate_;
-
- DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
deleted file mode 100644
index 91b4ff2b67..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-escape-analysis.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) {
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (use->HasEscapingOperandAt(it.index())) {
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
- value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
- }
- return false;
- }
- if (use->HasOutOfBoundsAccess(size)) {
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(),
- value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
- }
- return false;
- }
- int redefined_index = use->RedefinedOperandIndex();
- if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) {
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
- value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
- }
- return false;
- }
- }
- return true;
-}
-
-
-void HEscapeAnalysisPhase::CollectCapturedValues() {
- int block_count = graph()->blocks()->length();
- for (int i = 0; i < block_count; ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (!instr->IsAllocate()) continue;
- HAllocate* allocate = HAllocate::cast(instr);
- if (!allocate->size()->IsInteger32Constant()) continue;
- int size_in_bytes = allocate->size()->GetInteger32Constant();
- if (HasNoEscapingUses(instr, size_in_bytes)) {
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) is being captured\n", instr->id(),
- instr->Mnemonic());
- }
- captured_.Add(instr, zone());
- }
- }
- }
-}
-
-
-HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
- Zone* zone = graph()->zone();
- HCapturedObject* state =
- new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone);
- state->InsertAfter(previous);
- return state;
-}
-
-
-// Create a new state for replacing HAllocate instructions.
-HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
- HInstruction* previous) {
- HConstant* undefined = graph()->GetConstantUndefined();
- HCapturedObject* state = NewState(previous);
- for (int index = 0; index < number_of_values_; index++) {
- state->SetOperandAt(index, undefined);
- }
- return state;
-}
-
-
-// Create a new state full of phis for loop header entries.
-HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
- HInstruction* previous,
- HCapturedObject* old_state) {
- HBasicBlock* block = previous->block();
- HCapturedObject* state = NewState(previous);
- for (int index = 0; index < number_of_values_; index++) {
- HValue* operand = old_state->OperandAt(index);
- HPhi* phi = NewPhiAndInsert(block, operand, index);
- state->SetOperandAt(index, phi);
- }
- return state;
-}
-
-
-// Create a new state by copying an existing one.
-HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
- HInstruction* previous,
- HCapturedObject* old_state) {
- HCapturedObject* state = NewState(previous);
- for (int index = 0; index < number_of_values_; index++) {
- HValue* operand = old_state->OperandAt(index);
- state->SetOperandAt(index, operand);
- }
- return state;
-}
-
-
-// Insert a newly created phi into the given block and fill all incoming
-// edges with the given value.
-HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
- HValue* incoming_value,
- int index) {
- Zone* zone = graph()->zone();
- HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
- for (int i = 0; i < block->predecessors()->length(); i++) {
- phi->AddInput(incoming_value);
- }
- block->AddPhi(phi);
- return phi;
-}
-
-
-// Insert a newly created value check as a replacement for map checks.
-HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
- HCheckMaps* mapcheck) {
- Zone* zone = graph()->zone();
- HValue* value = state->map_value();
- // TODO(mstarzinger): This will narrow a map check against a set of maps
- // down to the first element in the set. Revisit and fix this.
- HCheckValue* check = HCheckValue::New(graph()->isolate(), zone, NULL, value,
- mapcheck->maps()->at(0), false);
- check->InsertBefore(mapcheck);
- return check;
-}
-
-
-// Replace a field load with a given value, forcing Smi representation if
-// necessary.
-HValue* HEscapeAnalysisPhase::NewLoadReplacement(
- HLoadNamedField* load, HValue* load_value) {
- HValue* replacement = load_value;
- Representation representation = load->representation();
- if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
- Zone* zone = graph()->zone();
- HInstruction* new_instr = HForceRepresentation::New(
- graph()->isolate(), zone, NULL, load_value, representation);
- new_instr->InsertAfter(load);
- replacement = new_instr;
- }
- return replacement;
-}
-
-
-// Performs a forward data-flow analysis of all loads and stores on the
-// given captured allocation. This uses a reverse post-order iteration
-// over affected basic blocks. All non-escaping instructions are handled
-// and replaced during the analysis.
-void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
- HBasicBlock* allocate_block = allocate->block();
- block_states_.AddBlock(NULL, graph()->blocks()->length(), zone());
-
- // Iterate all blocks starting with the allocation block, since the
- // allocation cannot dominate blocks that come before.
- int start = allocate_block->block_id();
- for (int i = start; i < graph()->blocks()->length(); i++) {
- HBasicBlock* block = graph()->blocks()->at(i);
- HCapturedObject* state = StateAt(block);
-
- // Skip blocks that are not dominated by the captured allocation.
- if (!allocate_block->Dominates(block) && allocate_block != block) continue;
- if (FLAG_trace_escape_analysis) {
- PrintF("Analyzing data-flow in B%d\n", block->block_id());
- }
-
- // Go through all instructions of the current block.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- switch (instr->opcode()) {
- case HValue::kAllocate: {
- if (instr != allocate) continue;
- state = NewStateForAllocation(allocate);
- break;
- }
- case HValue::kLoadNamedField: {
- HLoadNamedField* load = HLoadNamedField::cast(instr);
- int index = load->access().offset() / kPointerSize;
- if (load->object() != allocate) continue;
- DCHECK(load->access().IsInobject());
- HValue* replacement =
- NewLoadReplacement(load, state->OperandAt(index));
- load->DeleteAndReplaceWith(replacement);
- if (FLAG_trace_escape_analysis) {
- PrintF("Replacing load #%d with #%d (%s)\n", load->id(),
- replacement->id(), replacement->Mnemonic());
- }
- break;
- }
- case HValue::kStoreNamedField: {
- HStoreNamedField* store = HStoreNamedField::cast(instr);
- int index = store->access().offset() / kPointerSize;
- if (store->object() != allocate) continue;
- DCHECK(store->access().IsInobject());
- state = NewStateCopy(store->previous(), state);
- state->SetOperandAt(index, store->value());
- if (store->has_transition()) {
- state->SetOperandAt(0, store->transition());
- }
- if (store->HasObservableSideEffects()) {
- state->ReuseSideEffectsFromStore(store);
- }
- store->DeleteAndReplaceWith(store->ActualValue());
- if (FLAG_trace_escape_analysis) {
- PrintF("Replacing store #%d%s\n", instr->id(),
- store->has_transition() ? " (with transition)" : "");
- }
- break;
- }
- case HValue::kArgumentsObject:
- case HValue::kCapturedObject:
- case HValue::kSimulate: {
- for (int i = 0; i < instr->OperandCount(); i++) {
- if (instr->OperandAt(i) != allocate) continue;
- instr->SetOperandAt(i, state);
- }
- break;
- }
- case HValue::kCheckHeapObject: {
- HCheckHeapObject* check = HCheckHeapObject::cast(instr);
- if (check->value() != allocate) continue;
- check->DeleteAndReplaceWith(check->ActualValue());
- break;
- }
- case HValue::kCheckMaps: {
- HCheckMaps* mapcheck = HCheckMaps::cast(instr);
- if (mapcheck->value() != allocate) continue;
- NewMapCheckAndInsert(state, mapcheck);
- mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
- break;
- }
- default:
- // Nothing to see here, move along ...
- break;
- }
- }
-
- // Propagate the block state forward to all successor blocks.
- for (int i = 0; i < block->end()->SuccessorCount(); i++) {
- HBasicBlock* succ = block->end()->SuccessorAt(i);
- if (!allocate_block->Dominates(succ)) continue;
- if (succ->predecessors()->length() == 1) {
- // Case 1: This is the only predecessor, just reuse state.
- SetStateAt(succ, state);
- } else if (StateAt(succ) == NULL && succ->IsLoopHeader()) {
- // Case 2: This is a state that enters a loop header, be
- // pessimistic about loop headers, add phis for all values.
- SetStateAt(succ, NewStateForLoopHeader(succ->first(), state));
- } else if (StateAt(succ) == NULL) {
- // Case 3: This is the first state propagated forward to the
- // successor, leave a copy of the current state.
- SetStateAt(succ, NewStateCopy(succ->first(), state));
- } else {
- // Case 4: This is a state that needs merging with previously
- // propagated states, potentially introducing new phis lazily or
- // adding values to existing phis.
- HCapturedObject* succ_state = StateAt(succ);
- for (int index = 0; index < number_of_values_; index++) {
- HValue* operand = state->OperandAt(index);
- HValue* succ_operand = succ_state->OperandAt(index);
- if (succ_operand->IsPhi() && succ_operand->block() == succ) {
- // Phi already exists, add operand.
- HPhi* phi = HPhi::cast(succ_operand);
- phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
- } else if (succ_operand != operand) {
- // Phi does not exist, introduce one.
- HPhi* phi = NewPhiAndInsert(succ, succ_operand, index);
- phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
- succ_state->SetOperandAt(index, phi);
- }
- }
- }
- }
- }
-
- // All uses have been handled.
- DCHECK(allocate->HasNoUses());
- allocate->DeleteAndReplaceWith(NULL);
-}
-
-
-void HEscapeAnalysisPhase::PerformScalarReplacement() {
- for (int i = 0; i < captured_.length(); i++) {
- HAllocate* allocate = HAllocate::cast(captured_.at(i));
-
- // Compute number of scalar values and start with clean slate.
- int size_in_bytes = allocate->size()->GetInteger32Constant();
- number_of_values_ = size_in_bytes / kPointerSize;
- number_of_objects_++;
- block_states_.Rewind(0);
-
- // Perform actual analysis step.
- AnalyzeDataFlow(allocate);
-
- cumulative_values_ += number_of_values_;
- DCHECK(allocate->HasNoUses());
- DCHECK(!allocate->IsLinked());
- }
-}
-
-
-void HEscapeAnalysisPhase::Run() {
- // TODO(mstarzinger): We disable escape analysis with OSR for now, because
- // spill slots might be uninitialized. Needs investigation.
- if (graph()->has_osr()) return;
- int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
- for (int i = 0; i < max_fixpoint_iteration_count; i++) {
- CollectCapturedValues();
- if (captured_.is_empty()) break;
- PerformScalarReplacement();
- captured_.Rewind(0);
- }
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-escape-analysis.h b/deps/v8/src/crankshaft/hydrogen-escape-analysis.h
deleted file mode 100644
index 7dac6debe0..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-escape-analysis.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
-#define V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
-
-#include "src/allocation.h"
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HEscapeAnalysisPhase : public HPhase {
- public:
- explicit HEscapeAnalysisPhase(HGraph* graph)
- : HPhase("H_Escape analysis", graph),
- captured_(0, zone()),
- number_of_objects_(0),
- number_of_values_(0),
- cumulative_values_(0),
- block_states_(graph->blocks()->length(), zone()) { }
-
- void Run();
-
- private:
- void CollectCapturedValues();
- bool HasNoEscapingUses(HValue* value, int size);
- void PerformScalarReplacement();
- void AnalyzeDataFlow(HInstruction* instr);
-
- HCapturedObject* NewState(HInstruction* prev);
- HCapturedObject* NewStateForAllocation(HInstruction* prev);
- HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*);
- HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state);
-
- HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
-
- HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
-
- HValue* NewLoadReplacement(HLoadNamedField* load, HValue* load_value);
-
- HCapturedObject* StateAt(HBasicBlock* block) {
- return block_states_.at(block->block_id());
- }
-
- void SetStateAt(HBasicBlock* block, HCapturedObject* state) {
- block_states_.Set(block->block_id(), state);
- }
-
- // List of allocations captured during collection phase.
- ZoneList<HInstruction*> captured_;
-
- // Number of captured objects on which scalar replacement was done.
- int number_of_objects_;
-
- // Number of scalar values tracked during scalar replacement phase.
- int number_of_values_;
- int cumulative_values_;
-
- // Map of block IDs to the data-flow state at block entry during the
- // scalar replacement phase.
- ZoneList<HCapturedObject*> block_states_;
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-flow-engine.h b/deps/v8/src/crankshaft/hydrogen-flow-engine.h
deleted file mode 100644
index 149c99bec5..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-flow-engine.h
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
-#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
-
-#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/crankshaft/hydrogen.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// An example implementation of effects that doesn't collect anything.
-class NoEffects : public ZoneObject {
- public:
- explicit NoEffects(Zone* zone) { }
-
- inline bool Disabled() {
- return true; // Nothing to do.
- }
- template <class State>
- inline void Apply(State* state) {
- // do nothing.
- }
- inline void Process(HInstruction* value, Zone* zone) {
- // do nothing.
- }
- inline void Union(NoEffects* other, Zone* zone) {
- // do nothing.
- }
-};
-
-
-// An example implementation of state that doesn't track anything.
-class NoState {
- public:
- inline NoState* Copy(HBasicBlock* succ, Zone* zone) {
- return this;
- }
- inline NoState* Process(HInstruction* value, Zone* zone) {
- return this;
- }
- inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) {
- return this;
- }
-};
-
-
-// This class implements an engine that can drive flow-sensitive analyses
-// over a graph of basic blocks, either one block at a time (local analysis)
-// or over the entire graph (global analysis). The flow engine is parameterized
-// by the type of the state and the effects collected while walking over the
-// graph.
-//
-// The "State" collects which facts are known while passing over instructions
-// in control flow order, and the "Effects" collect summary information about
-// which facts could be invalidated on other control flow paths. The effects
-// are necessary to correctly handle loops in the control flow graph without
-// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit
-// each block at most twice; once for state, and optionally once for effects.
-//
-// The flow engine requires the State and Effects classes to implement methods
-// like the example NoState and NoEffects above. It's not necessary to provide
-// an effects implementation for local analysis.
-template <class State, class Effects>
-class HFlowEngine {
- public:
- HFlowEngine(HGraph* graph, Zone* zone)
- : graph_(graph),
- zone_(zone),
-#if DEBUG
- pred_counts_(graph->blocks()->length(), zone),
-#endif
- block_states_(graph->blocks()->length(), zone),
- loop_effects_(graph->blocks()->length(), zone) {
- loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone);
- }
-
- // Local analysis. Iterates over the instructions in the given block.
- State* AnalyzeOneBlock(HBasicBlock* block, State* state) {
- // Go through all instructions of the current block, updating the state.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- state = state->Process(it.Current(), zone_);
- }
- return state;
- }
-
- // Global analysis. Iterates over all blocks that are dominated by the given
- // block, starting with the initial state. Computes effects for nested loops.
- void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) {
- InitializeStates();
- SetStateAt(root, initial);
-
- // Iterate all dominated blocks starting from the given start block.
- for (int i = root->block_id(); i < graph_->blocks()->length(); i++) {
- HBasicBlock* block = graph_->blocks()->at(i);
-
- // Skip blocks not dominated by the root node.
- if (SkipNonDominatedBlock(root, block)) continue;
- State* state = State::Finish(StateAt(block), block, zone_);
-
- if (block->IsReachable()) {
- DCHECK(state != NULL);
- if (block->IsLoopHeader()) {
- // Apply loop effects before analyzing loop body.
- ComputeLoopEffects(block)->Apply(state);
- } else {
- // Must have visited all predecessors before this block.
- CheckPredecessorCount(block);
- }
-
- // Go through all instructions of the current block, updating the state.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- state = state->Process(it.Current(), zone_);
- }
- }
-
- // Propagate the block state forward to all successor blocks.
- int max = block->end()->SuccessorCount();
- for (int i = 0; i < max; i++) {
- HBasicBlock* succ = block->end()->SuccessorAt(i);
- IncrementPredecessorCount(succ);
-
- if (max == 1 && succ->predecessors()->length() == 1) {
- // Optimization: successor can inherit this state.
- SetStateAt(succ, state);
- } else {
- // Merge the current state with the state already at the successor.
- SetStateAt(succ,
- State::Merge(StateAt(succ), succ, state, block, zone_));
- }
- }
- }
- }
-
- private:
- // Computes and caches the loop effects for the loop which has the given
- // block as its loop header.
- Effects* ComputeLoopEffects(HBasicBlock* block) {
- DCHECK(block->IsLoopHeader());
- Effects* effects = loop_effects_[block->block_id()];
- if (effects != NULL) return effects; // Already analyzed this loop.
-
- effects = new(zone_) Effects(zone_);
- loop_effects_[block->block_id()] = effects;
- if (effects->Disabled()) return effects; // No effects for this analysis.
-
- HLoopInformation* loop = block->loop_information();
- int end = loop->GetLastBackEdge()->block_id();
- // Process the blocks between the header and the end.
- for (int i = block->block_id(); i <= end; i++) {
- HBasicBlock* member = graph_->blocks()->at(i);
- if (i != block->block_id() && member->IsLoopHeader()) {
- // Recursively compute and cache the effects of the nested loop.
- DCHECK(member->loop_information()->parent_loop() == loop);
- Effects* nested = ComputeLoopEffects(member);
- effects->Union(nested, zone_);
- // Skip the nested loop's blocks.
- i = member->loop_information()->GetLastBackEdge()->block_id();
- } else {
- // Process all the effects of the block.
- if (member->IsUnreachable()) continue;
- DCHECK(member->current_loop() == loop);
- for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
- effects->Process(it.Current(), zone_);
- }
- }
- }
- return effects;
- }
-
- inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) {
- if (root->block_id() == 0) return false; // Visit the whole graph.
- if (root == other) return false; // Always visit the root.
- return !root->Dominates(other); // Only visit dominated blocks.
- }
-
- inline State* StateAt(HBasicBlock* block) {
- return block_states_.at(block->block_id());
- }
-
- inline void SetStateAt(HBasicBlock* block, State* state) {
- block_states_.Set(block->block_id(), state);
- }
-
- inline void InitializeStates() {
-#if DEBUG
- pred_counts_.Rewind(0);
- pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_);
-#endif
- block_states_.Rewind(0);
- block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_);
- }
-
- inline void CheckPredecessorCount(HBasicBlock* block) {
- DCHECK(block->predecessors()->length() == pred_counts_[block->block_id()]);
- }
-
- inline void IncrementPredecessorCount(HBasicBlock* block) {
-#if DEBUG
- pred_counts_[block->block_id()]++;
-#endif
- }
-
- HGraph* graph_; // The hydrogen graph.
- Zone* zone_; // Temporary zone.
-#if DEBUG
- ZoneList<int> pred_counts_; // Finished predecessors (by block id).
-#endif
- ZoneList<State*> block_states_; // Block states (by block id).
- ZoneList<Effects*> loop_effects_; // Loop effects (by block id).
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.cc b/deps/v8/src/crankshaft/hydrogen-gvn.cc
deleted file mode 100644
index e586f4778f..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-gvn.cc
+++ /dev/null
@@ -1,901 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-gvn.h"
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/list.h"
-#include "src/list-inl.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-
-class HInstructionMap final : public ZoneObject {
- public:
- HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
- : array_size_(0),
- lists_size_(0),
- count_(0),
- array_(NULL),
- lists_(NULL),
- free_list_head_(kNil),
- side_effects_tracker_(side_effects_tracker) {
- ResizeLists(kInitialSize, zone);
- Resize(kInitialSize, zone);
- }
-
- void Kill(SideEffects side_effects);
-
- void Add(HInstruction* instr, Zone* zone) {
- present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
- Insert(instr, zone);
- }
-
- HInstruction* Lookup(HInstruction* instr) const;
-
- HInstructionMap* Copy(Zone* zone) const {
- return new(zone) HInstructionMap(zone, this);
- }
-
- bool IsEmpty() const { return count_ == 0; }
-
- private:
- // A linked list of HInstruction* values. Stored in arrays.
- struct HInstructionMapListElement {
- HInstruction* instr;
- int next; // Index in the array of the next list element.
- };
- static const int kNil = -1; // The end of a linked list
-
- // Must be a power of 2.
- static const int kInitialSize = 16;
-
- HInstructionMap(Zone* zone, const HInstructionMap* other);
-
- void Resize(int new_size, Zone* zone);
- void ResizeLists(int new_size, Zone* zone);
- void Insert(HInstruction* instr, Zone* zone);
- uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
-
- int array_size_;
- int lists_size_;
- int count_; // The number of values stored in the HInstructionMap.
- SideEffects present_depends_on_;
- HInstructionMapListElement* array_;
- // Primary store - contains the first value
- // with a given hash. Colliding elements are stored in linked lists.
- HInstructionMapListElement* lists_;
- // The linked lists containing hash collisions.
- int free_list_head_; // Unused elements in lists_ are on the free list.
- SideEffectsTracker* side_effects_tracker_;
-};
-
-
-class HSideEffectMap final BASE_EMBEDDED {
- public:
- HSideEffectMap();
- explicit HSideEffectMap(HSideEffectMap* other);
- HSideEffectMap& operator= (const HSideEffectMap& other);
-
- void Kill(SideEffects side_effects);
-
- void Store(SideEffects side_effects, HInstruction* instr);
-
- bool IsEmpty() const { return count_ == 0; }
-
- inline HInstruction* operator[](int i) const {
- DCHECK(0 <= i);
- DCHECK(i < kNumberOfTrackedSideEffects);
- return data_[i];
- }
- inline HInstruction* at(int i) const { return operator[](i); }
-
- private:
- int count_;
- HInstruction* data_[kNumberOfTrackedSideEffects];
-};
-
-
-void TraceGVN(const char* msg, ...) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
-}
-
-
-// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
-// --trace-gvn is off.
-#define TRACE_GVN_1(msg, a1) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1); \
- }
-
-#define TRACE_GVN_2(msg, a1, a2) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2); \
- }
-
-#define TRACE_GVN_3(msg, a1, a2, a3) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3); \
- }
-
-#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4); \
- }
-
-#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
- if (FLAG_trace_gvn) { \
- TraceGVN(msg, a1, a2, a3, a4, a5); \
- }
-
-
-HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
- : array_size_(other->array_size_),
- lists_size_(other->lists_size_),
- count_(other->count_),
- present_depends_on_(other->present_depends_on_),
- array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_),
- side_effects_tracker_(other->side_effects_tracker_) {
- MemCopy(array_, other->array_,
- array_size_ * sizeof(HInstructionMapListElement));
- MemCopy(lists_, other->lists_,
- lists_size_ * sizeof(HInstructionMapListElement));
-}
-
-
-void HInstructionMap::Kill(SideEffects changes) {
- if (!present_depends_on_.ContainsAnyOf(changes)) return;
- present_depends_on_.RemoveAll();
- for (int i = 0; i < array_size_; ++i) {
- HInstruction* instr = array_[i].instr;
- if (instr != NULL) {
- // Clear list of collisions first, so we know if it becomes empty.
- int kept = kNil; // List of kept elements.
- int next;
- for (int current = array_[i].next; current != kNil; current = next) {
- next = lists_[current].next;
- HInstruction* instr = lists_[current].instr;
- SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
- if (depends_on.ContainsAnyOf(changes)) {
- // Drop it.
- count_--;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- } else {
- // Keep it.
- lists_[current].next = kept;
- kept = current;
- present_depends_on_.Add(depends_on);
- }
- }
- array_[i].next = kept;
-
- // Now possibly drop directly indexed element.
- instr = array_[i].instr;
- SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
- if (depends_on.ContainsAnyOf(changes)) { // Drop it.
- count_--;
- int head = array_[i].next;
- if (head == kNil) {
- array_[i].instr = NULL;
- } else {
- array_[i].instr = lists_[head].instr;
- array_[i].next = lists_[head].next;
- lists_[head].next = free_list_head_;
- free_list_head_ = head;
- }
- } else {
- present_depends_on_.Add(depends_on); // Keep it.
- }
- }
- }
-}
-
-
-HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
- uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
- uint32_t pos = Bound(hash);
- if (array_[pos].instr != NULL) {
- if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
- int next = array_[pos].next;
- while (next != kNil) {
- if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
- next = lists_[next].next;
- }
- }
- return NULL;
-}
-
-
-void HInstructionMap::Resize(int new_size, Zone* zone) {
- DCHECK(new_size > count_);
- // Hashing the values into the new array has no more collisions than in the
- // old hash map, so we can use the existing lists_ array, if we are careful.
-
- // Make sure we have at least one free element.
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
-
- HInstructionMapListElement* new_array =
- zone->NewArray<HInstructionMapListElement>(new_size);
- memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
-
- HInstructionMapListElement* old_array = array_;
- int old_size = array_size_;
-
- int old_count = count_;
- count_ = 0;
- // Do not modify present_depends_on_. It is currently correct.
- array_size_ = new_size;
- array_ = new_array;
-
- if (old_array != NULL) {
- // Iterate over all the elements in lists, rehashing them.
- for (int i = 0; i < old_size; ++i) {
- if (old_array[i].instr != NULL) {
- int current = old_array[i].next;
- while (current != kNil) {
- Insert(lists_[current].instr, zone);
- int next = lists_[current].next;
- lists_[current].next = free_list_head_;
- free_list_head_ = current;
- current = next;
- }
- // Rehash the directly stored instruction.
- Insert(old_array[i].instr, zone);
- }
- }
- }
- USE(old_count);
- DCHECK(count_ == old_count);
-}
-
-
-void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
- DCHECK(new_size > lists_size_);
-
- HInstructionMapListElement* new_lists =
- zone->NewArray<HInstructionMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
-
- HInstructionMapListElement* old_lists = lists_;
- int old_size = lists_size_;
-
- lists_size_ = new_size;
- lists_ = new_lists;
-
- if (old_lists != NULL) {
- MemCopy(lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
- }
- for (int i = old_size; i < lists_size_; ++i) {
- lists_[i].next = free_list_head_;
- free_list_head_ = i;
- }
-}
-
-
-void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
- DCHECK(instr != NULL);
-  // Resize when half of the hash table is filled.
- if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
- DCHECK(count_ < array_size_);
- count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
- if (array_[pos].instr == NULL) {
- array_[pos].instr = instr;
- array_[pos].next = kNil;
- } else {
- if (free_list_head_ == kNil) {
- ResizeLists(lists_size_ << 1, zone);
- }
- int new_element_pos = free_list_head_;
- DCHECK(new_element_pos != kNil);
- free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].instr = instr;
- lists_[new_element_pos].next = array_[pos].next;
- DCHECK(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
- array_[pos].next = new_element_pos;
- }
-}
-
-
-HSideEffectMap::HSideEffectMap() : count_(0) {
- memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
-}
-
-
-HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
- *this = *other; // Calls operator=.
-}
-
-
-HSideEffectMap& HSideEffectMap::operator=(const HSideEffectMap& other) {
- if (this != &other) {
- MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
- }
- return *this;
-}
-
-
-void HSideEffectMap::Kill(SideEffects side_effects) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
- if (data_[i] != NULL) count_--;
- data_[i] = NULL;
- }
- }
-}
-
-
-void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
- if (data_[i] == NULL) count_++;
- data_[i] = instr;
- }
- }
-}
-
-
-SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
- int index;
- SideEffects result(instr->ChangesFlags());
- if (result.ContainsFlag(kGlobalVars)) {
- if (instr->IsStoreNamedField()) {
- HStoreNamedField* store = HStoreNamedField::cast(instr);
- HConstant* target = HConstant::cast(store->object());
- if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
- &index)) {
- result.RemoveFlag(kGlobalVars);
- result.AddSpecial(GlobalVar(index));
- return result;
- }
- }
- for (index = 0; index < kNumberOfGlobalVars; ++index) {
- result.AddSpecial(GlobalVar(index));
- }
- } else if (result.ContainsFlag(kInobjectFields)) {
- if (instr->IsStoreNamedField() &&
- ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
- result.RemoveFlag(kInobjectFields);
- result.AddSpecial(InobjectField(index));
- } else {
- for (index = 0; index < kNumberOfInobjectFields; ++index) {
- result.AddSpecial(InobjectField(index));
- }
- }
- }
- return result;
-}
-
-
-SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
- int index;
- SideEffects result(instr->DependsOnFlags());
- if (result.ContainsFlag(kGlobalVars)) {
- if (instr->IsLoadNamedField()) {
- HLoadNamedField* load = HLoadNamedField::cast(instr);
- HConstant* target = HConstant::cast(load->object());
- if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
- &index)) {
- result.RemoveFlag(kGlobalVars);
- result.AddSpecial(GlobalVar(index));
- return result;
- }
- }
- for (index = 0; index < kNumberOfGlobalVars; ++index) {
- result.AddSpecial(GlobalVar(index));
- }
- } else if (result.ContainsFlag(kInobjectFields)) {
- if (instr->IsLoadNamedField() &&
- ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
- result.RemoveFlag(kInobjectFields);
- result.AddSpecial(InobjectField(index));
- } else {
- for (index = 0; index < kNumberOfInobjectFields; ++index) {
- result.AddSpecial(InobjectField(index));
- }
- }
- }
- return result;
-}
-
-
-std::ostream& operator<<(std::ostream& os, const TrackedEffects& te) {
- SideEffectsTracker* t = te.tracker;
- const char* separator = "";
- os << "[";
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- GVNFlag flag = GVNFlagFromInt(bit);
- if (te.effects.ContainsFlag(flag)) {
- os << separator;
- separator = ", ";
- switch (flag) {
-#define DECLARE_FLAG(Type) \
- case k##Type: \
- os << #Type; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- for (int index = 0; index < t->num_global_vars_; ++index) {
- if (te.effects.ContainsSpecial(t->GlobalVar(index))) {
- os << separator << "[" << *t->global_vars_[index].handle() << "]";
- separator = ", ";
- }
- }
- for (int index = 0; index < t->num_inobject_fields_; ++index) {
- if (te.effects.ContainsSpecial(t->InobjectField(index))) {
- os << separator << t->inobject_fields_[index];
- separator = ", ";
- }
- }
- os << "]";
- return os;
-}
-
-
-bool SideEffectsTracker::ComputeGlobalVar(Unique<PropertyCell> cell,
- int* index) {
- for (int i = 0; i < num_global_vars_; ++i) {
- if (cell == global_vars_[i]) {
- *index = i;
- return true;
- }
- }
- if (num_global_vars_ < kNumberOfGlobalVars) {
- if (FLAG_trace_gvn) {
- OFStream os(stdout);
- os << "Tracking global var [" << *cell.handle() << "] "
- << "(mapped to index " << num_global_vars_ << ")" << std::endl;
- }
- *index = num_global_vars_;
- global_vars_[num_global_vars_++] = cell;
- return true;
- }
- return false;
-}
-
-
-bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
- int* index) {
- for (int i = 0; i < num_inobject_fields_; ++i) {
- if (access.Equals(inobject_fields_[i])) {
- *index = i;
- return true;
- }
- }
- if (num_inobject_fields_ < kNumberOfInobjectFields) {
- if (FLAG_trace_gvn) {
- OFStream os(stdout);
- os << "Tracking inobject field access " << access << " (mapped to index "
- << num_inobject_fields_ << ")" << std::endl;
- }
- *index = num_inobject_fields_;
- inobject_fields_[num_inobject_fields_++] = access;
- return true;
- }
- return false;
-}
-
-
-HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
- : HPhase("H_Global value numbering", graph),
- removed_side_effects_(false),
- block_side_effects_(graph->blocks()->length(), zone()),
- loop_side_effects_(graph->blocks()->length(), zone()),
- visited_on_paths_(graph->blocks()->length(), zone()) {
- DCHECK(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(
- SideEffects(), graph->blocks()->length(), zone());
- loop_side_effects_.AddBlock(
- SideEffects(), graph->blocks()->length(), zone());
-}
-
-
-void HGlobalValueNumberingPhase::Run() {
- DCHECK(!removed_side_effects_);
- for (int i = FLAG_gvn_iterations; i > 0; --i) {
- // Compute the side effects.
- ComputeBlockSideEffects();
-
- // Perform loop invariant code motion if requested.
- if (FLAG_loop_invariant_code_motion) LoopInvariantCodeMotion();
-
- // Perform the actual value numbering.
- AnalyzeGraph();
-
- // Continue GVN if we removed any side effects.
- if (!removed_side_effects_) break;
- removed_side_effects_ = false;
-
- // Clear all side effects.
- DCHECK_EQ(block_side_effects_.length(), graph()->blocks()->length());
- DCHECK_EQ(loop_side_effects_.length(), graph()->blocks()->length());
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- block_side_effects_[i].RemoveAll();
- loop_side_effects_[i].RemoveAll();
- }
- visited_on_paths_.Clear();
- }
-}
-
-
-void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
- for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
- // Compute side effects for the block.
- HBasicBlock* block = graph()->blocks()->at(i);
- SideEffects side_effects;
- if (block->IsReachable() && !block->IsDeoptimizing()) {
- int id = block->block_id();
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
- }
- block_side_effects_[id].Add(side_effects);
-
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id].Add(side_effects);
- }
-
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- HBasicBlock* with_parent = block;
- if (block->IsLoopHeader()) side_effects = loop_side_effects_[id];
- do {
- HBasicBlock* parent_block = with_parent->parent_loop_header();
- loop_side_effects_[parent_block->block_id()].Add(side_effects);
- with_parent = parent_block;
- } while (with_parent->HasParentLoopHeader());
- }
- }
- }
-}
-
-
-void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
- TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
- graph()->use_optimistic_licm() ? "yes" : "no");
- for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- if (block->IsLoopHeader()) {
- SideEffects side_effects = loop_side_effects_[block->block_id()];
- if (FLAG_trace_gvn) {
- OFStream os(stdout);
- os << "Try loop invariant motion for " << *block << " changes "
- << Print(side_effects) << std::endl;
- }
- HBasicBlock* last = block->loop_information()->GetLastBackEdge();
- for (int j = block->block_id(); j <= last->block_id(); ++j) {
- ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
- }
- }
- }
-}
-
-
-void HGlobalValueNumberingPhase::ProcessLoopBlock(
- HBasicBlock* block,
- HBasicBlock* loop_header,
- SideEffects loop_kills) {
- HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- if (FLAG_trace_gvn) {
- OFStream os(stdout);
- os << "Loop invariant code motion for " << *block << " depends on "
- << Print(loop_kills) << std::endl;
- }
- HInstruction* instr = block->first();
- while (instr != NULL) {
- HInstruction* next = instr->next();
- if (instr->CheckFlag(HValue::kUseGVN)) {
- SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
- SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
- if (FLAG_trace_gvn) {
- OFStream os(stdout);
- os << "Checking instruction i" << instr->id() << " ("
- << instr->Mnemonic() << ") changes " << Print(changes)
- << ", depends on " << Print(depends_on) << ". Loop changes "
- << Print(loop_kills) << std::endl;
- }
- bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
- if (can_hoist && !graph()->use_optimistic_licm()) {
- can_hoist = block->IsLoopSuccessorDominator();
- }
-
- if (can_hoist) {
- bool inputs_loop_invariant = true;
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
- inputs_loop_invariant = false;
- }
- }
-
- if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n",
- instr->id(), pre_header->block_id());
- // Move the instruction out of the loop.
- instr->Unlink();
- instr->InsertBefore(pre_header->end());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- }
- }
- }
- instr = next;
- }
-}
-
-
-bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
- HBasicBlock* loop_header) {
- // If we've disabled code motion or we're in a block that unconditionally
- // deoptimizes, don't move any instructions.
- return graph()->allow_code_motion() && !instr->block()->IsDeoptimizing() &&
- instr->block()->IsReachable();
-}
-
-
-SideEffects
-HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator, HBasicBlock* dominated) {
- SideEffects side_effects;
- List<HBasicBlock*> blocks;
- for (;;) {
- for (int i = 0; i < dominated->predecessors()->length(); ++i) {
- HBasicBlock* block = dominated->predecessors()->at(i);
- if (dominator->block_id() < block->block_id() &&
- block->block_id() < dominated->block_id() &&
- !visited_on_paths_.Contains(block->block_id())) {
- visited_on_paths_.Add(block->block_id());
- side_effects.Add(block_side_effects_[block->block_id()]);
- if (block->IsLoopHeader()) {
- side_effects.Add(loop_side_effects_[block->block_id()]);
- }
- blocks.Add(block);
- }
- }
- if (blocks.is_empty()) break;
- dominated = blocks.RemoveLast();
- }
- return side_effects;
-}
-
-
-// Each instance of this class is like a "stack frame" for the recursive
-// traversal of the dominator tree done during GVN (the stack is handled
-// as a doubly linked list).
-// We reuse frames when possible, so the list length is limited by the depth
-// of the dominator tree, but this forces us to initialize each frame by
-// calling an explicit "Initialize" method instead of using a constructor.
-class GvnBasicBlockState: public ZoneObject {
- public:
- static GvnBasicBlockState* CreateEntry(Zone* zone,
- HBasicBlock* entry_block,
- HInstructionMap* entry_map) {
- return new(zone)
- GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
- }
-
- HBasicBlock* block() { return block_; }
- HInstructionMap* map() { return map_; }
- HSideEffectMap* dominators() { return &dominators_; }
-
- GvnBasicBlockState* next_in_dominator_tree_traversal(
- Zone* zone,
- HBasicBlock** dominator) {
- // This assignment needs to happen before calling next_dominated() because
- // that call can reuse "this" if we are at the last dominated block.
- *dominator = block();
- GvnBasicBlockState* result = next_dominated(zone);
- if (result == NULL) {
- GvnBasicBlockState* dominator_state = pop();
- if (dominator_state != NULL) {
- // This branch is guaranteed not to return NULL because pop() never
- // returns a state where "is_done() == true".
- *dominator = dominator_state->block();
- result = dominator_state->next_dominated(zone);
- } else {
-        // Unnecessary (we are returning NULL), but done for cleanliness.
- *dominator = NULL;
- }
- }
- return result;
- }
-
- private:
- void Initialize(HBasicBlock* block,
- HInstructionMap* map,
- HSideEffectMap* dominators,
- bool copy_map,
- Zone* zone) {
- block_ = block;
- map_ = copy_map ? map->Copy(zone) : map;
- dominated_index_ = -1;
- length_ = block->dominated_blocks()->length();
- if (dominators != NULL) {
- dominators_ = *dominators;
- }
- }
- bool is_done() { return dominated_index_ >= length_; }
-
- GvnBasicBlockState(GvnBasicBlockState* previous,
- HBasicBlock* block,
- HInstructionMap* map,
- HSideEffectMap* dominators,
- Zone* zone)
- : previous_(previous), next_(NULL) {
- Initialize(block, map, dominators, true, zone);
- }
-
- GvnBasicBlockState* next_dominated(Zone* zone) {
- dominated_index_++;
- if (dominated_index_ == length_ - 1) {
- // No need to copy the map for the last child in the dominator tree.
- Initialize(block_->dominated_blocks()->at(dominated_index_),
- map(),
- dominators(),
- false,
- zone);
- return this;
- } else if (dominated_index_ < length_) {
- return push(zone, block_->dominated_blocks()->at(dominated_index_));
- } else {
- return NULL;
- }
- }
-
- GvnBasicBlockState* push(Zone* zone, HBasicBlock* block) {
- if (next_ == NULL) {
- next_ =
- new(zone) GvnBasicBlockState(this, block, map(), dominators(), zone);
- } else {
- next_->Initialize(block, map(), dominators(), true, zone);
- }
- return next_;
- }
- GvnBasicBlockState* pop() {
- GvnBasicBlockState* result = previous_;
- while (result != NULL && result->is_done()) {
- TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
- block()->block_id(),
- previous_->block()->block_id())
- result = result->previous_;
- }
- return result;
- }
-
- GvnBasicBlockState* previous_;
- GvnBasicBlockState* next_;
- HBasicBlock* block_;
- HInstructionMap* map_;
- HSideEffectMap dominators_;
- int dominated_index_;
- int length_;
-};
-
-
-// This is a recursive traversal of the dominator tree, but it has been
-// turned into a loop to avoid stack overflows.
-// The logical "stack frames" of the recursion are kept in a list of
-// GvnBasicBlockState instances.
-void HGlobalValueNumberingPhase::AnalyzeGraph() {
- HBasicBlock* entry_block = graph()->entry_block();
- HInstructionMap* entry_map =
- new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
- GvnBasicBlockState* current =
- GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
-
- while (current != NULL) {
- HBasicBlock* block = current->block();
- HInstructionMap* map = current->map();
- HSideEffectMap* dominators = current->dominators();
-
- TRACE_GVN_2("Analyzing block B%d%s\n",
- block->block_id(),
- block->IsLoopHeader() ? " (loop header)" : "");
-
- // If this is a loop header kill everything killed by the loop.
- if (block->IsLoopHeader()) {
- map->Kill(loop_side_effects_[block->block_id()]);
- dominators->Kill(loop_side_effects_[block->block_id()]);
- }
-
- // Go through all instructions of the current block.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
- for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- HValue* other = dominators->at(i);
- GVNFlag flag = GVNFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
- TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
- i,
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- if (instr->HandleSideEffectDominator(flag, other)) {
- removed_side_effects_ = true;
- }
- }
- }
- }
- // Instruction was unlinked during graph traversal.
- if (!instr->IsLinked()) continue;
-
- SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
- if (!changes.IsEmpty()) {
- // Clear all instructions in the map that are affected by side effects.
- // Store instruction as the dominating one for tracked side effects.
- map->Kill(changes);
- dominators->Store(changes, instr);
- if (FLAG_trace_gvn) {
- OFStream os(stdout);
- os << "Instruction i" << instr->id() << " changes " << Print(changes)
- << std::endl;
- }
- }
- if (instr->CheckFlag(HValue::kUseGVN) &&
- !instr->CheckFlag(HValue::kCantBeReplaced)) {
- DCHECK(!instr->HasObservableSideEffects());
- HInstruction* other = map->Lookup(instr);
- if (other != NULL) {
- DCHECK(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
- instr->id(),
- instr->Mnemonic(),
- other->id(),
- other->Mnemonic());
- if (instr->HasSideEffects()) removed_side_effects_ = true;
- instr->DeleteAndReplaceWith(other);
- } else {
- map->Add(instr, zone());
- }
- }
- }
-
- HBasicBlock* dominator_block;
- GvnBasicBlockState* next =
- current->next_in_dominator_tree_traversal(zone(),
- &dominator_block);
-
- if (next != NULL) {
- HBasicBlock* dominated = next->block();
- HInstructionMap* successor_map = next->map();
- HSideEffectMap* successor_dominators = next->dominators();
-
- // Kill everything killed on any path between this block and the
- // dominated block. We don't have to traverse these paths if the
- // value map and the dominators list is already empty. If the range
- // of block ids (block_id, dominated_id) is empty there are no such
- // paths.
- if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
- dominator_block->block_id() + 1 < dominated->block_id()) {
- visited_on_paths_.Clear();
- SideEffects side_effects_on_all_paths =
- CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
- dominated);
- successor_map->Kill(side_effects_on_all_paths);
- successor_dominators->Kill(side_effects_on_all_paths);
- }
- }
- current = next;
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.h b/deps/v8/src/crankshaft/hydrogen-gvn.h
deleted file mode 100644
index 5f11737dbc..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-gvn.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_GVN_H_
-#define V8_CRANKSHAFT_HYDROGEN_GVN_H_
-
-#include <iosfwd>
-
-#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/crankshaft/hydrogen.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// This class extends GVNFlagSet with additional "special" dynamic side effects,
-// which can be used to represent side effects that cannot be expressed using
-// the GVNFlags of an HInstruction. These special side effects are tracked by a
-// SideEffectsTracker (see below).
-class SideEffects final {
- public:
- static const int kNumberOfSpecials = 64 - kNumberOfFlags;
-
- SideEffects() : bits_(0) {
- DCHECK(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
- }
- explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
- bool IsEmpty() const { return bits_ == 0; }
- bool ContainsFlag(GVNFlag flag) const {
- return (bits_ & MaskFlag(flag)) != 0;
- }
- bool ContainsSpecial(int special) const {
- return (bits_ & MaskSpecial(special)) != 0;
- }
- bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
- void Add(SideEffects set) { bits_ |= set.bits_; }
- void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
- void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
- void RemoveAll() { bits_ = 0; }
- uint64_t ToIntegral() const { return bits_; }
-
- private:
- uint64_t MaskFlag(GVNFlag flag) const {
- return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
- }
- uint64_t MaskSpecial(int special) const {
- DCHECK(special >= 0);
- DCHECK(special < kNumberOfSpecials);
- return static_cast<uint64_t>(1) << static_cast<unsigned>(
- special + kNumberOfFlags);
- }
-
- uint64_t bits_;
-};
-
-
-struct TrackedEffects;
-
-// Tracks global variable and inobject field loads/stores in a fine grained
-// fashion, and represents them using the "special" dynamic side effects of the
-// SideEffects class (see above). This way unrelated global variable/inobject
-// field stores don't prevent hoisting and merging of global variable/inobject
-// field loads.
-class SideEffectsTracker final BASE_EMBEDDED {
- public:
- SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
- SideEffects ComputeChanges(HInstruction* instr);
- SideEffects ComputeDependsOn(HInstruction* instr);
-
- private:
- friend std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
- bool ComputeGlobalVar(Unique<PropertyCell> cell, int* index);
- bool ComputeInobjectField(HObjectAccess access, int* index);
-
- static int GlobalVar(int index) {
- DCHECK(index >= 0);
- DCHECK(index < kNumberOfGlobalVars);
- return index;
- }
- static int InobjectField(int index) {
- DCHECK(index >= 0);
- DCHECK(index < kNumberOfInobjectFields);
- return index + kNumberOfGlobalVars;
- }
-
- // Track up to four global vars.
- static const int kNumberOfGlobalVars = 4;
- Unique<PropertyCell> global_vars_[kNumberOfGlobalVars];
- int num_global_vars_;
-
- // Track up to n inobject fields.
- static const int kNumberOfInobjectFields =
- SideEffects::kNumberOfSpecials - kNumberOfGlobalVars;
- HObjectAccess inobject_fields_[kNumberOfInobjectFields];
- int num_inobject_fields_;
-};
-
-
-// Helper class for printing, because the effects don't know their tracker.
-struct TrackedEffects {
- TrackedEffects(SideEffectsTracker* t, SideEffects e)
- : tracker(t), effects(e) {}
- SideEffectsTracker* tracker;
- SideEffects effects;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
-
-
-// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase final : public HPhase {
- public:
- explicit HGlobalValueNumberingPhase(HGraph* graph);
-
- void Run();
-
- private:
- SideEffects CollectSideEffectsOnPathsToDominatedBlock(
- HBasicBlock* dominator,
- HBasicBlock* dominated);
- void AnalyzeGraph();
- void ComputeBlockSideEffects();
- void LoopInvariantCodeMotion();
- void ProcessLoopBlock(HBasicBlock* block,
- HBasicBlock* before_loop,
- SideEffects loop_kills);
- bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
- TrackedEffects Print(SideEffects side_effects) {
- return TrackedEffects(&side_effects_tracker_, side_effects);
- }
-
- SideEffectsTracker side_effects_tracker_;
- bool removed_side_effects_;
-
- // A map of block IDs to their side effects.
- ZoneList<SideEffects> block_side_effects_;
-
- // A map of loop header block IDs to their loop's side effects.
- ZoneList<SideEffects> loop_side_effects_;
-
- // Used when collecting side effects on paths from dominator to
- // dominated.
- BitVector visited_on_paths_;
-
- DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-infer-representation.cc b/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
deleted file mode 100644
index bbff24e5d1..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-infer-representation.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-infer-representation.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HInferRepresentationPhase::AddToWorklist(HValue* current) {
- if (current->representation().IsTagged()) return;
- if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
- if (in_worklist_.Contains(current->id())) return;
- worklist_.Add(current, zone());
- in_worklist_.Add(current->id());
-}
-
-
-void HInferRepresentationPhase::Run() {
- // (1) Initialize bit vectors and count real uses. Each phi gets a
- // bit-vector of length <number of phis>.
- const ZoneList<HPhi*>* phi_list = graph()->phi_list();
- int phi_count = phi_list->length();
- ZoneList<BitVector*> connected_phis(phi_count, zone());
- for (int i = 0; i < phi_count; ++i) {
- phi_list->at(i)->InitRealUses(i);
- BitVector* connected_set = new(zone()) BitVector(phi_count, zone());
- connected_set->Add(i);
- connected_phis.Add(connected_set, zone());
- }
-
- // (2) Do a fixed point iteration to find the set of connected phis. A
- // phi is connected to another phi if its value is used either directly or
- // indirectly through a transitive closure of the def-use relation.
- bool change = true;
- while (change) {
- change = false;
- // We normally have far more "forward edges" than "backward edges",
- // so we terminate faster when we walk backwards.
- for (int i = phi_count - 1; i >= 0; --i) {
- HPhi* phi = phi_list->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (use->IsPhi()) {
- int id = HPhi::cast(use)->phi_id();
- if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
- change = true;
- }
- }
- }
- }
-
- // Set truncation flags for groups of connected phis. This is a conservative
- // approximation; the flag will be properly re-computed after representations
- // have been determined.
- if (phi_count > 0) {
- BitVector done(phi_count, zone());
- for (int i = 0; i < phi_count; ++i) {
- if (done.Contains(i)) continue;
-
- // Check if all uses of all connected phis in this group are truncating.
- bool all_uses_everywhere_truncating_int32 = true;
- bool all_uses_everywhere_truncating_smi = true;
- for (BitVector::Iterator it(connected_phis[i]);
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- all_uses_everywhere_truncating_int32 &=
- phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
- all_uses_everywhere_truncating_smi &=
- phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
- done.Add(index);
- }
-
- if (!all_uses_everywhere_truncating_int32) {
- // Clear truncation flag of this group of connected phis.
- for (BitVector::Iterator it(connected_phis[i]);
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
- }
- }
- if (!all_uses_everywhere_truncating_smi) {
- // Clear truncation flag of this group of connected phis.
- for (BitVector::Iterator it(connected_phis[i]);
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
- }
- }
- }
- }
-
- // Simplify constant phi inputs where possible.
- // This step uses kTruncatingToInt32 flags of phis.
- for (int i = 0; i < phi_count; ++i) {
- phi_list->at(i)->SimplifyConstantInputs();
- }
-
- // Use the phi reachability information from step 2 to
- // sum up the non-phi use counts of all connected phis.
- for (int i = 0; i < phi_count; ++i) {
- HPhi* phi = phi_list->at(i);
- for (BitVector::Iterator it(connected_phis[i]);
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- HPhi* it_use = phi_list->at(index);
- if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
- }
- }
-
- // Initialize work list
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- AddToWorklist(phis->at(j));
- }
-
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- AddToWorklist(current);
- }
- }
-
- // Do a fixed point iteration, trying to improve representations
- while (!worklist_.is_empty()) {
- HValue* current = worklist_.RemoveLast();
- current->InferRepresentation(this);
- in_worklist_.Remove(current->id());
- }
-
- // Lastly: any instruction that we don't have representation information
- // for defaults to Tagged.
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- HPhi* phi = phis->at(j);
- if (phi->representation().IsNone()) {
- phi->ChangeRepresentation(Representation::Tagged());
- }
- }
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->representation().IsNone() &&
- current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
- if (current->CheckFlag(HInstruction::kCannotBeTagged)) {
- current->ChangeRepresentation(Representation::Double());
- } else {
- current->ChangeRepresentation(Representation::Tagged());
- }
- }
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-infer-representation.h b/deps/v8/src/crankshaft/hydrogen-infer-representation.h
deleted file mode 100644
index 92f2bc8c59..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-infer-representation.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
-#define V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HInferRepresentationPhase : public HPhase {
- public:
- explicit HInferRepresentationPhase(HGraph* graph)
- : HPhase("H_Infer representations", graph),
- worklist_(8, zone()),
- in_worklist_(graph->GetMaximumValueID(), zone()) { }
-
- void Run();
- void AddToWorklist(HValue* current);
-
- private:
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-
- DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-infer-types.cc b/deps/v8/src/crankshaft/hydrogen-infer-types.cc
deleted file mode 100644
index a2fd72e443..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-infer-types.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-infer-types.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) {
- for (int i = from_inclusive; i <= to_inclusive; ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
-
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); j++) {
- phis->at(j)->UpdateInferredType();
- }
-
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- it.Current()->UpdateInferredType();
- }
-
- if (block->IsLoopHeader()) {
- HBasicBlock* last_back_edge =
- block->loop_information()->GetLastBackEdge();
- InferTypes(i + 1, last_back_edge->block_id());
- // Skip all blocks already processed by the recursive call.
- i = last_back_edge->block_id();
- // Update phis of the loop header now after the whole loop body is
- // guaranteed to be processed.
- for (int j = 0; j < block->phis()->length(); ++j) {
- HPhi* phi = block->phis()->at(j);
- worklist_.Add(phi, zone());
- in_worklist_.Add(phi->id());
- }
- while (!worklist_.is_empty()) {
- HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
- if (current->UpdateInferredType()) {
- for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!in_worklist_.Contains(use->id())) {
- in_worklist_.Add(use->id());
- worklist_.Add(use, zone());
- }
- }
- }
- }
- DCHECK(in_worklist_.IsEmpty());
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-infer-types.h b/deps/v8/src/crankshaft/hydrogen-infer-types.h
deleted file mode 100644
index 8acfeabd60..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-infer-types.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
-#define V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HInferTypesPhase : public HPhase {
- public:
- explicit HInferTypesPhase(HGraph* graph)
- : HPhase("H_Inferring types", graph), worklist_(8, zone()),
- in_worklist_(graph->GetMaximumValueID(), zone()) { }
-
- void Run() {
- InferTypes(0, graph()->blocks()->length() - 1);
- }
-
- private:
- void InferTypes(int from_inclusive, int to_inclusive);
-
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-
- DISALLOW_COPY_AND_ASSIGN(HInferTypesPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
deleted file mode 100644
index c0046fa98f..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ /dev/null
@@ -1,4051 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-instructions.h"
-
-#include "src/base/bits.h"
-#include "src/base/ieee754.h"
-#include "src/base/safe_math.h"
-#include "src/codegen.h"
-#include "src/crankshaft/hydrogen-infer-representation.h"
-#include "src/double.h"
-#include "src/elements.h"
-#include "src/factory.h"
-#include "src/objects-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) { \
- return builder->Do##type(this); \
- }
-HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-Representation RepresentationFromMachineType(MachineType type) {
- if (type == MachineType::Int32()) {
- return Representation::Integer32();
- }
-
- if (type == MachineType::TaggedSigned()) {
- return Representation::Smi();
- }
-
- if (type == MachineType::Pointer()) {
- return Representation::External();
- }
-
- return Representation::Tagged();
-}
-
-Isolate* HValue::isolate() const {
- DCHECK(block() != NULL);
- return block()->isolate();
-}
-
-
-void HValue::AssumeRepresentation(Representation r) {
- if (CheckFlag(kFlexibleRepresentation)) {
- ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback and
- // will not be changed later.
- ClearFlag(kFlexibleRepresentation);
- }
-}
-
-
-void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
- DCHECK(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
- if (representation().IsSmi() && HasNonSmiUse()) {
- UpdateRepresentation(
- Representation::Integer32(), h_infer, "use requirements");
- }
-}
-
-
-Representation HValue::RepresentationFromUses() {
- if (HasNoUses()) return Representation::None();
- Representation result = Representation::None();
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->observed_input_representation(it.index());
- result = result.generalize(rep);
-
- if (FLAG_trace_representation) {
- PrintF("#%d %s is used by #%d %s as %s%s\n",
- id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
- (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
- }
- }
- if (IsPhi()) {
- result = result.generalize(
- HPhi::cast(this)->representation_from_indirect_uses());
- }
-
- // External representations are dealt with separately.
- return result.IsExternal() ? Representation::None() : result;
-}
-
-
-void HValue::UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) {
- Representation r = representation();
- if (new_rep.is_more_general_than(r)) {
- if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return;
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s based on %s\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
- ChangeRepresentation(new_rep);
- AddDependantsToWorklist(h_infer);
- }
-}
-
-
-void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- h_infer->AddToWorklist(it.value());
- }
- for (int i = 0; i < OperandCount(); ++i) {
- h_infer->AddToWorklist(OperandAt(i));
- }
-}
-
-
-static int32_t ConvertAndSetOverflow(Representation r,
- int64_t result,
- bool* overflow) {
- if (r.IsSmi()) {
- if (result > Smi::kMaxValue) {
- *overflow = true;
- return Smi::kMaxValue;
- }
- if (result < Smi::kMinValue) {
- *overflow = true;
- return Smi::kMinValue;
- }
- } else {
- if (result > kMaxInt) {
- *overflow = true;
- return kMaxInt;
- }
- if (result < kMinInt) {
- *overflow = true;
- return kMinInt;
- }
- }
- return static_cast<int32_t>(result);
-}
-
-
-static int32_t AddWithoutOverflow(Representation r,
- int32_t a,
- int32_t b,
- bool* overflow) {
- int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
- return ConvertAndSetOverflow(r, result, overflow);
-}
-
-
-static int32_t SubWithoutOverflow(Representation r,
- int32_t a,
- int32_t b,
- bool* overflow) {
- int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
- return ConvertAndSetOverflow(r, result, overflow);
-}
-
-
-static int32_t MulWithoutOverflow(const Representation& r,
- int32_t a,
- int32_t b,
- bool* overflow) {
- int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
- return ConvertAndSetOverflow(r, result, overflow);
-}
-
-
-int32_t Range::Mask() const {
- if (lower_ == upper_) return lower_;
- if (lower_ >= 0) {
- int32_t res = 1;
- while (res < upper_) {
- res = (res << 1) | 1;
- }
- return res;
- }
- return 0xffffffff;
-}
-
-
-void Range::AddConstant(int32_t value) {
- if (value == 0) return;
- bool may_overflow = false; // Overflow is ignored here.
- Representation r = Representation::Integer32();
- lower_ = AddWithoutOverflow(r, lower_, value, &may_overflow);
- upper_ = AddWithoutOverflow(r, upper_, value, &may_overflow);
-#ifdef DEBUG
- Verify();
-#endif
-}
-
-
-void Range::Intersect(Range* other) {
- upper_ = Min(upper_, other->upper_);
- lower_ = Max(lower_, other->lower_);
- bool b = CanBeMinusZero() && other->CanBeMinusZero();
- set_can_be_minus_zero(b);
-}
-
-
-void Range::Union(Range* other) {
- upper_ = Max(upper_, other->upper_);
- lower_ = Min(lower_, other->lower_);
- bool b = CanBeMinusZero() || other->CanBeMinusZero();
- set_can_be_minus_zero(b);
-}
-
-
-void Range::CombinedMax(Range* other) {
- upper_ = Max(upper_, other->upper_);
- lower_ = Max(lower_, other->lower_);
- set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
-}
-
-
-void Range::CombinedMin(Range* other) {
- upper_ = Min(upper_, other->upper_);
- lower_ = Min(lower_, other->lower_);
- set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
-}
-
-
-void Range::Sar(int32_t value) {
- int32_t bits = value & 0x1F;
- lower_ = lower_ >> bits;
- upper_ = upper_ >> bits;
- set_can_be_minus_zero(false);
-}
-
-
-void Range::Shl(int32_t value) {
- int32_t bits = value & 0x1F;
- int old_lower = lower_;
- int old_upper = upper_;
- lower_ = lower_ << bits;
- upper_ = upper_ << bits;
- if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
- upper_ = kMaxInt;
- lower_ = kMinInt;
- }
- set_can_be_minus_zero(false);
-}
-
-
-bool Range::AddAndCheckOverflow(const Representation& r, Range* other) {
- bool may_overflow = false;
- lower_ = AddWithoutOverflow(r, lower_, other->lower(), &may_overflow);
- upper_ = AddWithoutOverflow(r, upper_, other->upper(), &may_overflow);
- if (may_overflow) {
- Clear();
- } else {
- KeepOrder();
- }
-#ifdef DEBUG
- Verify();
-#endif
- return may_overflow;
-}
-
-
-bool Range::SubAndCheckOverflow(const Representation& r, Range* other) {
- bool may_overflow = false;
- lower_ = SubWithoutOverflow(r, lower_, other->upper(), &may_overflow);
- upper_ = SubWithoutOverflow(r, upper_, other->lower(), &may_overflow);
- if (may_overflow) {
- Clear();
- } else {
- KeepOrder();
- }
-#ifdef DEBUG
- Verify();
-#endif
- return may_overflow;
-}
-
-void Range::Clear() {
- lower_ = kMinInt;
- upper_ = kMaxInt;
-}
-
-void Range::KeepOrder() {
- if (lower_ > upper_) {
- int32_t tmp = lower_;
- lower_ = upper_;
- upper_ = tmp;
- }
-}
-
-
-#ifdef DEBUG
-void Range::Verify() const {
- DCHECK(lower_ <= upper_);
-}
-#endif
-
-
-bool Range::MulAndCheckOverflow(const Representation& r, Range* other) {
- bool may_overflow = false;
- int v1 = MulWithoutOverflow(r, lower_, other->lower(), &may_overflow);
- int v2 = MulWithoutOverflow(r, lower_, other->upper(), &may_overflow);
- int v3 = MulWithoutOverflow(r, upper_, other->lower(), &may_overflow);
- int v4 = MulWithoutOverflow(r, upper_, other->upper(), &may_overflow);
- if (may_overflow) {
- Clear();
- } else {
- lower_ = Min(Min(v1, v2), Min(v3, v4));
- upper_ = Max(Max(v1, v2), Max(v3, v4));
- }
-#ifdef DEBUG
- Verify();
-#endif
- return may_overflow;
-}
-
-
-bool HValue::IsDefinedAfter(HBasicBlock* other) const {
- return block()->block_id() > other->block_id();
-}
-
-
-HUseListNode* HUseListNode::tail() {
- // Skip and remove dead items in the use list.
- while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
- tail_ = tail_->tail_;
- }
- return tail_;
-}
-
-
-bool HValue::CheckUsesForFlag(Flag f) const {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (it.value()->IsSimulate()) continue;
- if (!it.value()->CheckFlag(f)) return false;
- }
- return true;
-}
-
-
-bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (it.value()->IsSimulate()) continue;
- if (!it.value()->CheckFlag(f)) {
- *value = it.value();
- return false;
- }
- }
- return true;
-}
-
-
-bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
- bool return_value = false;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (it.value()->IsSimulate()) continue;
- if (!it.value()->CheckFlag(f)) return false;
- return_value = true;
- }
- return return_value;
-}
-
-
-HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
- Advance();
-}
-
-
-void HUseIterator::Advance() {
- current_ = next_;
- if (current_ != NULL) {
- next_ = current_->tail();
- value_ = current_->value();
- index_ = current_->index();
- }
-}
-
-
-int HValue::UseCount() const {
- int count = 0;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
- return count;
-}
-
-
-HUseListNode* HValue::RemoveUse(HValue* value, int index) {
- HUseListNode* previous = NULL;
- HUseListNode* current = use_list_;
- while (current != NULL) {
- if (current->value() == value && current->index() == index) {
- if (previous == NULL) {
- use_list_ = current->tail();
- } else {
- previous->set_tail(current->tail());
- }
- break;
- }
-
- previous = current;
- current = current->tail();
- }
-
-#ifdef DEBUG
- // Do not reuse use list nodes in debug mode, zap them.
- if (current != NULL) {
- HUseListNode* temp =
- new(block()->zone())
- HUseListNode(current->value(), current->index(), NULL);
- current->Zap();
- current = temp;
- }
-#endif
- return current;
-}
-
-
-bool HValue::Equals(HValue* other) {
- if (other->opcode() != opcode()) return false;
- if (!other->representation().Equals(representation())) return false;
- if (!other->type_.Equals(type_)) return false;
- if (other->flags() != flags()) return false;
- if (OperandCount() != other->OperandCount()) return false;
- for (int i = 0; i < OperandCount(); ++i) {
- if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
- }
- bool result = DataEquals(other);
- DCHECK(!result || Hashcode() == other->Hashcode());
- return result;
-}
-
-
-intptr_t HValue::Hashcode() {
- intptr_t result = opcode();
- int count = OperandCount();
- for (int i = 0; i < count; ++i) {
- result = result * 19 + OperandAt(i)->id() + (result >> 7);
- }
- return result;
-}
-
-
-const char* HValue::Mnemonic() const {
- switch (opcode()) {
-#define MAKE_CASE(type) case k##type: return #type;
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
-#undef MAKE_CASE
- case kPhi: return "Phi";
- default: return "";
- }
-}
-
-
-bool HValue::CanReplaceWithDummyUses() {
- return FLAG_unreachable_code_elimination &&
- !(block()->IsReachable() ||
- IsBlockEntry() ||
- IsControlInstruction() ||
- IsArgumentsObject() ||
- IsCapturedObject() ||
- IsSimulate() ||
- IsEnterInlined() ||
- IsLeaveInlined());
-}
-
-
-bool HValue::IsInteger32Constant() {
- return IsConstant() && HConstant::cast(this)->HasInteger32Value();
-}
-
-
-int32_t HValue::GetInteger32Constant() {
- return HConstant::cast(this)->Integer32Value();
-}
-
-
-bool HValue::EqualsInteger32Constant(int32_t value) {
- return IsInteger32Constant() && GetInteger32Constant() == value;
-}
-
-
-void HValue::SetOperandAt(int index, HValue* value) {
- RegisterUse(index, value);
- InternalSetOperandAt(index, value);
-}
-
-
-void HValue::DeleteAndReplaceWith(HValue* other) {
- // We replace all uses first, so Delete can assert that there are none.
- if (other != NULL) ReplaceAllUsesWith(other);
- Kill();
- DeleteFromGraph();
-}
-
-
-void HValue::ReplaceAllUsesWith(HValue* other) {
- while (use_list_ != NULL) {
- HUseListNode* list_node = use_list_;
- HValue* value = list_node->value();
- DCHECK(!value->block()->IsStartBlock());
- value->InternalSetOperandAt(list_node->index(), other);
- use_list_ = list_node->tail();
- list_node->set_tail(other->use_list_);
- other->use_list_ = list_node;
- }
-}
-
-
-void HValue::Kill() {
- // Instead of going through the entire use list of each operand, we only
- // check the first item in each use list and rely on the tail() method to
- // skip dead items, removing them lazily next time we traverse the list.
- SetFlag(kIsDead);
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* operand = OperandAt(i);
- if (operand == NULL) continue;
- HUseListNode* first = operand->use_list_;
- if (first != NULL && first->value()->CheckFlag(kIsDead)) {
- operand->use_list_ = first->tail();
- }
- }
-}
-
-
-void HValue::SetBlock(HBasicBlock* block) {
- DCHECK(block_ == NULL || block == NULL);
- block_ = block;
- if (id_ == kNoNumber && block != NULL) {
- id_ = block->graph()->GetNextValueID(this);
- }
-}
-
-
-std::ostream& operator<<(std::ostream& os, const HValue& v) {
- return v.PrintTo(os);
-}
-
-
-std::ostream& operator<<(std::ostream& os, const TypeOf& t) {
- if (t.value->representation().IsTagged() &&
- !t.value->type().Equals(HType::Tagged()))
- return os;
- return os << " type:" << t.value->type();
-}
-
-
-std::ostream& operator<<(std::ostream& os, const ChangesOf& c) {
- GVNFlagSet changes_flags = c.value->ChangesFlags();
- if (changes_flags.IsEmpty()) return os;
- os << " changes[";
- if (changes_flags == c.value->AllSideEffectsFlagSet()) {
- os << "*";
- } else {
- bool add_comma = false;
-#define PRINT_DO(Type) \
- if (changes_flags.Contains(k##Type)) { \
- if (add_comma) os << ","; \
- add_comma = true; \
- os << #Type; \
- }
- GVN_TRACKED_FLAG_LIST(PRINT_DO);
- GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
-#undef PRINT_DO
- }
- return os << "]";
-}
-
-
-bool HValue::HasMonomorphicJSObjectType() {
- return !GetMonomorphicJSObjectMap().is_null();
-}
-
-
-bool HValue::UpdateInferredType() {
- HType type = CalculateInferredType();
- bool result = (!type.Equals(type_));
- type_ = type;
- return result;
-}
-
-
-void HValue::RegisterUse(int index, HValue* new_value) {
- HValue* old_value = OperandAt(index);
- if (old_value == new_value) return;
-
- HUseListNode* removed = NULL;
- if (old_value != NULL) {
- removed = old_value->RemoveUse(this, index);
- }
-
- if (new_value != NULL) {
- if (removed == NULL) {
- new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
- this, index, new_value->use_list_);
- } else {
- removed->set_tail(new_value->use_list_);
- new_value->use_list_ = removed;
- }
- }
-}
-
-
-void HValue::AddNewRange(Range* r, Zone* zone) {
- if (!HasRange()) ComputeInitialRange(zone);
- if (!HasRange()) range_ = new(zone) Range();
- DCHECK(HasRange());
- r->StackUpon(range_);
- range_ = r;
-}
-
-
-void HValue::RemoveLastAddedRange() {
- DCHECK(HasRange());
- DCHECK(range_->next() != NULL);
- range_ = range_->next();
-}
-
-
-void HValue::ComputeInitialRange(Zone* zone) {
- DCHECK(!HasRange());
- range_ = InferRange(zone);
- DCHECK(HasRange());
-}
-
-
-std::ostream& HInstruction::PrintTo(std::ostream& os) const { // NOLINT
- os << Mnemonic() << " ";
- PrintDataTo(os) << ChangesOf(this) << TypeOf(this);
- if (CheckFlag(HValue::kHasNoObservableSideEffects)) os << " [noOSE]";
- if (CheckFlag(HValue::kIsDead)) os << " [dead]";
- return os;
-}
-
-
-std::ostream& HInstruction::PrintDataTo(std::ostream& os) const { // NOLINT
- for (int i = 0; i < OperandCount(); ++i) {
- if (i > 0) os << " ";
- os << NameOf(OperandAt(i));
- }
- return os;
-}
-
-
-void HInstruction::Unlink() {
- DCHECK(IsLinked());
- DCHECK(!IsControlInstruction()); // Must never move control instructions.
- DCHECK(!IsBlockEntry()); // Doesn't make sense to delete these.
- DCHECK(previous_ != NULL);
- previous_->next_ = next_;
- if (next_ == NULL) {
- DCHECK(block()->last() == this);
- block()->set_last(previous_);
- } else {
- next_->previous_ = previous_;
- }
- clear_block();
-}
-
-
-void HInstruction::InsertBefore(HInstruction* next) {
- DCHECK(!IsLinked());
- DCHECK(!next->IsBlockEntry());
- DCHECK(!IsControlInstruction());
- DCHECK(!next->block()->IsStartBlock());
- DCHECK(next->previous_ != NULL);
- HInstruction* prev = next->previous();
- prev->next_ = this;
- next->previous_ = this;
- next_ = next;
- previous_ = prev;
- SetBlock(next->block());
- if (!has_position() && next->has_position()) {
- set_position(next->position());
- }
-}
-
-
-void HInstruction::InsertAfter(HInstruction* previous) {
- DCHECK(!IsLinked());
- DCHECK(!previous->IsControlInstruction());
- DCHECK(!IsControlInstruction() || previous->next_ == NULL);
- HBasicBlock* block = previous->block();
- // Never insert anything except constants into the start block after finishing
- // it.
- if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
- DCHECK(block->end()->SecondSuccessor() == NULL);
- InsertAfter(block->end()->FirstSuccessor()->first());
- return;
- }
-
- // If we're inserting after an instruction with side-effects that is
- // followed by a simulate instruction, we need to insert after the
- // simulate instruction instead.
- HInstruction* next = previous->next_;
- if (previous->HasObservableSideEffects() && next != NULL) {
- DCHECK(next->IsSimulate());
- previous = next;
- next = previous->next_;
- }
-
- previous_ = previous;
- next_ = next;
- SetBlock(block);
- previous->next_ = this;
- if (next != NULL) next->previous_ = this;
- if (block->last() == previous) {
- block->set_last(this);
- }
- if (!has_position() && previous->has_position()) {
- set_position(previous->position());
- }
-}
-
-
-bool HInstruction::Dominates(HInstruction* other) {
- if (block() != other->block()) {
- return block()->Dominates(other->block());
- }
- // Both instructions are in the same basic block. This instruction
- // should precede the other one in order to dominate it.
- for (HInstruction* instr = next(); instr != NULL; instr = instr->next()) {
- if (instr == other) {
- return true;
- }
- }
- return false;
-}
-
-
-#ifdef DEBUG
-void HInstruction::Verify() {
- // Verify that input operands are defined before use.
- HBasicBlock* cur_block = block();
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* other_operand = OperandAt(i);
- if (other_operand == NULL) continue;
- HBasicBlock* other_block = other_operand->block();
- if (cur_block == other_block) {
- if (!other_operand->IsPhi()) {
- HInstruction* cur = this->previous();
- while (cur != NULL) {
- if (cur == other_operand) break;
- cur = cur->previous();
- }
- // Must reach other operand in the same block!
- DCHECK(cur == other_operand);
- }
- } else {
- // If the following assert fires, you may have forgotten an
- // AddInstruction.
- DCHECK(other_block->Dominates(cur_block));
- }
- }
-
- // Verify that instructions that may have side-effects are followed
- // by a simulate instruction.
- if (HasObservableSideEffects() && !IsOsrEntry()) {
- DCHECK(next()->IsSimulate());
- }
-
- // Verify that instructions that can be eliminated by GVN have overridden
- // HValue::DataEquals. The default implementation is UNREACHABLE. We
- // don't actually care whether DataEquals returns true or false here.
- if (CheckFlag(kUseGVN)) DataEquals(this);
-
- // Verify that all uses are in the graph.
- for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
- if (use.value()->IsInstruction()) {
- DCHECK(HInstruction::cast(use.value())->IsLinked());
- }
- }
-}
-#endif
-
-
-bool HInstruction::CanDeoptimize() {
- switch (opcode()) {
- case HValue::kAbnormalExit:
- case HValue::kAccessArgumentsAt:
- case HValue::kAllocate:
- case HValue::kArgumentsElements:
- case HValue::kArgumentsLength:
- case HValue::kArgumentsObject:
- case HValue::kBlockEntry:
- case HValue::kCallNewArray:
- case HValue::kCapturedObject:
- case HValue::kClassOfTestAndBranch:
- case HValue::kCompareGeneric:
- case HValue::kCompareHoleAndBranch:
- case HValue::kCompareMap:
- case HValue::kCompareNumericAndBranch:
- case HValue::kCompareObjectEqAndBranch:
- case HValue::kConstant:
- case HValue::kContext:
- case HValue::kDebugBreak:
- case HValue::kDeclareGlobals:
- case HValue::kDummyUse:
- case HValue::kEnterInlined:
- case HValue::kEnvironmentMarker:
- case HValue::kForceRepresentation:
- case HValue::kGoto:
- case HValue::kHasInstanceTypeAndBranch:
- case HValue::kInnerAllocatedObject:
- case HValue::kIsSmiAndBranch:
- case HValue::kIsStringAndBranch:
- case HValue::kIsUndetectableAndBranch:
- case HValue::kLeaveInlined:
- case HValue::kLoadFieldByIndex:
- case HValue::kLoadNamedField:
- case HValue::kLoadRoot:
- case HValue::kMathMinMax:
- case HValue::kParameter:
- case HValue::kPhi:
- case HValue::kPushArguments:
- case HValue::kReturn:
- case HValue::kSeqStringGetChar:
- case HValue::kStoreCodeEntry:
- case HValue::kStoreKeyed:
- case HValue::kStoreNamedField:
- case HValue::kStringCharCodeAt:
- case HValue::kStringCharFromCode:
- case HValue::kThisFunction:
- case HValue::kTypeofIsAndBranch:
- case HValue::kUnknownOSRValue:
- case HValue::kUseConst:
- return false;
-
- case HValue::kAdd:
- case HValue::kApplyArguments:
- case HValue::kBitwise:
- case HValue::kBoundsCheck:
- case HValue::kBranch:
- case HValue::kCallRuntime:
- case HValue::kCallWithDescriptor:
- case HValue::kChange:
- case HValue::kCheckArrayBufferNotNeutered:
- case HValue::kCheckHeapObject:
- case HValue::kCheckInstanceType:
- case HValue::kCheckMapValue:
- case HValue::kCheckMaps:
- case HValue::kCheckSmi:
- case HValue::kCheckValue:
- case HValue::kClampToUint8:
- case HValue::kDeoptimize:
- case HValue::kDiv:
- case HValue::kForInCacheArray:
- case HValue::kForInPrepareMap:
- case HValue::kHasInPrototypeChainAndBranch:
- case HValue::kInvokeFunction:
- case HValue::kLoadContextSlot:
- case HValue::kLoadFunctionPrototype:
- case HValue::kLoadKeyed:
- case HValue::kMathFloorOfDiv:
- case HValue::kMaybeGrowElements:
- case HValue::kMod:
- case HValue::kMul:
- case HValue::kOsrEntry:
- case HValue::kPower:
- case HValue::kPrologue:
- case HValue::kRor:
- case HValue::kSar:
- case HValue::kSeqStringSetChar:
- case HValue::kShl:
- case HValue::kShr:
- case HValue::kSimulate:
- case HValue::kStackCheck:
- case HValue::kStoreContextSlot:
- case HValue::kStringAdd:
- case HValue::kStringCompareAndBranch:
- case HValue::kSub:
- case HValue::kTransitionElementsKind:
- case HValue::kTrapAllocationMemento:
- case HValue::kTypeof:
- case HValue::kUnaryMathOperation:
- case HValue::kWrapReceiver:
- return true;
- }
- UNREACHABLE();
- return true;
-}
-
-
-std::ostream& operator<<(std::ostream& os, const NameOf& v) {
- return os << v.value->representation().Mnemonic() << v.value->id();
-}
-
-std::ostream& HDummyUse::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value());
-}
-
-
-std::ostream& HEnvironmentMarker::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << (kind() == BIND ? "bind" : "lookup") << " var[" << index()
- << "]";
-}
-
-
-std::ostream& HUnaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value()) << " #" << argument_count();
-}
-
-
-std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(first()) << " " << NameOf(second()) << " #"
- << argument_count();
-}
-
-std::ostream& HInvokeFunction::PrintTo(std::ostream& os) const { // NOLINT
- if (tail_call_mode() == TailCallMode::kAllow) os << "Tail";
- return HBinaryCall::PrintTo(os);
-}
-
-std::ostream& HInvokeFunction::PrintDataTo(std::ostream& os) const { // NOLINT
- HBinaryCall::PrintDataTo(os);
- if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
- os << ", JSTailCall";
- }
- return os;
-}
-
-std::ostream& HBoundsCheck::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(index()) << " " << NameOf(length());
- if (base() != NULL && (offset() != 0 || scale() != 0)) {
- os << " base: ((";
- if (base() != index()) {
- os << NameOf(index());
- } else {
- os << "index";
- }
- os << " + " << offset() << ") >> " << scale() << ")";
- }
- if (skip_check()) os << " [DISABLED]";
- return os;
-}
-
-
-void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
- DCHECK(CheckFlag(kFlexibleRepresentation));
- HValue* actual_index = index()->ActualValue();
- HValue* actual_length = length()->ActualValue();
- Representation index_rep = actual_index->representation();
- Representation length_rep = actual_length->representation();
- if (index_rep.IsTagged() && actual_index->type().IsSmi()) {
- index_rep = Representation::Smi();
- }
- if (length_rep.IsTagged() && actual_length->type().IsSmi()) {
- length_rep = Representation::Smi();
- }
- Representation r = index_rep.generalize(length_rep);
- if (r.is_more_general_than(Representation::Integer32())) {
- r = Representation::Integer32();
- }
- UpdateRepresentation(r, h_infer, "boundscheck");
-}
-
-
-Range* HBoundsCheck::InferRange(Zone* zone) {
- Representation r = representation();
- if (r.IsSmiOrInteger32() && length()->HasRange()) {
- int upper = length()->range()->upper() - (allow_equality() ? 0 : 1);
- int lower = 0;
-
- Range* result = new(zone) Range(lower, upper);
- if (index()->HasRange()) {
- result->Intersect(index()->range());
- }
-
- // In case of Smi representation, clamp result to Smi::kMaxValue.
- if (r.IsSmi()) result->ClampToSmi();
- return result;
- }
- return HValue::InferRange(zone);
-}
-
-
-std::ostream& HCallWithDescriptor::PrintDataTo(
- std::ostream& os) const { // NOLINT
- for (int i = 0; i < OperandCount(); i++) {
- os << NameOf(OperandAt(i)) << " ";
- }
- os << "#" << argument_count();
- if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
- os << ", JSTailCall";
- }
- return os;
-}
-
-
-std::ostream& HCallNewArray::PrintDataTo(std::ostream& os) const { // NOLINT
- os << ElementsKindToString(elements_kind()) << " ";
- return HBinaryCall::PrintDataTo(os);
-}
-
-
-std::ostream& HCallRuntime::PrintDataTo(std::ostream& os) const { // NOLINT
- os << function()->name << " ";
- if (save_doubles() == kSaveFPRegs) os << "[save doubles] ";
- return os << "#" << argument_count();
-}
-
-std::ostream& HClassOfTestAndBranch::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << "class_of_test(" << NameOf(value()) << ", \""
- << class_name()->ToCString().get() << "\")";
-}
-
-std::ostream& HWrapReceiver::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(receiver()) << " " << NameOf(function());
-}
-
-
-std::ostream& HAccessArgumentsAt::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << NameOf(arguments()) << "[" << NameOf(index()) << "], length "
- << NameOf(length());
-}
-
-
-std::ostream& HControlInstruction::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << " goto (";
- bool first_block = true;
- for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
- if (!first_block) os << ", ";
- os << *it.Current();
- first_block = false;
- }
- return os << ")";
-}
-
-
-std::ostream& HUnaryControlInstruction::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << NameOf(value());
- return HControlInstruction::PrintDataTo(os);
-}
-
-
-std::ostream& HReturn::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value()) << " (pop " << NameOf(parameter_count())
- << " values)";
-}
-
-
-Representation HBranch::observed_input_representation(int index) {
- if (expected_input_types_ &
- (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
- ToBooleanHint::kString | ToBooleanHint::kSymbol)) {
- return Representation::Tagged();
- }
- if (expected_input_types_ & ToBooleanHint::kUndefined) {
- if (expected_input_types_ & ToBooleanHint::kHeapNumber) {
- return Representation::Double();
- }
- return Representation::Tagged();
- }
- if (expected_input_types_ & ToBooleanHint::kHeapNumber) {
- return Representation::Double();
- }
- if (expected_input_types_ & ToBooleanHint::kSmallInteger) {
- return Representation::Smi();
- }
- return Representation::None();
-}
-
-
-bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
- HValue* value = this->value();
- if (value->EmitAtUses()) {
- DCHECK(value->IsConstant());
- DCHECK(!value->representation().IsDouble());
- *block = HConstant::cast(value)->BooleanValue()
- ? FirstSuccessor()
- : SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-std::ostream& HBranch::PrintDataTo(std::ostream& os) const { // NOLINT
- return HUnaryControlInstruction::PrintDataTo(os) << " "
- << expected_input_types();
-}
-
-
-std::ostream& HCompareMap::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(value()) << " (" << *map().handle() << ")";
- HControlInstruction::PrintDataTo(os);
- if (known_successor_index() == 0) {
- os << " [true]";
- } else if (known_successor_index() == 1) {
- os << " [false]";
- }
- return os;
-}
-
-
-const char* HUnaryMathOperation::OpName() const {
- switch (op()) {
- case kMathFloor:
- return "floor";
- case kMathFround:
- return "fround";
- case kMathRound:
- return "round";
- case kMathAbs:
- return "abs";
- case kMathCos:
- return "cos";
- case kMathLog:
- return "log";
- case kMathExp:
- return "exp";
- case kMathSin:
- return "sin";
- case kMathSqrt:
- return "sqrt";
- case kMathPowHalf:
- return "pow-half";
- case kMathClz32:
- return "clz32";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-Range* HUnaryMathOperation::InferRange(Zone* zone) {
- Representation r = representation();
- if (op() == kMathClz32) return new(zone) Range(0, 32);
- if (r.IsSmiOrInteger32() && value()->HasRange()) {
- if (op() == kMathAbs) {
- int upper = value()->range()->upper();
- int lower = value()->range()->lower();
- bool spans_zero = value()->range()->CanBeZero();
-      // Math.abs(kMinInt) overflows its representation, in which case the
-      // instruction deopts. Hence clamp it to kMaxInt.
- int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
- int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
- Range* result =
- new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
- Max(abs_lower, abs_upper));
- // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
- // Smi::kMaxValue.
- if (r.IsSmi()) result->ClampToSmi();
- return result;
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-std::ostream& HUnaryMathOperation::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << OpName() << " " << NameOf(value());
-}
-
-
-std::ostream& HUnaryOperation::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value());
-}
-
-
-std::ostream& HHasInstanceTypeAndBranch::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << NameOf(value());
- switch (from_) {
- case FIRST_JS_RECEIVER_TYPE:
- if (to_ == LAST_TYPE) os << " spec_object";
- break;
- case JS_REGEXP_TYPE:
- if (to_ == JS_REGEXP_TYPE) os << " reg_exp";
- break;
- case JS_ARRAY_TYPE:
- if (to_ == JS_ARRAY_TYPE) os << " array";
- break;
- case JS_FUNCTION_TYPE:
- if (to_ == JS_FUNCTION_TYPE) os << " function";
- break;
- default:
- break;
- }
- return os;
-}
-
-
-std::ostream& HTypeofIsAndBranch::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << NameOf(value()) << " == " << type_literal()->ToCString().get();
- return HControlInstruction::PrintDataTo(os);
-}
-
-
-namespace {
-
-String* TypeOfString(HConstant* constant, Isolate* isolate) {
- Heap* heap = isolate->heap();
- if (constant->HasNumberValue()) return heap->number_string();
- if (constant->HasStringValue()) return heap->string_string();
- switch (constant->GetInstanceType()) {
- case ODDBALL_TYPE: {
- Unique<Object> unique = constant->GetUnique();
- if (unique.IsKnownGlobal(heap->true_value()) ||
- unique.IsKnownGlobal(heap->false_value())) {
- return heap->boolean_string();
- }
- if (unique.IsKnownGlobal(heap->null_value())) {
- return heap->object_string();
- }
- DCHECK(unique.IsKnownGlobal(heap->undefined_value()));
- return heap->undefined_string();
- }
- case SYMBOL_TYPE:
- return heap->symbol_string();
- default:
- if (constant->IsUndetectable()) return heap->undefined_string();
- if (constant->IsCallable()) return heap->function_string();
- return heap->object_string();
- }
-}
-
-} // namespace
-
-
-bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (FLAG_fold_constants && value()->IsConstant()) {
- HConstant* constant = HConstant::cast(value());
- String* type_string = TypeOfString(constant, isolate());
- bool same_type = type_literal_.IsKnownGlobal(type_string);
- *block = same_type ? FirstSuccessor() : SecondSuccessor();
- return true;
- } else if (value()->representation().IsSpecialization()) {
- bool number_type =
- type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
- *block = number_type ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-std::ostream& HCheckMapValue::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value()) << " " << NameOf(map());
-}
-
-
-HValue* HCheckMapValue::Canonicalize() {
- if (map()->IsConstant()) {
- HConstant* c_map = HConstant::cast(map());
- return HCheckMaps::CreateAndInsertAfter(
- block()->graph()->zone(), value(), c_map->MapValue(),
- c_map->HasStableMapValue(), this);
- }
- return this;
-}
-
-
-std::ostream& HForInPrepareMap::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(enumerable());
-}
-
-
-std::ostream& HForInCacheArray::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(enumerable()) << " " << NameOf(map()) << "[" << idx_
- << "]";
-}
-
-
-std::ostream& HLoadFieldByIndex::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << NameOf(object()) << " " << NameOf(index());
-}
-
-
-static bool MatchLeftIsOnes(HValue* l, HValue* r, HValue** negated) {
- if (!l->EqualsInteger32Constant(~0)) return false;
- *negated = r;
- return true;
-}
-
-
-static bool MatchNegationViaXor(HValue* instr, HValue** negated) {
- if (!instr->IsBitwise()) return false;
- HBitwise* b = HBitwise::cast(instr);
- return (b->op() == Token::BIT_XOR) &&
- (MatchLeftIsOnes(b->left(), b->right(), negated) ||
- MatchLeftIsOnes(b->right(), b->left(), negated));
-}
-
-
-static bool MatchDoubleNegation(HValue* instr, HValue** arg) {
- HValue* negated;
- return MatchNegationViaXor(instr, &negated) &&
- MatchNegationViaXor(negated, arg);
-}
-
-
-HValue* HBitwise::Canonicalize() {
- if (!representation().IsSmiOrInteger32()) return this;
- // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
- int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
- if (left()->EqualsInteger32Constant(nop_constant) &&
- !right()->CheckFlag(kUint32)) {
- return right();
- }
- if (right()->EqualsInteger32Constant(nop_constant) &&
- !left()->CheckFlag(kUint32)) {
- return left();
- }
- // Optimize double negation, a common pattern used for ToInt32(x).
- HValue* arg;
- if (MatchDoubleNegation(this, &arg) && !arg->CheckFlag(kUint32)) {
- return arg;
- }
- return this;
-}
-
-
-// static
-HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- ExternalAddType external_add_type) {
- // For everything else, you should use the other factory method without
- // ExternalAddType.
- DCHECK_EQ(external_add_type, AddOfExternalAndTagged);
- return new (zone) HAdd(context, left, right, external_add_type);
-}
-
-
-Representation HAdd::RepresentationFromInputs() {
- Representation left_rep = left()->representation();
- if (left_rep.IsExternal()) {
- return Representation::External();
- }
- return HArithmeticBinaryOperation::RepresentationFromInputs();
-}
-
-
-Representation HAdd::RequiredInputRepresentation(int index) {
- if (index == 2) {
- Representation left_rep = left()->representation();
- if (left_rep.IsExternal()) {
- if (external_add_type_ == AddOfExternalAndTagged) {
- return Representation::Tagged();
- } else {
- return Representation::Integer32();
- }
- }
- }
- return HArithmeticBinaryOperation::RequiredInputRepresentation(index);
-}
-
-
-static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
- return arg1->representation().IsSpecialization() &&
- arg2->EqualsInteger32Constant(identity);
-}
-
-
-HValue* HAdd::Canonicalize() {
- // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0
- if (IsIdentityOperation(left(), right(), 0) &&
- !left()->representation().IsDouble()) { // Left could be -0.
- return left();
- }
- if (IsIdentityOperation(right(), left(), 0) &&
- !left()->representation().IsDouble()) { // Right could be -0.
- return right();
- }
- return this;
-}
-
-
-HValue* HSub::Canonicalize() {
- if (IsIdentityOperation(left(), right(), 0)) return left();
- return this;
-}
-
-
-HValue* HMul::Canonicalize() {
- if (IsIdentityOperation(left(), right(), 1)) return left();
- if (IsIdentityOperation(right(), left(), 1)) return right();
- return this;
-}
-
-
-bool HMul::MulMinusOne() {
- if (left()->EqualsInteger32Constant(-1) ||
- right()->EqualsInteger32Constant(-1)) {
- return true;
- }
-
- return false;
-}
-
-
-HValue* HMod::Canonicalize() {
- return this;
-}
-
-
-HValue* HDiv::Canonicalize() {
- if (IsIdentityOperation(left(), right(), 1)) return left();
- return this;
-}
-
-
-HValue* HChange::Canonicalize() {
- return (from().Equals(to())) ? value() : this;
-}
-
-
-HValue* HWrapReceiver::Canonicalize() {
- if (HasNoUses()) return NULL;
- if (receiver()->type().IsJSReceiver()) {
- return receiver();
- }
- return this;
-}
-
-
-std::ostream& HTypeof::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value());
-}
-
-
-HInstruction* HForceRepresentation::New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* value,
- Representation representation) {
- if (FLAG_fold_constants && value->IsConstant()) {
- HConstant* c = HConstant::cast(value);
- c = c->CopyToRepresentation(representation, zone);
- if (c != NULL) return c;
- }
- return new(zone) HForceRepresentation(value, representation);
-}
-
-
-std::ostream& HForceRepresentation::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << representation().Mnemonic() << " " << NameOf(value());
-}
-
-
-std::ostream& HChange::PrintDataTo(std::ostream& os) const { // NOLINT
- HUnaryOperation::PrintDataTo(os);
- os << " " << from().Mnemonic() << " to " << to().Mnemonic();
-
- if (CanTruncateToSmi()) os << " truncating-smi";
- if (CanTruncateToInt32()) os << " truncating-int32";
- if (CanTruncateToNumber()) os << " truncating-number";
- if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
- return os;
-}
-
-
-HValue* HUnaryMathOperation::Canonicalize() {
- if (op() == kMathRound || op() == kMathFloor) {
- HValue* val = value();
- if (val->IsChange()) val = HChange::cast(val)->value();
- if (val->representation().IsSmiOrInteger32()) {
- if (val->representation().Equals(representation())) return val;
- return Prepend(new (block()->zone())
- HChange(val, representation(), false, false, true));
- }
- }
- if (op() == kMathFloor && representation().IsSmiOrInteger32() &&
- value()->IsDiv() && value()->HasOneUse()) {
- HDiv* hdiv = HDiv::cast(value());
-
- HValue* left = hdiv->left();
- if (left->representation().IsInteger32() && !left->CheckFlag(kUint32)) {
- // A value with an integer representation does not need to be transformed.
- } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32() &&
- !HChange::cast(left)->value()->CheckFlag(kUint32)) {
- // A change from an integer32 can be replaced by the integer32 value.
- left = HChange::cast(left)->value();
- } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
- left = Prepend(new (block()->zone()) HChange(
- left, Representation::Integer32(), false, false, true));
- } else {
- return this;
- }
-
- HValue* right = hdiv->right();
- if (right->IsInteger32Constant()) {
- right = Prepend(HConstant::cast(right)->CopyToRepresentation(
- Representation::Integer32(), right->block()->zone()));
- } else if (right->representation().IsInteger32() &&
- !right->CheckFlag(kUint32)) {
- // A value with an integer representation does not need to be transformed.
- } else if (right->IsChange() &&
- HChange::cast(right)->from().IsInteger32() &&
- !HChange::cast(right)->value()->CheckFlag(kUint32)) {
- // A change from an integer32 can be replaced by the integer32 value.
- right = HChange::cast(right)->value();
- } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
- right = Prepend(new (block()->zone()) HChange(
- right, Representation::Integer32(), false, false, true));
- } else {
- return this;
- }
-
- return Prepend(HMathFloorOfDiv::New(
- block()->graph()->isolate(), block()->zone(), context(), left, right));
- }
- return this;
-}
-
-
-HValue* HCheckInstanceType::Canonicalize() {
- if ((check_ == IS_JS_RECEIVER && value()->type().IsJSReceiver()) ||
- (check_ == IS_JS_ARRAY && value()->type().IsJSArray()) ||
- (check_ == IS_STRING && value()->type().IsString())) {
- return value();
- }
-
- if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) {
- if (HConstant::cast(value())->HasInternalizedStringValue()) {
- return value();
- }
- }
- return this;
-}
-
-
-void HCheckInstanceType::GetCheckInterval(InstanceType* first,
- InstanceType* last) {
- DCHECK(is_interval_check());
- switch (check_) {
- case IS_JS_RECEIVER:
- *first = FIRST_JS_RECEIVER_TYPE;
- *last = LAST_JS_RECEIVER_TYPE;
- return;
- case IS_JS_ARRAY:
- *first = *last = JS_ARRAY_TYPE;
- return;
- case IS_JS_FUNCTION:
- *first = *last = JS_FUNCTION_TYPE;
- return;
- case IS_JS_DATE:
- *first = *last = JS_DATE_TYPE;
- return;
- default:
- UNREACHABLE();
- }
-}
-
-
-void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
- DCHECK(!is_interval_check());
- switch (check_) {
- case IS_STRING:
- *mask = kIsNotStringMask;
- *tag = kStringTag;
- return;
- case IS_INTERNALIZED_STRING:
- *mask = kIsNotStringMask | kIsNotInternalizedMask;
- *tag = kInternalizedTag;
- return;
- default:
- UNREACHABLE();
- }
-}
-
-
-std::ostream& HCheckMaps::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(value()) << " [" << *maps()->at(0).handle();
- for (int i = 1; i < maps()->size(); ++i) {
- os << "," << *maps()->at(i).handle();
- }
- os << "]";
- if (IsStabilityCheck()) os << "(stability-check)";
- return os;
-}
-
-
-HValue* HCheckMaps::Canonicalize() {
- if (!IsStabilityCheck() && maps_are_stable() && value()->IsConstant()) {
- HConstant* c_value = HConstant::cast(value());
- if (c_value->HasObjectMap()) {
- for (int i = 0; i < maps()->size(); ++i) {
- if (c_value->ObjectMap() == maps()->at(i)) {
- if (maps()->size() > 1) {
- set_maps(new(block()->graph()->zone()) UniqueSet<Map>(
- maps()->at(i), block()->graph()->zone()));
- }
- MarkAsStabilityCheck();
- break;
- }
- }
- }
- }
- return this;
-}
-
-
-std::ostream& HCheckValue::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value()) << " " << Brief(*object().handle());
-}
-
-
-HValue* HCheckValue::Canonicalize() {
- return (value()->IsConstant() &&
- HConstant::cast(value())->EqualsUnique(object_)) ? NULL : this;
-}
-
-
-const char* HCheckInstanceType::GetCheckName() const {
- switch (check_) {
- case IS_JS_RECEIVER: return "object";
- case IS_JS_ARRAY: return "array";
- case IS_JS_FUNCTION:
- return "function";
- case IS_JS_DATE:
- return "date";
- case IS_STRING: return "string";
- case IS_INTERNALIZED_STRING: return "internalized_string";
- }
- UNREACHABLE();
- return "";
-}
-
-
-std::ostream& HCheckInstanceType::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << GetCheckName() << " ";
- return HUnaryOperation::PrintDataTo(os);
-}
-
-
-std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const { // NOLINT
- const char* type = "expression";
- if (environment_->is_local_index(index_)) type = "local";
- if (environment_->is_special_index(index_)) type = "special";
- if (environment_->is_parameter_index(index_)) type = "parameter";
- return os << type << " @ " << index_;
-}
-
-
-Range* HValue::InferRange(Zone* zone) {
- Range* result;
- if (representation().IsSmi() || type().IsSmi()) {
- result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
- result->set_can_be_minus_zero(false);
- } else {
- result = new(zone) Range();
- result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32));
- // TODO(jkummerow): The range cannot be minus zero when the upper type
- // bound is Integer32.
- }
- return result;
-}
-
-
-Range* HChange::InferRange(Zone* zone) {
- Range* input_range = value()->range();
- if (from().IsInteger32() && !value()->CheckFlag(HInstruction::kUint32) &&
- (to().IsSmi() ||
- (to().IsTagged() &&
- input_range != NULL &&
- input_range->IsInSmiRange()))) {
- set_type(HType::Smi());
- ClearChangesFlag(kNewSpacePromotion);
- }
- if (to().IsSmiOrTagged() &&
- input_range != NULL &&
- input_range->IsInSmiRange() &&
- (!SmiValuesAre32Bits() ||
- !value()->CheckFlag(HValue::kUint32) ||
- input_range->upper() != kMaxInt)) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- ClearFlag(kCanOverflow);
- }
- Range* result = (input_range != NULL)
- ? input_range->Copy(zone)
- : HValue::InferRange(zone);
- result->set_can_be_minus_zero(!to().IsSmiOrInteger32() ||
- !(CheckFlag(kAllUsesTruncatingToInt32) ||
- CheckFlag(kAllUsesTruncatingToSmi)));
- if (to().IsSmi()) result->ClampToSmi();
- return result;
-}
-
-
-Range* HConstant::InferRange(Zone* zone) {
- if (HasInteger32Value()) {
- Range* result = new(zone) Range(int32_value_, int32_value_);
- result->set_can_be_minus_zero(false);
- return result;
- }
- return HValue::InferRange(zone);
-}
-
-
-SourcePosition HPhi::position() const { return block()->first()->position(); }
-
-
-Range* HPhi::InferRange(Zone* zone) {
- Representation r = representation();
- if (r.IsSmiOrInteger32()) {
- if (block()->IsLoopHeader()) {
- Range* range = r.IsSmi()
- ? new(zone) Range(Smi::kMinValue, Smi::kMaxValue)
- : new(zone) Range(kMinInt, kMaxInt);
- return range;
- } else {
- Range* range = OperandAt(0)->range()->Copy(zone);
- for (int i = 1; i < OperandCount(); ++i) {
- range->Union(OperandAt(i)->range());
- }
- return range;
- }
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HAdd::InferRange(Zone* zone) {
- Representation r = representation();
- if (r.IsSmiOrInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (!res->AddAndCheckOverflow(r, b) ||
- (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
- (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
- ClearFlag(kCanOverflow);
- }
- res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
- !CheckFlag(kAllUsesTruncatingToInt32) &&
- a->CanBeMinusZero() && b->CanBeMinusZero());
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HSub::InferRange(Zone* zone) {
- Representation r = representation();
- if (r.IsSmiOrInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (!res->SubAndCheckOverflow(r, b) ||
- (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
- (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
- ClearFlag(kCanOverflow);
- }
- res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
- !CheckFlag(kAllUsesTruncatingToInt32) &&
- a->CanBeMinusZero() && b->CanBeZero());
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HMul::InferRange(Zone* zone) {
- Representation r = representation();
- if (r.IsSmiOrInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (!res->MulAndCheckOverflow(r, b) ||
- (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
- (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
- MulMinusOne())) {
- // Truncated int multiplication is too precise and therefore not the
- // same as converting to Double and back.
-      // Handle truncated integer multiplication by -1 specially.
- ClearFlag(kCanOverflow);
- }
- res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
- !CheckFlag(kAllUsesTruncatingToInt32) &&
- ((a->CanBeZero() && b->CanBeNegative()) ||
- (a->CanBeNegative() && b->CanBeZero())));
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HDiv::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* result = new(zone) Range();
- result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
- (a->CanBeMinusZero() ||
- (a->CanBeZero() && b->CanBeNegative())));
- if (!a->Includes(kMinInt) || !b->Includes(-1)) {
- ClearFlag(kCanOverflow);
- }
-
- if (!b->CanBeZero()) {
- ClearFlag(kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HMathFloorOfDiv::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* result = new(zone) Range();
- result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
- (a->CanBeMinusZero() ||
- (a->CanBeZero() && b->CanBeNegative())));
- if (!a->Includes(kMinInt)) {
- ClearFlag(kLeftCanBeMinInt);
- }
-
- if (!a->CanBeNegative()) {
- ClearFlag(HValue::kLeftCanBeNegative);
- }
-
- if (!a->CanBePositive()) {
- ClearFlag(HValue::kLeftCanBePositive);
- }
-
- if (!a->Includes(kMinInt) || !b->Includes(-1)) {
- ClearFlag(kCanOverflow);
- }
-
- if (!b->CanBeZero()) {
- ClearFlag(kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-// Returns the absolute value of its argument minus one, avoiding undefined
-// behavior at kMinInt.
-static int32_t AbsMinus1(int32_t a) { return a < 0 ? -(a + 1) : (a - 1); }
-
-
-Range* HMod::InferRange(Zone* zone) {
- if (representation().IsInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
-
- // The magnitude of the modulus is bounded by the right operand.
- int32_t positive_bound = Max(AbsMinus1(b->lower()), AbsMinus1(b->upper()));
-
- // The result of the modulo operation has the sign of its left operand.
- bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
- Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
- a->CanBePositive() ? positive_bound : 0);
-
- result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
- left_can_be_negative);
-
- if (!a->CanBeNegative()) {
- ClearFlag(HValue::kLeftCanBeNegative);
- }
-
- if (!a->Includes(kMinInt) || !b->Includes(-1)) {
- ClearFlag(HValue::kCanOverflow);
- }
-
- if (!b->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
- }
- return result;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-Range* HMathMinMax::InferRange(Zone* zone) {
- if (representation().IsSmiOrInteger32()) {
- Range* a = left()->range();
- Range* b = right()->range();
- Range* res = a->Copy(zone);
- if (operation_ == kMathMax) {
- res->CombinedMax(b);
- } else {
- DCHECK(operation_ == kMathMin);
- res->CombinedMin(b);
- }
- return res;
- } else {
- return HValue::InferRange(zone);
- }
-}
-
-
-void HPushArguments::AddInput(HValue* value) {
- inputs_.Add(NULL, value->block()->zone());
- SetOperandAt(OperandCount() - 1, value);
-}
-
-
-std::ostream& HPhi::PrintTo(std::ostream& os) const { // NOLINT
- os << "[";
- for (int i = 0; i < OperandCount(); ++i) {
- os << " " << NameOf(OperandAt(i)) << " ";
- }
- return os << " uses" << UseCount()
- << representation_from_indirect_uses().Mnemonic() << " "
- << TypeOf(this) << "]";
-}
-
-
-void HPhi::AddInput(HValue* value) {
- inputs_.Add(NULL, value->block()->zone());
- SetOperandAt(OperandCount() - 1, value);
- // Mark phis that may have 'arguments' directly or indirectly as an operand.
- if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
- SetFlag(kIsArguments);
- }
-}
-
-
-bool HPhi::HasRealUses() {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsPhi()) return true;
- }
- return false;
-}
-
-
-HValue* HPhi::GetRedundantReplacement() {
- HValue* candidate = NULL;
- int count = OperandCount();
- int position = 0;
- while (position < count && candidate == NULL) {
- HValue* current = OperandAt(position++);
- if (current != this) candidate = current;
- }
- while (position < count) {
- HValue* current = OperandAt(position++);
- if (current != this && current != candidate) return NULL;
- }
- DCHECK(candidate != this);
- return candidate;
-}
-
-
-void HPhi::DeleteFromGraph() {
- DCHECK(block() != NULL);
- block()->RemovePhi(this);
- DCHECK(block() == NULL);
-}
-
-
-void HPhi::InitRealUses(int phi_id) {
- // Initialize real uses.
- phi_id_ = phi_id;
- // Compute a conservative approximation of truncating uses before inferring
- // representations. The proper, exact computation will be done later, when
- // inserting representation changes.
- SetFlag(kTruncatingToSmi);
- SetFlag(kTruncatingToInt32);
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- if (!value->IsPhi()) {
- Representation rep = value->observed_input_representation(it.index());
- representation_from_non_phi_uses_ =
- representation_from_non_phi_uses().generalize(rep);
- if (rep.IsSmi() || rep.IsInteger32() || rep.IsDouble()) {
- has_type_feedback_from_uses_ = true;
- }
-
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is used by real #%d %s as %s\n",
- id(), value->id(), value->Mnemonic(), rep.Mnemonic());
- }
- if (!value->IsSimulate()) {
- if (!value->CheckFlag(kTruncatingToSmi)) {
- ClearFlag(kTruncatingToSmi);
- }
- if (!value->CheckFlag(kTruncatingToInt32)) {
- ClearFlag(kTruncatingToInt32);
- }
- }
- }
- }
-}
-
-
-void HPhi::AddNonPhiUsesFrom(HPhi* other) {
- if (FLAG_trace_representation) {
- PrintF(
- "generalizing use representation '%s' of #%d Phi "
- "with uses of #%d Phi '%s'\n",
- representation_from_indirect_uses().Mnemonic(), id(), other->id(),
- other->representation_from_non_phi_uses().Mnemonic());
- }
-
- representation_from_indirect_uses_ =
- representation_from_indirect_uses().generalize(
- other->representation_from_non_phi_uses());
-}
-
-
-void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
- while (!list->is_empty()) {
- HSimulate* from = list->RemoveLast();
- ZoneList<HValue*>* from_values = &from->values_;
- for (int i = 0; i < from_values->length(); ++i) {
- if (from->HasAssignedIndexAt(i)) {
- int index = from->GetAssignedIndexAt(i);
- if (HasValueForIndex(index)) continue;
- AddAssignedValue(index, from_values->at(i));
- } else {
- if (pop_count_ > 0) {
- pop_count_--;
- } else {
- AddPushedValue(from_values->at(i));
- }
- }
- }
- pop_count_ += from->pop_count_;
- from->DeleteAndReplaceWith(NULL);
- }
-}
-
-
-std::ostream& HSimulate::PrintDataTo(std::ostream& os) const { // NOLINT
- os << "id=" << ast_id().ToInt();
- if (pop_count_ > 0) os << " pop " << pop_count_;
- if (values_.length() > 0) {
- if (pop_count_ > 0) os << " /";
- for (int i = values_.length() - 1; i >= 0; --i) {
- if (HasAssignedIndexAt(i)) {
- os << " var[" << GetAssignedIndexAt(i) << "] = ";
- } else {
- os << " push ";
- }
- os << NameOf(values_[i]);
- if (i > 0) os << ",";
- }
- }
- return os;
-}
-
-
-void HSimulate::ReplayEnvironment(HEnvironment* env) {
- if (is_done_with_replay()) return;
- DCHECK(env != NULL);
- env->set_ast_id(ast_id());
- env->Drop(pop_count());
- for (int i = values()->length() - 1; i >= 0; --i) {
- HValue* value = values()->at(i);
- if (HasAssignedIndexAt(i)) {
- env->Bind(GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
- set_done_with_replay();
-}
-
-
-static void ReplayEnvironmentNested(const ZoneList<HValue*>* values,
- HCapturedObject* other) {
- for (int i = 0; i < values->length(); ++i) {
- HValue* value = values->at(i);
- if (value->IsCapturedObject()) {
- if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) {
- values->at(i) = other;
- } else {
- ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other);
- }
- }
- }
-}
-
-
-// Replay captured objects by replacing all captured objects with the
-// same capture id in the current and all outer environments.
-void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
- DCHECK(env != NULL);
- while (env != NULL) {
- ReplayEnvironmentNested(env->values(), this);
- env = env->outer();
- }
-}
-
-
-std::ostream& HCapturedObject::PrintDataTo(std::ostream& os) const { // NOLINT
- os << "#" << capture_id() << " ";
- return HDematerializedObject::PrintDataTo(os);
-}
-
-
-void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
- Zone* zone) {
- DCHECK(return_target->IsInlineReturnTarget());
- return_targets_.Add(return_target, zone);
-}
-
-
-std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const { // NOLINT
- os << function()->debug_name()->ToCString().get();
- if (syntactic_tail_call_mode() == TailCallMode::kAllow) {
- os << ", JSTailCall";
- }
- return os;
-}
-
-
-static bool IsInteger32(double value) {
- if (value >= std::numeric_limits<int32_t>::min() &&
- value <= std::numeric_limits<int32_t>::max()) {
- double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
- return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
- }
- return false;
-}
-
-
-HConstant::HConstant(Special special)
- : HTemplateInstruction<0>(HType::TaggedNumber()),
- object_(Handle<Object>::null()),
- object_map_(Handle<Map>::null()),
- bit_field_(HasDoubleValueField::encode(true) |
- InstanceTypeField::encode(kUnknownInstanceType)),
- int32_value_(0) {
- DCHECK_EQ(kHoleNaN, special);
-  // Manipulating the signaling NaN used for the hole in C++, e.g. with
-  // bit_cast, will change its value on ia32 (the x87 stack is used to return
-  // values and stores to the stack silently clear the signaling bit).
-  // Therefore we have to use memcpy to initialize |double_value_| with
-  // kHoleNanInt64 here.
- std::memcpy(&double_value_, &kHoleNanInt64, sizeof(double_value_));
- Initialize(Representation::Double());
-}
-
-
-HConstant::HConstant(Handle<Object> object, Representation r)
- : HTemplateInstruction<0>(HType::FromValue(object)),
- object_(Unique<Object>::CreateUninitialized(object)),
- object_map_(Handle<Map>::null()),
- bit_field_(
- HasStableMapValueField::encode(false) |
- HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) |
- HasDoubleValueField::encode(false) |
- HasExternalReferenceValueField::encode(false) |
- IsNotInNewSpaceField::encode(true) |
- BooleanValueField::encode(object->BooleanValue()) |
- IsUndetectableField::encode(false) | IsCallableField::encode(false) |
- InstanceTypeField::encode(kUnknownInstanceType)) {
- if (object->IsNumber()) {
- double n = object->Number();
- bool has_int32_value = IsInteger32(n);
- bit_field_ = HasInt32ValueField::update(bit_field_, has_int32_value);
- int32_value_ = DoubleToInt32(n);
- bit_field_ = HasSmiValueField::update(
- bit_field_, has_int32_value && Smi::IsValid(int32_value_));
- if (std::isnan(n)) {
- double_value_ = std::numeric_limits<double>::quiet_NaN();
- // Canonicalize object with NaN value.
- DCHECK(object->IsHeapObject()); // NaN can't be a Smi.
- Isolate* isolate = HeapObject::cast(*object)->GetIsolate();
- object = isolate->factory()->nan_value();
- object_ = Unique<Object>::CreateUninitialized(object);
- } else {
- double_value_ = n;
- // Canonicalize object with -0.0 value.
- if (bit_cast<int64_t>(n) == bit_cast<int64_t>(-0.0)) {
- DCHECK(object->IsHeapObject()); // -0.0 can't be a Smi.
- Isolate* isolate = HeapObject::cast(*object)->GetIsolate();
- object = isolate->factory()->minus_zero_value();
- object_ = Unique<Object>::CreateUninitialized(object);
- }
- }
- bit_field_ = HasDoubleValueField::update(bit_field_, true);
- }
- if (object->IsHeapObject()) {
- Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- Isolate* isolate = heap_object->GetIsolate();
- Handle<Map> map(heap_object->map(), isolate);
- bit_field_ = IsNotInNewSpaceField::update(
- bit_field_, !isolate->heap()->InNewSpace(*object));
- bit_field_ = InstanceTypeField::update(bit_field_, map->instance_type());
- bit_field_ =
- IsUndetectableField::update(bit_field_, map->is_undetectable());
- bit_field_ = IsCallableField::update(bit_field_, map->is_callable());
- if (map->is_stable()) object_map_ = Unique<Map>::CreateImmovable(map);
- bit_field_ = HasStableMapValueField::update(
- bit_field_,
- HasMapValue() && Handle<Map>::cast(heap_object)->is_stable());
- }
-
- Initialize(r);
-}
-
-
-HConstant::HConstant(Unique<Object> object, Unique<Map> object_map,
- bool has_stable_map_value, Representation r, HType type,
- bool is_not_in_new_space, bool boolean_value,
- bool is_undetectable, InstanceType instance_type)
- : HTemplateInstruction<0>(type),
- object_(object),
- object_map_(object_map),
- bit_field_(HasStableMapValueField::encode(has_stable_map_value) |
- HasSmiValueField::encode(false) |
- HasInt32ValueField::encode(false) |
- HasDoubleValueField::encode(false) |
- HasExternalReferenceValueField::encode(false) |
- IsNotInNewSpaceField::encode(is_not_in_new_space) |
- BooleanValueField::encode(boolean_value) |
- IsUndetectableField::encode(is_undetectable) |
- InstanceTypeField::encode(instance_type)) {
- DCHECK(!object.handle().is_null());
- DCHECK(!type.IsTaggedNumber() || type.IsNone());
- Initialize(r);
-}
-
-
-HConstant::HConstant(int32_t integer_value, Representation r,
- bool is_not_in_new_space, Unique<Object> object)
- : object_(object),
- object_map_(Handle<Map>::null()),
- bit_field_(HasStableMapValueField::encode(false) |
- HasSmiValueField::encode(Smi::IsValid(integer_value)) |
- HasInt32ValueField::encode(true) |
- HasDoubleValueField::encode(true) |
- HasExternalReferenceValueField::encode(false) |
- IsNotInNewSpaceField::encode(is_not_in_new_space) |
- BooleanValueField::encode(integer_value != 0) |
- IsUndetectableField::encode(false) |
- InstanceTypeField::encode(kUnknownInstanceType)),
- int32_value_(integer_value),
- double_value_(FastI2D(integer_value)) {
- // It's possible to create a constant with a value in Smi-range but stored
- // in a (pre-existing) HeapNumber. See crbug.com/349878.
- bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
- bool is_smi = HasSmiValue() && !could_be_heapobject;
- set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
- Initialize(r);
-}
-
-HConstant::HConstant(double double_value, Representation r,
- bool is_not_in_new_space, Unique<Object> object)
- : object_(object),
- object_map_(Handle<Map>::null()),
- bit_field_(HasStableMapValueField::encode(false) |
- HasInt32ValueField::encode(IsInteger32(double_value)) |
- HasDoubleValueField::encode(true) |
- HasExternalReferenceValueField::encode(false) |
- IsNotInNewSpaceField::encode(is_not_in_new_space) |
- BooleanValueField::encode(double_value != 0 &&
- !std::isnan(double_value)) |
- IsUndetectableField::encode(false) |
- InstanceTypeField::encode(kUnknownInstanceType)),
- int32_value_(DoubleToInt32(double_value)) {
- bit_field_ = HasSmiValueField::update(
- bit_field_, HasInteger32Value() && Smi::IsValid(int32_value_));
- // It's possible to create a constant with a value in Smi-range but stored
- // in a (pre-existing) HeapNumber. See crbug.com/349878.
- bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
- bool is_smi = HasSmiValue() && !could_be_heapobject;
- set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
- if (std::isnan(double_value)) {
- double_value_ = std::numeric_limits<double>::quiet_NaN();
- } else {
- double_value_ = double_value;
- }
- Initialize(r);
-}
-
-
-HConstant::HConstant(ExternalReference reference)
- : HTemplateInstruction<0>(HType::Any()),
- object_(Unique<Object>(Handle<Object>::null())),
- object_map_(Handle<Map>::null()),
- bit_field_(
- HasStableMapValueField::encode(false) |
- HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) |
- HasDoubleValueField::encode(false) |
- HasExternalReferenceValueField::encode(true) |
- IsNotInNewSpaceField::encode(true) | BooleanValueField::encode(true) |
- IsUndetectableField::encode(false) |
- InstanceTypeField::encode(kUnknownInstanceType)),
- external_reference_value_(reference) {
- Initialize(Representation::External());
-}
-
-
-void HConstant::Initialize(Representation r) {
- if (r.IsNone()) {
- if (HasSmiValue() && SmiValuesAre31Bits()) {
- r = Representation::Smi();
- } else if (HasInteger32Value()) {
- r = Representation::Integer32();
- } else if (HasDoubleValue()) {
- r = Representation::Double();
- } else if (HasExternalReferenceValue()) {
- r = Representation::External();
- } else {
- Handle<Object> object = object_.handle();
- if (object->IsJSObject()) {
- // Try to eagerly migrate JSObjects that have deprecated maps.
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (js_object->map()->is_deprecated()) {
- JSObject::TryMigrateInstance(js_object);
- }
- }
- r = Representation::Tagged();
- }
- }
- if (r.IsSmi()) {
-    // If we have an existing handle, zap it: it might be a heap number that
-    // we must not re-use when later copying this HConstant to Tagged
-    // representation, since having Smi representation now could cause heap
-    // object checks not to get emitted.
- object_ = Unique<Object>(Handle<Object>::null());
- }
- if (r.IsSmiOrInteger32() && object_.handle().is_null()) {
- // If it's not a heap object, it can't be in new space.
- bit_field_ = IsNotInNewSpaceField::update(bit_field_, true);
- }
- set_representation(r);
- SetFlag(kUseGVN);
-}
-
-
-bool HConstant::ImmortalImmovable() const {
- if (HasInteger32Value()) {
- return false;
- }
- if (HasDoubleValue()) {
- if (IsSpecialDouble()) {
- return true;
- }
- return false;
- }
- if (HasExternalReferenceValue()) {
- return false;
- }
-
- DCHECK(!object_.handle().is_null());
- Heap* heap = isolate()->heap();
- DCHECK(!object_.IsKnownGlobal(heap->minus_zero_value()));
- DCHECK(!object_.IsKnownGlobal(heap->nan_value()));
- return
-#define IMMORTAL_IMMOVABLE_ROOT(name) \
- object_.IsKnownGlobal(heap->root(Heap::k##name##RootIndex)) ||
- IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
-#undef IMMORTAL_IMMOVABLE_ROOT
-#define INTERNALIZED_STRING(name, value) \
- object_.IsKnownGlobal(heap->name()) ||
- INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
-#undef INTERNALIZED_STRING
-#define STRING_TYPE(NAME, size, name, Name) \
- object_.IsKnownGlobal(heap->name##_map()) ||
- STRING_TYPE_LIST(STRING_TYPE)
-#undef STRING_TYPE
- false;
-}
-
-
-bool HConstant::EmitAtUses() {
- DCHECK(IsLinked());
- if (block()->graph()->has_osr() &&
- block()->graph()->IsStandardConstant(this)) {
- return true;
- }
- if (HasNoUses()) return true;
- if (IsCell()) return false;
- if (representation().IsDouble()) return false;
- if (representation().IsExternal()) return false;
- return true;
-}
-
-
-HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
- if (r.IsSmi() && !HasSmiValue()) return NULL;
- if (r.IsInteger32() && !HasInteger32Value()) return NULL;
- if (r.IsDouble() && !HasDoubleValue()) return NULL;
- if (r.IsExternal() && !HasExternalReferenceValue()) return NULL;
- if (HasInteger32Value()) {
- return new (zone) HConstant(int32_value_, r, NotInNewSpace(), object_);
- }
- if (HasDoubleValue()) {
- return new (zone) HConstant(double_value_, r, NotInNewSpace(), object_);
- }
- if (HasExternalReferenceValue()) {
- return new(zone) HConstant(external_reference_value_);
- }
- DCHECK(!object_.handle().is_null());
- return new (zone) HConstant(object_, object_map_, HasStableMapValue(), r,
- type_, NotInNewSpace(), BooleanValue(),
- IsUndetectable(), GetInstanceType());
-}
-
-
-Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
- HConstant* res = NULL;
- if (HasInteger32Value()) {
- res = new (zone) HConstant(int32_value_, Representation::Integer32(),
- NotInNewSpace(), object_);
- } else if (HasDoubleValue()) {
- res = new (zone)
- HConstant(DoubleToInt32(double_value_), Representation::Integer32(),
- NotInNewSpace(), object_);
- }
- return res != NULL ? Just(res) : Nothing<HConstant*>();
-}
-
-
-Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Isolate* isolate,
- Zone* zone) {
- HConstant* res = NULL;
- Handle<Object> handle = this->handle(isolate);
- if (handle->IsBoolean()) {
- res = handle->BooleanValue() ?
- new(zone) HConstant(1) : new(zone) HConstant(0);
- } else if (handle->IsUndefined(isolate)) {
- res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
- } else if (handle->IsNull(isolate)) {
- res = new(zone) HConstant(0);
- } else if (handle->IsString()) {
- res = new(zone) HConstant(String::ToNumber(Handle<String>::cast(handle)));
- }
- return res != NULL ? Just(res) : Nothing<HConstant*>();
-}
-
-
-std::ostream& HConstant::PrintDataTo(std::ostream& os) const { // NOLINT
- if (HasInteger32Value()) {
- os << int32_value_ << " ";
- } else if (HasDoubleValue()) {
- os << double_value_ << " ";
- } else if (HasExternalReferenceValue()) {
- os << reinterpret_cast<void*>(external_reference_value_.address()) << " ";
- } else {
- // The handle() method is silently and lazily mutating the object.
- Handle<Object> h = const_cast<HConstant*>(this)->handle(isolate());
- os << Brief(*h) << " ";
- if (HasStableMapValue()) os << "[stable-map] ";
- if (HasObjectMap()) os << "[map " << *ObjectMap().handle() << "] ";
- }
- if (!NotInNewSpace()) os << "[new space] ";
- return os;
-}
-
-
-std::ostream& HBinaryOperation::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(left()) << " " << NameOf(right());
- if (CheckFlag(kCanOverflow)) os << " !";
- if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
- return os;
-}
-
-
-void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
- DCHECK(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
-
- if (representation().IsSmi() && HasNonSmiUse()) {
- UpdateRepresentation(
- Representation::Integer32(), h_infer, "use requirements");
- }
-
- if (observed_output_representation_.IsNone()) {
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
- } else {
- new_rep = RepresentationFromOutput();
- UpdateRepresentation(new_rep, h_infer, "output");
- }
-}
-
-
-Representation HBinaryOperation::RepresentationFromInputs() {
- // Determine the worst case of observed input representations and
- // the currently assumed output representation.
- Representation rep = representation();
- for (int i = 1; i <= 2; ++i) {
- rep = rep.generalize(observed_input_representation(i));
- }
- // If any of the actual input representation is more general than what we
- // have so far but not Tagged, use that representation instead.
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
- if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
-
- return rep;
-}
-
-
-bool HBinaryOperation::IgnoreObservedOutputRepresentation(
- Representation current_rep) {
- return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
- (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
- // Mul in Integer32 mode would be too precise.
- (!this->IsMul() || HMul::cast(this)->MulMinusOne());
-}
-
-
-Representation HBinaryOperation::RepresentationFromOutput() {
- Representation rep = representation();
- // Consider observed output representation, but ignore it if it's Double,
- // this instruction is not a division, and all its uses are truncating
- // to Integer32.
- if (observed_output_representation_.is_more_general_than(rep) &&
- !IgnoreObservedOutputRepresentation(rep)) {
- return observed_output_representation_;
- }
- return Representation::None();
-}
-
-
-void HBinaryOperation::AssumeRepresentation(Representation r) {
- set_observed_input_representation(1, r);
- set_observed_input_representation(2, r);
- HValue::AssumeRepresentation(r);
-}
-
-
-void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
- DCHECK(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- // Do not care about uses.
-}
-
-
-Range* HBitwise::InferRange(Zone* zone) {
- if (op() == Token::BIT_XOR) {
- if (left()->HasRange() && right()->HasRange()) {
- // The maximum value has the high bit, and all bits below, set:
- // (1 << high) - 1.
- // If the range can be negative, the minimum int is a negative number with
- // the high bit, and all bits below, unset:
- // -(1 << high).
- // If it cannot be negative, conservatively choose 0 as minimum int.
- int64_t left_upper = left()->range()->upper();
- int64_t left_lower = left()->range()->lower();
- int64_t right_upper = right()->range()->upper();
- int64_t right_lower = right()->range()->lower();
-
- if (left_upper < 0) left_upper = ~left_upper;
- if (left_lower < 0) left_lower = ~left_lower;
- if (right_upper < 0) right_upper = ~right_upper;
- if (right_lower < 0) right_lower = ~right_lower;
-
- int high = MostSignificantBit(
- static_cast<uint32_t>(
- left_upper | left_lower | right_upper | right_lower));
-
- int64_t limit = 1;
- limit <<= high;
- int32_t min = (left()->range()->CanBeNegative() ||
- right()->range()->CanBeNegative())
- ? static_cast<int32_t>(-limit) : 0;
- return new(zone) Range(min, static_cast<int32_t>(limit - 1));
- }
- Range* result = HValue::InferRange(zone);
- result->set_can_be_minus_zero(false);
- return result;
- }
- const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
- int32_t left_mask = (left()->range() != NULL)
- ? left()->range()->Mask()
- : kDefaultMask;
- int32_t right_mask = (right()->range() != NULL)
- ? right()->range()->Mask()
- : kDefaultMask;
- int32_t result_mask = (op() == Token::BIT_AND)
- ? left_mask & right_mask
- : left_mask | right_mask;
- if (result_mask >= 0) return new(zone) Range(0, result_mask);
-
- Range* result = HValue::InferRange(zone);
- result->set_can_be_minus_zero(false);
- return result;
-}
-
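The BIT_XOR branch above bounds the result by the highest bit position that can appear in either operand: if every bound (negatives bit-complemented) fits below bit `high`, then so does any XOR of values in those ranges. A minimal standalone sketch of that reasoning, assuming MostSignificantBit() returns the 1-based position of the highest set bit; the HighestBitPosition helper below is illustrative, not V8's.

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for MostSignificantBit(): 1-based position of the
    // highest set bit, 0 for zero.
    static int HighestBitPosition(uint32_t x) {
      int n = 0;
      while (x != 0) { x >>= 1; ++n; }
      return n;
    }

    int main() {
      // Example: left in [3, 5], right in [1, 2]; neither range is negative.
      uint32_t bits = 3u | 5u | 1u | 2u;        // 0b111
      int high = HighestBitPosition(bits);      // 3
      int64_t limit = int64_t{1} << high;       // 8
      std::printf("xor range: [0, %lld]\n",
                  static_cast<long long>(limit - 1));  // [0, 7]
      return 0;
    }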
-
-Range* HSar::InferRange(Zone* zone) {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy(zone)
- : new(zone) Range();
- result->Sar(c->Integer32Value());
- return result;
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HShr::InferRange(Zone* zone) {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- int shift_count = c->Integer32Value() & 0x1f;
- if (left()->range()->CanBeNegative()) {
- // Only compute bounds if the result always fits into an int32.
- return (shift_count >= 1)
- ? new(zone) Range(0,
- static_cast<uint32_t>(0xffffffff) >> shift_count)
- : new(zone) Range();
- } else {
- // For positive inputs we can use the >> operator.
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy(zone)
- : new(zone) Range();
- result->Sar(c->Integer32Value());
- return result;
- }
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HShl::InferRange(Zone* zone) {
- if (right()->IsConstant()) {
- HConstant* c = HConstant::cast(right());
- if (c->HasInteger32Value()) {
- Range* result = (left()->range() != NULL)
- ? left()->range()->Copy(zone)
- : new(zone) Range();
- result->Shl(c->Integer32Value());
- return result;
- }
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HLoadNamedField::InferRange(Zone* zone) {
- if (access().representation().IsInteger8()) {
- return new(zone) Range(kMinInt8, kMaxInt8);
- }
- if (access().representation().IsUInteger8()) {
- return new(zone) Range(kMinUInt8, kMaxUInt8);
- }
- if (access().representation().IsInteger16()) {
- return new(zone) Range(kMinInt16, kMaxInt16);
- }
- if (access().representation().IsUInteger16()) {
- return new(zone) Range(kMinUInt16, kMaxUInt16);
- }
- if (access().IsStringLength()) {
- return new(zone) Range(0, String::kMaxLength);
- }
- return HValue::InferRange(zone);
-}
-
-
-Range* HLoadKeyed::InferRange(Zone* zone) {
- switch (elements_kind()) {
- case INT8_ELEMENTS:
- return new(zone) Range(kMinInt8, kMaxInt8);
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- return new(zone) Range(kMinUInt8, kMaxUInt8);
- case INT16_ELEMENTS:
- return new(zone) Range(kMinInt16, kMaxInt16);
- case UINT16_ELEMENTS:
- return new(zone) Range(kMinUInt16, kMaxUInt16);
- default:
- return HValue::InferRange(zone);
- }
-}
-
-
-std::ostream& HCompareGeneric::PrintDataTo(std::ostream& os) const { // NOLINT
- os << Token::Name(token()) << " ";
- return HBinaryOperation::PrintDataTo(os);
-}
-
-
-std::ostream& HStringCompareAndBranch::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << Token::Name(token()) << " ";
- return HControlInstruction::PrintDataTo(os);
-}
-
-
-std::ostream& HCompareNumericAndBranch::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << Token::Name(token()) << " " << NameOf(left()) << " " << NameOf(right());
- return HControlInstruction::PrintDataTo(os);
-}
-
-
-std::ostream& HCompareObjectEqAndBranch::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << NameOf(left()) << " " << NameOf(right());
- return HControlInstruction::PrintDataTo(os);
-}
-
-
-bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (known_successor_index() != kNoKnownSuccessorIndex) {
- *block = SuccessorAt(known_successor_index());
- return true;
- }
- if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
- *block = HConstant::cast(left())->DataEquals(HConstant::cast(right()))
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (known_successor_index() != kNoKnownSuccessorIndex) {
- *block = SuccessorAt(known_successor_index());
- return true;
- }
- if (FLAG_fold_constants && value()->IsConstant()) {
- *block = HConstant::cast(value())->HasStringValue()
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- if (value()->type().IsString()) {
- *block = FirstSuccessor();
- return true;
- }
- if (value()->type().IsSmi() ||
- value()->type().IsNull() ||
- value()->type().IsBoolean() ||
- value()->type().IsUndefined() ||
- value()->type().IsJSReceiver()) {
- *block = SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (FLAG_fold_constants && value()->IsConstant()) {
- *block = HConstant::cast(value())->IsUndetectable()
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- if (value()->type().IsNull() || value()->type().IsUndefined()) {
- *block = FirstSuccessor();
- return true;
- }
- if (value()->type().IsBoolean() ||
- value()->type().IsSmi() ||
- value()->type().IsString() ||
- value()->type().IsJSReceiver()) {
- *block = SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (FLAG_fold_constants && value()->IsConstant()) {
- InstanceType type = HConstant::cast(value())->GetInstanceType();
- *block = (from_ <= type) && (type <= to_)
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-void HCompareHoleAndBranch::InferRepresentation(
- HInferRepresentationPhase* h_infer) {
- ChangeRepresentation(value()->representation());
-}
-
-
-bool HCompareNumericAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (left() == right() &&
- left()->representation().IsSmiOrInteger32()) {
- *block = (token() == Token::EQ ||
- token() == Token::EQ_STRICT ||
- token() == Token::LTE ||
- token() == Token::GTE)
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
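The fold above only fires for Smi/Integer32 inputs: for an integer x, x == x, x <= x and x >= x are always true, while a double operand could be NaN, making even x == x false, so no successor can be predicted. A tiny standalone illustration of why the representation check matters:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = std::nan("");
      std::printf("%d %d\n", x == x, x <= x);  // 0 0: not foldable for doubles
      int y = 42;
      std::printf("%d %d\n", y == y, y <= y);  // 1 1: foldable for integers
      return 0;
    }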
-
-
-std::ostream& HGoto::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << *SuccessorAt(0);
-}
-
-
-void HCompareNumericAndBranch::InferRepresentation(
- HInferRepresentationPhase* h_infer) {
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- Representation observed_left = observed_input_representation(0);
- Representation observed_right = observed_input_representation(1);
-
- Representation rep = Representation::None();
- rep = rep.generalize(observed_left);
- rep = rep.generalize(observed_right);
- if (rep.IsNone() || rep.IsSmiOrInteger32()) {
- if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
- if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
- } else {
- rep = Representation::Double();
- }
-
- if (rep.IsDouble()) {
- // According to the ES5 spec (11.9.3, 11.8.5), equality comparisons (==, ===
- // and !=) have special handling of undefined, e.g. undefined == undefined
- // is 'true'. Relational comparisons have a different semantic, first
- // calling ToPrimitive() on their arguments. The standard Crankshaft
- // tagged-to-double conversion to ensure the HCompareNumericAndBranch's
- // inputs are doubles caused 'undefined' to be converted to NaN. That's
- // compatible out of the box with ordered relational comparisons (<, >, <=,
- // >=). However, for equality comparisons (and for 'in' and 'instanceof'),
- // it is not consistent with the spec. For example, it would cause undefined
- // == undefined (should be true) to be evaluated as NaN == NaN
- // (false). Therefore, any comparisons other than ordered relational
- // comparisons must cause a deopt when one of their arguments is undefined.
- // See also v8:1434
- if (Token::IsOrderedRelationalCompareOp(token_)) {
- SetFlag(kTruncatingToNumber);
- }
- }
- ChangeRepresentation(rep);
-}
-
-
-std::ostream& HParameter::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << index();
-}
-
-
-std::ostream& HLoadNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(object()) << access_;
-
- if (maps() != NULL) {
- os << " [" << *maps()->at(0).handle();
- for (int i = 1; i < maps()->size(); ++i) {
- os << "," << *maps()->at(i).handle();
- }
- os << "]";
- }
-
- if (HasDependency()) os << " " << NameOf(dependency());
- return os;
-}
-
-
-std::ostream& HLoadKeyed::PrintDataTo(std::ostream& os) const { // NOLINT
- if (!is_fixed_typed_array()) {
- os << NameOf(elements());
- } else {
- DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
- os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
- }
-
- os << "[" << NameOf(key());
- if (IsDehoisted()) os << " + " << base_offset();
- os << "]";
-
- if (HasDependency()) os << " " << NameOf(dependency());
- if (RequiresHoleCheck()) os << " check_hole";
- return os;
-}
-
-
-bool HLoadKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
- // The base offset is usually simply the size of the array header, except
- // when dehoisting adds an additional offset due to array index key
- // manipulation, in which case it becomes (array header size +
- // constant-offset-from-key * kPointerSize).
- uint32_t base_offset = BaseOffsetField::decode(bit_field_);
- v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset;
- addition_result += increase_by_value;
- if (!addition_result.IsValid()) return false;
- base_offset = addition_result.ValueOrDie();
- if (!BaseOffsetField::is_valid(base_offset)) return false;
- bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
- return true;
-}
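TryIncreaseBaseOffset above relies on CheckedNumeric to reject additions that would wrap around before the new offset is stored. A minimal sketch of the same guard, using the GCC/Clang __builtin_add_overflow intrinsic as a stand-in for CheckedNumeric:

    #include <cstdint>

    // Returns false (and leaves *base_offset untouched) if the addition would
    // overflow uint32_t; mirrors the CheckedNumeric pattern above.
    static bool TryIncreaseOffset(uint32_t* base_offset, uint32_t increase) {
      uint32_t sum = 0;
      if (__builtin_add_overflow(*base_offset, increase, &sum)) return false;
      *base_offset = sum;
      return true;
    }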
-
-
-bool HLoadKeyed::UsesMustHandleHole() const {
- if (IsFastPackedElementsKind(elements_kind())) {
- return false;
- }
-
- if (IsFixedTypedArrayElementsKind(elements_kind())) {
- return false;
- }
-
- if (hole_mode() == ALLOW_RETURN_HOLE) {
- if (IsFastDoubleElementsKind(elements_kind())) {
- return AllUsesCanTreatHoleAsNaN();
- }
- return true;
- }
-
- if (IsFastDoubleElementsKind(elements_kind())) {
- return false;
- }
-
- // Holes are only returned as tagged values.
- if (!representation().IsTagged()) {
- return false;
- }
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!use->IsChange()) return false;
- }
-
- return true;
-}
-
-
-bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
- return IsFastDoubleElementsKind(elements_kind()) &&
- CheckUsesForFlag(HValue::kTruncatingToNumber);
-}
-
-
-bool HLoadKeyed::RequiresHoleCheck() const {
- if (IsFastPackedElementsKind(elements_kind())) {
- return false;
- }
-
- if (IsFixedTypedArrayElementsKind(elements_kind())) {
- return false;
- }
-
- if (hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- return false;
- }
-
- return !UsesMustHandleHole();
-}
-
-HValue* HCallWithDescriptor::Canonicalize() {
- if (kind() != Code::KEYED_LOAD_IC) return this;
-
- // Recognize generic keyed loads that use a property name generated
- // by a for-in statement as a key, and rewrite them into a fast
- // property load by index.
- typedef LoadWithVectorDescriptor Descriptor;
- HValue* key = parameter(Descriptor::kName);
- if (key->IsLoadKeyed()) {
- HLoadKeyed* key_load = HLoadKeyed::cast(key);
- if (key_load->elements()->IsForInCacheArray()) {
- HForInCacheArray* names_cache =
- HForInCacheArray::cast(key_load->elements());
-
- HValue* object = parameter(Descriptor::kReceiver);
- if (names_cache->enumerable() == object) {
- HForInCacheArray* index_cache =
- names_cache->index_cache();
- HCheckMapValue* map_check = HCheckMapValue::New(
- block()->graph()->isolate(), block()->graph()->zone(),
- block()->graph()->GetInvalidContext(), object, names_cache->map());
- HInstruction* index = HLoadKeyed::New(
- block()->graph()->isolate(), block()->graph()->zone(),
- block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
- key_load->key(), nullptr, key_load->elements_kind());
- map_check->InsertBefore(this);
- index->InsertBefore(this);
- return Prepend(new (block()->zone()) HLoadFieldByIndex(object, index));
- }
- }
- }
- return this;
-}
-
-std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(object()) << access_ << " = " << NameOf(value());
- if (NeedsWriteBarrier()) os << " (write-barrier)";
- if (has_transition()) os << " (transition map " << *transition_map() << ")";
- return os;
-}
-
-
-std::ostream& HStoreKeyed::PrintDataTo(std::ostream& os) const { // NOLINT
- if (!is_fixed_typed_array()) {
- os << NameOf(elements());
- } else {
- DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
- os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
- }
-
- os << "[" << NameOf(key());
- if (IsDehoisted()) os << " + " << base_offset();
- return os << "] = " << NameOf(value());
-}
-
-
-std::ostream& HTransitionElementsKind::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << NameOf(object());
- ElementsKind from_kind = original_map().handle()->elements_kind();
- ElementsKind to_kind = transitioned_map().handle()->elements_kind();
- os << " " << *original_map().handle() << " ["
- << ElementsAccessor::ForKind(from_kind)->name() << "] -> "
- << *transitioned_map().handle() << " ["
- << ElementsAccessor::ForKind(to_kind)->name() << "]";
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) os << " (simple)";
- return os;
-}
-
-
-std::ostream& HInnerAllocatedObject::PrintDataTo(
- std::ostream& os) const { // NOLINT
- os << NameOf(base_object()) << " offset ";
- return offset()->PrintTo(os);
-}
-
-
-std::ostream& HLoadContextSlot::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(value()) << "[" << slot_index() << "]";
-}
-
-
-std::ostream& HStoreContextSlot::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << NameOf(context()) << "[" << slot_index()
- << "] = " << NameOf(value());
-}
-
-
-// Implementation of type inference and type conversions. Calculates
-// the inferred type of this instruction based on the input operands.
-
-HType HValue::CalculateInferredType() {
- return type_;
-}
-
-
-HType HPhi::CalculateInferredType() {
- if (OperandCount() == 0) return HType::Tagged();
- HType result = OperandAt(0)->type();
- for (int i = 1; i < OperandCount(); ++i) {
- HType current = OperandAt(i)->type();
- result = result.Combine(current);
- }
- return result;
-}
-
-
-HType HChange::CalculateInferredType() {
- if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
- return type();
-}
-
-
-Representation HUnaryMathOperation::RepresentationFromInputs() {
- if (SupportsFlexibleFloorAndRound() &&
- (op_ == kMathFloor || op_ == kMathRound)) {
- // Floor and Round always take a double input. The integral result can be
- // used as an integer or a double. Infer the representation from the uses.
- return Representation::None();
- }
- Representation rep = representation();
- // If any of the actual input representations is more general than what we
- // have so far but not Tagged, use that representation instead.
- Representation input_rep = value()->representation();
- if (!input_rep.IsTagged()) {
- rep = rep.generalize(input_rep);
- }
- return rep;
-}
-
-
-bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
- DCHECK(side_effect == kNewSpacePromotion);
- DCHECK(!IsAllocationFolded());
- Zone* zone = block()->zone();
- Isolate* isolate = block()->isolate();
- if (!FLAG_use_allocation_folding) return false;
-
- // Try to fold allocations together with their dominating allocations.
- if (!dominator->IsAllocate()) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s)\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
- // Check whether we are folding within the same block for local folding.
- if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
- HAllocate* dominator_allocate = HAllocate::cast(dominator);
- HValue* dominator_size = dominator_allocate->size();
- HValue* current_size = size();
-
- // TODO(hpayer): Add support for non-constant allocation in dominator.
- if (!current_size->IsInteger32Constant() ||
- !dominator_size->IsInteger32Constant()) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), "
- "dynamic allocation size in dominator\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
- if (IsAllocationFoldingDominator()) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), already dominator\n", id(),
- Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
- if (!IsFoldable(dominator_allocate)) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n", id(),
- Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
- DCHECK(
- (IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) ||
- (IsOldSpaceAllocation() && dominator_allocate->IsOldSpaceAllocation()));
-
- // First update the size of the dominator allocate instruction.
- dominator_size = dominator_allocate->size();
- int32_t original_object_size =
- HConstant::cast(dominator_size)->GetInteger32Constant();
- int32_t dominator_size_constant = original_object_size;
-
- if (MustAllocateDoubleAligned()) {
- if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
- dominator_size_constant += kDoubleSize / 2;
- }
- }
-
- int32_t current_size_max_value = size()->GetInteger32Constant();
- int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
-
- // Since we clear the first word after folded memory, we cannot use the
- // whole kMaxRegularHeapObjectSize memory.
- if (new_dominator_size > kMaxRegularHeapObjectSize - kPointerSize) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
- id(), Mnemonic(), dominator_allocate->id(),
- dominator_allocate->Mnemonic(), new_dominator_size);
- }
- return false;
- }
-
- HInstruction* new_dominator_size_value = HConstant::CreateAndInsertBefore(
- isolate, zone, context(), new_dominator_size, Representation::None(),
- dominator_allocate);
-
- dominator_allocate->UpdateSize(new_dominator_size_value);
-
- if (MustAllocateDoubleAligned()) {
- if (!dominator_allocate->MustAllocateDoubleAligned()) {
- dominator_allocate->MakeDoubleAligned();
- }
- }
-
- if (!dominator_allocate->IsAllocationFoldingDominator()) {
- HAllocate* first_alloc =
- HAllocate::New(isolate, zone, dominator_allocate->context(),
- dominator_size, dominator_allocate->type(),
- IsNewSpaceAllocation() ? NOT_TENURED : TENURED,
- JS_OBJECT_TYPE, block()->graph()->GetConstant0());
- first_alloc->InsertAfter(dominator_allocate);
- dominator_allocate->ReplaceAllUsesWith(first_alloc);
- dominator_allocate->MakeAllocationFoldingDominator();
- first_alloc->MakeFoldedAllocation(dominator_allocate);
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) inserted for dominator #%d (%s)\n", first_alloc->id(),
- first_alloc->Mnemonic(), dominator_allocate->id(),
- dominator_allocate->Mnemonic());
- }
- }
-
- MakeFoldedAllocation(dominator_allocate);
-
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) folded into #%d (%s), new dominator size: %d\n", id(),
- Mnemonic(), dominator_allocate->id(), dominator_allocate->Mnemonic(),
- new_dominator_size);
- }
- return true;
-}
-
-
-std::ostream& HAllocate::PrintDataTo(std::ostream& os) const { // NOLINT
- os << NameOf(size()) << " (";
- if (IsNewSpaceAllocation()) os << "N";
- if (IsOldSpaceAllocation()) os << "P";
- if (MustAllocateDoubleAligned()) os << "A";
- if (MustPrefillWithFiller()) os << "F";
- if (IsAllocationFoldingDominator()) os << "d";
- if (IsAllocationFolded()) os << "f";
- return os << ")";
-}
-
-
-bool HStoreKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
- // The base offset is usually simply the size of the array header, except
- // when dehoisting adds an additional offset due to array index key
- // manipulation, in which case it becomes (array header size +
- // constant-offset-from-key * kPointerSize).
- v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset_;
- addition_result += increase_by_value;
- if (!addition_result.IsValid()) return false;
- base_offset_ = addition_result.ValueOrDie();
- return true;
-}
-
-
-bool HStoreKeyed::NeedsCanonicalization() {
- switch (value()->opcode()) {
- case kLoadKeyed: {
- ElementsKind load_kind = HLoadKeyed::cast(value())->elements_kind();
- return IsFixedFloatElementsKind(load_kind);
- }
- case kChange: {
- Representation from = HChange::cast(value())->from();
- return from.IsTagged() || from.IsHeapObject();
- }
- case kConstant:
- // Double constants are canonicalized upon construction.
- return false;
- default:
- return !value()->IsBinaryOperation();
- }
-}
-
-
-#define H_CONSTANT_INT(val) \
- HConstant::New(isolate, zone, context, static_cast<int32_t>(val))
-#define H_CONSTANT_DOUBLE(val) \
- HConstant::New(isolate, zone, context, static_cast<double>(val))
-
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
- HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
- HValue* left, HValue* right) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (IsInt32Double(double_res)) { \
- return H_CONSTANT_INT(double_res); \
- } \
- return H_CONSTANT_DOUBLE(double_res); \
- } \
- } \
- return new (zone) HInstr(context, left, right); \
- }
-
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
-
-#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-
-
-HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- PretenureFlag pretenure_flag,
- StringAddFlags flags,
- Handle<AllocationSite> allocation_site) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_right = HConstant::cast(right);
- HConstant* c_left = HConstant::cast(left);
- if (c_left->HasStringValue() && c_right->HasStringValue()) {
- Handle<String> left_string = c_left->StringValue();
- Handle<String> right_string = c_right->StringValue();
- // Prevent a possible exception due to an invalid string length.
- if (left_string->length() + right_string->length() < String::kMaxLength) {
- MaybeHandle<String> concat = isolate->factory()->NewConsString(
- c_left->StringValue(), c_right->StringValue());
- return HConstant::New(isolate, zone, context, concat.ToHandleChecked());
- }
- }
- }
- return new (zone)
- HStringAdd(context, left, right, pretenure_flag, flags, allocation_site);
-}
-
-
-std::ostream& HStringAdd::PrintDataTo(std::ostream& os) const { // NOLINT
- if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
- os << "_CheckBoth";
- } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) {
- os << "_CheckLeft";
- } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
- os << "_CheckRight";
- }
- HBinaryOperation::PrintDataTo(os);
- os << " (";
- if (pretenure_flag() == NOT_TENURED)
- os << "N";
- else if (pretenure_flag() == TENURED)
- os << "D";
- return os << ")";
-}
-
-
-HInstruction* HStringCharFromCode::New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* char_code) {
- if (FLAG_fold_constants && char_code->IsConstant()) {
- HConstant* c_code = HConstant::cast(char_code);
- if (c_code->HasNumberValue()) {
- if (std::isfinite(c_code->DoubleValue())) {
- uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
- return HConstant::New(
- isolate, zone, context,
- isolate->factory()->LookupSingleCharacterStringFromCode(code));
- }
- return HConstant::New(isolate, zone, context,
- isolate->factory()->empty_string());
- }
- }
- return new(zone) HStringCharFromCode(context, char_code);
-}
-
-
-HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* value,
- BuiltinFunctionId op) {
- do {
- if (!FLAG_fold_constants) break;
- if (!value->IsConstant()) break;
- HConstant* constant = HConstant::cast(value);
- if (!constant->HasNumberValue()) break;
- double d = constant->DoubleValue();
- if (std::isnan(d)) { // NaN poisons everything.
- return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
- }
- if (std::isinf(d)) { // +Infinity and -Infinity.
- switch (op) {
- case kMathCos:
- case kMathSin:
- return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
- case kMathExp:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
- case kMathLog:
- case kMathSqrt:
- return H_CONSTANT_DOUBLE(
- (d > 0.0) ? d : std::numeric_limits<double>::quiet_NaN());
- case kMathPowHalf:
- case kMathAbs:
- return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
- case kMathRound:
- case kMathFround:
- case kMathFloor:
- return H_CONSTANT_DOUBLE(d);
- case kMathClz32:
- return H_CONSTANT_INT(32);
- default:
- UNREACHABLE();
- break;
- }
- }
- switch (op) {
- case kMathCos:
- return H_CONSTANT_DOUBLE(base::ieee754::cos(d));
- case kMathExp:
- return H_CONSTANT_DOUBLE(base::ieee754::exp(d));
- case kMathLog:
- return H_CONSTANT_DOUBLE(base::ieee754::log(d));
- case kMathSin:
- return H_CONSTANT_DOUBLE(base::ieee754::sin(d));
- case kMathSqrt:
- lazily_initialize_fast_sqrt(isolate);
- return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate));
- case kMathPowHalf:
- return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
- case kMathAbs:
- return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d);
- case kMathRound:
- // -0.5 .. -0.0 round to -0.0.
- if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0);
- // Doubles are represented as Significant * 2 ^ Exponent. If the
- // Exponent is not negative, the double value is already an integer.
- if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
- return H_CONSTANT_DOUBLE(Floor(d + 0.5));
- case kMathFround:
- return H_CONSTANT_DOUBLE(static_cast<double>(static_cast<float>(d)));
- case kMathFloor:
- return H_CONSTANT_DOUBLE(Floor(d));
- case kMathClz32: {
- uint32_t i = DoubleToUint32(d);
- return H_CONSTANT_INT(base::bits::CountLeadingZeros32(i));
- }
- default:
- UNREACHABLE();
- break;
- }
- } while (false);
- return new(zone) HUnaryMathOperation(context, value, op);
-}
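The kMathRound fold above has two corner cases worth spelling out: inputs in [-0.5, -0.0] must round to -0, and values that are already integral must be returned as-is so the +0.5 adjustment cannot introduce a rounding error for very large doubles. A simplified standalone sketch (it uses an is-integral test where the code above uses the exponent shortcut):

    #include <cmath>
    #include <cstdio>

    static double FoldRound(double d) {
      if (d >= -0.5 && std::signbit(d)) return -0.0;  // -0.5 .. -0.0 -> -0
      if (d == std::floor(d)) return d;               // already an integer
      return std::floor(d + 0.5);                     // round half up
    }

    int main() {
      std::printf("%g %g %g\n", FoldRound(-0.3), FoldRound(2.5),
                  FoldRound(-2.5));  // -0 3 -2
      return 0;
    }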
-
-
-Representation HUnaryMathOperation::RepresentationFromUses() {
- if (op_ != kMathFloor && op_ != kMathRound) {
- return HValue::RepresentationFromUses();
- }
-
- // The instruction can have an int32 or double output. Prefer a double
- // representation if there are double uses.
- bool use_double = false;
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- int use_index = it.index();
- Representation rep_observed = use->observed_input_representation(use_index);
- Representation rep_required = use->RequiredInputRepresentation(use_index);
- use_double |= (rep_observed.IsDouble() || rep_required.IsDouble());
- if (use_double && !FLAG_trace_representation) {
- // Having seen one double is enough.
- break;
- }
- if (FLAG_trace_representation) {
- if (!rep_required.IsDouble() || rep_observed.IsDouble()) {
- PrintF("#%d %s is used by #%d %s as %s%s\n",
- id(), Mnemonic(), use->id(),
- use->Mnemonic(), rep_observed.Mnemonic(),
- (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
- } else {
- PrintF("#%d %s is required by #%d %s as %s%s\n",
- id(), Mnemonic(), use->id(),
- use->Mnemonic(), rep_required.Mnemonic(),
- (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
- }
- }
- }
- return use_double ? Representation::Double() : Representation::Integer32();
-}
-
-
-HInstruction* HPower::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
- double result =
- power_helper(isolate, c_left->DoubleValue(), c_right->DoubleValue());
- return H_CONSTANT_DOUBLE(std::isnan(result)
- ? std::numeric_limits<double>::quiet_NaN()
- : result);
- }
- }
- return new(zone) HPower(left, right);
-}
-
-
-HInstruction* HMathMinMax::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Operation op) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
- double d_left = c_left->DoubleValue();
- double d_right = c_right->DoubleValue();
- if (op == kMathMin) {
- if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right);
- if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left);
- if (d_left == d_right) {
- // Handle +0 and -0.
- return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left
- : d_right);
- }
- } else {
- if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right);
- if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left);
- if (d_left == d_right) {
- // Handle +0 and -0.
- return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right
- : d_left);
- }
- }
- // All comparisons failed, must be NaN.
- return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
- }
- }
- return new(zone) HMathMinMax(context, left, right, op);
-}
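The constant fold above has to break ±0 ties by sign because +0 and -0 compare equal: Math.min(+0, -0) is -0 and Math.max(+0, -0) is +0. A minimal standalone sketch of the min case:

    #include <cmath>
    #include <cstdio>

    static double FoldMin(double a, double b) {
      if (a < b) return a;
      if (a > b) return b;
      if (a == b) return std::signbit(a) ? a : b;  // prefer the -0 operand
      return std::nan("");                         // any NaN operand -> NaN
    }

    int main() {
      std::printf("%d\n", std::signbit(FoldMin(0.0, -0.0)));  // 1, i.e. -0
      return 0;
    }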
-
-HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
- int32_t dividend = c_left->Integer32Value();
- int32_t divisor = c_right->Integer32Value();
- if (dividend == kMinInt && divisor == -1) {
- return H_CONSTANT_DOUBLE(-0.0);
- }
- if (divisor != 0) {
- int32_t res = dividend % divisor;
- if ((res == 0) && (dividend < 0)) {
- return H_CONSTANT_DOUBLE(-0.0);
- }
- return H_CONSTANT_INT(res);
- }
- }
- }
- return new (zone) HMod(context, left, right);
-}
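Two of the folds above exist because JavaScript's `%` can produce -0 where C++ integer `%` cannot: kMinInt % -1 (which would overflow in C++) and any zero remainder with a negative dividend. A minimal standalone sketch, which also covers the divisor-of-zero case the code above leaves to the runtime:

    #include <cmath>
    #include <cstdint>

    static double FoldInt32Mod(int32_t dividend, int32_t divisor) {
      if (dividend == INT32_MIN && divisor == -1) return -0.0;
      if (divisor == 0) return std::nan("");  // x % 0 is NaN in JavaScript
      int32_t res = dividend % divisor;
      if (res == 0 && dividend < 0) return -0.0;
      return res;
    }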
-
-HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right) {
- // If left and right are constant values, try to return a constant value.
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- if (std::isnan(c_left->DoubleValue()) ||
- std::isnan(c_right->DoubleValue())) {
- return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
- } else if (c_right->DoubleValue() != 0) {
- double double_res = c_left->DoubleValue() / c_right->DoubleValue();
- if (IsInt32Double(double_res)) {
- return H_CONSTANT_INT(double_res);
- }
- return H_CONSTANT_DOUBLE(double_res);
- } else if (c_left->DoubleValue() != 0) {
- int sign = Double(c_left->DoubleValue()).Sign() *
- Double(c_right->DoubleValue()).Sign(); // Right could be -0.
- return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
- } else {
- return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
- }
- }
- }
- return new (zone) HDiv(context, left, right);
-}
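When the divisor folds to zero but the dividend does not, the code above derives the sign of the resulting infinity from both operand signs, because the divisor constant may be -0 even though -0 == 0. A standalone sketch of that sign logic:

    #include <cmath>
    #include <cstdio>

    int main() {
      double left = 1.0, right = -0.0;  // right could be -0; it compares == 0
      int sign = (std::signbit(left) ? -1 : 1) *
                 (std::signbit(right) ? -1 : 1);
      std::printf("%f\n", sign * INFINITY);  // -inf, matching 1 / -0 in JS
      return 0;
    }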
-
-HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
- Token::Value op, HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- int32_t result;
- int32_t v_left = c_left->NumberValueAsInteger32();
- int32_t v_right = c_right->NumberValueAsInteger32();
- switch (op) {
- case Token::BIT_XOR:
- result = v_left ^ v_right;
- break;
- case Token::BIT_AND:
- result = v_left & v_right;
- break;
- case Token::BIT_OR:
- result = v_left | v_right;
- break;
- default:
- result = 0; // Please the compiler.
- UNREACHABLE();
- }
- return H_CONSTANT_INT(result);
- }
- }
- return new (zone) HBitwise(context, op, left, right);
-}
-
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
- HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
- HValue* left, HValue* right) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT(result); \
- } \
- } \
- return new (zone) HInstr(context, left, right); \
- }
-
-DEFINE_NEW_H_BITWISE_INSTR(HSar,
-c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
-DEFINE_NEW_H_BITWISE_INSTR(HShl,
-c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
-
-#undef DEFINE_NEW_H_BITWISE_INSTR
-
-HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right) {
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
- HConstant* c_left = HConstant::cast(left);
- HConstant* c_right = HConstant::cast(right);
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- int32_t left_val = c_left->NumberValueAsInteger32();
- int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
- if ((right_val == 0) && (left_val < 0)) {
- return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
- }
- return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
- }
- }
- return new (zone) HShr(context, left, right);
-}
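The fold above returns a double rather than an int32 constant when the shift count is zero and the left operand is negative: `>>> 0` reinterprets the value as an unsigned 32-bit integer, which can exceed the int32 range. A one-line standalone illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t left = -1;
      uint32_t shifted = static_cast<uint32_t>(left) >> 0;
      std::printf("%u\n", shifted);  // 4294967295, too large for int32_t
      return 0;
    }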
-
-
-HInstruction* HSeqStringGetChar::New(Isolate* isolate, Zone* zone,
- HValue* context, String::Encoding encoding,
- HValue* string, HValue* index) {
- if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) {
- HConstant* c_string = HConstant::cast(string);
- HConstant* c_index = HConstant::cast(index);
- if (c_string->HasStringValue() && c_index->HasInteger32Value()) {
- Handle<String> s = c_string->StringValue();
- int32_t i = c_index->Integer32Value();
- DCHECK_LE(0, i);
- DCHECK_LT(i, s->length());
- return H_CONSTANT_INT(s->Get(i));
- }
- }
- return new(zone) HSeqStringGetChar(encoding, string, index);
-}
-
-
-#undef H_CONSTANT_INT
-#undef H_CONSTANT_DOUBLE
-
-
-std::ostream& HBitwise::PrintDataTo(std::ostream& os) const { // NOLINT
- os << Token::Name(op_) << " ";
- return HBitwiseBinaryOperation::PrintDataTo(os);
-}
-
-
-void HPhi::SimplifyConstantInputs() {
- // Convert constant inputs to integers when all uses are truncating.
- // This must happen before representation inference takes place.
- if (!CheckUsesForFlag(kTruncatingToInt32)) return;
- for (int i = 0; i < OperandCount(); ++i) {
- if (!OperandAt(i)->IsConstant()) return;
- }
- HGraph* graph = block()->graph();
- for (int i = 0; i < OperandCount(); ++i) {
- HConstant* operand = HConstant::cast(OperandAt(i));
- if (operand->HasInteger32Value()) {
- continue;
- } else if (operand->HasDoubleValue()) {
- HConstant* integer_input = HConstant::New(
- graph->isolate(), graph->zone(), graph->GetInvalidContext(),
- DoubleToInt32(operand->DoubleValue()));
- integer_input->InsertAfter(operand);
- SetOperandAt(i, integer_input);
- } else if (operand->HasBooleanValue()) {
- SetOperandAt(i, operand->BooleanValue() ? graph->GetConstant1()
- : graph->GetConstant0());
- } else if (operand->ImmortalImmovable()) {
- SetOperandAt(i, graph->GetConstant0());
- }
- }
- // Overwrite observed input representations because they are likely Tagged.
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (use->IsBinaryOperation()) {
- HBinaryOperation::cast(use)->set_observed_input_representation(
- it.index(), Representation::Smi());
- }
- }
-}
-
-
-void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
- DCHECK(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
- new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- new_rep = RepresentationFromUseRequirements();
- UpdateRepresentation(new_rep, h_infer, "use requirements");
-}
-
-
-Representation HPhi::RepresentationFromInputs() {
- Representation r = representation();
- for (int i = 0; i < OperandCount(); ++i) {
- // Ignore conservative Tagged assumption of parameters if we have
- // reason to believe that it's too conservative.
- if (has_type_feedback_from_uses() && OperandAt(i)->IsParameter()) {
- continue;
- }
-
- r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
- }
- return r;
-}
-
-
-// Returns a representation if all uses agree on the same representation.
-// Integer32 is also returned when some uses are Smi but others are Integer32.
-Representation HValue::RepresentationFromUseRequirements() {
- Representation rep = Representation::None();
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- // Ignore the use requirement from code that never runs.
- if (it.value()->block()->IsUnreachable()) continue;
-
- // We check for observed_input_representation elsewhere.
- Representation use_rep =
- it.value()->RequiredInputRepresentation(it.index());
- if (rep.IsNone()) {
- rep = use_rep;
- continue;
- }
- if (use_rep.IsNone() || rep.Equals(use_rep)) continue;
- if (rep.generalize(use_rep).IsInteger32()) {
- rep = Representation::Integer32();
- continue;
- }
- return Representation::None();
- }
- return rep;
-}
-
-
-bool HValue::HasNonSmiUse() {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- // We check for observed_input_representation elsewhere.
- Representation use_rep =
- it.value()->RequiredInputRepresentation(it.index());
- if (!use_rep.IsNone() &&
- !use_rep.IsSmi() &&
- !use_rep.IsTagged()) {
- return true;
- }
- }
- return false;
-}
-
-
-// Node-specific verification code is only included in debug mode.
-#ifdef DEBUG
-
-void HPhi::Verify() {
- DCHECK(OperandCount() == block()->predecessors()->length());
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- HBasicBlock* defining_block = value->block();
- HBasicBlock* predecessor_block = block()->predecessors()->at(i);
- DCHECK(defining_block == predecessor_block ||
- defining_block->Dominates(predecessor_block));
- }
-}
-
-
-void HSimulate::Verify() {
- HInstruction::Verify();
- DCHECK(HasAstId() || next()->IsEnterInlined());
-}
-
-
-void HCheckHeapObject::Verify() {
- HInstruction::Verify();
- DCHECK(HasNoUses());
-}
-
-
-void HCheckValue::Verify() {
- HInstruction::Verify();
- DCHECK(HasNoUses());
-}
-
-#endif
-
-
-HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
- DCHECK(offset >= 0);
- DCHECK(offset < FixedArray::kHeaderSize);
- if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
- return HObjectAccess(kInobject, offset);
-}
-
-
-HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset,
- Representation representation) {
- DCHECK(offset >= 0);
- Portion portion = kInobject;
-
- if (offset == JSObject::kElementsOffset) {
- portion = kElementsPointer;
- } else if (offset == JSObject::kMapOffset) {
- portion = kMaps;
- }
- bool existing_inobject_property = true;
- if (!map.is_null()) {
- existing_inobject_property = (offset <
- map->instance_size() - map->unused_property_fields() * kPointerSize);
- }
- return HObjectAccess(portion, offset, representation, Handle<String>::null(),
- false, existing_inobject_property);
-}
-
-
-HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) {
- switch (offset) {
- case AllocationSite::kTransitionInfoOffset:
- return HObjectAccess(kInobject, offset, Representation::Tagged());
- case AllocationSite::kNestedSiteOffset:
- return HObjectAccess(kInobject, offset, Representation::Tagged());
- case AllocationSite::kPretenureDataOffset:
- return HObjectAccess(kInobject, offset, Representation::Smi());
- case AllocationSite::kPretenureCreateCountOffset:
- return HObjectAccess(kInobject, offset, Representation::Smi());
- case AllocationSite::kDependentCodeOffset:
- return HObjectAccess(kInobject, offset, Representation::Tagged());
- case AllocationSite::kWeakNextOffset:
- return HObjectAccess(kInobject, offset, Representation::Tagged());
- default:
- UNREACHABLE();
- }
- return HObjectAccess(kInobject, offset);
-}
-
-
-HObjectAccess HObjectAccess::ForContextSlot(int index) {
- DCHECK(index >= 0);
- Portion portion = kInobject;
- int offset = Context::kHeaderSize + index * kPointerSize;
- DCHECK_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
- return HObjectAccess(portion, offset, Representation::Tagged());
-}
-
-
-HObjectAccess HObjectAccess::ForScriptContext(int index) {
- DCHECK(index >= 0);
- Portion portion = kInobject;
- int offset = ScriptContextTable::GetContextOffset(index);
- return HObjectAccess(portion, offset, Representation::Tagged());
-}
-
-
-HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
- DCHECK(offset >= 0);
- Portion portion = kInobject;
-
- if (offset == JSObject::kElementsOffset) {
- portion = kElementsPointer;
- } else if (offset == JSArray::kLengthOffset) {
- portion = kArrayLengths;
- } else if (offset == JSObject::kMapOffset) {
- portion = kMaps;
- }
- return HObjectAccess(portion, offset);
-}
-
-
-HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
- Representation representation) {
- DCHECK(offset >= 0);
- return HObjectAccess(kBackingStore, offset, representation,
- Handle<String>::null(), false, false);
-}
-
-
-HObjectAccess HObjectAccess::ForField(Handle<Map> map, int index,
- Representation representation,
- Handle<Name> name) {
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + map->instance_size();
- return HObjectAccess(kInobject, offset, representation, name, false, true);
- } else {
- // Non-negative property indices are in the properties array.
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return HObjectAccess(kBackingStore, offset, representation, name,
- false, false);
- }
-}
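ForField above encodes where a property lives purely in the sign of the descriptor index: negative indices count back from the end of the object's fixed (in-object) area, while non-negative indices address the out-of-object properties array. A small arithmetic sketch with illustrative numbers (the sizes below are assumptions for a 64-bit target, not values taken from a real map):

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;    // assumed 64-bit target
      const int instance_size = 64;  // assumed map->instance_size()
      const int kHeaderSize = 16;    // assumed FixedArray::kHeaderSize
      // index -1: last in-object slot, counted back from the instance end.
      std::printf("in-object offset:     %d\n",
                  (-1 * kPointerSize) + instance_size);     // 56
      // index 0: first slot of the out-of-object properties backing store.
      std::printf("backing store offset: %d\n",
                  (0 * kPointerSize) + kHeaderSize);        // 16
      return 0;
    }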
-
-
-void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
- // set the appropriate GVN flags for a given load or store instruction
- if (access_type == STORE) {
- // track dominating allocations in order to eliminate write barriers
- instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
- instr->SetFlag(HValue::kTrackSideEffectDominators);
- } else {
- // try to GVN loads, but don't hoist above map changes
- instr->SetFlag(HValue::kUseGVN);
- instr->SetDependsOnFlag(::v8::internal::kMaps);
- }
-
- switch (portion()) {
- case kArrayLengths:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kArrayLengths);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
- }
- break;
- case kStringLengths:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kStringLengths);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kStringLengths);
- }
- break;
- case kInobject:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kInobjectFields);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
- }
- break;
- case kDouble:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kDoubleFields);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
- }
- break;
- case kBackingStore:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
- }
- break;
- case kElementsPointer:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kElementsPointer);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
- }
- break;
- case kMaps:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kMaps);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kMaps);
- }
- break;
- case kExternalMemory:
- if (access_type == STORE) {
- instr->SetChangesFlag(::v8::internal::kExternalMemory);
- } else {
- instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
- }
- break;
- }
-}
-
-
-std::ostream& operator<<(std::ostream& os, const HObjectAccess& access) {
- os << ".";
-
- switch (access.portion()) {
- case HObjectAccess::kArrayLengths:
- case HObjectAccess::kStringLengths:
- os << "%length";
- break;
- case HObjectAccess::kElementsPointer:
- os << "%elements";
- break;
- case HObjectAccess::kMaps:
- os << "%map";
- break;
- case HObjectAccess::kDouble: // fall through
- case HObjectAccess::kInobject:
- if (!access.name().is_null() && access.name()->IsString()) {
- os << Handle<String>::cast(access.name())->ToCString().get();
- }
- os << "[in-object]";
- break;
- case HObjectAccess::kBackingStore:
- if (!access.name().is_null() && access.name()->IsString()) {
- os << Handle<String>::cast(access.name())->ToCString().get();
- }
- os << "[backing-store]";
- break;
- case HObjectAccess::kExternalMemory:
- os << "[external-memory]";
- break;
- }
-
- return os << "@" << access.offset();
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
deleted file mode 100644
index 8874f9aabd..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ /dev/null
@@ -1,6751 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
-#define V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
-
-#include <cstring>
-#include <iosfwd>
-
-#include "src/allocation.h"
-#include "src/ast/ast.h"
-#include "src/base/bits.h"
-#include "src/bit-vector.h"
-#include "src/conversions.h"
-#include "src/crankshaft/hydrogen-types.h"
-#include "src/crankshaft/unique.h"
-#include "src/deoptimizer.h"
-#include "src/globals.h"
-#include "src/interface-descriptors.h"
-#include "src/small-pointer-list.h"
-#include "src/utils.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-struct ChangesOf;
-class HBasicBlock;
-class HDiv;
-class HEnvironment;
-class HInferRepresentationPhase;
-class HInstruction;
-class HLoopInformation;
-class HStoreNamedField;
-class HValue;
-class LInstruction;
-class LChunkBuilder;
-class SmallMapList;
-
-#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
- V(ArithmeticBinaryOperation) \
- V(BinaryOperation) \
- V(BitwiseBinaryOperation) \
- V(ControlInstruction) \
- V(Instruction)
-
-#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
- V(AbnormalExit) \
- V(AccessArgumentsAt) \
- V(Add) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArgumentsObject) \
- V(Bitwise) \
- V(BlockEntry) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CapturedObject) \
- V(Change) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckHeapObject) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CompareHoleAndBranch) \
- V(CompareGeneric) \
- V(CompareObjectEqAndBranch) \
- V(CompareMap) \
- V(Constant) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(Div) \
- V(DummyUse) \
- V(EnterInlined) \
- V(EnvironmentMarker) \
- V(ForceRepresentation) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InvokeFunction) \
- V(HasInPrototypeChainAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(LeaveInlined) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(LoadRoot) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(MaybeGrowElements) \
- V(Mod) \
- V(Mul) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArguments) \
- V(Return) \
- V(Ror) \
- V(Sar) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(Shl) \
- V(Shr) \
- V(Simulate) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(Sub) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(UseConst) \
- V(WrapReceiver)
-
-#define GVN_TRACKED_FLAG_LIST(V) \
- V(NewSpacePromotion)
-
-#define GVN_UNTRACKED_FLAG_LIST(V) \
- V(ArrayElements) \
- V(ArrayLengths) \
- V(StringLengths) \
- V(BackingStoreFields) \
- V(Calls) \
- V(ContextSlots) \
- V(DoubleArrayElements) \
- V(DoubleFields) \
- V(ElementsKind) \
- V(ElementsPointer) \
- V(GlobalVars) \
- V(InobjectFields) \
- V(Maps) \
- V(OsrEntries) \
- V(ExternalMemory) \
- V(StringChars) \
- V(TypedArrayElements)
-
-
-#define DECLARE_ABSTRACT_INSTRUCTION(type) \
- bool Is##type() const final { return true; } \
- static H##type* cast(HValue* value) { \
- DCHECK(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- }
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type) \
- LInstruction* CompileToLithium(LChunkBuilder* builder) final; \
- static H##type* cast(HValue* value) { \
- DCHECK(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- } \
- Opcode opcode() const final { return HValue::k##type; }
-
-
-enum PropertyAccessType { LOAD, STORE };
-
-Representation RepresentationFromMachineType(MachineType type);
-
-class Range final : public ZoneObject {
- public:
- Range()
- : lower_(kMinInt),
- upper_(kMaxInt),
- next_(NULL),
- can_be_minus_zero_(false) { }
-
- Range(int32_t lower, int32_t upper)
- : lower_(lower),
- upper_(upper),
- next_(NULL),
- can_be_minus_zero_(false) { }
-
- int32_t upper() const { return upper_; }
- int32_t lower() const { return lower_; }
- Range* next() const { return next_; }
- Range* CopyClearLower(Zone* zone) const {
- return new(zone) Range(kMinInt, upper_);
- }
- Range* CopyClearUpper(Zone* zone) const {
- return new(zone) Range(lower_, kMaxInt);
- }
- Range* Copy(Zone* zone) const {
- Range* result = new(zone) Range(lower_, upper_);
- result->set_can_be_minus_zero(CanBeMinusZero());
- return result;
- }
- int32_t Mask() const;
- void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
- bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
- bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
- bool CanBeNegative() const { return lower_ < 0; }
- bool CanBePositive() const { return upper_ > 0; }
- bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
- bool IsMostGeneric() const {
- return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero();
- }
- bool IsInSmiRange() const {
- return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
- }
- void ClampToSmi() {
- lower_ = Max(lower_, Smi::kMinValue);
- upper_ = Min(upper_, Smi::kMaxValue);
- }
- void Clear();
- void KeepOrder();
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- void StackUpon(Range* other) {
- Intersect(other);
- next_ = other;
- }
-
- void Intersect(Range* other);
- void Union(Range* other);
- void CombinedMax(Range* other);
- void CombinedMin(Range* other);
-
- void AddConstant(int32_t value);
- void Sar(int32_t value);
- void Shl(int32_t value);
- bool AddAndCheckOverflow(const Representation& r, Range* other);
- bool SubAndCheckOverflow(const Representation& r, Range* other);
- bool MulAndCheckOverflow(const Representation& r, Range* other);
-
- private:
- int32_t lower_;
- int32_t upper_;
- Range* next_;
- bool can_be_minus_zero_;
-};
-
-
-class HUseListNode: public ZoneObject {
- public:
- HUseListNode(HValue* value, int index, HUseListNode* tail)
- : tail_(tail), value_(value), index_(index) {
- }
-
- HUseListNode* tail();
- HValue* value() const { return value_; }
- int index() const { return index_; }
-
- void set_tail(HUseListNode* list) { tail_ = list; }
-
-#ifdef DEBUG
- void Zap() {
- tail_ = reinterpret_cast<HUseListNode*>(1);
- value_ = NULL;
- index_ = -1;
- }
-#endif
-
- private:
- HUseListNode* tail_;
- HValue* value_;
- int index_;
-};
-
-
-// We reuse use list nodes behind the scenes as uses are added and deleted.
-// This class is the safe way to iterate uses while deleting them.
-class HUseIterator final BASE_EMBEDDED {
- public:
- bool Done() { return current_ == NULL; }
- void Advance();
-
- HValue* value() {
- DCHECK(!Done());
- return value_;
- }
-
- int index() {
- DCHECK(!Done());
- return index_;
- }
-
- private:
- explicit HUseIterator(HUseListNode* head);
-
- HUseListNode* current_;
- HUseListNode* next_;
- HValue* value_;
- int index_;
-
- friend class HValue;
-};
-
-
-// All tracked flags should appear before untracked ones.
-enum GVNFlag {
- // Declare global value numbering flags.
-#define DECLARE_FLAG(Type) k##Type,
- GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
- GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
-#define COUNT_FLAG(Type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
- kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
-#undef COUNT_FLAG
- kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
-};
-
-
-static inline GVNFlag GVNFlagFromInt(int i) {
- DCHECK(i >= 0);
- DCHECK(i < kNumberOfFlags);
- return static_cast<GVNFlag>(i);
-}
-
-
-class DecompositionResult final BASE_EMBEDDED {
- public:
- DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
-
- HValue* base() { return base_; }
- int offset() { return offset_; }
- int scale() { return scale_; }
-
- bool Apply(HValue* other_base, int other_offset, int other_scale = 0) {
- if (base_ == NULL) {
- base_ = other_base;
- offset_ = other_offset;
- scale_ = other_scale;
- return true;
- } else {
- if (scale_ == 0) {
- base_ = other_base;
- offset_ += other_offset;
- scale_ = other_scale;
- return true;
- } else {
- return false;
- }
- }
- }
-
- void SwapValues(HValue** other_base, int* other_offset, int* other_scale) {
- swap(&base_, other_base);
- swap(&offset_, other_offset);
- swap(&scale_, other_scale);
- }
-
- private:
- template <class T> void swap(T* a, T* b) {
- T c(*a);
- *a = *b;
- *b = c;
- }
-
- HValue* base_;
- int offset_;
- int scale_;
-};
-
-
-typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
-
-
-class HValue : public ZoneObject {
- public:
- static const int kNoNumber = -1;
-
- enum Flag {
- kFlexibleRepresentation,
- kCannotBeTagged,
- // Participate in Global Value Numbering, i.e. elimination of
- // unnecessary recomputations. If an instruction sets this flag, it must
- // implement DataEquals(), which will be used to determine if other
- // occurrences of the instruction are indeed the same.
- kUseGVN,
- // Track instructions that are dominating side effects. If an instruction
- // sets this flag, it must implement HandleSideEffectDominator() and should
- // indicate which side effects to track by setting GVN flags.
- kTrackSideEffectDominators,
- kCanOverflow,
- kBailoutOnMinusZero,
- kCanBeDivByZero,
- kLeftCanBeMinInt,
- kLeftCanBeNegative,
- kLeftCanBePositive,
- kTruncatingToNumber,
- kIsArguments,
- kTruncatingToInt32,
- kAllUsesTruncatingToInt32,
- kTruncatingToSmi,
- kAllUsesTruncatingToSmi,
- // Set after an instruction is killed.
- kIsDead,
- // Instructions that are allowed to produce full range unsigned integer
- // values are marked with the kUint32 flag. If an arithmetic shift or a load
- // from an EXTERNAL_UINT32_ELEMENTS array is not marked with this flag,
- // it will deoptimize if the result does not fit into the signed integer
- // range. HGraph::ComputeSafeUint32Operations is responsible for setting
- // this flag.
- kUint32,
- kHasNoObservableSideEffects,
-    // Indicates an instruction shouldn't be replaced by optimization; this flag
- // is useful to set in cases where recomputing a value is cheaper than
- // extending the value's live range and spilling it.
- kCantBeReplaced,
- // Indicates the instruction is live during dead code elimination.
- kIsLive,
-
- // HEnvironmentMarkers are deleted before dead code
- // elimination takes place, so they can repurpose the kIsLive flag:
- kEndsLiveRange = kIsLive,
-
- // TODO(everyone): Don't forget to update this!
- kLastFlag = kIsLive
- };
-
- STATIC_ASSERT(kLastFlag < kBitsPerInt);
-
- static HValue* cast(HValue* value) { return value; }
-
- enum Opcode {
- // Declare a unique enum value for each hydrogen instruction.
- #define DECLARE_OPCODE(type) k##type,
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kPhi
- #undef DECLARE_OPCODE
- };
- virtual Opcode opcode() const = 0;
-
-  // Declare non-virtual predicates for each concrete HInstruction or HValue.
- #define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
- #undef DECLARE_PREDICATE
- bool IsPhi() const { return opcode() == kPhi; }
-
- // Declare virtual predicates for abstract HInstruction or HValue
- #define DECLARE_PREDICATE(type) \
- virtual bool Is##type() const { return false; }
- HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
- #undef DECLARE_PREDICATE
-
- bool IsBitwiseBinaryShift() {
- return IsShl() || IsShr() || IsSar();
- }
-
- explicit HValue(HType type = HType::Tagged())
- : block_(NULL),
- id_(kNoNumber),
- type_(type),
- use_list_(NULL),
- range_(NULL),
-#ifdef DEBUG
- range_poisoned_(false),
-#endif
- flags_(0) {}
- virtual ~HValue() {}
-
- virtual SourcePosition position() const { return SourcePosition::Unknown(); }
-
- HBasicBlock* block() const { return block_; }
- void SetBlock(HBasicBlock* block);
-
- // Note: Never call this method for an unlinked value.
- Isolate* isolate() const;
-
- int id() const { return id_; }
- void set_id(int id) { id_ = id; }
-
- HUseIterator uses() const { return HUseIterator(use_list_); }
-
- virtual bool EmitAtUses() { return false; }
-
- Representation representation() const { return representation_; }
- void ChangeRepresentation(Representation r) {
- DCHECK(CheckFlag(kFlexibleRepresentation));
- DCHECK(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
- RepresentationChanged(r);
- representation_ = r;
- if (r.IsTagged()) {
- // Tagged is the bottom of the lattice, don't go any further.
- ClearFlag(kFlexibleRepresentation);
- }
- }
- virtual void AssumeRepresentation(Representation r);
-
- virtual Representation KnownOptimalRepresentation() {
- Representation r = representation();
- if (r.IsTagged()) {
- HType t = type();
- if (t.IsSmi()) return Representation::Smi();
- if (t.IsHeapNumber()) return Representation::Double();
- if (t.IsHeapObject()) return r;
- return Representation::None();
- }
- return r;
- }
-
- HType type() const { return type_; }
- void set_type(HType new_type) {
- DCHECK(new_type.IsSubtypeOf(type_));
- type_ = new_type;
- }
-
-  // There are HInstructions that do not really change a value; they
-  // only add pieces of information to it (like bounds checks, map checks,
-  // smi checks...).
-  // We call these instructions "informative definitions", or "iDefs".
-  // One of the iDef operands is special because it is the value that is
-  // "transferred" to the output; we call it the "redefined operand".
-  // If an HValue is an iDef, it must override RedefinedOperandIndex() so that
-  // it does not return kNoRedefinedOperand.
- static const int kNoRedefinedOperand = -1;
- virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; }
- bool IsInformativeDefinition() {
- return RedefinedOperandIndex() != kNoRedefinedOperand;
- }
- HValue* RedefinedOperand() {
- int index = RedefinedOperandIndex();
- return index == kNoRedefinedOperand ? NULL : OperandAt(index);
- }
-
- bool CanReplaceWithDummyUses();
-
- virtual int argument_delta() const { return 0; }
-
-  // A purely informative definition is an iDef that will not emit code and
- // should therefore be removed from the graph in the RestoreActualValues
- // phase (so that live ranges will be shorter).
- virtual bool IsPurelyInformativeDefinition() { return false; }
-
- // This method must always return the original HValue SSA definition,
- // regardless of any chain of iDefs of this value.
- HValue* ActualValue() {
- HValue* value = this;
- int index;
- while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) {
- value = value->OperandAt(index);
- }
- return value;
- }
-
- bool IsInteger32Constant();
- int32_t GetInteger32Constant();
- bool EqualsInteger32Constant(int32_t value);
-
- bool IsDefinedAfter(HBasicBlock* other) const;
-
- // Operands.
- virtual int OperandCount() const = 0;
- virtual HValue* OperandAt(int index) const = 0;
- void SetOperandAt(int index, HValue* value);
-
- void DeleteAndReplaceWith(HValue* other);
- void ReplaceAllUsesWith(HValue* other);
- bool HasNoUses() const { return use_list_ == NULL; }
- bool HasOneUse() const {
- return use_list_ != NULL && use_list_->tail() == NULL;
- }
- bool HasMultipleUses() const {
- return use_list_ != NULL && use_list_->tail() != NULL;
- }
- int UseCount() const;
-
- // Mark this HValue as dead and to be removed from other HValues' use lists.
- void Kill();
-
- int flags() const { return flags_; }
- void SetFlag(Flag f) { flags_ |= (1 << f); }
- void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
- bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
- void CopyFlag(Flag f, HValue* other) {
- if (other->CheckFlag(f)) SetFlag(f);
- }
-
- // Returns true if the flag specified is set for all uses, false otherwise.
- bool CheckUsesForFlag(Flag f) const;
-  // Same as above, but the first use without the flag is returned in value.
- bool CheckUsesForFlag(Flag f, HValue** value) const;
- // Returns true if the flag specified is set for all uses, and this set
- // of uses is non-empty.
- bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
-
- GVNFlagSet ChangesFlags() const { return changes_flags_; }
- GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
- void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
- void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
- void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
- void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
- bool CheckChangesFlag(GVNFlag f) const {
- return changes_flags_.Contains(f);
- }
- bool CheckDependsOnFlag(GVNFlag f) const {
- return depends_on_flags_.Contains(f);
- }
- void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
- void ClearAllSideEffects() {
- changes_flags_.Remove(AllSideEffectsFlagSet());
- }
- bool HasSideEffects() const {
- return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
- }
- bool HasObservableSideEffects() const {
- return !CheckFlag(kHasNoObservableSideEffects) &&
- changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
- }
-
- GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = ChangesFlags();
- result.Intersect(AllSideEffectsFlagSet());
- return result;
- }
-
- GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = ChangesFlags();
- result.Intersect(AllObservableSideEffectsFlagSet());
- return result;
- }
-
- Range* range() const {
- DCHECK(!range_poisoned_);
- return range_;
- }
- bool HasRange() const {
- DCHECK(!range_poisoned_);
- return range_ != NULL;
- }
-#ifdef DEBUG
- void PoisonRange() { range_poisoned_ = true; }
-#endif
- void AddNewRange(Range* r, Zone* zone);
- void RemoveLastAddedRange();
- void ComputeInitialRange(Zone* zone);
-
- // Escape analysis helpers.
- virtual bool HasEscapingOperandAt(int index) { return true; }
- virtual bool HasOutOfBoundsAccess(int size) { return false; }
-
- // Representation helpers.
- virtual Representation observed_input_representation(int index) {
- return Representation::None();
- }
- virtual Representation RequiredInputRepresentation(int index) = 0;
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
-
- // This gives the instruction an opportunity to replace itself with an
- // instruction that does the same in some better way. To replace an
- // instruction with a new one, first add the new instruction to the graph,
- // then return it. Return NULL to have the instruction deleted.
- virtual HValue* Canonicalize() { return this; }
-
- bool Equals(HValue* other);
- virtual intptr_t Hashcode();
-
-  // Computes unique ids upfront in a way that is safe wrt GC and concurrent
-  // compilation.
- virtual void FinalizeUniqueness() { }
-
- // Printing support.
- virtual std::ostream& PrintTo(std::ostream& os) const = 0; // NOLINT
-
- const char* Mnemonic() const;
-
- // Type information helpers.
- bool HasMonomorphicJSObjectType();
-
- // TODO(mstarzinger): For now instructions can override this function to
- // specify statically known types, once HType can convey more information
- // it should be based on the HType.
- virtual Handle<Map> GetMonomorphicJSObjectMap() { return Handle<Map>(); }
-
-  // Updates the inferred type of this instruction and returns true if
- // it has changed.
- bool UpdateInferredType();
-
- virtual HType CalculateInferredType();
-
- // This function must be overridden for instructions which have the
- // kTrackSideEffectDominators flag set, to track instructions that are
- // dominating side effects.
- // It returns true if it removed an instruction which had side effects.
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
- UNREACHABLE();
- return false;
- }
-
- // Check if this instruction has some reason that prevents elimination.
- bool CannotBeEliminated() const {
- return HasObservableSideEffects() || !IsDeletable();
- }
-
-#ifdef DEBUG
- virtual void Verify() = 0;
-#endif
-
- // Returns true conservatively if the program might be able to observe a
- // ToString() operation on this value.
- bool ToStringCanBeObserved() const {
- return ToStringOrToNumberCanBeObserved();
- }
-
- // Returns true conservatively if the program might be able to observe a
- // ToNumber() operation on this value.
- bool ToNumberCanBeObserved() const {
- return ToStringOrToNumberCanBeObserved();
- }
-
- MinusZeroMode GetMinusZeroMode() {
- return CheckFlag(kBailoutOnMinusZero)
- ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO;
- }
-
- protected:
- // This function must be overridden for instructions with flag kUseGVN, to
- // compare the non-Operand parts of the instruction.
- virtual bool DataEquals(HValue* other) {
- UNREACHABLE();
- return false;
- }
-
- bool ToStringOrToNumberCanBeObserved() const {
- if (type().IsTaggedPrimitive()) return false;
- if (type().IsJSReceiver()) return true;
- return !representation().IsSmiOrInteger32() && !representation().IsDouble();
- }
-
- virtual Representation RepresentationFromInputs() {
- return representation();
- }
- virtual Representation RepresentationFromUses();
- Representation RepresentationFromUseRequirements();
- bool HasNonSmiUse();
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason);
- void AddDependantsToWorklist(HInferRepresentationPhase* h_infer);
-
- virtual void RepresentationChanged(Representation to) { }
-
- virtual Range* InferRange(Zone* zone);
- virtual void DeleteFromGraph() = 0;
- virtual void InternalSetOperandAt(int index, HValue* value) = 0;
- void clear_block() {
- DCHECK(block_ != NULL);
- block_ = NULL;
- }
-
- void set_representation(Representation r) {
- DCHECK(representation_.IsNone() && !r.IsNone());
- representation_ = r;
- }
-
- static GVNFlagSet AllFlagSet() {
- GVNFlagSet result;
-#define ADD_FLAG(Type) result.Add(k##Type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- // A flag mask to mark an instruction as having arbitrary side effects.
- static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllFlagSet();
- result.Remove(kOsrEntries);
- return result;
- }
- friend std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
-
- // A flag mask of all side effects that can make observable changes in
-  // an executing program (i.e. are not safe to repeat, move or remove).
- static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllFlagSet();
- result.Remove(kNewSpacePromotion);
- result.Remove(kElementsKind);
- result.Remove(kElementsPointer);
- result.Remove(kMaps);
- return result;
- }
-
- // Remove the matching use from the use list if present. Returns the
- // removed list node or NULL.
- HUseListNode* RemoveUse(HValue* value, int index);
-
- void RegisterUse(int index, HValue* new_value);
-
- HBasicBlock* block_;
-
- // The id of this instruction in the hydrogen graph, assigned when first
- // added to the graph. Reflects creation order.
- int id_;
-
- Representation representation_;
- HType type_;
- HUseListNode* use_list_;
- Range* range_;
-#ifdef DEBUG
- bool range_poisoned_;
-#endif
- int flags_;
- GVNFlagSet changes_flags_;
- GVNFlagSet depends_on_flags_;
-
- private:
- virtual bool IsDeletable() const { return false; }
-
- DISALLOW_COPY_AND_ASSIGN(HValue);
-};
-
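
// Editorial sketch (not part of the removed V8 source): a standalone model of
// the "informative definition" (iDef) chain described in the HValue comments
// above. ActualValue() follows the redefined operand until it reaches a value
// that is not an iDef, i.e. the original SSA definition. DemoValue is a
// hypothetical stand-in for HValue; the real class resolves the operand via
// RedefinedOperandIndex() and OperandAt().
#include <cassert>

struct DemoValue {
  // For an iDef, points at the value being redefined; nullptr otherwise.
  DemoValue* redefined_operand = nullptr;

  DemoValue* ActualValue() {
    DemoValue* value = this;
    while (value->redefined_operand != nullptr) {
      value = value->redefined_operand;
    }
    return value;
  }
};

int main() {
  DemoValue original;      // e.g. a constant or a load
  DemoValue smi_check;     // iDef: adds "is a Smi" information
  DemoValue bounds_check;  // iDef: adds "is in bounds" information
  smi_check.redefined_operand = &original;
  bounds_check.redefined_operand = &smi_check;
  // Walking the iDef chain always ends at the original definition.
  assert(bounds_check.ActualValue() == &original);
  return 0;
}
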
-// Support for printing various aspects of an HValue.
-struct NameOf {
- explicit NameOf(const HValue* const v) : value(v) {}
- const HValue* value;
-};
-
-
-struct TypeOf {
- explicit TypeOf(const HValue* const v) : value(v) {}
- const HValue* value;
-};
-
-
-struct ChangesOf {
- explicit ChangesOf(const HValue* const v) : value(v) {}
- const HValue* value;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const HValue& v);
-std::ostream& operator<<(std::ostream& os, const NameOf& v);
-std::ostream& operator<<(std::ostream& os, const TypeOf& v);
-std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
-
-
-#define DECLARE_INSTRUCTION_FACTORY_P0(I) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
- return new (zone) I(); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P1(I, P1) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \
- return new (zone) I(p1); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \
- return new (zone) I(p1, p2); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P3(I, P1, P2, P3) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3) { \
- return new (zone) I(p1, p2, p3); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4) { \
- return new (zone) I(p1, p2, p3, p4); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P5(I, P1, P2, P3, P4, P5) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4, P5 p5) { \
- return new (zone) I(p1, p2, p3, p4, p5); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4, P5 p5, P6 p6) { \
- return new (zone) I(p1, p2, p3, p4, p5, p6); \
- }
-
-#define DECLARE_INSTRUCTION_FACTORY_P7(I, P1, P2, P3, P4, P5, P6, P7) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { \
- return new (zone) I(p1, p2, p3, p4, p5, p6, p7); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
- return new (zone) I(context); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \
- return new (zone) I(context, p1); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \
- return new (zone) I(context, p1, p2); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3) { \
- return new (zone) I(context, p1, p2, p3); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4) { \
- return new (zone) I(context, p1, p2, p3, p4); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4, P5 p5) { \
- return new (zone) I(context, p1, p2, p3, p4, p5); \
- }
-
-#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
- static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
- P3 p3, P4 p4, P5 p5, P6 p6) { \
- return new (zone) I(context, p1, p2, p3, p4, p5, p6); \
- }
-
-class HInstruction : public HValue {
- public:
- HInstruction* next() const { return next_; }
- HInstruction* previous() const { return previous_; }
-
- std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
- virtual std::ostream& PrintDataTo(std::ostream& os) const; // NOLINT
-
- bool IsLinked() const { return block() != NULL; }
- void Unlink();
-
- void InsertBefore(HInstruction* next);
-
- template<class T> T* Prepend(T* instr) {
- instr->InsertBefore(this);
- return instr;
- }
-
- void InsertAfter(HInstruction* previous);
-
- template<class T> T* Append(T* instr) {
- instr->InsertAfter(this);
- return instr;
- }
-
- // The position is a write-once variable.
- SourcePosition position() const override { return position_; }
- bool has_position() const { return position_.IsKnown(); }
- void set_position(SourcePosition position) {
- DCHECK(position.IsKnown());
- position_ = position;
- }
-
- bool Dominates(HInstruction* other);
- bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); }
- bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
- bool CanTruncateToNumber() const { return CheckFlag(kTruncatingToNumber); }
-
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
-
-#ifdef DEBUG
- void Verify() override;
-#endif
-
- bool CanDeoptimize();
-
- virtual bool HasStackCheck() { return false; }
-
- DECLARE_ABSTRACT_INSTRUCTION(Instruction)
-
- protected:
- explicit HInstruction(HType type = HType::Tagged())
- : HValue(type),
- next_(NULL),
- previous_(NULL),
- position_(SourcePosition::Unknown()) {
- SetDependsOnFlag(kOsrEntries);
- }
-
- void DeleteFromGraph() override { Unlink(); }
-
- private:
- void InitializeAsFirst(HBasicBlock* block) {
- DCHECK(!IsLinked());
- SetBlock(block);
- }
-
- HInstruction* next_;
- HInstruction* previous_;
- SourcePosition position_;
-
- friend class HBasicBlock;
-};
-
-
-template<int V>
-class HTemplateInstruction : public HInstruction {
- public:
- int OperandCount() const final { return V; }
- HValue* OperandAt(int i) const final { return inputs_[i]; }
-
- protected:
- explicit HTemplateInstruction(HType type = HType::Tagged())
- : HInstruction(type) {}
-
- void InternalSetOperandAt(int i, HValue* value) final { inputs_[i] = value; }
-
- private:
- EmbeddedContainer<HValue*, V> inputs_;
-};
-
-
-class HControlInstruction : public HInstruction {
- public:
- virtual HBasicBlock* SuccessorAt(int i) const = 0;
- virtual int SuccessorCount() const = 0;
- virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- virtual bool KnownSuccessorBlock(HBasicBlock** block) {
- *block = NULL;
- return false;
- }
-
- HBasicBlock* FirstSuccessor() {
- return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
- }
- HBasicBlock* SecondSuccessor() {
- return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
- }
-
- void Not() {
- HBasicBlock* swap = SuccessorAt(0);
- SetSuccessorAt(0, SuccessorAt(1));
- SetSuccessorAt(1, swap);
- }
-
- DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
-};
-
-
-class HSuccessorIterator final BASE_EMBEDDED {
- public:
- explicit HSuccessorIterator(const HControlInstruction* instr)
- : instr_(instr), current_(0) {}
-
- bool Done() { return current_ >= instr_->SuccessorCount(); }
- HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
- void Advance() { current_++; }
-
- private:
- const HControlInstruction* instr_;
- int current_;
-};
-
-
-template<int S, int V>
-class HTemplateControlInstruction : public HControlInstruction {
- public:
- int SuccessorCount() const override { return S; }
- HBasicBlock* SuccessorAt(int i) const override { return successors_[i]; }
- void SetSuccessorAt(int i, HBasicBlock* block) override {
- successors_[i] = block;
- }
-
- int OperandCount() const override { return V; }
- HValue* OperandAt(int i) const override { return inputs_[i]; }
-
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) override {
- inputs_[i] = value;
- }
-
- private:
- EmbeddedContainer<HBasicBlock*, S> successors_;
- EmbeddedContainer<HValue*, V> inputs_;
-};
-
-
-class HBlockEntry final : public HTemplateInstruction<0> {
- public:
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BlockEntry)
-};
-
-
-class HDummyUse final : public HTemplateInstruction<1> {
- public:
- explicit HDummyUse(HValue* value)
- : HTemplateInstruction<1>(HType::Smi()) {
- SetOperandAt(0, value);
- // Pretend to be a Smi so that the HChange instructions inserted
- // before any use generate as little code as possible.
- set_representation(Representation::Tagged());
- }
-
- HValue* value() const { return OperandAt(0); }
-
- bool HasEscapingOperandAt(int index) override { return false; }
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(DummyUse);
-};
-
-
-// Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak final : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
-};
-
-
-class HPrologue final : public HTemplateInstruction<0> {
- public:
- static HPrologue* New(Zone* zone) { return new (zone) HPrologue(); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Prologue)
-};
-
-
-class HGoto final : public HTemplateControlInstruction<1, 0> {
- public:
- explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override {
- *block = FirstSuccessor();
- return true;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(Goto)
-};
-
-
-class HDeoptimize final : public HTemplateControlInstruction<1, 0> {
- public:
- static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context,
- DeoptimizeReason reason,
- Deoptimizer::BailoutType type,
- HBasicBlock* unreachable_continuation) {
- return new(zone) HDeoptimize(reason, type, unreachable_continuation);
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override {
- *block = NULL;
- return true;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DeoptimizeReason reason() const { return reason_; }
- Deoptimizer::BailoutType type() { return type_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
- private:
- explicit HDeoptimize(DeoptimizeReason reason, Deoptimizer::BailoutType type,
- HBasicBlock* unreachable_continuation)
- : reason_(reason), type_(type) {
- SetSuccessorAt(0, unreachable_continuation);
- }
-
- DeoptimizeReason reason_;
- Deoptimizer::BailoutType type_;
-};
-
-
-class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
- public:
- HUnaryControlInstruction(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target) {
- SetOperandAt(0, value);
- SetSuccessorAt(0, true_target);
- SetSuccessorAt(1, false_target);
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* value() const { return OperandAt(0); }
-};
-
-
-class HBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanHints);
- DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanHints, HBasicBlock*,
- HBasicBlock*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
- Representation observed_input_representation(int index) override;
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- ToBooleanHints expected_input_types() const { return expected_input_types_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch)
-
- private:
- HBranch(HValue* value,
- ToBooleanHints expected_input_types = ToBooleanHint::kNone,
- HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {}
-
- ToBooleanHints expected_input_types_;
-};
-
-
-class HCompareMap final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
- DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
- HBasicBlock*, HBasicBlock*);
-
- bool KnownSuccessorBlock(HBasicBlock** block) override {
- if (known_successor_index() != kNoKnownSuccessorIndex) {
- *block = SuccessorAt(known_successor_index());
- return true;
- }
- *block = NULL;
- return false;
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- static const int kNoKnownSuccessorIndex = -1;
- int known_successor_index() const {
- return KnownSuccessorIndexField::decode(bit_field_) -
- kInternalKnownSuccessorOffset;
- }
- void set_known_successor_index(int index) {
- DCHECK(index >= 0 - kInternalKnownSuccessorOffset);
- bit_field_ = KnownSuccessorIndexField::update(
- bit_field_, index + kInternalKnownSuccessorOffset);
- }
-
- Unique<Map> map() const { return map_; }
- bool map_is_stable() const { return MapIsStableField::decode(bit_field_); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMap)
-
- protected:
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- HCompareMap(HValue* value, Handle<Map> map, HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- bit_field_(KnownSuccessorIndexField::encode(
- kNoKnownSuccessorIndex + kInternalKnownSuccessorOffset) |
- MapIsStableField::encode(map->is_stable())),
- map_(Unique<Map>::CreateImmovable(map)) {
- set_representation(Representation::Tagged());
- }
-
- // BitFields can only store unsigned values, so use an offset.
- // Adding kInternalKnownSuccessorOffset must yield an unsigned value.
- static const int kInternalKnownSuccessorOffset = 1;
- STATIC_ASSERT(kNoKnownSuccessorIndex + kInternalKnownSuccessorOffset >= 0);
-
- class KnownSuccessorIndexField : public BitField<int, 0, 31> {};
- class MapIsStableField : public BitField<bool, 31, 1> {};
-
- uint32_t bit_field_;
- Unique<Map> map_;
-};
-
-
-class HContext final : public HTemplateInstruction<0> {
- public:
- static HContext* New(Zone* zone) {
- return new(zone) HContext();
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Context)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HContext() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
-class HReturn final : public HTemplateControlInstruction<0, 3> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- // TODO(titzer): require an Int32 input for faster returns.
- if (index == 2) return Representation::Smi();
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* value() const { return OperandAt(0); }
- HValue* context() const { return OperandAt(1); }
- HValue* parameter_count() const { return OperandAt(2); }
-
- DECLARE_CONCRETE_INSTRUCTION(Return)
-
- private:
- HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) {
- SetOperandAt(0, value);
- SetOperandAt(1, context);
- SetOperandAt(2, parameter_count);
- }
-};
-
-
-class HAbnormalExit final : public HTemplateControlInstruction<0, 0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
- private:
- HAbnormalExit() {}
-};
-
-
-class HUnaryOperation : public HTemplateInstruction<1> {
- public:
- explicit HUnaryOperation(HValue* value, HType type = HType::Tagged())
- : HTemplateInstruction<1>(type) {
- SetOperandAt(0, value);
- }
-
- static HUnaryOperation* cast(HValue* value) {
- return reinterpret_cast<HUnaryOperation*>(value);
- }
-
- HValue* value() const { return OperandAt(0); }
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-};
-
-
-class HUseConst final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UseConst)
-
- private:
- explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
-};
-
-
-class HForceRepresentation final : public HTemplateInstruction<1> {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* value,
- Representation required_representation);
-
- HValue* value() const { return OperandAt(0); }
-
- Representation observed_input_representation(int index) override {
- // We haven't actually *observed* this, but it's closer to the truth
- // than 'None'.
- return representation(); // Same as the output representation.
- }
- Representation RequiredInputRepresentation(int index) override {
- return representation(); // Same as the output representation.
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
-
- private:
- HForceRepresentation(HValue* value, Representation required_representation) {
- SetOperandAt(0, value);
- set_representation(required_representation);
- }
-};
-
-class HChange final : public HUnaryOperation {
- public:
- HChange(HValue* value, Representation to, bool is_truncating_to_smi,
- bool is_truncating_to_int32, bool is_truncating_to_number)
- : HUnaryOperation(value) {
- DCHECK(!value->representation().IsNone());
- DCHECK(!to.IsNone());
- DCHECK(!value->representation().Equals(to));
- set_representation(to);
- SetFlag(kUseGVN);
- SetFlag(kCanOverflow);
- if (is_truncating_to_smi && to.IsSmi()) {
- SetFlag(kTruncatingToSmi);
- SetFlag(kTruncatingToInt32);
- SetFlag(kTruncatingToNumber);
- } else if (is_truncating_to_int32) {
- SetFlag(kTruncatingToInt32);
- SetFlag(kTruncatingToNumber);
- } else if (is_truncating_to_number) {
- SetFlag(kTruncatingToNumber);
- }
- if (value->representation().IsSmi() || value->type().IsSmi()) {
- set_type(HType::Smi());
- } else {
- set_type(HType::TaggedNumber());
- if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
- }
- }
-
- HType CalculateInferredType() override;
- HValue* Canonicalize() override;
-
- Representation from() const { return value()->representation(); }
- Representation to() const { return representation(); }
- bool deoptimize_on_minus_zero() const {
- return CheckFlag(kBailoutOnMinusZero);
- }
- Representation RequiredInputRepresentation(int index) override {
- return from();
- }
-
- Range* InferRange(Zone* zone) override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(Change)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- bool IsDeletable() const override {
- return !from().IsTagged() || value()->type().IsSmi();
- }
-};
-
-
-class HClampToUint8 final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HClampToUint8(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kTruncatingToNumber);
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
-enum RemovableSimulate {
- REMOVABLE_SIMULATE,
- FIXED_SIMULATE
-};
-
-
-class HSimulate final : public HInstruction {
- public:
- HSimulate(BailoutId ast_id, int pop_count, Zone* zone,
- RemovableSimulate removable)
- : ast_id_(ast_id),
- pop_count_(pop_count),
- values_(2, zone),
- assigned_indexes_(2, zone),
- zone_(zone),
- bit_field_(RemovableField::encode(removable) |
- DoneWithReplayField::encode(false)) {}
- ~HSimulate() {}
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- bool HasAstId() const { return !ast_id_.IsNone(); }
- BailoutId ast_id() const { return ast_id_; }
- void set_ast_id(BailoutId id) {
- DCHECK(!HasAstId());
- ast_id_ = id;
- }
-
- int pop_count() const { return pop_count_; }
- const ZoneList<HValue*>* values() const { return &values_; }
- int GetAssignedIndexAt(int index) const {
- DCHECK(HasAssignedIndexAt(index));
- return assigned_indexes_[index];
- }
- bool HasAssignedIndexAt(int index) const {
- return assigned_indexes_[index] != kNoIndex;
- }
- void AddAssignedValue(int index, HValue* value) {
- AddValue(index, value);
- }
- void AddPushedValue(HValue* value) {
- AddValue(kNoIndex, value);
- }
- int ToOperandIndex(int environment_index) {
- for (int i = 0; i < assigned_indexes_.length(); ++i) {
- if (assigned_indexes_[i] == environment_index) return i;
- }
- return -1;
- }
- int OperandCount() const override { return values_.length(); }
- HValue* OperandAt(int index) const override { return values_[index]; }
-
- bool HasEscapingOperandAt(int index) override { return false; }
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- void MergeWith(ZoneList<HSimulate*>* list);
- bool is_candidate_for_removal() {
- return RemovableField::decode(bit_field_) == REMOVABLE_SIMULATE;
- }
-
- // Replay effects of this instruction on the given environment.
- void ReplayEnvironment(HEnvironment* env);
-
- DECLARE_CONCRETE_INSTRUCTION(Simulate)
-
-#ifdef DEBUG
- void Verify() override;
- void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
- Handle<JSFunction> closure() const { return closure_; }
-#endif
-
- protected:
- void InternalSetOperandAt(int index, HValue* value) override {
- values_[index] = value;
- }
-
- private:
- static const int kNoIndex = -1;
- void AddValue(int index, HValue* value) {
- assigned_indexes_.Add(index, zone_);
- // Resize the list of pushed values.
- values_.Add(NULL, zone_);
- // Set the operand through the base method in HValue to make sure that the
- // use lists are correctly updated.
- SetOperandAt(values_.length() - 1, value);
- }
- bool HasValueForIndex(int index) {
- for (int i = 0; i < assigned_indexes_.length(); ++i) {
- if (assigned_indexes_[i] == index) return true;
- }
- return false;
- }
- bool is_done_with_replay() const {
- return DoneWithReplayField::decode(bit_field_);
- }
- void set_done_with_replay() {
- bit_field_ = DoneWithReplayField::update(bit_field_, true);
- }
-
- class RemovableField : public BitField<RemovableSimulate, 0, 1> {};
- class DoneWithReplayField : public BitField<bool, 1, 1> {};
-
- BailoutId ast_id_;
- int pop_count_;
- ZoneList<HValue*> values_;
- ZoneList<int> assigned_indexes_;
- Zone* zone_;
- uint32_t bit_field_;
-
-#ifdef DEBUG
- Handle<JSFunction> closure_;
-#endif
-};
-
-
-class HEnvironmentMarker final : public HTemplateInstruction<1> {
- public:
- enum Kind { BIND, LOOKUP };
-
- DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int);
-
- Kind kind() const { return kind_; }
- int index() const { return index_; }
- HSimulate* next_simulate() { return next_simulate_; }
- void set_next_simulate(HSimulate* simulate) {
- next_simulate_ = simulate;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
-#ifdef DEBUG
- void set_closure(Handle<JSFunction> closure) {
- DCHECK(closure_.is_null());
- DCHECK(!closure.is_null());
- closure_ = closure;
- }
- Handle<JSFunction> closure() const { return closure_; }
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker);
-
- private:
- HEnvironmentMarker(Kind kind, int index)
- : kind_(kind), index_(index), next_simulate_(NULL) { }
-
- Kind kind_;
- int index_;
- HSimulate* next_simulate_;
-
-#ifdef DEBUG
- Handle<JSFunction> closure_;
-#endif
-};
-
-
-class HStackCheck final : public HTemplateInstruction<1> {
- public:
- enum Type {
- kFunctionEntry,
- kBackwardsBranch
- };
-
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type);
-
- HValue* context() { return OperandAt(0); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- void Eliminate() {
- // The stack check eliminator might try to eliminate the same stack
- // check instruction multiple times.
- if (IsLinked()) {
- DeleteAndReplaceWith(NULL);
- }
- }
-
- bool is_function_entry() { return type_ == kFunctionEntry; }
- bool is_backwards_branch() { return type_ == kBackwardsBranch; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck)
-
- private:
- HStackCheck(HValue* context, Type type) : type_(type) {
- SetOperandAt(0, context);
- SetChangesFlag(kNewSpacePromotion);
- }
-
- Type type_;
-};
-
-
-enum InliningKind {
- NORMAL_RETURN, // Drop the function from the environment on return.
- CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
- GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
- SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
-};
-
-
-class HArgumentsObject;
-class HConstant;
-
-
-class HEnterInlined final : public HTemplateInstruction<0> {
- public:
- static HEnterInlined* New(Isolate* isolate, Zone* zone, HValue* context,
- BailoutId return_id, Handle<JSFunction> closure,
- HConstant* closure_context, int arguments_count,
- FunctionLiteral* function,
- InliningKind inlining_kind, Variable* arguments_var,
- HArgumentsObject* arguments_object,
- TailCallMode syntactic_tail_call_mode) {
- return new (zone)
- HEnterInlined(return_id, closure, closure_context, arguments_count,
- function, inlining_kind, arguments_var, arguments_object,
- syntactic_tail_call_mode, zone);
- }
-
- void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
- ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Handle<SharedFunctionInfo> shared() const { return shared_; }
- Handle<JSFunction> closure() const { return closure_; }
- HConstant* closure_context() const { return closure_context_; }
- int arguments_count() const { return arguments_count_; }
- bool arguments_pushed() const { return arguments_pushed_; }
- void set_arguments_pushed() { arguments_pushed_ = true; }
- FunctionLiteral* function() const { return function_; }
- InliningKind inlining_kind() const { return inlining_kind_; }
- TailCallMode syntactic_tail_call_mode() const {
- return syntactic_tail_call_mode_;
- }
- BailoutId ReturnId() const { return return_id_; }
- int inlining_id() const { return inlining_id_; }
- void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- Variable* arguments_var() { return arguments_var_; }
- HArgumentsObject* arguments_object() { return arguments_object_; }
-
- DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
-
- private:
- HEnterInlined(BailoutId return_id, Handle<JSFunction> closure,
- HConstant* closure_context, int arguments_count,
- FunctionLiteral* function, InliningKind inlining_kind,
- Variable* arguments_var, HArgumentsObject* arguments_object,
- TailCallMode syntactic_tail_call_mode, Zone* zone)
- : return_id_(return_id),
- shared_(handle(closure->shared())),
- closure_(closure),
- closure_context_(closure_context),
- arguments_count_(arguments_count),
- arguments_pushed_(false),
- function_(function),
- inlining_kind_(inlining_kind),
- syntactic_tail_call_mode_(syntactic_tail_call_mode),
- inlining_id_(-1),
- arguments_var_(arguments_var),
- arguments_object_(arguments_object),
- return_targets_(2, zone) {}
-
- BailoutId return_id_;
- Handle<SharedFunctionInfo> shared_;
- Handle<JSFunction> closure_;
- HConstant* closure_context_;
- int arguments_count_;
- bool arguments_pushed_;
- FunctionLiteral* function_;
- InliningKind inlining_kind_;
- TailCallMode syntactic_tail_call_mode_;
- int inlining_id_;
- Variable* arguments_var_;
- HArgumentsObject* arguments_object_;
- ZoneList<HBasicBlock*> return_targets_;
-};
-
-
-class HLeaveInlined final : public HTemplateInstruction<0> {
- public:
- HLeaveInlined(HEnterInlined* entry,
- int drop_count)
- : entry_(entry),
- drop_count_(drop_count) { }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- int argument_delta() const override {
- return entry_->arguments_pushed() ? -drop_count_ : 0;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
-
- private:
- HEnterInlined* entry_;
- int drop_count_;
-};
-
-
-class HPushArguments final : public HInstruction {
- public:
- static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context) {
- return new(zone) HPushArguments(zone);
- }
- static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* arg1) {
- HPushArguments* instr = new(zone) HPushArguments(zone);
- instr->AddInput(arg1);
- return instr;
- }
- static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* arg1, HValue* arg2) {
- HPushArguments* instr = new(zone) HPushArguments(zone);
- instr->AddInput(arg1);
- instr->AddInput(arg2);
- return instr;
- }
- static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* arg1, HValue* arg2, HValue* arg3) {
- HPushArguments* instr = new(zone) HPushArguments(zone);
- instr->AddInput(arg1);
- instr->AddInput(arg2);
- instr->AddInput(arg3);
- return instr;
- }
- static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* arg1, HValue* arg2, HValue* arg3,
- HValue* arg4) {
- HPushArguments* instr = new(zone) HPushArguments(zone);
- instr->AddInput(arg1);
- instr->AddInput(arg2);
- instr->AddInput(arg3);
- instr->AddInput(arg4);
- return instr;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- int argument_delta() const override { return inputs_.length(); }
- HValue* argument(int i) { return OperandAt(i); }
-
- int OperandCount() const final { return inputs_.length(); }
- HValue* OperandAt(int i) const final { return inputs_[i]; }
-
- void AddInput(HValue* value);
-
- DECLARE_CONCRETE_INSTRUCTION(PushArguments)
-
- protected:
- void InternalSetOperandAt(int i, HValue* value) final { inputs_[i] = value; }
-
- private:
- explicit HPushArguments(Zone* zone)
- : HInstruction(HType::Tagged()), inputs_(4, zone) {
- set_representation(Representation::Tagged());
- }
-
- ZoneList<HValue*> inputs_;
-};
-
-
-class HThisFunction final : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HThisFunction() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
-class HDeclareGlobals final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HDeclareGlobals,
- Handle<FixedArray>, int,
- Handle<FeedbackVector>);
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> declarations() const { return declarations_; }
- int flags() const { return flags_; }
- Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- private:
- HDeclareGlobals(HValue* context, Handle<FixedArray> declarations, int flags,
- Handle<FeedbackVector> feedback_vector)
- : HUnaryOperation(context),
- declarations_(declarations),
- feedback_vector_(feedback_vector),
- flags_(flags) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Handle<FixedArray> declarations_;
- Handle<FeedbackVector> feedback_vector_;
- int flags_;
-};
-
-
-template <int V>
-class HCall : public HTemplateInstruction<V> {
- public:
- // The argument count includes the receiver.
- explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
- this->set_representation(Representation::Tagged());
- this->SetAllSideEffects();
- }
-
- virtual int argument_count() const {
- return argument_count_;
- }
-
- int argument_delta() const override { return -argument_count(); }
-
- private:
- int argument_count_;
-};
-
-
-class HUnaryCall : public HCall<1> {
- public:
- HUnaryCall(HValue* value, int argument_count)
- : HCall<1>(argument_count) {
- SetOperandAt(0, value);
- }
-
- Representation RequiredInputRepresentation(int index) final {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* value() const { return OperandAt(0); }
-};
-
-
-class HBinaryCall : public HCall<2> {
- public:
- HBinaryCall(HValue* first, HValue* second, int argument_count)
- : HCall<2>(argument_count) {
- SetOperandAt(0, first);
- SetOperandAt(1, second);
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) final {
- return Representation::Tagged();
- }
-
- HValue* first() const { return OperandAt(0); }
- HValue* second() const { return OperandAt(1); }
-};
-
-
-class HCallWithDescriptor final : public HInstruction {
- public:
- static HCallWithDescriptor* New(
- Isolate* isolate, Zone* zone, HValue* context, HValue* target,
- int argument_count, CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands,
- TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
- TailCallMode tail_call_mode = TailCallMode::kDisallow) {
- HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
- Code::STUB, context, target, argument_count, descriptor, operands,
- syntactic_tail_call_mode, tail_call_mode, zone);
- return res;
- }
-
- static HCallWithDescriptor* New(
- Isolate* isolate, Zone* zone, HValue* context, Code::Kind kind,
- HValue* target, int argument_count, CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands,
- TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
- TailCallMode tail_call_mode = TailCallMode::kDisallow) {
- HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
- kind, context, target, argument_count, descriptor, operands,
- syntactic_tail_call_mode, tail_call_mode, zone);
- return res;
- }
-
- int OperandCount() const final { return values_.length(); }
- HValue* OperandAt(int index) const final { return values_[index]; }
-
- Representation RequiredInputRepresentation(int index) final {
- if (index == 0 || index == 1) {
- // Target + context
- return Representation::Tagged();
- } else {
- int par_index = index - 2;
- DCHECK(par_index < GetParameterCount());
- return RepresentationFromMachineType(
- descriptor_.GetParameterType(par_index));
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
-
- // Defines whether this instruction corresponds to a JS call at tail position.
- TailCallMode syntactic_tail_call_mode() const {
- return SyntacticTailCallModeField::decode(bit_field_);
- }
-
- // Defines whether this call should be generated as a tail call.
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
- bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; }
-
- Code::Kind kind() const { return KindField::decode(bit_field_); }
-
- virtual int argument_count() const {
- return argument_count_;
- }
-
- int argument_delta() const override { return -argument_count_; }
-
- CallInterfaceDescriptor descriptor() const { return descriptor_; }
-
- HValue* target() { return OperandAt(0); }
- HValue* context() { return OperandAt(1); }
- HValue* parameter(int index) {
- DCHECK_LT(index, GetParameterCount());
- return OperandAt(index + 2);
- }
-
- HValue* Canonicalize() override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- private:
- // The argument count includes the receiver.
- HCallWithDescriptor(Code::Kind kind, HValue* context, HValue* target,
- int argument_count, CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands,
- TailCallMode syntactic_tail_call_mode,
- TailCallMode tail_call_mode, Zone* zone)
- : descriptor_(descriptor),
- values_(GetParameterCount() + 2, zone), // +2 for context and target.
- argument_count_(argument_count),
- bit_field_(
- TailCallModeField::encode(tail_call_mode) |
- SyntacticTailCallModeField::encode(syntactic_tail_call_mode) |
- KindField::encode(kind)) {
- DCHECK_EQ(operands.length(), GetParameterCount());
- // We can only tail call without any stack arguments.
- DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0);
- AddOperand(target, zone);
- AddOperand(context, zone);
- for (int i = 0; i < operands.length(); i++) {
- AddOperand(operands[i], zone);
- }
- this->set_representation(Representation::Tagged());
- this->SetAllSideEffects();
- }
-
- void AddOperand(HValue* v, Zone* zone) {
- values_.Add(NULL, zone);
- SetOperandAt(values_.length() - 1, v);
- }
-
- int GetParameterCount() const { return descriptor_.GetParameterCount(); }
-
- void InternalSetOperandAt(int index, HValue* value) final {
- values_[index] = value;
- }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<HValue*> values_;
- int argument_count_;
- class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
- class SyntacticTailCallModeField
- : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
- class KindField
- : public BitField<Code::Kind, SyntacticTailCallModeField::kNext, 5> {};
- uint32_t bit_field_;
-};
-
-
-class HInvokeFunction final : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HInvokeFunction, HValue*,
- Handle<JSFunction>, int,
- TailCallMode, TailCallMode);
-
- HValue* context() { return first(); }
- HValue* function() { return second(); }
- Handle<JSFunction> known_function() { return known_function_; }
- int formal_parameter_count() const { return formal_parameter_count_; }
-
- bool HasStackCheck() final { return HasStackCheckField::decode(bit_field_); }
-
- // Defines whether this instruction corresponds to a JS call at tail position.
- TailCallMode syntactic_tail_call_mode() const {
- return SyntacticTailCallModeField::decode(bit_field_);
- }
-
- // Defines whether this call should be generated as a tail call.
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
-
- std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- private:
- void set_has_stack_check(bool has_stack_check) {
- bit_field_ = HasStackCheckField::update(bit_field_, has_stack_check);
- }
-
- HInvokeFunction(HValue* context, HValue* function,
- Handle<JSFunction> known_function, int argument_count,
- TailCallMode syntactic_tail_call_mode,
- TailCallMode tail_call_mode)
- : HBinaryCall(context, function, argument_count),
- known_function_(known_function),
- bit_field_(
- TailCallModeField::encode(tail_call_mode) |
- SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
- DCHECK(tail_call_mode != TailCallMode::kAllow ||
- syntactic_tail_call_mode == TailCallMode::kAllow);
- formal_parameter_count_ =
- known_function.is_null()
- ? 0
- : known_function->shared()->internal_formal_parameter_count();
- set_has_stack_check(
- !known_function.is_null() &&
- (known_function->code()->kind() == Code::FUNCTION ||
- known_function->code()->kind() == Code::OPTIMIZED_FUNCTION));
- }
-
- Handle<JSFunction> known_function_;
- int formal_parameter_count_;
-
- class HasStackCheckField : public BitField<bool, 0, 1> {};
- class TailCallModeField
- : public BitField<TailCallMode, HasStackCheckField::kNext, 1> {};
- class SyntacticTailCallModeField
- : public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
- uint32_t bit_field_;
-};
-
-
-class HCallNewArray final : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray, HValue*, int,
- ElementsKind,
- Handle<AllocationSite>);
-
- HValue* context() { return first(); }
- HValue* constructor() { return second(); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- ElementsKind elements_kind() const { return elements_kind_; }
- Handle<AllocationSite> site() const { return site_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
-
- private:
- HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- ElementsKind elements_kind, Handle<AllocationSite> site)
- : HBinaryCall(context, constructor, argument_count),
- elements_kind_(elements_kind),
- site_(site) {}
-
- ElementsKind elements_kind_;
- Handle<AllocationSite> site_;
-};
-
-
-class HCallRuntime final : public HCall<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallRuntime,
- const Runtime::Function*, int);
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* context() { return OperandAt(0); }
- const Runtime::Function* function() const { return c_function_; }
- SaveFPRegsMode save_doubles() const { return save_doubles_; }
- void set_save_doubles(SaveFPRegsMode save_doubles) {
- save_doubles_ = save_doubles;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
-
- private:
- HCallRuntime(HValue* context, const Runtime::Function* c_function,
- int argument_count)
- : HCall<1>(argument_count),
- c_function_(c_function),
- save_doubles_(kDontSaveFPRegs) {
- SetOperandAt(0, context);
- }
-
- const Runtime::Function* c_function_;
- SaveFPRegsMode save_doubles_;
-};
-
-
-class HUnaryMathOperation final : public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* value, BuiltinFunctionId op);
-
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- if (index == 0) {
- return Representation::Tagged();
- } else {
- switch (op_) {
- case kMathCos:
- case kMathFloor:
- case kMathRound:
- case kMathFround:
- case kMathSin:
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathExp:
- return Representation::Double();
- case kMathAbs:
- return representation();
- case kMathClz32:
- return Representation::Integer32();
- default:
- UNREACHABLE();
- return Representation::None();
- }
- }
- }
-
- Range* InferRange(Zone* zone) override;
-
- HValue* Canonicalize() override;
- Representation RepresentationFromUses() override;
- Representation RepresentationFromInputs() override;
-
- BuiltinFunctionId op() const { return op_; }
- const char* OpName() const;
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
-
- protected:
- bool DataEquals(HValue* other) override {
- HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
- return op_ == b->op();
- }
-
- private:
- // Indicates if we support a double (and int32) output for Math.floor and
- // Math.round.
- bool SupportsFlexibleFloorAndRound() const {
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
- return true;
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
- return CpuFeatures::IsSupported(SSE4_1);
-#else
- return false;
-#endif
- }
- HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
- : HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- switch (op) {
- case kMathFloor:
- case kMathRound:
- if (SupportsFlexibleFloorAndRound()) {
- SetFlag(kFlexibleRepresentation);
- } else {
- set_representation(Representation::Integer32());
- }
- break;
- case kMathClz32:
- set_representation(Representation::Integer32());
- break;
- case kMathAbs:
- // Not setting representation here: it is None intentionally.
- SetFlag(kFlexibleRepresentation);
- // TODO(svenpanne) This flag is actually only needed if representation()
- // is tagged, and not when it is an unboxed double or unboxed integer.
- SetChangesFlag(kNewSpacePromotion);
- break;
- case kMathCos:
- case kMathFround:
- case kMathLog:
- case kMathExp:
- case kMathSin:
- case kMathSqrt:
- case kMathPowHalf:
- set_representation(Representation::Double());
- break;
- default:
- UNREACHABLE();
- }
- SetFlag(kUseGVN);
- SetFlag(kTruncatingToNumber);
- }
-
- bool IsDeletable() const override {
-    // TODO(crankshaft): This should be true; however, the semantics of this
-    // instruction also include the ToNumber conversion that is mentioned in
-    // the spec, which is of course observable.
- return false;
- }
-
- HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
- HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
-
- BuiltinFunctionId op_;
-};
-
-
-class HLoadRoot final : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
- DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- Heap::RootListIndex index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
-
- protected:
- bool DataEquals(HValue* other) override {
- HLoadRoot* b = HLoadRoot::cast(other);
- return index_ == b->index_;
- }
-
- private:
- explicit HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
- : HTemplateInstruction<0>(type), index_(index) {
- SetFlag(kUseGVN);
- // TODO(bmeurer): We'll need kDependsOnRoots once we add the
- // corresponding HStoreRoot instruction.
- SetDependsOnFlag(kCalls);
- set_representation(Representation::Tagged());
- }
-
- bool IsDeletable() const override { return true; }
-
- const Heap::RootListIndex index_;
-};
-
-
-class HCheckMaps final : public HTemplateInstruction<2> {
- public:
- static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* value, Handle<Map> map,
- HValue* typecheck = NULL) {
- return new(zone) HCheckMaps(value, new(zone) UniqueSet<Map>(
- Unique<Map>::CreateImmovable(map), zone), typecheck);
- }
- static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* value, SmallMapList* map_list,
- HValue* typecheck = NULL) {
- UniqueSet<Map>* maps = new(zone) UniqueSet<Map>(map_list->length(), zone);
- for (int i = 0; i < map_list->length(); ++i) {
- maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone);
- }
- return new(zone) HCheckMaps(value, maps, typecheck);
- }
-
- bool IsStabilityCheck() const {
- return IsStabilityCheckField::decode(bit_field_);
- }
- void MarkAsStabilityCheck() {
- bit_field_ = MapsAreStableField::encode(true) |
- HasMigrationTargetField::encode(false) |
- IsStabilityCheckField::encode(true);
- ClearChangesFlag(kNewSpacePromotion);
- ClearDependsOnFlag(kElementsKind);
- ClearDependsOnFlag(kMaps);
- }
-
- bool HasEscapingOperandAt(int index) override { return false; }
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HType CalculateInferredType() override {
- if (value()->type().IsHeapObject()) return value()->type();
- return HType::HeapObject();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* value() const { return OperandAt(0); }
- HValue* typecheck() const { return OperandAt(1); }
-
- const UniqueSet<Map>* maps() const { return maps_; }
- void set_maps(const UniqueSet<Map>* maps) { maps_ = maps; }
-
- bool maps_are_stable() const {
- return MapsAreStableField::decode(bit_field_);
- }
-
- bool HasMigrationTarget() const {
- return HasMigrationTargetField::decode(bit_field_);
- }
-
- HValue* Canonicalize() override;
-
- static HCheckMaps* CreateAndInsertAfter(Zone* zone,
- HValue* value,
- Unique<Map> map,
- bool map_is_stable,
- HInstruction* instr) {
- return instr->Append(new(zone) HCheckMaps(
- value, new(zone) UniqueSet<Map>(map, zone), map_is_stable));
- }
-
- static HCheckMaps* CreateAndInsertBefore(Zone* zone,
- HValue* value,
- const UniqueSet<Map>* maps,
- bool maps_are_stable,
- HInstruction* instr) {
- return instr->Prepend(new(zone) HCheckMaps(value, maps, maps_are_stable));
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
-
- protected:
- bool DataEquals(HValue* other) override {
- return this->maps()->Equals(HCheckMaps::cast(other)->maps());
- }
-
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- HCheckMaps(HValue* value, const UniqueSet<Map>* maps, bool maps_are_stable)
- : HTemplateInstruction<2>(HType::HeapObject()),
- maps_(maps),
- bit_field_(HasMigrationTargetField::encode(false) |
- IsStabilityCheckField::encode(false) |
- MapsAreStableField::encode(maps_are_stable)) {
- DCHECK_NE(0, maps->size());
- SetOperandAt(0, value);
- // Use the object value for the dependency.
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kElementsKind);
- }
-
- HCheckMaps(HValue* value, const UniqueSet<Map>* maps, HValue* typecheck)
- : HTemplateInstruction<2>(HType::HeapObject()),
- maps_(maps),
- bit_field_(HasMigrationTargetField::encode(false) |
- IsStabilityCheckField::encode(false) |
- MapsAreStableField::encode(true)) {
- DCHECK_NE(0, maps->size());
- SetOperandAt(0, value);
- // Use the object value for the dependency if NULL is passed.
- SetOperandAt(1, typecheck ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kElementsKind);
- for (int i = 0; i < maps->size(); ++i) {
- Handle<Map> map = maps->at(i).handle();
- if (map->is_migration_target()) {
- bit_field_ = HasMigrationTargetField::update(bit_field_, true);
- }
- if (!map->is_stable()) {
- bit_field_ = MapsAreStableField::update(bit_field_, false);
- }
- }
- if (HasMigrationTarget()) SetChangesFlag(kNewSpacePromotion);
- }
-
- class HasMigrationTargetField : public BitField<bool, 0, 1> {};
- class IsStabilityCheckField : public BitField<bool, 1, 1> {};
- class MapsAreStableField : public BitField<bool, 2, 1> {};
-
- const UniqueSet<Map>* maps_;
- uint32_t bit_field_;
-};
-
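
The bit_field_ member above packs several one-bit flags into a single uint32_t via the BitField template, the same pattern used by HConstant, HApplyArguments and the HCallFunction fragment earlier in this hunk. The following self-contained sketch shows how encode/decode/update cooperate; the BitField class here is a simplified stand-in written purely for illustration, not the real template from src/utils.h.

#include <cstdint>
#include <iostream>

// Simplified stand-in for v8::internal::BitField (illustration only).
template <class T, int shift, int size>
class BitField {
 public:
  static const int kNext = shift + size;
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t field, T value) {
    return (field & ~kMask) | encode(value);
  }
};

// The three flags HCheckMaps keeps in bit_field_.
class HasMigrationTargetField : public BitField<bool, 0, 1> {};
class IsStabilityCheckField : public BitField<bool, 1, 1> {};
class MapsAreStableField : public BitField<bool, 2, 1> {};

int main() {
  // Same initial encoding as the HCheckMaps constructors above.
  uint32_t bit_field = HasMigrationTargetField::encode(false) |
                       IsStabilityCheckField::encode(false) |
                       MapsAreStableField::encode(true);
  // One flag is later flipped without disturbing the others, as in the
  // migration-target loop of the typecheck constructor.
  bit_field = HasMigrationTargetField::update(bit_field, true);
  std::cout << HasMigrationTargetField::decode(bit_field) << " "
            << IsStabilityCheckField::decode(bit_field) << " "
            << MapsAreStableField::decode(bit_field) << std::endl;  // 1 0 1
}

Compiled standalone this prints "1 0 1": the updated migration-target flag plus the untouched stability bits.
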
-
-class HCheckValue final : public HUnaryOperation {
- public:
- static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* value, Handle<JSFunction> func) {
- bool in_new_space = isolate->heap()->InNewSpace(*func);
- // NOTE: We create an uninitialized Unique and initialize it later.
- // This is because a JSFunction can move due to GC during graph creation.
- Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
- HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
- return check;
- }
- static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* value, Unique<HeapObject> target,
- bool object_in_new_space) {
- return new(zone) HCheckValue(value, target, object_in_new_space);
- }
-
- void FinalizeUniqueness() override {
- object_ = Unique<HeapObject>(object_.handle());
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* Canonicalize() override;
-
-#ifdef DEBUG
- void Verify() override;
-#endif
-
- Unique<HeapObject> object() const { return object_; }
- bool object_in_new_space() const { return object_in_new_space_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue)
-
- protected:
- bool DataEquals(HValue* other) override {
- HCheckValue* b = HCheckValue::cast(other);
- return object_ == b->object_;
- }
-
- private:
- HCheckValue(HValue* value, Unique<HeapObject> object,
- bool object_in_new_space)
- : HUnaryOperation(value, value->type()),
- object_(object),
- object_in_new_space_(object_in_new_space) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- Unique<HeapObject> object_;
- bool object_in_new_space_;
-};
-
-
-class HCheckInstanceType final : public HUnaryOperation {
- public:
- enum Check {
- IS_JS_RECEIVER,
- IS_JS_ARRAY,
- IS_JS_FUNCTION,
- IS_JS_DATE,
- IS_STRING,
- IS_INTERNALIZED_STRING,
- LAST_INTERVAL_CHECK = IS_JS_DATE
- };
-
- DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HType CalculateInferredType() override {
- switch (check_) {
- case IS_JS_RECEIVER: return HType::JSReceiver();
- case IS_JS_ARRAY: return HType::JSArray();
- case IS_JS_FUNCTION:
- return HType::JSObject();
- case IS_JS_DATE: return HType::JSObject();
- case IS_STRING: return HType::String();
- case IS_INTERNALIZED_STRING: return HType::String();
- }
- UNREACHABLE();
- return HType::Tagged();
- }
-
- HValue* Canonicalize() override;
-
- bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
- void GetCheckInterval(InstanceType* first, InstanceType* last);
- void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
-
- Check check() const { return check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
-
- protected:
-  // TODO(ager): It could be nice to allow the omission of instance
- // type checks if we have already performed an instance type check
- // with a larger range.
- bool DataEquals(HValue* other) override {
- HCheckInstanceType* b = HCheckInstanceType::cast(other);
- return check_ == b->check_;
- }
-
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- const char* GetCheckName() const;
-
- HCheckInstanceType(HValue* value, Check check)
- : HUnaryOperation(value, HType::HeapObject()), check_(check) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- const Check check_;
-};
-
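
HCheckInstanceType above lowers to one of two machine-level shapes, selected by is_interval_check(): a pair of comparisons against a contiguous InstanceType interval, or a single mask-and-tag test when every matching type shares a bit pattern. A minimal sketch of both shapes follows; the concrete numbers are invented for illustration and do not correspond to the real instance type encoding in src/objects.h.

#include <cstdint>
#include <iostream>

typedef uint8_t InstanceType;  // The real InstanceType is an enum in objects.h.

// Interval form (GetCheckInterval): two comparisons, inclusive range.
bool CheckInterval(InstanceType type, InstanceType first, InstanceType last) {
  return first <= type && type <= last;
}

// Mask-and-tag form (GetCheckMaskAndTag): one AND plus one comparison, usable
// when all matching types share the same bits under the mask.
bool CheckMaskAndTag(InstanceType type, uint8_t mask, uint8_t tag) {
  return (type & mask) == tag;
}

int main() {
  // Purely illustrative layout: pretend types 0x00..0x3F are strings and the
  // top two bits are clear exactly for string types.
  const InstanceType kFirstStringType = 0x00;
  const InstanceType kLastStringType = 0x3F;
  const uint8_t kIsNotStringMask = 0xC0;
  const uint8_t kStringTag = 0x00;
  InstanceType type = 0x23;
  std::cout << CheckInterval(type, kFirstStringType, kLastStringType) << " "
            << CheckMaskAndTag(type, kIsNotStringMask, kStringTag)
            << std::endl;  // Prints "1 1": both shapes accept this type.
}
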
-
-class HCheckSmi final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* Canonicalize() override {
- HType value_type = value()->type();
- if (value_type.IsSmi()) {
- return NULL;
- }
- return this;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
- set_representation(Representation::Smi());
- SetFlag(kUseGVN);
- }
-};
-
-
-class HCheckArrayBufferNotNeutered final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HCheckArrayBufferNotNeutered, HValue*);
-
- bool HasEscapingOperandAt(int index) override { return false; }
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HType CalculateInferredType() override {
- if (value()->type().IsHeapObject()) return value()->type();
- return HType::HeapObject();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- explicit HCheckArrayBufferNotNeutered(HValue* value)
- : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kCalls);
- }
-};
-
-
-class HCheckHeapObject final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
-
- bool HasEscapingOperandAt(int index) override { return false; }
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HType CalculateInferredType() override {
- if (value()->type().IsHeapObject()) return value()->type();
- return HType::HeapObject();
- }
-
-#ifdef DEBUG
- void Verify() override;
-#endif
-
- HValue* Canonicalize() override {
- return value()->type().IsHeapObject() ? NULL : this;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-};
-
-
-class HPhi final : public HValue {
- public:
- HPhi(int merged_index, Zone* zone)
- : inputs_(2, zone), merged_index_(merged_index) {
- DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex);
- SetFlag(kFlexibleRepresentation);
- }
-
- Representation RepresentationFromInputs() override;
-
- Range* InferRange(Zone* zone) override;
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
- Representation KnownOptimalRepresentation() override {
- return representation();
- }
- HType CalculateInferredType() override;
- int OperandCount() const override { return inputs_.length(); }
- HValue* OperandAt(int index) const override { return inputs_[index]; }
- HValue* GetRedundantReplacement();
- void AddInput(HValue* value);
- bool HasRealUses();
-
- bool IsReceiver() const { return merged_index_ == 0; }
- bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
-
- SourcePosition position() const override;
-
- int merged_index() const { return merged_index_; }
-
- std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
-
-#ifdef DEBUG
- void Verify() override;
-#endif
-
- void InitRealUses(int id);
- void AddNonPhiUsesFrom(HPhi* other);
-
- Representation representation_from_indirect_uses() const {
- return representation_from_indirect_uses_;
- }
-
- bool has_type_feedback_from_uses() const {
- return has_type_feedback_from_uses_;
- }
-
- int phi_id() { return phi_id_; }
-
- static HPhi* cast(HValue* value) {
- DCHECK(value->IsPhi());
- return reinterpret_cast<HPhi*>(value);
- }
- Opcode opcode() const override { return HValue::kPhi; }
-
- void SimplifyConstantInputs();
-
- // Marker value representing an invalid merge index.
- static const int kInvalidMergedIndex = -1;
-
- protected:
- void DeleteFromGraph() override;
- void InternalSetOperandAt(int index, HValue* value) override {
- inputs_[index] = value;
- }
-
- private:
- Representation representation_from_non_phi_uses() const {
- return representation_from_non_phi_uses_;
- }
-
- ZoneList<HValue*> inputs_;
- int merged_index_ = 0;
-
- int phi_id_ = -1;
-
- Representation representation_from_indirect_uses_ = Representation::None();
- Representation representation_from_non_phi_uses_ = Representation::None();
- bool has_type_feedback_from_uses_ = false;
-
- bool IsDeletable() const override { return !IsReceiver(); }
-};
-
-
-// Common base class for HArgumentsObject and HCapturedObject.
-class HDematerializedObject : public HInstruction {
- public:
- HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
-
- int OperandCount() const final { return values_.length(); }
- HValue* OperandAt(int index) const final { return values_[index]; }
-
- bool HasEscapingOperandAt(int index) final { return false; }
- Representation RequiredInputRepresentation(int index) final {
- return Representation::None();
- }
-
- protected:
- void InternalSetOperandAt(int index, HValue* value) final {
- values_[index] = value;
- }
-
- // List of values tracked by this marker.
- ZoneList<HValue*> values_;
-};
-
-
-class HArgumentsObject final : public HDematerializedObject {
- public:
- static HArgumentsObject* New(Isolate* isolate, Zone* zone, HValue* context,
- int count) {
- return new(zone) HArgumentsObject(count, zone);
- }
-
- // The values contain a list of all elements in the arguments object
- // including the receiver object, which is skipped when materializing.
- const ZoneList<HValue*>* arguments_values() const { return &values_; }
- int arguments_count() const { return values_.length(); }
-
- void AddArgument(HValue* argument, Zone* zone) {
- values_.Add(NULL, zone); // Resize list.
- SetOperandAt(values_.length() - 1, argument);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
-
- private:
- HArgumentsObject(int count, Zone* zone)
- : HDematerializedObject(count, zone) {
- set_representation(Representation::Tagged());
- SetFlag(kIsArguments);
- }
-};
-
-
-class HCapturedObject final : public HDematerializedObject {
- public:
- HCapturedObject(int length, int id, Zone* zone)
- : HDematerializedObject(length, zone), capture_id_(id) {
- set_representation(Representation::Tagged());
- values_.AddBlock(NULL, length, zone); // Resize list.
- }
-
- // The values contain a list of all in-object properties inside the
-  // captured object and are indexed by field index. Properties in the
- // properties or elements backing store are not tracked here.
- const ZoneList<HValue*>* values() const { return &values_; }
- int length() const { return values_.length(); }
- int capture_id() const { return capture_id_; }
-
- // Shortcut for the map value of this captured object.
- HValue* map_value() const { return values()->first(); }
-
- void ReuseSideEffectsFromStore(HInstruction* store) {
- DCHECK(store->HasObservableSideEffects());
- DCHECK(store->IsStoreNamedField());
- changes_flags_.Add(store->ChangesFlags());
- }
-
- // Replay effects of this instruction on the given environment.
- void ReplayEnvironment(HEnvironment* env);
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
-
- private:
- int capture_id_;
-
- // Note that we cannot DCE captured objects as they are used to replay
- // the environment. This method is here as an explicit reminder.
- // TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
- bool IsDeletable() const final { return false; }
-};
-
-
-class HConstant final : public HTemplateInstruction<0> {
- public:
- enum Special { kHoleNaN };
-
- DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Special);
- DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
- DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
- DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
- DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
- DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
-
- static HConstant* CreateAndInsertAfter(Isolate* isolate, Zone* zone,
- HValue* context, int32_t value,
- Representation representation,
- HInstruction* instruction) {
- return instruction->Append(
- HConstant::New(isolate, zone, context, value, representation));
- }
-
- Handle<Map> GetMonomorphicJSObjectMap() override {
- Handle<Object> object = object_.handle();
- if (!object.is_null() && object->IsHeapObject()) {
- return v8::internal::handle(HeapObject::cast(*object)->map());
- }
- return Handle<Map>();
- }
-
- static HConstant* CreateAndInsertBefore(Isolate* isolate, Zone* zone,
- HValue* context, int32_t value,
- Representation representation,
- HInstruction* instruction) {
- return instruction->Prepend(
- HConstant::New(isolate, zone, context, value, representation));
- }
-
- static HConstant* CreateAndInsertBefore(Zone* zone,
- Unique<Map> map,
- bool map_is_stable,
- HInstruction* instruction) {
- return instruction->Prepend(new(zone) HConstant(
- map, Unique<Map>(Handle<Map>::null()), map_is_stable,
- Representation::Tagged(), HType::HeapObject(), true,
- false, false, MAP_TYPE));
- }
-
- static HConstant* CreateAndInsertAfter(Zone* zone,
- Unique<Map> map,
- bool map_is_stable,
- HInstruction* instruction) {
- return instruction->Append(new(zone) HConstant(
- map, Unique<Map>(Handle<Map>::null()), map_is_stable,
- Representation::Tagged(), HType::HeapObject(), true,
- false, false, MAP_TYPE));
- }
-
- Handle<Object> handle(Isolate* isolate) {
- if (object_.handle().is_null()) {
- // Default arguments to is_not_in_new_space depend on this heap number
-      // being tenured so that it's guaranteed not to be located in new space.
- object_ = Unique<Object>::CreateUninitialized(
- isolate->factory()->NewNumber(double_value_, TENURED));
- }
- AllowDeferredHandleDereference smi_check;
- DCHECK(HasInteger32Value() || !object_.handle()->IsSmi());
- return object_.handle();
- }
-
- bool IsSpecialDouble() const {
- return HasDoubleValue() &&
- (bit_cast<int64_t>(double_value_) == bit_cast<int64_t>(-0.0) ||
- std::isnan(double_value_));
- }
-
- bool NotInNewSpace() const {
- return IsNotInNewSpaceField::decode(bit_field_);
- }
-
- bool ImmortalImmovable() const;
-
- bool IsCell() const {
- InstanceType instance_type = GetInstanceType();
- return instance_type == CELL_TYPE;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- Representation KnownOptimalRepresentation() override {
- if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
- if (HasInteger32Value()) return Representation::Integer32();
- if (HasNumberValue()) return Representation::Double();
- if (HasExternalReferenceValue()) return Representation::External();
- return Representation::Tagged();
- }
-
- bool EmitAtUses() override;
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
- Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
- Maybe<HConstant*> CopyToTruncatedNumber(Isolate* isolate, Zone* zone);
- bool HasInteger32Value() const {
- return HasInt32ValueField::decode(bit_field_);
- }
- int32_t Integer32Value() const {
- DCHECK(HasInteger32Value());
- return int32_value_;
- }
- bool HasSmiValue() const { return HasSmiValueField::decode(bit_field_); }
- bool HasDoubleValue() const {
- return HasDoubleValueField::decode(bit_field_);
- }
- double DoubleValue() const {
- DCHECK(HasDoubleValue());
- return double_value_;
- }
- uint64_t DoubleValueAsBits() const {
- DCHECK(HasDoubleValue());
- return bit_cast<uint64_t>(double_value_);
- }
- bool IsTheHole() const {
- if (HasDoubleValue() && DoubleValueAsBits() == kHoleNanInt64) {
- return true;
- }
- return object_.IsInitialized() &&
- object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
- }
- bool HasNumberValue() const { return HasDoubleValue(); }
- int32_t NumberValueAsInteger32() const {
- DCHECK(HasNumberValue());
- // Irrespective of whether a numeric HConstant can be safely
- // represented as an int32, we store the (in some cases lossy)
- // representation of the number in int32_value_.
- return int32_value_;
- }
- bool HasStringValue() const {
- if (HasNumberValue()) return false;
- DCHECK(!object_.handle().is_null());
- return GetInstanceType() < FIRST_NONSTRING_TYPE;
- }
- Handle<String> StringValue() const {
- DCHECK(HasStringValue());
- return Handle<String>::cast(object_.handle());
- }
- bool HasInternalizedStringValue() const {
- return HasStringValue() && StringShape(GetInstanceType()).IsInternalized();
- }
-
- bool HasExternalReferenceValue() const {
- return HasExternalReferenceValueField::decode(bit_field_);
- }
- ExternalReference ExternalReferenceValue() const {
- return external_reference_value_;
- }
-
- bool HasBooleanValue() const { return type_.IsBoolean(); }
- bool BooleanValue() const { return BooleanValueField::decode(bit_field_); }
- bool IsCallable() const { return IsCallableField::decode(bit_field_); }
- bool IsUndetectable() const {
- return IsUndetectableField::decode(bit_field_);
- }
- InstanceType GetInstanceType() const {
- return InstanceTypeField::decode(bit_field_);
- }
-
- bool HasMapValue() const { return GetInstanceType() == MAP_TYPE; }
- Unique<Map> MapValue() const {
- DCHECK(HasMapValue());
- return Unique<Map>::cast(GetUnique());
- }
- bool HasStableMapValue() const {
- DCHECK(HasMapValue() || !HasStableMapValueField::decode(bit_field_));
- return HasStableMapValueField::decode(bit_field_);
- }
-
- bool HasObjectMap() const { return !object_map_.IsNull(); }
- Unique<Map> ObjectMap() const {
- DCHECK(HasObjectMap());
- return object_map_;
- }
-
- intptr_t Hashcode() override {
- if (HasInteger32Value()) {
- return static_cast<intptr_t>(int32_value_);
- } else if (HasDoubleValue()) {
- uint64_t bits = DoubleValueAsBits();
- if (sizeof(bits) > sizeof(intptr_t)) {
- bits ^= (bits >> 32);
- }
- return static_cast<intptr_t>(bits);
- } else if (HasExternalReferenceValue()) {
- return reinterpret_cast<intptr_t>(external_reference_value_.address());
- } else {
- DCHECK(!object_.handle().is_null());
- return object_.Hashcode();
- }
- }
-
- void FinalizeUniqueness() override {
- if (!HasDoubleValue() && !HasExternalReferenceValue()) {
- DCHECK(!object_.handle().is_null());
- object_ = Unique<Object>(object_.handle());
- }
- }
-
- Unique<Object> GetUnique() const {
- return object_;
- }
-
- bool EqualsUnique(Unique<Object> other) const {
- return object_.IsInitialized() && object_ == other;
- }
-
- bool DataEquals(HValue* other) override {
- HConstant* other_constant = HConstant::cast(other);
- if (HasInteger32Value()) {
- return other_constant->HasInteger32Value() &&
- int32_value_ == other_constant->int32_value_;
- } else if (HasDoubleValue()) {
- return other_constant->HasDoubleValue() &&
- std::memcmp(&double_value_, &other_constant->double_value_,
- sizeof(double_value_)) == 0;
- } else if (HasExternalReferenceValue()) {
- return other_constant->HasExternalReferenceValue() &&
- external_reference_value_ ==
- other_constant->external_reference_value_;
- } else {
- if (other_constant->HasInteger32Value() ||
- other_constant->HasDoubleValue() ||
- other_constant->HasExternalReferenceValue()) {
- return false;
- }
- DCHECK(!object_.handle().is_null());
- return other_constant->object_ == object_;
- }
- }
-
-#ifdef DEBUG
- void Verify() override {}
-#endif
-
- DECLARE_CONCRETE_INSTRUCTION(Constant)
-
- protected:
- Range* InferRange(Zone* zone) override;
-
- private:
- friend class HGraph;
- explicit HConstant(Special special);
- explicit HConstant(Handle<Object> handle,
- Representation r = Representation::None());
- HConstant(int32_t value,
- Representation r = Representation::None(),
- bool is_not_in_new_space = true,
- Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
- HConstant(double value,
- Representation r = Representation::None(),
- bool is_not_in_new_space = true,
- Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
- HConstant(Unique<Object> object,
- Unique<Map> object_map,
- bool has_stable_map_value,
- Representation r,
- HType type,
- bool is_not_in_new_space,
- bool boolean_value,
- bool is_undetectable,
- InstanceType instance_type);
-
- explicit HConstant(ExternalReference reference);
-
- void Initialize(Representation r);
-
- bool IsDeletable() const override { return true; }
-
- // If object_ is a map, this indicates whether the map is stable.
- class HasStableMapValueField : public BitField<bool, 0, 1> {};
-
- // We store the HConstant in the most specific form safely possible.
- // These flags tell us if the respective member fields hold valid, safe
- // representations of the constant. More specific flags imply more general
- // flags, but not the converse (i.e. smi => int32 => double).
- class HasSmiValueField : public BitField<bool, 1, 1> {};
- class HasInt32ValueField : public BitField<bool, 2, 1> {};
- class HasDoubleValueField : public BitField<bool, 3, 1> {};
-
- class HasExternalReferenceValueField : public BitField<bool, 4, 1> {};
- class IsNotInNewSpaceField : public BitField<bool, 5, 1> {};
- class BooleanValueField : public BitField<bool, 6, 1> {};
- class IsUndetectableField : public BitField<bool, 7, 1> {};
- class IsCallableField : public BitField<bool, 8, 1> {};
-
- static const InstanceType kUnknownInstanceType = FILLER_TYPE;
- class InstanceTypeField : public BitField<InstanceType, 16, 8> {};
-
- // If this is a numerical constant, object_ either points to the
- // HeapObject the constant originated from or is null. If the
- // constant is non-numeric, object_ always points to a valid
- // constant HeapObject.
- Unique<Object> object_;
-
- // If object_ is a heap object, this points to the stable map of the object.
- Unique<Map> object_map_;
-
- uint32_t bit_field_;
-
- int32_t int32_value_;
- double double_value_;
- ExternalReference external_reference_value_;
-};
-
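
Two details of HConstant above rely on reading a double's raw bit pattern: IsSpecialDouble() compares against the bits of -0.0 (which operator== cannot distinguish from +0.0) and checks for NaN, while Hashcode() folds the 64-bit pattern down to an intptr_t on 32-bit targets. A self-contained sketch of both, with a memcpy-based stand-in for bit_cast:

#include <cmath>
#include <cstdint>
#include <cstring>
#include <iostream>

// memcpy-based stand-in for v8's bit_cast.
template <class Dest, class Source>
Dest bit_cast(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

// Mirrors HConstant::IsSpecialDouble(): -0.0 and NaN are only visible in the
// bit pattern, since -0.0 == 0.0 and NaN != NaN under operator==.
bool IsSpecialDouble(double value) {
  return bit_cast<int64_t>(value) == bit_cast<int64_t>(-0.0) ||
         std::isnan(value);
}

// Mirrors the double branch of HConstant::Hashcode(): fold the high word into
// the low word before truncating to intptr_t on 32-bit targets.
intptr_t HashDouble(double value) {
  uint64_t bits = bit_cast<uint64_t>(value);
  if (sizeof(bits) > sizeof(intptr_t)) {
    bits ^= (bits >> 32);
  }
  return static_cast<intptr_t>(bits);
}

int main() {
  std::cout << IsSpecialDouble(-0.0) << " "              // 1
            << IsSpecialDouble(0.0) << " "               // 0
            << IsSpecialDouble(std::nan("")) << std::endl;  // 1
  std::cout << HashDouble(1.5) << std::endl;
}
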
-
-class HBinaryOperation : public HTemplateInstruction<3> {
- public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right,
- HType type = HType::Tagged())
- : HTemplateInstruction<3>(type),
- observed_output_representation_(Representation::None()) {
- DCHECK(left != NULL && right != NULL);
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- observed_input_representation_[0] = Representation::None();
- observed_input_representation_[1] = Representation::None();
- }
-
- HValue* context() const { return OperandAt(0); }
- HValue* left() const { return OperandAt(1); }
- HValue* right() const { return OperandAt(2); }
-
- // True if switching left and right operands likely generates better code.
- bool AreOperandsBetterSwitched() {
- if (!IsCommutative()) return false;
-
-    // Constant operands are better off on the right; they can be inlined in
- // many situations on most platforms.
- if (left()->IsConstant()) return true;
- if (right()->IsConstant()) return false;
-
- // Otherwise, if there is only one use of the right operand, it would be
- // better off on the left for platforms that only have 2-arg arithmetic
-    // ops (e.g. ia32, x64) that clobber the left operand.
- return right()->HasOneUse();
- }
-
- HValue* BetterLeftOperand() {
- return AreOperandsBetterSwitched() ? right() : left();
- }
-
- HValue* BetterRightOperand() {
- return AreOperandsBetterSwitched() ? left() : right();
- }
-
- void set_observed_input_representation(int index, Representation rep) {
- DCHECK(index >= 1 && index <= 2);
- observed_input_representation_[index - 1] = rep;
- }
-
- virtual void initialize_output_representation(Representation observed) {
- observed_output_representation_ = observed;
- }
-
- Representation observed_input_representation(int index) override {
- if (index == 0) return Representation::Tagged();
- return observed_input_representation_[index - 1];
- }
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
- ? Representation::Integer32() : new_rep;
- HValue::UpdateRepresentation(rep, h_infer, reason);
- }
-
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
- Representation RepresentationFromInputs() override;
- Representation RepresentationFromOutput();
- void AssumeRepresentation(Representation r) override;
-
- virtual bool IsCommutative() const { return false; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- if (index == 0) return Representation::Tagged();
- return representation();
- }
-
- bool RightIsPowerOf2() {
- if (!right()->IsInteger32Constant()) return false;
- int32_t value = right()->GetInteger32Constant();
- if (value < 0) {
- return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(-value));
- }
- return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
- }
-
- DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
-
- private:
- bool IgnoreObservedOutputRepresentation(Representation current_rep);
-
- Representation observed_input_representation_[2];
- Representation observed_output_representation_;
-};
-
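
RightIsPowerOf2() above lets later passes strength-reduce division and modulus when the right operand is a constant whose magnitude is a power of two. A small sketch of the underlying test follows; the helper is a stand-in for base::bits::IsPowerOfTwo32, written here with the usual value & (value - 1) trick.

#include <cstdint>
#include <iostream>

// Stand-in for base::bits::IsPowerOfTwo32: exactly one bit set.
bool IsPowerOfTwo32(uint32_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}

// Mirrors HBinaryOperation::RightIsPowerOf2() for a known constant right
// operand: negative constants are tested by magnitude.
bool RightIsPowerOf2(int32_t right) {
  if (right < 0) {
    return IsPowerOfTwo32(static_cast<uint32_t>(-right));
  }
  return IsPowerOfTwo32(static_cast<uint32_t>(right));
}

int main() {
  std::cout << RightIsPowerOf2(8) << " "       // 1
            << RightIsPowerOf2(-16) << " "     // 1
            << RightIsPowerOf2(12) << " "      // 0
            << RightIsPowerOf2(0) << std::endl;  // 0
}
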
-
-class HWrapReceiver final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
-
- bool DataEquals(HValue* other) override { return true; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* receiver() const { return OperandAt(0); }
- HValue* function() const { return OperandAt(1); }
-
- HValue* Canonicalize() override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- bool known_function() const { return known_function_; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
-
- private:
- HWrapReceiver(HValue* receiver, HValue* function) {
- known_function_ = function->IsConstant() &&
- HConstant::cast(function)->handle(function->isolate())->IsJSFunction();
- set_representation(Representation::Tagged());
- SetOperandAt(0, receiver);
- SetOperandAt(1, function);
- SetFlag(kUseGVN);
- }
-
- bool known_function_;
-};
-
-
-class HApplyArguments final : public HTemplateInstruction<4> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P5(HApplyArguments, HValue*, HValue*, HValue*,
- HValue*, TailCallMode);
-
- Representation RequiredInputRepresentation(int index) override {
-    // The length is untagged; all other inputs are tagged.
- return (index == 2)
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* function() { return OperandAt(0); }
- HValue* receiver() { return OperandAt(1); }
- HValue* length() { return OperandAt(2); }
- HValue* elements() { return OperandAt(3); }
-
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
-
- private:
- HApplyArguments(HValue* function, HValue* receiver, HValue* length,
- HValue* elements, TailCallMode tail_call_mode)
- : bit_field_(TailCallModeField::encode(tail_call_mode)) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, function);
- SetOperandAt(1, receiver);
- SetOperandAt(2, length);
- SetOperandAt(3, elements);
- SetAllSideEffects();
- }
-
- class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
- uint32_t bit_field_;
-};
-
-
-class HArgumentsElements final : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
- DECLARE_INSTRUCTION_FACTORY_P2(HArgumentsElements, bool, bool);
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- bool from_inlined() const { return from_inlined_; }
- bool arguments_adaptor() const { return arguments_adaptor_; }
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HArgumentsElements(bool from_inlined, bool arguments_adaptor = true)
- : from_inlined_(from_inlined), arguments_adaptor_(arguments_adaptor) {
- // The value produced by this instruction is a pointer into the stack
- // that looks as if it was a smi because of alignment.
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-
- bool from_inlined_;
- bool arguments_adaptor_;
-};
-
-
-class HArgumentsLength final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
-class HAccessArgumentsAt final : public HTemplateInstruction<3> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
-    // The arguments elements array is considered tagged.
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
-
- HValue* arguments() const { return OperandAt(0); }
- HValue* length() const { return OperandAt(1); }
- HValue* index() const { return OperandAt(2); }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
-
- private:
- HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetOperandAt(0, arguments);
- SetOperandAt(1, length);
- SetOperandAt(2, index);
- }
-
- bool DataEquals(HValue* other) override { return true; }
-};
-
-
-class HBoundsCheck final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
-
- bool skip_check() const { return skip_check_; }
- void set_skip_check() { skip_check_ = true; }
-
- HValue* base() const { return base_; }
- int offset() const { return offset_; }
- int scale() const { return scale_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
-
- HValue* index() const { return OperandAt(0); }
- HValue* length() const { return OperandAt(1); }
- bool allow_equality() const { return allow_equality_; }
- void set_allow_equality(bool v) { allow_equality_ = v; }
-
- int RedefinedOperandIndex() override { return 0; }
- bool IsPurelyInformativeDefinition() override { return skip_check(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
-
- protected:
- Range* InferRange(Zone* zone) override;
-
- bool DataEquals(HValue* other) override { return true; }
- bool skip_check_;
- HValue* base_;
- int offset_;
- int scale_;
- bool allow_equality_;
-
- private:
- // Normally HBoundsCheck should be created using the
- // HGraphBuilder::AddBoundsCheck() helper.
-  // However, when building stubs, where we know that the arguments are Int32,
- // it makes sense to invoke this constructor directly.
- HBoundsCheck(HValue* index, HValue* length)
- : skip_check_(false),
- base_(NULL), offset_(0), scale_(0),
- allow_equality_(false) {
- SetOperandAt(0, index);
- SetOperandAt(1, length);
- SetFlag(kFlexibleRepresentation);
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return skip_check() && !FLAG_debug_code; }
-};
-
-
-class HBitwiseBinaryOperation : public HBinaryOperation {
- public:
- HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
- HType type = HType::TaggedNumber())
- : HBinaryOperation(context, left, right, type) {
- SetFlag(kFlexibleRepresentation);
- SetFlag(kTruncatingToInt32);
- SetFlag(kTruncatingToNumber);
- SetAllSideEffects();
- }
-
- void RepresentationChanged(Representation to) override {
- if (to.IsTagged() &&
- (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- }
- if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
- }
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- // We only generate either int32 or generic tagged bitwise operations.
- if (new_rep.IsDouble()) new_rep = Representation::Integer32();
- HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- Representation observed_input_representation(int index) override {
- Representation r = HBinaryOperation::observed_input_representation(index);
- if (r.IsDouble()) return Representation::Integer32();
- return r;
- }
-
- void initialize_output_representation(Representation observed) override {
- if (observed.IsDouble()) observed = Representation::Integer32();
- HBinaryOperation::initialize_output_representation(observed);
- }
-
- DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
-
- private:
- bool IsDeletable() const override { return true; }
-};
-
-
-class HMathFloorOfDiv final : public HBinaryOperation {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
- HValue*,
- HValue*);
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kCanOverflow);
- SetFlag(kCanBeDivByZero);
- SetFlag(kLeftCanBeMinInt);
- SetFlag(kLeftCanBeNegative);
- SetFlag(kLeftCanBePositive);
- SetFlag(kTruncatingToNumber);
- }
-
- Range* InferRange(Zone* zone) override;
-
- bool IsDeletable() const override { return true; }
-};
-
-
-class HArithmeticBinaryOperation : public HBinaryOperation {
- public:
- HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
- HType type = HType::TaggedNumber())
- : HBinaryOperation(context, left, right, type) {
- SetAllSideEffects();
- SetFlag(kFlexibleRepresentation);
- SetFlag(kTruncatingToNumber);
- }
-
- void RepresentationChanged(Representation to) override {
- if (to.IsTagged() &&
- (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- }
- if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
- }
-
- DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
-
- private:
- bool IsDeletable() const override { return true; }
-};
-
-
-class HCompareGeneric final : public HBinaryOperation {
- public:
- static HCompareGeneric* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Token::Value token) {
- return new (zone) HCompareGeneric(context, left, right, token);
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
-
- Token::Value token() const { return token_; }
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
-
- private:
- HCompareGeneric(HValue* context, HValue* left, HValue* right,
- Token::Value token)
- : HBinaryOperation(context, left, right, HType::Boolean()),
- token_(token) {
- DCHECK(Token::IsCompareOp(token));
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Token::Value token_;
-};
-
-
-class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
- public:
- static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* left,
- HValue* right, Token::Value token,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL) {
- return new (zone)
- HCompareNumericAndBranch(left, right, token, true_target, false_target);
- }
-
- HValue* left() const { return OperandAt(0); }
- HValue* right() const { return OperandAt(1); }
- Token::Value token() const { return token_; }
-
- void set_observed_input_representation(Representation left,
- Representation right) {
- observed_input_representation_[0] = left;
- observed_input_representation_[1] = right;
- }
-
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
-
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
- Representation observed_input_representation(int index) override {
- return observed_input_representation_[index];
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
-
- private:
- HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token,
- HBasicBlock* true_target, HBasicBlock* false_target)
- : token_(token) {
- SetFlag(kFlexibleRepresentation);
- DCHECK(Token::IsCompareOp(token));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- SetSuccessorAt(0, true_target);
- SetSuccessorAt(1, false_target);
- }
-
- Representation observed_input_representation_[2];
- Token::Value token_;
-};
-
-
-class HCompareHoleAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
- HBasicBlock*, HBasicBlock*);
-
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
-
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
-
- private:
- HCompareHoleAndBranch(HValue* value,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {
- SetFlag(kFlexibleRepresentation);
- }
-};
-
-
-class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
- HBasicBlock*, HBasicBlock*);
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- static const int kNoKnownSuccessorIndex = -1;
- int known_successor_index() const { return known_successor_index_; }
- void set_known_successor_index(int known_successor_index) {
- known_successor_index_ = known_successor_index;
- }
-
- HValue* left() const { return OperandAt(0); }
- HValue* right() const { return OperandAt(1); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- Representation observed_input_representation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
-
- private:
- HCompareObjectEqAndBranch(HValue* left,
- HValue* right,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : known_successor_index_(kNoKnownSuccessorIndex) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- SetSuccessorAt(0, true_target);
- SetSuccessorAt(1, false_target);
- }
-
- int known_successor_index_;
-};
-
-
-class HIsStringAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
- HBasicBlock*, HBasicBlock*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- static const int kNoKnownSuccessorIndex = -1;
- int known_successor_index() const { return known_successor_index_; }
- void set_known_successor_index(int known_successor_index) {
- known_successor_index_ = known_successor_index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
-
- protected:
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- HIsStringAndBranch(HValue* value, HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- known_successor_index_(kNoKnownSuccessorIndex) {
- set_representation(Representation::Tagged());
- }
-
- int known_successor_index_;
-};
-
-
-class HIsSmiAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
- HBasicBlock*, HBasicBlock*);
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- HIsSmiAndBranch(HValue* value,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {
- set_representation(Representation::Tagged());
- }
-};
-
-
-class HIsUndetectableAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
- HBasicBlock*, HBasicBlock*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
-
- private:
- HIsUndetectableAndBranch(HValue* value,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
-};
-
-
-class HStringCompareAndBranch final : public HTemplateControlInstruction<2, 3> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
- HValue*,
- HValue*,
- Token::Value);
-
- HValue* context() const { return OperandAt(0); }
- HValue* left() const { return OperandAt(1); }
- HValue* right() const { return OperandAt(2); }
- Token::Value token() const { return token_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const final; // NOLINT
-
- Representation RequiredInputRepresentation(int index) final {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
-
- private:
- HStringCompareAndBranch(HValue* context, HValue* left, HValue* right,
- Token::Value token)
- : token_(token) {
- DCHECK(Token::IsCompareOp(token));
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
- SetDependsOnFlag(kStringChars);
- SetDependsOnFlag(kStringLengths);
- }
-
- Token::Value const token_;
-};
-
-
-class HHasInstanceTypeAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(
- HHasInstanceTypeAndBranch, HValue*, InstanceType);
- DECLARE_INSTRUCTION_FACTORY_P3(
- HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType);
-
- InstanceType from() { return from_; }
- InstanceType to() { return to_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
-
- private:
- HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
- : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
- HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
- : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
- DCHECK(to == LAST_TYPE); // Others not implemented yet in backend.
- }
-
- InstanceType from_;
- InstanceType to_; // Inclusive range, not all combinations work.
-};
-
-class HClassOfTestAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
- Handle<String>);
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Handle<String> class_name() const { return class_name_; }
-
- private:
- HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
- : HUnaryControlInstruction(value, NULL, NULL), class_name_(class_name) {}
-
- Handle<String> class_name_;
-};
-
-class HTypeofIsAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
-
- Handle<String> type_literal() const { return type_literal_.handle(); }
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- void FinalizeUniqueness() override {
- type_literal_ = Unique<String>(type_literal_.handle());
- }
-
- private:
- HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
- : HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(Unique<String>::CreateUninitialized(type_literal)) { }
-
- Unique<String> type_literal_;
-};
-
-
-class HHasInPrototypeChainAndBranch final
- : public HTemplateControlInstruction<2, 2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HHasInPrototypeChainAndBranch, HValue*,
- HValue*);
-
- HValue* object() const { return OperandAt(0); }
- HValue* prototype() const { return OperandAt(1); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- bool ObjectNeedsSmiCheck() const {
- return !object()->type().IsHeapObject() &&
- !object()->representation().IsHeapObject();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch)
-
- private:
- HHasInPrototypeChainAndBranch(HValue* object, HValue* prototype) {
- SetOperandAt(0, object);
- SetOperandAt(1, prototype);
- SetDependsOnFlag(kCalls);
- }
-};
-
-
-class HPower final : public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- HValue* left() { return OperandAt(0); }
- HValue* right() const { return OperandAt(1); }
-
- Representation RequiredInputRepresentation(int index) override {
- return index == 0
- ? Representation::Double()
- : Representation::None();
- }
- Representation observed_input_representation(int index) override {
- return RequiredInputRepresentation(index);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Power)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HPower(HValue* left, HValue* right) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- set_representation(Representation::Double());
- SetFlag(kUseGVN);
- SetChangesFlag(kNewSpacePromotion);
- }
-
- bool IsDeletable() const override {
- return !right()->representation().IsTagged();
- }
-};
-
-
-enum ExternalAddType {
- AddOfExternalAndTagged,
- AddOfExternalAndInt32,
- NoExternalAdd
-};
-
-
-class HAdd final : public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- ExternalAddType external_add_type);
-
- // Add is only commutative if two integer values are added and not if two
- // tagged values are added (because it might be a String concatenation).
- // We also do not commute (pointer + offset).
- bool IsCommutative() const override {
- return !representation().IsTagged() && !representation().IsExternal();
- }
-
- HValue* Canonicalize() override;
-
- void RepresentationChanged(Representation to) override {
- if (to.IsTagged() &&
- (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
- left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- }
- if (to.IsTagged()) {
- SetChangesFlag(kNewSpacePromotion);
- ClearFlag(kTruncatingToNumber);
- }
- if (!right()->type().IsTaggedNumber() &&
- !right()->representation().IsDouble() &&
- !right()->representation().IsSmiOrInteger32()) {
- ClearFlag(kTruncatingToNumber);
- }
- }
-
- Representation RepresentationFromInputs() override;
-
- Representation RequiredInputRepresentation(int index) override;
-
- bool IsConsistentExternalRepresentation() {
- return left()->representation().IsExternal() &&
- ((external_add_type_ == AddOfExternalAndInt32 &&
- right()->representation().IsInteger32()) ||
- (external_add_type_ == AddOfExternalAndTagged &&
- right()->representation().IsTagged()));
- }
-
- ExternalAddType external_add_type() const { return external_add_type_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Add)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HAdd(HValue* context, HValue* left, HValue* right,
- ExternalAddType external_add_type = NoExternalAdd)
- : HArithmeticBinaryOperation(context, left, right, HType::Tagged()),
- external_add_type_(external_add_type) {
- SetFlag(kCanOverflow);
- switch (external_add_type_) {
- case AddOfExternalAndTagged:
- DCHECK(left->representation().IsExternal());
- DCHECK(right->representation().IsTagged());
- SetDependsOnFlag(kNewSpacePromotion);
- ClearFlag(HValue::kCanOverflow);
- SetFlag(kHasNoObservableSideEffects);
- break;
-
- case NoExternalAdd:
- // This is a bit of a hack: The call to this constructor is generated
- // by a macro that also supports sub and mul, so it doesn't pass in
- // a value for external_add_type but uses the default.
- if (left->representation().IsExternal()) {
- external_add_type_ = AddOfExternalAndInt32;
- }
- break;
-
- case AddOfExternalAndInt32:
- // See comment above.
- UNREACHABLE();
- break;
- }
- }
-
- ExternalAddType external_add_type_;
-};
-
-
-class HSub final : public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- HValue* Canonicalize() override;
-
- DECLARE_CONCRETE_INSTRUCTION(Sub)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HSub(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HMul final : public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right) {
- HInstruction* instr = HMul::New(isolate, zone, context, left, right);
- if (!instr->IsMul()) return instr;
- HMul* mul = HMul::cast(instr);
- // TODO(mstarzinger): Prevent bailout on minus zero for imul.
- mul->AssumeRepresentation(Representation::Integer32());
- mul->ClearFlag(HValue::kCanOverflow);
- return mul;
- }
-
- HValue* Canonicalize() override;
-
-  // Only commutative if it is certain that no two objects are multiplied.
- bool IsCommutative() const override { return !representation().IsTagged(); }
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- bool MulMinusOne();
-
- DECLARE_CONCRETE_INSTRUCTION(Mul)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HMul(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HMod final : public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- HValue* Canonicalize() override;
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
- HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Mod)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HMod(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- SetFlag(kCanOverflow);
- SetFlag(kLeftCanBeNegative);
- }
-};
-
-
-class HDiv final : public HArithmeticBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- HValue* Canonicalize() override;
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
- HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Div)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HDiv(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- SetFlag(kCanOverflow);
- }
-};
-
-
-class HMathMinMax final : public HArithmeticBinaryOperation {
- public:
- enum Operation { kMathMin, kMathMax };
-
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Operation op);
-
- Representation observed_input_representation(int index) override {
- return RequiredInputRepresentation(index);
- }
-
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
-
- Representation RepresentationFromInputs() override {
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- Representation result = Representation::Smi();
- result = result.generalize(left_rep);
- result = result.generalize(right_rep);
- if (result.IsTagged()) return Representation::Double();
- return result;
- }
-
- bool IsCommutative() const override { return true; }
-
- Operation operation() { return operation_; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
-
- protected:
- bool DataEquals(HValue* other) override {
- return other->IsMathMinMax() &&
- HMathMinMax::cast(other)->operation_ == operation_;
- }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
- : HArithmeticBinaryOperation(context, left, right), operation_(op) {}
-
- Operation operation_;
-};
-
-
-class HBitwise final : public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- Token::Value op, HValue* left, HValue* right);
-
- Token::Value op() const { return op_; }
-
- bool IsCommutative() const override { return true; }
-
- HValue* Canonicalize() override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(Bitwise)
-
- protected:
- bool DataEquals(HValue* other) override {
- return op() == HBitwise::cast(other)->op();
- }
-
- Range* InferRange(Zone* zone) override;
-
- private:
- HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right), op_(op) {
- DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
- // BIT_AND with a smi-range positive value will always unset the
- // entire sign-extension of the smi-sign.
- if (op == Token::BIT_AND &&
- ((left->IsConstant() &&
- left->representation().IsSmi() &&
- HConstant::cast(left)->Integer32Value() >= 0) ||
- (right->IsConstant() &&
- right->representation().IsSmi() &&
- HConstant::cast(right)->Integer32Value() >= 0))) {
- SetFlag(kTruncatingToSmi);
- SetFlag(kTruncatingToInt32);
- // BIT_OR with a smi-range negative value will always set the entire
- // sign-extension of the smi-sign.
- } else if (op == Token::BIT_OR &&
- ((left->IsConstant() &&
- left->representation().IsSmi() &&
- HConstant::cast(left)->Integer32Value() < 0) ||
- (right->IsConstant() &&
- right->representation().IsSmi() &&
- HConstant::cast(right)->Integer32Value() < 0))) {
- SetFlag(kTruncatingToSmi);
- SetFlag(kTruncatingToInt32);
- }
- }
-
- Token::Value op_;
-};
-
-
-class HShl final : public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- Range* InferRange(Zone* zone) override;
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- if (new_rep.IsSmi() &&
- !(right()->IsInteger32Constant() &&
- right()->GetInteger32Constant() >= 0)) {
- new_rep = Representation::Integer32();
- }
- HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Shl)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HShl(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {}
-};
-
-
-class HShr final : public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- Range* InferRange(Zone* zone) override;
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
- HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Shr)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HShr(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {}
-};
-
-
-class HSar final : public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right);
-
- Range* InferRange(Zone* zone) override;
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
- HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Sar)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HSar(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {}
-};
-
-
-class HRor final : public HBitwiseBinaryOperation {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right) {
- return new (zone) HRor(context, left, right);
- }
-
- void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
- HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Ror)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
- ChangeRepresentation(Representation::Integer32());
- }
-};
-
-
-class HOsrEntry final : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
-
- BailoutId ast_id() const { return ast_id_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
-
- private:
- explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetChangesFlag(kOsrEntries);
- SetChangesFlag(kNewSpacePromotion);
- }
-
- BailoutId ast_id_;
-};
-
-
-class HParameter final : public HTemplateInstruction<0> {
- public:
- enum ParameterKind {
- STACK_PARAMETER,
- REGISTER_PARAMETER
- };
-
- DECLARE_INSTRUCTION_FACTORY_P1(HParameter, unsigned);
- DECLARE_INSTRUCTION_FACTORY_P2(HParameter, unsigned, ParameterKind);
- DECLARE_INSTRUCTION_FACTORY_P3(HParameter, unsigned, ParameterKind,
- Representation);
-
- unsigned index() const { return index_; }
- ParameterKind kind() const { return kind_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- Representation KnownOptimalRepresentation() override {
- // If a parameter is an input to a phi, that phi should not
- // choose any more optimistic representation than Tagged.
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Parameter)
-
- private:
- explicit HParameter(unsigned index,
- ParameterKind kind = STACK_PARAMETER)
- : index_(index),
- kind_(kind) {
- set_representation(Representation::Tagged());
- }
-
- explicit HParameter(unsigned index,
- ParameterKind kind,
- Representation r)
- : index_(index),
- kind_(kind) {
- set_representation(r);
- }
-
- unsigned index_;
- ParameterKind kind_;
-};
-
-
-class HUnknownOSRValue final : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- void set_incoming_value(HPhi* value) { incoming_value_ = value; }
- HPhi* incoming_value() { return incoming_value_; }
-  HEnvironment* environment() { return environment_; }
- int index() { return index_; }
-
- Representation KnownOptimalRepresentation() override {
- if (incoming_value_ == NULL) return Representation::None();
- return incoming_value_->KnownOptimalRepresentation();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
-
- private:
- HUnknownOSRValue(HEnvironment* environment, int index)
- : environment_(environment),
- index_(index),
- incoming_value_(NULL) {
- set_representation(Representation::Tagged());
- }
-
- HEnvironment* environment_;
- int index_;
- HPhi* incoming_value_;
-};
-
-class HAllocate final : public HTemplateInstruction<3> {
- public:
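-  // Two instance types are compatible when they map to identical allocation
-  // flags for both tenured and not-tenured allocations.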
- static bool CompatibleInstanceTypes(InstanceType type1,
- InstanceType type2) {
- return ComputeFlags(TENURED, type1) == ComputeFlags(TENURED, type2) &&
- ComputeFlags(NOT_TENURED, type1) == ComputeFlags(NOT_TENURED, type2);
- }
-
- static HAllocate* New(
- Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type,
- PretenureFlag pretenure_flag, InstanceType instance_type,
- HValue* dominator,
- Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()) {
- return new (zone) HAllocate(context, size, type, pretenure_flag,
- instance_type, dominator, allocation_site);
- }
-
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxInlineSize = 64 * kPointerSize;
-
- HValue* context() const { return OperandAt(0); }
- HValue* size() const { return OperandAt(1); }
- HValue* allocation_folding_dominator() const { return OperandAt(2); }
-
- Representation RequiredInputRepresentation(int index) override {
- if (index == 0) {
- return Representation::Tagged();
- } else {
- return Representation::Integer32();
- }
- }
-
- Handle<Map> GetMonomorphicJSObjectMap() override {
- return known_initial_map_;
- }
-
- void set_known_initial_map(Handle<Map> known_initial_map) {
- known_initial_map_ = known_initial_map;
- }
-
- bool IsNewSpaceAllocation() const {
- return (flags_ & ALLOCATE_IN_NEW_SPACE) != 0;
- }
-
- bool IsOldSpaceAllocation() const {
- return (flags_ & ALLOCATE_IN_OLD_SPACE) != 0;
- }
-
- bool MustAllocateDoubleAligned() const {
- return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
- }
-
- bool MustPrefillWithFiller() const {
- return (flags_ & PREFILL_WITH_FILLER) != 0;
- }
-
- void MakePrefillWithFiller() {
- flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
- }
-
- void MakeDoubleAligned() {
- flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
- }
-
- void MakeAllocationFoldingDominator() {
- flags_ =
- static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDING_DOMINATOR);
- }
-
- bool IsAllocationFoldingDominator() const {
- return (flags_ & ALLOCATION_FOLDING_DOMINATOR) != 0;
- }
-
- void MakeFoldedAllocation(HAllocate* dominator) {
- flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDED);
- ClearFlag(kTrackSideEffectDominators);
- ClearChangesFlag(kNewSpacePromotion);
- SetOperandAt(2, dominator);
- }
-
- bool IsAllocationFolded() const { return (flags_ & ALLOCATION_FOLDED) != 0; }
-
- bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override;
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate)
-
- private:
- enum Flags {
- ALLOCATE_IN_NEW_SPACE = 1 << 0,
- ALLOCATE_IN_OLD_SPACE = 1 << 2,
- ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
- PREFILL_WITH_FILLER = 1 << 4,
- ALLOCATION_FOLDING_DOMINATOR = 1 << 5,
- ALLOCATION_FOLDED = 1 << 6
- };
-
- HAllocate(
- HValue* context, HValue* size, HType type, PretenureFlag pretenure_flag,
- InstanceType instance_type, HValue* dominator,
- Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null())
- : HTemplateInstruction<3>(type),
- flags_(ComputeFlags(pretenure_flag, instance_type)) {
- SetOperandAt(0, context);
- UpdateSize(size);
- SetOperandAt(2, dominator);
- set_representation(Representation::Tagged());
- SetFlag(kTrackSideEffectDominators);
- SetChangesFlag(kNewSpacePromotion);
- SetDependsOnFlag(kNewSpacePromotion);
-
- if (FLAG_trace_pretenuring) {
- PrintF("HAllocate with AllocationSite %p %s\n",
- allocation_site.is_null()
- ? static_cast<void*>(NULL)
- : static_cast<void*>(*allocation_site),
- pretenure_flag == TENURED ? "tenured" : "not tenured");
- }
- }
-
- static Flags ComputeFlags(PretenureFlag pretenure_flag,
- InstanceType instance_type) {
- Flags flags = pretenure_flag == TENURED ? ALLOCATE_IN_OLD_SPACE
- : ALLOCATE_IN_NEW_SPACE;
- if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- flags = static_cast<Flags>(flags | ALLOCATE_DOUBLE_ALIGNED);
- }
-    // We have to fill the allocated object with one-word fillers if we do
-    // not use allocation folding, since some allocations may depend on each
-    // other, i.e., hold a pointer to each other. A GC in between these
-    // allocations may leave such objects behind in an incompletely
-    // initialized state.
- if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
- flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
- }
- return flags;
- }
-
- void UpdateSize(HValue* size) {
- SetOperandAt(1, size);
- }
-
- bool IsFoldable(HAllocate* allocate) {
- return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
- (IsOldSpaceAllocation() && allocate->IsOldSpaceAllocation());
- }
-
- Flags flags_;
- Handle<Map> known_initial_map_;
-};
-
-
-class HStoreCodeEntry final : public HTemplateInstruction<2> {
- public:
- static HStoreCodeEntry* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* function, HValue* code) {
- return new(zone) HStoreCodeEntry(function, code);
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* function() { return OperandAt(0); }
- HValue* code_object() { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry)
-
- private:
- HStoreCodeEntry(HValue* function, HValue* code) {
- SetOperandAt(0, function);
- SetOperandAt(1, code);
- }
-};
-
-
-class HInnerAllocatedObject final : public HTemplateInstruction<2> {
- public:
- static HInnerAllocatedObject* New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* value,
- HValue* offset, HType type) {
- return new(zone) HInnerAllocatedObject(value, offset, type);
- }
-
- HValue* base_object() const { return OperandAt(0); }
- HValue* offset() const { return OperandAt(1); }
-
- Representation RequiredInputRepresentation(int index) override {
- return index == 0 ? Representation::Tagged() : Representation::Integer32();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
-
- private:
- HInnerAllocatedObject(HValue* value,
- HValue* offset,
- HType type) : HTemplateInstruction<2>(type) {
- DCHECK(value->IsAllocate());
- DCHECK(type.IsHeapObject());
- SetOperandAt(0, value);
- SetOperandAt(1, offset);
- set_representation(Representation::Tagged());
- }
-};
-
-
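-// A store needs a write barrier unless the stored value is known to be a
-// Smi, null, boolean, undefined, or an immortal immovable constant.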
-inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsSmi()
- && !value->type().IsNull()
- && !value->type().IsBoolean()
- && !value->type().IsUndefined()
- && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
-}
-
-
-inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
- HValue* value,
- HValue* dominator) {
- // There may be multiple inner allocates dominated by one allocate.
- while (object->IsInnerAllocatedObject()) {
- object = HInnerAllocatedObject::cast(object)->base_object();
- }
-
- if (object->IsAllocate()) {
- HAllocate* allocate = HAllocate::cast(object);
- if (allocate->IsAllocationFolded()) {
- HValue* dominator = allocate->allocation_folding_dominator();
- // There is no guarantee that all allocations are folded together because
- // GVN performs a fixpoint.
- if (HAllocate::cast(dominator)->IsAllocationFoldingDominator()) {
- object = dominator;
- }
- }
- }
-
- if (object->IsConstant() &&
- HConstant::cast(object)->HasExternalReferenceValue()) {
-    // Stores to external references require no write barriers.
- return false;
- }
- // We definitely need a write barrier unless the object is the allocation
- // dominator.
- if (object == dominator && object->IsAllocate()) {
- // Stores to new space allocations require no write barriers.
- if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
- return false;
- }
- }
- return true;
-}
-
-
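-// When the target is the dominating new-space allocation itself, pointers
-// to here are always interesting; otherwise they only may be.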
-inline PointersToHereCheck PointersToHereCheckForObject(HValue* object,
- HValue* dominator) {
- while (object->IsInnerAllocatedObject()) {
- object = HInnerAllocatedObject::cast(object)->base_object();
- }
- if (object == dominator &&
- object->IsAllocate() &&
- HAllocate::cast(object)->IsNewSpaceAllocation()) {
- return kPointersToHereAreAlwaysInteresting;
- }
- return kPointersToHereMaybeInteresting;
-}
-
-
-class HLoadContextSlot final : public HUnaryOperation {
- public:
- enum Mode {
- // Perform a normal load of the context slot without checking its value.
- kNoCheck,
- // Load and check the value of the context slot. Deoptimize if it's the
- // hole value. This is used for checking for loading of uninitialized
- // harmony bindings where we deoptimize into full-codegen generated code
- // which will subsequently throw a reference error.
- kCheckDeoptimize
- };
-
- HLoadContextSlot(HValue* context, int slot_index, Mode mode)
- : HUnaryOperation(context), slot_index_(slot_index), mode_(mode) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kContextSlots);
- }
-
- int slot_index() const { return slot_index_; }
- Mode mode() const { return mode_; }
-
- bool DeoptimizesOnHole() {
- return mode_ == kCheckDeoptimize;
- }
-
- bool RequiresHoleCheck() const {
- return mode_ != kNoCheck;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
-
- protected:
- bool DataEquals(HValue* other) override {
- HLoadContextSlot* b = HLoadContextSlot::cast(other);
- return (slot_index() == b->slot_index());
- }
-
- private:
- bool IsDeletable() const override { return !RequiresHoleCheck(); }
-
- int slot_index_;
- Mode mode_;
-};
-
-
-class HStoreContextSlot final : public HTemplateInstruction<2> {
- public:
- enum Mode {
- // Perform a normal store to the context slot without checking its previous
- // value.
- kNoCheck,
- // Check the previous value of the context slot and deoptimize if it's the
- // hole value. This is used for checking for assignments to uninitialized
- // harmony bindings where we deoptimize into full-codegen generated code
- // which will subsequently throw a reference error.
- kCheckDeoptimize
- };
-
- DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int,
- Mode, HValue*);
-
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
- int slot_index() const { return slot_index_; }
- Mode mode() const { return mode_; }
-
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- bool DeoptimizesOnHole() {
- return mode_ == kCheckDeoptimize;
- }
-
- bool RequiresHoleCheck() {
- return mode_ != kNoCheck;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
-
- private:
- HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
- : slot_index_(slot_index), mode_(mode) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetChangesFlag(kContextSlots);
- }
-
- int slot_index_;
- Mode mode_;
-};
-
-
-// Represents an access to a portion of an object, such as the map pointer,
-// array elements pointer, etc, but not accesses to array elements themselves.
-class HObjectAccess final {
- public:
- inline bool IsInobject() const {
- return portion() != kBackingStore && portion() != kExternalMemory;
- }
-
- inline bool IsExternalMemory() const {
- return portion() == kExternalMemory;
- }
-
- inline bool IsStringLength() const {
- return portion() == kStringLengths;
- }
-
- inline bool IsMap() const {
- return portion() == kMaps;
- }
-
- inline int offset() const {
- return OffsetField::decode(value_);
- }
-
- inline Representation representation() const {
- return Representation::FromKind(RepresentationField::decode(value_));
- }
-
- inline Handle<Name> name() const { return name_; }
-
- inline bool immutable() const {
- return ImmutableField::decode(value_);
- }
-
- // Returns true if access is being made to an in-object property that
- // was already added to the object.
- inline bool existing_inobject_property() const {
- return ExistingInobjectPropertyField::decode(value_);
- }
-
- inline HObjectAccess WithRepresentation(Representation representation) {
- return HObjectAccess(portion(), offset(), representation, name(),
- immutable(), existing_inobject_property());
- }
-
- static HObjectAccess ForHeapNumberValue() {
- return HObjectAccess(
- kDouble, HeapNumber::kValueOffset, Representation::Double());
- }
-
- static HObjectAccess ForHeapNumberValueLowestBits() {
- return HObjectAccess(kDouble,
- HeapNumber::kValueOffset,
- Representation::Integer32());
- }
-
- static HObjectAccess ForHeapNumberValueHighestBits() {
- return HObjectAccess(kDouble,
- HeapNumber::kValueOffset + kIntSize,
- Representation::Integer32());
- }
-
- static HObjectAccess ForOddballToNumber(
- Representation representation = Representation::Tagged()) {
- return HObjectAccess(kInobject, Oddball::kToNumberOffset, representation);
- }
-
- static HObjectAccess ForOddballTypeOf() {
- return HObjectAccess(kInobject, Oddball::kTypeOfOffset,
- Representation::HeapObject());
- }
-
- static HObjectAccess ForElementsPointer() {
- return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
- }
-
- static HObjectAccess ForNextFunctionLinkPointer() {
- return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
- }
-
- static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
- return HObjectAccess(
- kArrayLengths,
- JSArray::kLengthOffset,
- IsFastElementsKind(elements_kind)
- ? Representation::Smi() : Representation::Tagged());
- }
-
- static HObjectAccess ForAllocationSiteOffset(int offset);
-
- static HObjectAccess ForAllocationSiteList() {
- return HObjectAccess(kExternalMemory, 0, Representation::Tagged(),
- Handle<Name>::null(), false, false);
- }
-
- static HObjectAccess ForFixedArrayLength() {
- return HObjectAccess(
- kArrayLengths,
- FixedArray::kLengthOffset,
- Representation::Smi());
- }
-
- static HObjectAccess ForFixedTypedArrayBaseBasePointer() {
- return HObjectAccess(kInobject, FixedTypedArrayBase::kBasePointerOffset,
- Representation::Tagged());
- }
-
- static HObjectAccess ForFixedTypedArrayBaseExternalPointer() {
- return HObjectAccess::ForObservableJSObjectOffset(
- FixedTypedArrayBase::kExternalPointerOffset,
- Representation::External());
- }
-
- static HObjectAccess ForStringHashField() {
- return HObjectAccess(kInobject,
- String::kHashFieldOffset,
- Representation::Integer32());
- }
-
- static HObjectAccess ForStringLength() {
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- return HObjectAccess(
- kStringLengths,
- String::kLengthOffset,
- Representation::Smi());
- }
-
- static HObjectAccess ForConsStringFirst() {
- return HObjectAccess(kInobject, ConsString::kFirstOffset);
- }
-
- static HObjectAccess ForConsStringSecond() {
- return HObjectAccess(kInobject, ConsString::kSecondOffset);
- }
-
- static HObjectAccess ForPropertiesPointer() {
- return HObjectAccess(kInobject, JSObject::kPropertiesOffset);
- }
-
- static HObjectAccess ForPrototypeOrInitialMap() {
- return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
- }
-
- static HObjectAccess ForSharedFunctionInfoPointer() {
- return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset);
- }
-
- static HObjectAccess ForCodeEntryPointer() {
- return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset);
- }
-
- static HObjectAccess ForCodeOffset() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
- }
-
- static HObjectAccess ForFunctionContextPointer() {
- return HObjectAccess(kInobject, JSFunction::kContextOffset);
- }
-
- static HObjectAccess ForMap() {
- return HObjectAccess(kMaps, JSObject::kMapOffset);
- }
-
- static HObjectAccess ForPrototype() {
- return HObjectAccess(kMaps, Map::kPrototypeOffset);
- }
-
- static HObjectAccess ForMapAsInteger32() {
- return HObjectAccess(kMaps, JSObject::kMapOffset,
- Representation::Integer32());
- }
-
- static HObjectAccess ForMapInObjectPropertiesOrConstructorFunctionIndex() {
- return HObjectAccess(
- kInobject, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
- Representation::UInteger8());
- }
-
- static HObjectAccess ForMapInstanceType() {
- return HObjectAccess(kInobject,
- Map::kInstanceTypeOffset,
- Representation::UInteger8());
- }
-
- static HObjectAccess ForMapInstanceSize() {
- return HObjectAccess(kInobject,
- Map::kInstanceSizeOffset,
- Representation::UInteger8());
- }
-
- static HObjectAccess ForMapBitField() {
- return HObjectAccess(kInobject,
- Map::kBitFieldOffset,
- Representation::UInteger8());
- }
-
- static HObjectAccess ForMapBitField2() {
- return HObjectAccess(kInobject,
- Map::kBitField2Offset,
- Representation::UInteger8());
- }
-
- static HObjectAccess ForMapBitField3() {
- return HObjectAccess(kInobject, Map::kBitField3Offset,
- Representation::Integer32());
- }
-
- static HObjectAccess ForMapDescriptors() {
- return HObjectAccess(kInobject, Map::kDescriptorsOffset);
- }
-
- static HObjectAccess ForNameHashField() {
- return HObjectAccess(kInobject,
- Name::kHashFieldOffset,
- Representation::Integer32());
- }
-
- static HObjectAccess ForMapInstanceTypeAndBitField() {
- STATIC_ASSERT((Map::kInstanceTypeAndBitFieldOffset & 1) == 0);
- // Ensure the two fields share one 16-bit word, endian-independent.
- STATIC_ASSERT((Map::kBitFieldOffset & ~1) ==
- (Map::kInstanceTypeOffset & ~1));
- return HObjectAccess(kInobject,
- Map::kInstanceTypeAndBitFieldOffset,
- Representation::UInteger16());
- }
-
- static HObjectAccess ForPropertyCellValue() {
- return HObjectAccess(kInobject, PropertyCell::kValueOffset);
- }
-
- static HObjectAccess ForPropertyCellDetails() {
- return HObjectAccess(kInobject, PropertyCell::kDetailsOffset,
- Representation::Smi());
- }
-
- static HObjectAccess ForCellValue() {
- return HObjectAccess(kInobject, Cell::kValueOffset);
- }
-
- static HObjectAccess ForWeakCellValue() {
- return HObjectAccess(kInobject, WeakCell::kValueOffset);
- }
-
- static HObjectAccess ForWeakCellNext() {
- return HObjectAccess(kInobject, WeakCell::kNextOffset);
- }
-
- static HObjectAccess ForAllocationMementoSite() {
- return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
- }
-
- static HObjectAccess ForCounter() {
- return HObjectAccess(kExternalMemory, 0, Representation::Integer32(),
- Handle<Name>::null(), false, false);
- }
-
- static HObjectAccess ForExternalUInteger8() {
- return HObjectAccess(kExternalMemory, 0, Representation::UInteger8(),
- Handle<Name>::null(), false, false);
- }
-
- static HObjectAccess ForBoundTargetFunction() {
- return HObjectAccess(kInobject,
- JSBoundFunction::kBoundTargetFunctionOffset);
- }
-
- static HObjectAccess ForBoundThis() {
- return HObjectAccess(kInobject, JSBoundFunction::kBoundThisOffset);
- }
-
- static HObjectAccess ForBoundArguments() {
- return HObjectAccess(kInobject, JSBoundFunction::kBoundArgumentsOffset);
- }
-
- // Create an access to an offset in a fixed array header.
- static HObjectAccess ForFixedArrayHeader(int offset);
-
- // Create an access to an in-object property in a JSObject.
- // This kind of access must be used when the object |map| is known and
- // in-object properties are being accessed. Accesses of the in-object
-  // properties can have different semantics depending on whether the
-  // corresponding property was added to the map or not.
- static HObjectAccess ForMapAndOffset(Handle<Map> map, int offset,
- Representation representation = Representation::Tagged());
-
- // Create an access to an in-object property in a JSObject.
- // This kind of access can be used for accessing object header fields or
- // in-object properties if the map of the object is not known.
- static HObjectAccess ForObservableJSObjectOffset(int offset,
- Representation representation = Representation::Tagged()) {
- return ForMapAndOffset(Handle<Map>::null(), offset, representation);
- }
-
- // Create an access to an in-object property in a JSArray.
- static HObjectAccess ForJSArrayOffset(int offset);
-
- static HObjectAccess ForContextSlot(int index);
-
- static HObjectAccess ForScriptContext(int index);
-
- // Create an access to the backing store of an object.
- static HObjectAccess ForBackingStoreOffset(int offset,
- Representation representation = Representation::Tagged());
-
- // Create an access to a resolved field (in-object or backing store).
- static HObjectAccess ForField(Handle<Map> map, int index,
- Representation representation,
- Handle<Name> name);
-
- static HObjectAccess ForJSTypedArrayLength() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSTypedArray::kLengthOffset);
- }
-
- static HObjectAccess ForJSArrayBufferBackingStore() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBuffer::kBackingStoreOffset, Representation::External());
- }
-
- static HObjectAccess ForJSArrayBufferByteLength() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBuffer::kByteLengthOffset, Representation::Tagged());
- }
-
- static HObjectAccess ForJSArrayBufferBitField() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBuffer::kBitFieldOffset, Representation::Integer32());
- }
-
- static HObjectAccess ForJSArrayBufferBitFieldSlot() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBuffer::kBitFieldSlot, Representation::Smi());
- }
-
- static HObjectAccess ForJSArrayBufferViewBuffer() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBufferView::kBufferOffset);
- }
-
- static HObjectAccess ForJSArrayBufferViewByteOffset() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBufferView::kByteOffsetOffset);
- }
-
- static HObjectAccess ForJSArrayBufferViewByteLength() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSArrayBufferView::kByteLengthOffset);
- }
-
- static HObjectAccess ForJSGlobalObjectNativeContext() {
- return HObjectAccess(kInobject, JSGlobalObject::kNativeContextOffset);
- }
-
- static HObjectAccess ForJSRegExpFlags() {
- return HObjectAccess(kInobject, JSRegExp::kFlagsOffset);
- }
-
- static HObjectAccess ForJSRegExpSource() {
- return HObjectAccess(kInobject, JSRegExp::kSourceOffset);
- }
-
- static HObjectAccess ForJSCollectionTable() {
- return HObjectAccess::ForObservableJSObjectOffset(
- JSCollection::kTableOffset);
- }
-
- template <typename CollectionType>
- static HObjectAccess ForOrderedHashTableNumberOfBuckets() {
- return HObjectAccess(kInobject, CollectionType::kNumberOfBucketsOffset,
- Representation::Smi());
- }
-
- template <typename CollectionType>
- static HObjectAccess ForOrderedHashTableNumberOfElements() {
- return HObjectAccess(kInobject, CollectionType::kNumberOfElementsOffset,
- Representation::Smi());
- }
-
- template <typename CollectionType>
- static HObjectAccess ForOrderedHashTableNumberOfDeletedElements() {
- return HObjectAccess(kInobject,
- CollectionType::kNumberOfDeletedElementsOffset,
- Representation::Smi());
- }
-
- template <typename CollectionType>
- static HObjectAccess ForOrderedHashTableNextTable() {
- return HObjectAccess(kInobject, CollectionType::kNextTableOffset);
- }
-
- template <typename CollectionType>
- static HObjectAccess ForOrderedHashTableBucket(int bucket) {
- return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
- (bucket * kPointerSize),
- Representation::Smi());
- }
-
- // Access into the data table of an OrderedHashTable with a
- // known-at-compile-time bucket count.
- template <typename CollectionType, int kBucketCount>
- static HObjectAccess ForOrderedHashTableDataTableIndex(int index) {
- return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
- (kBucketCount * kPointerSize) +
- (index * kPointerSize));
- }
-
- inline bool Equals(HObjectAccess that) const {
- return value_ == that.value_; // portion and offset must match
- }
-
- protected:
- void SetGVNFlags(HValue *instr, PropertyAccessType access_type);
-
- private:
- // internal use only; different parts of an object or array
- enum Portion {
- kMaps, // map of an object
- kArrayLengths, // the length of an array
- kStringLengths, // the length of a string
- kElementsPointer, // elements pointer
- kBackingStore, // some field in the backing store
- kDouble, // some double field
- kInobject, // some other in-object field
- kExternalMemory // some field in external memory
- };
-
- HObjectAccess() : value_(0) {}
-
- HObjectAccess(Portion portion, int offset,
- Representation representation = Representation::Tagged(),
- Handle<Name> name = Handle<Name>::null(),
- bool immutable = false, bool existing_inobject_property = true)
- : value_(PortionField::encode(portion) |
- RepresentationField::encode(representation.kind()) |
- ImmutableField::encode(immutable ? 1 : 0) |
- ExistingInobjectPropertyField::encode(
- existing_inobject_property ? 1 : 0) |
- OffsetField::encode(offset)),
- name_(name) {
- // assert that the fields decode correctly
- DCHECK(this->offset() == offset);
- DCHECK(this->portion() == portion);
- DCHECK(this->immutable() == immutable);
- DCHECK(this->existing_inobject_property() == existing_inobject_property);
- DCHECK(RepresentationField::decode(value_) == representation.kind());
- DCHECK(!this->existing_inobject_property() || IsInobject());
- }
-
- class PortionField : public BitField<Portion, 0, 3> {};
- class RepresentationField : public BitField<Representation::Kind, 3, 4> {};
- class ImmutableField : public BitField<bool, 7, 1> {};
- class ExistingInobjectPropertyField : public BitField<bool, 8, 1> {};
- class OffsetField : public BitField<int, 9, 23> {};
-
- uint32_t value_; // encodes portion, representation, immutable, and offset
- Handle<Name> name_;
-
- friend class HLoadNamedField;
- friend class HStoreNamedField;
- friend class SideEffectsTracker;
- friend std::ostream& operator<<(std::ostream& os,
- const HObjectAccess& access);
-
- inline Portion portion() const {
- return PortionField::decode(value_);
- }
-};
-
-
-std::ostream& operator<<(std::ostream& os, const HObjectAccess& access);
-
-
-class HLoadNamedField final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*,
- HValue*, HObjectAccess);
- DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*,
- HObjectAccess, const UniqueSet<Map>*, HType);
-
- HValue* object() const { return OperandAt(0); }
- HValue* dependency() const {
- DCHECK(HasDependency());
- return OperandAt(1);
- }
- bool HasDependency() const { return OperandAt(0) != OperandAt(1); }
- HObjectAccess access() const { return access_; }
- Representation field_representation() const {
- return access_.representation();
- }
-
- const UniqueSet<Map>* maps() const { return maps_; }
-
- bool HasEscapingOperandAt(int index) override { return false; }
- bool HasOutOfBoundsAccess(int size) override {
- return !access().IsInobject() || access().offset() >= size;
- }
- Representation RequiredInputRepresentation(int index) override {
- if (index == 0) {
- // object must be external in case of external memory access
- return access().IsExternalMemory() ? Representation::External()
- : Representation::Tagged();
- }
- DCHECK(index == 1);
- return Representation::None();
- }
- Range* InferRange(Zone* zone) override;
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- bool CanBeReplacedWith(HValue* other) const {
- if (!CheckFlag(HValue::kCantBeReplaced)) return false;
- if (!type().Equals(other->type())) return false;
- if (!representation().Equals(other->representation())) return false;
- if (!other->IsLoadNamedField()) return true;
- HLoadNamedField* that = HLoadNamedField::cast(other);
- if (this->maps_ == that->maps_) return true;
- if (this->maps_ == NULL || that->maps_ == NULL) return false;
- return this->maps_->IsSubset(that->maps_);
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
-
- protected:
- bool DataEquals(HValue* other) override {
- HLoadNamedField* that = HLoadNamedField::cast(other);
- if (!this->access_.Equals(that->access_)) return false;
- if (this->maps_ == that->maps_) return true;
- return (this->maps_ != NULL &&
- that->maps_ != NULL &&
- this->maps_->Equals(that->maps_));
- }
-
- private:
- HLoadNamedField(HValue* object,
- HValue* dependency,
- HObjectAccess access)
- : access_(access), maps_(NULL) {
- DCHECK_NOT_NULL(object);
- SetOperandAt(0, object);
- SetOperandAt(1, dependency ? dependency : object);
-
- Representation representation = access.representation();
- if (representation.IsInteger8() ||
- representation.IsUInteger8() ||
- representation.IsInteger16() ||
- representation.IsUInteger16()) {
- set_representation(Representation::Integer32());
- } else if (representation.IsSmi()) {
- set_type(HType::Smi());
- if (SmiValuesAre32Bits()) {
- set_representation(Representation::Integer32());
- } else {
- set_representation(representation);
- }
- } else if (representation.IsDouble() ||
- representation.IsExternal() ||
- representation.IsInteger32()) {
- set_representation(representation);
- } else if (representation.IsHeapObject()) {
- set_type(HType::HeapObject());
- set_representation(Representation::Tagged());
- } else {
- set_representation(Representation::Tagged());
- }
- access.SetGVNFlags(this, LOAD);
- }
-
- HLoadNamedField(HValue* object,
- HValue* dependency,
- HObjectAccess access,
- const UniqueSet<Map>* maps,
- HType type)
- : HTemplateInstruction<2>(type), access_(access), maps_(maps) {
- DCHECK_NOT_NULL(maps);
- DCHECK_NE(0, maps->size());
-
- DCHECK_NOT_NULL(object);
- SetOperandAt(0, object);
- SetOperandAt(1, dependency ? dependency : object);
-
- DCHECK(access.representation().IsHeapObject());
- DCHECK(type.IsHeapObject());
- set_representation(Representation::Tagged());
-
- access.SetGVNFlags(this, LOAD);
- }
-
- bool IsDeletable() const override { return true; }
-
- HObjectAccess access_;
- const UniqueSet<Map>* maps_;
-};
-
-
-class HLoadFunctionPrototype final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
-
- HValue* function() { return OperandAt(0); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HLoadFunctionPrototype(HValue* function)
- : HUnaryOperation(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kCalls);
- }
-};
-
-class ArrayInstructionInterface {
- public:
- virtual HValue* GetKey() = 0;
- virtual void SetKey(HValue* key) = 0;
- virtual ElementsKind elements_kind() const = 0;
- // TryIncreaseBaseOffset returns false if overflow would result.
- virtual bool TryIncreaseBaseOffset(uint32_t increase_by_value) = 0;
- virtual bool IsDehoisted() const = 0;
- virtual void SetDehoisted(bool is_dehoisted) = 0;
- virtual ~ArrayInstructionInterface() { }
-
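-  // Keyed-access indices stay Integer32 when the key already is Integer32 or
-  // when Smis are 32 bits wide; otherwise Smi representation is required.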
- static Representation KeyedAccessIndexRequirement(Representation r) {
- return r.IsInteger32() || SmiValuesAre32Bits()
- ? Representation::Integer32() : Representation::Smi();
- }
-};
-
-
-static const int kDefaultKeyedHeaderOffsetSentinel = -1;
-
-enum LoadKeyedHoleMode {
- NEVER_RETURN_HOLE,
- ALLOW_RETURN_HOLE,
- CONVERT_HOLE_TO_UNDEFINED
-};
-
-
-class HLoadKeyed final : public HTemplateInstruction<4>,
- public ArrayInstructionInterface {
- public:
- DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
- ElementsKind);
- DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
- ElementsKind, LoadKeyedHoleMode);
- DECLARE_INSTRUCTION_FACTORY_P7(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
- ElementsKind, LoadKeyedHoleMode, int);
-
- bool is_fixed_typed_array() const {
- return IsFixedTypedArrayElementsKind(elements_kind());
- }
- HValue* elements() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
- HValue* dependency() const {
- DCHECK(HasDependency());
- return OperandAt(2);
- }
- bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
- HValue* backing_store_owner() const {
- DCHECK(HasBackingStoreOwner());
- return OperandAt(3);
- }
- bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
- uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); }
- bool TryIncreaseBaseOffset(uint32_t increase_by_value) override;
- HValue* GetKey() override { return key(); }
- void SetKey(HValue* key) override { SetOperandAt(1, key); }
- bool IsDehoisted() const override {
- return IsDehoistedField::decode(bit_field_);
- }
- void SetDehoisted(bool is_dehoisted) override {
- bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
- }
- ElementsKind elements_kind() const override {
- return ElementsKindField::decode(bit_field_);
- }
- LoadKeyedHoleMode hole_mode() const {
- return HoleModeField::decode(bit_field_);
- }
-
- Representation RequiredInputRepresentation(int index) override {
- // kind_fast: tagged[int32] (none)
- // kind_double: tagged[int32] (none)
- // kind_fixed_typed_array: external[int32] (none)
- // kind_external: external[int32] (none)
- if (index == 0) {
- return is_fixed_typed_array() ? Representation::External()
- : Representation::Tagged();
- }
- if (index == 1) {
- return ArrayInstructionInterface::KeyedAccessIndexRequirement(
- OperandAt(1)->representation());
- }
- if (index == 2) {
- return Representation::None();
- }
- DCHECK_EQ(3, index);
- return HasBackingStoreOwner() ? Representation::Tagged()
- : Representation::None();
- }
-
- Representation observed_input_representation(int index) override {
- return RequiredInputRepresentation(index);
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- bool UsesMustHandleHole() const;
- bool AllUsesCanTreatHoleAsNaN() const;
- bool RequiresHoleCheck() const;
-
- Range* InferRange(Zone* zone) override;
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
-
- protected:
- bool DataEquals(HValue* other) override {
- if (!other->IsLoadKeyed()) return false;
- HLoadKeyed* other_load = HLoadKeyed::cast(other);
-
- if (base_offset() != other_load->base_offset()) return false;
- return elements_kind() == other_load->elements_kind();
- }
-
- private:
- HLoadKeyed(HValue* obj, HValue* key, HValue* dependency,
- HValue* backing_store_owner, ElementsKind elements_kind,
- LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
- int offset = kDefaultKeyedHeaderOffsetSentinel)
- : bit_field_(0) {
- offset = offset == kDefaultKeyedHeaderOffsetSentinel
- ? GetDefaultHeaderSizeForElementsKind(elements_kind)
- : offset;
- bit_field_ = ElementsKindField::encode(elements_kind) |
- HoleModeField::encode(mode) |
- BaseOffsetField::encode(offset);
-
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, dependency != nullptr ? dependency : obj);
- SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
- DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
-
- if (!is_fixed_typed_array()) {
-      // The double-storing cases (holey and fast) can be distinguished from
-      // the smi/object cases by looking at elements_kind_.
- DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
-
- if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- if (IsFastSmiElementsKind(elements_kind) &&
- (!IsHoleyElementsKind(elements_kind) ||
- mode == NEVER_RETURN_HOLE)) {
- set_type(HType::Smi());
- if (SmiValuesAre32Bits() && !RequiresHoleCheck()) {
- set_representation(Representation::Integer32());
- } else {
- set_representation(Representation::Smi());
- }
- } else {
- set_representation(Representation::Tagged());
- }
-
- SetDependsOnFlag(kArrayElements);
- } else {
- set_representation(Representation::Double());
- SetDependsOnFlag(kDoubleArrayElements);
- }
- } else {
- if (elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
- set_representation(Representation::Double());
- } else {
- set_representation(Representation::Integer32());
- }
-
- if (is_fixed_typed_array()) {
- SetDependsOnFlag(kExternalMemory);
- SetDependsOnFlag(kTypedArrayElements);
- } else {
- UNREACHABLE();
- }
- // Native code could change the specialized array.
- SetDependsOnFlag(kCalls);
- }
-
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return !RequiresHoleCheck(); }
-
- // Establish some checks around our packed fields
- enum LoadKeyedBits {
- kBitsForElementsKind = 5,
- kBitsForHoleMode = 2,
- kBitsForBaseOffset = 24,
- kBitsForIsDehoisted = 1,
-
- kStartElementsKind = 0,
- kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
- kStartBaseOffset = kStartHoleMode + kBitsForHoleMode,
- kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset
- };
-
- STATIC_ASSERT((kBitsForElementsKind + kBitsForHoleMode + kBitsForBaseOffset +
- kBitsForIsDehoisted) <= sizeof(uint32_t) * 8);
- STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
- class ElementsKindField:
- public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
- {}; // NOLINT
- class HoleModeField:
- public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
- {}; // NOLINT
- class BaseOffsetField:
- public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset>
- {}; // NOLINT
- class IsDehoistedField:
- public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
- {}; // NOLINT
- uint32_t bit_field_;
-};
-
-
-// Indicates whether the store is a store to an entry that was previously
-// initialized or not.
-enum StoreFieldOrKeyedMode {
- // The entry could be either previously initialized or not.
- INITIALIZING_STORE,
- // At the time of this store it is guaranteed that the entry is already
- // initialized.
- STORE_TO_INITIALIZED_ENTRY
-};
-
-
-class HStoreNamedField final : public HTemplateInstruction<3> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
- HObjectAccess, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P4(HStoreNamedField, HValue*,
- HObjectAccess, HValue*, StoreFieldOrKeyedMode);
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
-
- bool HasEscapingOperandAt(int index) override { return index == 1; }
- bool HasOutOfBoundsAccess(int size) override {
- return !access().IsInobject() || access().offset() >= size;
- }
- Representation RequiredInputRepresentation(int index) override {
- if (index == 0 && access().IsExternalMemory()) {
- // object must be external in case of external memory access
- return Representation::External();
- } else if (index == 1) {
- if (field_representation().IsInteger8() ||
- field_representation().IsUInteger8() ||
- field_representation().IsInteger16() ||
- field_representation().IsUInteger16() ||
- field_representation().IsInteger32()) {
- return Representation::Integer32();
- } else if (field_representation().IsDouble()) {
- return field_representation();
- } else if (field_representation().IsSmi()) {
- if (SmiValuesAre32Bits() &&
- store_mode() == STORE_TO_INITIALIZED_ENTRY) {
- return Representation::Integer32();
- }
- return field_representation();
- } else if (field_representation().IsExternal()) {
- return Representation::External();
- }
- }
- return Representation::Tagged();
- }
- bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override {
- DCHECK(side_effect == kNewSpacePromotion);
- if (!FLAG_use_write_barrier_elimination) return false;
- dominator_ = dominator;
- return false;
- }
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HValue* object() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
- HValue* transition() const { return OperandAt(2); }
-
- HObjectAccess access() const { return access_; }
- HValue* dominator() const { return dominator_; }
- bool has_transition() const { return HasTransitionField::decode(bit_field_); }
- StoreFieldOrKeyedMode store_mode() const {
- return StoreModeField::decode(bit_field_);
- }
-
- Handle<Map> transition_map() const {
- if (has_transition()) {
- return Handle<Map>::cast(
- HConstant::cast(transition())->handle(isolate()));
- } else {
- return Handle<Map>();
- }
- }
-
- void SetTransition(HConstant* transition) {
- DCHECK(!has_transition()); // Only set once.
- SetOperandAt(2, transition);
- bit_field_ = HasTransitionField::update(bit_field_, true);
- SetChangesFlag(kMaps);
- }
-
- bool NeedsWriteBarrier() const {
- DCHECK(!field_representation().IsDouble() ||
- (FLAG_unbox_double_fields && access_.IsInobject()) ||
- !has_transition());
- if (field_representation().IsDouble()) return false;
- if (field_representation().IsSmi()) return false;
- if (field_representation().IsInteger32()) return false;
- if (field_representation().IsExternal()) return false;
- return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
- }
-
- bool NeedsWriteBarrierForMap() {
- return ReceiverObjectNeedsWriteBarrier(object(), transition(),
- dominator());
- }
-
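-  // The write barrier's Smi check can be omitted when either the field
-  // representation or the stored value's type guarantees a heap object.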
- SmiCheck SmiCheckForWriteBarrier() const {
- if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK;
- if (value()->type().IsHeapObject()) return OMIT_SMI_CHECK;
- return INLINE_SMI_CHECK;
- }
-
- PointersToHereCheck PointersToHereCheckForValue() const {
- return PointersToHereCheckForObject(value(), dominator());
- }
-
- Representation field_representation() const {
- return access_.representation();
- }
-
- void UpdateValue(HValue* value) {
- SetOperandAt(1, value);
- }
-
- bool CanBeReplacedWith(HStoreNamedField* that) const {
- if (!this->access().Equals(that->access())) return false;
- if (SmiValuesAre32Bits() &&
- this->field_representation().IsSmi() &&
- this->store_mode() == INITIALIZING_STORE &&
- that->store_mode() == STORE_TO_INITIALIZED_ENTRY) {
- // We cannot replace an initializing store to a smi field with a store to
- // an initialized entry on 64-bit architectures (with 32-bit smis).
- return false;
- }
- return true;
- }
-
- private:
- HStoreNamedField(HValue* obj, HObjectAccess access, HValue* val,
- StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
- : access_(access),
- dominator_(NULL),
- bit_field_(HasTransitionField::encode(false) |
- StoreModeField::encode(store_mode)) {
-    // Stores to a non-existing in-object property are allowed only on
-    // newly allocated objects (via HAllocate or HInnerAllocatedObject).
- DCHECK(!access.IsInobject() || access.existing_inobject_property() ||
- obj->IsAllocate() || obj->IsInnerAllocatedObject());
- SetOperandAt(0, obj);
- SetOperandAt(1, val);
- SetOperandAt(2, obj);
- access.SetGVNFlags(this, STORE);
- }
-
- class HasTransitionField : public BitField<bool, 0, 1> {};
- class StoreModeField : public BitField<StoreFieldOrKeyedMode, 1, 1> {};
-
- HObjectAccess access_;
- HValue* dominator_;
- uint32_t bit_field_;
-};
-
-class HStoreKeyed final : public HTemplateInstruction<4>,
- public ArrayInstructionInterface {
- public:
- DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
- HValue*, ElementsKind);
- DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
- HValue*, ElementsKind, StoreFieldOrKeyedMode);
- DECLARE_INSTRUCTION_FACTORY_P7(HStoreKeyed, HValue*, HValue*, HValue*,
- HValue*, ElementsKind, StoreFieldOrKeyedMode,
- int);
-
- Representation RequiredInputRepresentation(int index) override {
- // kind_fast: tagged[int32] = tagged
- // kind_double: tagged[int32] = double
- // kind_smi : tagged[int32] = smi
- // kind_fixed_typed_array: tagged[int32] = (double | int32)
- // kind_external: external[int32] = (double | int32)
- if (index == 0) {
- return is_fixed_typed_array() ? Representation::External()
- : Representation::Tagged();
- } else if (index == 1) {
- return ArrayInstructionInterface::KeyedAccessIndexRequirement(
- OperandAt(1)->representation());
- } else if (index == 2) {
- return RequiredValueRepresentation(elements_kind(), store_mode());
- }
-
- DCHECK_EQ(3, index);
- return HasBackingStoreOwner() ? Representation::Tagged()
- : Representation::None();
- }
-
- static Representation RequiredValueRepresentation(
- ElementsKind kind, StoreFieldOrKeyedMode mode) {
- if (IsDoubleOrFloatElementsKind(kind)) {
- return Representation::Double();
- }
-
- if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() &&
- mode == STORE_TO_INITIALIZED_ENTRY) {
- return Representation::Integer32();
- }
-
- if (IsFastSmiElementsKind(kind)) {
- return Representation::Smi();
- }
-
- if (IsFixedTypedArrayElementsKind(kind)) {
- return Representation::Integer32();
- }
- return Representation::Tagged();
- }
-
- bool is_fixed_typed_array() const {
- return IsFixedTypedArrayElementsKind(elements_kind());
- }
-
- Representation observed_input_representation(int index) override {
- if (index != 2) return RequiredInputRepresentation(index);
- if (IsUninitialized()) {
- return Representation::None();
- }
- Representation r =
- RequiredValueRepresentation(elements_kind(), store_mode());
- // For fast object elements kinds, don't assume anything.
- if (r.IsTagged()) return Representation::None();
- return r;
- }
-
- HValue* elements() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
- HValue* value() const { return OperandAt(2); }
- HValue* backing_store_owner() const {
- DCHECK(HasBackingStoreOwner());
- return OperandAt(3);
- }
- bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
- bool value_is_smi() const { return IsFastSmiElementsKind(elements_kind()); }
- StoreFieldOrKeyedMode store_mode() const {
- return StoreModeField::decode(bit_field_);
- }
- ElementsKind elements_kind() const override {
- return ElementsKindField::decode(bit_field_);
- }
- uint32_t base_offset() const { return base_offset_; }
- bool TryIncreaseBaseOffset(uint32_t increase_by_value) override;
- HValue* GetKey() override { return key(); }
- void SetKey(HValue* key) override { SetOperandAt(1, key); }
- bool IsDehoisted() const override {
- return IsDehoistedField::decode(bit_field_);
- }
- void SetDehoisted(bool is_dehoisted) override {
- bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
- }
- bool IsUninitialized() { return IsUninitializedField::decode(bit_field_); }
- void SetUninitialized(bool is_uninitialized) {
- bit_field_ = IsUninitializedField::update(bit_field_, is_uninitialized);
- }
-
- bool IsConstantHoleStore() {
- return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
- }
-
- bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override {
- DCHECK(side_effect == kNewSpacePromotion);
- dominator_ = dominator;
- return false;
- }
-
- HValue* dominator() const { return dominator_; }
-
- bool NeedsWriteBarrier() {
- if (value_is_smi()) {
- return false;
- } else {
- return StoringValueNeedsWriteBarrier(value()) &&
- ReceiverObjectNeedsWriteBarrier(elements(), value(), dominator());
- }
- }
-
- PointersToHereCheck PointersToHereCheckForValue() const {
- return PointersToHereCheckForObject(value(), dominator());
- }
-
- bool NeedsCanonicalization();
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
-
- private:
- HStoreKeyed(HValue* obj, HValue* key, HValue* val,
- HValue* backing_store_owner, ElementsKind elements_kind,
- StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
- int offset = kDefaultKeyedHeaderOffsetSentinel)
- : base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
- ? GetDefaultHeaderSizeForElementsKind(elements_kind)
- : offset),
- bit_field_(IsDehoistedField::encode(false) |
- IsUninitializedField::encode(false) |
- StoreModeField::encode(store_mode) |
- ElementsKindField::encode(elements_kind)),
- dominator_(NULL) {
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, val);
- SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
- DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
-
- if (IsFastObjectElementsKind(elements_kind)) {
- SetFlag(kTrackSideEffectDominators);
- SetDependsOnFlag(kNewSpacePromotion);
- }
- if (IsFastDoubleElementsKind(elements_kind)) {
- SetChangesFlag(kDoubleArrayElements);
- } else if (IsFastSmiElementsKind(elements_kind)) {
- SetChangesFlag(kArrayElements);
- } else if (is_fixed_typed_array()) {
- SetChangesFlag(kTypedArrayElements);
- SetChangesFlag(kExternalMemory);
- SetFlag(kTruncatingToNumber);
- } else {
- SetChangesFlag(kArrayElements);
- }
-
- // {UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
- if (elements_kind >= UINT8_ELEMENTS && elements_kind <= INT32_ELEMENTS) {
- SetFlag(kTruncatingToInt32);
- }
- }
-
- class IsDehoistedField : public BitField<bool, 0, 1> {};
- class IsUninitializedField : public BitField<bool, 1, 1> {};
- class StoreModeField : public BitField<StoreFieldOrKeyedMode, 2, 1> {};
- class ElementsKindField : public BitField<ElementsKind, 3, 5> {};
-
- uint32_t base_offset_;
- uint32_t bit_field_;
- HValue* dominator_;
-};
-
-class HTransitionElementsKind final : public HTemplateInstruction<2> {
- public:
- inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* object,
- Handle<Map> original_map,
- Handle<Map> transitioned_map) {
- return new(zone) HTransitionElementsKind(context, object,
- original_map, transitioned_map);
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* object() const { return OperandAt(0); }
- HValue* context() const { return OperandAt(1); }
- Unique<Map> original_map() const { return original_map_; }
- Unique<Map> transitioned_map() const { return transitioned_map_; }
- ElementsKind from_kind() const {
- return FromElementsKindField::decode(bit_field_);
- }
- ElementsKind to_kind() const {
- return ToElementsKindField::decode(bit_field_);
- }
- bool map_is_stable() const { return MapIsStableField::decode(bit_field_); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
-
- protected:
- bool DataEquals(HValue* other) override {
- HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_ == instr->original_map_ &&
- transitioned_map_ == instr->transitioned_map_;
- }
-
- int RedefinedOperandIndex() override { return 0; }
-
- private:
- HTransitionElementsKind(HValue* context, HValue* object,
- Handle<Map> original_map,
- Handle<Map> transitioned_map)
- : original_map_(Unique<Map>(original_map)),
- transitioned_map_(Unique<Map>(transitioned_map)),
- bit_field_(
- FromElementsKindField::encode(original_map->elements_kind()) |
- ToElementsKindField::encode(transitioned_map->elements_kind()) |
- MapIsStableField::encode(transitioned_map->is_stable())) {
- SetOperandAt(0, object);
- SetOperandAt(1, context);
- SetFlag(kUseGVN);
- SetChangesFlag(kElementsKind);
- if (!IsSimpleMapChangeTransition(from_kind(), to_kind())) {
- SetChangesFlag(kElementsPointer);
- SetChangesFlag(kNewSpacePromotion);
- }
- set_representation(Representation::Tagged());
- }
-
- class FromElementsKindField : public BitField<ElementsKind, 0, 5> {};
- class ToElementsKindField : public BitField<ElementsKind, 5, 5> {};
- class MapIsStableField : public BitField<bool, 10, 1> {};
-
- Unique<Map> original_map_;
- Unique<Map> transitioned_map_;
- uint32_t bit_field_;
-};
-
-
-class HStringAdd final : public HBinaryOperation {
- public:
- static HInstruction* New(
- Isolate* isolate, Zone* zone, HValue* context, HValue* left,
- HValue* right, PretenureFlag pretenure_flag = NOT_TENURED,
- StringAddFlags flags = STRING_ADD_CHECK_BOTH,
- Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
-
- StringAddFlags flags() const { return flags_; }
- PretenureFlag pretenure_flag() const { return pretenure_flag_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd)
-
- protected:
- bool DataEquals(HValue* other) override {
- return flags_ == HStringAdd::cast(other)->flags_ &&
- pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
- }
-
- private:
- HStringAdd(HValue* context, HValue* left, HValue* right,
- PretenureFlag pretenure_flag, StringAddFlags flags,
- Handle<AllocationSite> allocation_site)
- : HBinaryOperation(context, left, right, HType::String()),
- flags_(flags),
- pretenure_flag_(pretenure_flag) {
- set_representation(Representation::Tagged());
- if ((flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
- SetChangesFlag(kNewSpacePromotion);
- SetFlag(kUseGVN);
- }
- SetDependsOnFlag(kMaps);
- if (FLAG_trace_pretenuring) {
- PrintF("HStringAdd with AllocationSite %p %s\n",
- allocation_site.is_null()
- ? static_cast<void*>(NULL)
- : static_cast<void*>(*allocation_site),
- pretenure_flag == TENURED ? "tenured" : "not tenured");
- }
- }
-
- bool IsDeletable() const final {
- return (flags_ & STRING_ADD_CONVERT) != STRING_ADD_CONVERT;
- }
-
- const StringAddFlags flags_;
- const PretenureFlag pretenure_flag_;
-};
-
-
-class HStringCharCodeAt final : public HTemplateInstruction<3> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
- HValue*,
- HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- // The index is supposed to be Integer32.
- return index == 2
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- HValue* context() const { return OperandAt(0); }
- HValue* string() const { return OperandAt(1); }
- HValue* index() const { return OperandAt(2); }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- Range* InferRange(Zone* zone) override {
- return new(zone) Range(0, String::kMaxUtf16CodeUnit);
- }
-
- private:
- HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
- SetOperandAt(0, context);
- SetOperandAt(1, string);
- SetOperandAt(2, index);
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kStringChars);
- SetChangesFlag(kNewSpacePromotion);
- }
-
- // No side effects: runtime function assumes string + number inputs.
- bool IsDeletable() const override { return true; }
-};
-
-
-class HStringCharFromCode final : public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* char_code);
-
- Representation RequiredInputRepresentation(int index) override {
- return index == 0
- ? Representation::Tagged()
- : Representation::Integer32();
- }
-
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
-
- bool DataEquals(HValue* other) override { return true; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
-
- private:
- HStringCharFromCode(HValue* context, HValue* char_code)
- : HTemplateInstruction<2>(HType::String()) {
- SetOperandAt(0, context);
- SetOperandAt(1, char_code);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetChangesFlag(kNewSpacePromotion);
- }
-
- bool IsDeletable() const override {
- return !value()->ToNumberCanBeObserved();
- }
-};
-
-
-class HTypeof final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
-
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof)
-
- private:
- explicit HTypeof(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
-class HTrapAllocationMemento final : public HTemplateInstruction<1> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* object() { return OperandAt(0); }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento)
-
- private:
- explicit HTrapAllocationMemento(HValue* obj) {
- SetOperandAt(0, obj);
- }
-};
-
-
-class HMaybeGrowElements final : public HTemplateInstruction<5> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HMaybeGrowElements, HValue*,
- HValue*, HValue*, HValue*, bool,
- ElementsKind);
-
- Representation RequiredInputRepresentation(int index) override {
- if (index < 3) {
- return Representation::Tagged();
- }
- DCHECK(index == 3 || index == 4);
- return Representation::Integer32();
- }
-
- HValue* context() const { return OperandAt(0); }
- HValue* object() const { return OperandAt(1); }
- HValue* elements() const { return OperandAt(2); }
- HValue* key() const { return OperandAt(3); }
- HValue* current_capacity() const { return OperandAt(4); }
-
- bool is_js_array() const { return is_js_array_; }
- ElementsKind kind() const { return kind_; }
-
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HMaybeGrowElements(HValue* context, HValue* object, HValue* elements,
- HValue* key, HValue* current_capacity,
- bool is_js_array, ElementsKind kind) {
- is_js_array_ = is_js_array;
- kind_ = kind;
-
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- SetOperandAt(2, elements);
- SetOperandAt(3, key);
- SetOperandAt(4, current_capacity);
-
- SetFlag(kUseGVN);
- SetChangesFlag(kElementsPointer);
- SetChangesFlag(kNewSpacePromotion);
- set_representation(Representation::Tagged());
- }
-
- bool is_js_array_;
- ElementsKind kind_;
-};
-
-
-class HSeqStringGetChar final : public HTemplateInstruction<2> {
- public:
- static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- String::Encoding encoding, HValue* string,
- HValue* index);
-
- Representation RequiredInputRepresentation(int index) override {
- return (index == 0) ? Representation::Tagged()
- : Representation::Integer32();
- }
-
- String::Encoding encoding() const { return encoding_; }
- HValue* string() const { return OperandAt(0); }
- HValue* index() const { return OperandAt(1); }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
-
- protected:
- bool DataEquals(HValue* other) override {
- return encoding() == HSeqStringGetChar::cast(other)->encoding();
- }
-
- Range* InferRange(Zone* zone) override {
- if (encoding() == String::ONE_BYTE_ENCODING) {
- return new(zone) Range(0, String::kMaxOneByteCharCode);
- } else {
- DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding());
- return new(zone) Range(0, String::kMaxUtf16CodeUnit);
- }
- }
-
- private:
- HSeqStringGetChar(String::Encoding encoding,
- HValue* string,
- HValue* index) : encoding_(encoding) {
- SetOperandAt(0, string);
- SetOperandAt(1, index);
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kStringChars);
- }
-
- bool IsDeletable() const override { return true; }
-
- String::Encoding encoding_;
-};
-
-
-class HSeqStringSetChar final : public HTemplateInstruction<4> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(
- HSeqStringSetChar, String::Encoding,
- HValue*, HValue*, HValue*);
-
- String::Encoding encoding() { return encoding_; }
- HValue* context() { return OperandAt(0); }
- HValue* string() { return OperandAt(1); }
- HValue* index() { return OperandAt(2); }
- HValue* value() { return OperandAt(3); }
-
- Representation RequiredInputRepresentation(int index) override {
- return (index <= 1) ? Representation::Tagged()
- : Representation::Integer32();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
-
- private:
- HSeqStringSetChar(HValue* context,
- String::Encoding encoding,
- HValue* string,
- HValue* index,
- HValue* value) : encoding_(encoding) {
- SetOperandAt(0, context);
- SetOperandAt(1, string);
- SetOperandAt(2, index);
- SetOperandAt(3, value);
- set_representation(Representation::Tagged());
- SetChangesFlag(kStringChars);
- }
-
- String::Encoding encoding_;
-};
-
-
-class HCheckMapValue final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HType CalculateInferredType() override {
- if (value()->type().IsHeapObject()) return value()->type();
- return HType::HeapObject();
- }
-
- HValue* value() const { return OperandAt(0); }
- HValue* map() const { return OperandAt(1); }
-
- HValue* Canonicalize() override;
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
-
- protected:
- int RedefinedOperandIndex() override { return 0; }
-
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- HCheckMapValue(HValue* value, HValue* map)
- : HTemplateInstruction<2>(HType::HeapObject()) {
- SetOperandAt(0, value);
- SetOperandAt(1, map);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- SetDependsOnFlag(kElementsKind);
- }
-};
-
-
-class HForInPrepareMap final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* context() const { return OperandAt(0); }
- HValue* enumerable() const { return OperandAt(1); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HType CalculateInferredType() override { return HType::Tagged(); }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
-
- private:
- HForInPrepareMap(HValue* context,
- HValue* object) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-};
-
-
-class HForInCacheArray final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- HValue* enumerable() const { return OperandAt(0); }
- HValue* map() const { return OperandAt(1); }
- int idx() const { return idx_; }
-
- HForInCacheArray* index_cache() {
- return index_cache_;
- }
-
- void set_index_cache(HForInCacheArray* index_cache) {
- index_cache_ = index_cache;
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HType CalculateInferredType() override { return HType::Tagged(); }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
-
- private:
- HForInCacheArray(HValue* enumerable,
- HValue* keys,
- int idx) : idx_(idx) {
- SetOperandAt(0, enumerable);
- SetOperandAt(1, keys);
- set_representation(Representation::Tagged());
- }
-
- int idx_;
- HForInCacheArray* index_cache_;
-};
-
-
-class HLoadFieldByIndex final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*);
-
- HLoadFieldByIndex(HValue* object,
- HValue* index) {
- SetOperandAt(0, object);
- SetOperandAt(1, index);
- SetChangesFlag(kNewSpacePromotion);
- set_representation(Representation::Tagged());
- }
-
- Representation RequiredInputRepresentation(int index) override {
- if (index == 1) {
- return Representation::Smi();
- } else {
- return Representation::Tagged();
- }
- }
-
- HValue* object() const { return OperandAt(0); }
- HValue* index() const { return OperandAt(1); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- HType CalculateInferredType() override { return HType::Tagged(); }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
-
- private:
- bool IsDeletable() const override { return true; }
-};
-
-#undef DECLARE_INSTRUCTION
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-load-elimination.cc b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
deleted file mode 100644
index 99f4947a84..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-load-elimination.h"
-
-#include "src/crankshaft/hydrogen-alias-analysis.h"
-#include "src/crankshaft/hydrogen-flow-engine.h"
-#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define GLOBAL true
-#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
-
-static const int kMaxTrackedFields = 16;
-static const int kMaxTrackedObjects = 5;
-
-// An element in the field approximation list.
-class HFieldApproximation : public ZoneObject {
- public: // Just a data blob.
- HValue* object_;
- HValue* last_value_;
- HFieldApproximation* next_;
-
- // Recursively copy the entire linked list of field approximations.
- HFieldApproximation* Copy(Zone* zone) {
- HFieldApproximation* copy = new(zone) HFieldApproximation();
- copy->object_ = this->object_;
- copy->last_value_ = this->last_value_;
- copy->next_ = this->next_ == NULL ? NULL : this->next_->Copy(zone);
- return copy;
- }
-};
-
-
-// The main data structure used during load/store elimination. Each in-object
-// field is tracked separately. For each field, store a list of known field
-// values for known objects.
-class HLoadEliminationTable : public ZoneObject {
- public:
- HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
- : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
-
- // The main processing of instructions.
- HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kLoadNamedField: {
- HLoadNamedField* l = HLoadNamedField::cast(instr);
- TRACE((" process L%d field %d (o%d)\n",
- instr->id(),
- FieldOf(l->access()),
- l->object()->ActualValue()->id()));
- HValue* result = load(l);
- if (result != instr && l->CanBeReplacedWith(result)) {
- // The load can be replaced with a previous load or a value.
- TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
- instr->DeleteAndReplaceWith(result);
- }
- break;
- }
- case HValue::kStoreNamedField: {
- HStoreNamedField* s = HStoreNamedField::cast(instr);
- TRACE((" process S%d field %d (o%d) = v%d\n",
- instr->id(),
- FieldOf(s->access()),
- s->object()->ActualValue()->id(),
- s->value()->id()));
- HValue* result = store(s);
- if (result == NULL) {
- // The store is redundant. Remove it.
- TRACE((" remove S%d\n", instr->id()));
- instr->DeleteAndReplaceWith(NULL);
- }
- break;
- }
- case HValue::kTransitionElementsKind: {
- HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
- HValue* object = t->object()->ActualValue();
- KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
- KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
- break;
- }
- default: {
- if (instr->CheckChangesFlag(kInobjectFields)) {
- TRACE((" kill-all i%d\n", instr->id()));
- Kill();
- break;
- }
- if (instr->CheckChangesFlag(kMaps)) {
- TRACE((" kill-maps i%d\n", instr->id()));
- KillOffset(JSObject::kMapOffset);
- }
- if (instr->CheckChangesFlag(kElementsKind)) {
- TRACE((" kill-elements-kind i%d\n", instr->id()));
- KillOffset(JSObject::kMapOffset);
- KillOffset(JSObject::kElementsOffset);
- }
- if (instr->CheckChangesFlag(kElementsPointer)) {
- TRACE((" kill-elements i%d\n", instr->id()));
- KillOffset(JSObject::kElementsOffset);
- }
- if (instr->CheckChangesFlag(kOsrEntries)) {
- TRACE((" kill-osr i%d\n", instr->id()));
- Kill();
- }
- }
- // Improvements possible:
- // - learn from HCheckMaps for field 0
- // - remove unobservable stores (write-after-write)
- // - track cells
- // - track globals
- // - track roots
- }
- return this;
- }
-
- // Support for global analysis with HFlowEngine: Merge given state with
- // the other incoming state.
- static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
- HBasicBlock* succ_block,
- HLoadEliminationTable* pred_state,
- HBasicBlock* pred_block,
- Zone* zone) {
- DCHECK(pred_state != NULL);
- if (succ_state == NULL) {
- return pred_state->Copy(succ_block, pred_block, zone);
- } else {
- return succ_state->Merge(succ_block, pred_state, pred_block, zone);
- }
- }
-
- // Support for global analysis with HFlowEngine: Given state merged with all
- // the other incoming states, prepare it for use.
- static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
- HBasicBlock* block,
- Zone* zone) {
- DCHECK(state != NULL);
- return state;
- }
-
- private:
- // Copy state to successor block.
- HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
- Zone* zone) {
- HLoadEliminationTable* copy =
- new(zone) HLoadEliminationTable(zone, aliasing_);
- copy->EnsureFields(fields_.length());
- for (int i = 0; i < fields_.length(); i++) {
- copy->fields_[i] = fields_[i] == NULL ? NULL : fields_[i]->Copy(zone);
- }
- if (FLAG_trace_load_elimination) {
- TRACE((" copy-to B%d\n", succ->block_id()));
- copy->Print();
- }
- return copy;
- }
-
- // Merge this state with the other incoming state.
- HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
- HBasicBlock* that_block, Zone* zone) {
- if (that->fields_.length() < fields_.length()) {
- // Drop fields not in the other table.
- fields_.Rewind(that->fields_.length());
- }
- for (int i = 0; i < fields_.length(); i++) {
- // Merge the field approximations for like fields.
- HFieldApproximation* approx = fields_[i];
- HFieldApproximation* prev = NULL;
- while (approx != NULL) {
- // TODO(titzer): Merging is O(N * M); sort?
- HFieldApproximation* other = that->Find(approx->object_, i);
- if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
- // Kill an entry that doesn't agree with the other value.
- if (prev != NULL) {
- prev->next_ = approx->next_;
- } else {
- fields_[i] = approx->next_;
- }
- approx = approx->next_;
- continue;
- }
- prev = approx;
- approx = approx->next_;
- }
- }
- if (FLAG_trace_load_elimination) {
- TRACE((" merge-to B%d\n", succ->block_id()));
- Print();
- }
- return this;
- }
-
- friend class HLoadEliminationEffects; // Calls Kill() and others.
- friend class HLoadEliminationPhase;
-
- private:
- // Process a load instruction, updating internal table state. If a previous
- // load or store for this object and field exists, return the new value with
- // which the load should be replaced. Otherwise, return {instr}.
- HValue* load(HLoadNamedField* instr) {
-    // There must be no loads from non-observable in-object properties.
- DCHECK(!instr->access().IsInobject() ||
- instr->access().existing_inobject_property());
-
- int field = FieldOf(instr->access());
- if (field < 0) return instr;
-
- HValue* object = instr->object()->ActualValue();
- HFieldApproximation* approx = FindOrCreate(object, field);
-
- if (approx->last_value_ == NULL) {
- // Load is not redundant. Fill out a new entry.
- approx->last_value_ = instr;
- return instr;
- } else if (approx->last_value_->block()->EqualToOrDominates(
- instr->block())) {
- // Eliminate the load. Reuse previously stored value or load instruction.
- return approx->last_value_;
- } else {
- return instr;
- }
- }
-
- // Process a store instruction, updating internal table state. If a previous
- // store to the same object and field makes this store redundant (e.g. because
- // the stored values are the same), return NULL indicating that this store
- // instruction is redundant. Otherwise, return {instr}.
- HValue* store(HStoreNamedField* instr) {
- if (instr->access().IsInobject() &&
- !instr->access().existing_inobject_property()) {
- TRACE((" skipping non existing property initialization store\n"));
- return instr;
- }
-
- int field = FieldOf(instr->access());
- if (field < 0) return KillIfMisaligned(instr);
-
- HValue* object = instr->object()->ActualValue();
- HValue* value = instr->value();
-
- if (instr->has_transition()) {
- // A transition introduces a new field and alters the map of the object.
- // Since the field in the object is new, it cannot alias existing entries.
- KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
- } else {
- // Kill non-equivalent may-alias entries.
- KillFieldInternal(object, field, value);
- }
- HFieldApproximation* approx = FindOrCreate(object, field);
-
- if (Equal(approx->last_value_, value)) {
- // The store is redundant because the field already has this value.
- return NULL;
- } else {
- // The store is not redundant. Update the entry.
- approx->last_value_ = value;
- return instr;
- }
- }
-
- // Kill everything in this table.
- void Kill() {
- fields_.Rewind(0);
- }
-
- // Kill all entries matching the given offset.
- void KillOffset(int offset) {
- int field = FieldOf(offset);
- if (field >= 0 && field < fields_.length()) {
- fields_[field] = NULL;
- }
- }
-
- // Kill all entries aliasing the given store.
- void KillStore(HStoreNamedField* s) {
- int field = FieldOf(s->access());
- if (field >= 0) {
- KillFieldInternal(s->object()->ActualValue(), field, s->value());
- } else {
- KillIfMisaligned(s);
- }
- }
-
- // Kill multiple entries in the case of a misaligned store.
- HValue* KillIfMisaligned(HStoreNamedField* instr) {
- HObjectAccess access = instr->access();
- if (access.IsInobject()) {
- int offset = access.offset();
- if ((offset % kPointerSize) != 0) {
- // Kill the field containing the first word of the access.
- HValue* object = instr->object()->ActualValue();
- int field = offset / kPointerSize;
- KillFieldInternal(object, field, NULL);
-
- // Kill the next field in case of overlap.
- int size = access.representation().size();
- int next_field = (offset + size - 1) / kPointerSize;
- if (next_field != field) KillFieldInternal(object, next_field, NULL);
- }
- }
- return instr;
- }
-
- // Find an entry for the given object and field pair.
- HFieldApproximation* Find(HValue* object, int field) {
- // Search for a field approximation for this object.
- HFieldApproximation* approx = fields_[field];
- while (approx != NULL) {
- if (aliasing_->MustAlias(object, approx->object_)) return approx;
- approx = approx->next_;
- }
- return NULL;
- }
-
- // Find or create an entry for the given object and field pair.
- HFieldApproximation* FindOrCreate(HValue* object, int field) {
- EnsureFields(field + 1);
-
- // Search for a field approximation for this object.
- HFieldApproximation* approx = fields_[field];
- int count = 0;
- while (approx != NULL) {
- if (aliasing_->MustAlias(object, approx->object_)) return approx;
- count++;
- approx = approx->next_;
- }
-
- if (count >= kMaxTrackedObjects) {
- // Pull the last entry off the end and repurpose it for this object.
- approx = ReuseLastApproximation(field);
- } else {
- // Allocate a new entry.
- approx = new(zone_) HFieldApproximation();
- }
-
- // Insert the entry at the head of the list.
- approx->object_ = object;
- approx->last_value_ = NULL;
- approx->next_ = fields_[field];
- fields_[field] = approx;
-
- return approx;
- }
-
- // Kill all entries for a given field that _may_ alias the given object
- // and do _not_ have the given value.
- void KillFieldInternal(HValue* object, int field, HValue* value) {
- if (field >= fields_.length()) return; // Nothing to do.
-
- HFieldApproximation* approx = fields_[field];
- HFieldApproximation* prev = NULL;
- while (approx != NULL) {
- if (aliasing_->MayAlias(object, approx->object_)) {
- if (!Equal(approx->last_value_, value)) {
- // Kill an aliasing entry that doesn't agree on the value.
- if (prev != NULL) {
- prev->next_ = approx->next_;
- } else {
- fields_[field] = approx->next_;
- }
- approx = approx->next_;
- continue;
- }
- }
- prev = approx;
- approx = approx->next_;
- }
- }
-
- bool Equal(HValue* a, HValue* b) {
- if (a == b) return true;
- if (a != NULL && b != NULL && a->CheckFlag(HValue::kUseGVN)) {
- return a->Equals(b);
- }
- return false;
- }
-
- // Remove the last approximation for a field so that it can be reused.
- // We reuse the last entry because it was the first inserted and is thus
- // farthest away from the current instruction.
- HFieldApproximation* ReuseLastApproximation(int field) {
- HFieldApproximation* approx = fields_[field];
- DCHECK(approx != NULL);
-
- HFieldApproximation* prev = NULL;
- while (approx->next_ != NULL) {
- prev = approx;
- approx = approx->next_;
- }
- if (prev != NULL) prev->next_ = NULL;
- return approx;
- }
-
- // Compute the field index for the given object access; -1 if not tracked.
- int FieldOf(HObjectAccess access) {
- return access.IsInobject() ? FieldOf(access.offset()) : -1;
- }
-
- // Compute the field index for the given in-object offset; -1 if not tracked.
- int FieldOf(int offset) {
- if (offset >= kMaxTrackedFields * kPointerSize) return -1;
- if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses.
- return offset / kPointerSize;
- }
-
- // Ensure internal storage for the given number of fields.
- void EnsureFields(int num_fields) {
- if (fields_.length() < num_fields) {
- fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
- }
- }
-
- // Print this table to stdout.
- void Print() {
- for (int i = 0; i < fields_.length(); i++) {
- PrintF(" field %d: ", i);
- for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
- PrintF("[o%d =", a->object_->id());
- if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
- PrintF("] ");
- }
- PrintF("\n");
- }
- }
-
- Zone* zone_;
- ZoneList<HFieldApproximation*> fields_;
- HAliasAnalyzer* aliasing_;
-};
-
-
-// Support for HFlowEngine: collect store effects within loops.
-class HLoadEliminationEffects : public ZoneObject {
- public:
- explicit HLoadEliminationEffects(Zone* zone)
- : zone_(zone), stores_(5, zone) { }
-
- inline bool Disabled() {
- return false; // Effects are _not_ disabled.
- }
-
- // Process a possibly side-effecting instruction.
- void Process(HInstruction* instr, Zone* zone) {
- if (instr->IsStoreNamedField()) {
- stores_.Add(HStoreNamedField::cast(instr), zone_);
- } else {
- flags_.Add(instr->ChangesFlags());
- }
- }
-
- // Apply these effects to the given load elimination table.
- void Apply(HLoadEliminationTable* table) {
-    // Loads must not be hoisted past the OSR entry; therefore we kill
-    // everything if we see an OSR entry.
- if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
- table->Kill();
- return;
- }
- if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
- table->KillOffset(JSObject::kMapOffset);
- }
- if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
- table->KillOffset(JSObject::kElementsOffset);
- }
-
- // Kill non-agreeing fields for each store contained in these effects.
- for (int i = 0; i < stores_.length(); i++) {
- table->KillStore(stores_[i]);
- }
- }
-
- // Union these effects with the other effects.
- void Union(HLoadEliminationEffects* that, Zone* zone) {
- flags_.Add(that->flags_);
- for (int i = 0; i < that->stores_.length(); i++) {
- stores_.Add(that->stores_[i], zone);
- }
- }
-
- private:
- Zone* zone_;
- GVNFlagSet flags_;
- ZoneList<HStoreNamedField*> stores_;
-};
-
-
-// The main routine of the analysis phase. Use the HFlowEngine for either a
-// local or a global analysis.
-void HLoadEliminationPhase::Run() {
- HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
- engine(graph(), zone());
- HAliasAnalyzer aliasing;
- HLoadEliminationTable* table =
- new(zone()) HLoadEliminationTable(zone(), &aliasing);
-
- if (GLOBAL) {
- // Perform a global analysis.
- engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
- } else {
- // Perform only local analysis.
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- table->Kill();
- engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-load-elimination.h b/deps/v8/src/crankshaft/hydrogen-load-elimination.h
deleted file mode 100644
index e5656459c9..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-load-elimination.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
-#define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-class HLoadEliminationPhase : public HPhase {
- public:
- explicit HLoadEliminationPhase(HGraph* graph)
- : HPhase("H_Load elimination", graph) { }
-
- void Run();
-
- private:
- void EliminateLoads(HBasicBlock* block);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
deleted file mode 100644
index 2393b5a8a4..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-mark-unreachable.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-mark-unreachable.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() {
- // If there is unreachable code in the graph, propagate the unreachable marks
- // using a fixed-point iteration.
- bool changed = true;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- while (changed) {
- changed = false;
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* block = blocks->at(i);
- if (!block->IsReachable()) continue;
- bool is_reachable = blocks->at(0) == block;
- for (HPredecessorIterator it(block); !it.Done(); it.Advance()) {
- HBasicBlock* predecessor = it.Current();
- // A block is reachable if one of its predecessors is reachable,
- // doesn't deoptimize and either is known to transfer control to the
- // block or has a control flow instruction for which the next block
- // cannot be determined.
- if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) {
- HBasicBlock* pred_succ;
- bool known_pred_succ =
- predecessor->end()->KnownSuccessorBlock(&pred_succ);
- if (!known_pred_succ || pred_succ == block) {
- is_reachable = true;
- break;
- }
- }
- if (block->is_osr_entry()) {
- is_reachable = true;
- }
- }
- if (!is_reachable) {
- block->MarkUnreachable();
- changed = true;
- }
- }
- }
-}
-
-
-void HMarkUnreachableBlocksPhase::Run() {
- MarkUnreachableBlocks();
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-mark-unreachable.h b/deps/v8/src/crankshaft/hydrogen-mark-unreachable.h
deleted file mode 100644
index 1243b1fcbe..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-mark-unreachable.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
-#define V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HMarkUnreachableBlocksPhase : public HPhase {
- public:
- explicit HMarkUnreachableBlocksPhase(HGraph* graph)
- : HPhase("H_Mark unreachable blocks", graph) { }
-
- void Run();
-
- private:
- void MarkUnreachableBlocks();
-
- DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.cc b/deps/v8/src/crankshaft/hydrogen-osr.cc
deleted file mode 100644
index 093f94b83f..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-osr.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-osr.h"
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// True iff we are compiling for OSR and the statement is the entry.
-bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
- return statement->OsrEntryId() == builder_->current_info()->osr_ast_id();
-}
-
-
-HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
- DCHECK(HasOsrEntryAt(statement));
-
- Zone* zone = builder_->zone();
- HGraph* graph = builder_->graph();
-
- // only one OSR point per compile is allowed.
- DCHECK(graph->osr() == NULL);
-
- // remember this builder as the one OSR builder in the graph.
- graph->set_osr(this);
-
- HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
- osr_entry_ = graph->CreateBasicBlock();
- HValue* true_value = graph->GetConstantTrue();
- HBranch* test = builder_->New<HBranch>(true_value, ToBooleanHint::kNone,
- non_osr_entry, osr_entry_);
- builder_->FinishCurrentBlock(test);
-
- HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
- builder_->Goto(non_osr_entry, loop_predecessor);
-
- builder_->set_current_block(osr_entry_);
- osr_entry_->set_osr_entry();
- BailoutId osr_entry_id = statement->OsrEntryId();
-
- HEnvironment *environment = builder_->environment();
- int first_expression_index = environment->first_expression_index();
- int length = environment->length();
- osr_values_ = new(zone) ZoneList<HUnknownOSRValue*>(length, zone);
-
- for (int i = 0; i < first_expression_index; ++i) {
- HUnknownOSRValue* osr_value
- = builder_->Add<HUnknownOSRValue>(environment, i);
- environment->Bind(i, osr_value);
- osr_values_->Add(osr_value, zone);
- }
-
- if (first_expression_index != length) {
- environment->Drop(length - first_expression_index);
- for (int i = first_expression_index; i < length; ++i) {
- HUnknownOSRValue* osr_value
- = builder_->Add<HUnknownOSRValue>(environment, i);
- environment->Push(osr_value);
- osr_values_->Add(osr_value, zone);
- }
- }
-
- unoptimized_frame_slots_ =
- environment->local_count() + environment->push_count();
-
- // Keep a copy of the old environment, since the OSR values need it
- // to figure out where exactly they are located in the unoptimized frame.
- environment = environment->Copy();
- builder_->current_block()->UpdateEnvironment(environment);
-
- builder_->Add<HSimulate>(osr_entry_id);
- builder_->Add<HOsrEntry>(osr_entry_id);
- HContext* context = builder_->Add<HContext>();
- environment->BindContext(context);
- builder_->Goto(loop_predecessor);
- loop_predecessor->SetJoinId(statement->EntryId());
- builder_->set_current_block(loop_predecessor);
-
- // Create the final loop entry
- osr_loop_entry_ = builder_->BuildLoopEntry();
- return osr_loop_entry_;
-}
-
-
-void HOsrBuilder::FinishGraph() {
- // do nothing for now.
-}
-
-
-void HOsrBuilder::FinishOsrValues() {
- const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
- for (int j = 0; j < phis->length(); j++) {
- HPhi* phi = phis->at(j);
- if (phi->HasMergedIndex()) {
- osr_values_->at(phi->merged_index())->set_incoming_value(phi);
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.h b/deps/v8/src/crankshaft/hydrogen-osr.h
deleted file mode 100644
index 3bd9b6edad..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-osr.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_OSR_H_
-#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class IterationStatement;
-
-// Responsible for building graph parts related to OSR and otherwise
-// setting up the graph to do an OSR compile.
-class HOsrBuilder : public ZoneObject {
- public:
- explicit HOsrBuilder(HOptimizedGraphBuilder* builder)
- : unoptimized_frame_slots_(0),
- builder_(builder),
- osr_entry_(NULL),
- osr_loop_entry_(NULL),
- osr_values_(NULL) { }
-
- // Creates the loop entry block for the given statement, setting up OSR
- // entries as necessary, and sets the current block to the new block.
- HBasicBlock* BuildOsrLoopEntry(IterationStatement* statement);
-
- // Process the hydrogen graph after it has been completed, performing
- // any OSR-specific cleanups or changes.
- void FinishGraph();
-
- // Process the OSR values and phis after initial graph optimization.
- void FinishOsrValues();
-
- // Return the number of slots in the unoptimized frame at the entry to OSR.
- int UnoptimizedFrameSlots() const {
- return unoptimized_frame_slots_;
- }
-
- bool HasOsrEntryAt(IterationStatement* statement);
-
- private:
- int unoptimized_frame_slots_;
- HOptimizedGraphBuilder* builder_;
- HBasicBlock* osr_entry_;
- HBasicBlock* osr_loop_entry_;
- ZoneList<HUnknownOSRValue*>* osr_values_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_OSR_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-range-analysis.cc b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
deleted file mode 100644
index 50592d32ca..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-range-analysis.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-class Pending {
- public:
- Pending(HBasicBlock* block, int last_changed_range)
- : block_(block), last_changed_range_(last_changed_range) {}
-
- HBasicBlock* block() const { return block_; }
- int last_changed_range() const { return last_changed_range_; }
-
- private:
- HBasicBlock* block_;
- int last_changed_range_;
-};
-
-
-void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
- if (FLAG_trace_range) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-void HRangeAnalysisPhase::Run() {
- HBasicBlock* block(graph()->entry_block());
- ZoneList<Pending> stack(graph()->blocks()->length(), zone());
- while (block != NULL) {
- TraceRange("Analyzing block B%d\n", block->block_id());
-
- // Infer range based on control flow.
- if (block->predecessors()->length() == 1) {
- HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsCompareNumericAndBranch()) {
- InferControlFlowRange(HCompareNumericAndBranch::cast(pred->end()),
- block);
- }
- }
-
- // Process phi instructions.
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- InferRange(phi);
- }
-
- // Go through all instructions of the current block.
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HValue* value = it.Current();
- InferRange(value);
-
- // Compute the bailout-on-minus-zero flag.
- if (value->IsChange()) {
- HChange* instr = HChange::cast(value);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = instr->value()->representation();
- DCHECK(from.Equals(instr->from()));
- if (from.IsSmiOrInteger32()) {
- DCHECK(instr->to().IsTagged() ||
- instr->to().IsDouble() ||
- instr->to().IsSmiOrInteger32());
- PropagateMinusZeroChecks(instr->value());
- }
- }
- }
-
- // Continue analysis in all dominated blocks.
- const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
- if (!dominated_blocks->is_empty()) {
- // Continue with first dominated block, and push the
- // remaining blocks on the stack (in reverse order).
- int last_changed_range = changed_ranges_.length();
- for (int i = dominated_blocks->length() - 1; i > 0; --i) {
- stack.Add(Pending(dominated_blocks->at(i), last_changed_range), zone());
- }
- block = dominated_blocks->at(0);
- } else if (!stack.is_empty()) {
- // Pop next pending block from stack.
- Pending pending = stack.RemoveLast();
- RollBackTo(pending.last_changed_range());
- block = pending.block();
- } else {
- // All blocks done.
- block = NULL;
- }
- }
-
- // The ranges are not valid anymore due to SSI vs. SSA!
- PoisonRanges();
-}
-
-
-void HRangeAnalysisPhase::PoisonRanges() {
-#ifdef DEBUG
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->HasRange()) instr->PoisonRange();
- }
- }
-#endif
-}
-
-
-void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test,
- HBasicBlock* dest) {
- DCHECK((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->representation().IsSmiOrInteger32()) {
- Token::Value op = test->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::ReverseCompareOp(op);
- UpdateControlFlowRange(op, test->left(), test->right());
- UpdateControlFlowRange(inverted_op, test->right(), test->left());
- }
-}
-
-
-// We know that value [op] other. Use this information to update the range on
-// value.
-void HRangeAnalysisPhase::UpdateControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
- Range temp_range;
- Range* range = other->range() != NULL ? other->range() : &temp_range;
- Range* new_range = NULL;
-
- TraceRange("Control flow range infer %d %s %d\n",
- value->id(),
- Token::Name(op),
- other->id());
-
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- // The same range has to apply for value.
- new_range = range->Copy(graph()->zone());
- } else if (op == Token::LT || op == Token::LTE) {
- new_range = range->CopyClearLower(graph()->zone());
- if (op == Token::LT) {
- new_range->AddConstant(-1);
- }
- } else if (op == Token::GT || op == Token::GTE) {
- new_range = range->CopyClearUpper(graph()->zone());
- if (op == Token::GT) {
- new_range->AddConstant(1);
- }
- }
-
- if (new_range != NULL && !new_range->IsMostGeneric()) {
- AddRange(value, new_range);
- }
-}
-
-
-void HRangeAnalysisPhase::InferRange(HValue* value) {
- DCHECK(!value->HasRange());
- if (!value->representation().IsNone()) {
- value->ComputeInitialRange(graph()->zone());
- Range* range = value->range();
- TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
- value->id(),
- value->Mnemonic(),
- range->lower(),
- range->upper());
- }
-}
-
-
-void HRangeAnalysisPhase::RollBackTo(int index) {
- DCHECK(index <= changed_ranges_.length());
- for (int i = index; i < changed_ranges_.length(); ++i) {
- changed_ranges_[i]->RemoveLastAddedRange();
- }
- changed_ranges_.Rewind(index);
-}
-
-
-void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
- Range* original_range = value->range();
- value->AddNewRange(range, graph()->zone());
- changed_ranges_.Add(value, zone());
- Range* new_range = value->range();
- TraceRange("Updated range of %d set to [%d,%d]\n",
- value->id(),
- new_range->lower(),
- new_range->upper());
- if (original_range != NULL) {
- TraceRange("Original range was [%d,%d]\n",
- original_range->lower(),
- original_range->upper());
- }
- TraceRange("New information was [%d,%d]\n",
- range->lower(),
- range->upper());
-}
-
-
-void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
- DCHECK(worklist_.is_empty());
- DCHECK(in_worklist_.IsEmpty());
-
- AddToWorklist(value);
- while (!worklist_.is_empty()) {
- value = worklist_.RemoveLast();
-
- if (value->IsPhi()) {
- // For phis, we must propagate the check to all of its inputs.
- HPhi* phi = HPhi::cast(value);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- AddToWorklist(phi->OperandAt(i));
- }
- } else if (value->IsUnaryMathOperation()) {
- HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
- if (instr->representation().IsSmiOrInteger32() &&
- !instr->value()->representation().Equals(instr->representation())) {
- if (instr->value()->range() == NULL ||
- instr->value()->range()->CanBeMinusZero()) {
- instr->SetFlag(HValue::kBailoutOnMinusZero);
- }
- }
- if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
- instr->representation().Equals(
- instr->RequiredInputRepresentation(0))) {
- AddToWorklist(instr->value());
- }
- } else if (value->IsChange()) {
- HChange* instr = HChange::cast(value);
- if (!instr->from().IsSmiOrInteger32() &&
- !instr->CanTruncateToInt32() &&
- (instr->value()->range() == NULL ||
- instr->value()->range()->CanBeMinusZero())) {
- instr->SetFlag(HValue::kBailoutOnMinusZero);
- }
- } else if (value->IsForceRepresentation()) {
- HForceRepresentation* instr = HForceRepresentation::cast(value);
- AddToWorklist(instr->value());
- } else if (value->IsMod()) {
- HMod* instr = HMod::cast(value);
- if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
- instr->SetFlag(HValue::kBailoutOnMinusZero);
- AddToWorklist(instr->left());
- }
- } else if (value->IsDiv() || value->IsMul()) {
- HBinaryOperation* instr = HBinaryOperation::cast(value);
- if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
- instr->SetFlag(HValue::kBailoutOnMinusZero);
- }
- AddToWorklist(instr->right());
- AddToWorklist(instr->left());
- } else if (value->IsMathFloorOfDiv()) {
- HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
- instr->SetFlag(HValue::kBailoutOnMinusZero);
- } else if (value->IsAdd() || value->IsSub()) {
- HBinaryOperation* instr = HBinaryOperation::cast(value);
- if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
- // Propagate to the left argument. If the left argument cannot be -0,
- // then the result of the add/sub operation cannot be either.
- AddToWorklist(instr->left());
- }
- } else if (value->IsMathMinMax()) {
- HMathMinMax* instr = HMathMinMax::cast(value);
- AddToWorklist(instr->right());
- AddToWorklist(instr->left());
- }
- }
-
- in_worklist_.Clear();
- DCHECK(in_worklist_.IsEmpty());
- DCHECK(worklist_.is_empty());
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-range-analysis.h b/deps/v8/src/crankshaft/hydrogen-range-analysis.h
deleted file mode 100644
index eeac690e62..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-range-analysis.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
-#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
-
-#include "src/base/compiler-specific.h"
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HRangeAnalysisPhase : public HPhase {
- public:
- explicit HRangeAnalysisPhase(HGraph* graph)
- : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
- in_worklist_(graph->GetMaximumValueID(), zone()),
- worklist_(32, zone()) {}
-
- void Run();
-
- private:
- PRINTF_FORMAT(2, 3) void TraceRange(const char* msg, ...);
- void InferControlFlowRange(HCompareNumericAndBranch* test,
- HBasicBlock* dest);
- void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferRange(HValue* value);
- void RollBackTo(int index);
- void AddRange(HValue* value, Range* range);
- void AddToWorklist(HValue* value) {
- if (in_worklist_.Contains(value->id())) return;
- in_worklist_.Add(value->id());
- worklist_.Add(value, zone());
- }
- void PropagateMinusZeroChecks(HValue* value);
- void PoisonRanges();
-
- ZoneList<HValue*> changed_ranges_;
-
- BitVector in_worklist_;
- ZoneList<HValue*> worklist_;
-
- DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc b/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
deleted file mode 100644
index 08644c874c..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-redundant-phi.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-redundant-phi.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HRedundantPhiEliminationPhase::Run() {
- // Gather all phis from all blocks first.
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- ZoneList<HPhi*> all_phis(blocks->length(), zone());
- for (int i = 0; i < blocks->length(); ++i) {
- HBasicBlock* block = blocks->at(i);
- for (int j = 0; j < block->phis()->length(); j++) {
- all_phis.Add(block->phis()->at(j), zone());
- }
- }
-
- // Iteratively reduce all phis in the list.
- ProcessPhis(&all_phis);
-
-#if DEBUG
- // Make sure that we *really* removed all redundant phis.
- for (int i = 0; i < blocks->length(); ++i) {
- for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
- DCHECK(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
- }
- }
-#endif
-}
-
-
-void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) {
- ProcessPhis(block->phis());
-}
-
-
-void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
- bool updated;
- do {
- // Iteratively replace all redundant phis in the given list.
- updated = false;
- for (int i = 0; i < phis->length(); i++) {
- HPhi* phi = phis->at(i);
- if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced.
-
- HValue* replacement = phi->GetRedundantReplacement();
- if (replacement != NULL) {
- phi->SetFlag(HValue::kIsDead);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- // Iterate again if used in another non-dead phi.
- updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead);
- }
- phi->block()->RemovePhi(phi);
- }
- }
- } while (updated);
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-redundant-phi.h b/deps/v8/src/crankshaft/hydrogen-redundant-phi.h
deleted file mode 100644
index e8735c82d3..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-redundant-phi.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
-#define V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Replace all phis consisting of a single non-loop operand plus any number of
-// loop operands by that single non-loop operand.
-class HRedundantPhiEliminationPhase : public HPhase {
- public:
- explicit HRedundantPhiEliminationPhase(HGraph* graph)
- : HPhase("H_Redundant phi elimination", graph) { }
-
- void Run();
- void ProcessBlock(HBasicBlock* block);
-
- private:
- void ProcessPhis(const ZoneList<HPhi*>* phis);
-
- DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc b/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
deleted file mode 100644
index e68168cf9c..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-removable-simulates.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-removable-simulates.h"
-
-#include "src/crankshaft/hydrogen-flow-engine.h"
-#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class State : public ZoneObject {
- public:
- explicit State(Zone* zone)
- : zone_(zone), mergelist_(2, zone), first_(true), mode_(NORMAL) { }
-
- State* Process(HInstruction* instr, Zone* zone) {
- if (FLAG_trace_removable_simulates) {
- PrintF("[%s with state %p in B%d: #%d %s]\n",
- mode_ == NORMAL ? "processing" : "collecting",
- reinterpret_cast<void*>(this), instr->block()->block_id(),
- instr->id(), instr->Mnemonic());
- }
- // Forward-merge "trains" of simulates after an instruction with observable
- // side effects to keep live ranges short.
- if (mode_ == COLLECT_CONSECUTIVE_SIMULATES) {
- if (instr->IsSimulate()) {
- HSimulate* current_simulate = HSimulate::cast(instr);
- if (current_simulate->is_candidate_for_removal() &&
- !current_simulate->ast_id().IsNone()) {
- Remember(current_simulate);
- return this;
- }
- }
- FlushSimulates();
- mode_ = NORMAL;
- }
- // Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid
- // folding across HEnterInlined.
- DCHECK(!(instr->IsEnterInlined() &&
- HSimulate::cast(instr->previous())->is_candidate_for_removal()));
- if (instr->IsLeaveInlined() || instr->IsReturn()) {
- // Never fold simulates from inlined environments into simulates in the
- // outer environment. Simply remove all accumulated simulates without
- // merging. This is safe because simulates after instructions with side
- // effects are never added to the merge list. The same reasoning holds for
- // return instructions.
- RemoveSimulates();
- return this;
- }
- if (instr->IsControlInstruction()) {
- // Merge the accumulated simulates at the end of the block.
- FlushSimulates();
- return this;
- }
- if (instr->IsCapturedObject()) {
- // Do not merge simulates across captured objects - captured objects
- // change environments during environment replay, and such changes
- // would not be reflected in the simulate.
- FlushSimulates();
- return this;
- }
- // Skip the non-simulates and the first simulate.
- if (!instr->IsSimulate()) return this;
- if (first_) {
- first_ = false;
- return this;
- }
- HSimulate* current_simulate = HSimulate::cast(instr);
- if (!current_simulate->is_candidate_for_removal()) {
- Remember(current_simulate);
- FlushSimulates();
- } else if (current_simulate->ast_id().IsNone()) {
- DCHECK(current_simulate->next()->IsEnterInlined());
- FlushSimulates();
- } else if (current_simulate->previous()->HasObservableSideEffects()) {
- Remember(current_simulate);
- mode_ = COLLECT_CONSECUTIVE_SIMULATES;
- } else {
- Remember(current_simulate);
- }
-
- return this;
- }
-
- static State* Merge(State* succ_state,
- HBasicBlock* succ_block,
- State* pred_state,
- HBasicBlock* pred_block,
- Zone* zone) {
- return (succ_state == NULL)
- ? pred_state->Copy(succ_block, pred_block, zone)
- : succ_state->Merge(succ_block, pred_state, pred_block, zone);
- }
-
- static State* Finish(State* state, HBasicBlock* block, Zone* zone) {
- if (FLAG_trace_removable_simulates) {
- PrintF("[preparing state %p for B%d]\n", reinterpret_cast<void*>(state),
- block->block_id());
- }
- // For our current local analysis, we should not remember simulates across
- // block boundaries.
- DCHECK(!state->HasRememberedSimulates());
- // Nasty heuristic: Never remove the first simulate in a block. This
- // just so happens to have a beneficial effect on register allocation.
- state->first_ = true;
- return state;
- }
-
- private:
- explicit State(const State& other)
- : zone_(other.zone_),
- mergelist_(other.mergelist_, other.zone_),
- first_(other.first_),
- mode_(other.mode_) { }
-
- enum Mode { NORMAL, COLLECT_CONSECUTIVE_SIMULATES };
-
- bool HasRememberedSimulates() const { return !mergelist_.is_empty(); }
-
- void Remember(HSimulate* sim) {
- mergelist_.Add(sim, zone_);
- }
-
- void FlushSimulates() {
- if (HasRememberedSimulates()) {
- mergelist_.RemoveLast()->MergeWith(&mergelist_);
- }
- }
-
- void RemoveSimulates() {
- while (HasRememberedSimulates()) {
- mergelist_.RemoveLast()->DeleteAndReplaceWith(NULL);
- }
- }
-
- State* Copy(HBasicBlock* succ_block, HBasicBlock* pred_block, Zone* zone) {
- State* copy = new(zone) State(*this);
- if (FLAG_trace_removable_simulates) {
- PrintF("[copy state %p from B%d to new state %p for B%d]\n",
- reinterpret_cast<void*>(this), pred_block->block_id(),
- reinterpret_cast<void*>(copy), succ_block->block_id());
- }
- return copy;
- }
-
- State* Merge(HBasicBlock* succ_block,
- State* pred_state,
- HBasicBlock* pred_block,
- Zone* zone) {
- // For our current local analysis, we should not remember simulates across
- // block boundaries.
- DCHECK(!pred_state->HasRememberedSimulates());
- DCHECK(!HasRememberedSimulates());
- if (FLAG_trace_removable_simulates) {
- PrintF("[merge state %p from B%d into %p for B%d]\n",
- reinterpret_cast<void*>(pred_state), pred_block->block_id(),
- reinterpret_cast<void*>(this), succ_block->block_id());
- }
- return this;
- }
-
- Zone* zone_;
- ZoneList<HSimulate*> mergelist_;
- bool first_;
- Mode mode_;
-};
-
-
-// We don't use effects here.
-class Effects : public ZoneObject {
- public:
- explicit Effects(Zone* zone) { }
- bool Disabled() { return true; }
- void Process(HInstruction* instr, Zone* zone) { }
- void Apply(State* state) { }
- void Union(Effects* that, Zone* zone) { }
-};
-
-
-void HMergeRemovableSimulatesPhase::Run() {
- HFlowEngine<State, Effects> engine(graph(), zone());
- State* state = new(zone()) State(zone());
- engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-removable-simulates.h b/deps/v8/src/crankshaft/hydrogen-removable-simulates.h
deleted file mode 100644
index 34500012cb..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-removable-simulates.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
-#define V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HMergeRemovableSimulatesPhase : public HPhase {
- public:
- explicit HMergeRemovableSimulatesPhase(HGraph* graph)
- : HPhase("H_Merge removable simulates", graph) { }
-
- void Run();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-representation-changes.cc b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
deleted file mode 100644
index 5fd72618fa..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-representation-changes.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
- HValue* value, HValue* use_value, int use_index, Representation to) {
- // Insert the representation change right before its use. For phi-uses we
- // insert at the end of the corresponding predecessor.
- HInstruction* next = NULL;
- if (use_value->IsPhi()) {
- next = use_value->block()->predecessors()->at(use_index)->end();
- } else {
- next = HInstruction::cast(use_value);
- }
- // For constants we try to make the representation change at compile
- // time. When a representation change is not possible without loss of
- // information we treat constants like normal instructions and insert the
- // change instructions for them.
- HInstruction* new_value = NULL;
- bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
- bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
- bool is_truncating_to_number =
- use_value->CheckFlag(HValue::kTruncatingToNumber);
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- // Try to create a new copy of the constant with the new representation.
- if (is_truncating_to_int && to.IsInteger32()) {
- Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
- if (res.IsJust()) new_value = res.FromJust();
- } else {
- new_value = constant->CopyToRepresentation(to, graph()->zone());
- }
- }
-
- if (new_value == NULL) {
- new_value = new (graph()->zone())
- HChange(value, to, is_truncating_to_smi, is_truncating_to_int,
- is_truncating_to_number);
- }
-
- new_value->InsertBefore(next);
- use_value->SetOperandAt(use_index, new_value);
-}
-
-
-static bool IsNonDeoptingIntToSmiChange(HChange* change) {
- Representation from_rep = change->from();
- Representation to_rep = change->to();
- // Flags indicating Uint32 operations are set in a later Hydrogen phase.
- DCHECK(!change->CheckFlag(HValue::kUint32));
- return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits();
-}
-
-
-void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
- HValue* value) {
- Representation r = value->representation();
- if (r.IsNone()) {
-#ifdef DEBUG
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use_value = it.value();
- int use_index = it.index();
- Representation req = use_value->RequiredInputRepresentation(use_index);
- DCHECK(req.IsNone());
- }
-#endif
- return;
- }
- if (value->HasNoUses()) {
- if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL);
- return;
- }
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use_value = it.value();
- int use_index = it.index();
- Representation req = use_value->RequiredInputRepresentation(use_index);
- if (req.IsNone() || req.Equals(r)) continue;
-
- // If this is an HForceRepresentation instruction, and an HChange has been
- // inserted above it, examine the input representation of the HChange. If
- // that's int32, and this HForceRepresentation use is int32, and int32 to
- // smi changes can't cause deoptimisation, set the input of the use to the
- // input of the HChange.
- if (value->IsForceRepresentation()) {
- HValue* input = HForceRepresentation::cast(value)->value();
- if (input->IsChange()) {
- HChange* change = HChange::cast(input);
- if (change->from().Equals(req) && IsNonDeoptingIntToSmiChange(change)) {
- use_value->SetOperandAt(use_index, change->value());
- continue;
- }
- }
- }
- InsertRepresentationChangeForUse(value, use_value, use_index, req);
- }
- if (value->HasNoUses()) {
- DCHECK(value->IsConstant() || value->IsForceRepresentation());
- value->DeleteAndReplaceWith(NULL);
- } else {
- // The only purpose of an HForceRepresentation is to represent the value
- // after the (possible) HChange instruction. We make it disappear.
- if (value->IsForceRepresentation()) {
- value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
- }
- }
-}
-
-
-void HRepresentationChangesPhase::Run() {
- // Compute truncation flag for phis:
- //
- // - Initially assume that all phis allow truncation to number and iteratively
- // remove the ones that are used in an operation that does not do an implicit
- // ToNumber conversion.
- // - Also assume that all Integer32 phis allow ToInt32 truncation and all
- // Smi phis allow truncation to Smi.
- //
- ZoneList<HPhi*> number_worklist(8, zone());
- ZoneList<HPhi*> int_worklist(8, zone());
- ZoneList<HPhi*> smi_worklist(8, zone());
-
- const ZoneList<HPhi*>* phi_list(graph()->phi_list());
- for (int i = 0; i < phi_list->length(); i++) {
- HPhi* phi = phi_list->at(i);
- if (phi->representation().IsInteger32()) {
- phi->SetFlag(HValue::kTruncatingToInt32);
- } else if (phi->representation().IsSmi()) {
- phi->SetFlag(HValue::kTruncatingToSmi);
- phi->SetFlag(HValue::kTruncatingToInt32);
- }
- phi->SetFlag(HValue::kTruncatingToNumber);
- }
-
- for (int i = 0; i < phi_list->length(); i++) {
- HPhi* phi = phi_list->at(i);
- HValue* value = NULL;
-
- if (phi->CheckFlag(HValue::kTruncatingToNumber) &&
- !phi->CheckUsesForFlag(HValue::kTruncatingToNumber, &value)) {
- number_worklist.Add(phi, zone());
- phi->ClearFlag(HValue::kTruncatingToNumber);
- phi->ClearFlag(HValue::kTruncatingToInt32);
- phi->ClearFlag(HValue::kTruncatingToSmi);
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating Number because of #%d %s\n",
- phi->id(), value->id(), value->Mnemonic());
- }
- } else if (phi->representation().IsSmiOrInteger32() &&
- !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
- int_worklist.Add(phi, zone());
- phi->ClearFlag(HValue::kTruncatingToInt32);
- phi->ClearFlag(HValue::kTruncatingToSmi);
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
- phi->id(), value->id(), value->Mnemonic());
- }
- } else if (phi->representation().IsSmi() &&
- !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
- smi_worklist.Add(phi, zone());
- phi->ClearFlag(HValue::kTruncatingToSmi);
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
- phi->id(), value->id(), value->Mnemonic());
- }
- }
- }
-
- while (!number_worklist.is_empty()) {
- HPhi* current = number_worklist.RemoveLast();
- for (int i = current->OperandCount() - 1; i >= 0; --i) {
- HValue* input = current->OperandAt(i);
- if (input->IsPhi() && input->CheckFlag(HValue::kTruncatingToNumber)) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating Number because of #%d %s\n",
- input->id(), current->id(), current->Mnemonic());
- }
- input->ClearFlag(HValue::kTruncatingToNumber);
- input->ClearFlag(HValue::kTruncatingToInt32);
- input->ClearFlag(HValue::kTruncatingToSmi);
- number_worklist.Add(HPhi::cast(input), zone());
- }
- }
- }
-
- while (!int_worklist.is_empty()) {
- HPhi* current = int_worklist.RemoveLast();
- for (int i = 0; i < current->OperandCount(); ++i) {
- HValue* input = current->OperandAt(i);
- if (input->IsPhi() &&
- input->representation().IsSmiOrInteger32() &&
- input->CheckFlag(HValue::kTruncatingToInt32)) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
- input->id(), current->id(), current->Mnemonic());
- }
- input->ClearFlag(HValue::kTruncatingToInt32);
- int_worklist.Add(HPhi::cast(input), zone());
- }
- }
- }
-
- while (!smi_worklist.is_empty()) {
- HPhi* current = smi_worklist.RemoveLast();
- for (int i = 0; i < current->OperandCount(); ++i) {
- HValue* input = current->OperandAt(i);
- if (input->IsPhi() &&
- input->representation().IsSmi() &&
- input->CheckFlag(HValue::kTruncatingToSmi)) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
- input->id(), current->id(), current->Mnemonic());
- }
- input->ClearFlag(HValue::kTruncatingToSmi);
- smi_worklist.Add(HPhi::cast(input), zone());
- }
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- for (int i = 0; i < blocks->length(); ++i) {
- // Process phi instructions first.
- const HBasicBlock* block(blocks->at(i));
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); j++) {
- InsertRepresentationChangesForValue(phis->at(j));
- }
-
- // Process normal instructions.
- for (HInstruction* current = block->first(); current != NULL; ) {
- HInstruction* next = current->next();
- InsertRepresentationChangesForValue(current);
- current = next;
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-representation-changes.h b/deps/v8/src/crankshaft/hydrogen-representation-changes.h
deleted file mode 100644
index d8403947c3..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-representation-changes.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
-#define V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HRepresentationChangesPhase : public HPhase {
- public:
- explicit HRepresentationChangesPhase(HGraph* graph)
- : HPhase("H_Representation changes", graph) { }
-
- void Run();
-
- private:
- void InsertRepresentationChangeForUse(HValue* value,
- HValue* use_value,
- int use_index,
- Representation to);
- void InsertRepresentationChangesForValue(HValue* value);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-sce.cc b/deps/v8/src/crankshaft/hydrogen-sce.cc
deleted file mode 100644
index a08190de3e..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-sce.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-sce.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void HStackCheckEliminationPhase::Run() {
- // For each loop block walk the dominator tree from the backwards branch to
- // the loop header. If a call instruction is encountered the backwards branch
- // is dominated by a call and the stack check in the backwards branch can be
- // removed.
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- HBasicBlock* block = graph()->blocks()->at(i);
- if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- HBasicBlock* dominator = back_edge;
- while (true) {
- for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
- if (it.Current()->HasStackCheck()) {
- block->loop_information()->stack_check()->Eliminate();
- break;
- }
- }
-
- // Done when the loop header is processed.
- if (dominator == block) break;
-
- // Move up the dominator tree.
- dominator = dominator->dominator();
- }
- }
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-sce.h b/deps/v8/src/crankshaft/hydrogen-sce.h
deleted file mode 100644
index bb896bad6b..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-sce.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_SCE_H_
-#define V8_CRANKSHAFT_HYDROGEN_SCE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HStackCheckEliminationPhase : public HPhase {
- public:
- explicit HStackCheckEliminationPhase(HGraph* graph)
- : HPhase("H_Stack check elimination", graph) { }
-
- void Run();
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_SCE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-store-elimination.cc b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
deleted file mode 100644
index b081c21984..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-store-elimination.h"
-
-#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x
-
-// Performs a block-by-block local analysis for removable stores.
-void HStoreEliminationPhase::Run() {
- GVNFlagSet flags; // Use GVN flags as an approximation for some instructions.
- flags.RemoveAll();
-
- flags.Add(kArrayElements);
- flags.Add(kArrayLengths);
- flags.Add(kStringLengths);
- flags.Add(kBackingStoreFields);
- flags.Add(kDoubleArrayElements);
- flags.Add(kDoubleFields);
- flags.Add(kElementsPointer);
- flags.Add(kInobjectFields);
- flags.Add(kExternalMemory);
- flags.Add(kStringChars);
- flags.Add(kTypedArrayElements);
-
- for (int i = 0; i < graph()->blocks()->length(); i++) {
- unobserved_.Rewind(0);
- HBasicBlock* block = graph()->blocks()->at(i);
- if (!block->IsReachable()) continue;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->CheckFlag(HValue::kIsDead)) continue;
-
- switch (instr->opcode()) {
- case HValue::kStoreNamedField:
- // Remove any unobserved stores overwritten by this store.
- ProcessStore(HStoreNamedField::cast(instr));
- break;
- case HValue::kLoadNamedField:
- // Observe any unobserved stores on this object + field.
- ProcessLoad(HLoadNamedField::cast(instr));
- break;
- default:
- ProcessInstr(instr, flags);
- break;
- }
- }
- }
-}
-
-
-void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
- HValue* object = store->object()->ActualValue();
- int i = 0;
- while (i < unobserved_.length()) {
- HStoreNamedField* prev = unobserved_.at(i);
- if (aliasing_->MustAlias(object, prev->object()->ActualValue()) &&
- prev->CanBeReplacedWith(store)) {
- // This store is guaranteed to overwrite the previous store.
- prev->DeleteAndReplaceWith(NULL);
- TRACE(("++ Unobserved store S%d overwritten by S%d\n",
- prev->id(), store->id()));
- unobserved_.Remove(i);
- } else {
- i++;
- }
- }
- // Only non-transitioning stores are removable.
- if (!store->has_transition()) {
- TRACE(("-- Might remove store S%d\n", store->id()));
- unobserved_.Add(store, zone());
- }
-}
-
-
-void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) {
- HValue* object = load->object()->ActualValue();
- int i = 0;
- while (i < unobserved_.length()) {
- HStoreNamedField* prev = unobserved_.at(i);
- if (aliasing_->MayAlias(object, prev->object()->ActualValue()) &&
- load->access().Equals(prev->access())) {
- TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id()));
- unobserved_.Remove(i);
- } else {
- i++;
- }
- }
-}
-
-
-void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
- GVNFlagSet flags) {
- if (unobserved_.length() == 0) return; // Nothing to do.
- if (instr->CanDeoptimize()) {
- TRACE(("-- Observed stores at I%d (%s might deoptimize)\n",
- instr->id(), instr->Mnemonic()));
- unobserved_.Rewind(0);
- return;
- }
- if (instr->CheckChangesFlag(kNewSpacePromotion)) {
- TRACE(("-- Observed stores at I%d (%s might GC)\n",
- instr->id(), instr->Mnemonic()));
- unobserved_.Rewind(0);
- return;
- }
- if (instr->DependsOnFlags().ContainsAnyOf(flags)) {
- TRACE(("-- Observed stores at I%d (GVN flags of %s)\n",
- instr->id(), instr->Mnemonic()));
- unobserved_.Rewind(0);
- return;
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-store-elimination.h b/deps/v8/src/crankshaft/hydrogen-store-elimination.h
deleted file mode 100644
index 2a9e0c1488..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-store-elimination.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
-#define V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/hydrogen-alias-analysis.h"
-
-namespace v8 {
-namespace internal {
-
-class HStoreEliminationPhase : public HPhase {
- public:
- explicit HStoreEliminationPhase(HGraph* graph)
- : HPhase("H_Store elimination", graph),
- unobserved_(10, zone()),
- aliasing_() { }
-
- void Run();
- private:
- ZoneList<HStoreNamedField*> unobserved_;
- HAliasAnalyzer* aliasing_;
-
- void ProcessStore(HStoreNamedField* store);
- void ProcessLoad(HLoadNamedField* load);
- void ProcessInstr(HInstruction* instr, GVNFlagSet flags);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
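
The phase above drops a StoreNamedField when a later store to the same object and field is guaranteed to overwrite it before any load, deoptimization point, or GC can observe it. A rough C++ analogue of the pattern being eliminated (illustrative only; the names Box and WriteTwice are invented, and the real phase operates on Hydrogen instructions, not source code):

    struct Box { int field; };

    void WriteTwice(Box* box) {
      box->field = 1;  // unobserved: no load, call, or deopt point follows
                       // before the next store, so this store is dead
      box->field = 2;  // overwrites the store above
    }
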
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
deleted file mode 100644
index ad2d461fe8..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-types.h"
-
-#include "src/field-type.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-HType HType::FromType(AstType* type) {
- if (AstType::Any()->Is(type)) return HType::Any();
- if (!type->IsInhabited()) return HType::None();
- if (type->Is(AstType::SignedSmall())) return HType::Smi();
- if (type->Is(AstType::Number())) return HType::TaggedNumber();
- if (type->Is(AstType::Null())) return HType::Null();
- if (type->Is(AstType::String())) return HType::String();
- if (type->Is(AstType::Boolean())) return HType::Boolean();
- if (type->Is(AstType::Undefined())) return HType::Undefined();
- if (type->Is(AstType::Object())) return HType::JSObject();
- if (type->Is(AstType::DetectableReceiver())) return HType::JSReceiver();
- return HType::Tagged();
-}
-
-
-// static
-HType HType::FromFieldType(Handle<FieldType> type, Zone* temp_zone) {
- return FromType(type->Convert(temp_zone));
-}
-
-// static
-HType HType::FromValue(Handle<Object> value) {
- Object* raw_value = *value;
- if (raw_value->IsSmi()) return HType::Smi();
- DCHECK(raw_value->IsHeapObject());
- Isolate* isolate = HeapObject::cast(*value)->GetIsolate();
- if (raw_value->IsNull(isolate)) return HType::Null();
- if (raw_value->IsHeapNumber()) {
- double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
- return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
- }
- if (raw_value->IsString()) return HType::String();
- if (raw_value->IsBoolean()) return HType::Boolean();
- if (raw_value->IsUndefined(isolate)) return HType::Undefined();
- if (raw_value->IsJSArray()) {
- DCHECK(!raw_value->IsUndetectable());
- return HType::JSArray();
- }
- if (raw_value->IsJSObject() && !raw_value->IsUndetectable()) {
- return HType::JSObject();
- }
- return HType::HeapObject();
-}
-
-
-std::ostream& operator<<(std::ostream& os, const HType& t) {
- // Note: The c1visualizer syntax for locals allows only a sequence of the
- // following characters: A-Za-z0-9_-|:
- switch (t.kind_) {
-#define DEFINE_CASE(Name, mask) \
- case HType::k##Name: \
- return os << #Name;
- HTYPE_LIST(DEFINE_CASE)
-#undef DEFINE_CASE
- }
- UNREACHABLE();
- return os;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-types.h b/deps/v8/src/crankshaft/hydrogen-types.h
deleted file mode 100644
index 3e68872924..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-types.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_TYPES_H_
-#define V8_CRANKSHAFT_HYDROGEN_TYPES_H_
-
-#include <climits>
-#include <iosfwd>
-
-#include "src/ast/ast-types.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-template <typename T> class Handle;
-class FieldType;
-class Object;
-
-#define HTYPE_LIST(V) \
- V(Any, 0x0) /* 0000 0000 0000 0000 */ \
- V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \
- V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \
- V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \
- V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \
- V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \
- V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
- V(Null, 0x27) /* 0000 0000 0010 0111 */ \
- V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
- V(String, 0x65) /* 0000 0000 0110 0101 */ \
- V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
- V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
- V(JSReceiver, 0x221) /* 0000 0010 0010 0001 */ \
- V(JSObject, 0x621) /* 0000 0110 0010 0001 */ \
- V(JSArray, 0xe21) /* 0000 1110 0010 0001 */ \
- V(None, 0xfff) /* 0000 1111 1111 1111 */
-
-class HType final {
- public:
- #define DECLARE_CONSTRUCTOR(Name, mask) \
- static HType Name() WARN_UNUSED_RESULT { return HType(k##Name); }
- HTYPE_LIST(DECLARE_CONSTRUCTOR)
- #undef DECLARE_CONSTRUCTOR
-
- // Return the weakest (least precise) common type.
- HType Combine(HType other) const WARN_UNUSED_RESULT {
- return HType(static_cast<Kind>(kind_ & other.kind_));
- }
-
- bool Equals(HType other) const WARN_UNUSED_RESULT {
- return kind_ == other.kind_;
- }
-
- bool IsSubtypeOf(HType other) const WARN_UNUSED_RESULT {
- return Combine(other).Equals(other);
- }
-
- #define DECLARE_IS_TYPE(Name, mask) \
- bool Is##Name() const WARN_UNUSED_RESULT { \
- return IsSubtypeOf(HType::Name()); \
- }
- HTYPE_LIST(DECLARE_IS_TYPE)
- #undef DECLARE_IS_TYPE
-
- static HType FromType(AstType* type) WARN_UNUSED_RESULT;
- static HType FromFieldType(Handle<FieldType> type,
- Zone* temp_zone) WARN_UNUSED_RESULT;
- static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
-
- friend std::ostream& operator<<(std::ostream& os, const HType& t);
-
- private:
- enum Kind {
- #define DECLARE_TYPE(Name, mask) k##Name = mask,
- HTYPE_LIST(DECLARE_TYPE)
- #undef DECLARE_TYPE
- LAST_KIND = kNone
- };
-
- // Make sure type fits in int16.
- STATIC_ASSERT(LAST_KIND < (1 << (CHAR_BIT * sizeof(int16_t))));
-
- explicit HType(Kind kind) : kind_(kind) { }
-
- int16_t kind_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const HType& t);
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_TYPES_H_
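
The HTYPE_LIST masks are chosen so that Combine is a plain bitwise AND and IsSubtypeOf reduces to mask inclusion. A small standalone check of that arithmetic, reusing the mask values listed above (illustrative only):

    #include <cstdint>

    // Mask values copied from HTYPE_LIST.
    constexpr uint16_t kTaggedNumber = 0xd;
    constexpr uint16_t kSmi = 0x1d;
    constexpr uint16_t kString = 0x65;

    constexpr uint16_t Combine(uint16_t a, uint16_t b) { return a & b; }
    constexpr bool IsSubtypeOf(uint16_t a, uint16_t b) {
      return Combine(a, b) == b;
    }

    // Smi is a subtype of TaggedNumber: 0x1d & 0xd == 0xd.
    static_assert(IsSubtypeOf(kSmi, kTaggedNumber), "Smi <= TaggedNumber");
    // String is not: 0x65 & 0xd == 0x5, which differs from TaggedNumber's mask.
    static_assert(!IsSubtypeOf(kString, kTaggedNumber), "String !<= TaggedNumber");

    int main() { return 0; }
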
diff --git a/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
deleted file mode 100644
index de31a616c1..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-uint32-analysis.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-uint32-analysis.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-static bool IsUnsignedLoad(HLoadKeyed* instr) {
- switch (instr->elements_kind()) {
- case UINT8_ELEMENTS:
- case UINT16_ELEMENTS:
- case UINT32_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- return true;
- default:
- return false;
- }
-}
-
-
-static bool IsUint32Operation(HValue* instr) {
- return instr->IsShr() ||
- (instr->IsLoadKeyed() && IsUnsignedLoad(HLoadKeyed::cast(instr))) ||
- (instr->IsInteger32Constant() && instr->GetInteger32Constant() >= 0);
-}
-
-
-bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
- // Operations that operate on bits are safe.
- if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
- return true;
- } else if (use->IsSimulate() || use->IsArgumentsObject()) {
- // Deoptimization has special support for uint32.
- return true;
- } else if (use->IsChange()) {
- // Conversions have special support for uint32.
- // This DCHECK guards that the conversion in question is actually
- // implemented. Do not extend the whitelist without adding
- // support to LChunkBuilder::DoChange().
- DCHECK(HChange::cast(use)->to().IsDouble() ||
- HChange::cast(use)->to().IsSmi() ||
- HChange::cast(use)->to().IsTagged());
- return true;
- } else if (use->IsStoreKeyed()) {
- HStoreKeyed* store = HStoreKeyed::cast(use);
- if (store->is_fixed_typed_array()) {
- // Storing a value into an external integer array is a bit level
- // operation.
- if (store->value() == val) {
- // Clamping or a conversion to double should have been inserted.
- DCHECK(store->elements_kind() != UINT8_CLAMPED_ELEMENTS);
- DCHECK(store->elements_kind() != FLOAT32_ELEMENTS);
- DCHECK(store->elements_kind() != FLOAT64_ELEMENTS);
- return true;
- }
- }
- } else if (use->IsCompareNumericAndBranch()) {
- HCompareNumericAndBranch* c = HCompareNumericAndBranch::cast(use);
- return IsUint32Operation(c->left()) && IsUint32Operation(c->right());
- }
-
- return false;
-}
-
-
-// Iterate over all uses and verify that they are uint32 safe: either don't
-// distinguish between int32 and uint32 due to their bitwise nature or
-// have special support for uint32 values.
-// Encountered phis are optimistically treated as safe uint32 uses,
-// marked with kUint32 flag and collected in the phis_ list. A separate
-// pass will be performed later by UnmarkUnsafePhis to clear kUint32 from
-// phis that are not actually uint32-safe (it requires fixed-point iteration).
-bool HUint32AnalysisPhase::Uint32UsesAreSafe(HValue* uint32val) {
- bool collect_phi_uses = false;
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
- if (use->IsPhi()) {
- if (!use->CheckFlag(HInstruction::kUint32)) {
- // There is a phi use of this value from a phi that is not yet
- // collected in the phis_ array. A separate pass is required.
- collect_phi_uses = true;
- }
-
- // Optimistically treat phis as uint32 safe.
- continue;
- }
-
- if (!IsSafeUint32Use(uint32val, use)) {
- return false;
- }
- }
-
- if (collect_phi_uses) {
- for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
-
- // There is a phi use of this value from a phi that is not yet
- // collected in the phis_ array. A separate pass is required.
- if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
- use->SetFlag(HInstruction::kUint32);
- phis_.Add(HPhi::cast(use), zone());
- }
- }
- }
-
- return true;
-}
-
-
-// Check if all operands to the given phi are marked with kUint32 flag.
-bool HUint32AnalysisPhase::CheckPhiOperands(HPhi* phi) {
- if (!phi->CheckFlag(HInstruction::kUint32)) {
- // This phi is not uint32 safe. No need to check operands.
- return false;
- }
-
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (!operand->CheckFlag(HInstruction::kUint32)) {
- // Lazily mark constants that fit into uint32 range with kUint32 flag.
- if (operand->IsInteger32Constant() &&
- operand->GetInteger32Constant() >= 0) {
- operand->SetFlag(HInstruction::kUint32);
- continue;
- }
-
- // This phi is not safe, some operands are not uint32 values.
- return false;
- }
- }
-
- return true;
-}
-
-
-// Remove kUint32 flag from the phi itself and its operands. If any operand
-// was a phi marked with kUint32, place it into a worklist for
-// transitive clearing of kUint32 flag.
-void HUint32AnalysisPhase::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
- phi->ClearFlag(HInstruction::kUint32);
- for (int j = 0; j < phi->OperandCount(); j++) {
- HValue* operand = phi->OperandAt(j);
- if (operand->CheckFlag(HInstruction::kUint32)) {
- operand->ClearFlag(HInstruction::kUint32);
- if (operand->IsPhi()) {
- worklist->Add(HPhi::cast(operand), zone());
- }
- }
- }
-}
-
-
-void HUint32AnalysisPhase::UnmarkUnsafePhis() {
- // No phis were collected. Nothing to do.
- if (phis_.length() == 0) return;
-
- // Worklist used to transitively clear kUint32 from phis that
- // are used as arguments to other phis.
- ZoneList<HPhi*> worklist(phis_.length(), zone());
-
- // Phi can be used as a uint32 value if and only if
- // all its operands are uint32 values and all its
- // uses are uint32 safe.
-
- // Iterate over collected phis and unmark those that
- // are unsafe. When unmarking a phi, unmark its operands
- // and add each operand to the worklist if it is a phi as well.
- // Phis that are still marked as safe are shifted down
- // so that all safe phis form a prefix of the phis_ array.
- int phi_count = 0;
- for (int i = 0; i < phis_.length(); i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
- phis_[phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
-
- // Now phis array contains only those phis that have safe
- // non-phi uses. Start transitively clearing kUint32 flag
- // from phi operands of discovered non-safe phis until
- // only safe phis are left.
- while (!worklist.is_empty()) {
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- UnmarkPhi(phi, &worklist);
- }
-
- // Check if any operands to safe phis were unmarked
- // turning a safe phi into unsafe. The same value
- // can flow into several phis.
- int new_phi_count = 0;
- for (int i = 0; i < phi_count; i++) {
- HPhi* phi = phis_[i];
-
- if (CheckPhiOperands(phi)) {
- phis_[new_phi_count++] = phi;
- } else {
- UnmarkPhi(phi, &worklist);
- }
- }
- phi_count = new_phi_count;
- }
-}
-
-
-void HUint32AnalysisPhase::Run() {
- if (!graph()->has_uint32_instructions()) return;
-
- ZoneList<HInstruction*>* uint32_instructions = graph()->uint32_instructions();
- for (int i = 0; i < uint32_instructions->length(); ++i) {
- // Analyze instruction and mark it with kUint32 if all
- // its uses are uint32 safe.
- HInstruction* current = uint32_instructions->at(i);
- if (current->IsLinked() &&
- current->representation().IsInteger32() &&
- Uint32UsesAreSafe(current)) {
- current->SetFlag(HInstruction::kUint32);
- }
- }
-
- // Some phis might have been optimistically marked with kUint32 flag.
- // Remove this flag from those phis that are unsafe and propagate
- // this information transitively, potentially clearing the kUint32 flag
- // from some non-phi operations that are used as operands to unsafe phis.
- UnmarkUnsafePhis();
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-uint32-analysis.h b/deps/v8/src/crankshaft/hydrogen-uint32-analysis.h
deleted file mode 100644
index 0d959b5953..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-uint32-analysis.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
-#define V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Discover instructions that can be marked with kUint32 flag allowing
-// them to produce full range uint32 values.
-class HUint32AnalysisPhase : public HPhase {
- public:
- explicit HUint32AnalysisPhase(HGraph* graph)
- : HPhase("H_Compute safe UInt32 operations", graph), phis_(4, zone()) { }
-
- void Run();
-
- private:
- INLINE(bool IsSafeUint32Use(HValue* val, HValue* use));
- INLINE(bool Uint32UsesAreSafe(HValue* uint32val));
- INLINE(bool CheckPhiOperands(HPhi* phi));
- INLINE(void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist));
- INLINE(void UnmarkUnsafePhis());
-
- ZoneList<HPhi*> phis_;
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
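
A value marked kUint32 lives in an int32 register but its bit pattern has to be read as unsigned, which is why sign-agnostic uses (bitwise operations, shifts, stores into integer-typed arrays) are accepted outright while a numeric comparison is only safe when both operands are uint32. A standalone sketch of the difference (illustrative only, assuming a two's-complement target):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t bits = 0x80000000u;                     // top bit set
      int32_t as_signed = static_cast<int32_t>(bits);  // same bits, signed view

      // Bitwise uses ignore the sign, so both views agree.
      std::printf("%d\n", as_signed & 0xff);           // 0
      std::printf("%u\n", bits & 0xffu);               // 0

      // A numeric comparison does not: the same bits order differently.
      std::printf("%d\n", as_signed < 1);              // 1: negative as int32
      std::printf("%d\n", bits < 1u);                  // 0: large as uint32
      return 0;
    }
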
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
deleted file mode 100644
index 5a110f4aa8..0000000000
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ /dev/null
@@ -1,12535 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen.h"
-
-#include <memory>
-#include <sstream>
-
-#include "src/allocation-site-scopes.h"
-#include "src/ast/ast-numbering.h"
-#include "src/ast/compile-time-value.h"
-#include "src/ast/scopes.h"
-#include "src/code-factory.h"
-#include "src/crankshaft/hydrogen-bce.h"
-#include "src/crankshaft/hydrogen-canonicalize.h"
-#include "src/crankshaft/hydrogen-check-elimination.h"
-#include "src/crankshaft/hydrogen-dce.h"
-#include "src/crankshaft/hydrogen-dehoist.h"
-#include "src/crankshaft/hydrogen-environment-liveness.h"
-#include "src/crankshaft/hydrogen-escape-analysis.h"
-#include "src/crankshaft/hydrogen-gvn.h"
-#include "src/crankshaft/hydrogen-infer-representation.h"
-#include "src/crankshaft/hydrogen-infer-types.h"
-#include "src/crankshaft/hydrogen-load-elimination.h"
-#include "src/crankshaft/hydrogen-mark-unreachable.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/hydrogen-range-analysis.h"
-#include "src/crankshaft/hydrogen-redundant-phi.h"
-#include "src/crankshaft/hydrogen-removable-simulates.h"
-#include "src/crankshaft/hydrogen-representation-changes.h"
-#include "src/crankshaft/hydrogen-sce.h"
-#include "src/crankshaft/hydrogen-store-elimination.h"
-#include "src/crankshaft/hydrogen-uint32-analysis.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/crankshaft/typing.h"
-#include "src/field-type.h"
-#include "src/full-codegen/full-codegen.h"
-#include "src/globals.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-// GetRootConstructor
-#include "src/ic/ic-inl.h"
-#include "src/isolate-inl.h"
-#include "src/objects/map.h"
-#include "src/runtime/runtime.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-const auto GetRegConfig = RegisterConfiguration::Crankshaft;
-
-class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
- public:
- explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
- : HOptimizedGraphBuilder(info, true) {
- SetSourcePosition(info->shared_info()->start_position());
- }
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- SourcePosition old_position = SourcePosition::Unknown(); \
- if (node->position() != kNoSourcePosition) { \
- old_position = source_position(); \
- SetSourcePosition(node->position()); \
- } \
- HOptimizedGraphBuilder::Visit##type(node); \
- if (old_position.IsKnown()) { \
- set_source_position(old_position); \
- } \
- }
- EXPRESSION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- SourcePosition old_position = SourcePosition::Unknown(); \
- if (node->position() != kNoSourcePosition) { \
- old_position = source_position(); \
- SetSourcePosition(node->position()); \
- } \
- HOptimizedGraphBuilder::Visit##type(node); \
- if (old_position.IsKnown()) { \
- set_source_position(old_position); \
- } \
- }
- STATEMENT_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- HOptimizedGraphBuilder::Visit##type(node); \
- }
- DECLARATION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
- if (!isolate()->use_optimizer() ||
- info()->shared_info()->must_use_ignition_turbo()) {
- // Crankshaft is entirely disabled.
- return FAILED;
- }
-
- // Optimization requires a version of fullcode with deoptimization support.
- // Recompile the unoptimized version of the code if the current version
- // doesn't have deoptimization support already.
- // Otherwise, if we are gathering compilation time and space statistics
- // for hydrogen, gather baseline statistics for a fullcode compilation.
- bool should_recompile = !info()->shared_info()->has_deoptimization_support();
- if (should_recompile || FLAG_hydrogen_stats) {
- base::ElapsedTimer timer;
- if (FLAG_hydrogen_stats) {
- timer.Start();
- }
- if (!Compiler::EnsureDeoptimizationSupport(info())) {
- return FAILED;
- }
- if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
- }
- }
- DCHECK(info()->shared_info()->has_deoptimization_support());
-
- // Check the whitelist for Crankshaft.
- if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
- return AbortOptimization(kHydrogenFilter);
- }
-
- Scope* scope = info()->scope();
- if (LUnallocated::TooManyParameters(scope->num_parameters())) {
- // Crankshaft would require too many Lithium operands.
- return AbortOptimization(kTooManyParameters);
- }
-
- if (info()->is_osr() &&
- LUnallocated::TooManyParametersOrStackSlots(scope->num_parameters(),
- scope->num_stack_slots())) {
- // Crankshaft would require too many Lithium operands.
- return AbortOptimization(kTooManyParametersLocals);
- }
-
- if (IsGeneratorFunction(info()->shared_info()->kind())) {
- // Crankshaft does not support generators.
- return AbortOptimization(kGenerator);
- }
-
- if (FLAG_trace_hydrogen) {
- isolate()->GetHTracer()->TraceCompilation(info());
- }
-
- // Optimization could have been disabled by the parser. Note that this check
- // is only needed because the Hydrogen graph builder is missing some bailouts.
- if (info()->shared_info()->optimization_disabled()) {
- return AbortOptimization(
- info()->shared_info()->disable_optimization_reason());
- }
-
- HOptimizedGraphBuilder* graph_builder =
- (FLAG_hydrogen_track_positions || isolate()->is_profiling() ||
- FLAG_trace_ic)
- ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
- : new (info()->zone()) HOptimizedGraphBuilder(info(), false);
-
- // Type-check the function.
- AstTyper(info()->isolate(), info()->zone(), info()->closure(),
- info()->scope(), info()->osr_ast_id(), info()->literal(),
- graph_builder->bounds())
- .Run();
-
- graph_ = graph_builder->CreateGraph();
-
- if (isolate()->has_pending_exception()) {
- return FAILED;
- }
-
- if (graph_ == NULL) return FAILED;
-
- if (info()->dependencies()->HasAborted()) {
- // Dependency has changed during graph creation. Let's try again later.
- return RetryOptimization(kBailedOutDueToDependencyChange);
- }
-
- return SUCCEEDED;
-}
-
-HCompilationJob::Status HCompilationJob::ExecuteJobImpl() {
- DCHECK(graph_ != NULL);
- BailoutReason bailout_reason = kNoReason;
-
- if (graph_->Optimize(&bailout_reason)) {
- chunk_ = LChunk::NewChunk(graph_);
- if (chunk_ != NULL) return SUCCEEDED;
- } else if (bailout_reason != kNoReason) {
- info()->AbortOptimization(bailout_reason);
- }
-
- return FAILED;
-}
-
-HCompilationJob::Status HCompilationJob::FinalizeJobImpl() {
- DCHECK(chunk_ != NULL);
- DCHECK(graph_ != NULL);
- {
- // Deferred handles reference objects that were accessible during
- // graph creation. To make sure that we don't encounter inconsistencies
- // between graph creation and code generation, we disallow accessing
- // objects through deferred handles during the latter, with exceptions.
- DisallowDeferredHandleDereference no_deferred_handle_deref;
- Handle<Code> optimized_code = chunk_->Codegen();
- if (optimized_code.is_null()) {
- if (info()->bailout_reason() == kNoReason) {
- return AbortOptimization(kCodeGenerationFailed);
- }
- return FAILED;
- }
- RegisterWeakObjectsInOptimizedCode(optimized_code);
- info()->SetCode(optimized_code);
- }
- // Add to the weak list of optimized code objects.
- info()->context()->native_context()->AddOptimizedCode(*info()->code());
- return SUCCEEDED;
-}
-
-HBasicBlock::HBasicBlock(HGraph* graph)
- : block_id_(graph->GetNextBlockID()),
- graph_(graph),
- phis_(4, graph->zone()),
- first_(NULL),
- last_(NULL),
- end_(NULL),
- loop_information_(NULL),
- predecessors_(2, graph->zone()),
- dominator_(NULL),
- dominated_blocks_(4, graph->zone()),
- last_environment_(NULL),
- argument_count_(-1),
- first_instruction_index_(-1),
- last_instruction_index_(-1),
- deleted_phis_(4, graph->zone()),
- parent_loop_header_(NULL),
- inlined_entry_block_(NULL),
- is_inline_return_target_(false),
- is_reachable_(true),
- dominates_loop_successors_(false),
- is_osr_entry_(false),
- is_ordered_(false) { }
-
-
-Isolate* HBasicBlock::isolate() const {
- return graph_->isolate();
-}
-
-
-void HBasicBlock::MarkUnreachable() {
- is_reachable_ = false;
-}
-
-
-void HBasicBlock::AttachLoopInformation() {
- DCHECK(!IsLoopHeader());
- loop_information_ = new(zone()) HLoopInformation(this, zone());
-}
-
-
-void HBasicBlock::DetachLoopInformation() {
- DCHECK(IsLoopHeader());
- loop_information_ = NULL;
-}
-
-
-void HBasicBlock::AddPhi(HPhi* phi) {
- DCHECK(!IsStartBlock());
- phis_.Add(phi, zone());
- phi->SetBlock(this);
-}
-
-
-void HBasicBlock::RemovePhi(HPhi* phi) {
- DCHECK(phi->block() == this);
- DCHECK(phis_.Contains(phi));
- phi->Kill();
- phis_.RemoveElement(phi);
- phi->SetBlock(NULL);
-}
-
-
-void HBasicBlock::AddInstruction(HInstruction* instr, SourcePosition position) {
- DCHECK(!IsStartBlock() || !IsFinished());
- DCHECK(!instr->IsLinked());
- DCHECK(!IsFinished());
-
- if (position.IsKnown()) {
- instr->set_position(position);
- }
- if (first_ == NULL) {
- DCHECK(last_environment() != NULL);
- DCHECK(!last_environment()->ast_id().IsNone());
- HBlockEntry* entry = new(zone()) HBlockEntry();
- entry->InitializeAsFirst(this);
- if (position.IsKnown()) {
- entry->set_position(position);
- } else {
- DCHECK(!FLAG_hydrogen_track_positions ||
- !graph()->info()->IsOptimizing() || instr->IsAbnormalExit());
- }
- first_ = last_ = entry;
- }
- instr->InsertAfter(last_);
-}
-
-
-HPhi* HBasicBlock::AddNewPhi(int merged_index) {
- if (graph()->IsInsideNoSideEffectsScope()) {
- merged_index = HPhi::kInvalidMergedIndex;
- }
- HPhi* phi = new(zone()) HPhi(merged_index, zone());
- AddPhi(phi);
- return phi;
-}
-
-
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
- RemovableSimulate removable) {
- DCHECK(HasEnvironment());
- HEnvironment* environment = last_environment();
- DCHECK(ast_id.IsNone() ||
- ast_id == BailoutId::StubEntry() ||
- environment->closure()->shared()->VerifyBailoutId(ast_id));
-
- int push_count = environment->push_count();
- int pop_count = environment->pop_count();
-
- HSimulate* instr =
- new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
-#ifdef DEBUG
- instr->set_closure(environment->closure());
-#endif
- // Order of pushed values: newest (top of stack) first. This allows
- // HSimulate::MergeWith() to easily append additional pushed values
- // that are older (from further down the stack).
- for (int i = 0; i < push_count; ++i) {
- instr->AddPushedValue(environment->ExpressionStackAt(i));
- }
- for (GrowableBitVector::Iterator it(environment->assigned_variables(),
- zone());
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- instr->AddAssignedValue(index, environment->Lookup(index));
- }
- environment->ClearHistory();
- return instr;
-}
-
-
-void HBasicBlock::Finish(HControlInstruction* end, SourcePosition position) {
- DCHECK(!IsFinished());
- AddInstruction(end, position);
- end_ = end;
- for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
- it.Current()->RegisterPredecessor(this);
- }
-}
-
-
-void HBasicBlock::Goto(HBasicBlock* block, SourcePosition position,
- FunctionState* state, bool add_simulate) {
- bool drop_extra = state != NULL &&
- state->inlining_kind() == NORMAL_RETURN;
-
- if (block->IsInlineReturnTarget()) {
- HEnvironment* env = last_environment();
- int argument_count = env->arguments_environment()->parameter_count();
- AddInstruction(new(zone())
- HLeaveInlined(state->entry(), argument_count),
- position);
- UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
- }
-
- if (add_simulate) AddNewSimulate(BailoutId::None(), position);
- HGoto* instr = new(zone()) HGoto(block);
- Finish(instr, position);
-}
-
-
-void HBasicBlock::AddLeaveInlined(HValue* return_value, FunctionState* state,
- SourcePosition position) {
- HBasicBlock* target = state->function_return();
- bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
-
- DCHECK(target->IsInlineReturnTarget());
- DCHECK(return_value != NULL);
- HEnvironment* env = last_environment();
- int argument_count = env->arguments_environment()->parameter_count();
- AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
- position);
- UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
- last_environment()->Push(return_value);
- AddNewSimulate(BailoutId::None(), position);
- HGoto* instr = new(zone()) HGoto(target);
- Finish(instr, position);
-}
-
-
-void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
- DCHECK(!HasEnvironment());
- DCHECK(first() == NULL);
- UpdateEnvironment(env);
-}
-
-
-void HBasicBlock::UpdateEnvironment(HEnvironment* env) {
- last_environment_ = env;
- graph()->update_maximum_environment_size(env->first_expression_index());
-}
-
-
-void HBasicBlock::SetJoinId(BailoutId ast_id) {
- int length = predecessors_.length();
- DCHECK(length > 0);
- for (int i = 0; i < length; i++) {
- HBasicBlock* predecessor = predecessors_[i];
- DCHECK(predecessor->end()->IsGoto());
- HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
- DCHECK(i != 0 ||
- (predecessor->last_environment()->closure().is_null() ||
- predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(ast_id)));
- simulate->set_ast_id(ast_id);
- predecessor->last_environment()->set_ast_id(ast_id);
- }
-}
-
-
-bool HBasicBlock::Dominates(HBasicBlock* other) const {
- HBasicBlock* current = other->dominator();
- while (current != NULL) {
- if (current == this) return true;
- current = current->dominator();
- }
- return false;
-}
-
-
-bool HBasicBlock::EqualToOrDominates(HBasicBlock* other) const {
- if (this == other) return true;
- return Dominates(other);
-}
-
-
-int HBasicBlock::LoopNestingDepth() const {
- const HBasicBlock* current = this;
- int result = (current->IsLoopHeader()) ? 1 : 0;
- while (current->parent_loop_header() != NULL) {
- current = current->parent_loop_header();
- result++;
- }
- return result;
-}
-
-
-void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
- DCHECK(IsLoopHeader());
-
- SetJoinId(stmt->EntryId());
- if (predecessors()->length() == 1) {
- // This is a degenerate loop.
- DetachLoopInformation();
- return;
- }
-
- // Only the first entry into the loop is from outside the loop. All other
- // entries must be back edges.
- for (int i = 1; i < predecessors()->length(); ++i) {
- loop_information()->RegisterBackEdge(predecessors()->at(i));
- }
-}
-
-
-void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
- DCHECK(IsFinished());
- HBasicBlock* succ_block = end()->SuccessorAt(succ);
-
- DCHECK(succ_block->predecessors()->length() == 1);
- succ_block->MarkUnreachable();
-}
-
-
-void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
- if (HasPredecessor()) {
- // Only loop header blocks can have a predecessor added after
- // instructions have been added to the block (they have phis for all
- // values in the environment; these phis may be eliminated later).
- DCHECK(IsLoopHeader() || first_ == NULL);
- HEnvironment* incoming_env = pred->last_environment();
- if (IsLoopHeader()) {
- DCHECK_EQ(phis()->length(), incoming_env->length());
- for (int i = 0; i < phis_.length(); ++i) {
- phis_[i]->AddInput(incoming_env->values()->at(i));
- }
- } else {
- last_environment()->AddIncomingEdge(this, pred->last_environment());
- }
- } else if (!HasEnvironment() && !IsFinished()) {
- DCHECK(!IsLoopHeader());
- SetInitialEnvironment(pred->last_environment()->Copy());
- }
-
- predecessors_.Add(pred, zone());
-}
-
-
-void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
- DCHECK(!dominated_blocks_.Contains(block));
- // Keep the list of dominated blocks sorted such that if there are two
- // succeeding blocks in this list, the predecessor comes before the successor.
- int index = 0;
- while (index < dominated_blocks_.length() &&
- dominated_blocks_[index]->block_id() < block->block_id()) {
- ++index;
- }
- dominated_blocks_.InsertAt(index, block, zone());
-}
-
-
-void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
- if (dominator_ == NULL) {
- dominator_ = other;
- other->AddDominatedBlock(this);
- } else if (other->dominator() != NULL) {
- HBasicBlock* first = dominator_;
- HBasicBlock* second = other;
-
- while (first != second) {
- if (first->block_id() > second->block_id()) {
- first = first->dominator();
- } else {
- second = second->dominator();
- }
- DCHECK(first != NULL && second != NULL);
- }
-
- if (dominator_ != first) {
- DCHECK(dominator_->dominated_blocks_.Contains(this));
- dominator_->dominated_blocks_.RemoveElement(this);
- dominator_ = first;
- first->AddDominatedBlock(this);
- }
- }
-}
-
-
-void HBasicBlock::AssignLoopSuccessorDominators() {
- // Mark blocks that dominate all subsequent reachable blocks inside their
- // loop. Exploit the fact that blocks are sorted in reverse post order. When
- // the loop is visited in increasing block id order, if the number of
- // non-loop-exiting successor edges at the dominator_candidate block doesn't
- // exceed the number of previously encountered predecessor edges, there is no
- // path from the loop header to any block with higher id that doesn't go
- // through the dominator_candidate block. In this case, the
- // dominator_candidate block is guaranteed to dominate all blocks reachable
- // from it with higher ids.
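- // As a small example: when the header branches into a diamond inside the
- // loop, the counter starts at 1 (the pre-header edge), drops to 0 at the
- // header, rises to 2 for the two branch edges, stays above 0 at the two
- // arms, and drops back to 0 at the join, so the join is marked as
- // dominating every later block in the loop.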
- HBasicBlock* last = loop_information()->GetLastBackEdge();
- int outstanding_successors = 1; // one edge from the pre-header
- // Header always dominates everything.
- MarkAsLoopSuccessorDominator();
- for (int j = block_id(); j <= last->block_id(); ++j) {
- HBasicBlock* dominator_candidate = graph_->blocks()->at(j);
- for (HPredecessorIterator it(dominator_candidate); !it.Done();
- it.Advance()) {
- HBasicBlock* predecessor = it.Current();
- // Don't count back edges.
- if (predecessor->block_id() < dominator_candidate->block_id()) {
- outstanding_successors--;
- }
- }
-
- // If more successors than predecessors have been seen in the loop up to
- // now, it's not possible to guarantee that the current block dominates
- // all of the blocks with higher IDs. In this case, assume conservatively
- // that those paths through the loop that don't go through the current block
- // contain all of the loop's dependencies. Also be careful to record
- // dominator information about the current loop that's being processed,
- // and not nested loops, which will be processed when
- // AssignLoopSuccessorDominators gets called on their header.
- DCHECK(outstanding_successors >= 0);
- HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
- if (outstanding_successors == 0 &&
- (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
- dominator_candidate->MarkAsLoopSuccessorDominator();
- }
- HControlInstruction* end = dominator_candidate->end();
- for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
- HBasicBlock* successor = it.Current();
- // Only count successors that remain inside the loop and don't loop back
- // to a loop header.
- if (successor->block_id() > dominator_candidate->block_id() &&
- successor->block_id() <= last->block_id()) {
- // Backwards edges must land on loop headers.
- DCHECK(successor->block_id() > dominator_candidate->block_id() ||
- successor->IsLoopHeader());
- outstanding_successors++;
- }
- }
- }
-}
-
-
-int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
- for (int i = 0; i < predecessors_.length(); ++i) {
- if (predecessors_[i] == predecessor) return i;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-#ifdef DEBUG
-void HBasicBlock::Verify() {
- // Check that every block is finished.
- DCHECK(IsFinished());
- DCHECK(block_id() >= 0);
-
- // Check that the incoming edges are in edge split form.
- if (predecessors_.length() > 1) {
- for (int i = 0; i < predecessors_.length(); ++i) {
- DCHECK(predecessors_[i]->end()->SecondSuccessor() == NULL);
- }
- }
-}
-#endif
-
-
-void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
- this->back_edges_.Add(block, block->zone());
- AddBlock(block);
-}
-
-
-HBasicBlock* HLoopInformation::GetLastBackEdge() const {
- int max_id = -1;
- HBasicBlock* result = NULL;
- for (int i = 0; i < back_edges_.length(); ++i) {
- HBasicBlock* cur = back_edges_[i];
- if (cur->block_id() > max_id) {
- max_id = cur->block_id();
- result = cur;
- }
- }
- return result;
-}
-
-
-void HLoopInformation::AddBlock(HBasicBlock* block) {
- if (block == loop_header()) return;
- if (block->parent_loop_header() == loop_header()) return;
- if (block->parent_loop_header() != NULL) {
- AddBlock(block->parent_loop_header());
- } else {
- block->set_parent_loop_header(loop_header());
- blocks_.Add(block, block->zone());
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- AddBlock(block->predecessors()->at(i));
- }
- }
-}
-
-
-#ifdef DEBUG
-
-// Checks reachability of the blocks in this graph and stores a bit in
-// the BitVector "reachable()" for every block that can be reached
- // from the start block of the graph. If "dont_visit" is non-null, that block
- // is treated as if it were not part of the graph. "visited_count()"
-// returns the number of reachable blocks.
-class ReachabilityAnalyzer BASE_EMBEDDED {
- public:
- ReachabilityAnalyzer(HBasicBlock* entry_block,
- int block_count,
- HBasicBlock* dont_visit)
- : visited_count_(0),
- stack_(16, entry_block->zone()),
- reachable_(block_count, entry_block->zone()),
- dont_visit_(dont_visit) {
- PushBlock(entry_block);
- Analyze();
- }
-
- int visited_count() const { return visited_count_; }
- const BitVector* reachable() const { return &reachable_; }
-
- private:
- void PushBlock(HBasicBlock* block) {
- if (block != NULL && block != dont_visit_ &&
- !reachable_.Contains(block->block_id())) {
- reachable_.Add(block->block_id());
- stack_.Add(block, block->zone());
- visited_count_++;
- }
- }
-
- void Analyze() {
- while (!stack_.is_empty()) {
- HControlInstruction* end = stack_.RemoveLast()->end();
- for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
- PushBlock(it.Current());
- }
- }
- }
-
- int visited_count_;
- ZoneList<HBasicBlock*> stack_;
- BitVector reachable_;
- HBasicBlock* dont_visit_;
-};
-
-
-void HGraph::Verify(bool do_full_verify) const {
- base::LockGuard<base::Mutex> guard(isolate()->heap()->relocation_mutex());
- AllowHandleDereference allow_deref;
- AllowDeferredHandleDereference allow_deferred_deref;
- for (int i = 0; i < blocks_.length(); i++) {
- HBasicBlock* block = blocks_.at(i);
-
- block->Verify();
-
- // Check that every block contains at least one node and that only the last
- // node is a control instruction.
- HInstruction* current = block->first();
- DCHECK(current != NULL && current->IsBlockEntry());
- while (current != NULL) {
- DCHECK((current->next() == NULL) == current->IsControlInstruction());
- DCHECK(current->block() == block);
- current->Verify();
- current = current->next();
- }
-
- // Check that successors are correctly set.
- HBasicBlock* first = block->end()->FirstSuccessor();
- HBasicBlock* second = block->end()->SecondSuccessor();
- DCHECK(second == NULL || first != NULL);
-
- // Check that the predecessor array is correct.
- if (first != NULL) {
- DCHECK(first->predecessors()->Contains(block));
- if (second != NULL) {
- DCHECK(second->predecessors()->Contains(block));
- }
- }
-
- // Check that phis have correct arguments.
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- phi->Verify();
- }
-
- // Check that all join blocks have predecessors that end with an
- // unconditional goto and agree on their environment node id.
- if (block->predecessors()->length() >= 2) {
- BailoutId id =
- block->predecessors()->first()->last_environment()->ast_id();
- for (int k = 0; k < block->predecessors()->length(); k++) {
- HBasicBlock* predecessor = block->predecessors()->at(k);
- DCHECK(predecessor->end()->IsGoto() ||
- predecessor->end()->IsDeoptimize());
- DCHECK(predecessor->last_environment()->ast_id() == id);
- }
- }
- }
-
- // Check the special property that the first block has no predecessors.
- DCHECK(blocks_.at(0)->predecessors()->is_empty());
-
- if (do_full_verify) {
- // Check that the graph is fully connected.
- ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
- DCHECK(analyzer.visited_count() == blocks_.length());
-
- // Check that entry block dominator is NULL.
- DCHECK(entry_block_->dominator() == NULL);
-
- // Check dominators.
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_.at(i);
- if (block->dominator() == NULL) {
- // Only the start block may have no dominator assigned.
- DCHECK(i == 0);
- } else {
- // Assert that the block is unreachable when its dominator is excluded from
- // the traversal.
- ReachabilityAnalyzer dominator_analyzer(entry_block_,
- blocks_.length(),
- block->dominator());
- DCHECK(!dominator_analyzer.reachable()->Contains(block->block_id()));
- }
- }
- }
-}
-
-#endif
-
-
-HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
- int32_t value) {
- if (!pointer->is_set()) {
- // Can't pass GetInvalidContext() to HConstant::New, because that will
- // recursively call GetConstant
- HConstant* constant = HConstant::New(isolate(), zone(), NULL, value);
- constant->InsertAfter(entry_block()->first());
- pointer->set(constant);
- return constant;
- }
- return ReinsertConstantIfNecessary(pointer->get());
-}
-
-
-HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) {
- if (!constant->IsLinked()) {
- // The constant was removed from the graph. Reinsert.
- constant->ClearFlag(HValue::kIsDead);
- constant->InsertAfter(entry_block()->first());
- }
- return constant;
-}
-
-
-HConstant* HGraph::GetConstant0() {
- return GetConstant(&constant_0_, 0);
-}
-
-
-HConstant* HGraph::GetConstant1() {
- return GetConstant(&constant_1_, 1);
-}
-
-
-HConstant* HGraph::GetConstantMinus1() {
- return GetConstant(&constant_minus1_, -1);
-}
-
-
-HConstant* HGraph::GetConstantBool(bool value) {
- return value ? GetConstantTrue() : GetConstantFalse();
-}
-
-#define DEFINE_GET_CONSTANT(Name, name, constant, type, htype, boolean_value, \
- undetectable) \
- HConstant* HGraph::GetConstant##Name() { \
- if (!constant_##name##_.is_set()) { \
- HConstant* constant = new (zone()) HConstant( \
- Unique<Object>::CreateImmovable(isolate()->factory()->constant()), \
- Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
- false, Representation::Tagged(), htype, true, boolean_value, \
- undetectable, ODDBALL_TYPE); \
- constant->InsertAfter(entry_block()->first()); \
- constant_##name##_.set(constant); \
- } \
- return ReinsertConstantIfNecessary(constant_##name##_.get()); \
- }
-
-DEFINE_GET_CONSTANT(Undefined, undefined, undefined_value, undefined,
- HType::Undefined(), false, true)
-DEFINE_GET_CONSTANT(True, true, true_value, boolean, HType::Boolean(), true,
- false)
-DEFINE_GET_CONSTANT(False, false, false_value, boolean, HType::Boolean(), false,
- false)
-DEFINE_GET_CONSTANT(Hole, the_hole, the_hole_value, the_hole, HType::None(),
- false, false)
-DEFINE_GET_CONSTANT(Null, null, null_value, null, HType::Null(), false, true)
-DEFINE_GET_CONSTANT(OptimizedOut, optimized_out, optimized_out, optimized_out,
- HType::None(), false, false)
-
-#undef DEFINE_GET_CONSTANT
-
-#define DEFINE_IS_CONSTANT(Name, name) \
-bool HGraph::IsConstant##Name(HConstant* constant) { \
- return constant_##name##_.is_set() && constant == constant_##name##_.get(); \
-}
-DEFINE_IS_CONSTANT(Undefined, undefined)
-DEFINE_IS_CONSTANT(0, 0)
-DEFINE_IS_CONSTANT(1, 1)
-DEFINE_IS_CONSTANT(Minus1, minus1)
-DEFINE_IS_CONSTANT(True, true)
-DEFINE_IS_CONSTANT(False, false)
-DEFINE_IS_CONSTANT(Hole, the_hole)
-DEFINE_IS_CONSTANT(Null, null)
-
-#undef DEFINE_IS_CONSTANT
-
-
-HConstant* HGraph::GetInvalidContext() {
- return GetConstant(&constant_invalid_context_, 0xFFFFC0C7);
-}
-
-
-bool HGraph::IsStandardConstant(HConstant* constant) {
- if (IsConstantUndefined(constant)) return true;
- if (IsConstant0(constant)) return true;
- if (IsConstant1(constant)) return true;
- if (IsConstantMinus1(constant)) return true;
- if (IsConstantTrue(constant)) return true;
- if (IsConstantFalse(constant)) return true;
- if (IsConstantHole(constant)) return true;
- if (IsConstantNull(constant)) return true;
- return false;
-}
-
-
-HGraphBuilder::IfBuilder::IfBuilder() : builder_(NULL), needs_compare_(true) {}
-
-
-HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
- : needs_compare_(true) {
- Initialize(builder);
-}
-
-
-HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder,
- HIfContinuation* continuation)
- : needs_compare_(false), first_true_block_(NULL), first_false_block_(NULL) {
- InitializeDontCreateBlocks(builder);
- continuation->Continue(&first_true_block_, &first_false_block_);
-}
-
-
-void HGraphBuilder::IfBuilder::InitializeDontCreateBlocks(
- HGraphBuilder* builder) {
- builder_ = builder;
- finished_ = false;
- did_then_ = false;
- did_else_ = false;
- did_else_if_ = false;
- did_and_ = false;
- did_or_ = false;
- captured_ = false;
- pending_merge_block_ = false;
- split_edge_merge_block_ = NULL;
- merge_at_join_blocks_ = NULL;
- normal_merge_at_join_block_count_ = 0;
- deopt_merge_at_join_block_count_ = 0;
-}
-
-
-void HGraphBuilder::IfBuilder::Initialize(HGraphBuilder* builder) {
- InitializeDontCreateBlocks(builder);
- HEnvironment* env = builder->environment();
- first_true_block_ = builder->CreateBasicBlock(env->Copy());
- first_false_block_ = builder->CreateBasicBlock(env->Copy());
-}
-
-
-HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
- HControlInstruction* compare) {
- DCHECK(did_then_ == did_else_);
- if (did_else_) {
- // Handle if-then-elseif
- did_else_if_ = true;
- did_else_ = false;
- did_then_ = false;
- did_and_ = false;
- did_or_ = false;
- pending_merge_block_ = false;
- split_edge_merge_block_ = NULL;
- HEnvironment* env = builder()->environment();
- first_true_block_ = builder()->CreateBasicBlock(env->Copy());
- first_false_block_ = builder()->CreateBasicBlock(env->Copy());
- }
- if (split_edge_merge_block_ != NULL) {
- HEnvironment* env = first_false_block_->last_environment();
- HBasicBlock* split_edge = builder()->CreateBasicBlock(env->Copy());
- if (did_or_) {
- compare->SetSuccessorAt(0, split_edge);
- compare->SetSuccessorAt(1, first_false_block_);
- } else {
- compare->SetSuccessorAt(0, first_true_block_);
- compare->SetSuccessorAt(1, split_edge);
- }
- builder()->GotoNoSimulate(split_edge, split_edge_merge_block_);
- } else {
- compare->SetSuccessorAt(0, first_true_block_);
- compare->SetSuccessorAt(1, first_false_block_);
- }
- builder()->FinishCurrentBlock(compare);
- needs_compare_ = false;
- return compare;
-}
-
-
-void HGraphBuilder::IfBuilder::Or() {
- DCHECK(!needs_compare_);
- DCHECK(!did_and_);
- did_or_ = true;
- HEnvironment* env = first_false_block_->last_environment();
- if (split_edge_merge_block_ == NULL) {
- split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy());
- builder()->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
- first_true_block_ = split_edge_merge_block_;
- }
- builder()->set_current_block(first_false_block_);
- first_false_block_ = builder()->CreateBasicBlock(env->Copy());
-}
-
-
-void HGraphBuilder::IfBuilder::And() {
- DCHECK(!needs_compare_);
- DCHECK(!did_or_);
- did_and_ = true;
- HEnvironment* env = first_false_block_->last_environment();
- if (split_edge_merge_block_ == NULL) {
- split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy());
- builder()->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
- first_false_block_ = split_edge_merge_block_;
- }
- builder()->set_current_block(first_true_block_);
- first_true_block_ = builder()->CreateBasicBlock(env->Copy());
-}
-
-
-void HGraphBuilder::IfBuilder::CaptureContinuation(
- HIfContinuation* continuation) {
- DCHECK(!did_else_if_);
- DCHECK(!finished_);
- DCHECK(!captured_);
-
- HBasicBlock* true_block = NULL;
- HBasicBlock* false_block = NULL;
- Finish(&true_block, &false_block);
- DCHECK(true_block != NULL);
- DCHECK(false_block != NULL);
- continuation->Capture(true_block, false_block);
- captured_ = true;
- builder()->set_current_block(NULL);
- End();
-}
-
-
-void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
- DCHECK(!did_else_if_);
- DCHECK(!finished_);
- DCHECK(!captured_);
- HBasicBlock* true_block = NULL;
- HBasicBlock* false_block = NULL;
- Finish(&true_block, &false_block);
- merge_at_join_blocks_ = NULL;
- if (true_block != NULL && !true_block->IsFinished()) {
- DCHECK(continuation->IsTrueReachable());
- builder()->GotoNoSimulate(true_block, continuation->true_branch());
- }
- if (false_block != NULL && !false_block->IsFinished()) {
- DCHECK(continuation->IsFalseReachable());
- builder()->GotoNoSimulate(false_block, continuation->false_branch());
- }
- captured_ = true;
- End();
-}
-
-
-void HGraphBuilder::IfBuilder::Then() {
- DCHECK(!captured_);
- DCHECK(!finished_);
- did_then_ = true;
- if (needs_compare_) {
- // Handle ifs without any expressions; they jump directly to the "else"
- // branch. However, we must pretend that the "then" branch is reachable,
- // so that the graph builder visits it and sees any live-range-extending
- // constructs within it.
- HConstant* constant_false = builder()->graph()->GetConstantFalse();
- ToBooleanHints boolean_type = ToBooleanHint::kBoolean;
- HBranch* branch = builder()->New<HBranch>(
- constant_false, boolean_type, first_true_block_, first_false_block_);
- builder()->FinishCurrentBlock(branch);
- }
- builder()->set_current_block(first_true_block_);
- pending_merge_block_ = true;
-}
-
-
-void HGraphBuilder::IfBuilder::Else() {
- DCHECK(did_then_);
- DCHECK(!captured_);
- DCHECK(!finished_);
- AddMergeAtJoinBlock(false);
- builder()->set_current_block(first_false_block_);
- pending_merge_block_ = true;
- did_else_ = true;
-}
-
-void HGraphBuilder::IfBuilder::Deopt(DeoptimizeReason reason) {
- DCHECK(did_then_);
- builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- AddMergeAtJoinBlock(true);
-}
-
-
-void HGraphBuilder::IfBuilder::Return(HValue* value) {
- HValue* parameter_count = builder()->graph()->GetConstantMinus1();
- builder()->FinishExitCurrentBlock(
- builder()->New<HReturn>(value, parameter_count));
- AddMergeAtJoinBlock(false);
-}
-
-
-void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
- if (!pending_merge_block_) return;
- HBasicBlock* block = builder()->current_block();
- DCHECK(block == NULL || !block->IsFinished());
- MergeAtJoinBlock* record = new (builder()->zone())
- MergeAtJoinBlock(block, deopt, merge_at_join_blocks_);
- merge_at_join_blocks_ = record;
- if (block != NULL) {
- DCHECK(block->end() == NULL);
- if (deopt) {
- deopt_merge_at_join_block_count_++;
- } else {
- normal_merge_at_join_block_count_++;
- }
- }
- builder()->set_current_block(NULL);
- pending_merge_block_ = false;
-}
-
-
-void HGraphBuilder::IfBuilder::Finish() {
- DCHECK(!finished_);
- if (!did_then_) {
- Then();
- }
- AddMergeAtJoinBlock(false);
- if (!did_else_) {
- Else();
- AddMergeAtJoinBlock(false);
- }
- finished_ = true;
-}
-
-
-void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
- HBasicBlock** else_continuation) {
- Finish();
-
- MergeAtJoinBlock* else_record = merge_at_join_blocks_;
- if (else_continuation != NULL) {
- *else_continuation = else_record->block_;
- }
- MergeAtJoinBlock* then_record = else_record->next_;
- if (then_continuation != NULL) {
- *then_continuation = then_record->block_;
- }
- DCHECK(then_record->next_ == NULL);
-}
-
-
-void HGraphBuilder::IfBuilder::EndUnreachable() {
- if (captured_) return;
- Finish();
- builder()->set_current_block(nullptr);
-}
-
-
-void HGraphBuilder::IfBuilder::End() {
- if (captured_) return;
- Finish();
-
- int total_merged_blocks = normal_merge_at_join_block_count_ +
- deopt_merge_at_join_block_count_;
- DCHECK(total_merged_blocks >= 1);
- HBasicBlock* merge_block =
- total_merged_blocks == 1 ? NULL : builder()->graph()->CreateBasicBlock();
-
- // Merge non-deopt blocks first to ensure the environment has the right size
- // for padding.
- MergeAtJoinBlock* current = merge_at_join_blocks_;
- while (current != NULL) {
- if (!current->deopt_ && current->block_ != NULL) {
- // If there is only one block that makes it through to the end of the
- // if, then just set it as the current block and continue rather than
- // creating an unnecessary merge block.
- if (total_merged_blocks == 1) {
- builder()->set_current_block(current->block_);
- return;
- }
- builder()->GotoNoSimulate(current->block_, merge_block);
- }
- current = current->next_;
- }
-
- // Merge deopt blocks, padding when necessary.
- current = merge_at_join_blocks_;
- while (current != NULL) {
- if (current->deopt_ && current->block_ != NULL) {
- current->block_->FinishExit(
- HAbnormalExit::New(builder()->isolate(), builder()->zone(), NULL),
- SourcePosition::Unknown());
- }
- current = current->next_;
- }
- builder()->set_current_block(merge_block);
-}
-
-
-HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder) {
- Initialize(builder, NULL, kWhileTrue, NULL);
-}
-
-
-HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context,
- LoopBuilder::Direction direction) {
- Initialize(builder, context, direction, builder->graph()->GetConstant1());
-}
-
-
-HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context,
- LoopBuilder::Direction direction,
- HValue* increment_amount) {
- Initialize(builder, context, direction, increment_amount);
- increment_amount_ = increment_amount;
-}
-
-
-void HGraphBuilder::LoopBuilder::Initialize(HGraphBuilder* builder,
- HValue* context,
- Direction direction,
- HValue* increment_amount) {
- builder_ = builder;
- context_ = context;
- direction_ = direction;
- increment_amount_ = increment_amount;
-
- finished_ = false;
- header_block_ = builder->CreateLoopHeaderBlock();
- body_block_ = NULL;
- exit_block_ = NULL;
- exit_trampoline_block_ = NULL;
-}
-
-
-HValue* HGraphBuilder::LoopBuilder::BeginBody(
- HValue* initial,
- HValue* terminating,
- Token::Value token) {
- DCHECK(direction_ != kWhileTrue);
- HEnvironment* env = builder_->environment();
- phi_ = header_block_->AddNewPhi(env->values()->length());
- phi_->AddInput(initial);
- env->Push(initial);
- builder_->GotoNoSimulate(header_block_);
-
- HEnvironment* body_env = env->Copy();
- HEnvironment* exit_env = env->Copy();
- // Remove the phi from the expression stack
- body_env->Pop();
- exit_env->Pop();
- body_block_ = builder_->CreateBasicBlock(body_env);
- exit_block_ = builder_->CreateBasicBlock(exit_env);
-
- builder_->set_current_block(header_block_);
- env->Pop();
- builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>(
- phi_, terminating, token, body_block_, exit_block_));
-
- builder_->set_current_block(body_block_);
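- // With the pre-increment/pre-decrement directions the body receives the
- // already-updated value computed here; the post variants apply the update
- // in EndBody() after the body has run.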
- if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
- Isolate* isolate = builder_->isolate();
- HValue* one = builder_->graph()->GetConstant1();
- if (direction_ == kPreIncrement) {
- increment_ = HAdd::New(isolate, zone(), context_, phi_, one);
- } else {
- increment_ = HSub::New(isolate, zone(), context_, phi_, one);
- }
- increment_->ClearFlag(HValue::kCanOverflow);
- builder_->AddInstruction(increment_);
- return increment_;
- } else {
- return phi_;
- }
-}
-
-
-void HGraphBuilder::LoopBuilder::BeginBody(int drop_count) {
- DCHECK(direction_ == kWhileTrue);
- HEnvironment* env = builder_->environment();
- builder_->GotoNoSimulate(header_block_);
- builder_->set_current_block(header_block_);
- env->Drop(drop_count);
-}
-
-
-void HGraphBuilder::LoopBuilder::Break() {
- if (exit_trampoline_block_ == NULL) {
- // It's the first time we've seen a break.
- if (direction_ == kWhileTrue) {
- HEnvironment* env = builder_->environment()->Copy();
- exit_trampoline_block_ = builder_->CreateBasicBlock(env);
- } else {
- HEnvironment* env = exit_block_->last_environment()->Copy();
- exit_trampoline_block_ = builder_->CreateBasicBlock(env);
- builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
- }
- }
-
- builder_->GotoNoSimulate(exit_trampoline_block_);
- builder_->set_current_block(NULL);
-}
-
-
-void HGraphBuilder::LoopBuilder::EndBody() {
- DCHECK(!finished_);
-
- if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
- Isolate* isolate = builder_->isolate();
- if (direction_ == kPostIncrement) {
- increment_ =
- HAdd::New(isolate, zone(), context_, phi_, increment_amount_);
- } else {
- increment_ =
- HSub::New(isolate, zone(), context_, phi_, increment_amount_);
- }
- increment_->ClearFlag(HValue::kCanOverflow);
- builder_->AddInstruction(increment_);
- }
-
- if (direction_ != kWhileTrue) {
- // Push the new increment value on the expression stack to merge into
- // the phi.
- builder_->environment()->Push(increment_);
- }
- HBasicBlock* last_block = builder_->current_block();
- builder_->GotoNoSimulate(last_block, header_block_);
- header_block_->loop_information()->RegisterBackEdge(last_block);
-
- if (exit_trampoline_block_ != NULL) {
- builder_->set_current_block(exit_trampoline_block_);
- } else {
- builder_->set_current_block(exit_block_);
- }
- finished_ = true;
-}
-
-
-HGraph* HGraphBuilder::CreateGraph() {
- DCHECK(!FLAG_minimal);
- graph_ = new (zone()) HGraph(info_, descriptor_);
- if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
- CompilationPhase phase("H_Block building", info_);
- set_current_block(graph()->entry_block());
- if (!BuildGraph()) return NULL;
- graph()->FinalizeUniqueness();
- return graph_;
-}
-
-
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- DCHECK(current_block() != NULL);
- DCHECK(!FLAG_hydrogen_track_positions || position_.IsKnown() ||
- !info_->IsOptimizing());
- current_block()->AddInstruction(instr, source_position());
- if (graph()->IsInsideNoSideEffectsScope()) {
- instr->SetFlag(HValue::kHasNoObservableSideEffects);
- }
- return instr;
-}
-
-
-void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
- position_.IsKnown());
- current_block()->Finish(last, source_position());
- if (last->IsReturn() || last->IsAbnormalExit()) {
- set_current_block(NULL);
- }
-}
-
-
-void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
- DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
- position_.IsKnown());
- current_block()->FinishExit(instruction, source_position());
- if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
- set_current_block(NULL);
- }
-}
-
-
-void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- HValue* reference = Add<HConstant>(ExternalReference(counter));
- HValue* old_value =
- Add<HLoadNamedField>(reference, nullptr, HObjectAccess::ForCounter());
- HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
- new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow
- Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
- new_value, STORE_TO_INITIALIZED_ENTRY);
- }
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId id,
- RemovableSimulate removable) {
- DCHECK(current_block() != NULL);
- DCHECK(!graph()->IsInsideNoSideEffectsScope());
- current_block()->AddNewSimulate(id, source_position(), removable);
-}
-
-
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
- HBasicBlock* b = graph()->CreateBasicBlock();
- b->SetInitialEnvironment(env);
- return b;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
- HBasicBlock* header = graph()->CreateBasicBlock();
- HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
- header->SetInitialEnvironment(entry_env);
- header->AttachLoopInformation();
- return header;
-}
-
-
-HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) {
- HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
-
- HValue* bit_field2 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
- return BuildDecodeField<Map::ElementsKindBits>(bit_field2);
-}
-
-
-HValue* HGraphBuilder::BuildEnumLength(HValue* map) {
- NoObservableSideEffectsScope scope(this);
- HValue* bit_field3 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
- return BuildDecodeField<Map::EnumLengthBits>(bit_field3);
-}
-
-
-HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
- if (obj->type().IsHeapObject()) return obj;
- return Add<HCheckHeapObject>(obj);
-}
-
-void HGraphBuilder::FinishExitWithHardDeoptimization(DeoptimizeReason reason) {
- Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- FinishExitCurrentBlock(New<HAbnormalExit>());
-}
-
-
-HValue* HGraphBuilder::BuildCheckString(HValue* string) {
- if (!string->type().IsString()) {
- DCHECK(!string->IsConstant() ||
- !HConstant::cast(string)->HasStringValue());
- BuildCheckHeapObject(string);
- return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
- }
- return string;
-}
-
-HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* checked) {
- if (object->type().IsJSObject()) return object;
- HValue* function = checked->ActualValue();
- if (function->IsConstant() &&
- HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
- Handle<JSFunction> f = Handle<JSFunction>::cast(
- HConstant::cast(function)->handle(isolate()));
- SharedFunctionInfo* shared = f->shared();
- if (is_strict(shared->language_mode()) || shared->native()) return object;
- }
- return Add<HWrapReceiver>(object, checked);
-}
-
-
-HValue* HGraphBuilder::BuildCheckAndGrowElementsCapacity(
- HValue* object, HValue* elements, ElementsKind kind, HValue* length,
- HValue* capacity, HValue* key) {
- HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
- HValue* max_capacity = AddUncasted<HAdd>(capacity, max_gap);
- Add<HBoundsCheck>(key, max_capacity);
-
- HValue* new_capacity = BuildNewElementsCapacity(key);
- HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind, kind,
- length, new_capacity);
- return new_elements;
-}
-
-
-HValue* HGraphBuilder::BuildCheckForCapacityGrow(
- HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array,
- PropertyAccessType access_type) {
- IfBuilder length_checker(this);
-
- Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
- length_checker.If<HCompareNumericAndBranch>(key, length, token);
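- // Holey kinds may need to grow for any key at or past the current length;
- // packed kinds only grow for an append at key == length (any other
- // out-of-bounds key deopts via the bounds check in the else branch).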
-
- length_checker.Then();
-
- HValue* current_capacity = AddLoadFixedArrayLength(elements);
-
- if (top_info()->IsStub()) {
- IfBuilder capacity_checker(this);
- capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
- Token::GTE);
- capacity_checker.Then();
- HValue* new_elements = BuildCheckAndGrowElementsCapacity(
- object, elements, kind, length, current_capacity, key);
- environment()->Push(new_elements);
- capacity_checker.Else();
- environment()->Push(elements);
- capacity_checker.End();
- } else {
- HValue* result = Add<HMaybeGrowElements>(
- object, elements, key, current_capacity, is_js_array, kind);
- environment()->Push(result);
- }
-
- if (is_js_array) {
- HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
- new_length->ClearFlag(HValue::kCanOverflow);
-
- Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind),
- new_length);
- }
-
- if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
- HValue* checked_elements = environment()->Top();
-
- // Write zero to ensure that the new element is initialized with some smi.
- Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), nullptr,
- kind);
- }
-
- length_checker.Else();
- Add<HBoundsCheck>(key, length);
-
- environment()->Push(elements);
- length_checker.End();
-
- return environment()->Pop();
-}
-
-
-HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length) {
- Factory* factory = isolate()->factory();
-
- IfBuilder cow_checker(this);
-
- cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
- cow_checker.Then();
-
- HValue* capacity = AddLoadFixedArrayLength(elements);
-
- HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind,
- kind, length, capacity);
-
- environment()->Push(new_elements);
-
- cow_checker.Else();
-
- environment()->Push(elements);
-
- cow_checker.End();
-
- return environment()->Pop();
-}
-
-HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
- HValue* done) {
- NoObservableSideEffectsScope scope(this);
-
- // Allocate the JSIteratorResult object.
- HValue* result =
- Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
- NOT_TENURED, JS_OBJECT_TYPE, graph()->GetConstant0());
-
- // Initialize the JSIteratorResult object.
- HValue* native_context = BuildGetNativeContext();
- HValue* map = Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(Context::ITERATOR_RESULT_MAP_INDEX));
- Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
- HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
- Add<HStoreNamedField>(result, HObjectAccess::ForPropertiesPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(result, HObjectAccess::ForObservableJSObjectOffset(
- JSIteratorResult::kValueOffset),
- value);
- Add<HStoreNamedField>(result, HObjectAccess::ForObservableJSObjectOffset(
- JSIteratorResult::kDoneOffset),
- done);
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
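- // The five stores above (map, properties, elements, value and done) match
- // the five pointer-sized fields asserted by the STATIC_ASSERT.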
- return result;
-}
-
-
-HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) {
- NoObservableSideEffectsScope scope(this);
-
- // Convert constant numbers at compile time.
- if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) {
- Handle<Object> number = HConstant::cast(object)->handle(isolate());
- Handle<String> result = isolate()->factory()->NumberToString(number);
- return Add<HConstant>(result);
- }
-
- // Create a joinable continuation.
- HIfContinuation found(graph()->CreateBasicBlock(),
- graph()->CreateBasicBlock());
-
- // Load the number string cache.
- HValue* number_string_cache =
- Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- HValue* mask = AddLoadFixedArrayLength(number_string_cache);
- mask->set_type(HType::Smi());
- mask = AddUncasted<HSar>(mask, graph()->GetConstant1());
- mask = AddUncasted<HSub>(mask, graph()->GetConstant1());
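- // For instance, a cache backed by a 128-element FixedArray holds 64
- // (number, string) pairs, so the mask computed here is 128 / 2 - 1 = 63.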
-
- // Check whether object is a smi.
- IfBuilder if_objectissmi(this);
- if_objectissmi.If<HIsSmiAndBranch>(object);
- if_objectissmi.Then();
- {
- // Compute hash for smi similar to smi_get_hash().
- HValue* hash = AddUncasted<HBitwise>(Token::BIT_AND, object, mask);
-
- // Load the key.
- HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
- HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
- nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-
- // Check if object == key.
- IfBuilder if_objectiskey(this);
- if_objectiskey.If<HCompareObjectEqAndBranch>(object, key);
- if_objectiskey.Then();
- {
- // Make the key_index available.
- Push(key_index);
- }
- if_objectiskey.JoinContinuation(&found);
- }
- if_objectissmi.Else();
- {
- if (type->Is(AstType::SignedSmall())) {
- if_objectissmi.Deopt(DeoptimizeReason::kExpectedSmi);
- } else {
- // Check if the object is a heap number.
- IfBuilder if_objectisnumber(this);
- HValue* objectisnumber = if_objectisnumber.If<HCompareMap>(
- object, isolate()->factory()->heap_number_map());
- if_objectisnumber.Then();
- {
- // Compute hash for heap number similar to double_get_hash().
- HValue* low = Add<HLoadNamedField>(
- object, objectisnumber,
- HObjectAccess::ForHeapNumberValueLowestBits());
- HValue* high = Add<HLoadNamedField>(
- object, objectisnumber,
- HObjectAccess::ForHeapNumberValueHighestBits());
- HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
- hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
-
- // Load the key.
- HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
- HValue* key =
- Add<HLoadKeyed>(number_string_cache, key_index, nullptr, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-
- // Check if the key is a heap number and compare it with the object.
- IfBuilder if_keyisnotsmi(this);
- HValue* keyisnotsmi = if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
- if_keyisnotsmi.Then();
- {
- IfBuilder if_keyisheapnumber(this);
- if_keyisheapnumber.If<HCompareMap>(
- key, isolate()->factory()->heap_number_map());
- if_keyisheapnumber.Then();
- {
- // Check if values of key and object match.
- IfBuilder if_keyeqobject(this);
- if_keyeqobject.If<HCompareNumericAndBranch>(
- Add<HLoadNamedField>(key, keyisnotsmi,
- HObjectAccess::ForHeapNumberValue()),
- Add<HLoadNamedField>(object, objectisnumber,
- HObjectAccess::ForHeapNumberValue()),
- Token::EQ);
- if_keyeqobject.Then();
- {
- // Make the key_index available.
- Push(key_index);
- }
- if_keyeqobject.JoinContinuation(&found);
- }
- if_keyisheapnumber.JoinContinuation(&found);
- }
- if_keyisnotsmi.JoinContinuation(&found);
- }
- if_objectisnumber.Else();
- {
- if (type->Is(AstType::Number())) {
- if_objectisnumber.Deopt(DeoptimizeReason::kExpectedHeapNumber);
- }
- }
- if_objectisnumber.JoinContinuation(&found);
- }
- }
- if_objectissmi.JoinContinuation(&found);
-
- // Check for cache hit.
- IfBuilder if_found(this, &found);
- if_found.Then();
- {
- // Count the number-to-string operation in native code.
- AddIncrementCounter(isolate()->counters()->number_to_string_native());
-
- // Load the value in case of cache hit.
- HValue* key_index = Pop();
- HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
- Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE));
- }
- if_found.Else();
- {
- // Cache miss; fall back to the runtime.
- Add<HPushArguments>(object);
- Push(Add<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
- 1));
- }
- if_found.End();
-
- return Pop();
-}
-
-HValue* HGraphBuilder::BuildToNumber(HValue* input) {
- if (input->type().IsTaggedNumber() ||
- input->representation().IsSpecialization()) {
- return input;
- }
- Callable callable = CodeFactory::ToNumber(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {input};
- HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- instr->set_type(HType::TaggedNumber());
- return instr;
-}
-
-
-HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
- NoObservableSideEffectsScope scope(this);
-
- // Create a joinable continuation.
- HIfContinuation wrap(graph()->CreateBasicBlock(),
- graph()->CreateBasicBlock());
-
- // Determine the proper global constructor function required to wrap
- // {receiver} into a JSValue, unless {receiver} is already a {JSReceiver}, in
- // which case we just return it. Deopts to Runtime::kToObject if {receiver}
- // is undefined or null.
- IfBuilder receiver_is_smi(this);
- receiver_is_smi.If<HIsSmiAndBranch>(receiver);
- receiver_is_smi.Then();
- {
- // Use global Number function.
- Push(Add<HConstant>(Context::NUMBER_FUNCTION_INDEX));
- }
- receiver_is_smi.Else();
- {
- // Determine {receiver} map and instance type.
- HValue* receiver_map =
- Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
- HValue* receiver_instance_type = Add<HLoadNamedField>(
- receiver_map, nullptr, HObjectAccess::ForMapInstanceType());
-
- // First check whether {receiver} is already a spec object (fast case).
- IfBuilder receiver_is_not_spec_object(this);
- receiver_is_not_spec_object.If<HCompareNumericAndBranch>(
- receiver_instance_type, Add<HConstant>(FIRST_JS_RECEIVER_TYPE),
- Token::LT);
- receiver_is_not_spec_object.Then();
- {
- // Load the constructor function index from the {receiver} map.
- HValue* constructor_function_index = Add<HLoadNamedField>(
- receiver_map, nullptr,
- HObjectAccess::ForMapInObjectPropertiesOrConstructorFunctionIndex());
-
- // Check if {receiver} has a constructor (null and undefined have no
- // constructors, so we deoptimize to the runtime to throw an exception).
- IfBuilder constructor_function_index_is_invalid(this);
- constructor_function_index_is_invalid.If<HCompareNumericAndBranch>(
- constructor_function_index,
- Add<HConstant>(Map::kNoConstructorFunctionIndex), Token::EQ);
- constructor_function_index_is_invalid.ThenDeopt(
- DeoptimizeReason::kUndefinedOrNullInToObject);
- constructor_function_index_is_invalid.End();
-
- // Use the global constructor function.
- Push(constructor_function_index);
- }
- receiver_is_not_spec_object.JoinContinuation(&wrap);
- }
- receiver_is_smi.JoinContinuation(&wrap);
-
- // Wrap the receiver if necessary.
- IfBuilder if_wrap(this, &wrap);
- if_wrap.Then();
- {
- // Grab the constructor function index.
- HValue* constructor_index = Pop();
-
- // Load native context.
- HValue* native_context = BuildGetNativeContext();
-
- // Determine the initial map for the global constructor.
- HValue* constructor = Add<HLoadKeyed>(native_context, constructor_index,
- nullptr, nullptr, FAST_ELEMENTS);
- HValue* constructor_initial_map = Add<HLoadNamedField>(
- constructor, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
- // Allocate and initialize a JSValue wrapper.
- HValue* value =
- BuildAllocate(Add<HConstant>(JSValue::kSize), HType::JSObject(),
- JS_VALUE_TYPE, HAllocationMode());
- Add<HStoreNamedField>(value, HObjectAccess::ForMap(),
- constructor_initial_map);
- HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
- Add<HStoreNamedField>(value, HObjectAccess::ForPropertiesPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(value, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(value, HObjectAccess::ForObservableJSObjectOffset(
- JSValue::kValueOffset),
- receiver);
- Push(value);
- }
- if_wrap.Else();
- { Push(receiver); }
- if_wrap.End();
- return Pop();
-}
-
-
-HAllocate* HGraphBuilder::BuildAllocate(
- HValue* object_size,
- HType type,
- InstanceType instance_type,
- HAllocationMode allocation_mode) {
- // Compute the effective allocation size.
- HValue* size = object_size;
- if (allocation_mode.CreateAllocationMementos()) {
- size = AddUncasted<HAdd>(size, Add<HConstant>(AllocationMemento::kSize));
- size->ClearFlag(HValue::kCanOverflow);
- }
-
- // Perform the actual allocation.
- HAllocate* object = Add<HAllocate>(
- size, type, allocation_mode.GetPretenureMode(), instance_type,
- graph()->GetConstant0(), allocation_mode.feedback_site());
-
- // Set up the allocation memento.
- if (allocation_mode.CreateAllocationMementos()) {
- BuildCreateAllocationMemento(
- object, object_size, allocation_mode.current_site());
- }
-
- return object;
-}
-
-
-HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
- HValue* right_length) {
- // Compute the combined string length and check against max string length.
- HValue* length = AddUncasted<HAdd>(left_length, right_length);
- // Check that length <= kMaxLength <=> length < kMaxLength + 1.
- HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
- if (top_info()->IsStub() || !isolate()->IsStringLengthOverflowIntact()) {
- // This is a mitigation for crbug.com/627934; the real fix
- // will be to migrate the StringAddStub to TurboFan one day.
- IfBuilder if_invalid(this);
- if_invalid.If<HCompareNumericAndBranch>(length, max_length, Token::GT);
- if_invalid.Then();
- {
- Add<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kThrowInvalidStringLength), 0);
- }
- if_invalid.End();
- } else {
- graph()->MarkDependsOnStringLengthOverflow();
- Add<HBoundsCheck>(length, max_length);
- }
- return length;
-}
-
-
-HValue* HGraphBuilder::BuildCreateConsString(
- HValue* length,
- HValue* left,
- HValue* right,
- HAllocationMode allocation_mode) {
- // Determine the string instance types.
- HInstruction* left_instance_type = AddLoadStringInstanceType(left);
- HInstruction* right_instance_type = AddLoadStringInstanceType(right);
-
- // Allocate the cons string object. HAllocate does not care whether we
- // pass CONS_STRING_TYPE or CONS_ONE_BYTE_STRING_TYPE here, so we just use
- // CONS_STRING_TYPE. Below we decide whether the cons string is
- // one-byte or two-byte and set the appropriate map.
- DCHECK(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
- CONS_ONE_BYTE_STRING_TYPE));
- HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
- HType::String(), CONS_STRING_TYPE,
- allocation_mode);
-
- // Compute intersection and difference of instance types.
- HValue* anded_instance_types = AddUncasted<HBitwise>(
- Token::BIT_AND, left_instance_type, right_instance_type);
- HValue* xored_instance_types = AddUncasted<HBitwise>(
- Token::BIT_XOR, left_instance_type, right_instance_type);
-
- // We create a one-byte cons string if
- // 1. both strings are one-byte, or
- // 2. at least one of the strings is two-byte, but happens to contain only
- // one-byte characters.
- // To do this, we check
- // 1. if both strings are one-byte, or if the one-byte data hint is set in
- // both strings, or
- // 2. if one of the strings has the one-byte data hint set and the other
- // string is one-byte.
- IfBuilder if_onebyte(this);
- STATIC_ASSERT(kOneByteStringTag != 0);
- STATIC_ASSERT(kOneByteDataHintMask != 0);
- if_onebyte.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, anded_instance_types,
- Add<HConstant>(static_cast<int32_t>(
- kStringEncodingMask | kOneByteDataHintMask))),
- graph()->GetConstant0(), Token::NE);
- if_onebyte.Or();
- STATIC_ASSERT(kOneByteStringTag != 0 &&
- kOneByteDataHintTag != 0 &&
- kOneByteDataHintTag != kOneByteStringTag);
- if_onebyte.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, xored_instance_types,
- Add<HConstant>(static_cast<int32_t>(
- kOneByteStringTag | kOneByteDataHintTag))),
- Add<HConstant>(static_cast<int32_t>(
- kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
- if_onebyte.Then();
- {
- // We can safely skip the write barrier for storing the map here.
- Add<HStoreNamedField>(
- result, HObjectAccess::ForMap(),
- Add<HConstant>(isolate()->factory()->cons_one_byte_string_map()));
- }
- if_onebyte.Else();
- {
- // We can safely skip the write barrier for storing the map here.
- Add<HStoreNamedField>(
- result, HObjectAccess::ForMap(),
- Add<HConstant>(isolate()->factory()->cons_string_map()));
- }
- if_onebyte.End();
-
- // Initialize the cons string fields.
- Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
- Add<HConstant>(String::kEmptyHashField));
- Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
- Add<HStoreNamedField>(result, HObjectAccess::ForConsStringFirst(), left);
- Add<HStoreNamedField>(result, HObjectAccess::ForConsStringSecond(), right);
-
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
-
- return result;
-}
-
-
-void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
- HValue* src_offset,
- String::Encoding src_encoding,
- HValue* dst,
- HValue* dst_offset,
- String::Encoding dst_encoding,
- HValue* length) {
- DCHECK(dst_encoding != String::ONE_BYTE_ENCODING ||
- src_encoding == String::ONE_BYTE_ENCODING);
- LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
- HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
- {
- HValue* src_index = AddUncasted<HAdd>(src_offset, index);
- HValue* value =
- AddUncasted<HSeqStringGetChar>(src_encoding, src, src_index);
- HValue* dst_index = AddUncasted<HAdd>(dst_offset, index);
- Add<HSeqStringSetChar>(dst_encoding, dst, dst_index, value);
- }
- loop.EndBody();
-}
-
-
-HValue* HGraphBuilder::BuildObjectSizeAlignment(
- HValue* unaligned_size, int header_size) {
- DCHECK((header_size & kObjectAlignmentMask) == 0);
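- // Rounds header_size plus unaligned_size up to the next multiple of the
- // object alignment; e.g. with 8-byte alignment, a 13-byte payload after a
- // 16-byte header becomes (13 + 16 + 7) & ~7 = 32 bytes.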
- HValue* size = AddUncasted<HAdd>(
- unaligned_size, Add<HConstant>(static_cast<int32_t>(
- header_size + kObjectAlignmentMask)));
- size->ClearFlag(HValue::kCanOverflow);
- return AddUncasted<HBitwise>(
- Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
- ~kObjectAlignmentMask)));
-}
-
-
-HValue* HGraphBuilder::BuildUncheckedStringAdd(
- HValue* left,
- HValue* right,
- HAllocationMode allocation_mode) {
- // Determine the string lengths.
- HValue* left_length = AddLoadStringLength(left);
- HValue* right_length = AddLoadStringLength(right);
-
- // Compute the combined string length.
- HValue* length = BuildAddStringLengths(left_length, right_length);
-
- // Do some manual constant folding here.
- if (left_length->IsConstant()) {
- HConstant* c_left_length = HConstant::cast(left_length);
- DCHECK_NE(0, c_left_length->Integer32Value());
- if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) {
- // The right string contains at least one character.
- return BuildCreateConsString(length, left, right, allocation_mode);
- }
- } else if (right_length->IsConstant()) {
- HConstant* c_right_length = HConstant::cast(right_length);
- DCHECK_NE(0, c_right_length->Integer32Value());
- if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) {
- // The left string contains at least one character.
- return BuildCreateConsString(length, left, right, allocation_mode);
- }
- }
-
- // Check if we should create a cons string.
- IfBuilder if_createcons(this);
- if_createcons.If<HCompareNumericAndBranch>(
- length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
- if_createcons.And();
- if_createcons.If<HCompareNumericAndBranch>(
- length, Add<HConstant>(ConsString::kMaxLength), Token::LTE);
- if_createcons.Then();
- {
- // Create a cons string.
- Push(BuildCreateConsString(length, left, right, allocation_mode));
- }
- if_createcons.Else();
- {
- // Determine the string instance types.
- HValue* left_instance_type = AddLoadStringInstanceType(left);
- HValue* right_instance_type = AddLoadStringInstanceType(right);
-
- // Compute union and difference of instance types.
- HValue* ored_instance_types = AddUncasted<HBitwise>(
- Token::BIT_OR, left_instance_type, right_instance_type);
- HValue* xored_instance_types = AddUncasted<HBitwise>(
- Token::BIT_XOR, left_instance_type, right_instance_type);
-
- // Check if both strings have the same encoding and both are
- // sequential.
- IfBuilder if_sameencodingandsequential(this);
- if_sameencodingandsequential.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, xored_instance_types,
- Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
- graph()->GetConstant0(), Token::EQ);
- if_sameencodingandsequential.And();
- STATIC_ASSERT(kSeqStringTag == 0);
- if_sameencodingandsequential.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, ored_instance_types,
- Add<HConstant>(static_cast<int32_t>(kStringRepresentationMask))),
- graph()->GetConstant0(), Token::EQ);
- if_sameencodingandsequential.Then();
- {
- HConstant* string_map =
- Add<HConstant>(isolate()->factory()->string_map());
- HConstant* one_byte_string_map =
- Add<HConstant>(isolate()->factory()->one_byte_string_map());
-
- // Determine the map and size depending on whether the result is a one-byte
- // string.
- IfBuilder if_onebyte(this);
- STATIC_ASSERT(kOneByteStringTag != 0);
- if_onebyte.If<HCompareNumericAndBranch>(
- AddUncasted<HBitwise>(
- Token::BIT_AND, ored_instance_types,
- Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
- graph()->GetConstant0(), Token::NE);
- if_onebyte.Then();
- {
- // Allocate sequential one-byte string object.
- Push(length);
- Push(one_byte_string_map);
- }
- if_onebyte.Else();
- {
- // Allocate sequential two-byte string object.
- HValue* size = AddUncasted<HShl>(length, graph()->GetConstant1());
- size->ClearFlag(HValue::kCanOverflow);
- size->SetFlag(HValue::kUint32);
- Push(size);
- Push(string_map);
- }
- if_onebyte.End();
- HValue* map = Pop();
-
- // Calculate the number of bytes needed for the characters in the
- // string while observing object alignment.
- STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
- HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
-
- IfBuilder if_size(this);
- if_size.If<HCompareNumericAndBranch>(
- size, Add<HConstant>(kMaxRegularHeapObjectSize), Token::LT);
- if_size.Then();
- {
- // Allocate the string object. HAllocate does not care whether we pass
- // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
- HAllocate* result =
- BuildAllocate(size, HType::String(), STRING_TYPE, allocation_mode);
- Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
-
- // Initialize the string fields.
- Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
- Add<HConstant>(String::kEmptyHashField));
- Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
-
- // Copy characters to the result string.
- IfBuilder if_twobyte(this);
- if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
- if_twobyte.Then();
- {
- // Copy characters from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
- graph()->GetConstant0(), String::TWO_BYTE_ENCODING, left_length);
-
- // Copy characters from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
- left_length, String::TWO_BYTE_ENCODING, right_length);
- }
- if_twobyte.Else();
- {
- // Copy characters from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
- graph()->GetConstant0(), String::ONE_BYTE_ENCODING, left_length);
-
- // Copy characters from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
- left_length, String::ONE_BYTE_ENCODING, right_length);
- }
- if_twobyte.End();
-
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
-
- // Return the sequential string.
- Push(result);
- }
- if_size.Else();
- {
- // Fall back to the runtime to add the two strings. The string has to be
- // allocated in LO space.
- Add<HPushArguments>(left, right);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
- }
- if_size.End();
- }
- if_sameencodingandsequential.Else();
- {
- // Fall back to the runtime to add the two strings.
- Add<HPushArguments>(left, right);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
- }
- if_sameencodingandsequential.End();
- }
- if_createcons.End();
-
- return Pop();
-}
-
-
-HValue* HGraphBuilder::BuildStringAdd(
- HValue* left,
- HValue* right,
- HAllocationMode allocation_mode) {
- NoObservableSideEffectsScope no_effects(this);
-
- // Determine string lengths.
- HValue* left_length = AddLoadStringLength(left);
- HValue* right_length = AddLoadStringLength(right);
-
- // Check if left string is empty.
- IfBuilder if_leftempty(this);
- if_leftempty.If<HCompareNumericAndBranch>(
- left_length, graph()->GetConstant0(), Token::EQ);
- if_leftempty.Then();
- {
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
-
- // Just return the right string.
- Push(right);
- }
- if_leftempty.Else();
- {
- // Check if right string is empty.
- IfBuilder if_rightempty(this);
- if_rightempty.If<HCompareNumericAndBranch>(
- right_length, graph()->GetConstant0(), Token::EQ);
- if_rightempty.Then();
- {
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
-
- // Just return the left string.
- Push(left);
- }
- if_rightempty.Else();
- {
- // Add the two non-empty strings.
- Push(BuildUncheckedStringAdd(left, right, allocation_mode));
- }
- if_rightempty.End();
- }
- if_leftempty.End();
-
- return Pop();
-}
-
-
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* checked_object,
- HValue* key,
- HValue* val,
- bool is_js_array,
- ElementsKind elements_kind,
- PropertyAccessType access_type,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode) {
- DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
- checked_object->IsCheckMaps());
- DCHECK(!IsFixedTypedArrayElementsKind(elements_kind) || !is_js_array);
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
- checked_object->ClearDependsOnFlag(kElementsKind);
- }
-
- bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
- bool fast_elements = IsFastObjectElementsKind(elements_kind);
- HValue* elements = AddLoadElements(checked_object);
- if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
- HCheckMaps* check_cow_map = Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map());
- check_cow_map->ClearDependsOnFlag(kElementsKind);
- }
- HInstruction* length = NULL;
- if (is_js_array) {
- length = Add<HLoadNamedField>(
- checked_object->ActualValue(), checked_object,
- HObjectAccess::ForArrayLength(elements_kind));
- } else {
- length = AddLoadFixedArrayLength(elements);
- }
- length->set_type(HType::Smi());
- HValue* checked_key = NULL;
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
- checked_object = Add<HCheckArrayBufferNotNeutered>(checked_object);
-
- HValue* external_pointer = Add<HLoadNamedField>(
- elements, nullptr,
- HObjectAccess::ForFixedTypedArrayBaseExternalPointer());
- HValue* base_pointer = Add<HLoadNamedField>(
- elements, nullptr, HObjectAccess::ForFixedTypedArrayBaseBasePointer());
- HValue* backing_store = AddUncasted<HAdd>(external_pointer, base_pointer,
- AddOfExternalAndTagged);
-
- if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- NoObservableSideEffectsScope no_effects(this);
- IfBuilder length_checker(this);
- length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
- length_checker.Then();
- IfBuilder negative_checker(this);
- HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
- key, graph()->GetConstant0(), Token::GTE);
- negative_checker.Then();
- HInstruction* result = AddElementAccess(
- backing_store, key, val, bounds_check, checked_object->ActualValue(),
- elements_kind, access_type);
- negative_checker.ElseDeopt(DeoptimizeReason::kNegativeKeyEncountered);
- negative_checker.End();
- length_checker.End();
- return result;
- } else {
- DCHECK(store_mode == STANDARD_STORE);
- checked_key = Add<HBoundsCheck>(key, length);
- return AddElementAccess(backing_store, checked_key, val, checked_object,
- checked_object->ActualValue(), elements_kind,
- access_type);
- }
- }
- DCHECK(fast_smi_only_elements ||
- fast_elements ||
- IsFastDoubleElementsKind(elements_kind));
-
- // In case val is stored into a fast smi array, ensure that the value is a smi
- // before manipulating the backing store. Otherwise the actual store may
- // deopt, leaving the backing store in an invalid state.
- if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
- !val->type().IsSmi()) {
- val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
- }
-
- if (IsGrowStoreMode(store_mode)) {
- NoObservableSideEffectsScope no_effects(this);
- Representation representation = HStoreKeyed::RequiredValueRepresentation(
- elements_kind, STORE_TO_INITIALIZED_ENTRY);
- val = AddUncasted<HForceRepresentation>(val, representation);
- elements = BuildCheckForCapacityGrow(checked_object, elements,
- elements_kind, length, key,
- is_js_array, access_type);
- checked_key = key;
- } else {
- checked_key = Add<HBoundsCheck>(key, length);
-
- if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
- if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
- NoObservableSideEffectsScope no_effects(this);
- elements = BuildCopyElementsOnWrite(checked_object, elements,
- elements_kind, length);
- } else {
- HCheckMaps* check_cow_map = Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map());
- check_cow_map->ClearDependsOnFlag(kElementsKind);
- }
- }
- }
- return AddElementAccess(elements, checked_key, val, checked_object, nullptr,
- elements_kind, access_type, load_mode);
-}
-
-
-HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind,
- HValue* capacity) {
- int elements_size = IsFastDoubleElementsKind(kind)
- ? kDoubleSize
- : kPointerSize;
-
- HConstant* elements_size_value = Add<HConstant>(elements_size);
- HInstruction* mul =
- HMul::NewImul(isolate(), zone(), context(), capacity->ActualValue(),
- elements_size_value);
- AddInstruction(mul);
- mul->ClearFlag(HValue::kCanOverflow);
-
- STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
-
- HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
- HValue* total_size = AddUncasted<HAdd>(mul, header_size);
- total_size->ClearFlag(HValue::kCanOverflow);
- return total_size;
-}
-
-
-HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) {
- int base_size = JSArray::kSize;
- if (mode == TRACK_ALLOCATION_SITE) {
- base_size += AllocationMemento::kSize;
- }
- HConstant* size_in_bytes = Add<HConstant>(base_size);
- return Add<HAllocate>(size_in_bytes, HType::JSArray(), NOT_TENURED,
- JS_OBJECT_TYPE, graph()->GetConstant0());
-}
-
-
-HConstant* HGraphBuilder::EstablishElementsAllocationSize(
- ElementsKind kind,
- int capacity) {
- int base_size = IsFastDoubleElementsKind(kind)
- ? FixedDoubleArray::SizeFor(capacity)
- : FixedArray::SizeFor(capacity);
-
- return Add<HConstant>(base_size);
-}
-
-
-HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
- HValue* size_in_bytes) {
- InstanceType instance_type = IsFastDoubleElementsKind(kind)
- ? FIXED_DOUBLE_ARRAY_TYPE
- : FIXED_ARRAY_TYPE;
-
- return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
- instance_type, graph()->GetConstant0());
-}
-
-
-void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
- ElementsKind kind,
- HValue* capacity) {
- Factory* factory = isolate()->factory();
- Handle<Map> map = IsFastDoubleElementsKind(kind)
- ? factory->fixed_double_array_map()
- : factory->fixed_array_map();
-
- Add<HStoreNamedField>(elements, HObjectAccess::ForMap(), Add<HConstant>(map));
- Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(),
- capacity);
-}
-
-
-HValue* HGraphBuilder::BuildAllocateAndInitializeArray(ElementsKind kind,
- HValue* capacity) {
- // The HForceRepresentation is to prevent a possible deopt on the int-to-smi
- // conversion after allocation but before the new object fields are set.
- capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
- HValue* size_in_bytes = BuildCalculateElementsSize(kind, capacity);
- HValue* new_array = BuildAllocateElements(kind, size_in_bytes);
- BuildInitializeElementsHeader(new_array, kind, capacity);
- return new_array;
-}
-
-
-void HGraphBuilder::BuildJSArrayHeader(HValue* array,
- HValue* array_map,
- HValue* elements,
- AllocationSiteMode mode,
- ElementsKind elements_kind,
- HValue* allocation_site_payload,
- HValue* length_field) {
- Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map);
-
- HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
-
- Add<HStoreNamedField>(
- array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array);
-
- Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(),
- elements != nullptr ? elements : empty_fixed_array);
-
- Add<HStoreNamedField>(
- array, HObjectAccess::ForArrayLength(elements_kind), length_field);
-
- if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationMemento(
- array, Add<HConstant>(JSArray::kSize), allocation_site_payload);
- }
-}
-
-
-HInstruction* HGraphBuilder::AddElementAccess(
- HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
- HValue* backing_store_owner, ElementsKind elements_kind,
- PropertyAccessType access_type, LoadKeyedHoleMode load_mode) {
- if (access_type == STORE) {
- DCHECK(val != NULL);
- if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
- val = Add<HClampToUint8>(val);
- }
- return Add<HStoreKeyed>(elements, checked_key, val, backing_store_owner,
- elements_kind, STORE_TO_INITIALIZED_ENTRY);
- }
-
- DCHECK(access_type == LOAD);
- DCHECK(val == NULL);
- HLoadKeyed* load =
- Add<HLoadKeyed>(elements, checked_key, dependency, backing_store_owner,
- elements_kind, load_mode);
- if (elements_kind == UINT32_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
-}
-
-
-HLoadNamedField* HGraphBuilder::AddLoadMap(HValue* object,
- HValue* dependency) {
- return Add<HLoadNamedField>(object, dependency, HObjectAccess::ForMap());
-}
-
-
-HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
- HValue* dependency) {
- return Add<HLoadNamedField>(
- object, dependency, HObjectAccess::ForElementsPointer());
-}
-
-
-HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(
- HValue* array,
- HValue* dependency) {
- return Add<HLoadNamedField>(
- array, dependency, HObjectAccess::ForFixedArrayLength());
-}
-
-
-HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array,
- ElementsKind kind,
- HValue* dependency) {
- return Add<HLoadNamedField>(
- array, dependency, HObjectAccess::ForArrayLength(kind));
-}
-
-
-HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
- HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
- graph_->GetConstant1());
-
- HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
- new_capacity->ClearFlag(HValue::kCanOverflow);
-
- HValue* min_growth = Add<HConstant>(16);
-
- new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
- new_capacity->ClearFlag(HValue::kCanOverflow);
-
- return new_capacity;
-}
-
-
-HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
- HValue* elements,
- ElementsKind kind,
- ElementsKind new_kind,
- HValue* length,
- HValue* new_capacity) {
- Add<HBoundsCheck>(
- new_capacity,
- Add<HConstant>((kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
- ElementsKindToShiftSize(new_kind)));
-
- HValue* new_elements =
- BuildAllocateAndInitializeArray(new_kind, new_capacity);
-
- BuildCopyElements(elements, kind, new_elements,
- new_kind, length, new_capacity);
-
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- new_elements);
-
- return new_elements;
-}
-
-
-void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
- ElementsKind elements_kind,
- HValue* from,
- HValue* to,
- HValue* value) {
- if (to == NULL) {
- to = AddLoadFixedArrayLength(elements);
- }
-
- // Special loop unrolling case
- STATIC_ASSERT(JSArray::kPreallocatedArrayElements <=
- kElementLoopUnrollThreshold);
- int initial_capacity = -1;
- if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
- int constant_from = from->GetInteger32Constant();
- int constant_to = to->GetInteger32Constant();
-
- if (constant_from == 0 && constant_to <= kElementLoopUnrollThreshold) {
- initial_capacity = constant_to;
- }
- }
-
- if (initial_capacity >= 0) {
- for (int i = 0; i < initial_capacity; i++) {
- HInstruction* key = Add<HConstant>(i);
- Add<HStoreKeyed>(elements, key, value, nullptr, elements_kind);
- }
- } else {
- // Carefully loop backwards so that the "from" remains live through the loop
- // rather than the "to". This often corresponds to keeping length live rather
- // than capacity, which helps register allocation, since length is used more
- // often than capacity after filling with holes.
- LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
-
- HValue* key = builder.BeginBody(to, from, Token::GT);
-
- HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1());
- adjusted_key->ClearFlag(HValue::kCanOverflow);
-
- Add<HStoreKeyed>(elements, adjusted_key, value, nullptr, elements_kind);
-
- builder.EndBody();
- }
-}
-
-
-void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
- ElementsKind elements_kind,
- HValue* from,
- HValue* to) {
- // Fast elements kinds need to be initialized in case the statements below
- // cause a garbage collection.
-
- HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
- ? graph()->GetConstantHole()
- : Add<HConstant>(HConstant::kHoleNaN);
-
- // Since we're about to store a hole value, the store instruction below must
- // assume an elements kind that supports heap object values.
- if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- }
-
- BuildFillElementsWithValue(elements, elements_kind, from, to, hole);
-}
-
-
-void HGraphBuilder::BuildCopyProperties(HValue* from_properties,
- HValue* to_properties, HValue* length,
- HValue* capacity) {
- ElementsKind kind = FAST_ELEMENTS;
-
- BuildFillElementsWithValue(to_properties, kind, length, capacity,
- graph()->GetConstantUndefined());
-
- LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
-
- HValue* key = builder.BeginBody(length, graph()->GetConstant0(), Token::GT);
-
- key = AddUncasted<HSub>(key, graph()->GetConstant1());
- key->ClearFlag(HValue::kCanOverflow);
-
- HValue* element =
- Add<HLoadKeyed>(from_properties, key, nullptr, nullptr, kind);
-
- Add<HStoreKeyed>(to_properties, key, element, nullptr, kind);
-
- builder.EndBody();
-}
-
-
-void HGraphBuilder::BuildCopyElements(HValue* from_elements,
- ElementsKind from_elements_kind,
- HValue* to_elements,
- ElementsKind to_elements_kind,
- HValue* length,
- HValue* capacity) {
- int constant_capacity = -1;
- if (capacity != NULL &&
- capacity->IsConstant() &&
- HConstant::cast(capacity)->HasInteger32Value()) {
- int constant_candidate = HConstant::cast(capacity)->Integer32Value();
- if (constant_candidate <= kElementLoopUnrollThreshold) {
- constant_capacity = constant_candidate;
- }
- }
-
- bool pre_fill_with_holes =
- IsFastDoubleElementsKind(from_elements_kind) &&
- IsFastObjectElementsKind(to_elements_kind);
- if (pre_fill_with_holes) {
- // If the copy might trigger a GC, pre-initialize the FixedArray with holes
- // so that it is always in a consistent state.
- BuildFillElementsWithHole(to_elements, to_elements_kind,
- graph()->GetConstant0(), NULL);
- }
-
- if (constant_capacity != -1) {
- // Unroll the loop for small elements kinds.
- for (int i = 0; i < constant_capacity; i++) {
- HValue* key_constant = Add<HConstant>(i);
- HInstruction* value = Add<HLoadKeyed>(
- from_elements, key_constant, nullptr, nullptr, from_elements_kind);
- Add<HStoreKeyed>(to_elements, key_constant, value, nullptr,
- to_elements_kind);
- }
- } else {
- if (!pre_fill_with_holes &&
- (capacity == NULL || !length->Equals(capacity))) {
- BuildFillElementsWithHole(to_elements, to_elements_kind,
- length, NULL);
- }
-
- LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
-
- HValue* key = builder.BeginBody(length, graph()->GetConstant0(),
- Token::GT);
-
- key = AddUncasted<HSub>(key, graph()->GetConstant1());
- key->ClearFlag(HValue::kCanOverflow);
-
- HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr, nullptr,
- from_elements_kind, ALLOW_RETURN_HOLE);
-
- ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
- IsFastSmiElementsKind(to_elements_kind))
- ? FAST_HOLEY_ELEMENTS : to_elements_kind;
-
- if (IsHoleyElementsKind(from_elements_kind) &&
- from_elements_kind != to_elements_kind) {
- IfBuilder if_hole(this);
- if_hole.If<HCompareHoleAndBranch>(element);
- if_hole.Then();
- HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
- ? Add<HConstant>(HConstant::kHoleNaN)
- : graph()->GetConstantHole();
- Add<HStoreKeyed>(to_elements, key, hole_constant, nullptr, kind);
- if_hole.Else();
- HStoreKeyed* store =
- Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
- store->SetFlag(HValue::kTruncatingToNumber);
- if_hole.End();
- } else {
- HStoreKeyed* store =
- Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
- store->SetFlag(HValue::kTruncatingToNumber);
- }
-
- builder.EndBody();
- }
-
- Counters* counters = isolate()->counters();
- AddIncrementCounter(counters->inlined_copied_elements());
-}
-
-void HGraphBuilder::BuildCreateAllocationMemento(
- HValue* previous_object,
- HValue* previous_object_size,
- HValue* allocation_site) {
- DCHECK(allocation_site != NULL);
- HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
- previous_object, previous_object_size, HType::HeapObject());
- AddStoreMapConstant(
- allocation_memento, isolate()->factory()->allocation_memento_map());
- Add<HStoreNamedField>(
- allocation_memento,
- HObjectAccess::ForAllocationMementoSite(),
- allocation_site);
- if (FLAG_allocation_site_pretenuring) {
- HValue* memento_create_count =
- Add<HLoadNamedField>(allocation_site, nullptr,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kPretenureCreateCountOffset));
- memento_create_count = AddUncasted<HAdd>(
- memento_create_count, graph()->GetConstant1());
- // This smi value is reset to zero after every GC; overflow isn't a problem
- // since the counter is bounded by the new space size.
- memento_create_count->ClearFlag(HValue::kCanOverflow);
- Add<HStoreNamedField>(
- allocation_site, HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kPretenureCreateCountOffset), memento_create_count);
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildGetNativeContext() {
- return Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
-}
-
-HValue* HGraphBuilder::BuildArrayBufferViewFieldAccessor(HValue* object,
- HValue* checked_object,
- FieldIndex index) {
- NoObservableSideEffectsScope scope(this);
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- index.offset(), Representation::Tagged());
- HInstruction* buffer = Add<HLoadNamedField>(
- object, checked_object, HObjectAccess::ForJSArrayBufferViewBuffer());
- HInstruction* field = Add<HLoadNamedField>(object, checked_object, access);
-
- HInstruction* flags = Add<HLoadNamedField>(
- buffer, nullptr, HObjectAccess::ForJSArrayBufferBitField());
- HValue* was_neutered_mask =
- Add<HConstant>(1 << JSArrayBuffer::WasNeutered::kShift);
- HValue* was_neutered_test =
- AddUncasted<HBitwise>(Token::BIT_AND, flags, was_neutered_mask);
-
- IfBuilder if_was_neutered(this);
- if_was_neutered.If<HCompareNumericAndBranch>(
- was_neutered_test, graph()->GetConstant0(), Token::NE);
- if_was_neutered.Then();
- Push(graph()->GetConstant0());
- if_was_neutered.Else();
- Push(field);
- if_was_neutered.End();
-
- return Pop();
-}
-
-HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
- bool track_positions)
- : HGraphBuilder(info, CallInterfaceDescriptor(), track_positions),
- function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN, -1,
- TailCallMode::kAllow),
- ast_context_(NULL),
- break_scope_(NULL),
- inlined_count_(0),
- globals_(10, info->zone()),
- osr_(new (info->zone()) HOsrBuilder(this)),
- bounds_(info->zone()) {
- // This is not initialized in the initializer list because the
- // constructor for the initial state relies on function_state_ == NULL
- // to know it's the initial state.
- function_state_ = &initial_function_state_;
- InitializeAstVisitor(info->isolate());
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id) {
- if (first == NULL) {
- return second;
- } else if (second == NULL) {
- return first;
- } else {
- HBasicBlock* join_block = graph()->CreateBasicBlock();
- Goto(first, join_block);
- Goto(second, join_block);
- join_block->SetJoinId(join_id);
- return join_block;
- }
-}
-
-HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
- BailoutId continue_id,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
- if (continue_block != NULL) {
- if (exit_block != NULL) Goto(exit_block, continue_block);
- continue_block->SetJoinId(continue_id);
- return continue_block;
- }
- return exit_block;
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
- if (body_exit != NULL) Goto(body_exit, loop_entry);
- loop_entry->PostProcessLoopHeader(statement);
- if (break_block != NULL) {
- if (loop_successor != NULL) Goto(loop_successor, break_block);
- break_block->SetJoinId(statement->ExitId());
- return break_block;
- }
- return loop_successor;
-}
-
-
-// Build a new loop header block and set it as the current block.
-HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
- HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- Goto(loop_entry);
- set_current_block(loop_entry);
- return loop_entry;
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
- IterationStatement* statement) {
- HBasicBlock* loop_entry;
-
- if (osr()->HasOsrEntryAt(statement)) {
- loop_entry = osr()->BuildOsrLoopEntry(statement);
- if (function_state()->IsInsideDoExpressionScope()) {
- Bailout(kDoExpressionUnmodelable);
- }
- } else {
- loop_entry = BuildLoopEntry();
- }
- return loop_entry;
-}
-
-
-void HBasicBlock::FinishExit(HControlInstruction* instruction,
- SourcePosition position) {
- Finish(instruction, position);
- ClearEnvironment();
-}
-
-
-std::ostream& operator<<(std::ostream& os, const HBasicBlock& b) {
- return os << "B" << b.block_id();
-}
-
-HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
- : isolate_(info->isolate()),
- next_block_id_(0),
- entry_block_(NULL),
- blocks_(8, info->zone()),
- values_(16, info->zone()),
- phi_list_(NULL),
- uint32_instructions_(NULL),
- osr_(NULL),
- info_(info),
- descriptor_(descriptor),
- zone_(info->zone()),
- allow_code_motion_(false),
- use_optimistic_licm_(false),
- depends_on_empty_array_proto_elements_(false),
- depends_on_string_length_overflow_(false),
- type_change_checksum_(0),
- maximum_environment_size_(0),
- no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false) {
- if (info->IsStub()) {
- // For stubs, explicitly add the context to the environment.
- start_environment_ =
- new (zone_) HEnvironment(zone_, descriptor.GetParameterCount() + 1);
- } else {
- start_environment_ =
- new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
- }
- start_environment_->set_ast_id(BailoutId::FunctionContext());
- entry_block_ = CreateBasicBlock();
- entry_block_->SetInitialEnvironment(start_environment_);
-}
-
-
-HBasicBlock* HGraph::CreateBasicBlock() {
- HBasicBlock* result = new(zone()) HBasicBlock(this);
- blocks_.Add(result, zone());
- return result;
-}
-
-
-void HGraph::FinalizeUniqueness() {
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < blocks()->length(); ++i) {
- for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- it.Current()->FinalizeUniqueness();
- }
- }
-}
-
-
-// Block ordering was implemented with two mutually recursive methods,
-// HGraph::Postorder and HGraph::PostorderLoopBlocks.
-// The recursion could lead to stack overflow so the algorithm has been
-// implemented iteratively.
-// At a high level the algorithm looks like this:
-//
-// Postorder(block, loop_header) : {
-// if (block has already been visited or is of another loop) return;
-// mark block as visited;
-// if (block is a loop header) {
-// VisitLoopMembers(block, loop_header);
-// VisitSuccessorsOfLoopHeader(block);
-// } else {
- // VisitSuccessors(block, loop_header)
-// }
-// put block in result list;
-// }
-//
-// VisitLoopMembers(block, outer_loop_header) {
-// foreach (block b in block loop members) {
-// VisitSuccessorsOfLoopMember(b, outer_loop_header);
-// if (b is loop header) VisitLoopMembers(b);
-// }
-// }
-//
-// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
-// foreach (block b in block successors) Postorder(b, outer_loop_header)
-// }
-//
-// VisitSuccessorsOfLoopHeader(block) {
-// foreach (block b in block successors) Postorder(b, block)
-// }
-//
-// VisitSuccessors(block, loop_header) {
-// foreach (block b in block successors) Postorder(b, loop_header)
-// }
-//
- // The ordering is started by calling Postorder(entry, NULL).
-//
-// Each instance of PostorderProcessor represents the "stack frame" of the
-// recursion, and particularly keeps the state of the loop (iteration) of the
-// "Visit..." function it represents.
- // To recycle memory we keep all the frames in a doubly linked list, but
- // this means that we cannot use constructors to initialize the frames.
-//
-class PostorderProcessor : public ZoneObject {
- public:
- // Back link (towards the stack bottom).
- PostorderProcessor* parent() {return father_; }
- // Forward link (towards the stack top).
- PostorderProcessor* child() {return child_; }
- HBasicBlock* block() { return block_; }
- HLoopInformation* loop() { return loop_; }
- HBasicBlock* loop_header() { return loop_header_; }
-
- static PostorderProcessor* CreateEntryProcessor(Zone* zone,
- HBasicBlock* block) {
- PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
- return result->SetupSuccessors(zone, block, NULL);
- }
-
- PostorderProcessor* PerformStep(Zone* zone,
- ZoneList<HBasicBlock*>* order) {
- PostorderProcessor* next =
- PerformNonBacktrackingStep(zone, order);
- if (next != NULL) {
- return next;
- } else {
- return Backtrack(zone, order);
- }
- }
-
- private:
- explicit PostorderProcessor(PostorderProcessor* father)
- : father_(father), child_(NULL), successor_iterator(NULL) { }
-
- // Each enum value states the cycle whose state is kept by this instance.
- enum LoopKind {
- NONE,
- SUCCESSORS,
- SUCCESSORS_OF_LOOP_HEADER,
- LOOP_MEMBERS,
- SUCCESSORS_OF_LOOP_MEMBER
- };
-
- // Each "Setup..." method is like a constructor for a cycle state.
- PostorderProcessor* SetupSuccessors(Zone* zone,
- HBasicBlock* block,
- HBasicBlock* loop_header) {
- if (block == NULL || block->IsOrdered() ||
- block->parent_loop_header() != loop_header) {
- kind_ = NONE;
- block_ = NULL;
- loop_ = NULL;
- loop_header_ = NULL;
- return this;
- } else {
- block_ = block;
- loop_ = NULL;
- block->MarkAsOrdered();
-
- if (block->IsLoopHeader()) {
- kind_ = SUCCESSORS_OF_LOOP_HEADER;
- loop_header_ = block;
- InitializeSuccessors();
- PostorderProcessor* result = Push(zone);
- return result->SetupLoopMembers(zone, block, block->loop_information(),
- loop_header);
- } else {
- DCHECK(block->IsFinished());
- kind_ = SUCCESSORS;
- loop_header_ = loop_header;
- InitializeSuccessors();
- return this;
- }
- }
- }
-
- PostorderProcessor* SetupLoopMembers(Zone* zone,
- HBasicBlock* block,
- HLoopInformation* loop,
- HBasicBlock* loop_header) {
- kind_ = LOOP_MEMBERS;
- block_ = block;
- loop_ = loop;
- loop_header_ = loop_header;
- InitializeLoopMembers();
- return this;
- }
-
- PostorderProcessor* SetupSuccessorsOfLoopMember(
- HBasicBlock* block,
- HLoopInformation* loop,
- HBasicBlock* loop_header) {
- kind_ = SUCCESSORS_OF_LOOP_MEMBER;
- block_ = block;
- loop_ = loop;
- loop_header_ = loop_header;
- InitializeSuccessors();
- return this;
- }
-
- // This method "allocates" a new stack frame.
- PostorderProcessor* Push(Zone* zone) {
- if (child_ == NULL) {
- child_ = new(zone) PostorderProcessor(this);
- }
- return child_;
- }
-
- void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
- DCHECK(block_->end()->FirstSuccessor() == NULL ||
- order->Contains(block_->end()->FirstSuccessor()) ||
- block_->end()->FirstSuccessor()->IsLoopHeader());
- DCHECK(block_->end()->SecondSuccessor() == NULL ||
- order->Contains(block_->end()->SecondSuccessor()) ||
- block_->end()->SecondSuccessor()->IsLoopHeader());
- order->Add(block_, zone);
- }
-
- // This method performs the basic step of walking up the stack.
- PostorderProcessor* Pop(Zone* zone,
- ZoneList<HBasicBlock*>* order) {
- switch (kind_) {
- case SUCCESSORS:
- case SUCCESSORS_OF_LOOP_HEADER:
- ClosePostorder(order, zone);
- return father_;
- case LOOP_MEMBERS:
- return father_;
- case SUCCESSORS_OF_LOOP_MEMBER:
- if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
- // In this case we need to perform a LOOP_MEMBERS cycle so we
- // initialize it and return this instead of father.
- return SetupLoopMembers(zone, block(),
- block()->loop_information(), loop_header_);
- } else {
- return father_;
- }
- case NONE:
- return father_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- // Walks up the stack.
- PostorderProcessor* Backtrack(Zone* zone,
- ZoneList<HBasicBlock*>* order) {
- PostorderProcessor* parent = Pop(zone, order);
- while (parent != NULL) {
- PostorderProcessor* next =
- parent->PerformNonBacktrackingStep(zone, order);
- if (next != NULL) {
- return next;
- } else {
- parent = parent->Pop(zone, order);
- }
- }
- return NULL;
- }
-
- PostorderProcessor* PerformNonBacktrackingStep(
- Zone* zone,
- ZoneList<HBasicBlock*>* order) {
- HBasicBlock* next_block;
- switch (kind_) {
- case SUCCESSORS:
- next_block = AdvanceSuccessors();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block, loop_header_);
- }
- break;
- case SUCCESSORS_OF_LOOP_HEADER:
- next_block = AdvanceSuccessors();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block, block());
- }
- break;
- case LOOP_MEMBERS:
- next_block = AdvanceLoopMembers();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessorsOfLoopMember(next_block,
- loop_, loop_header_);
- }
- break;
- case SUCCESSORS_OF_LOOP_MEMBER:
- next_block = AdvanceSuccessors();
- if (next_block != NULL) {
- PostorderProcessor* result = Push(zone);
- return result->SetupSuccessors(zone, next_block, loop_header_);
- }
- break;
- case NONE:
- return NULL;
- }
- return NULL;
- }
-
- // The following two methods implement a "foreach b in successors" cycle.
- void InitializeSuccessors() {
- loop_index = 0;
- loop_length = 0;
- successor_iterator = HSuccessorIterator(block_->end());
- }
-
- HBasicBlock* AdvanceSuccessors() {
- if (!successor_iterator.Done()) {
- HBasicBlock* result = successor_iterator.Current();
- successor_iterator.Advance();
- return result;
- }
- return NULL;
- }
-
- // The following two methods implement a "foreach b in loop members" cycle.
- void InitializeLoopMembers() {
- loop_index = 0;
- loop_length = loop_->blocks()->length();
- }
-
- HBasicBlock* AdvanceLoopMembers() {
- if (loop_index < loop_length) {
- HBasicBlock* result = loop_->blocks()->at(loop_index);
- loop_index++;
- return result;
- } else {
- return NULL;
- }
- }
-
- LoopKind kind_;
- PostorderProcessor* father_;
- PostorderProcessor* child_;
- HLoopInformation* loop_;
- HBasicBlock* block_;
- HBasicBlock* loop_header_;
- int loop_index;
- int loop_length;
- HSuccessorIterator successor_iterator;
-};
-
-
-void HGraph::OrderBlocks() {
- CompilationPhase phase("H_Block ordering", info());
-
-#ifdef DEBUG
- // Initially the blocks must not be ordered.
- for (int i = 0; i < blocks_.length(); ++i) {
- DCHECK(!blocks_[i]->IsOrdered());
- }
-#endif
-
- PostorderProcessor* postorder =
- PostorderProcessor::CreateEntryProcessor(zone(), blocks_[0]);
- blocks_.Rewind(0);
- while (postorder) {
- postorder = postorder->PerformStep(zone(), &blocks_);
- }
-
-#ifdef DEBUG
- // Now all blocks must be marked as ordered.
- for (int i = 0; i < blocks_.length(); ++i) {
- DCHECK(blocks_[i]->IsOrdered());
- }
-#endif
-
- // Reverse block list and assign block IDs.
- for (int i = 0, j = blocks_.length(); --j >= i; ++i) {
- HBasicBlock* bi = blocks_[i];
- HBasicBlock* bj = blocks_[j];
- bi->set_block_id(j);
- bj->set_block_id(i);
- blocks_[i] = bj;
- blocks_[j] = bi;
- }
-}
-
-
-void HGraph::AssignDominators() {
- HPhase phase("H_Assign dominators", this);
- for (int i = 0; i < blocks_.length(); ++i) {
- HBasicBlock* block = blocks_[i];
- if (block->IsLoopHeader()) {
- // Only the first predecessor of a loop header is from outside the loop.
- // All others are back edges, and thus cannot dominate the loop header.
- block->AssignCommonDominator(block->predecessors()->first());
- block->AssignLoopSuccessorDominators();
- } else {
- for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
- blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
- }
- }
- }
-}
-
-
-bool HGraph::CheckArgumentsPhiUses() {
- int block_count = blocks_.length();
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- // We don't support phi uses of arguments for now.
- if (phi->CheckFlag(HValue::kIsArguments)) return false;
- }
- }
- return true;
-}
-
-
-bool HGraph::CheckConstPhiUses() {
- int block_count = blocks_.length();
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- // Check for the hole value (from an uninitialized const).
- for (int k = 0; k < phi->OperandCount(); k++) {
- if (phi->OperandAt(k) == GetConstantHole()) return false;
- }
- }
- }
- return true;
-}
-
-
-void HGraph::CollectPhis() {
- int block_count = blocks_.length();
- phi_list_ = new(zone()) ZoneList<HPhi*>(block_count, zone());
- for (int i = 0; i < block_count; ++i) {
- for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
- HPhi* phi = blocks_[i]->phis()->at(j);
- phi_list_->Add(phi, zone());
- }
- }
-}
-
-
-// Implementation of utility class to encapsulate the translation state for
-// a (possibly inlined) function.
-FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
- CompilationInfo* info, InliningKind inlining_kind,
- int inlining_id, TailCallMode tail_call_mode)
- : owner_(owner),
- compilation_info_(info),
- call_context_(NULL),
- inlining_kind_(inlining_kind),
- tail_call_mode_(tail_call_mode),
- function_return_(NULL),
- test_context_(NULL),
- entry_(NULL),
- arguments_object_(NULL),
- arguments_elements_(NULL),
- inlining_id_(inlining_id),
- outer_source_position_(SourcePosition::Unknown()),
- do_expression_scope_count_(0),
- outer_(owner->function_state()) {
- if (outer_ != NULL) {
- // State for an inline function.
- if (owner->ast_context()->IsTest()) {
- HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
- HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
- if_true->MarkAsInlineReturnTarget(owner->current_block());
- if_false->MarkAsInlineReturnTarget(owner->current_block());
- TestContext* outer_test_context = TestContext::cast(owner->ast_context());
- Expression* cond = outer_test_context->condition();
- // The AstContext constructor pushes this onto the context stack. This newed
- // instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ = new TestContext(owner, cond, if_true, if_false);
- } else {
- function_return_ = owner->graph()->CreateBasicBlock();
- function_return()->MarkAsInlineReturnTarget(owner->current_block());
- }
- // Set this after possibly allocating a new TestContext above.
- call_context_ = owner->ast_context();
- }
-
- // Push on the state stack.
- owner->set_function_state(this);
-
- if (owner->is_tracking_positions()) {
- outer_source_position_ = owner->source_position();
- owner->EnterInlinedSource(inlining_id);
- owner->SetSourcePosition(info->shared_info()->start_position());
- }
-}
-
-
-FunctionState::~FunctionState() {
- delete test_context_;
- owner_->set_function_state(outer_);
-
- if (owner_->is_tracking_positions()) {
- owner_->set_source_position(outer_source_position_);
- owner_->EnterInlinedSource(outer_->inlining_id());
- }
-}
-
-
-// Implementation of utility classes to represent an expression's context in
-// the AST.
-AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
- : owner_(owner),
- kind_(kind),
- outer_(owner->ast_context()),
- typeof_mode_(NOT_INSIDE_TYPEOF) {
- owner->set_ast_context(this); // Push.
-#ifdef DEBUG
- DCHECK_EQ(JS_FUNCTION, owner->environment()->frame_type());
- original_length_ = owner->environment()->length();
-#endif
-}
-
-
-AstContext::~AstContext() {
- owner_->set_ast_context(outer_); // Pop.
-}
-
-
-EffectContext::~EffectContext() {
- DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
- (owner()->environment()->length() == original_length_ &&
- (owner()->environment()->frame_type() == JS_FUNCTION ||
- owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
-}
-
-
-ValueContext::~ValueContext() {
- DCHECK(owner()->HasStackOverflow() || owner()->current_block() == NULL ||
- (owner()->environment()->length() == original_length_ + 1 &&
- (owner()->environment()->frame_type() == JS_FUNCTION ||
- owner()->environment()->frame_type() == TAIL_CALLER_FUNCTION)));
-}
-
-
-void EffectContext::ReturnValue(HValue* value) {
- // The value is simply ignored.
-}
-
-
-void ValueContext::ReturnValue(HValue* value) {
- // The value is tracked in the bailout environment, and communicated
- // through the environment as the result of the expression.
- if (value->CheckFlag(HValue::kIsArguments)) {
- if (flag_ == ARGUMENTS_FAKED) {
- value = owner()->graph()->GetConstantUndefined();
- } else if (!arguments_allowed()) {
- owner()->Bailout(kBadValueContextForArgumentsValue);
- }
- }
- owner()->Push(value);
-}
-
-
-void TestContext::ReturnValue(HValue* value) {
- BuildBranch(value);
-}
-
-
-void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
- DCHECK(!instr->IsControlInstruction());
- owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- }
-}
-
-
-void EffectContext::ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) {
- DCHECK(!instr->HasObservableSideEffects());
- HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
- instr->SetSuccessorAt(0, empty_true);
- instr->SetSuccessorAt(1, empty_false);
- owner()->FinishCurrentBlock(instr);
- HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
- owner()->set_current_block(join);
-}
-
-
-void EffectContext::ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) {
- HBasicBlock* true_branch = NULL;
- HBasicBlock* false_branch = NULL;
- continuation->Continue(&true_branch, &false_branch);
- if (!continuation->IsTrueReachable()) {
- owner()->set_current_block(false_branch);
- } else if (!continuation->IsFalseReachable()) {
- owner()->set_current_block(true_branch);
- } else {
- HBasicBlock* join = owner()->CreateJoin(true_branch, false_branch, ast_id);
- owner()->set_current_block(join);
- }
-}
-
-
-void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
- DCHECK(!instr->IsControlInstruction());
- if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
- }
- owner()->AddInstruction(instr);
- owner()->Push(instr);
- if (instr->HasObservableSideEffects()) {
- owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- }
-}
-
-
-void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
- DCHECK(!instr->HasObservableSideEffects());
- if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
- }
- HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
- HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
- instr->SetSuccessorAt(0, materialize_true);
- instr->SetSuccessorAt(1, materialize_false);
- owner()->FinishCurrentBlock(instr);
- owner()->set_current_block(materialize_true);
- owner()->Push(owner()->graph()->GetConstantTrue());
- owner()->set_current_block(materialize_false);
- owner()->Push(owner()->graph()->GetConstantFalse());
- HBasicBlock* join =
- owner()->CreateJoin(materialize_true, materialize_false, ast_id);
- owner()->set_current_block(join);
-}
-
-
-void ValueContext::ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) {
- HBasicBlock* materialize_true = NULL;
- HBasicBlock* materialize_false = NULL;
- continuation->Continue(&materialize_true, &materialize_false);
- if (continuation->IsTrueReachable()) {
- owner()->set_current_block(materialize_true);
- owner()->Push(owner()->graph()->GetConstantTrue());
- owner()->set_current_block(materialize_true);
- }
- if (continuation->IsFalseReachable()) {
- owner()->set_current_block(materialize_false);
- owner()->Push(owner()->graph()->GetConstantFalse());
- owner()->set_current_block(materialize_false);
- }
- if (continuation->TrueAndFalseReachable()) {
- HBasicBlock* join =
- owner()->CreateJoin(materialize_true, materialize_false, ast_id);
- owner()->set_current_block(join);
- }
-}
-
-
-void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
- DCHECK(!instr->IsControlInstruction());
- HOptimizedGraphBuilder* builder = owner();
- builder->AddInstruction(instr);
- // We expect a simulate after every expression with side effects, though
- // this one isn't actually needed (and wouldn't work if it were targeted).
- if (instr->HasObservableSideEffects()) {
- builder->Push(instr);
- builder->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- builder->Pop();
- }
- BuildBranch(instr);
-}
-
-
-void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
- DCHECK(!instr->HasObservableSideEffects());
- HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
- instr->SetSuccessorAt(0, empty_true);
- instr->SetSuccessorAt(1, empty_false);
- owner()->FinishCurrentBlock(instr);
- owner()->Goto(empty_true, if_true(), owner()->function_state());
- owner()->Goto(empty_false, if_false(), owner()->function_state());
- owner()->set_current_block(NULL);
-}
-
-
-void TestContext::ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) {
- HBasicBlock* true_branch = NULL;
- HBasicBlock* false_branch = NULL;
- continuation->Continue(&true_branch, &false_branch);
- if (continuation->IsTrueReachable()) {
- owner()->Goto(true_branch, if_true(), owner()->function_state());
- }
- if (continuation->IsFalseReachable()) {
- owner()->Goto(false_branch, if_false(), owner()->function_state());
- }
- owner()->set_current_block(NULL);
-}
-
-
-void TestContext::BuildBranch(HValue* value) {
- // We expect the graph to be in edge-split form: there is no edge that
- // connects a branch node to a join node. We conservatively ensure that
- // property by always adding an empty block on the outgoing edges of this
- // branch.
- HOptimizedGraphBuilder* builder = owner();
- if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
- builder->Bailout(kArgumentsObjectValueInATestContext);
- }
- ToBooleanHints expected(condition()->to_boolean_types());
- ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
-}
-
-
-// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
-#define CHECK_BAILOUT(call) \
- do { \
- call; \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define CHECK_ALIVE(call) \
- do { \
- call; \
- if (HasStackOverflow() || current_block() == NULL) return; \
- } while (false)
-
-
-#define CHECK_ALIVE_OR_RETURN(call, value) \
- do { \
- call; \
- if (HasStackOverflow() || current_block() == NULL) return value; \
- } while (false)
-
-
-void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
- current_info()->AbortOptimization(reason);
- SetStackOverflow();
-}
-
-
-void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
- EffectContext for_effect(this);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
- ArgumentsAllowedFlag flag) {
- ValueContext for_value(this, flag);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
- ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
- for_value.set_typeof_mode(INSIDE_TYPEOF);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
- TestContext for_control(this, expr, true_block, false_block);
- Visit(expr);
-}
-
-
-void HOptimizedGraphBuilder::VisitExpressions(
- ZoneList<Expression*>* exprs) {
- for (int i = 0; i < exprs->length(); ++i) {
- CHECK_ALIVE(VisitForValue(exprs->at(i)));
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs,
- ArgumentsAllowedFlag flag) {
- for (int i = 0; i < exprs->length(); ++i) {
- CHECK_ALIVE(VisitForValue(exprs->at(i), flag));
- }
-}
-
-
-bool HOptimizedGraphBuilder::BuildGraph() {
- if (IsDerivedConstructor(current_info()->literal()->kind())) {
- Bailout(kSuperReference);
- return false;
- }
-
- DeclarationScope* scope = current_info()->scope();
- SetUpScope(scope);
-
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- Goto(body_entry);
- body_entry->SetJoinId(BailoutId::FunctionEntry());
- set_current_block(body_entry);
-
- VisitDeclarations(scope->declarations());
- Add<HSimulate>(BailoutId::Declarations());
-
- Add<HStackCheck>(HStackCheck::kFunctionEntry);
-
- VisitStatements(current_info()->literal()->body());
- if (HasStackOverflow()) return false;
-
- if (current_block() != NULL) {
- Add<HReturn>(graph()->GetConstantUndefined());
- set_current_block(NULL);
- }
-
- // If the checksum of the number of type info changes is the same as the
- // last time this function was compiled, then this recompile is likely not
- // due to missing/inadequate type feedback, but rather too aggressive
- // optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(current_info()->shared_info()->code());
- DCHECK(unoptimized_code->kind() == Code::FUNCTION);
- Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
- int checksum = type_info->own_type_change_checksum();
- int composite_checksum = graph()->update_type_change_checksum(checksum);
- graph()->set_use_optimistic_licm(
- !type_info->matches_inlined_type_change_checksum(composite_checksum));
- type_info->set_inlined_type_change_checksum(composite_checksum);
-
- // Set this predicate early to avoid handle deref during graph optimization.
- graph()->set_allow_code_motion(
- current_info()->IsStub() ||
- current_info()->shared_info()->deopt_count() + 1 < FLAG_max_deopt_count);
-
- // Perform any necessary OSR-specific cleanups or changes to the graph.
- osr()->FinishGraph();
-
- return true;
-}
-
-
-bool HGraph::Optimize(BailoutReason* bailout_reason) {
- OrderBlocks();
- AssignDominators();
-
- // We need to create a HConstant "zero" now so that GVN will fold every
- // zero-valued constant in the graph together.
- // The constant is needed to make idef-based bounds check work: the pass
- // evaluates relations with "zero" and that zero cannot be created after GVN.
- GetConstant0();
-
-#ifdef DEBUG
- // Do a full verify after building the graph and computing dominators.
- Verify(true);
-#endif
-
- if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) {
- Run<HEnvironmentLivenessAnalysisPhase>();
- }
-
- if (!CheckConstPhiUses()) {
- *bailout_reason = kUnsupportedPhiUseOfConstVariable;
- return false;
- }
- Run<HRedundantPhiEliminationPhase>();
- if (!CheckArgumentsPhiUses()) {
- *bailout_reason = kUnsupportedPhiUseOfArguments;
- return false;
- }
-
- // Find and mark unreachable code to simplify optimizations, especially gvn,
- // where unreachable code could unnecessarily defeat LICM.
- Run<HMarkUnreachableBlocksPhase>();
-
- if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
- if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
-
- if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
-
- CollectPhis();
-
- if (has_osr()) osr()->FinishOsrValues();
-
- Run<HInferRepresentationPhase>();
-
- // Remove HSimulate instructions that have turned out not to be needed
- // after all by folding them into the following HSimulate.
- // This must happen after inferring representations.
- Run<HMergeRemovableSimulatesPhase>();
-
- Run<HRepresentationChangesPhase>();
-
- Run<HInferTypesPhase>();
-
- // Must be performed before canonicalization to ensure that Canonicalize
- // will not remove semantically meaningful ToInt32 operations e.g. BIT_OR with
- // zero.
- Run<HUint32AnalysisPhase>();
-
- if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
-
- if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
-
- if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
-
- if (FLAG_store_elimination) Run<HStoreEliminationPhase>();
-
- Run<HRangeAnalysisPhase>();
-
- // Eliminate redundant stack checks on backwards branches.
- Run<HStackCheckEliminationPhase>();
-
- if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
- if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
- if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
-
- RestoreActualValues();
-
- // Find unreachable code a second time; GVN and other optimizations may have
- // made blocks unreachable that were previously reachable.
- Run<HMarkUnreachableBlocksPhase>();
-
- return true;
-}
-
-
-void HGraph::RestoreActualValues() {
- HPhase phase("H_Restore actual values", this);
-
- for (int block_index = 0; block_index < blocks()->length(); block_index++) {
- HBasicBlock* block = blocks()->at(block_index);
-
-#ifdef DEBUG
- for (int i = 0; i < block->phis()->length(); i++) {
- HPhi* phi = block->phis()->at(i);
- DCHECK(phi->ActualValue() == phi);
- }
-#endif
-
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->ActualValue() == instruction) continue;
- if (instruction->CheckFlag(HValue::kIsDead)) {
- // The instruction was marked as deleted but left in the graph
- // as a control flow dependency point for subsequent
- // instructions.
- instruction->DeleteAndReplaceWith(instruction->ActualValue());
- } else {
- DCHECK(instruction->IsInformativeDefinition());
- if (instruction->IsPurelyInformativeDefinition()) {
- instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
- } else {
- instruction->ReplaceAllUsesWith(instruction->ActualValue());
- }
- }
- }
- }
-}
-
-
-void HOptimizedGraphBuilder::PushArgumentsFromEnvironment(int count) {
- ZoneList<HValue*> arguments(count, zone());
- for (int i = 0; i < count; ++i) {
- arguments.Add(Pop(), zone());
- }
-
- HPushArguments* push_args = New<HPushArguments>();
- while (!arguments.is_empty()) {
- push_args->AddInput(arguments.RemoveLast());
- }
- AddInstruction(push_args);
-}
-
-
-template <class Instruction>
-HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
- PushArgumentsFromEnvironment(call->argument_count());
- return call;
-}
-
-void HOptimizedGraphBuilder::SetUpScope(DeclarationScope* scope) {
- HEnvironment* prolog_env = environment();
- int parameter_count = environment()->parameter_count();
- ZoneList<HValue*> parameters(parameter_count, zone());
- for (int i = 0; i < parameter_count; ++i) {
- HInstruction* parameter = Add<HParameter>(static_cast<unsigned>(i));
- parameters.Add(parameter, zone());
- environment()->Bind(i, parameter);
- }
-
- HConstant* undefined_constant = graph()->GetConstantUndefined();
- // Initialize specials and locals to undefined.
- for (int i = parameter_count + 1; i < environment()->length(); ++i) {
- environment()->Bind(i, undefined_constant);
- }
- Add<HPrologue>();
-
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- GotoNoSimulate(body_entry);
- set_current_block(body_entry);
-
- // Initialize context of prolog environment to undefined.
- prolog_env->BindContext(undefined_constant);
-
- // First special is HContext.
- HInstruction* context = Add<HContext>();
- environment()->BindContext(context);
-
- // Create an arguments object containing the initial parameters. Set the
- // initial values of the parameters, including "this", which has parameter
- // index 0.
- DCHECK_EQ(scope->num_parameters() + 1, parameter_count);
- HArgumentsObject* arguments_object = New<HArgumentsObject>(parameter_count);
- for (int i = 0; i < parameter_count; ++i) {
- HValue* parameter = parameters.at(i);
- arguments_object->AddArgument(parameter, zone());
- }
-
- AddInstruction(arguments_object);
-
- // Handle the arguments and arguments shadow variables specially (they do
- // not have declarations).
- if (scope->arguments() != NULL) {
- environment()->Bind(scope->arguments(), arguments_object);
- }
-
- if (scope->rest_parameter() != nullptr) {
- return Bailout(kRestParameter);
- }
-
- if (scope->this_function_var() != nullptr ||
- scope->new_target_var() != nullptr) {
- return Bailout(kSuperReference);
- }
-
- // Trace the call.
- if (FLAG_trace && top_info()->IsOptimizing()) {
- Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kTraceEnter), 0);
- }
-}
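
Read off the code above, the environment that SetUpScope leaves behind is laid out roughly as follows; the context slot is the "first special" mentioned in the comment. This is an illustrative sketch, not a normative layout description:

// slot 0                       : the receiver ("this"), parameter index 0
// slots 1 .. parameter_count-1 : the declared parameters
// slot  parameter_count        : the first special, later bound to the HContext
// slots parameter_count+1 ...  : remaining specials and locals, undefined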
-
-
-void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Statement* stmt = statements->at(i);
- CHECK_ALIVE(Visit(stmt));
- if (stmt->IsJump()) break;
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- Scope* outer_scope = scope();
- Scope* scope = stmt->scope();
- BreakAndContinueInfo break_info(stmt, outer_scope);
-
- { BreakAndContinueScope push(&break_info, this);
- if (scope != NULL) {
- if (scope->NeedsContext()) {
- // Load the function object.
- DeclarationScope* declaration_scope = scope->GetDeclarationScope();
- HInstruction* function;
- HValue* outer_context = environment()->context();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_eval_scope()) {
- function = new (zone())
- HLoadContextSlot(outer_context, Context::CLOSURE_INDEX,
- HLoadContextSlot::kNoCheck);
- } else {
- function = New<HThisFunction>();
- }
- AddInstruction(function);
- // Allocate a block context and store it to the stack frame.
- HValue* scope_info = Add<HConstant>(scope->scope_info());
- Add<HPushArguments>(scope_info, function);
- HInstruction* inner_context = Add<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
- inner_context->SetFlag(HValue::kHasNoObservableSideEffects);
- set_scope(scope);
- environment()->BindContext(inner_context);
- }
- VisitDeclarations(scope->declarations());
- AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
- }
- CHECK_BAILOUT(VisitStatements(stmt->statements()));
- }
- set_scope(outer_scope);
- if (scope != NULL && current_block() != NULL &&
- scope->ContextLocalCount() > 0) {
- HValue* inner_context = environment()->context();
- HValue* outer_context = Add<HLoadNamedField>(
- inner_context, nullptr,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
-
- environment()->BindContext(outer_context);
- }
- HBasicBlock* break_block = break_info.break_block();
- if (break_block != NULL) {
- if (current_block() != NULL) Goto(break_block);
- break_block->SetJoinId(stmt->ExitId());
- set_current_block(break_block);
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- VisitForEffect(stmt->expression());
-}
-
-
-void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-}
-
-
-void HOptimizedGraphBuilder::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* stmt) {
- Visit(stmt->statement());
-}
-
-
-void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (stmt->condition()->ToBooleanIsTrue()) {
- Add<HSimulate>(stmt->ThenId());
- Visit(stmt->then_statement());
- } else if (stmt->condition()->ToBooleanIsFalse()) {
- Add<HSimulate>(stmt->ElseId());
- Visit(stmt->else_statement());
- } else {
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- HBasicBlock* cond_false = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
-
- // Technically, we should be able to handle the case when one side of
- // the test is not connected, but this can trip up liveness analysis
- // if we do not fully connect the test context based on some optimistic
- // assumption. If such an assumption were violated, we would end up with
- // an environment with optimized-out values. So we should always
- // conservatively connect the test context.
- CHECK(cond_true->HasPredecessor());
- CHECK(cond_false->HasPredecessor());
-
- cond_true->SetJoinId(stmt->ThenId());
- set_current_block(cond_true);
- CHECK_BAILOUT(Visit(stmt->then_statement()));
- cond_true = current_block();
-
- cond_false->SetJoinId(stmt->ElseId());
- set_current_block(cond_false);
- CHECK_BAILOUT(Visit(stmt->else_statement()));
- cond_false = current_block();
-
- HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
- set_current_block(join);
- }
-}
-
-
-HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
- BreakableStatement* stmt,
- BreakType type,
- Scope** scope,
- int* drop_extra) {
- *drop_extra = 0;
- BreakAndContinueScope* current = this;
- while (current != NULL && current->info()->target() != stmt) {
- *drop_extra += current->info()->drop_extra();
- current = current->next();
- }
- DCHECK(current != NULL); // Always found (unless stack is malformed).
- *scope = current->info()->scope();
-
- if (type == BREAK) {
- *drop_extra += current->info()->drop_extra();
- }
-
- HBasicBlock* block = NULL;
- switch (type) {
- case BREAK:
- block = current->info()->break_block();
- if (block == NULL) {
- block = current->owner()->graph()->CreateBasicBlock();
- current->info()->set_break_block(block);
- }
- break;
-
- case CONTINUE:
- block = current->info()->continue_block();
- if (block == NULL) {
- block = current->owner()->graph()->CreateBasicBlock();
- current->info()->set_continue_block(block);
- }
- break;
- }
-
- return block;
-}
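
Get() above resolves a break or continue by walking outwards through the stack of enclosing breakable constructs, accumulating drop_extra for every construct it skips (for BREAK it also adds the target's own drop_extra); the for-in builder further down appears to register a drop_extra of 5 for its loop bookkeeping values. A minimal stand-alone sketch of that accumulation, with hypothetical Info/Node stand-ins for the builder classes:

#include <cassert>
#include <cstddef>

struct Info { const void* target; int drop_extra; };
struct Node { Info info; Node* next; };

// Mirrors the while-loop in Get(): skip enclosing constructs until the target
// is found, summing the values each skipped construct keeps on the stack.
// (For BREAK, Get() additionally adds the target's own drop_extra.)
static int DropExtraFor(Node* current, const void* target) {
  int drop = 0;
  while (current != nullptr && current->info.target != target) {
    drop += current->info.drop_extra;
    current = current->next;
  }
  assert(current != nullptr);  // the target statement is always on the chain
  return drop;
}

int main() {
  int outer_stmt = 0;  // stands in for the BreakableStatement being targeted
  Node outer{{&outer_stmt, 0}, nullptr};
  Node for_in{{nullptr, 5}, &outer};  // an enclosing for-in keeps 5 loop values
  assert(DropExtraFor(&for_in, &outer_stmt) == 5);
  return 0;
}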
-
-
-void HOptimizedGraphBuilder::VisitContinueStatement(
- ContinueStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- if (function_state()->IsInsideDoExpressionScope()) {
- return Bailout(kDoExpressionUnmodelable);
- }
-
- Scope* outer_scope = NULL;
- Scope* inner_scope = scope();
- int drop_extra = 0;
- HBasicBlock* continue_block = break_scope()->Get(
- stmt->target(), BreakAndContinueScope::CONTINUE,
- &outer_scope, &drop_extra);
- HValue* context = environment()->context();
- Drop(drop_extra);
- int context_pop_count = inner_scope->ContextChainLength(outer_scope);
- if (context_pop_count > 0) {
- while (context_pop_count-- > 0) {
- HInstruction* context_instruction = Add<HLoadNamedField>(
- context, nullptr,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- context = context_instruction;
- }
- environment()->BindContext(context);
- }
-
- Goto(continue_block);
- set_current_block(NULL);
-}
-
-
-void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- if (function_state()->IsInsideDoExpressionScope()) {
- return Bailout(kDoExpressionUnmodelable);
- }
-
- Scope* outer_scope = NULL;
- Scope* inner_scope = scope();
- int drop_extra = 0;
- HBasicBlock* break_block = break_scope()->Get(
- stmt->target(), BreakAndContinueScope::BREAK,
- &outer_scope, &drop_extra);
- HValue* context = environment()->context();
- Drop(drop_extra);
- int context_pop_count = inner_scope->ContextChainLength(outer_scope);
- if (context_pop_count > 0) {
- while (context_pop_count-- > 0) {
- HInstruction* context_instruction = Add<HLoadNamedField>(
- context, nullptr,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- context = context_instruction;
- }
- environment()->BindContext(context);
- }
- Goto(break_block);
- set_current_block(NULL);
-}
-
-
-void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- FunctionState* state = function_state();
- AstContext* context = call_context();
- if (context == NULL) {
- // Not an inlined return, so an actual one.
- CHECK_ALIVE(VisitForValue(stmt->expression()));
- HValue* result = environment()->Pop();
- Add<HReturn>(result);
- } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
- // Return from an inlined construct call. In a test context the return value
- // will always evaluate to true; in a value context the return value needs
- // to be a JSObject.
- if (context->IsTest()) {
- CHECK_ALIVE(VisitForEffect(stmt->expression()));
- context->ReturnValue(graph()->GetConstantTrue());
- } else if (context->IsEffect()) {
- CHECK_ALIVE(VisitForEffect(stmt->expression()));
- Goto(function_return(), state);
- } else {
- DCHECK(context->IsValue());
- CHECK_ALIVE(VisitForValue(stmt->expression()));
- HValue* return_value = Pop();
- HValue* receiver = environment()->arguments_environment()->Lookup(0);
- HHasInstanceTypeAndBranch* typecheck =
- New<HHasInstanceTypeAndBranch>(return_value,
- FIRST_JS_RECEIVER_TYPE,
- LAST_JS_RECEIVER_TYPE);
- HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
- HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
- typecheck->SetSuccessorAt(0, if_spec_object);
- typecheck->SetSuccessorAt(1, not_spec_object);
- FinishCurrentBlock(typecheck);
- AddLeaveInlined(if_spec_object, return_value, state);
- AddLeaveInlined(not_spec_object, receiver, state);
- }
- } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
- // Return from an inlined setter call. The returned value is never used; the
- // value of an assignment is always the value of the RHS of the assignment.
- CHECK_ALIVE(VisitForEffect(stmt->expression()));
- if (context->IsTest()) {
- HValue* rhs = environment()->arguments_environment()->Lookup(1);
- context->ReturnValue(rhs);
- } else if (context->IsEffect()) {
- Goto(function_return(), state);
- } else {
- DCHECK(context->IsValue());
- HValue* rhs = environment()->arguments_environment()->Lookup(1);
- AddLeaveInlined(rhs, state);
- }
- } else {
- // Return from a normal inlined function. Visit the subexpression in the
- // expression context of the call.
- if (context->IsTest()) {
- TestContext* test = TestContext::cast(context);
- VisitForControl(stmt->expression(), test->if_true(), test->if_false());
- } else if (context->IsEffect()) {
- // Visit in value context and ignore the result. This is needed to keep
- // environment in sync with full-codegen since some visitors (e.g.
- // VisitCountOperation) use the operand stack differently depending on
- // context.
- CHECK_ALIVE(VisitForValue(stmt->expression()));
- Pop();
- Goto(function_return(), state);
- } else {
- DCHECK(context->IsValue());
- CHECK_ALIVE(VisitForValue(stmt->expression()));
- AddLeaveInlined(Pop(), state);
- }
- }
- set_current_block(NULL);
-}
-
-
-void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kWithStatement);
-}
-
-
-void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- int clause_count = clauses->length();
- ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
-
- CHECK_ALIVE(VisitForValue(stmt->tag()));
- Add<HSimulate>(stmt->EntryId());
- HValue* tag_value = Top();
- AstType* tag_type = bounds_.get(stmt->tag()).lower;
-
- // 1. Build all the tests, with dangling true branches
- BailoutId default_id = BailoutId::None();
- for (int i = 0; i < clause_count; ++i) {
- CaseClause* clause = clauses->at(i);
- if (clause->is_default()) {
- body_blocks.Add(NULL, zone());
- if (default_id.IsNone()) default_id = clause->EntryId();
- continue;
- }
-
- // Generate a compare and branch.
- CHECK_BAILOUT(VisitForValue(clause->label()));
- if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
- HValue* label_value = Pop();
-
- AstType* label_type = bounds_.get(clause->label()).lower;
- AstType* combined_type = clause->compare_type();
- HControlInstruction* compare = BuildCompareInstruction(
- Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
- combined_type,
- ScriptPositionToSourcePosition(stmt->tag()->position()),
- ScriptPositionToSourcePosition(clause->label()->position()),
- PUSH_BEFORE_SIMULATE, clause->id());
-
- HBasicBlock* next_test_block = graph()->CreateBasicBlock();
- HBasicBlock* body_block = graph()->CreateBasicBlock();
- body_blocks.Add(body_block, zone());
- compare->SetSuccessorAt(0, body_block);
- compare->SetSuccessorAt(1, next_test_block);
- FinishCurrentBlock(compare);
-
- set_current_block(body_block);
- Drop(1); // tag_value
-
- set_current_block(next_test_block);
- }
-
- // Save the current block to use for the default or to join with the
- // exit.
- HBasicBlock* last_block = current_block();
- Drop(1); // tag_value
-
- // 2. Loop over the clauses and the linked list of tests in lockstep,
- // translating the clause bodies.
- HBasicBlock* fall_through_block = NULL;
-
- BreakAndContinueInfo break_info(stmt, scope());
- { BreakAndContinueScope push(&break_info, this);
- for (int i = 0; i < clause_count; ++i) {
- CaseClause* clause = clauses->at(i);
-
- // Identify the block where normal (non-fall-through) control flow
- // goes.
- HBasicBlock* normal_block = NULL;
- if (clause->is_default()) {
- if (last_block == NULL) continue;
- normal_block = last_block;
- last_block = NULL; // Cleared to indicate we've handled it.
- } else {
- normal_block = body_blocks[i];
- }
-
- if (fall_through_block == NULL) {
- set_current_block(normal_block);
- } else {
- HBasicBlock* join = CreateJoin(fall_through_block,
- normal_block,
- clause->EntryId());
- set_current_block(join);
- }
-
- CHECK_BAILOUT(VisitStatements(clause->statements()));
- fall_through_block = current_block();
- }
- }
-
- // Create an up-to-3-way join. Use the break block if it exists since
- // it's already a join block.
- HBasicBlock* break_block = break_info.break_block();
- if (break_block == NULL) {
- set_current_block(CreateJoin(fall_through_block,
- last_block,
- stmt->ExitId()));
- } else {
- if (fall_through_block != NULL) Goto(fall_through_block, break_block);
- if (last_block != NULL) Goto(last_block, break_block);
- break_block->SetJoinId(stmt->ExitId());
- set_current_block(break_block);
- }
-}
-
-void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
- BailoutId stack_check_id,
- HBasicBlock* loop_entry) {
- Add<HSimulate>(stack_check_id);
- HStackCheck* stack_check =
- HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
- DCHECK(loop_entry->IsLoopHeader());
- loop_entry->loop_information()->set_stack_check(stack_check);
- CHECK_BAILOUT(Visit(stmt->body()));
-}
-
-
-void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- DCHECK(current_block() != NULL);
- HBasicBlock* loop_entry = BuildLoopEntry(stmt);
-
- BreakAndContinueInfo break_info(stmt, scope());
- {
- BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
- }
- HBasicBlock* body_exit = JoinContinue(
- stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
- HBasicBlock* loop_successor = NULL;
- if (body_exit != NULL) {
- set_current_block(body_exit);
- loop_successor = graph()->CreateBasicBlock();
- if (stmt->cond()->ToBooleanIsFalse()) {
- loop_entry->loop_information()->stack_check()->Eliminate();
- Goto(loop_successor);
- body_exit = NULL;
- } else {
- // The block for a true condition, the actual predecessor block of the
- // back edge.
- body_exit = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
- }
- if (body_exit != NULL && body_exit->HasPredecessor()) {
- body_exit->SetJoinId(stmt->BackEdgeId());
- } else {
- body_exit = NULL;
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
- }
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- DCHECK(current_block() != NULL);
- HBasicBlock* loop_entry = BuildLoopEntry(stmt);
-
- // If the condition is constant true, do not generate a branch.
- HBasicBlock* loop_successor = NULL;
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- loop_successor = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
- if (body_entry->HasPredecessor()) {
- body_entry->SetJoinId(stmt->BodyId());
- set_current_block(body_entry);
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
-
- BreakAndContinueInfo break_info(stmt, scope());
- if (current_block() != NULL) {
- BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
- }
- HBasicBlock* body_exit = JoinContinue(
- stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (stmt->init() != NULL) {
- CHECK_ALIVE(Visit(stmt->init()));
- }
- DCHECK(current_block() != NULL);
- HBasicBlock* loop_entry = BuildLoopEntry(stmt);
-
- HBasicBlock* loop_successor = graph()->CreateBasicBlock();
- HBasicBlock* body_entry = graph()->CreateBasicBlock();
- if (stmt->cond() != NULL) {
- CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
- if (body_entry->HasPredecessor()) {
- body_entry->SetJoinId(stmt->BodyId());
- set_current_block(body_entry);
- }
- if (loop_successor->HasPredecessor()) {
- loop_successor->SetJoinId(stmt->ExitId());
- } else {
- loop_successor = NULL;
- }
- } else {
- // Create dummy control flow so that variable liveness analysis
- // produces the correct result.
- HControlInstruction* branch = New<HBranch>(graph()->GetConstantTrue());
- branch->SetSuccessorAt(0, body_entry);
- branch->SetSuccessorAt(1, loop_successor);
- FinishCurrentBlock(branch);
- set_current_block(body_entry);
- }
-
- BreakAndContinueInfo break_info(stmt, scope());
- if (current_block() != NULL) {
- BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
- }
- HBasicBlock* body_exit = JoinContinue(
- stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
-
- if (stmt->next() != NULL && body_exit != NULL) {
- set_current_block(body_exit);
- CHECK_BAILOUT(Visit(stmt->next()));
- body_exit = current_block();
- }
-
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
- set_current_block(loop_exit);
-}
-
-
-void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- if (!stmt->each()->IsVariableProxy() ||
- !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
- return Bailout(kForInStatementWithNonLocalEachVariable);
- }
-
- Variable* each_var = stmt->each()->AsVariableProxy()->var();
-
- CHECK_ALIVE(VisitForValue(stmt->enumerable()));
- HValue* enumerable = Top(); // Leave enumerable at the top.
-
- IfBuilder if_undefined_or_null(this);
- if_undefined_or_null.If<HCompareObjectEqAndBranch>(
- enumerable, graph()->GetConstantUndefined());
- if_undefined_or_null.Or();
- if_undefined_or_null.If<HCompareObjectEqAndBranch>(
- enumerable, graph()->GetConstantNull());
- if_undefined_or_null.ThenDeopt(DeoptimizeReason::kUndefinedOrNullInForIn);
- if_undefined_or_null.End();
- BuildForInBody(stmt, each_var, enumerable);
-}
-
-
-void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
- Variable* each_var,
- HValue* enumerable) {
- Handle<Map> meta_map = isolate()->factory()->meta_map();
- bool fast = stmt->for_in_type() == ForInStatement::FAST_FOR_IN;
- BuildCheckHeapObject(enumerable);
- Add<HCheckInstanceType>(enumerable, HCheckInstanceType::IS_JS_RECEIVER);
- Add<HSimulate>(stmt->ToObjectId());
- if (fast) {
- HForInPrepareMap* map = Add<HForInPrepareMap>(enumerable);
- Push(map);
- Add<HSimulate>(stmt->EnumId());
- Drop(1);
- Add<HCheckMaps>(map, meta_map);
-
- HForInCacheArray* array = Add<HForInCacheArray>(
- enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
- HValue* enum_length = BuildEnumLength(map);
-
- HForInCacheArray* index_cache = Add<HForInCacheArray>(
- enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
- array->set_index_cache(index_cache);
-
- Push(map);
- Push(array);
- Push(enum_length);
- Add<HSimulate>(stmt->PrepareId());
- } else {
- Runtime::FunctionId function_id = Runtime::kForInEnumerate;
- Add<HPushArguments>(enumerable);
- HCallRuntime* array =
- Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
- Push(array);
- Add<HSimulate>(stmt->EnumId());
- Drop(1);
-
- IfBuilder if_fast(this);
- if_fast.If<HCompareMap>(array, meta_map);
- if_fast.Then();
- {
- HValue* cache_map = array;
- HForInCacheArray* cache = Add<HForInCacheArray>(
- enumerable, cache_map, DescriptorArray::kEnumCacheBridgeCacheIndex);
- HValue* enum_length = BuildEnumLength(cache_map);
- Push(cache_map);
- Push(cache);
- Push(enum_length);
- Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
- }
- if_fast.Else();
- {
- Push(graph()->GetConstant1());
- Push(array);
- Push(AddLoadFixedArrayLength(array));
- Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
- }
- }
-
- Push(graph()->GetConstant0());
-
- HBasicBlock* loop_entry = BuildLoopEntry(stmt);
-
- // Reload the values to ensure we have up-to-date values inside the loop.
- // This is especially relevant for OSR, where the values don't come from the
- // computation above but from the OSR entry block.
- HValue* index = environment()->ExpressionStackAt(0);
- HValue* limit = environment()->ExpressionStackAt(1);
- HValue* array = environment()->ExpressionStackAt(2);
- HValue* type = environment()->ExpressionStackAt(3);
- enumerable = environment()->ExpressionStackAt(4);
-
- // Check that we still have more keys.
- HCompareNumericAndBranch* compare_index =
- New<HCompareNumericAndBranch>(index, limit, Token::LT);
- compare_index->set_observed_input_representation(
- Representation::Smi(), Representation::Smi());
-
- HBasicBlock* loop_body = graph()->CreateBasicBlock();
- HBasicBlock* loop_successor = graph()->CreateBasicBlock();
-
- compare_index->SetSuccessorAt(0, loop_body);
- compare_index->SetSuccessorAt(1, loop_successor);
- FinishCurrentBlock(compare_index);
-
- set_current_block(loop_successor);
- Drop(5);
-
- set_current_block(loop_body);
-
- // Compute the next enumerated value.
- HValue* key = Add<HLoadKeyed>(array, index, index, nullptr, FAST_ELEMENTS);
-
- HBasicBlock* continue_block = nullptr;
- if (fast) {
- // Check if the expected map still matches that of the enumerable.
- Add<HCheckMapValue>(enumerable, type);
- Add<HSimulate>(stmt->FilterId());
- } else {
- // We need the continue block here to be able to skip over invalidated keys.
- continue_block = graph()->CreateBasicBlock();
-
- // We cannot use the IfBuilder here, since we need to be able to jump
- // over the loop body in case of an undefined result from %ForInFilter,
- // and the poor soul that is the IfBuilder gets really confused about
- // such "advanced control flow requirements".
- HBasicBlock* if_fast = graph()->CreateBasicBlock();
- HBasicBlock* if_slow = graph()->CreateBasicBlock();
- HBasicBlock* if_slow_pass = graph()->CreateBasicBlock();
- HBasicBlock* if_slow_skip = graph()->CreateBasicBlock();
- HBasicBlock* if_join = graph()->CreateBasicBlock();
-
- // Check if the expected map still matches that of the enumerable.
- HValue* enumerable_map =
- Add<HLoadNamedField>(enumerable, nullptr, HObjectAccess::ForMap());
- FinishCurrentBlock(
- New<HCompareObjectEqAndBranch>(enumerable_map, type, if_fast, if_slow));
- set_current_block(if_fast);
- {
- // The enum cache for enumerable is still valid; no need to check the key.
- Push(key);
- Goto(if_join);
- }
- set_current_block(if_slow);
- {
- Callable callable = CodeFactory::ForInFilter(isolate());
- HValue* values[] = {key, enumerable};
- HConstant* stub_value = Add<HConstant>(callable.code());
- Push(Add<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
- ArrayVector(values)));
- Add<HSimulate>(stmt->FilterId());
- FinishCurrentBlock(New<HCompareObjectEqAndBranch>(
- Top(), graph()->GetConstantUndefined(), if_slow_skip, if_slow_pass));
- }
- set_current_block(if_slow_pass);
- { Goto(if_join); }
- set_current_block(if_slow_skip);
- {
- // The key is no longer valid for enumerable, skip it.
- Drop(1);
- Goto(continue_block);
- }
- if_join->SetJoinId(stmt->FilterId());
- set_current_block(if_join);
- key = Pop();
- }
-
- Bind(each_var, key);
- Add<HSimulate>(stmt->AssignmentId());
-
- BreakAndContinueInfo break_info(stmt, scope(), 5);
- break_info.set_continue_block(continue_block);
- {
- BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
- }
-
- HBasicBlock* body_exit = JoinContinue(
- stmt, stmt->IncrementId(), current_block(), break_info.continue_block());
-
- if (body_exit != NULL) {
- set_current_block(body_exit);
-
- HValue* current_index = Pop();
- HValue* increment =
- AddUncasted<HAdd>(current_index, graph()->GetConstant1());
- increment->ClearFlag(HValue::kCanOverflow);
- Push(increment);
- body_exit = current_block();
- }
-
- HBasicBlock* loop_exit = CreateLoop(stmt,
- loop_entry,
- body_exit,
- loop_successor,
- break_info.break_block());
-
- set_current_block(loop_exit);
-}
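
For reference, the five values that BuildForInBody keeps on the expression stack during the loop, matching the ExpressionStackAt() reloads above and the five-slot drop on break, are roughly the following; this is a reading of the code above, not a normative layout:

// Expression stack inside the for-in loop, top of stack first:
//   0: index       - current enumeration index, starts at the constant 0
//   1: limit       - enum length, or the key array length in the generic case
//   2: array       - enum cache array, or the key array from %ForInEnumerate
//   3: type        - map used for the fast map check, or the constant 1
//   4: enumerable  - the object being iterated over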
-
-
-void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kForOfStatement);
-}
-
-
-void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kTryCatchStatement);
-}
-
-
-void HOptimizedGraphBuilder::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kTryFinallyStatement);
-}
-
-
-void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kDebuggerStatement);
-}
-
-
-void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- Handle<SharedFunctionInfo> shared_info = Compiler::GetSharedFunctionInfo(
- expr, current_info()->script(), top_info());
- // We also have a stack overflow if the recursive compilation did.
- if (HasStackOverflow()) return;
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need pretenuring.
- HConstant* shared_info_value = Add<HConstant>(shared_info);
- HInstruction* instr;
- Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
- HValue* vector_value = Add<HConstant>(vector);
- int index = FeedbackVector::GetIndex(expr->LiteralFeedbackSlot());
- HValue* index_value = Add<HConstant>(index);
- if (!expr->pretenure()) {
- Callable callable = CodeFactory::FastNewClosure(isolate());
- HValue* values[] = {shared_info_value, vector_value, index_value};
- HConstant* stub_value = Add<HConstant>(callable.code());
- instr = New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
- ArrayVector(values));
- } else {
- Add<HPushArguments>(shared_info_value);
- Add<HPushArguments>(vector_value);
- Add<HPushArguments>(index_value);
- Runtime::FunctionId function_id =
- expr->pretenure() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
- instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 3);
- }
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitClassLiteral(ClassLiteral* lit) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kClassLiteral);
-}
-
-
-void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kNativeFunctionLiteral);
-}
-
-
-void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
- DoExpressionScope scope(this);
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- CHECK_ALIVE(VisitBlock(expr->block()));
- Visit(expr->result());
-}
-
-
-void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- HBasicBlock* cond_true = graph()->CreateBasicBlock();
- HBasicBlock* cond_false = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
-
- // Visit the true and false subexpressions in the same AST context as the
- // whole expression.
- if (cond_true->HasPredecessor()) {
- cond_true->SetJoinId(expr->ThenId());
- set_current_block(cond_true);
- CHECK_BAILOUT(Visit(expr->then_expression()));
- cond_true = current_block();
- } else {
- cond_true = NULL;
- }
-
- if (cond_false->HasPredecessor()) {
- cond_false->SetJoinId(expr->ElseId());
- set_current_block(cond_false);
- CHECK_BAILOUT(Visit(expr->else_expression()));
- cond_false = current_block();
- } else {
- cond_false = NULL;
- }
-
- if (!ast_context()->IsTest()) {
- HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
- set_current_block(join);
- if (join != NULL && !ast_context()->IsEffect()) {
- return ast_context()->ReturnValue(Pop());
- }
- }
-}
-
-bool HOptimizedGraphBuilder::CanInlineGlobalPropertyAccess(
- Variable* var, LookupIterator* it, PropertyAccessType access_type) {
- if (var->is_this()) return false;
- return CanInlineGlobalPropertyAccess(it, access_type);
-}
-
-bool HOptimizedGraphBuilder::CanInlineGlobalPropertyAccess(
- LookupIterator* it, PropertyAccessType access_type) {
- if (!current_info()->has_global_object()) {
- return false;
- }
-
- switch (it->state()) {
- case LookupIterator::ACCESSOR:
- case LookupIterator::ACCESS_CHECK:
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- case LookupIterator::NOT_FOUND:
- return false;
- case LookupIterator::DATA:
- if (access_type == STORE && it->IsReadOnly()) return false;
- if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return false;
- return true;
- case LookupIterator::JSPROXY:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- }
- UNREACHABLE();
- return false;
-}
-
-
-HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
- DCHECK(var->IsContextSlot());
- HValue* context = environment()->context();
- int length = scope()->ContextChainLength(var->scope());
- while (length-- > 0) {
- context = Add<HLoadNamedField>(
- context, nullptr,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- }
- return context;
-}
-
-void HOptimizedGraphBuilder::InlineGlobalPropertyLoad(LookupIterator* it,
- BailoutId ast_id) {
- Handle<PropertyCell> cell = it->GetPropertyCell();
- top_info()->dependencies()->AssumePropertyCell(cell);
- auto cell_type = it->property_details().cell_type();
- if (cell_type == PropertyCellType::kConstant ||
- cell_type == PropertyCellType::kUndefined) {
- Handle<Object> constant_object(cell->value(), isolate());
- if (constant_object->IsConsString()) {
- constant_object = String::Flatten(Handle<String>::cast(constant_object));
- }
- HConstant* constant = New<HConstant>(constant_object);
- return ast_context()->ReturnInstruction(constant, ast_id);
- } else {
- auto access = HObjectAccess::ForPropertyCellValue();
- UniqueSet<Map>* field_maps = nullptr;
- if (cell_type == PropertyCellType::kConstantType) {
- switch (cell->GetConstantType()) {
- case PropertyCellConstantType::kSmi:
- access = access.WithRepresentation(Representation::Smi());
- break;
- case PropertyCellConstantType::kStableMap: {
- // Check that the map really is stable. The heap object could
- // have mutated without the cell updating its state. In that case,
- // make no promises about the loaded value except that it's a
- // heap object.
- access = access.WithRepresentation(Representation::HeapObject());
- Handle<Map> map(HeapObject::cast(cell->value())->map());
- if (map->is_stable()) {
- field_maps = new (zone())
- UniqueSet<Map>(Unique<Map>::CreateImmovable(map), zone());
- }
- break;
- }
- }
- }
- HConstant* cell_constant = Add<HConstant>(cell);
- HLoadNamedField* instr;
- if (field_maps == nullptr) {
- instr = New<HLoadNamedField>(cell_constant, nullptr, access);
- } else {
- instr = New<HLoadNamedField>(cell_constant, nullptr, access, field_maps,
- HType::HeapObject());
- }
- instr->ClearDependsOnFlag(kInobjectFields);
- instr->SetDependsOnFlag(kGlobalVars);
- return ast_context()->ReturnInstruction(instr, ast_id);
- }
-}
-
-void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- Variable* variable = expr->var();
- switch (variable->location()) {
- case VariableLocation::UNALLOCATED: {
- if (IsLexicalVariableMode(variable->mode())) {
- // TODO(rossberg): should this be a DCHECK?
- return Bailout(kReferenceToGlobalLexicalVariable);
- }
- // Handle known global constants like 'undefined' specially to avoid a
- // load from a global cell for them.
- Handle<Object> constant_value =
- isolate()->factory()->GlobalConstantFor(variable->name());
- if (!constant_value.is_null()) {
- HConstant* instr = New<HConstant>(constant_value);
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
-
- Handle<JSGlobalObject> global(current_info()->global_object());
-
- // Lookup in script contexts.
- {
- Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
- ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(script_contexts, variable->name(),
- &lookup)) {
- Handle<Context> script_context = ScriptContextTable::GetContext(
- script_contexts, lookup.context_index);
- Handle<Object> current_value =
- FixedArray::get(*script_context, lookup.slot_index, isolate());
-
- // If the value is not the hole, it will stay initialized,
- // so no need to generate a check.
- if (current_value->IsTheHole(isolate())) {
- return Bailout(kReferenceToUninitializedVariable);
- }
- HInstruction* result = New<HLoadNamedField>(
- Add<HConstant>(script_context), nullptr,
- HObjectAccess::ForContextSlot(lookup.slot_index));
- return ast_context()->ReturnInstruction(result, expr->id());
- }
- }
-
- LookupIterator it(global, variable->name(), LookupIterator::OWN);
- it.TryLookupCachedProperty();
- if (CanInlineGlobalPropertyAccess(variable, &it, LOAD)) {
- InlineGlobalPropertyLoad(&it, expr->id());
- return;
- } else {
- Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
- FeedbackSlot slot = expr->VariableFeedbackSlot();
- DCHECK(vector->IsLoadGlobalIC(slot));
-
- HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
- Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(
- isolate(), ast_context()->typeof_mode());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* name = Add<HConstant>(variable->name());
- HValue* values[] = {name, slot_value, vector_value};
- HCallWithDescriptor* instr = New<HCallWithDescriptor>(
- Code::LOAD_GLOBAL_IC, stub, 0, callable.descriptor(),
- ArrayVector(values));
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL: {
- HValue* value = LookupAndMakeLive(variable);
- if (value == graph()->GetConstantHole()) {
- DCHECK(IsDeclaredVariableMode(variable->mode()) &&
- variable->mode() != VAR);
- return Bailout(kReferenceToUninitializedVariable);
- }
- return ast_context()->ReturnValue(value);
- }
-
- case VariableLocation::CONTEXT: {
- HValue* context = BuildContextChainWalk(variable);
- HLoadContextSlot::Mode mode;
- switch (variable->mode()) {
- case LET:
- case CONST:
- mode = HLoadContextSlot::kCheckDeoptimize;
- break;
- default:
- mode = HLoadContextSlot::kNoCheck;
- break;
- }
- HLoadContextSlot* instr =
- new(zone()) HLoadContextSlot(context, variable->index(), mode);
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
-
- case VariableLocation::LOOKUP:
- return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
-
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- HConstant* instr = New<HConstant>(expr->value());
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- Callable callable = CodeFactory::FastCloneRegExp(isolate());
- int index = FeedbackVector::GetIndex(expr->literal_slot());
- HValue* values[] = {AddThisFunction(), Add<HConstant>(index),
- Add<HConstant>(expr->pattern()),
- Add<HConstant>(expr->flags())};
- HConstant* stub_value = Add<HConstant>(callable.code());
- HInstruction* instr = New<HCallWithDescriptor>(
- stub_value, 0, callable.descriptor(), ArrayVector(values));
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-static bool CanInlinePropertyAccess(Handle<Map> map) {
- if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
- if (map->instance_type() < FIRST_NONSTRING_TYPE) return true;
- return map->IsJSObjectMap() && !map->is_dictionary_map() &&
- !map->has_named_interceptor() &&
- // TODO(verwaest): Whitelist contexts to which we have access.
- !map->is_access_check_needed();
-}
-
-
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-static bool IsFastLiteral(Handle<JSObject> boilerplate,
- int max_depth,
- int* max_properties) {
- if (boilerplate->map()->is_deprecated() &&
- !JSObject::TryMigrateInstance(boilerplate)) {
- return false;
- }
-
- DCHECK(max_depth >= 0 && *max_properties >= 0);
- if (max_depth == 0) return false;
-
- Isolate* isolate = boilerplate->GetIsolate();
- Handle<FixedArrayBase> elements(boilerplate->elements());
- if (elements->length() > 0 &&
- elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastSmiOrObjectElements()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object,
- max_depth - 1,
- max_properties)) {
- return false;
- }
- }
- }
- } else if (boilerplate->HasFastDoubleElements()) {
- if (elements->Size() > kMaxRegularHeapObjectSize) return false;
- } else {
- return false;
- }
- }
-
- Handle<FixedArray> properties(boilerplate->properties());
- if (properties->length() > 0) {
- return false;
- } else {
- Handle<DescriptorArray> descriptors(
- boilerplate->map()->instance_descriptors());
- int limit = boilerplate->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- if ((*max_properties)-- == 0) return false;
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
- isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteral(value_object,
- max_depth - 1,
- max_properties)) {
- return false;
- }
- }
- }
- }
- return true;
-}
-
-
-void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- HInstruction* literal;
-
- // Check whether to use fast or slow deep-copying for boilerplate.
- int max_properties = kMaxFastLiteralProperties;
- Handle<Object> literals_cell(
- closure->feedback_vector()->Get(expr->literal_slot()), isolate());
- Handle<AllocationSite> site;
- Handle<JSObject> boilerplate;
- if (!literals_cell->IsUndefined(isolate())) {
- // Retrieve the boilerplate
- site = Handle<AllocationSite>::cast(literals_cell);
- boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
- isolate());
- }
-
- if (!boilerplate.is_null() &&
- IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
- AllocationSiteUsageContext site_context(isolate(), site, false);
- site_context.EnterNewScope();
- literal = BuildFastLiteral(boilerplate, &site_context);
- site_context.ExitScope(site, boilerplate);
- } else {
- NoObservableSideEffectsScope no_effects(this);
- Handle<BoilerplateDescription> constant_properties =
- expr->GetOrBuildConstantProperties(isolate());
- int literal_index = FeedbackVector::GetIndex(expr->literal_slot());
- int flags = expr->ComputeFlags(true);
-
- Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
- Add<HConstant>(constant_properties),
- Add<HConstant>(flags));
-
- Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
- literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
- }
-
- // The object is expected in the bailout environment during computation
- // of the property values and is the value of the entire expression.
- Push(literal);
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->is_computed_name()) return Bailout(kComputedPropertyName);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key()->AsLiteral();
- Expression* value = property->value();
-
- switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- // It is safe to use [[Put]] here because the boilerplate already
- // contains computed properties with an uninitialized value.
- if (key->IsStringLiteral()) {
- DCHECK(key->IsPropertyName());
- if (property->emit_store()) {
- CHECK_ALIVE(VisitForValue(value));
- HValue* value = Pop();
-
- Handle<Map> map = property->GetReceiverType();
- Handle<String> name = key->AsPropertyName();
- HValue* store;
- FeedbackSlot slot = property->GetSlot();
- if (map.is_null()) {
- // If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
- name, value));
- } else {
- PropertyAccessInfo info(this, STORE, map, name);
- if (info.CanAccessMonomorphic()) {
- HValue* checked_literal = Add<HCheckMaps>(literal, map);
- DCHECK(!info.IsAccessorConstant());
- info.MarkAsInitializingStore();
- store = BuildMonomorphicAccess(
- &info, literal, checked_literal, value,
- BailoutId::None(), BailoutId::None());
- DCHECK_NOT_NULL(store);
- } else {
- CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot,
- literal, name, value));
- }
- }
- if (store->IsInstruction()) {
- AddInstruction(HInstruction::cast(store));
- }
- DCHECK(store->HasObservableSideEffects());
- Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
-
- // Add [[HomeObject]] to function literals.
- if (FunctionLiteral::NeedsHomeObject(property->value())) {
- Handle<Symbol> sym = isolate()->factory()->home_object_symbol();
- HInstruction* store_home = BuildNamedGeneric(
- STORE, NULL, property->GetSlot(1), value, sym, literal);
- AddInstruction(store_home);
- DCHECK(store_home->HasObservableSideEffects());
- Add<HSimulate>(property->value()->id(), REMOVABLE_SIMULATE);
- }
- } else {
- CHECK_ALIVE(VisitForEffect(value));
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- case ObjectLiteral::Property::SETTER:
- case ObjectLiteral::Property::GETTER:
- return Bailout(kObjectLiteralWithComplexProperty);
- default: UNREACHABLE();
- }
- }
-
- return ast_context()->ReturnValue(Pop());
-}
-
-
-void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- HInstruction* literal;
-
- Handle<AllocationSite> site;
- Handle<FeedbackVector> vector(environment()->closure()->feedback_vector(),
- isolate());
- Handle<Object> literals_cell(vector->Get(expr->literal_slot()), isolate());
- Handle<JSObject> boilerplate_object;
- if (!literals_cell->IsUndefined(isolate())) {
- DCHECK(literals_cell->IsAllocationSite());
- site = Handle<AllocationSite>::cast(literals_cell);
- boilerplate_object = Handle<JSObject>(
- JSObject::cast(site->transition_info()), isolate());
- }
-
- // Check whether to use fast or slow deep-copying for boilerplate.
- int max_properties = kMaxFastLiteralProperties;
- if (!boilerplate_object.is_null() &&
- IsFastLiteral(boilerplate_object, kMaxFastLiteralDepth,
- &max_properties)) {
- DCHECK(site->SitePointsToLiteral());
- AllocationSiteUsageContext site_context(isolate(), site, false);
- site_context.EnterNewScope();
- literal = BuildFastLiteral(boilerplate_object, &site_context);
- site_context.ExitScope(site, boilerplate_object);
- } else {
- NoObservableSideEffectsScope no_effects(this);
- Handle<ConstantElementsPair> constants =
- expr->GetOrBuildConstantElements(isolate());
- int literal_index = FeedbackVector::GetIndex(expr->literal_slot());
- int flags = expr->ComputeFlags(true);
-
- Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
- Add<HConstant>(constants), Add<HConstant>(flags));
-
- Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
- literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
-
- // Register to deopt if the boilerplate ElementsKind changes.
- if (!site.is_null()) {
- top_info()->dependencies()->AssumeTransitionStable(site);
- }
- }
-
- // The array is expected in the bailout environment during computation
- // of the property values and is the value of the entire expression.
- Push(literal);
-
- HInstruction* elements = NULL;
-
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- DCHECK(!subexpr->IsSpread());
-
- // If the subexpression is a literal or a simple materialized literal, it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- CHECK_ALIVE(VisitForValue(subexpr));
- HValue* value = Pop();
- if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
-
- elements = AddLoadElements(literal);
-
- HValue* key = Add<HConstant>(i);
-
- if (!boilerplate_object.is_null()) {
- ElementsKind boilerplate_elements_kind =
- boilerplate_object->GetElementsKind();
- switch (boilerplate_elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- Add<HStoreKeyed>(elements, key, value, nullptr,
- boilerplate_elements_kind);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- HInstruction* instr = BuildKeyedGeneric(
- STORE, expr, expr->LiteralFeedbackSlot(), literal, key, value);
- AddInstruction(instr);
- }
-
- Add<HSimulate>(expr->GetIdForElement(i));
- }
-
- return ast_context()->ReturnValue(Pop());
-}
-
-
-HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
- Handle<Map> map) {
- BuildCheckHeapObject(object);
- return Add<HCheckMaps>(object, map);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
- PropertyAccessInfo* info,
- HValue* checked_object) {
- // Check if this is a load of an immutable or constant property.
- if (checked_object->ActualValue()->IsConstant()) {
- Handle<Object> object(
- HConstant::cast(checked_object->ActualValue())->handle(isolate()));
-
- if (object->IsJSObject()) {
- LookupIterator it(object, info->name(),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound()) {
- bool is_readonly_non_configurable =
- it.IsReadOnly() && !it.IsConfigurable();
- if (is_readonly_non_configurable ||
- (FLAG_track_constant_fields && info->IsDataConstantField())) {
- Handle<Object> value = JSReceiver::GetDataProperty(&it);
- if (!is_readonly_non_configurable) {
- DCHECK(!it.is_dictionary_holder());
- // Add dependency on the map that introduced the field.
- Handle<Map> field_owner_map = it.GetFieldOwnerMap();
- top_info()->dependencies()->AssumeFieldOwner(field_owner_map);
- }
- return New<HConstant>(value);
- }
- }
- }
- }
-
- HObjectAccess access = info->access();
- if (access.representation().IsDouble() &&
- (!FLAG_unbox_double_fields || !access.IsInobject())) {
- // Load the heap number.
- checked_object = Add<HLoadNamedField>(
- checked_object, nullptr,
- access.WithRepresentation(Representation::Tagged()));
- // Load the double value from it.
- access = HObjectAccess::ForHeapNumberValue();
- }
-
- SmallMapList* map_list = info->field_maps();
- if (map_list->length() == 0) {
- return New<HLoadNamedField>(checked_object, checked_object, access);
- }
-
- UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(map_list->length(), zone());
- for (int i = 0; i < map_list->length(); ++i) {
- maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone());
- }
- return New<HLoadNamedField>(
- checked_object, checked_object, access, maps, info->field_type());
-}
-
-HValue* HOptimizedGraphBuilder::BuildStoreNamedField(PropertyAccessInfo* info,
- HValue* checked_object,
- HValue* value) {
- bool transition_to_field = info->IsTransition();
- // TODO(verwaest): Move this logic into PropertyAccessInfo.
- HObjectAccess field_access = info->access();
-
- bool store_to_constant_field = FLAG_track_constant_fields &&
- info->StoreMode() != INITIALIZING_STORE &&
- info->IsDataConstantField();
-
- HStoreNamedField *instr;
- if (field_access.representation().IsDouble() &&
- (!FLAG_unbox_double_fields || !field_access.IsInobject())) {
- HObjectAccess heap_number_access =
- field_access.WithRepresentation(Representation::Tagged());
- if (transition_to_field) {
- // The store requires a mutable HeapNumber to be allocated.
- NoObservableSideEffectsScope no_side_effects(this);
- HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
-
- // TODO(hpayer): Allocation site pretenuring support.
- HInstruction* heap_number =
- Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
- MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
- AddStoreMapConstant(
- heap_number, isolate()->factory()->mutable_heap_number_map());
- Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
- value);
- instr = New<HStoreNamedField>(checked_object->ActualValue(),
- heap_number_access,
- heap_number);
- } else {
- // Already holds a HeapNumber; load the box and write its value field.
- HInstruction* heap_number =
- Add<HLoadNamedField>(checked_object, nullptr, heap_number_access);
-
- if (store_to_constant_field) {
- // If the field is constant, check that the value we are going to store
- // matches the current value.
- HInstruction* current_value = Add<HLoadNamedField>(
- heap_number, nullptr, HObjectAccess::ForHeapNumberValue());
- IfBuilder value_checker(this);
- value_checker.IfNot<HCompareNumericAndBranch>(current_value, value,
- Token::EQ);
- value_checker.ThenDeopt(DeoptimizeReason::kValueMismatch);
- value_checker.End();
- return nullptr;
-
- } else {
- instr = New<HStoreNamedField>(heap_number,
- HObjectAccess::ForHeapNumberValue(),
- value, STORE_TO_INITIALIZED_ENTRY);
- }
- }
- } else {
- if (store_to_constant_field) {
- // If the field is constant, check that the value we are going to store
- // matches the current value.
- HInstruction* current_value = Add<HLoadNamedField>(
- checked_object->ActualValue(), checked_object, field_access);
-
- IfBuilder value_checker(this);
- if (field_access.representation().IsDouble()) {
- value_checker.IfNot<HCompareNumericAndBranch>(current_value, value,
- Token::EQ);
- } else {
- value_checker.IfNot<HCompareObjectEqAndBranch>(current_value, value);
- }
- value_checker.ThenDeopt(DeoptimizeReason::kValueMismatch);
- value_checker.End();
- return nullptr;
-
- } else {
- if (field_access.representation().IsHeapObject()) {
- BuildCheckHeapObject(value);
- }
-
- if (!info->field_maps()->is_empty()) {
- DCHECK(field_access.representation().IsHeapObject());
- value = Add<HCheckMaps>(value, info->field_maps());
- }
-
- // This is a normal store.
- instr = New<HStoreNamedField>(checked_object->ActualValue(), field_access,
- value, info->StoreMode());
- }
- }
-
- if (transition_to_field) {
- Handle<Map> transition(info->transition());
- DCHECK(!transition->is_deprecated());
- instr->SetTransition(Add<HConstant>(transition));
- }
- return instr;
-}
-
-Handle<FieldType>
-HOptimizedGraphBuilder::PropertyAccessInfo::GetFieldTypeFromMap(
- Handle<Map> map) const {
- DCHECK(IsFound());
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return handle(map->instance_descriptors()->GetFieldType(number_), isolate());
-}
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
- PropertyAccessInfo* info) {
- if (!CanInlinePropertyAccess(map_)) return false;
-
- // Currently only handle AstType::Number as a polymorphic case.
- // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
- // instruction.
- if (IsNumberType()) return false;
-
- // Values are only compatible for monomorphic load if they all behave the same
- // regarding value wrappers.
- if (IsValueWrapped() != info->IsValueWrapped()) return false;
-
- if (!LookupDescriptor()) return false;
-
- if (!IsFound()) {
- return (!info->IsFound() || info->has_holder()) &&
- map()->prototype() == info->map()->prototype();
- }
-
- // Mismatch if the other access info found the property in the prototype
- // chain.
- if (info->has_holder()) return false;
-
- if (IsAccessorConstant()) {
- return accessor_.is_identical_to(info->accessor_) &&
- api_holder_.is_identical_to(info->api_holder_);
- }
-
- if (IsDataConstant()) {
- return constant_.is_identical_to(info->constant_);
- }
-
- DCHECK(IsData());
- if (!info->IsData()) return false;
-
- Representation r = access_.representation();
- if (IsLoad()) {
- if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
- } else {
- if (!info->access_.representation().IsCompatibleForStore(r)) return false;
- }
- if (info->access_.offset() != access_.offset()) return false;
- if (info->access_.IsInobject() != access_.IsInobject()) return false;
- if (IsLoad()) {
- if (field_maps_.is_empty()) {
- info->field_maps_.Clear();
- } else if (!info->field_maps_.is_empty()) {
- for (int i = 0; i < field_maps_.length(); ++i) {
- info->field_maps_.AddMapIfMissing(field_maps_.at(i), info->zone());
- }
- info->field_maps_.Sort();
- }
- } else {
- // We can only merge stores that agree on their field maps. The comparison
- // below is safe, since we keep the field maps sorted.
- if (field_maps_.length() != info->field_maps_.length()) return false;
- for (int i = 0; i < field_maps_.length(); ++i) {
- if (!field_maps_.at(i).is_identical_to(info->field_maps_.at(i))) {
- return false;
- }
- }
- }
- info->GeneralizeRepresentation(r);
- info->field_type_ = info->field_type_.Combine(field_type_);
- return true;
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
- if (!map_->IsJSObjectMap()) return true;
- LookupDescriptor(*map_, *name_);
- return LoadResult(map_);
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
- if (!IsLoad() && IsProperty() && IsReadOnly()) {
- return false;
- }
-
- if (IsData()) {
- // Construct the object field access.
- int index = GetLocalFieldIndexFromMap(map);
- access_ = HObjectAccess::ForField(map, index, representation(), name_);
-
- // Load field map for heap objects.
- return LoadFieldMaps(map);
- } else if (IsAccessorConstant()) {
- Handle<Object> accessors = GetAccessorsFromMap(map);
- if (!accessors->IsAccessorPair()) return false;
- Object* raw_accessor =
- IsLoad() ? Handle<AccessorPair>::cast(accessors)->getter()
- : Handle<AccessorPair>::cast(accessors)->setter();
- if (!raw_accessor->IsJSFunction() &&
- !raw_accessor->IsFunctionTemplateInfo())
- return false;
- Handle<Object> accessor = handle(HeapObject::cast(raw_accessor));
- CallOptimization call_optimization(accessor);
- if (call_optimization.is_simple_api_call()) {
- CallOptimization::HolderLookup holder_lookup;
- api_holder_ =
- call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
- }
- accessor_ = accessor;
- } else if (IsDataConstant()) {
- constant_ = GetConstantFromMap(map);
- }
-
- return true;
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
- Handle<Map> map) {
- // Clear any previously collected field maps/type.
- field_maps_.Clear();
- field_type_ = HType::Tagged();
-
- // Figure out the field type from the accessor map.
- Handle<FieldType> field_type = GetFieldTypeFromMap(map);
-
- // Collect the (stable) maps from the field type.
- if (field_type->IsClass()) {
- DCHECK(access_.representation().IsHeapObject());
- Handle<Map> field_map = field_type->AsClass();
- if (field_map->is_stable()) {
- field_maps_.Add(field_map, zone());
- }
- }
-
- if (field_maps_.is_empty()) {
- // Store is not safe if the field map was cleared.
- return IsLoad() || !field_type->IsNone();
- }
-
- // Determine field HType from field type.
- field_type_ = HType::FromFieldType(field_type, zone());
- DCHECK(field_type_.IsHeapObject());
-
- // Add dependency on the map that introduced the field.
- top_info()->dependencies()->AssumeFieldOwner(GetFieldOwnerFromMap(map));
- return true;
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
- Handle<Map> map = this->map();
- if (name_->IsPrivate()) {
- NotFound();
- return !map->has_hidden_prototype();
- }
-
- while (map->prototype()->IsJSObject()) {
- holder_ = handle(JSObject::cast(map->prototype()));
- if (holder_->map()->is_deprecated()) {
- JSObject::TryMigrateInstance(holder_);
- }
- map = Handle<Map>(holder_->map());
- if (!CanInlinePropertyAccess(map)) {
- NotFound();
- return false;
- }
- LookupDescriptor(*map, *name_);
- if (IsFound()) return LoadResult(map);
- }
-
- NotFound();
- return !map->prototype()->IsJSReceiver();
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::IsIntegerIndexedExotic() {
- InstanceType instance_type = map_->instance_type();
- return instance_type == JS_TYPED_ARRAY_TYPE && name_->IsString() &&
- IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name_));
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
- if (!CanInlinePropertyAccess(map_)) return false;
- if (IsJSObjectFieldAccessor()) return IsLoad();
- if (map_->IsJSFunctionMap() && map_->is_constructor() &&
- !map_->has_non_instance_prototype() &&
- name_.is_identical_to(isolate()->factory()->prototype_string())) {
- return IsLoad();
- }
- if (!LookupDescriptor()) return false;
- if (IsFound()) return IsLoad() || !IsReadOnly();
- if (IsIntegerIndexedExotic()) return false;
- if (!LookupInPrototypes()) return false;
- if (IsLoad()) return true;
-
- if (IsAccessorConstant()) return true;
- LookupTransition(*map_, *name_, NONE);
- if (IsTransitionToData() && map_->unused_property_fields() > 0) {
- // Construct the object field access.
- int descriptor = transition()->LastAdded();
- int index =
- transition()->instance_descriptors()->GetFieldIndex(descriptor) -
- map_->GetInObjectProperties();
- PropertyDetails details =
- transition()->instance_descriptors()->GetDetails(descriptor);
- Representation representation = details.representation();
- access_ = HObjectAccess::ForField(map_, index, representation, name_);
-
- // Load field map for heap objects.
- return LoadFieldMaps(transition());
- }
- return false;
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
- SmallMapList* maps) {
- DCHECK(map_.is_identical_to(maps->first()));
- if (!CanAccessMonomorphic()) return false;
- STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
- if (maps->length() > kMaxLoadPolymorphism) return false;
- HObjectAccess access = HObjectAccess::ForMap(); // bogus default
- if (GetJSObjectFieldAccess(&access)) {
- for (int i = 1; i < maps->length(); ++i) {
- PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
- HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
- if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
- if (!access.Equals(test_access)) return false;
- }
- return true;
- }
-
- // Currently only handle numbers as a polymorphic case.
- // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
- // instruction.
- if (IsNumberType()) return false;
-
- // Multiple maps cannot transition to the same target map.
- DCHECK(!IsLoad() || !IsTransition());
- if (IsTransition() && maps->length() > 1) return false;
-
- for (int i = 1; i < maps->length(); ++i) {
- PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
- if (!test_info.IsCompatible(this)) return false;
- }
-
- return true;
-}
-
-
-Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() {
- Handle<JSFunction> ctor;
- if (Map::GetConstructorFunction(
- map_, handle(current_info()->closure()->context()->native_context()))
- .ToHandle(&ctor)) {
- return handle(ctor->initial_map());
- }
- return map_;
-}
-
-
-static bool NeedsWrapping(Handle<Map> map, Handle<JSFunction> target) {
- return !map->IsJSObjectMap() &&
- is_sloppy(target->shared()->language_mode()) &&
- !target->shared()->native();
-}
-
-
-bool HOptimizedGraphBuilder::PropertyAccessInfo::NeedsWrappingFor(
- Handle<JSFunction> target) const {
- return NeedsWrapping(map_, target);
-}
-
-
-HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
- PropertyAccessInfo* info, HValue* object, HValue* checked_object,
- HValue* value, BailoutId ast_id, BailoutId return_id,
- bool can_inline_accessor) {
- HObjectAccess access = HObjectAccess::ForMap(); // bogus default
- if (info->GetJSObjectFieldAccess(&access)) {
- DCHECK(info->IsLoad());
- return New<HLoadNamedField>(object, checked_object, access);
- }
-
- if (info->name().is_identical_to(isolate()->factory()->prototype_string()) &&
- info->map()->IsJSFunctionMap() && info->map()->is_constructor()) {
- DCHECK(!info->map()->has_non_instance_prototype());
- return New<HLoadFunctionPrototype>(checked_object);
- }
-
- HValue* checked_holder = checked_object;
- if (info->has_holder()) {
- Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
- checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
- }
-
- if (!info->IsFound()) {
- DCHECK(info->IsLoad());
- return graph()->GetConstantUndefined();
- }
-
- if (info->IsData()) {
- if (info->IsLoad()) {
- return BuildLoadNamedField(info, checked_holder);
- } else {
- return BuildStoreNamedField(info, checked_object, value);
- }
- }
-
- if (info->IsTransition()) {
- DCHECK(!info->IsLoad());
- return BuildStoreNamedField(info, checked_object, value);
- }
-
- if (info->IsAccessorConstant()) {
- MaybeHandle<Name> maybe_name =
- FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
- info->accessor());
- if (!maybe_name.is_null()) {
- Handle<Name> name = maybe_name.ToHandleChecked();
- PropertyAccessInfo cache_info(this, LOAD, info->map(), name);
- // Load new target.
- if (cache_info.CanAccessMonomorphic()) {
- return BuildLoadNamedField(&cache_info, checked_object);
- }
- }
-
- Push(checked_object);
- int argument_count = 1;
- if (!info->IsLoad()) {
- argument_count = 2;
- Push(value);
- }
-
- if (info->accessor()->IsJSFunction() &&
- info->NeedsWrappingFor(Handle<JSFunction>::cast(info->accessor()))) {
- HValue* function = Add<HConstant>(info->accessor());
- PushArgumentsFromEnvironment(argument_count);
- return NewCallFunction(function, argument_count, TailCallMode::kDisallow,
- ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
- } else if (FLAG_inline_accessors && can_inline_accessor) {
- bool success = info->IsLoad()
- ? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
- : TryInlineSetter(
- info->accessor(), info->map(), ast_id, return_id, value);
- if (success || HasStackOverflow()) return NULL;
- }
-
- PushArgumentsFromEnvironment(argument_count);
- if (!info->accessor()->IsJSFunction()) {
- Bailout(kInliningBailedOut);
- return nullptr;
- }
- return NewCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
- argument_count, TailCallMode::kDisallow,
- TailCallMode::kDisallow);
- }
-
- DCHECK(info->IsDataConstant());
- if (info->IsLoad()) {
- return New<HConstant>(info->constant());
- } else {
- return New<HCheckValue>(value, Handle<JSFunction>::cast(info->constant()));
- }
-}
-
-void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
- BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
- SmallMapList* maps, Handle<Name> name) {
- // Something did not match; must use a polymorphic load.
- int count = 0;
- HBasicBlock* join = NULL;
- HBasicBlock* number_block = NULL;
- bool handled_string = false;
-
- bool handle_smi = false;
- STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
- int i;
- for (i = 0; i < maps->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, maps->at(i), name);
- if (info.IsStringType()) {
- if (handled_string) continue;
- handled_string = true;
- }
- if (info.CanAccessMonomorphic()) {
- count++;
- if (info.IsNumberType()) {
- handle_smi = true;
- break;
- }
- }
- }
-
- if (i < maps->length()) {
- count = -1;
- maps->Clear();
- } else {
- count = 0;
- }
- HControlInstruction* smi_check = NULL;
- handled_string = false;
-
- for (i = 0; i < maps->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, maps->at(i), name);
- if (info.IsStringType()) {
- if (handled_string) continue;
- handled_string = true;
- }
- if (!info.CanAccessMonomorphic()) continue;
-
- if (count == 0) {
- join = graph()->CreateBasicBlock();
- if (handle_smi) {
- HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
- HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
- number_block = graph()->CreateBasicBlock();
- smi_check = New<HIsSmiAndBranch>(
- object, empty_smi_block, not_smi_block);
- FinishCurrentBlock(smi_check);
- GotoNoSimulate(empty_smi_block, number_block);
- set_current_block(not_smi_block);
- } else {
- BuildCheckHeapObject(object);
- }
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HUnaryControlInstruction* compare;
-
- HValue* dependency;
- if (info.IsNumberType()) {
- Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
- compare = New<HCompareMap>(object, heap_number_map, if_true, if_false);
- dependency = smi_check;
- } else if (info.IsStringType()) {
- compare = New<HIsStringAndBranch>(object, if_true, if_false);
- dependency = compare;
- } else {
- compare = New<HCompareMap>(object, info.map(), if_true, if_false);
- dependency = compare;
- }
- FinishCurrentBlock(compare);
-
- if (info.IsNumberType()) {
- GotoNoSimulate(if_true, number_block);
- if_true = number_block;
- }
-
- set_current_block(if_true);
-
- HValue* access =
- BuildMonomorphicAccess(&info, object, dependency, value, ast_id,
- return_id, FLAG_polymorphic_inlining);
-
- HValue* result = NULL;
- switch (access_type) {
- case LOAD:
- result = access;
- break;
- case STORE:
- result = value;
- break;
- }
-
- if (access == NULL) {
- if (HasStackOverflow()) return;
- } else {
- if (access->IsInstruction()) {
- HInstruction* instr = HInstruction::cast(access);
- if (!instr->IsLinked()) AddInstruction(instr);
- }
- if (!ast_context()->IsEffect()) Push(result);
- }
-
- if (current_block() != NULL) Goto(join);
- set_current_block(if_false);
- }
-
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (count == maps->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization(
- DeoptimizeReason::kUnknownMapInPolymorphicAccess);
- } else {
- HInstruction* instr =
- BuildNamedGeneric(access_type, expr, slot, object, name, value);
- AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
-
- if (join != NULL) {
- Goto(join);
- } else {
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- return;
- }
- }
-
- DCHECK(join != NULL);
- if (join->HasPredecessor()) {
- join->SetJoinId(ast_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
- } else {
- set_current_block(NULL);
- }
-}
-
-static bool ComputeReceiverTypes(Expression* expr, HValue* receiver,
- SmallMapList** t,
- HOptimizedGraphBuilder* builder) {
- Zone* zone = builder->zone();
- SmallMapList* maps = expr->GetReceiverTypes();
- *t = maps;
- bool monomorphic = expr->IsMonomorphic();
- if (maps != nullptr && receiver->HasMonomorphicJSObjectType()) {
- if (maps->length() > 0) {
- Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
- maps->FilterForPossibleTransitions(root_map);
- monomorphic = maps->length() == 1;
- } else {
- // No type feedback; see if we can infer the type. This is safely
- // possible if the receiver had a known map at some point, and no
- // map-changing stores have happened to it since.
- Handle<Map> candidate_map = receiver->GetMonomorphicJSObjectMap();
- for (HInstruction* current = builder->current_block()->last();
- current != nullptr; current = current->previous()) {
- if (current->IsBlockEntry()) break;
- if (current->CheckChangesFlag(kMaps)) {
- // Only allow map changes that store the candidate map. We don't
- // need to care which object the map is being written into.
- if (!current->IsStoreNamedField()) break;
- HStoreNamedField* map_change = HStoreNamedField::cast(current);
- if (!map_change->value()->IsConstant()) break;
- HConstant* map_constant = HConstant::cast(map_change->value());
- if (!map_constant->representation().IsTagged()) break;
- Handle<Object> map = map_constant->handle(builder->isolate());
- if (!map.is_identical_to(candidate_map)) break;
- }
- if (current == receiver) {
- // We made it all the way back to the receiver without encountering
- // a map change! So we can assume that the receiver still has the
- // candidate_map we know about.
- maps->Add(candidate_map, zone);
- monomorphic = true;
- break;
- }
- }
- }
- }
- return monomorphic && CanInlinePropertyAccess(maps->first());
-}
-
-
-static bool AreStringTypes(SmallMapList* maps) {
- for (int i = 0; i < maps->length(); i++) {
- if (maps->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
- }
- return true;
-}
-
-void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
- FeedbackSlot slot, BailoutId ast_id,
- BailoutId return_id,
- bool is_uninitialized) {
- if (!prop->key()->IsPropertyName()) {
- // Keyed store.
- HValue* value = Pop();
- HValue* key = Pop();
- HValue* object = Pop();
- bool has_side_effects = false;
- HValue* result =
- HandleKeyedElementAccess(object, key, value, expr, slot, ast_id,
- return_id, STORE, &has_side_effects);
- if (has_side_effects) {
- if (!ast_context()->IsEffect()) Push(value);
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) Drop(1);
- }
- if (result == NULL) return;
- return ast_context()->ReturnValue(value);
- }
-
- // Named store.
- HValue* value = Pop();
- HValue* object = Pop();
-
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->value());
- DCHECK(!name.is_null());
-
- HValue* access = BuildNamedAccess(STORE, ast_id, return_id, expr, slot,
- object, name, value, is_uninitialized);
- if (access == NULL) return;
-
- if (!ast_context()->IsEffect()) Push(value);
- if (access->IsInstruction()) AddInstruction(HInstruction::cast(access));
- if (access->HasObservableSideEffects()) {
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- }
- if (!ast_context()->IsEffect()) Drop(1);
- return ast_context()->ReturnValue(value);
-}
-
-
-void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- DCHECK(prop != NULL);
- CHECK_ALIVE(VisitForValue(prop->obj()));
- if (!prop->key()->IsPropertyName()) {
- CHECK_ALIVE(VisitForValue(prop->key()));
- }
- CHECK_ALIVE(VisitForValue(expr->value()));
- BuildStore(expr, prop, expr->AssignmentSlot(), expr->id(),
- expr->AssignmentId(), expr->IsUninitialized());
-}
-
-HInstruction* HOptimizedGraphBuilder::InlineGlobalPropertyStore(
- LookupIterator* it, HValue* value, BailoutId ast_id) {
- Handle<PropertyCell> cell = it->GetPropertyCell();
- top_info()->dependencies()->AssumePropertyCell(cell);
- auto cell_type = it->property_details().cell_type();
- if (cell_type == PropertyCellType::kConstant ||
- cell_type == PropertyCellType::kUndefined) {
- Handle<Object> constant(cell->value(), isolate());
- if (value->IsConstant()) {
- HConstant* c_value = HConstant::cast(value);
- if (!constant.is_identical_to(c_value->handle(isolate()))) {
- Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
- Deoptimizer::EAGER);
- }
- } else {
- HValue* c_constant = Add<HConstant>(constant);
- IfBuilder builder(this);
- if (constant->IsNumber()) {
- builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
- } else {
- builder.If<HCompareObjectEqAndBranch>(value, c_constant);
- }
- builder.Then();
- builder.Else();
- Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
- Deoptimizer::EAGER);
- builder.End();
- }
- }
- HConstant* cell_constant = Add<HConstant>(cell);
- auto access = HObjectAccess::ForPropertyCellValue();
- if (cell_type == PropertyCellType::kConstantType) {
- switch (cell->GetConstantType()) {
- case PropertyCellConstantType::kSmi:
- access = access.WithRepresentation(Representation::Smi());
- break;
- case PropertyCellConstantType::kStableMap: {
- // First check that the previous value of the {cell} still has the
- // map that we are about to check the new {value} for. If not, then
- // the stable map assumption was invalidated and we cannot continue
- // with the optimized code.
- Handle<HeapObject> cell_value(HeapObject::cast(cell->value()));
- Handle<Map> cell_value_map(cell_value->map());
- if (!cell_value_map->is_stable()) {
- Bailout(kUnstableConstantTypeHeapObject);
- return nullptr;
- }
- top_info()->dependencies()->AssumeMapStable(cell_value_map);
- // Now check that the new {value} is a HeapObject with the same map
- Add<HCheckHeapObject>(value);
- value = Add<HCheckMaps>(value, cell_value_map);
- access = access.WithRepresentation(Representation::HeapObject());
- break;
- }
- }
- }
- HInstruction* instr = New<HStoreNamedField>(cell_constant, access, value);
- instr->ClearChangesFlag(kInobjectFields);
- instr->SetChangesFlag(kGlobalVars);
- return instr;
-}
-
-// Because not every expression has a position and there is no common
-// superclass of Assignment and CountOperation, we cannot just pass the
-// owning expression instead of position and ast_id separately.
-void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- FeedbackSlot slot,
- BailoutId ast_id) {
- Handle<JSGlobalObject> global(current_info()->global_object());
-
- // Lookup in script contexts.
- {
- Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
- ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(script_contexts, var->name(), &lookup)) {
- if (lookup.mode == CONST) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
- Handle<Context> script_context =
- ScriptContextTable::GetContext(script_contexts, lookup.context_index);
-
- Handle<Object> current_value =
- FixedArray::get(*script_context, lookup.slot_index, isolate());
-
- // If the value is not the hole, it will stay initialized,
- // so no need to generate a check.
- if (current_value->IsTheHole(isolate())) {
- return Bailout(kReferenceToUninitializedVariable);
- }
-
- HStoreNamedField* instr = Add<HStoreNamedField>(
- Add<HConstant>(script_context),
- HObjectAccess::ForContextSlot(lookup.slot_index), value);
- USE(instr);
- DCHECK(instr->HasObservableSideEffects());
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- return;
- }
- }
-
- LookupIterator it(global, var->name(), LookupIterator::OWN);
- if (CanInlineGlobalPropertyAccess(var, &it, STORE)) {
- HInstruction* instr = InlineGlobalPropertyStore(&it, value, ast_id);
- if (!instr) return;
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- }
- } else {
- HValue* global_object = Add<HLoadNamedField>(
- BuildGetNativeContext(), nullptr,
- HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
- Handle<FeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- HValue* name = Add<HConstant>(var->name());
- HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
- DCHECK(vector->IsStoreGlobalIC(slot));
- DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
- Callable callable = CodeFactory::StoreGlobalICInOptimizedCode(
- isolate(), function_language_mode());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {global_object, name, value, slot_value, vector_value};
- HCallWithDescriptor* instr =
- Add<HCallWithDescriptor>(Code::STORE_GLOBAL_IC, stub, 0,
- callable.descriptor(), ArrayVector(values));
- USE(instr);
- DCHECK(instr->HasObservableSideEffects());
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- }
-}
-
-
-void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
- Expression* target = expr->target();
- VariableProxy* proxy = target->AsVariableProxy();
- Property* prop = target->AsProperty();
- DCHECK(proxy == NULL || prop == NULL);
-
- // We have a second position recorded in the FullCodeGenerator to have
- // type feedback for the binary operation.
- BinaryOperation* operation = expr->binary_operation();
-
- if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->mode() == LET) {
- return Bailout(kUnsupportedLetCompoundAssignment);
- }
-
- CHECK_ALIVE(VisitForValue(operation));
-
- switch (var->location()) {
- case VariableLocation::UNALLOCATED:
- HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
- expr->AssignmentId());
- break;
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- if (var->mode() == CONST) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
- BindIfLive(var, Top());
- break;
-
- case VariableLocation::CONTEXT: {
- // Bail out if we try to mutate a parameter value in a function
- // using the arguments object. We do not (yet) correctly handle the
- // arguments property of the function.
- if (current_info()->scope()->arguments() != NULL) {
- // Parameters will be allocated to context slots. We have no
- // direct way to detect that the variable is a parameter, so we do
- // a linear search of the parameter variables.
- int count = current_info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (var == current_info()->scope()->parameter(i)) {
- Bailout(kAssignmentToParameterFunctionUsesArgumentsObject);
- }
- }
- }
-
- HStoreContextSlot::Mode mode;
-
- switch (var->mode()) {
- case LET:
- mode = HStoreContextSlot::kCheckDeoptimize;
- break;
- case CONST:
- if (var->throw_on_const_assignment(function_language_mode())) {
- return Bailout(kNonInitializerAssignmentToConst);
- } else {
- return ast_context()->ReturnValue(Pop());
- }
- default:
- mode = HStoreContextSlot::kNoCheck;
- }
-
- HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr = Add<HStoreContextSlot>(
- context, var->index(), mode, Top());
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- break;
- }
-
- case VariableLocation::LOOKUP:
- return Bailout(kCompoundAssignmentToLookupSlot);
-
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
- return ast_context()->ReturnValue(Pop());
-
- } else if (prop != NULL) {
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
- HValue* key = NULL;
- if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(prop->key()));
- key = Top();
- }
-
- CHECK_ALIVE(PushLoad(prop, object, key));
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- Push(BuildBinaryOperation(operation, left, right, PUSH_BEFORE_SIMULATE));
-
- BuildStore(expr, prop, expr->AssignmentSlot(), expr->id(),
- expr->AssignmentId(), expr->IsUninitialized());
- } else {
- return Bailout(kInvalidLhsInCompoundAssignment);
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- Property* prop = expr->target()->AsProperty();
- DCHECK(proxy == NULL || prop == NULL);
-
- if (expr->is_compound()) {
- HandleCompoundAssignment(expr);
- return;
- }
-
- if (prop != NULL) {
- HandlePropertyAssignment(expr);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
-
- if (var->mode() == CONST) {
- if (expr->op() != Token::INIT) {
- if (var->throw_on_const_assignment(function_language_mode())) {
- return Bailout(kNonInitializerAssignmentToConst);
- } else {
- CHECK_ALIVE(VisitForValue(expr->value()));
- return ast_context()->ReturnValue(Pop());
- }
- }
- }
-
- // Handle the assignment.
- switch (var->location()) {
- case VariableLocation::UNALLOCATED:
- CHECK_ALIVE(VisitForValue(expr->value()));
- HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
- expr->AssignmentId());
- return ast_context()->ReturnValue(Pop());
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL: {
- // Perform an initialization check for let declared variables
- // or parameters.
- if (var->mode() == LET && expr->op() == Token::ASSIGN) {
- HValue* env_value = environment()->Lookup(var);
- if (env_value == graph()->GetConstantHole()) {
- return Bailout(kAssignmentToLetVariableBeforeInitialization);
- }
- }
- // We do not allow the arguments object to occur in a context where it
- // may escape, but assignments to stack-allocated locals are
- // permitted.
- CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
- HValue* value = Pop();
- BindIfLive(var, value);
- return ast_context()->ReturnValue(value);
- }
-
- case VariableLocation::CONTEXT: {
- // Bail out if we try to mutate a parameter value in a function using
- // the arguments object. We do not (yet) correctly handle the
- // arguments property of the function.
- if (current_info()->scope()->arguments() != NULL) {
- // Parameters will be rewritten to context slots. We have no direct way
- // to detect that the variable is a parameter.
- int count = current_info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (var == current_info()->scope()->parameter(i)) {
- return Bailout(kAssignmentToParameterInArgumentsObject);
- }
- }
- }
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HStoreContextSlot::Mode mode;
- if (expr->op() == Token::ASSIGN) {
- switch (var->mode()) {
- case LET:
- mode = HStoreContextSlot::kCheckDeoptimize;
- break;
- case CONST:
- // If we reached this point, the only possibility
- // is a sloppy assignment to a function name.
- DCHECK(function_language_mode() == SLOPPY &&
- !var->throw_on_const_assignment(SLOPPY));
- return ast_context()->ReturnValue(Pop());
- default:
- mode = HStoreContextSlot::kNoCheck;
- }
- } else {
- DCHECK_EQ(Token::INIT, expr->op());
- mode = HStoreContextSlot::kNoCheck;
- }
-
- HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr = Add<HStoreContextSlot>(
- context, var->index(), mode, Top());
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- return ast_context()->ReturnValue(Pop());
- }
-
- case VariableLocation::LOOKUP:
- return Bailout(kAssignmentToLOOKUPVariable);
-
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
- } else {
- return Bailout(kInvalidLeftHandSideInAssignment);
- }
-}
-
-void HOptimizedGraphBuilder::VisitSuspend(Suspend* expr) {
- // Generators are not optimized, so we should never get here.
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (!ast_context()->IsEffect()) {
- // The parser turns invalid left-hand sides in assignments into throw
- // statements, which may not be in effect contexts. We might still try
- // to optimize such functions; bail out now if we do.
- return Bailout(kInvalidLeftHandSideInAssignment);
- }
- CHECK_ALIVE(VisitForValue(expr->exception()));
-
- HValue* value = environment()->Pop();
- if (!is_tracking_positions()) SetSourcePosition(expr->position());
- Add<HPushArguments>(value);
- Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kThrow), 1);
- Add<HSimulate>(expr->id());
-
- // If the throw definitely exits the function, we can finish with a dummy
- // control flow at this point. This is not the case if the throw is inside
- // an inlined function which may be replaced.
- if (call_context() == NULL) {
- FinishExitCurrentBlock(New<HAbnormalExit>());
- }
-}
-
-
-HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
- if (string->IsConstant()) {
- HConstant* c_string = HConstant::cast(string);
- if (c_string->HasStringValue()) {
- return Add<HConstant>(c_string->StringValue()->map()->instance_type());
- }
- }
- return Add<HLoadNamedField>(
- Add<HLoadNamedField>(string, nullptr, HObjectAccess::ForMap()), nullptr,
- HObjectAccess::ForMapInstanceType());
-}
-
-
-HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
- return AddInstruction(BuildLoadStringLength(string));
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) {
- if (string->IsConstant()) {
- HConstant* c_string = HConstant::cast(string);
- if (c_string->HasStringValue()) {
- return New<HConstant>(c_string->StringValue()->length());
- }
- }
- return New<HLoadNamedField>(string, nullptr,
- HObjectAccess::ForStringLength());
-}
-
-HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
- HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
- if (is_uninitialized) {
- Add<HDeoptimize>(
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess,
- Deoptimizer::SOFT);
- }
- Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
-
- HValue* key = Add<HConstant>(name);
- HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
-
- if (access_type == LOAD) {
- HValue* values[] = {object, key, slot_value, vector_value};
- if (!expr->AsProperty()->key()->IsPropertyName()) {
- DCHECK(vector->IsKeyedLoadIC(slot));
- // It's possible that a keyed load of a constant string was converted
- // to a named load. Here, at the last minute, we need to make sure to
- // use a generic Keyed Load if we are using the type vector, because
- // it has to share information with full code.
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result =
- New<HCallWithDescriptor>(Code::KEYED_LOAD_IC, stub, 0,
- callable.descriptor(), ArrayVector(values));
- return result;
- }
- DCHECK(vector->IsLoadIC(slot));
- Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result = New<HCallWithDescriptor>(
- Code::LOAD_IC, stub, 0, callable.descriptor(), ArrayVector(values));
- return result;
-
- } else {
- HValue* values[] = {object, key, value, slot_value, vector_value};
- if (vector->IsKeyedStoreIC(slot)) {
- // It's possible that a keyed store of a constant string was converted
- // to a named store. Here, at the last minute, we need to make sure to
- // use a generic Keyed Store if we are using the type vector, because
- // it has to share information with full code.
- DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
- Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), function_language_mode());
- HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result =
- New<HCallWithDescriptor>(Code::KEYED_STORE_IC, stub, 0,
- callable.descriptor(), ArrayVector(values));
- return result;
- }
- HCallWithDescriptor* result;
- if (vector->IsStoreOwnIC(slot)) {
- Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- result = New<HCallWithDescriptor>(
- Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
- } else {
- DCHECK(vector->IsStoreIC(slot));
- DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), function_language_mode());
- HValue* stub = Add<HConstant>(callable.code());
- result = New<HCallWithDescriptor>(
- Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
- }
- return result;
- }
-}
-
-HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
- HValue* object, HValue* key, HValue* value) {
- Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
- HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
-
- if (access_type == LOAD) {
- HValue* values[] = {object, key, slot_value, vector_value};
-
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result =
- New<HCallWithDescriptor>(Code::KEYED_LOAD_IC, stub, 0,
- callable.descriptor(), ArrayVector(values));
- return result;
- } else {
- HValue* values[] = {object, key, value, slot_value, vector_value};
-
- Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), function_language_mode());
- HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result =
- New<HCallWithDescriptor>(Code::KEYED_STORE_IC, stub, 0,
- callable.descriptor(), ArrayVector(values));
- return result;
- }
-}
-
-
-LoadKeyedHoleMode HOptimizedGraphBuilder::BuildKeyedHoleMode(Handle<Map> map) {
- // Loads from a "stock" fast holey double array can elide the hole check.
- // Loads from a "stock" fast holey array can convert the hole to undefined
- // with impunity.
- LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
- bool holey_double_elements =
- *map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS);
- bool holey_elements =
- *map == isolate()->get_initial_js_array_map(FAST_HOLEY_ELEMENTS);
- if ((holey_double_elements || holey_elements) &&
- isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
- load_mode =
- holey_double_elements ? ALLOW_RETURN_HOLE : CONVERT_HOLE_TO_UNDEFINED;
-
- Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
- Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
- BuildCheckPrototypeMaps(prototype, object_prototype);
- graph()->MarkDependsOnEmptyArrayProtoElements();
- }
- return load_mode;
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode) {
- HCheckMaps* checked_object = Add<HCheckMaps>(object, map, dependency);
-
- if (access_type == STORE && map->prototype()->IsJSObject()) {
- // Monomorphic stores need a prototype chain check because shape
- // changes could allow callbacks on elements in the chain that
- // aren't compatible with monomorphic keyed stores.
- PrototypeIterator iter(map);
- JSObject* holder = NULL;
- while (!iter.IsAtEnd()) {
- // JSProxies can't occur here because we wouldn't have installed a
- // non-generic IC if there were any.
- holder = *PrototypeIterator::GetCurrent<JSObject>(iter);
- iter.Advance();
- }
- DCHECK(holder && holder->IsJSObject());
-
- BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
- Handle<JSObject>(holder));
- }
-
- LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
- return BuildUncheckedMonomorphicElementAccess(
- checked_object, key, val,
- map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), access_type,
- load_mode, store_mode);
-}
-
-
-static bool CanInlineElementAccess(Handle<Map> map) {
- return map->IsJSObjectMap() &&
- (map->has_fast_elements() || map->has_fixed_typed_array_elements()) &&
- !map->has_indexed_interceptor() && !map->is_access_check_needed();
-}
-
-
-HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
- HValue* object,
- HValue* key,
- HValue* val,
- SmallMapList* maps) {
- // For polymorphic loads of similar elements kinds (i.e. all tagged or all
- // double), always use the "worst case" code without a transition. This is
- // much faster than transitioning the elements to the worst case, trading a
- // HTransitionElements for a HCheckMaps, and avoiding mutation of the array.
- bool has_double_maps = false;
- bool has_smi_or_object_maps = false;
- bool has_js_array_access = false;
- bool has_non_js_array_access = false;
- bool has_seen_holey_elements = false;
- Handle<Map> most_general_consolidated_map;
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- if (!CanInlineElementAccess(map)) return NULL;
- // Don't allow mixing of JSArrays with JSObjects.
- if (map->instance_type() == JS_ARRAY_TYPE) {
- if (has_non_js_array_access) return NULL;
- has_js_array_access = true;
- } else if (has_js_array_access) {
- return NULL;
- } else {
- has_non_js_array_access = true;
- }
- // Don't allow mixed, incompatible elements kinds.
- if (map->has_fast_double_elements()) {
- if (has_smi_or_object_maps) return NULL;
- has_double_maps = true;
- } else if (map->has_fast_smi_or_object_elements()) {
- if (has_double_maps) return NULL;
- has_smi_or_object_maps = true;
- } else {
- return NULL;
- }
- // Remember if we've ever seen holey elements.
- if (IsHoleyElementsKind(map->elements_kind())) {
- has_seen_holey_elements = true;
- }
- // Remember the most general elements kind; the code for its load will
- // properly handle all of the more specific cases.
- if ((i == 0) || IsMoreGeneralElementsKindTransition(
- most_general_consolidated_map->elements_kind(),
- map->elements_kind())) {
- most_general_consolidated_map = map;
- }
- }
- if (!has_double_maps && !has_smi_or_object_maps) return NULL;
-
- HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
- // FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
- // If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
- ElementsKind consolidated_elements_kind = has_seen_holey_elements
- ? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
- : most_general_consolidated_map->elements_kind();
- LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
- if (has_seen_holey_elements) {
- // Make sure that all of the maps we are handling have the initial array
- // prototype.
- bool saw_non_array_prototype = false;
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- if (map->prototype() != *isolate()->initial_array_prototype()) {
- // We can't guarantee that loading the hole is safe. The prototype may
- // have an element at this position.
- saw_non_array_prototype = true;
- break;
- }
- }
-
- if (!saw_non_array_prototype) {
- Handle<Map> holey_map = handle(
- isolate()->get_initial_js_array_map(consolidated_elements_kind));
- load_mode = BuildKeyedHoleMode(holey_map);
- if (load_mode != NEVER_RETURN_HOLE) {
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- // The prototype check was already done for the holey map in
- // BuildKeyedHoleMode.
- if (!map.is_identical_to(holey_map)) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()),
- isolate());
- Handle<JSObject> object_prototype =
- isolate()->initial_object_prototype();
- BuildCheckPrototypeMaps(prototype, object_prototype);
- }
- }
- }
- }
- }
- HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- checked_object, key, val,
- most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
- consolidated_elements_kind, LOAD, load_mode, STANDARD_STORE);
- return instr;
-}
-
-HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
- Expression* expr, FeedbackSlot slot, HValue* object, HValue* key,
- HValue* val, SmallMapList* maps, PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode, bool* has_side_effects) {
- *has_side_effects = false;
- BuildCheckHeapObject(object);
-
- if (access_type == LOAD) {
- HInstruction* consolidated_load =
- TryBuildConsolidatedElementLoad(object, key, val, maps);
- if (consolidated_load != NULL) {
- *has_side_effects |= consolidated_load->HasObservableSideEffects();
- return consolidated_load;
- }
- }
-
- // Elements_kind transition support.
- MapHandles transition_target;
- transition_target.reserve(maps->length());
- // Collect possible transition targets.
- MapHandles possible_transitioned_maps;
- possible_transitioned_maps.reserve(maps->length());
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- // Loads from strings or loads with a mix of string and non-string maps
- // shouldn't be handled polymorphically.
- DCHECK(access_type != LOAD || !map->IsStringMap());
- ElementsKind elements_kind = map->elements_kind();
- if (CanInlineElementAccess(map) && IsFastElementsKind(elements_kind) &&
- elements_kind != GetInitialFastElementsKind()) {
- possible_transitioned_maps.push_back(map);
- }
- if (IsSloppyArgumentsElementsKind(elements_kind)) {
- HInstruction* result =
- BuildKeyedGeneric(access_type, expr, slot, object, key, val);
- *has_side_effects = result->HasObservableSideEffects();
- return AddInstruction(result);
- }
- }
- // Get transition target for each map (NULL == no transition).
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- // Don't generate elements kind transitions from stable maps.
- Map* transitioned_map =
- map->is_stable()
- ? nullptr
- : map->FindElementsKindTransitionedMap(possible_transitioned_maps);
- if (transitioned_map != nullptr) {
- transition_target.push_back(handle(transitioned_map));
- } else {
- transition_target.push_back(Handle<Map>());
- }
- }
-
- MapHandles untransitionable_maps;
- untransitionable_maps.reserve(maps->length());
- HTransitionElementsKind* transition = NULL;
- for (int i = 0; i < maps->length(); ++i) {
- Handle<Map> map = maps->at(i);
- DCHECK(map->IsMap());
- if (!transition_target.at(i).is_null()) {
- DCHECK(Map::IsValidElementsTransition(
- map->elements_kind(),
- transition_target.at(i)->elements_kind()));
- transition = Add<HTransitionElementsKind>(object, map,
- transition_target.at(i));
- } else {
- untransitionable_maps.push_back(map);
- }
- }
-
- // If only one map is left after transitioning, handle this case
- // monomorphically.
- DCHECK(untransitionable_maps.size() >= 1);
- if (untransitionable_maps.size() == 1) {
- Handle<Map> untransitionable_map = untransitionable_maps[0];
- HInstruction* instr = NULL;
- if (!CanInlineElementAccess(untransitionable_map)) {
- instr = AddInstruction(
- BuildKeyedGeneric(access_type, expr, slot, object, key, val));
- } else {
- instr = BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, access_type,
- store_mode);
- }
- *has_side_effects |= instr->HasObservableSideEffects();
- return access_type == STORE ? val : instr;
- }
-
- HBasicBlock* join = graph()->CreateBasicBlock();
-
- for (Handle<Map> map : untransitionable_maps) {
- ElementsKind elements_kind = map->elements_kind();
- HBasicBlock* this_map = graph()->CreateBasicBlock();
- HBasicBlock* other_map = graph()->CreateBasicBlock();
- HCompareMap* mapcompare =
- New<HCompareMap>(object, map, this_map, other_map);
- FinishCurrentBlock(mapcompare);
-
- set_current_block(this_map);
- HInstruction* access = NULL;
- if (!CanInlineElementAccess(map)) {
- access = AddInstruction(
- BuildKeyedGeneric(access_type, expr, slot, object, key, val));
- } else {
- DCHECK(IsFastElementsKind(elements_kind) ||
- IsFixedTypedArrayElementsKind(elements_kind));
- LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
- // Happily, mapcompare is a checked object.
- access = BuildUncheckedMonomorphicElementAccess(
- mapcompare, key, val,
- map->instance_type() == JS_ARRAY_TYPE,
- elements_kind, access_type,
- load_mode,
- store_mode);
- }
- *has_side_effects |= access->HasObservableSideEffects();
- // The caller will use has_side_effects and add a correct Simulate.
- access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (access_type == LOAD) {
- Push(access);
- }
- NoObservableSideEffectsScope scope(this);
- GotoNoSimulate(join);
- set_current_block(other_map);
- }
-
- // Ensure that we visited at least one map above that goes to join. This is
- // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
- // rather than joining the join block. If this becomes an issue, insert a
- // generic access in the case length() == 0.
- DCHECK(join->predecessors()->length() > 0);
- // Deopt if none of the cases matched.
- NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization(
- DeoptimizeReason::kUnknownMapInPolymorphicElementAccess);
- set_current_block(join);
- return access_type == STORE ? val : Pop();
-}
-
-HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
- HValue* obj, HValue* key, HValue* val, Expression* expr, FeedbackSlot slot,
- BailoutId ast_id, BailoutId return_id, PropertyAccessType access_type,
- bool* has_side_effects) {
- // A keyed name access with type feedback may contain the name.
- Handle<FeedbackVector> vector = handle(current_feedback_vector(), isolate());
- HValue* expected_key = key;
- if (!key->ActualValue()->IsConstant()) {
- Name* name = nullptr;
- if (access_type == LOAD) {
- KeyedLoadICNexus nexus(vector, slot);
- name = nexus.FindFirstName();
- } else {
- KeyedStoreICNexus nexus(vector, slot);
- name = nexus.FindFirstName();
- }
- if (name != nullptr) {
- Handle<Name> handle_name(name);
- expected_key = Add<HConstant>(handle_name);
- // We need a check against the key.
- bool in_new_space = isolate()->heap()->InNewSpace(*handle_name);
- Unique<Name> unique_name = Unique<Name>::CreateUninitialized(handle_name);
- Add<HCheckValue>(key, unique_name, in_new_space);
- }
- }
- if (expected_key->ActualValue()->IsConstant()) {
- Handle<Object> constant =
- HConstant::cast(expected_key->ActualValue())->handle(isolate());
- uint32_t array_index;
- if ((constant->IsString() &&
- !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) ||
- constant->IsSymbol()) {
- if (!constant->IsUniqueName()) {
- constant = isolate()->factory()->InternalizeString(
- Handle<String>::cast(constant));
- }
- HValue* access =
- BuildNamedAccess(access_type, ast_id, return_id, expr, slot, obj,
- Handle<Name>::cast(constant), val, false);
- if (access == NULL || access->IsPhi() ||
- HInstruction::cast(access)->IsLinked()) {
- *has_side_effects = false;
- } else {
- HInstruction* instr = HInstruction::cast(access);
- AddInstruction(instr);
- *has_side_effects = instr->HasObservableSideEffects();
- }
- return access;
- }
- }
-
- DCHECK(!expr->IsPropertyName());
- HInstruction* instr = NULL;
-
- SmallMapList* maps;
- bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, this);
-
- bool force_generic = false;
- if (expr->GetKeyType() == PROPERTY) {
- // Non-Generic accesses assume that elements are being accessed, and will
- // deopt for non-index keys, which the IC knows will occur.
- // TODO(jkummerow): Consider adding proper support for property accesses.
- force_generic = true;
- monomorphic = false;
- } else if (access_type == STORE &&
- (monomorphic || (maps != NULL && !maps->is_empty()))) {
- // Stores can't be mono/polymorphic if their prototype chain has dictionary
- // elements. However, a receiver map that has dictionary elements itself
- // should be left to normal mono/poly behavior (the other maps may benefit
- // from highly optimized stores).
- for (int i = 0; i < maps->length(); i++) {
- Handle<Map> current_map = maps->at(i);
- if (current_map->DictionaryElementsInPrototypeChainOnly()) {
- force_generic = true;
- monomorphic = false;
- break;
- }
- }
- } else if (access_type == LOAD && !monomorphic &&
- (maps != NULL && !maps->is_empty())) {
- // Polymorphic loads have to go generic if any of the maps are strings.
- // If some, but not all of the maps are strings, we should go generic
- // because polymorphic access wants to key on ElementsKind and isn't
- // compatible with strings.
- for (int i = 0; i < maps->length(); i++) {
- Handle<Map> current_map = maps->at(i);
- if (current_map->IsStringMap()) {
- force_generic = true;
- break;
- }
- }
- }
-
- if (monomorphic) {
- Handle<Map> map = maps->first();
- if (!CanInlineElementAccess(map)) {
- instr = AddInstruction(
- BuildKeyedGeneric(access_type, expr, slot, obj, key, val));
- } else {
- BuildCheckHeapObject(obj);
- instr = BuildMonomorphicElementAccess(
- obj, key, val, NULL, map, access_type, expr->GetStoreMode());
- }
- } else if (!force_generic && (maps != NULL && !maps->is_empty())) {
- return HandlePolymorphicElementAccess(expr, slot, obj, key, val, maps,
- access_type, expr->GetStoreMode(),
- has_side_effects);
- } else {
- if (access_type == STORE) {
- if (expr->IsAssignment() &&
- expr->AsAssignment()->HasNoTypeInformation()) {
- Add<HDeoptimize>(
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess,
- Deoptimizer::SOFT);
- }
- } else {
- if (expr->AsProperty()->HasNoTypeInformation()) {
- Add<HDeoptimize>(
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess,
- Deoptimizer::SOFT);
- }
- }
- instr = AddInstruction(
- BuildKeyedGeneric(access_type, expr, slot, obj, key, val));
- }
- *has_side_effects = instr->HasObservableSideEffects();
- return instr;
-}
-
-
-void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
- // Outermost function already has arguments on the stack.
- if (function_state()->outer() == NULL) return;
-
- if (function_state()->arguments_pushed()) return;
-
- // Push arguments when entering inlined function.
- HEnterInlined* entry = function_state()->entry();
- entry->set_arguments_pushed();
-
- HArgumentsObject* arguments = entry->arguments_object();
- const ZoneList<HValue*>* arguments_values = arguments->arguments_values();
-
- HInstruction* insert_after = entry;
- for (int i = 0; i < arguments_values->length(); i++) {
- HValue* argument = arguments_values->at(i);
- HInstruction* push_argument = New<HPushArguments>(argument);
- push_argument->InsertAfter(insert_after);
- insert_after = push_argument;
- }
-
- HArgumentsElements* arguments_elements = New<HArgumentsElements>(true);
- arguments_elements->ClearFlag(HValue::kUseGVN);
- arguments_elements->InsertAfter(insert_after);
- function_state()->set_arguments_elements(arguments_elements);
-}
-
-bool HOptimizedGraphBuilder::IsAnyParameterContextAllocated() {
- int count = current_info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (current_info()->scope()->parameter(i)->location() ==
- VariableLocation::CONTEXT) {
- return true;
- }
- }
- return false;
-}
-
-bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- if (proxy == NULL) return false;
- if (!proxy->var()->IsStackAllocated()) return false;
- if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
- return false;
- }
-
- HInstruction* result = NULL;
- if (expr->key()->IsPropertyName()) {
- Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- if (!String::Equals(name, isolate()->factory()->length_string())) {
- return false;
- }
-
- // Make sure we visit the arguments object so that the liveness analysis
- // still records the access.
- CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
- Drop(1);
-
- if (function_state()->outer() == NULL) {
- HInstruction* elements = Add<HArgumentsElements>(false);
- result = New<HArgumentsLength>(elements);
- } else {
- // Number of arguments without receiver.
- int argument_count = environment()->
- arguments_environment()->parameter_count() - 1;
- result = New<HConstant>(argument_count);
- }
- } else {
- // We need to take into account the KEYED_LOAD_IC feedback to guard the
- // HBoundsCheck instructions below.
- if (!expr->IsMonomorphic() && !expr->IsUninitialized()) return false;
- if (IsAnyParameterContextAllocated()) return false;
- CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
- CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
- HValue* key = Pop();
- Drop(1); // Arguments object.
- if (function_state()->outer() == NULL) {
- HInstruction* elements = Add<HArgumentsElements>(false);
- HInstruction* length = Add<HArgumentsLength>(elements);
- HInstruction* checked_key = Add<HBoundsCheck>(key, length);
- result = New<HAccessArgumentsAt>(elements, length, checked_key);
- } else {
- EnsureArgumentsArePushedForAccess();
-
- // Number of arguments without receiver.
- HInstruction* elements = function_state()->arguments_elements();
- int argument_count = environment()->
- arguments_environment()->parameter_count() - 1;
- HInstruction* length = Add<HConstant>(argument_count);
- HInstruction* checked_key = Add<HBoundsCheck>(key, length);
- result = New<HAccessArgumentsAt>(elements, length, checked_key);
- }
- }
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
-}
-
-HValue* HOptimizedGraphBuilder::BuildNamedAccess(
- PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
- Expression* expr, FeedbackSlot slot, HValue* object, Handle<Name> name,
- HValue* value, bool is_uninitialized) {
- SmallMapList* maps;
- ComputeReceiverTypes(expr, object, &maps, this);
- DCHECK(maps != NULL);
-
- // Check for special case: Access via a single map to the global proxy
- // can also be handled monomorphically.
- if (maps->length() > 0) {
- Handle<Object> map_constructor =
- handle(maps->first()->GetConstructor(), isolate());
- if (map_constructor->IsJSFunction()) {
- Handle<Context> map_context =
- handle(Handle<JSFunction>::cast(map_constructor)->context());
- Handle<Context> current_context(current_info()->context());
- bool is_same_context_global_proxy_access =
- maps->length() == 1 && // >1 map => fallback to polymorphic
- maps->first()->IsJSGlobalProxyMap() &&
- (*map_context == *current_context);
- if (is_same_context_global_proxy_access) {
- Handle<JSGlobalObject> global_object(current_info()->global_object());
- LookupIterator it(global_object, name, LookupIterator::OWN);
- if (CanInlineGlobalPropertyAccess(&it, access)) {
- BuildCheckHeapObject(object);
- Add<HCheckMaps>(object, maps);
- if (access == LOAD) {
- InlineGlobalPropertyLoad(&it, expr->id());
- return nullptr;
- } else {
- return InlineGlobalPropertyStore(&it, value, expr->id());
- }
- }
- }
- }
-
- PropertyAccessInfo info(this, access, maps->first(), name);
- if (!info.CanAccessAsMonomorphic(maps)) {
- HandlePolymorphicNamedFieldAccess(access, expr, slot, ast_id, return_id,
- object, value, maps, name);
- return NULL;
- }
-
- HValue* checked_object;
- // AstType::Number() is only supported by polymorphic load/call handling.
- DCHECK(!info.IsNumberType());
- BuildCheckHeapObject(object);
- if (AreStringTypes(maps)) {
- checked_object =
- Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
- } else {
- checked_object = Add<HCheckMaps>(object, maps);
- }
- return BuildMonomorphicAccess(
- &info, object, checked_object, value, ast_id, return_id);
- }
-
- return BuildNamedGeneric(access, expr, slot, object, name, value,
- is_uninitialized);
-}
-
-
-void HOptimizedGraphBuilder::PushLoad(Property* expr,
- HValue* object,
- HValue* key) {
- ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
- Push(object);
- if (key != NULL) Push(key);
- BuildLoad(expr, expr->LoadId());
-}
-
-
-void HOptimizedGraphBuilder::BuildLoad(Property* expr,
- BailoutId ast_id) {
- HInstruction* instr = NULL;
- if (expr->IsStringAccess() && expr->GetKeyType() == ELEMENT) {
- HValue* index = Pop();
- HValue* string = Pop();
- HInstruction* char_code = BuildStringCharCodeAt(string, index);
- AddInstruction(char_code);
- if (char_code->IsConstant()) {
- HConstant* c_code = HConstant::cast(char_code);
- if (c_code->HasNumberValue() && std::isnan(c_code->DoubleValue())) {
- Add<HDeoptimize>(DeoptimizeReason::kOutOfBounds, Deoptimizer::EAGER);
- }
- }
- instr = NewUncasted<HStringCharFromCode>(char_code);
-
- } else if (expr->key()->IsPropertyName()) {
- Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- HValue* object = Pop();
-
- HValue* value = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr,
- expr->PropertyFeedbackSlot(), object, name,
- NULL, expr->IsUninitialized());
- if (value == NULL) return;
- if (value->IsPhi()) return ast_context()->ReturnValue(value);
- instr = HInstruction::cast(value);
- if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
-
- } else {
- HValue* key = Pop();
- HValue* obj = Pop();
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, expr->PropertyFeedbackSlot(), ast_id,
- expr->LoadId(), LOAD, &has_side_effects);
- if (has_side_effects) {
- if (ast_context()->IsEffect()) {
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- } else {
- Push(load);
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- Drop(1);
- }
- }
- if (load == NULL) return;
- return ast_context()->ReturnValue(load);
- }
- return ast_context()->ReturnInstruction(instr, ast_id);
-}
-
-
-void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- if (TryArgumentsAccess(expr)) return;
-
- CHECK_ALIVE(VisitForValue(expr->obj()));
- if (!expr->key()->IsPropertyName() || expr->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(expr->key()));
- }
-
- BuildLoad(expr, expr->id());
-}
-
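- // Adds a map check against |constant|'s current map (ignoring elements-kind
- // transitions) and, when ensure_no_elements is set, deoptimizes if the
- // object's elements are not the empty fixed array.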
-HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
- bool ensure_no_elements) {
- HCheckMaps* check = Add<HCheckMaps>(
- Add<HConstant>(constant), handle(constant->map()));
- check->ClearDependsOnFlag(kElementsKind);
- if (ensure_no_elements) {
- // TODO(ishell): remove this once we support NO_ELEMENTS elements kind.
- HValue* elements = AddLoadElements(check, nullptr);
- HValue* empty_elements =
- Add<HConstant>(isolate()->factory()->empty_fixed_array());
- IfBuilder if_empty(this);
- if_empty.IfNot<HCompareObjectEqAndBranch>(elements, empty_elements);
- if_empty.ThenDeopt(DeoptimizeReason::kWrongMap);
- if_empty.End();
- }
- return check;
-}
-
-HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder,
- bool ensure_no_elements) {
- PrototypeIterator iter(isolate(), prototype, kStartAtReceiver);
- while (holder.is_null() ||
- !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
- BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter),
- ensure_no_elements);
- iter.Advance();
- if (iter.IsAtEnd()) {
- return NULL;
- }
- }
- return BuildConstantMapCheck(holder);
-}
-
-
-void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map) {
- if (!holder.is_null()) {
- Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- BuildCheckPrototypeMaps(prototype, holder);
- }
-}
-
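- // Emits a check that |object| is callable (not a Smi and its map has the
- // kIsCallable bit set) and calls Runtime::kThrowCalledNonCallable otherwise;
- // used before emitting explicit tail calls below.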
-void HOptimizedGraphBuilder::BuildEnsureCallable(HValue* object) {
- NoObservableSideEffectsScope scope(this);
- const Runtime::Function* throw_called_non_callable =
- Runtime::FunctionForId(Runtime::kThrowCalledNonCallable);
-
- IfBuilder is_not_function(this);
- HValue* smi_check = is_not_function.If<HIsSmiAndBranch>(object);
- is_not_function.Or();
- HValue* map = AddLoadMap(object, smi_check);
- HValue* bit_field =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
- HValue* bit_field_masked = AddUncasted<HBitwise>(
- Token::BIT_AND, bit_field, Add<HConstant>(1 << Map::kIsCallable));
- is_not_function.IfNot<HCompareNumericAndBranch>(
- bit_field_masked, Add<HConstant>(1 << Map::kIsCallable), Token::EQ);
- is_not_function.Then();
- {
- Add<HPushArguments>(object);
- Add<HCallRuntime>(throw_called_non_callable, 1);
- }
- is_not_function.End();
-}
-
-HInstruction* HOptimizedGraphBuilder::NewCallFunction(
- HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
- ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
- if (syntactic_tail_call_mode == TailCallMode::kAllow) {
- BuildEnsureCallable(function);
- } else {
- DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
- }
- HValue* arity = Add<HConstant>(argument_count - 1);
-
- HValue* op_vals[] = {function, arity};
-
- Callable callable =
- CodeFactory::Call(isolate(), convert_mode, tail_call_mode);
- HConstant* stub = Add<HConstant>(callable.code());
-
- return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- ArrayVector(op_vals),
- syntactic_tail_call_mode);
-}
-
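- // Like NewCallFunction above, but routes the call through the CallIC stub so
- // that invocation feedback is recorded in the given feedback vector slot.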
-HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
- HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
- ConvertReceiverMode convert_mode, TailCallMode tail_call_mode,
- FeedbackSlot slot) {
- if (syntactic_tail_call_mode == TailCallMode::kAllow) {
- BuildEnsureCallable(function);
- } else {
- DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
- }
- int arity = argument_count - 1;
- Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
- HValue* arity_val = Add<HConstant>(arity);
- HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
- HValue* vector_val = Add<HConstant>(vector);
-
- HValue* op_vals[] = {function, arity_val, index_val, vector_val};
- Callable callable =
- CodeFactory::CallIC(isolate(), convert_mode, tail_call_mode);
- HConstant* stub = Add<HConstant>(callable.code());
-
- return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- ArrayVector(op_vals),
- syntactic_tail_call_mode);
-}
-
-HInstruction* HOptimizedGraphBuilder::NewCallConstantFunction(
- Handle<JSFunction> function, int argument_count,
- TailCallMode syntactic_tail_call_mode, TailCallMode tail_call_mode) {
- HValue* target = Add<HConstant>(function);
- return New<HInvokeFunction>(target, function, argument_count,
- syntactic_tail_call_mode, tail_call_mode);
-}
-
-
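- // Bookkeeping for ordering polymorphic call targets: remembers the original
- // map index together with the target's profiler tick count and AST size.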
-class FunctionSorter {
- public:
- explicit FunctionSorter(int index = 0, int ticks = 0, int size = 0)
- : index_(index), ticks_(ticks), size_(size) {}
-
- int index() const { return index_; }
- int ticks() const { return ticks_; }
- int size() const { return size_; }
-
- private:
- int index_;
- int ticks_;
- int size_;
-};
-
-
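- // Candidates with more profiler ticks sort first; ties are broken by
- // preferring the smaller AST, so hot and cheap targets are handled first.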
-inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
- int diff = lhs.ticks() - rhs.ticks();
- if (diff != 0) return diff > 0;
- return lhs.size() < rhs.size();
-}
-
-
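- // Generates a named call whose receiver has been seen with several maps:
- // each known constant-function target is called (or inlined) behind a map
- // check, and unhandled maps either deoptimize or fall back to a generic call.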
-void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* maps,
- Handle<String> name) {
- int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- FunctionSorter order[kMaxCallPolymorphism];
-
- bool handle_smi = false;
- bool handled_string = false;
- int ordered_functions = 0;
-
- TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
- TailCallMode tail_call_mode =
- function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
-
- int i;
- for (i = 0; i < maps->length() && ordered_functions < kMaxCallPolymorphism;
- ++i) {
- PropertyAccessInfo info(this, LOAD, maps->at(i), name);
- if (info.CanAccessMonomorphic() && info.IsDataConstant() &&
- info.constant()->IsJSFunction()) {
- if (info.IsStringType()) {
- if (handled_string) continue;
- handled_string = true;
- }
- Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
- if (info.IsNumberType()) {
- handle_smi = true;
- }
- expr->set_target(target);
- order[ordered_functions++] = FunctionSorter(
- i, target->shared()->profiler_ticks(), InliningAstSize(target));
- }
- }
-
- std::sort(order, order + ordered_functions);
-
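- // The loop above stopped before visiting every receiver map (the
- // polymorphism limit was reached), so drop the per-map handling and use
- // the generic call path below instead.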
- if (i < maps->length()) {
- maps->Clear();
- ordered_functions = -1;
- }
-
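- // Emit a dispatch chain: for each remaining candidate map, a map (or
- // number/string) check guards either an inlined body or a direct call,
- // and all paths meet in a common join block.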
- HBasicBlock* number_block = NULL;
- HBasicBlock* join = NULL;
- handled_string = false;
- int count = 0;
-
- for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn].index();
- PropertyAccessInfo info(this, LOAD, maps->at(i), name);
- if (info.IsStringType()) {
- if (handled_string) continue;
- handled_string = true;
- }
- // Reloads the target.
- info.CanAccessMonomorphic();
- Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
-
- expr->set_target(target);
- if (count == 0) {
- // Only needed once.
- join = graph()->CreateBasicBlock();
- if (handle_smi) {
- HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
- HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
- number_block = graph()->CreateBasicBlock();
- FinishCurrentBlock(New<HIsSmiAndBranch>(
- receiver, empty_smi_block, not_smi_block));
- GotoNoSimulate(empty_smi_block, number_block);
- set_current_block(not_smi_block);
- } else {
- BuildCheckHeapObject(receiver);
- }
- }
- ++count;
- HBasicBlock* if_true = graph()->CreateBasicBlock();
- HBasicBlock* if_false = graph()->CreateBasicBlock();
- HUnaryControlInstruction* compare;
-
- Handle<Map> map = info.map();
- if (info.IsNumberType()) {
- Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
- compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
- } else if (info.IsStringType()) {
- compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
- } else {
- compare = New<HCompareMap>(receiver, map, if_true, if_false);
- }
- FinishCurrentBlock(compare);
-
- if (info.IsNumberType()) {
- GotoNoSimulate(if_true, number_block);
- if_true = number_block;
- }
-
- set_current_block(if_true);
-
- AddCheckPrototypeMaps(info.holder(), map);
-
- HValue* function = Add<HConstant>(expr->target());
- environment()->SetExpressionStackAt(0, function);
- Push(receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- bool needs_wrapping = info.NeedsWrappingFor(target);
- bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
- if (FLAG_trace_inlining && try_inline) {
- Handle<JSFunction> caller = current_info()->closure();
- std::unique_ptr<char[]> caller_name =
- caller->shared()->DebugName()->ToCString();
- PrintF("Trying to inline the polymorphic call to %s from %s\n",
- name->ToCString().get(),
- caller_name.get());
- }
- if (try_inline && TryInlineCall(expr)) {
- // Trying to inline will signal that we should bail out from the
- // entire compilation by setting stack overflow on the visitor.
- if (HasStackOverflow()) return;
- } else {
- // Since HWrapReceiver currently cannot actually wrap numbers and strings,
- // use the regular call builtin for method calls to wrap the receiver.
- // TODO(verwaest): Support creation of value wrappers directly in
- // HWrapReceiver.
- HInstruction* call =
- needs_wrapping
- ? NewCallFunction(
- function, argument_count, syntactic_tail_call_mode,
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode)
- : NewCallConstantFunction(target, argument_count,
- syntactic_tail_call_mode,
- tail_call_mode);
- PushArgumentsFromEnvironment(argument_count);
- AddInstruction(call);
- Drop(1); // Drop the function.
- if (!ast_context()->IsEffect()) Push(call);
- }
-
- if (current_block() != NULL) Goto(join);
- set_current_block(if_false);
- }
-
- // Finish up. Unconditionally deoptimize if we've handled all the maps we
- // know about and do not want to handle ones we've never seen. Otherwise
- // use a generic IC.
- if (ordered_functions == maps->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization(
- DeoptimizeReason::kUnknownMapInPolymorphicCall);
- } else {
- Property* prop = expr->expression()->AsProperty();
- HInstruction* function =
- BuildNamedGeneric(LOAD, prop, prop->PropertyFeedbackSlot(), receiver,
- name, NULL, prop->IsUninitialized());
- AddInstruction(function);
- Push(function);
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
-
- environment()->SetExpressionStackAt(1, function);
- environment()->SetExpressionStackAt(0, receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- HInstruction* call = NewCallFunction(
- function, argument_count, syntactic_tail_call_mode,
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
-
- PushArgumentsFromEnvironment(argument_count);
-
- Drop(1); // Function.
-
- if (join != NULL) {
- AddInstruction(call);
- if (!ast_context()->IsEffect()) Push(call);
- Goto(join);
- } else {
- return ast_context()->ReturnInstruction(call, expr->id());
- }
- }
-
- // We assume that control flow is always live after an expression. So
- // even without predecessors to the join block, we set it as the exit
- // block and continue by adding instructions there.
- DCHECK(join != NULL);
- if (join->HasPredecessor()) {
- set_current_block(join);
- join->SetJoinId(expr->id());
- if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
- } else {
- set_current_block(NULL);
- }
-}
-
-void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* reason,
- TailCallMode tail_call_mode) {
- if (FLAG_trace_inlining) {
- std::unique_ptr<char[]> target_name =
- target->shared()->DebugName()->ToCString();
- std::unique_ptr<char[]> caller_name =
- caller->shared()->DebugName()->ToCString();
- if (reason == NULL) {
- const char* call_mode =
- tail_call_mode == TailCallMode::kAllow ? "tail called" : "called";
- PrintF("Inlined %s %s from %s.\n", target_name.get(), call_mode,
- caller_name.get());
- } else {
- PrintF("Did not inline %s called from %s (%s).\n",
- target_name.get(), caller_name.get(), reason);
- }
- }
-}
-
-
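- // Sentinel AST size returned by InliningAstSize() when the target must not
- // be inlined.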
-static const int kNotInlinable = 1000000000;
-
-
-int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
- if (!FLAG_use_inlining) return kNotInlinable;
-
- // Precondition: call is monomorphic and we have found a target with the
- // appropriate arity.
- Handle<JSFunction> caller = current_info()->closure();
- Handle<SharedFunctionInfo> target_shared(target->shared());
-
- // Always inline functions that force inlining.
- if (target_shared->force_inline()) {
- return 0;
- }
- if (!target->shared()->IsUserJavaScript()) {
- return kNotInlinable;
- }
-
- if (target_shared->IsApiFunction()) {
- TraceInline(target, caller, "target is api function");
- return kNotInlinable;
- }
-
- // Do a quick check on source code length to avoid parsing large
- // inlining candidates.
- if (target_shared->SourceSize() >
- Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
- TraceInline(target, caller, "target text too big");
- return kNotInlinable;
- }
-
- // Target must be inlineable.
- BailoutReason noopt_reason = target_shared->disable_optimization_reason();
- if (!target_shared->IsInlineable() && noopt_reason != kHydrogenFilter) {
- TraceInline(target, caller, "target not inlineable");
- return kNotInlinable;
- }
- if (noopt_reason != kNoReason && noopt_reason != kHydrogenFilter) {
- TraceInline(target, caller, "target contains unsupported syntax [early]");
- return kNotInlinable;
- }
-
- int nodes_added = target_shared->ast_node_count();
- return nodes_added;
-}
-
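- // Tries to inline |target| at the current call site. Returns false while it
- // is still possible to use a non-inlined call; once the decision to inline
- // has been made it returns true, even if graph construction then fails.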
-bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id, BailoutId return_id,
- InliningKind inlining_kind,
- TailCallMode syntactic_tail_call_mode) {
- if (target->context()->native_context() !=
- top_info()->closure()->context()->native_context()) {
- return false;
- }
- int nodes_added = InliningAstSize(target);
- if (nodes_added == kNotInlinable) return false;
-
- Handle<JSFunction> caller = current_info()->closure();
- if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
- TraceInline(target, caller, "target AST is too large [early]");
- return false;
- }
-
- // Don't inline deeper than the maximum number of inlining levels.
- HEnvironment* env = environment();
- int current_level = 1;
- while (env->outer() != NULL) {
- if (current_level == FLAG_max_inlining_levels) {
- TraceInline(target, caller, "inline depth limit reached");
- return false;
- }
- if (env->outer()->frame_type() == JS_FUNCTION) {
- current_level++;
- }
- env = env->outer();
- }
-
- // Don't inline recursive functions.
- for (FunctionState* state = function_state();
- state != NULL;
- state = state->outer()) {
- if (*state->compilation_info()->closure() == *target) {
- TraceInline(target, caller, "target is recursive");
- return false;
- }
- }
-
- // We don't want to add more than a certain number of nodes from inlining.
- // Always inline small methods (<= 10 nodes).
- if (inlined_count_ > Min(FLAG_max_inlined_nodes_cumulative,
- kUnlimitedMaxInlinedNodesCumulative)) {
- TraceInline(target, caller, "cumulative AST node limit reached");
- return false;
- }
-
- // Parse and allocate variables.
- // Use the same AstValueFactory for creating strings in the sub-compilation
- // step, but don't transfer ownership to target_info.
- Handle<SharedFunctionInfo> target_shared(target->shared());
- ParseInfo parse_info(target_shared, top_info()->parse_info()->zone_shared());
- parse_info.set_ast_value_factory(
- top_info()->parse_info()->ast_value_factory());
- parse_info.set_ast_value_factory_owned(false);
-
- CompilationInfo target_info(parse_info.zone(), &parse_info,
- target->GetIsolate(), target);
-
- if (inlining_kind != CONSTRUCT_CALL_RETURN &&
- IsClassConstructor(target_shared->kind())) {
- TraceInline(target, caller, "target is classConstructor");
- return false;
- }
-
- if (target_shared->HasDebugInfo()) {
- TraceInline(target, caller, "target is being debugged");
- return false;
- }
- if (!Compiler::ParseAndAnalyze(&target_info)) {
- if (target_info.isolate()->has_pending_exception()) {
- // Parse or scope error, never optimize this function.
- SetStackOverflow();
- target_shared->DisableOptimization(kParseScopeError);
- }
- TraceInline(target, caller, "parse failure");
- return false;
- }
- if (target_shared->must_use_ignition_turbo()) {
- TraceInline(target, caller, "ParseAndAnalyze found incompatibility");
- return false;
- }
-
- if (target_info.scope()->NeedsContext()) {
- TraceInline(target, caller, "target has context-allocated variables");
- return false;
- }
-
- if (target_info.scope()->rest_parameter() != nullptr) {
- TraceInline(target, caller, "target uses rest parameters");
- return false;
- }
-
- FunctionLiteral* function = target_info.literal();
-
- // The following conditions must be checked again after re-parsing, because
- // earlier the information might not have been complete due to lazy parsing.
- nodes_added = function->ast_node_count();
- if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
- TraceInline(target, caller, "target AST is too large [late]");
- return false;
- }
- if (function->dont_optimize()) {
- TraceInline(target, caller, "target contains unsupported syntax [late]");
- return false;
- }
-
- // If the function uses the arguments object, check that inlining of
- // functions with an arguments object is enabled and that the arguments
- // variable is stack allocated.
- if (function->scope()->arguments() != NULL) {
- if (!FLAG_inline_arguments) {
- TraceInline(target, caller, "target uses arguments object");
- return false;
- }
- }
-
- // Unsupported variable references present.
- if (function->scope()->this_function_var() != nullptr ||
- function->scope()->new_target_var() != nullptr) {
- TraceInline(target, caller, "target uses new target or this function");
- return false;
- }
-
- // All declarations must be inlineable.
- Declaration::List* decls = target_info.scope()->declarations();
- for (Declaration* decl : *decls) {
- if (decl->IsFunctionDeclaration() ||
- !decl->proxy()->var()->IsStackAllocated()) {
- TraceInline(target, caller, "target has non-trivial declaration");
- return false;
- }
- }
-
- // Generate the deoptimization data for the unoptimized version of
- // the target function if we don't already have it.
- if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
- TraceInline(target, caller, "could not generate deoptimization info");
- return false;
- }
-
- // Remember that we inlined this function. This needs to be called right
- // after the EnsureDeoptimizationSupport call so that the code flusher
- // does not remove the code with the deoptimization support.
- int inlining_id = top_info()->AddInlinedFunction(target_info.shared_info(),
- source_position());
-
- // ----------------------------------------------------------------
- // After this point, we've made a decision to inline this function (so
- // TryInline should always return true).
-
- // If the target was lazily compiled, its literals array may not yet be set up.
- JSFunction::EnsureLiterals(target);
-
- // Type-check the inlined function.
- DCHECK(target_shared->has_deoptimization_support());
- AstTyper(target_info.isolate(), target_info.zone(), target_info.closure(),
- target_info.scope(), target_info.osr_ast_id(), target_info.literal(),
- &bounds_)
- .Run();
-
- // Save the pending call context. Set up new one for the inlined function.
- // The function state is new-allocated because we need to delete it
- // in two different places.
- FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind, inlining_id,
- function_state()->ComputeTailCallMode(syntactic_tail_call_mode));
-
- HConstant* undefined = graph()->GetConstantUndefined();
-
- HEnvironment* inner_env = environment()->CopyForInlining(
- target, arguments_count, function, undefined,
- function_state()->inlining_kind(), syntactic_tail_call_mode);
-
- HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
- inner_env->BindContext(context);
-
- // Create a dematerialized arguments object for the function, and copy the
- // current argument values so they can be used for materialization.
- HEnvironment* arguments_env = inner_env->arguments_environment();
- int parameter_count = arguments_env->parameter_count();
- HArgumentsObject* arguments_object = Add<HArgumentsObject>(parameter_count);
- for (int i = 0; i < parameter_count; i++) {
- arguments_object->AddArgument(arguments_env->Lookup(i), zone());
- }
-
- // If the function uses the arguments object, then bind one.
- if (function->scope()->arguments() != NULL) {
- DCHECK(function->scope()->arguments()->IsStackAllocated());
- inner_env->Bind(function->scope()->arguments(), arguments_object);
- }
-
- // Capture the state before invoking the inlined function for deopt in the
- // inlined function. This simulate has no bailout-id since it's not directly
- // reachable for deopt, and is only used to capture the state. If the simulate
- // becomes reachable by merging, the ast id of the simulate merged into it is
- // adopted.
- Add<HSimulate>(BailoutId::None());
-
- current_block()->UpdateEnvironment(inner_env);
- Scope* saved_scope = scope();
- set_scope(target_info.scope());
- HEnterInlined* enter_inlined = Add<HEnterInlined>(
- return_id, target, context, arguments_count, function,
- function_state()->inlining_kind(), function->scope()->arguments(),
- arguments_object, syntactic_tail_call_mode);
- if (is_tracking_positions()) {
- enter_inlined->set_inlining_id(inlining_id);
- }
-
- function_state()->set_entry(enter_inlined);
-
- VisitDeclarations(target_info.scope()->declarations());
- VisitStatements(function->body());
- set_scope(saved_scope);
- if (HasStackOverflow()) {
- // Bail out if the inlined function did, as we cannot residualize a call
- // instead, but do not disable optimization for the outer function.
- TraceInline(target, caller, "inline graph construction failed");
- target_shared->DisableOptimization(kInliningBailedOut);
- current_info()->RetryOptimization(kInliningBailedOut);
- delete target_state;
- return true;
- }
-
- // Update inlined nodes count.
- inlined_count_ += nodes_added;
-
- Handle<Code> unoptimized_code(target_shared->code());
- DCHECK(unoptimized_code->kind() == Code::FUNCTION);
- Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
- graph()->update_type_change_checksum(type_info->own_type_change_checksum());
-
- TraceInline(target, caller, NULL, syntactic_tail_call_mode);
-
- if (current_block() != NULL) {
- FunctionState* state = function_state();
- if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
- // Falling off the end of an inlined construct call. In a test context the
- // return value will always evaluate to true, in a value context the
- // return value is the newly allocated receiver.
- if (call_context()->IsTest()) {
- inlined_test_context()->ReturnValue(graph()->GetConstantTrue());
- } else if (call_context()->IsEffect()) {
- Goto(function_return(), state);
- } else {
- DCHECK(call_context()->IsValue());
- AddLeaveInlined(implicit_return_value, state);
- }
- } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
- // Falling off the end of an inlined setter call. The returned value is
- // never used; the value of an assignment is always the value of the RHS
- // of the assignment.
- if (call_context()->IsTest()) {
- inlined_test_context()->ReturnValue(implicit_return_value);
- } else if (call_context()->IsEffect()) {
- Goto(function_return(), state);
- } else {
- DCHECK(call_context()->IsValue());
- AddLeaveInlined(implicit_return_value, state);
- }
- } else {
- // Falling off the end of a normal inlined function. This basically means
- // returning undefined.
- if (call_context()->IsTest()) {
- inlined_test_context()->ReturnValue(graph()->GetConstantFalse());
- } else if (call_context()->IsEffect()) {
- Goto(function_return(), state);
- } else {
- DCHECK(call_context()->IsValue());
- AddLeaveInlined(undefined, state);
- }
- }
- }
-
- // Fix up the function exits.
- if (inlined_test_context() != NULL) {
- HBasicBlock* if_true = inlined_test_context()->if_true();
- HBasicBlock* if_false = inlined_test_context()->if_false();
-
- HEnterInlined* entry = function_state()->entry();
-
- // Pop the return test context from the expression context stack.
- DCHECK(ast_context() == inlined_test_context());
- ClearInlinedTestContext();
- delete target_state;
-
- // Forward to the real test context.
- if (if_true->HasPredecessor()) {
- entry->RegisterReturnTarget(if_true, zone());
- if_true->SetJoinId(ast_id);
- HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- Goto(if_true, true_target, function_state());
- }
- if (if_false->HasPredecessor()) {
- entry->RegisterReturnTarget(if_false, zone());
- if_false->SetJoinId(ast_id);
- HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- Goto(if_false, false_target, function_state());
- }
- set_current_block(NULL);
- return true;
-
- } else if (function_return()->HasPredecessor()) {
- function_state()->entry()->RegisterReturnTarget(function_return(), zone());
- function_return()->SetJoinId(ast_id);
- set_current_block(function_return());
- } else {
- set_current_block(NULL);
- }
- delete target_state;
- return true;
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
- return TryInline(expr->target(), expr->arguments()->length(), NULL,
- expr->id(), expr->ReturnId(), NORMAL_RETURN,
- expr->tail_call_mode());
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
- HValue* implicit_return_value) {
- return TryInline(expr->target(), expr->arguments()->length(),
- implicit_return_value, expr->id(), expr->ReturnId(),
- CONSTRUCT_CALL_RETURN, TailCallMode::kDisallow);
-}
-
-bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
- Handle<Map> receiver_map,
- BailoutId ast_id,
- BailoutId return_id) {
- if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
- if (getter->IsJSFunction()) {
- Handle<JSFunction> getter_function = Handle<JSFunction>::cast(getter);
- return TryInlineBuiltinGetterCall(getter_function, receiver_map, ast_id) ||
- TryInline(getter_function, 0, NULL, ast_id, return_id,
- GETTER_CALL_RETURN, TailCallMode::kDisallow);
- }
- return false;
-}
-
-bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
- Handle<Map> receiver_map,
- BailoutId id,
- BailoutId assignment_id,
- HValue* implicit_return_value) {
- if (TryInlineApiSetter(setter, receiver_map, id)) return true;
- return setter->IsJSFunction() &&
- TryInline(Handle<JSFunction>::cast(setter), 1, implicit_return_value,
- id, assignment_id, SETTER_CALL_RETURN,
- TailCallMode::kDisallow);
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineIndirectCall(Handle<JSFunction> function,
- Call* expr,
- int arguments_count) {
- return TryInline(function, arguments_count, NULL, expr->id(),
- expr->ReturnId(), NORMAL_RETURN, expr->tail_call_mode());
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
- if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
- BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
- // We intentionally ignore expr->tail_call_mode() here because the builtins
- // we inline do not observe whether they were tail called or not.
- switch (id) {
- case kMathCos:
- case kMathExp:
- case kMathRound:
- case kMathFround:
- case kMathFloor:
- case kMathAbs:
- case kMathSin:
- case kMathSqrt:
- case kMathLog:
- case kMathClz32:
- if (expr->arguments()->length() == 1) {
- HValue* argument = Pop();
- Drop(2); // Receiver and function.
- HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
- ast_context()->ReturnInstruction(op, expr->id());
- return true;
- }
- break;
- case kMathImul:
- if (expr->arguments()->length() == 2) {
- HValue* right = Pop();
- HValue* left = Pop();
- Drop(2); // Receiver and function.
- HInstruction* op =
- HMul::NewImul(isolate(), zone(), context(), left, right);
- ast_context()->ReturnInstruction(op, expr->id());
- return true;
- }
- break;
- default:
- // Not supported for inlining yet.
- break;
- }
- return false;
-}
-
-
-// static
-bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
- Handle<Map> jsarray_map) {
- DCHECK(!jsarray_map->is_dictionary_map());
- Isolate* isolate = jsarray_map->GetIsolate();
- Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number =
- descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
- return descriptors->GetDetails(number).IsReadOnly();
-}
-
-
-// static
-bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
- Handle<Map> receiver_map) {
- return !receiver_map.is_null() && receiver_map->prototype()->IsJSObject() &&
- receiver_map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
- (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
- !IsReadOnlyLengthDescriptor(receiver_map);
-}
-
-bool HOptimizedGraphBuilder::TryInlineBuiltinGetterCall(
- Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id) {
- if (!function->shared()->HasBuiltinFunctionId()) return false;
- BuiltinFunctionId id = function->shared()->builtin_function_id();
-
- // Try to inline getter calls like DataView.prototype.byteLength/byteOffset
- // as operations in the calling function.
- switch (id) {
- case kDataViewBuffer: {
- if (!receiver_map->IsJSDataViewMap()) return false;
- HObjectAccess access = HObjectAccess::ForMapAndOffset(
- receiver_map, JSDataView::kBufferOffset);
- HValue* object = Pop(); // receiver
- HInstruction* result = New<HLoadNamedField>(object, object, access);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- case kDataViewByteLength:
- case kDataViewByteOffset: {
- if (!receiver_map->IsJSDataViewMap()) return false;
- int offset = (id == kDataViewByteLength) ? JSDataView::kByteLengthOffset
- : JSDataView::kByteOffsetOffset;
- HObjectAccess access =
- HObjectAccess::ForMapAndOffset(receiver_map, offset);
- HValue* object = Pop(); // receiver
- HValue* checked_object = Add<HCheckArrayBufferNotNeutered>(object);
- HInstruction* result =
- New<HLoadNamedField>(object, checked_object, access);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- case kTypedArrayByteLength:
- case kTypedArrayByteOffset:
- case kTypedArrayLength: {
- if (!receiver_map->IsJSTypedArrayMap()) return false;
- int offset = (id == kTypedArrayLength)
- ? JSTypedArray::kLengthOffset
- : (id == kTypedArrayByteLength)
- ? JSTypedArray::kByteLengthOffset
- : JSTypedArray::kByteOffsetOffset;
- HObjectAccess access =
- HObjectAccess::ForMapAndOffset(receiver_map, offset);
- HValue* object = Pop(); // receiver
- HValue* checked_object = Add<HCheckArrayBufferNotNeutered>(object);
- HInstruction* result =
- New<HLoadNamedField>(object, checked_object, access);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- default:
- return false;
- }
-}
-
-// static
-bool HOptimizedGraphBuilder::NoElementsInPrototypeChain(
- Handle<Map> receiver_map) {
- // TODO(ishell): remove this once we support NO_ELEMENTS elements kind.
- PrototypeIterator iter(receiver_map);
- Handle<Object> empty_fixed_array =
- iter.isolate()->factory()->empty_fixed_array();
- while (true) {
- Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- if (current->elements() != *empty_fixed_array) return false;
- iter.Advance();
- if (iter.IsAtEnd()) {
- return true;
- }
- }
-}
-
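- // Tries to replace a call to a recognized builtin (String, Math and Array
- // methods, Object.prototype.hasOwnProperty) with equivalent Hydrogen
- // instructions; returns false if the generic call path must be used.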
-bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
- Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id,
- int args_count_no_receiver) {
- if (!function->shared()->HasBuiltinFunctionId()) return false;
- BuiltinFunctionId id = function->shared()->builtin_function_id();
- int argument_count = args_count_no_receiver + 1; // Plus receiver.
-
- if (receiver_map.is_null()) {
- HValue* receiver = environment()->ExpressionStackAt(args_count_no_receiver);
- if (receiver->IsConstant() &&
- HConstant::cast(receiver)->handle(isolate())->IsHeapObject()) {
- receiver_map =
- handle(Handle<HeapObject>::cast(
- HConstant::cast(receiver)->handle(isolate()))->map());
- }
- }
- // Try to inline calls like Math.* as operations in the calling function.
- switch (id) {
- case kObjectHasOwnProperty: {
- // It's not safe to look through the phi for elements if we're compiling
- // for osr.
- if (top_info()->is_osr()) return false;
- if (argument_count != 2) return false;
- HValue* key = Top();
- if (!key->IsLoadKeyed()) return false;
- HValue* elements = HLoadKeyed::cast(key)->elements();
- if (!elements->IsPhi() || elements->OperandCount() != 1) return false;
- if (!elements->OperandAt(0)->IsForInCacheArray()) return false;
- HForInCacheArray* cache = HForInCacheArray::cast(elements->OperandAt(0));
- HValue* receiver = environment()->ExpressionStackAt(1);
- if (!receiver->IsPhi() || receiver->OperandCount() != 1) return false;
- if (cache->enumerable() != receiver->OperandAt(0)) return false;
- Drop(3); // key, receiver, function
- Add<HCheckMapValue>(receiver, cache->map());
- ast_context()->ReturnValue(graph()->GetConstantTrue());
- return true;
- }
- case kStringCharCodeAt:
- case kStringCharAt:
- if (argument_count == 2) {
- HValue* index = Pop();
- HValue* string = Pop();
- Drop(1); // Function.
- HInstruction* char_code =
- BuildStringCharCodeAt(string, index);
- if (id == kStringCharCodeAt) {
- ast_context()->ReturnInstruction(char_code, ast_id);
- return true;
- }
- AddInstruction(char_code);
- HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- break;
- case kStringFromCharCode:
- if (argument_count == 2) {
- HValue* argument = Pop();
- Drop(2); // Receiver and function.
- argument = AddUncasted<HForceRepresentation>(
- argument, Representation::Integer32());
- argument->SetFlag(HValue::kTruncatingToInt32);
- HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- break;
- case kMathCos:
- case kMathExp:
- case kMathRound:
- case kMathFround:
- case kMathFloor:
- case kMathAbs:
- case kMathSin:
- case kMathSqrt:
- case kMathLog:
- case kMathClz32:
- if (argument_count == 2) {
- HValue* argument = Pop();
- Drop(2); // Receiver and function.
- HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
- ast_context()->ReturnInstruction(op, ast_id);
- return true;
- }
- break;
- case kMathPow:
- if (argument_count == 3) {
- HValue* right = Pop();
- HValue* left = Pop();
- Drop(2); // Receiver and function.
- HInstruction* result = NULL;
- // Use sqrt() if exponent is 0.5 or -0.5.
- if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
- double exponent = HConstant::cast(right)->DoubleValue();
- if (exponent == 0.5) {
- result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf);
- } else if (exponent == -0.5) {
- HValue* one = graph()->GetConstant1();
- HInstruction* sqrt = AddUncasted<HUnaryMathOperation>(
- left, kMathPowHalf);
- // MathPowHalf doesn't have side effects so there's no need for
- // an environment simulation here.
- DCHECK(!sqrt->HasObservableSideEffects());
- result = NewUncasted<HDiv>(one, sqrt);
- } else if (exponent == 2.0) {
- result = NewUncasted<HMul>(left, left);
- }
- }
-
- if (result == NULL) {
- result = NewUncasted<HPower>(left, right);
- }
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- break;
- case kMathMax:
- case kMathMin:
- if (argument_count == 3) {
- HValue* right = Pop();
- HValue* left = Pop();
- Drop(2); // Receiver and function.
- HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
- : HMathMinMax::kMathMax;
- HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- break;
- case kMathImul:
- if (argument_count == 3) {
- HValue* right = Pop();
- HValue* left = Pop();
- Drop(2); // Receiver and function.
- HInstruction* result =
- HMul::NewImul(isolate(), zone(), context(), left, right);
- ast_context()->ReturnInstruction(result, ast_id);
- return true;
- }
- break;
- case kArrayPop: {
- if (!CanInlineArrayResizeOperation(receiver_map)) return false;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- Drop(args_count_no_receiver);
- HValue* result;
- HValue* reduced_length;
- HValue* receiver = Pop();
-
- HValue* checked_object = AddCheckMap(receiver, receiver_map);
- HValue* length =
- Add<HLoadNamedField>(checked_object, nullptr,
- HObjectAccess::ForArrayLength(elements_kind));
-
- Drop(1); // Function.
-
- { NoObservableSideEffectsScope scope(this);
- IfBuilder length_checker(this);
-
- HValue* bounds_check = length_checker.If<HCompareNumericAndBranch>(
- length, graph()->GetConstant0(), Token::EQ);
- length_checker.Then();
-
- if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
-
- length_checker.Else();
- HValue* elements = AddLoadElements(checked_object);
- // Ensure that we aren't popping from a copy-on-write array.
- if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- elements = BuildCopyElementsOnWrite(checked_object, elements,
- elements_kind, length);
- }
- reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
- result = AddElementAccess(elements, reduced_length, nullptr,
- bounds_check, nullptr, elements_kind, LOAD);
- HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
- ? graph()->GetConstantHole()
- : Add<HConstant>(HConstant::kHoleNaN);
- if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- }
- AddElementAccess(elements, reduced_length, hole, bounds_check, nullptr,
- elements_kind, STORE);
- Add<HStoreNamedField>(
- checked_object, HObjectAccess::ForArrayLength(elements_kind),
- reduced_length, STORE_TO_INITIALIZED_ENTRY);
-
- if (!ast_context()->IsEffect()) Push(result);
-
- length_checker.End();
- }
- result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) Drop(1);
-
- ast_context()->ReturnValue(result);
- return true;
- }
- case kArrayPush: {
- if (!CanInlineArrayResizeOperation(receiver_map)) return false;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- // If there may be elements accessors in the prototype chain, the fast
- // inlined version can't be used.
- if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
- // Even if there are currently no elements accessors on the prototype chain,
- // that doesn't mean there won't be any later. Install a full prototype
- // chain check to trap element accessors being installed on the prototype
- // chain, which would cause elements to go to dictionary mode and result
- // in a map change.
- Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- BuildCheckPrototypeMaps(prototype, Handle<JSObject>());
-
- // Protect against adding elements to the Array prototype, which needs to
- // route through appropriate bottlenecks.
- if (isolate()->IsFastArrayConstructorPrototypeChainIntact() &&
- !prototype->IsJSArray()) {
- return false;
- }
-
- const int argc = args_count_no_receiver;
- if (argc != 1) return false;
-
- HValue* value_to_push = Pop();
- HValue* array = Pop();
- Drop(1); // Drop function.
-
- HInstruction* new_size = NULL;
- HValue* length = NULL;
-
- {
- NoObservableSideEffectsScope scope(this);
-
- length = Add<HLoadNamedField>(
- array, nullptr, HObjectAccess::ForArrayLength(elements_kind));
-
- new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
-
- bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- HValue* checked_array = Add<HCheckMaps>(array, receiver_map);
- BuildUncheckedMonomorphicElementAccess(
- checked_array, length, value_to_push, is_array, elements_kind,
- STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
-
- if (!ast_context()->IsEffect()) Push(new_size);
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) Drop(1);
- }
-
- ast_context()->ReturnValue(new_size);
- return true;
- }
- case kArrayShift: {
- if (!CanInlineArrayResizeOperation(receiver_map)) return false;
- if (!NoElementsInPrototypeChain(receiver_map)) return false;
- ElementsKind kind = receiver_map->elements_kind();
-
- // If there may be elements accessors in the prototype chain, the fast
- // inlined version can't be used.
- if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
-
- // Even if there are currently no elements accessors on the prototype chain,
- // that doesn't mean there won't be any later. Install a full prototype
- // chain check to trap element accessors being installed on the prototype
- // chain, which would cause elements to go to dictionary mode and result
- // in a map change.
- BuildCheckPrototypeMaps(
- handle(JSObject::cast(receiver_map->prototype()), isolate()),
- Handle<JSObject>::null(), true);
-
- // Threshold for fast inlined Array.shift().
- HConstant* inline_threshold = Add<HConstant>(JSArray::kMaxCopyElements);
-
- Drop(args_count_no_receiver);
- HValue* result;
- HValue* receiver = Pop();
- HValue* checked_object = AddCheckMap(receiver, receiver_map);
- HValue* length = Add<HLoadNamedField>(
- receiver, checked_object, HObjectAccess::ForArrayLength(kind));
-
- Drop(1); // Function.
- {
- NoObservableSideEffectsScope scope(this);
-
- IfBuilder if_lengthiszero(this);
- HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>(
- length, graph()->GetConstant0(), Token::EQ);
- if_lengthiszero.Then();
- {
- if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
- }
- if_lengthiszero.Else();
- {
- HValue* elements = AddLoadElements(receiver);
-
- // Check if we can use the fast inlined Array.shift().
- IfBuilder if_inline(this);
- if_inline.If<HCompareNumericAndBranch>(
- length, inline_threshold, Token::LTE);
- if (IsFastSmiOrObjectElementsKind(kind)) {
- // We cannot handle copy-on-write backing stores here.
- if_inline.AndIf<HCompareMap>(
- elements, isolate()->factory()->fixed_array_map());
- }
- if_inline.Then();
- {
- // Remember the result.
- if (!ast_context()->IsEffect()) {
- Push(AddElementAccess(elements, graph()->GetConstant0(), nullptr,
- lengthiszero, nullptr, kind, LOAD));
- }
-
- // Compute the new length.
- HValue* new_length = AddUncasted<HSub>(
- length, graph()->GetConstant1());
- new_length->ClearFlag(HValue::kCanOverflow);
-
- // Copy the remaining elements.
- LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
- {
- HValue* new_key = loop.BeginBody(
- graph()->GetConstant0(), new_length, Token::LT);
- HValue* key = AddUncasted<HAdd>(new_key, graph()->GetConstant1());
- key->ClearFlag(HValue::kCanOverflow);
- ElementsKind copy_kind =
- kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
- HValue* element =
- AddUncasted<HLoadKeyed>(elements, key, lengthiszero, nullptr,
- copy_kind, ALLOW_RETURN_HOLE);
- HStoreKeyed* store = Add<HStoreKeyed>(elements, new_key, element,
- nullptr, copy_kind);
- store->SetFlag(HValue::kTruncatingToNumber);
- }
- loop.EndBody();
-
- // Put a hole at the end.
- HValue* hole = IsFastSmiOrObjectElementsKind(kind)
- ? graph()->GetConstantHole()
- : Add<HConstant>(HConstant::kHoleNaN);
- if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
- Add<HStoreKeyed>(elements, new_length, hole, nullptr, kind,
- INITIALIZING_STORE);
-
- // Remember new length.
- Add<HStoreNamedField>(
- receiver, HObjectAccess::ForArrayLength(kind),
- new_length, STORE_TO_INITIALIZED_ENTRY);
- }
- if_inline.Else();
- {
- Add<HPushArguments>(receiver);
- result = AddInstruction(NewCallConstantFunction(
- function, 1, TailCallMode::kDisallow, TailCallMode::kDisallow));
- if (!ast_context()->IsEffect()) Push(result);
- }
- if_inline.End();
- }
- if_lengthiszero.End();
- }
- result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) Drop(1);
- ast_context()->ReturnValue(result);
- return true;
- }
- case kArrayIndexOf:
- case kArrayLastIndexOf: {
- if (receiver_map.is_null()) return false;
- if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
- if (!receiver_map->prototype()->IsJSObject()) return false;
- ElementsKind kind = receiver_map->elements_kind();
- if (!IsFastElementsKind(kind)) return false;
- if (argument_count != 2) return false;
- if (!receiver_map->is_extensible()) return false;
-
- // If there may be elements accessors in the prototype chain, the fast
- // inlined version can't be used.
- if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
-
- // Even if there are currently no elements accessors on the prototype chain,
- // that doesn't mean there won't be any later. Install a full prototype
- // chain check to trap element accessors being installed on the prototype
- // chain, which would cause elements to go to dictionary mode and result
- // in a map change.
- BuildCheckPrototypeMaps(
- handle(JSObject::cast(receiver_map->prototype()), isolate()),
- Handle<JSObject>::null());
-
- HValue* search_element = Pop();
- HValue* receiver = Pop();
- Drop(1); // Drop function.
-
- ArrayIndexOfMode mode = (id == kArrayIndexOf)
- ? kFirstIndexOf : kLastIndexOf;
- HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode);
-
- if (!ast_context()->IsEffect()) Push(index);
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) Drop(1);
- ast_context()->ReturnValue(index);
- return true;
- }
- default:
- // Not yet supported for inlining.
- break;
- }
- return false;
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
- HValue* receiver) {
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- Handle<JSFunction> function = expr->target();
- int argc = expr->arguments()->length();
- SmallMapList receiver_maps;
- return TryInlineApiCall(function, receiver, &receiver_maps, argc, expr->id(),
- kCallApiFunction, expr->tail_call_mode());
-}
-
-
-bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
- Call* expr,
- HValue* receiver,
- SmallMapList* receiver_maps) {
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- Handle<JSFunction> function = expr->target();
- int argc = expr->arguments()->length();
- return TryInlineApiCall(function, receiver, receiver_maps, argc, expr->id(),
- kCallApiMethod, expr->tail_call_mode());
-}
-
-bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
- Handle<Map> receiver_map,
- BailoutId ast_id) {
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- SmallMapList receiver_maps(1, zone());
- receiver_maps.Add(receiver_map, zone());
- return TryInlineApiCall(function,
- NULL, // Receiver is on expression stack.
- &receiver_maps, 0, ast_id, kCallApiGetter,
- TailCallMode::kDisallow);
-}
-
-bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
- Handle<Map> receiver_map,
- BailoutId ast_id) {
- SmallMapList receiver_maps(1, zone());
- receiver_maps.Add(receiver_map, zone());
- return TryInlineApiCall(function,
- NULL, // Receiver is on expression stack.
- &receiver_maps, 1, ast_id, kCallApiSetter,
- TailCallMode::kDisallow);
-}
-
-bool HOptimizedGraphBuilder::TryInlineApiCall(
- Handle<Object> function, HValue* receiver, SmallMapList* receiver_maps,
- int argc, BailoutId ast_id, ApiCallType call_type,
- TailCallMode syntactic_tail_call_mode) {
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- if (function->IsJSFunction() &&
- Handle<JSFunction>::cast(function)->context()->native_context() !=
- top_info()->closure()->context()->native_context()) {
- return false;
- }
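- // CallApiCallbackStub supports only a limited number of arguments.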
- if (argc > CallApiCallbackStub::kArgMax) {
- return false;
- }
-
- CallOptimization optimization(function);
- if (!optimization.is_simple_api_call()) return false;
- Handle<Map> holder_map;
- for (int i = 0; i < receiver_maps->length(); ++i) {
- auto map = receiver_maps->at(i);
- // Don't inline calls to receivers requiring access checks.
- if (map->is_access_check_needed()) return false;
- }
- if (call_type == kCallApiFunction) {
- // Cannot embed a direct reference to the global proxy map
- // as it may be dropped on deserialization.
- CHECK(!isolate()->serializer_enabled());
- DCHECK(function->IsJSFunction());
- DCHECK_EQ(0, receiver_maps->length());
- receiver_maps->Add(
- handle(Handle<JSFunction>::cast(function)->global_proxy()->map()),
- zone());
- }
- CallOptimization::HolderLookup holder_lookup =
- CallOptimization::kHolderNotFound;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_maps->first(), &holder_lookup);
- if (holder_lookup == CallOptimization::kHolderNotFound) return false;
-
- if (FLAG_trace_inlining) {
- PrintF("Inlining api function ");
- function->ShortPrint();
- PrintF("\n");
- }
-
- bool is_function = false;
- bool is_store = false;
- switch (call_type) {
- case kCallApiFunction:
- case kCallApiMethod:
- // Need to check that none of the receiver maps could have changed.
- Add<HCheckMaps>(receiver, receiver_maps);
- // Need to ensure the chain between receiver and api_holder is intact.
- if (holder_lookup == CallOptimization::kHolderFound) {
- AddCheckPrototypeMaps(api_holder, receiver_maps->first());
- } else {
- DCHECK_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
- }
- // Includes receiver.
- PushArgumentsFromEnvironment(argc + 1);
- is_function = true;
- break;
- case kCallApiGetter:
- // Receiver and prototype chain cannot have changed.
- DCHECK_EQ(0, argc);
- DCHECK_NULL(receiver);
- // Receiver is on expression stack.
- receiver = Pop();
- Add<HPushArguments>(receiver);
- break;
- case kCallApiSetter:
- {
- is_store = true;
- // Receiver and prototype chain cannot have changed.
- DCHECK_EQ(1, argc);
- DCHECK_NULL(receiver);
- // Receiver and value are on expression stack.
- HValue* value = Pop();
- receiver = Pop();
- Add<HPushArguments>(receiver, value);
- break;
- }
- }
-
- HValue* holder = NULL;
- switch (holder_lookup) {
- case CallOptimization::kHolderFound:
- holder = Add<HConstant>(api_holder);
- break;
- case CallOptimization::kHolderIsReceiver:
- holder = receiver;
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate());
- HValue* call_data = Add<HConstant>(call_data_obj);
- ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- isolate());
- HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
-
- HValue* op_vals[] = {Add<HConstant>(function), call_data, holder,
- api_function_address};
-
- HInstruction* call = nullptr;
- CHECK(argc <= CallApiCallbackStub::kArgMax);
- if (!is_function) {
- CallApiCallbackStub stub(isolate(), is_store,
- !optimization.is_constant_call());
- Handle<Code> code = stub.GetCode();
- HConstant* code_value = Add<HConstant>(code);
- call = New<HCallWithDescriptor>(
- code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
- } else {
- CallApiCallbackStub stub(isolate(), argc, false);
- Handle<Code> code = stub.GetCode();
- HConstant* code_value = Add<HConstant>(code);
- call = New<HCallWithDescriptor>(
- code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
- Drop(1); // Drop function.
- }
-
- ast_context()->ReturnInstruction(call, ast_id);
- return true;
-}
-
-
-void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
- int arguments_count) {
- Handle<JSFunction> known_function;
- int args_count_no_receiver = arguments_count - 1;
- if (function->IsConstant() &&
- HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
- known_function =
- Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
- if (TryInlineBuiltinMethodCall(known_function, Handle<Map>(), expr->id(),
- args_count_no_receiver)) {
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- known_function->ShortPrint();
- PrintF("\n");
- }
- return;
- }
-
- if (TryInlineIndirectCall(known_function, expr, args_count_no_receiver)) {
- return;
- }
- }
-
- TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
- TailCallMode tail_call_mode =
- function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
-
- PushArgumentsFromEnvironment(arguments_count);
- HInvokeFunction* call =
- New<HInvokeFunction>(function, known_function, arguments_count,
- syntactic_tail_call_mode, tail_call_mode);
- Drop(1); // Function
- ast_context()->ReturnInstruction(call, expr->id());
-}
-
-
-bool HOptimizedGraphBuilder::TryIndirectCall(Call* expr) {
- DCHECK(expr->expression()->IsProperty());
-
- if (!expr->IsMonomorphic()) {
- return false;
- }
- Handle<Map> function_map = expr->GetReceiverTypes()->first();
- if (function_map->instance_type() != JS_FUNCTION_TYPE ||
- !expr->target()->shared()->HasBuiltinFunctionId()) {
- return false;
- }
-
- switch (expr->target()->shared()->builtin_function_id()) {
- case kFunctionCall: {
- if (expr->arguments()->length() == 0) return false;
- BuildFunctionCall(expr);
- return true;
- }
- case kFunctionApply: {
- // For .apply, only the pattern f.apply(receiver, arguments)
- // is supported.
- if (!CanBeFunctionApplyArguments(expr)) return false;
-
- BuildFunctionApply(expr);
- return true;
- }
- default: { return false; }
- }
- UNREACHABLE();
-}
-
-
-// f.apply(...)
-void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- CHECK_ALIVE(VisitForValue(args->at(0)));
- HValue* receiver = Pop(); // receiver
- HValue* function = Pop(); // f
- Drop(1); // apply
-
- // Make sure the arguments object is live.
- VariableProxy* arg_two = args->at(1)->AsVariableProxy();
- LookupAndMakeLive(arg_two->var());
-
- Handle<Map> function_map = expr->GetReceiverTypes()->first();
- HValue* checked_function = AddCheckMap(function, function_map);
-
- if (function_state()->outer() == NULL) {
- TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
- TailCallMode tail_call_mode =
- function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
-
- HInstruction* elements = Add<HArgumentsElements>(false);
- HInstruction* length = Add<HArgumentsLength>(elements);
- HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
- HInstruction* result = New<HApplyArguments>(
- function, wrapped_receiver, length, elements, tail_call_mode);
- ast_context()->ReturnInstruction(result, expr->id());
- } else {
-    // We are inside an inlined function and we know exactly what is inside
-    // the arguments object. But we need to be able to materialize at deopt.
- DCHECK_EQ(environment()->arguments_environment()->parameter_count(),
- function_state()->entry()->arguments_object()->arguments_count());
- HArgumentsObject* args = function_state()->entry()->arguments_object();
- const ZoneList<HValue*>* arguments_values = args->arguments_values();
- int arguments_count = arguments_values->length();
- Push(function);
- Push(BuildWrapReceiver(receiver, checked_function));
- for (int i = 1; i < arguments_count; i++) {
- Push(arguments_values->at(i));
- }
- HandleIndirectCall(expr, function, arguments_count);
- }
-}
-
-
-// f.call(...)
-void HOptimizedGraphBuilder::BuildFunctionCall(Call* expr) {
- HValue* function = Top(); // f
- Handle<Map> function_map = expr->GetReceiverTypes()->first();
- HValue* checked_function = AddCheckMap(function, function_map);
-
- // f and call are on the stack in the unoptimized code
- // during evaluation of the arguments.
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- int args_length = expr->arguments()->length();
- int receiver_index = args_length - 1;
- // Patch the receiver.
- HValue* receiver = BuildWrapReceiver(
- environment()->ExpressionStackAt(receiver_index), checked_function);
- environment()->SetExpressionStackAt(receiver_index, receiver);
-
- // Call must not be on the stack from now on.
- int call_index = args_length + 1;
- environment()->RemoveExpressionStackAt(call_index);
-
- HandleIndirectCall(expr, function, args_length);
-}
-
-
-HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
- Handle<JSFunction> target) {
- SharedFunctionInfo* shared = target->shared();
- if (is_sloppy(shared->language_mode()) && !shared->native()) {
-    // Cannot embed a direct reference to the global proxy,
-    // as it is dropped on deserialization.
- CHECK(!isolate()->serializer_enabled());
- Handle<JSObject> global_proxy(target->context()->global_proxy());
- return Add<HConstant>(global_proxy);
- }
- return graph()->GetConstantUndefined();
-}
-
-
-HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
- HValue* search_element,
- ElementsKind kind,
- ArrayIndexOfMode mode) {
- DCHECK(IsFastElementsKind(kind));
-
- NoObservableSideEffectsScope no_effects(this);
-
- HValue* elements = AddLoadElements(receiver);
- HValue* length = AddLoadArrayLength(receiver, kind);
-
- HValue* initial;
- HValue* terminating;
- Token::Value token;
- LoopBuilder::Direction direction;
- if (mode == kFirstIndexOf) {
- initial = graph()->GetConstant0();
- terminating = length;
- token = Token::LT;
- direction = LoopBuilder::kPostIncrement;
- } else {
- DCHECK_EQ(kLastIndexOf, mode);
- initial = length;
- terminating = graph()->GetConstant0();
- token = Token::GT;
- direction = LoopBuilder::kPreDecrement;
- }
-
- Push(graph()->GetConstantMinus1());
- if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) {
- // Make sure that we can actually compare numbers correctly below, see
- // https://code.google.com/p/chromium/issues/detail?id=407946 for details.
- search_element = AddUncasted<HForceRepresentation>(
- search_element, IsFastSmiElementsKind(kind) ? Representation::Smi()
- : Representation::Double());
-
- LoopBuilder loop(this, context(), direction);
- {
- HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(
- elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
- IfBuilder if_issame(this);
- if_issame.If<HCompareNumericAndBranch>(element, search_element,
- Token::EQ_STRICT);
- if_issame.Then();
- {
- Drop(1);
- Push(index);
- loop.Break();
- }
- if_issame.End();
- }
- loop.EndBody();
- } else {
- IfBuilder if_isstring(this);
- if_isstring.If<HIsStringAndBranch>(search_element);
- if_isstring.Then();
- {
- LoopBuilder loop(this, context(), direction);
- {
- HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(
- elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
- IfBuilder if_issame(this);
- if_issame.If<HIsStringAndBranch>(element);
- if_issame.AndIf<HStringCompareAndBranch>(
- element, search_element, Token::EQ_STRICT);
- if_issame.Then();
- {
- Drop(1);
- Push(index);
- loop.Break();
- }
- if_issame.End();
- }
- loop.EndBody();
- }
- if_isstring.Else();
- {
- IfBuilder if_isnumber(this);
- if_isnumber.If<HIsSmiAndBranch>(search_element);
- if_isnumber.OrIf<HCompareMap>(
- search_element, isolate()->factory()->heap_number_map());
- if_isnumber.Then();
- {
- HValue* search_number =
- AddUncasted<HForceRepresentation>(search_element,
- Representation::Double());
- LoopBuilder loop(this, context(), direction);
- {
- HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(
- elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
-
- IfBuilder if_element_isnumber(this);
- if_element_isnumber.If<HIsSmiAndBranch>(element);
- if_element_isnumber.OrIf<HCompareMap>(
- element, isolate()->factory()->heap_number_map());
- if_element_isnumber.Then();
- {
- HValue* number =
- AddUncasted<HForceRepresentation>(element,
- Representation::Double());
- IfBuilder if_issame(this);
- if_issame.If<HCompareNumericAndBranch>(
- number, search_number, Token::EQ_STRICT);
- if_issame.Then();
- {
- Drop(1);
- Push(index);
- loop.Break();
- }
- if_issame.End();
- }
- if_element_isnumber.End();
- }
- loop.EndBody();
- }
- if_isnumber.Else();
- {
- LoopBuilder loop(this, context(), direction);
- {
- HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(
- elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
- IfBuilder if_issame(this);
- if_issame.If<HCompareObjectEqAndBranch>(
- element, search_element);
- if_issame.Then();
- {
- Drop(1);
- Push(index);
- loop.Break();
- }
- if_issame.End();
- }
- loop.EndBody();
- }
- if_isnumber.End();
- }
- if_isstring.End();
- }
-
- return Pop();
-}
-
-template <class T>
-bool HOptimizedGraphBuilder::TryHandleArrayCall(T* expr, HValue* function) {
- if (!array_function().is_identical_to(expr->target())) {
- return false;
- }
-
- Handle<AllocationSite> site = expr->allocation_site();
- if (site.is_null()) return false;
-
- Add<HCheckValue>(function, array_function());
-
- int arguments_count = expr->arguments()->length();
- if (TryInlineArrayCall(expr, arguments_count, site)) return true;
-
- HInstruction* call = PreProcessCall(New<HCallNewArray>(
- function, arguments_count + 1, site->GetElementsKind(), site));
- if (expr->IsCall()) Drop(1);
- ast_context()->ReturnInstruction(call, expr->id());
-
- return true;
-}
-
-
-bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- if (args->length() != 2) return false;
- VariableProxy* arg_two = args->at(1)->AsVariableProxy();
- if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
- HValue* arg_two_value = environment()->Lookup(arg_two->var());
- if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
- DCHECK_NOT_NULL(current_info()->scope()->arguments());
- return true;
-}
-
-
-void HOptimizedGraphBuilder::VisitCall(Call* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (!is_tracking_positions()) SetSourcePosition(expr->position());
- Expression* callee = expr->expression();
- int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- HInstruction* call = NULL;
-
- TailCallMode syntactic_tail_call_mode = expr->tail_call_mode();
- TailCallMode tail_call_mode =
- function_state()->ComputeTailCallMode(syntactic_tail_call_mode);
-
- Property* prop = callee->AsProperty();
- if (prop != NULL) {
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* receiver = Top();
-
- SmallMapList* maps;
- ComputeReceiverTypes(expr, receiver, &maps, this);
-
- if (prop->key()->IsPropertyName() && maps->length() > 0) {
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- PropertyAccessInfo info(this, LOAD, maps->first(), name);
- if (!info.CanAccessAsMonomorphic(maps)) {
- HandlePolymorphicCallNamed(expr, receiver, maps, name);
- return;
- }
- }
- HValue* key = NULL;
- if (!prop->key()->IsPropertyName()) {
- CHECK_ALIVE(VisitForValue(prop->key()));
- key = Pop();
- }
-
- CHECK_ALIVE(PushLoad(prop, receiver, key));
- HValue* function = Pop();
-
- if (function->IsConstant() &&
- HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
- // Push the function under the receiver.
- environment()->SetExpressionStackAt(0, function);
- Push(receiver);
-
- Handle<JSFunction> known_function = Handle<JSFunction>::cast(
- HConstant::cast(function)->handle(isolate()));
- expr->set_target(known_function);
-
- if (TryIndirectCall(expr)) return;
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- Handle<Map> map = maps->length() == 1 ? maps->first() : Handle<Map>();
- if (TryInlineBuiltinMethodCall(known_function, map, expr->id(),
- expr->arguments()->length())) {
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- known_function->ShortPrint();
- PrintF("\n");
- }
- return;
- }
- if (TryInlineApiMethodCall(expr, receiver, maps)) return;
-
- // Wrap the receiver if necessary.
- if (NeedsWrapping(maps->first(), known_function)) {
- // Since HWrapReceiver currently cannot actually wrap numbers and
- // strings, use the regular call builtin for method calls to wrap
- // the receiver.
- // TODO(verwaest): Support creation of value wrappers directly in
- // HWrapReceiver.
- call = NewCallFunction(
- function, argument_count, syntactic_tail_call_mode,
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode);
- } else if (TryInlineCall(expr)) {
- return;
- } else {
- call =
- NewCallConstantFunction(known_function, argument_count,
- syntactic_tail_call_mode, tail_call_mode);
- }
-
- } else {
- ArgumentsAllowedFlag arguments_flag = ARGUMENTS_NOT_ALLOWED;
- if (CanBeFunctionApplyArguments(expr) && expr->is_uninitialized()) {
- // We have to use EAGER deoptimization here because Deoptimizer::SOFT
- // gets ignored by the always-opt flag, which leads to incorrect code.
- Add<HDeoptimize>(
- DeoptimizeReason::kInsufficientTypeFeedbackForCallWithArguments,
- Deoptimizer::EAGER);
- arguments_flag = ARGUMENTS_FAKED;
- }
-
- // Push the function under the receiver.
- environment()->SetExpressionStackAt(0, function);
- Push(receiver);
-
- CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
- call = NewCallFunction(function, argument_count, syntactic_tail_call_mode,
- ConvertReceiverMode::kNotNullOrUndefined,
- tail_call_mode);
- }
- PushArgumentsFromEnvironment(argument_count);
-
- } else {
- if (expr->is_possibly_eval()) {
- return Bailout(kPossibleDirectCallToEval);
- }
-
- // The function is on the stack in the unoptimized code during
- // evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- if (function->IsConstant() &&
- HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
- Handle<Object> constant = HConstant::cast(function)->handle(isolate());
- Handle<JSFunction> target = Handle<JSFunction>::cast(constant);
- expr->SetKnownGlobalTarget(target);
- }
-
- // Placeholder for the receiver.
- Push(graph()->GetConstantUndefined());
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- if (expr->IsMonomorphic() &&
- !IsClassConstructor(expr->target()->shared()->kind())) {
- Add<HCheckValue>(function, expr->target());
-
- // Patch the global object on the stack by the expected receiver.
- HValue* receiver = ImplicitReceiverFor(function, expr->target());
- const int receiver_index = argument_count - 1;
- environment()->SetExpressionStackAt(receiver_index, receiver);
-
- if (TryInlineBuiltinFunctionCall(expr)) {
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
- PrintF("\n");
- }
- return;
- }
- if (TryInlineApiFunctionCall(expr, receiver)) return;
- if (TryHandleArrayCall(expr, function)) return;
- if (TryInlineCall(expr)) return;
-
- PushArgumentsFromEnvironment(argument_count);
- call = NewCallConstantFunction(expr->target(), argument_count,
- syntactic_tail_call_mode, tail_call_mode);
- } else {
- PushArgumentsFromEnvironment(argument_count);
- if (expr->is_uninitialized()) {
- // We've never seen this call before, so let's have Crankshaft learn
- // through the type vector.
- call = NewCallFunctionViaIC(function, argument_count,
- syntactic_tail_call_mode,
- ConvertReceiverMode::kNullOrUndefined,
- tail_call_mode, expr->CallFeedbackICSlot());
- } else {
- call = NewCallFunction(
- function, argument_count, syntactic_tail_call_mode,
- ConvertReceiverMode::kNullOrUndefined, tail_call_mode);
- }
- }
- }
-
- Drop(1); // Drop the function.
- return ast_context()->ReturnInstruction(call, expr->id());
-}
-
-bool HOptimizedGraphBuilder::TryInlineArrayCall(Expression* expression,
- int argument_count,
- Handle<AllocationSite> site) {
- Handle<JSFunction> caller = current_info()->closure();
- Handle<JSFunction> target = array_function();
-
- if (!site->CanInlineCall()) {
- TraceInline(target, caller, "AllocationSite requested no inlining.");
- return false;
- }
-
- if (argument_count > 1) {
- TraceInline(target, caller, "Too many arguments to inline.");
- return false;
- }
-
- int array_length = 0;
-  // Do not inline if the constant length argument is not a smi or is outside
-  // the valid range for unrolled loop initialization.
- if (argument_count == 1) {
- HValue* argument = Top();
- if (!argument->IsConstant()) {
- TraceInline(target, caller,
- "Dont inline [new] Array(n) where n isn't constant.");
- return false;
- }
-
- HConstant* constant_argument = HConstant::cast(argument);
- if (!constant_argument->HasSmiValue()) {
- TraceInline(target, caller,
- "Constant length outside of valid inlining range.");
- return false;
- }
- array_length = constant_argument->Integer32Value();
- if (array_length < 0 || array_length > kElementLoopUnrollThreshold) {
- TraceInline(target, caller,
- "Constant length outside of valid inlining range.");
- return false;
- }
- }
-
- TraceInline(target, caller, NULL);
-
- NoObservableSideEffectsScope no_effects(this);
-
- // Register on the site for deoptimization if the transition feedback changes.
- top_info()->dependencies()->AssumeTransitionStable(site);
-
- // Build the array.
- ElementsKind kind = site->GetElementsKind();
- HValue* capacity;
- HValue* length;
- if (array_length == 0) {
- STATIC_ASSERT(0 < JSArray::kPreallocatedArrayElements);
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- capacity = Add<HConstant>(initial_capacity);
- length = graph()->GetConstant0();
- } else {
- length = Top();
- capacity = length;
- kind = GetHoleyElementsKind(kind);
- }
-
-  // We add these HForceRepresentations because we store these values as
-  // fields in the objects we construct, and an int32-to-smi HChange could
-  // deopt. Accept the deopt possibility now, before allocation occurs.
- length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
- capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
-
- // Generate size calculation code here in order to make it dominate
- // the JSArray allocation.
- HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
-
- // Bail out for large objects.
- HValue* max_size = Add<HConstant>(kMaxRegularHeapObjectSize);
- Add<HBoundsCheck>(elements_size, max_size);
-
- // Allocate (dealing with failure appropriately).
- AllocationSiteMode mode = DONT_TRACK_ALLOCATION_SITE;
- HAllocate* new_object = AllocateJSArrayObject(mode);
-
- // Fill in the fields: map, properties, length.
- Handle<Map> map_constant(isolate()->get_initial_js_array_map(kind));
- HValue* map = Add<HConstant>(map_constant);
-
- BuildJSArrayHeader(new_object, map,
- nullptr, // set elements to empty fixed array
- mode, kind, nullptr, length);
-
- // Allocate and initialize the elements.
- HAllocate* elements = BuildAllocateElements(kind, elements_size);
- BuildInitializeElementsHeader(elements, kind, capacity);
- BuildFillElementsWithHole(elements, kind, graph()->GetConstant0(), capacity);
-
- // Set the elements.
- Add<HStoreNamedField>(new_object, HObjectAccess::ForElementsPointer(),
- elements);
-
- int args_to_drop = argument_count + (expression->IsCall() ? 2 : 1);
- Drop(args_to_drop);
- ast_context()->ReturnValue(new_object);
- return true;
-}
-
-
-// Checks whether allocation using the given constructor can be inlined.
-static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
- return constructor->has_initial_map() &&
- !IsDerivedConstructor(constructor->shared()->kind()) &&
- !constructor->initial_map()->is_dictionary_map() &&
- constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() <
- HAllocate::kMaxInlineSize;
-}
-
-void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (!is_tracking_positions()) SetSourcePosition(expr->position());
- int argument_count = expr->arguments()->length() + 1; // Plus constructor.
- Factory* factory = isolate()->factory();
-
- // The constructor function is on the stack in the unoptimized code
- // during evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- if (function->IsConstant() &&
- HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
- Handle<Object> constant = HConstant::cast(function)->handle(isolate());
- expr->SetKnownGlobalTarget(Handle<JSFunction>::cast(constant));
- }
-
- if (FLAG_inline_construct &&
- expr->IsMonomorphic() &&
- IsAllocationInlineable(expr->target())) {
- Handle<JSFunction> constructor = expr->target();
- DCHECK(constructor->shared()->construct_stub() ==
- isolate()->builtins()->builtin(
- Builtins::kJSConstructStubGenericRestrictedReturn) ||
- constructor->shared()->construct_stub() ==
- isolate()->builtins()->builtin(
- Builtins::kJSConstructStubGenericUnrestrictedReturn) ||
- constructor->shared()->construct_stub() ==
- isolate()->builtins()->builtin(Builtins::kJSConstructStubApi));
- HValue* check = Add<HCheckValue>(function, constructor);
-
- // Force completion of inobject slack tracking before generating
- // allocation code to finalize instance size.
- constructor->CompleteInobjectSlackTrackingIfActive();
-
- // Calculate instance size from initial map of constructor.
- DCHECK(constructor->has_initial_map());
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // Allocate an instance of the implicit receiver object.
- HValue* size_in_bytes = Add<HConstant>(instance_size);
- HAllocationMode allocation_mode;
- HAllocate* receiver = BuildAllocate(
- size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, allocation_mode);
- receiver->set_known_initial_map(initial_map);
-
- // Initialize map and fields of the newly allocated object.
- { NoObservableSideEffectsScope no_effects(this);
- DCHECK(initial_map->instance_type() == JS_OBJECT_TYPE);
- Add<HStoreNamedField>(receiver,
- HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset),
- Add<HConstant>(initial_map));
- HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
- Add<HStoreNamedField>(receiver,
- HObjectAccess::ForMapAndOffset(initial_map,
- JSObject::kPropertiesOffset),
- empty_fixed_array);
- Add<HStoreNamedField>(receiver,
- HObjectAccess::ForMapAndOffset(initial_map,
- JSObject::kElementsOffset),
- empty_fixed_array);
- BuildInitializeInobjectProperties(receiver, initial_map);
- }
-
- // Replace the constructor function with a newly allocated receiver using
- // the index of the receiver from the top of the expression stack.
- const int receiver_index = argument_count - 1;
- DCHECK(environment()->ExpressionStackAt(receiver_index) == function);
- environment()->SetExpressionStackAt(receiver_index, receiver);
-
- if (TryInlineConstruct(expr, receiver)) {
- // Inlining worked, add a dependency on the initial map to make sure that
- // this code is deoptimized whenever the initial map of the constructor
- // changes.
- top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
- return;
- }
-
- // TODO(mstarzinger): For now we remove the previous HAllocate and all
- // corresponding instructions and instead add HPushArguments for the
-    // arguments in case inlining failed. What we should actually do is have
-    // inlining try to build a subgraph without mutating the parent graph.
- HInstruction* instr = current_block()->last();
- do {
- HInstruction* prev_instr = instr->previous();
- instr->DeleteAndReplaceWith(NULL);
- instr = prev_instr;
- } while (instr != check);
- environment()->SetExpressionStackAt(receiver_index, function);
- } else {
- // The constructor function is both an operand to the instruction and an
- // argument to the construct call.
- if (TryHandleArrayCall(expr, function)) return;
- }
-
- HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = {function, function, arity};
- Callable callable = CodeFactory::Construct(isolate());
- HConstant* stub = Add<HConstant>(callable.code());
- PushArgumentsFromEnvironment(argument_count);
- HInstruction* construct = New<HCallWithDescriptor>(
- stub, argument_count, callable.descriptor(), ArrayVector(op_vals));
- return ast_context()->ReturnInstruction(construct, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::BuildInitializeInobjectProperties(
- HValue* receiver, Handle<Map> initial_map) {
- if (initial_map->GetInObjectProperties() != 0) {
- HConstant* undefined = graph()->GetConstantUndefined();
- for (int i = 0; i < initial_map->GetInObjectProperties(); i++) {
- int property_offset = initial_map->GetInObjectPropertyOffset(i);
- Add<HStoreNamedField>(receiver, HObjectAccess::ForMapAndOffset(
- initial_map, property_offset),
- undefined);
- }
- }
-}
-
-void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
- return ast_context()->ReturnInstruction(max_smi, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
- CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- HConstant* result = New<HConstant>(static_cast<int32_t>(
- FLAG_typed_array_max_size_in_heap));
- return ast_context()->ReturnInstruction(result, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength(
- CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
- HValue* buffer = Pop();
- HInstruction* result = New<HLoadNamedField>(
- buffer, nullptr, HObjectAccess::ForJSArrayBufferByteLength());
- return ast_context()->ReturnInstruction(result, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength(
- CallRuntime* expr) {
- NoObservableSideEffectsScope scope(this);
- DCHECK(expr->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
- HValue* view = Pop();
-
- return ast_context()->ReturnValue(BuildArrayBufferViewFieldAccessor(
- view, nullptr,
- FieldIndex::ForInObjectOffset(JSArrayBufferView::kByteLengthOffset)));
-}
-
-
-void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
- CallRuntime* expr) {
- NoObservableSideEffectsScope scope(this);
- DCHECK(expr->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
- HValue* view = Pop();
-
- return ast_context()->ReturnValue(BuildArrayBufferViewFieldAccessor(
- view, nullptr,
- FieldIndex::ForInObjectOffset(JSArrayBufferView::kByteOffsetOffset)));
-}
-
-void HOptimizedGraphBuilder::GenerateArrayBufferViewWasNeutered(
- CallRuntime* expr) {
- NoObservableSideEffectsScope scope(this);
- DCHECK_EQ(expr->arguments()->length(), 1);
- CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
- HValue* view = Pop();
-
- HInstruction* buffer = Add<HLoadNamedField>(
- view, nullptr, HObjectAccess::ForJSArrayBufferViewBuffer());
- HInstruction* flags = Add<HLoadNamedField>(
- buffer, nullptr, HObjectAccess::ForJSArrayBufferBitField());
- HValue* was_neutered_mask =
- Add<HConstant>(1 << JSArrayBuffer::WasNeutered::kShift);
- HValue* was_neutered =
- AddUncasted<HBitwise>(Token::BIT_AND, flags, was_neutered_mask);
- return ast_context()->ReturnValue(was_neutered);
-}
-
-void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
- CallRuntime* expr) {
- NoObservableSideEffectsScope scope(this);
- DCHECK(expr->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
- HValue* view = Pop();
-
- return ast_context()->ReturnValue(BuildArrayBufferViewFieldAccessor(
- view, nullptr,
- FieldIndex::ForInObjectOffset(JSTypedArray::kLengthOffset)));
-}
-
-
-void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (expr->is_jsruntime()) {
- // Crankshaft always specializes to the native context, so we can just grab
- // the constant function from the current native context and embed that into
- // the code object.
- Handle<JSFunction> known_function(
- JSFunction::cast(
- current_info()->native_context()->get(expr->context_index())),
- isolate());
-
-    // The callee and the receiver both have to be pushed onto the operand
-    // stack before the arguments are evaluated.
- HConstant* function = Add<HConstant>(known_function);
- HValue* receiver = ImplicitReceiverFor(function, known_function);
- Push(function);
- Push(receiver);
-
- int argument_count = expr->arguments()->length() + 1; // Count receiver.
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- PushArgumentsFromEnvironment(argument_count);
- HInstruction* call = NewCallConstantFunction(known_function, argument_count,
- TailCallMode::kDisallow,
- TailCallMode::kDisallow);
- Drop(1); // Function
- return ast_context()->ReturnInstruction(call, expr->id());
- }
-
- const Runtime::Function* function = expr->function();
- DCHECK(function != NULL);
- switch (function->function_id) {
-#define CALL_INTRINSIC_GENERATOR(Name) \
- case Runtime::kInline##Name: \
- return Generate##Name(expr);
-
- FOR_EACH_HYDROGEN_INTRINSIC(CALL_INTRINSIC_GENERATOR)
-#undef CALL_INTRINSIC_GENERATOR
- default: {
- int argument_count = expr->arguments()->length();
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- PushArgumentsFromEnvironment(argument_count);
- HCallRuntime* call = New<HCallRuntime>(function, argument_count);
- return ast_context()->ReturnInstruction(call, expr->id());
- }
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- switch (expr->op()) {
- case Token::DELETE: return VisitDelete(expr);
- case Token::VOID: return VisitVoid(expr);
- case Token::TYPEOF: return VisitTypeof(expr);
- case Token::NOT: return VisitNot(expr);
- default: UNREACHABLE();
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
- Property* prop = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (prop != NULL) {
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* key = Pop();
- HValue* obj = Pop();
- HValue* language_mode = Add<HConstant>(
- static_cast<int32_t>(function_language_mode()), Representation::Smi());
- Add<HPushArguments>(obj, key, language_mode);
- HInstruction* instr =
- New<HCallRuntime>(Runtime::FunctionForId(Runtime::kDeleteProperty), 3);
- return ast_context()->ReturnInstruction(instr, expr->id());
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->IsUnallocated()) {
- Bailout(kDeleteWithGlobalVariable);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is not really
- // a variable, though we implement it as one. The subexpression does not
- // have side effects.
- HValue* value = var->is_this() ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- return ast_context()->ReturnValue(value);
- } else {
- Bailout(kDeleteWithNonGlobalVariable);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // Evaluate the subexpression for side effects.
- CHECK_ALIVE(VisitForEffect(expr->expression()));
- return ast_context()->ReturnValue(graph()->GetConstantTrue());
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForEffect(expr->expression()));
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForTypeOf(expr->expression()));
- HValue* value = Pop();
- HInstruction* instr = New<HTypeof>(value);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- VisitForControl(expr->expression(),
- context->if_false(),
- context->if_true());
- return;
- }
-
- if (ast_context()->IsEffect()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- DCHECK(ast_context()->IsValue());
- HBasicBlock* materialize_false = graph()->CreateBasicBlock();
- HBasicBlock* materialize_true = graph()->CreateBasicBlock();
- CHECK_BAILOUT(VisitForControl(expr->expression(),
- materialize_false,
- materialize_true));
-
- if (materialize_false->HasPredecessor()) {
- materialize_false->SetJoinId(expr->MaterializeFalseId());
- set_current_block(materialize_false);
- Push(graph()->GetConstantFalse());
- } else {
- materialize_false = NULL;
- }
-
- if (materialize_true->HasPredecessor()) {
- materialize_true->SetJoinId(expr->MaterializeTrueId());
- set_current_block(materialize_true);
- Push(graph()->GetConstantTrue());
- } else {
- materialize_true = NULL;
- }
-
- HBasicBlock* join =
- CreateJoin(materialize_false, materialize_true, expr->id());
- set_current_block(join);
- if (join != NULL) return ast_context()->ReturnValue(Pop());
-}
-
-static Representation RepresentationFor(AstType* type) {
- DisallowHeapAllocation no_allocation;
- if (type->Is(AstType::None())) return Representation::None();
- if (type->Is(AstType::SignedSmall())) return Representation::Smi();
- if (type->Is(AstType::Signed32())) return Representation::Integer32();
- if (type->Is(AstType::Number())) return Representation::Double();
- return Representation::Tagged();
-}
-
-HInstruction* HOptimizedGraphBuilder::BuildIncrement(CountOperation* expr) {
- // The input to the count operation is on top of the expression stack.
- Representation rep = RepresentationFor(expr->type());
- if (rep.IsNone() || rep.IsTagged()) {
- rep = Representation::Smi();
- }
-
- // We need an explicit HValue representing ToNumber(input). The
- // actual HChange instruction we need is (sometimes) added in a later
- // phase, so it is not available now to be used as an input to HAdd and
- // as the return value.
- HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
- if (!rep.IsDouble()) {
- number_input->SetFlag(HInstruction::kFlexibleRepresentation);
- number_input->SetFlag(HInstruction::kCannotBeTagged);
- }
- Push(number_input);
-
- // The addition has no side effects, so we do not need
- // to simulate the expression stack after this instruction.
- // Any later failures deopt to the load of the input or earlier.
- HConstant* delta = (expr->op() == Token::INC)
- ? graph()->GetConstant1()
- : graph()->GetConstantMinus1();
- HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
- if (instr->IsAdd()) {
- HAdd* add = HAdd::cast(instr);
- add->set_observed_input_representation(1, rep);
- add->set_observed_input_representation(2, Representation::Smi());
- }
- instr->ClearAllSideEffects();
- instr->SetFlag(HInstruction::kCannotBeTagged);
- return instr;
-}
-
-void HOptimizedGraphBuilder::BuildStoreForEffect(
- Expression* expr, Property* prop, FeedbackSlot slot, BailoutId ast_id,
- BailoutId return_id, HValue* object, HValue* key, HValue* value) {
- EffectContext for_effect(this);
- Push(object);
- if (key != NULL) Push(key);
- Push(value);
- BuildStore(expr, prop, slot, ast_id, return_id);
-}
-
-
-void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- if (!is_tracking_positions()) SetSourcePosition(expr->position());
- Expression* target = expr->expression();
- VariableProxy* proxy = target->AsVariableProxy();
- Property* prop = target->AsProperty();
- if (proxy == NULL && prop == NULL) {
- return Bailout(kInvalidLhsInCountOperation);
- }
-
- // Match the full code generator stack by simulating an extra stack
- // element for postfix operations in a non-effect context. The return
- // value is ToNumber(input).
- bool returns_original_input =
- expr->is_postfix() && !ast_context()->IsEffect();
- HValue* input = NULL; // ToNumber(original_input).
- HValue* after = NULL; // The result after incrementing or decrementing.
-
- if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->mode() == CONST) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
- // Argument of the count operation is a variable, not a property.
- DCHECK(prop == NULL);
- CHECK_ALIVE(VisitForValue(target));
-
- after = BuildIncrement(expr);
- input = returns_original_input ? Top() : Pop();
- Push(after);
-
- switch (var->location()) {
- case VariableLocation::UNALLOCATED:
- HandleGlobalVariableAssignment(var, after, expr->CountSlot(),
- expr->AssignmentId());
- break;
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- BindIfLive(var, after);
- break;
-
- case VariableLocation::CONTEXT: {
- HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
- ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
- HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
- mode, after);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
- break;
- }
-
- case VariableLocation::LOOKUP:
- return Bailout(kLookupVariableInCountOperation);
-
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-
- Drop(returns_original_input ? 2 : 1);
- return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
- }
-
- // Argument of the count operation is a property.
- DCHECK(prop != NULL);
- if (returns_original_input) Push(graph()->GetConstantUndefined());
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- HValue* key = NULL;
- if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(prop->key()));
- key = Top();
- }
-
- CHECK_ALIVE(PushLoad(prop, object, key));
-
- after = BuildIncrement(expr);
-
- if (returns_original_input) {
- input = Pop();
-    // Drop the object and key to push them again in the effect context below.
- Drop(key == NULL ? 1 : 2);
- environment()->SetExpressionStackAt(0, input);
- CHECK_ALIVE(BuildStoreForEffect(expr, prop, expr->CountSlot(), expr->id(),
- expr->AssignmentId(), object, key, after));
- return ast_context()->ReturnValue(Pop());
- }
-
- environment()->SetExpressionStackAt(0, after);
- return BuildStore(expr, prop, expr->CountSlot(), expr->id(),
- expr->AssignmentId());
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
- HValue* string,
- HValue* index) {
- if (string->IsConstant() && index->IsConstant()) {
- HConstant* c_string = HConstant::cast(string);
- HConstant* c_index = HConstant::cast(index);
- if (c_string->HasStringValue() && c_index->HasNumberValue()) {
- int32_t i = c_index->NumberValueAsInteger32();
- Handle<String> s = c_string->StringValue();
- if (i < 0 || i >= s->length()) {
- return New<HConstant>(std::numeric_limits<double>::quiet_NaN());
- }
- return New<HConstant>(s->Get(i));
- }
- }
- string = BuildCheckString(string);
- index = Add<HBoundsCheck>(index, AddLoadStringLength(string));
- return New<HStringCharCodeAt>(string, index);
-}
-
-
-// Checks if the given shift amounts have the following forms:
-// (N1) and (N2) with N1 + N2 = 32; (sa) and (32 - sa).
-static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
- HValue* const32_minus_sa) {
- if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
- const HConstant* c1 = HConstant::cast(sa);
- const HConstant* c2 = HConstant::cast(const32_minus_sa);
- return c1->HasInteger32Value() && c2->HasInteger32Value() &&
- (c1->Integer32Value() + c2->Integer32Value() == 32);
- }
- if (!const32_minus_sa->IsSub()) return false;
- HSub* sub = HSub::cast(const32_minus_sa);
- return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa;
-}
-
-
-// Checks whether the left and right operands are shift instructions with
-// opposite directions that can be replaced by a single rotate-right
-// instruction. Returns the operand and the shift amount for the rotate
-// instruction in the former case.
-bool HGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
- HShl* shl;
- HShr* shr;
- if (left->IsShl() && right->IsShr()) {
- shl = HShl::cast(left);
- shr = HShr::cast(right);
- } else if (left->IsShr() && right->IsShl()) {
- shl = HShl::cast(right);
- shr = HShr::cast(left);
- } else {
- return false;
- }
- if (shl->left() != shr->left()) return false;
-
- if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
- !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
- return false;
- }
- *operand = shr->left();
- *shift_amount = shr->right();
- return true;
-}
-
-
-bool CanBeZero(HValue* right) {
- if (right->IsConstant()) {
- HConstant* right_const = HConstant::cast(right);
- if (right_const->HasInteger32Value() &&
- (right_const->Integer32Value() & 0x1f) != 0) {
- return false;
- }
- }
- return true;
-}
-
-HValue* HGraphBuilder::EnforceNumberType(HValue* number, AstType* expected) {
- if (expected->Is(AstType::SignedSmall())) {
- return AddUncasted<HForceRepresentation>(number, Representation::Smi());
- }
- if (expected->Is(AstType::Signed32())) {
- return AddUncasted<HForceRepresentation>(number,
- Representation::Integer32());
- }
- return number;
-}
-
-HValue* HGraphBuilder::TruncateToNumber(HValue* value, AstType** expected) {
- if (value->IsConstant()) {
- HConstant* constant = HConstant::cast(value);
- Maybe<HConstant*> number =
- constant->CopyToTruncatedNumber(isolate(), zone());
- if (number.IsJust()) {
- *expected = AstType::Number();
- return AddInstruction(number.FromJust());
- }
- }
-
- // We put temporary values on the stack, which don't correspond to anything
- // in baseline code. Since nothing is observable we avoid recording those
- // pushes with a NoObservableSideEffectsScope.
- NoObservableSideEffectsScope no_effects(this);
-
- AstType* expected_type = *expected;
-
- // Separate the number type from the rest.
- AstType* expected_obj =
- AstType::Intersect(expected_type, AstType::NonNumber(), zone());
- AstType* expected_number =
- AstType::Intersect(expected_type, AstType::Number(), zone());
-
- // We expect to get a number.
-  // (We need to check first, since AstType::None->Is(AstType::Any()) == true.)
- if (expected_obj->Is(AstType::None())) {
- DCHECK(!expected_number->Is(AstType::None()));
- return value;
- }
-
- if (expected_obj->Is(AstType::Undefined())) {
- // This is already done by HChange.
- *expected = AstType::Union(expected_number, AstType::Number(), zone());
- return value;
- }
-
- return value;
-}
-
-
-HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
- BinaryOperation* expr,
- HValue* left,
- HValue* right,
- PushBeforeSimulateBehavior push_sim_result) {
- AstType* left_type = bounds_.get(expr->left()).lower;
- AstType* right_type = bounds_.get(expr->right()).lower;
- AstType* result_type = bounds_.get(expr).lower;
- Maybe<int> fixed_right_arg = expr->fixed_right_arg();
- Handle<AllocationSite> allocation_site = expr->allocation_site();
-
- HAllocationMode allocation_mode;
- if (FLAG_allocation_site_pretenuring && !allocation_site.is_null()) {
- allocation_mode = HAllocationMode(allocation_site);
- }
- HValue* result = HGraphBuilder::BuildBinaryOperation(
- expr->op(), left, right, left_type, right_type, result_type,
- fixed_right_arg, allocation_mode, expr->id());
- // Add a simulate after instructions with observable side effects, and
- // after phis, which are the result of BuildBinaryOperation when we
- // inlined some complex subgraph.
- if (result->HasObservableSideEffects() || result->IsPhi()) {
- if (push_sim_result == PUSH_BEFORE_SIMULATE) {
- Push(result);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- Drop(1);
- } else {
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- }
- }
- return result;
-}
-
-HValue* HGraphBuilder::BuildBinaryOperation(
- Token::Value op, HValue* left, HValue* right, AstType* left_type,
- AstType* right_type, AstType* result_type, Maybe<int> fixed_right_arg,
- HAllocationMode allocation_mode, BailoutId opt_id) {
- bool maybe_string_add = false;
- if (op == Token::ADD) {
-    // If we are adding a constant string to something for which we don't have
-    // feedback yet, assume that it's also going to be a string and don't
-    // generate deopt instructions.
- if (!left_type->IsInhabited() && right->IsConstant() &&
- HConstant::cast(right)->HasStringValue()) {
- left_type = AstType::String();
- }
-
- if (!right_type->IsInhabited() && left->IsConstant() &&
- HConstant::cast(left)->HasStringValue()) {
- right_type = AstType::String();
- }
-
- maybe_string_add = (left_type->Maybe(AstType::String()) ||
- left_type->Maybe(AstType::Receiver()) ||
- right_type->Maybe(AstType::String()) ||
- right_type->Maybe(AstType::Receiver()));
- }
-
- Representation left_rep = RepresentationFor(left_type);
- Representation right_rep = RepresentationFor(right_type);
-
- if (!left_type->IsInhabited()) {
- Add<HDeoptimize>(
- DeoptimizeReason::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
- Deoptimizer::SOFT);
- left_type = AstType::Any();
- left_rep = RepresentationFor(left_type);
- maybe_string_add = op == Token::ADD;
- }
-
- if (!right_type->IsInhabited()) {
- Add<HDeoptimize>(
- DeoptimizeReason::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
- Deoptimizer::SOFT);
- right_type = AstType::Any();
- right_rep = RepresentationFor(right_type);
- maybe_string_add = op == Token::ADD;
- }
-
- if (!maybe_string_add) {
- left = TruncateToNumber(left, &left_type);
- right = TruncateToNumber(right, &right_type);
- }
-
- // Special case for string addition here.
- if (op == Token::ADD &&
- (left_type->Is(AstType::String()) || right_type->Is(AstType::String()))) {
- // Validate type feedback for left argument.
- if (left_type->Is(AstType::String())) {
- left = BuildCheckString(left);
- }
-
- // Validate type feedback for right argument.
- if (right_type->Is(AstType::String())) {
- right = BuildCheckString(right);
- }
-
- // Convert left argument as necessary.
- if (left_type->Is(AstType::Number())) {
- DCHECK(right_type->Is(AstType::String()));
- left = BuildNumberToString(left, left_type);
- } else if (!left_type->Is(AstType::String())) {
- DCHECK(right_type->Is(AstType::String()));
- return AddUncasted<HStringAdd>(
- left, right, allocation_mode.GetPretenureMode(),
- STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
- }
-
- // Convert right argument as necessary.
- if (right_type->Is(AstType::Number())) {
- DCHECK(left_type->Is(AstType::String()));
- right = BuildNumberToString(right, right_type);
- } else if (!right_type->Is(AstType::String())) {
- DCHECK(left_type->Is(AstType::String()));
- return AddUncasted<HStringAdd>(
- left, right, allocation_mode.GetPretenureMode(),
- STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
- }
-
- // Fast paths for empty constant strings.
- Handle<String> left_string =
- left->IsConstant() && HConstant::cast(left)->HasStringValue()
- ? HConstant::cast(left)->StringValue()
- : Handle<String>();
- Handle<String> right_string =
- right->IsConstant() && HConstant::cast(right)->HasStringValue()
- ? HConstant::cast(right)->StringValue()
- : Handle<String>();
- if (!left_string.is_null() && left_string->length() == 0) return right;
- if (!right_string.is_null() && right_string->length() == 0) return left;
- if (!left_string.is_null() && !right_string.is_null()) {
- return AddUncasted<HStringAdd>(
- left, right, allocation_mode.GetPretenureMode(),
- STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
- }
-
- // Register the dependent code with the allocation site.
- if (!allocation_mode.feedback_site().is_null()) {
- DCHECK(!graph()->info()->IsStub());
- Handle<AllocationSite> site(allocation_mode.feedback_site());
- top_info()->dependencies()->AssumeTenuringDecision(site);
- }
-
- // Inline the string addition into the stub when creating allocation
- // mementos to gather allocation site feedback, or if we can statically
- // infer that we're going to create a cons string.
- if ((graph()->info()->IsStub() &&
- allocation_mode.CreateAllocationMementos()) ||
- (left->IsConstant() &&
- HConstant::cast(left)->HasStringValue() &&
- HConstant::cast(left)->StringValue()->length() + 1 >=
- ConsString::kMinLength) ||
- (right->IsConstant() &&
- HConstant::cast(right)->HasStringValue() &&
- HConstant::cast(right)->StringValue()->length() + 1 >=
- ConsString::kMinLength)) {
- return BuildStringAdd(left, right, allocation_mode);
- }
-
- // Fallback to using the string add stub.
- return AddUncasted<HStringAdd>(
- left, right, allocation_mode.GetPretenureMode(), STRING_ADD_CHECK_NONE,
- allocation_mode.feedback_site());
- }
-
- // Special case for +x here.
- if (op == Token::MUL) {
- if (left->EqualsInteger32Constant(1)) {
- return BuildToNumber(right);
- }
- if (right->EqualsInteger32Constant(1)) {
- return BuildToNumber(left);
- }
- }
-
- if (graph()->info()->IsStub()) {
- left = EnforceNumberType(left, left_type);
- right = EnforceNumberType(right, right_type);
- }
-
- Representation result_rep = RepresentationFor(result_type);
-
- bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
- (right_rep.IsTagged() && !right_rep.IsSmi());
-
- HInstruction* instr = NULL;
- // Only the stub is allowed to call into the runtime, since otherwise we would
- // inline several instructions (including the two pushes) for every tagged
-  // operation in optimized code, which is more expensive than a stub call.
- if (graph()->info()->IsStub() && is_non_primitive) {
- HValue* values[] = {left, right};
-#define GET_STUB(Name) \
- do { \
- Callable callable = CodeFactory::Name(isolate()); \
- HValue* stub = Add<HConstant>(callable.code()); \
- instr = AddUncasted<HCallWithDescriptor>(stub, 0, callable.descriptor(), \
- ArrayVector(values)); \
- } while (false)
-
- switch (op) {
- default:
- UNREACHABLE();
- case Token::ADD:
- GET_STUB(Add);
- break;
- case Token::SUB:
- GET_STUB(Subtract);
- break;
- case Token::MUL:
- GET_STUB(Multiply);
- break;
- case Token::DIV:
- GET_STUB(Divide);
- break;
- case Token::MOD:
- GET_STUB(Modulus);
- break;
- case Token::BIT_OR:
- GET_STUB(BitwiseOr);
- break;
- case Token::BIT_AND:
- GET_STUB(BitwiseAnd);
- break;
- case Token::BIT_XOR:
- GET_STUB(BitwiseXor);
- break;
- case Token::SAR:
- GET_STUB(ShiftRight);
- break;
- case Token::SHR:
- GET_STUB(ShiftRightLogical);
- break;
- case Token::SHL:
- GET_STUB(ShiftLeft);
- break;
- }
-#undef GET_STUB
- } else {
- switch (op) {
- case Token::ADD:
- instr = AddUncasted<HAdd>(left, right);
- break;
- case Token::SUB:
- instr = AddUncasted<HSub>(left, right);
- break;
- case Token::MUL:
- instr = AddUncasted<HMul>(left, right);
- break;
- case Token::MOD: {
- if (fixed_right_arg.IsJust() &&
- !right->EqualsInteger32Constant(fixed_right_arg.FromJust())) {
- HConstant* fixed_right =
- Add<HConstant>(static_cast<int>(fixed_right_arg.FromJust()));
- IfBuilder if_same(this);
- if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
- if_same.Then();
- if_same.ElseDeopt(DeoptimizeReason::kUnexpectedRHSOfBinaryOperation);
- right = fixed_right;
- }
- instr = AddUncasted<HMod>(left, right);
- break;
- }
- case Token::DIV:
- instr = AddUncasted<HDiv>(left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- instr = AddUncasted<HBitwise>(op, left, right);
- break;
- case Token::BIT_OR: {
- HValue *operand, *shift_amount;
- if (left_type->Is(AstType::Signed32()) &&
- right_type->Is(AstType::Signed32()) &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = AddUncasted<HRor>(operand, shift_amount);
- } else {
- instr = AddUncasted<HBitwise>(op, left, right);
- }
- break;
- }
- case Token::SAR:
- instr = AddUncasted<HSar>(left, right);
- break;
- case Token::SHR:
- instr = AddUncasted<HShr>(left, right);
- if (instr->IsShr() && CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
- }
- break;
- case Token::SHL:
- instr = AddUncasted<HShl>(left, right);
- break;
- default:
- UNREACHABLE();
- }
- }
-
- if (instr->IsBinaryOperation()) {
- HBinaryOperation* binop = HBinaryOperation::cast(instr);
- binop->set_observed_input_representation(1, left_rep);
- binop->set_observed_input_representation(2, right_rep);
- binop->initialize_output_representation(result_rep);
- if (graph()->info()->IsStub()) {
- // Stub should not call into stub.
- instr->SetFlag(HValue::kCannotBeTagged);
- // And should truncate on HForceRepresentation already.
- if (left->IsForceRepresentation()) {
- left->CopyFlag(HValue::kTruncatingToSmi, instr);
- left->CopyFlag(HValue::kTruncatingToInt32, instr);
- }
- if (right->IsForceRepresentation()) {
- right->CopyFlag(HValue::kTruncatingToSmi, instr);
- right->CopyFlag(HValue::kTruncatingToInt32, instr);
- }
- }
- }
- return instr;
-}
-
-// Check for the form (%_ClassOf(foo) === 'BarClass').
-static bool IsClassOfTest(CompareOperation* expr) {
- if (expr->op() != Token::EQ_STRICT) return false;
- CallRuntime* call = expr->left()->AsCallRuntime();
- if (call == NULL) return false;
- Literal* literal = expr->right()->AsLiteral();
- if (literal == NULL) return false;
- if (!literal->value()->IsString()) return false;
- if (call->is_jsruntime()) return false;
- if (call->function()->function_id != Runtime::kInlineClassOf) return false;
- DCHECK_EQ(call->arguments()->length(), 1);
- return true;
-}
-
-void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- switch (expr->op()) {
- case Token::COMMA:
- return VisitComma(expr);
- case Token::OR:
- case Token::AND:
- return VisitLogicalExpression(expr);
- default:
- return VisitArithmeticExpression(expr);
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
- CHECK_ALIVE(VisitForEffect(expr->left()));
- // Visit the right subexpression in the same AST context as the entire
- // expression.
- Visit(expr->right());
-}
-
-
-void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
- bool is_logical_and = expr->op() == Token::AND;
- if (ast_context()->IsTest()) {
- TestContext* context = TestContext::cast(ast_context());
- // Translate left subexpression.
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- if (is_logical_and) {
- CHECK_BAILOUT(VisitForControl(expr->left(),
- eval_right,
- context->if_false()));
- } else {
- CHECK_BAILOUT(VisitForControl(expr->left(),
- context->if_true(),
- eval_right));
- }
-
- // Translate right subexpression by visiting it in the same AST
- // context as the entire expression.
- CHECK(eval_right->HasPredecessor());
- eval_right->SetJoinId(expr->RightId());
- set_current_block(eval_right);
- Visit(expr->right());
- } else if (ast_context()->IsValue()) {
- CHECK_ALIVE(VisitForValue(expr->left()));
- DCHECK(current_block() != NULL);
- HValue* left_value = Top();
-
- // Short-circuit left values that always evaluate to the same boolean value.
- if (expr->left()->ToBooleanIsTrue() || expr->left()->ToBooleanIsFalse()) {
- // l (evals true) && r -> r
- // l (evals true) || r -> l
- // l (evals false) && r -> l
- // l (evals false) || r -> r
- if (is_logical_and == expr->left()->ToBooleanIsTrue()) {
- Drop(1);
- CHECK_ALIVE(VisitForValue(expr->right()));
- }
- return ast_context()->ReturnValue(Pop());
- }
-
- // We need an extra block to maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* eval_right = graph()->CreateBasicBlock();
- ToBooleanHints expected(expr->left()->to_boolean_types());
- HBranch* test = is_logical_and
- ? New<HBranch>(left_value, expected, eval_right, empty_block)
- : New<HBranch>(left_value, expected, empty_block, eval_right);
- FinishCurrentBlock(test);
-
- set_current_block(eval_right);
- Drop(1); // Value of the left subexpression.
- CHECK_BAILOUT(VisitForValue(expr->right()));
-
- HBasicBlock* join_block =
- CreateJoin(empty_block, current_block(), expr->id());
- set_current_block(join_block);
- return ast_context()->ReturnValue(Pop());
-
- } else {
- DCHECK(ast_context()->IsEffect());
- // In an effect context, we don't need the value of the left subexpression,
- // only its control flow and side effects. We need an extra block to
- // maintain edge-split form.
- HBasicBlock* empty_block = graph()->CreateBasicBlock();
- HBasicBlock* right_block = graph()->CreateBasicBlock();
- if (is_logical_and) {
- CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
- } else {
- CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
- }
-
-    // TODO(kmillikin): Find a way to fix this. It's ugly that there are
-    // actually two empty blocks (one here and one inserted by
-    // TestContext::BuildBranch), that they both have an HSimulate even though
-    // the second one is not a merge node, and that we really have no good AST
-    // ID to put on that first HSimulate.
-
- // Technically, we should be able to handle the case when one side of
- // the test is not connected, but this can trip up liveness analysis
- // if we did not fully connect the test context based on some optimistic
- // assumption. If such an assumption was violated, we would end up with
- // an environment with optimized-out values. So we should always
- // conservatively connect the test context.
-
- CHECK(right_block->HasPredecessor());
- CHECK(empty_block->HasPredecessor());
-
- empty_block->SetJoinId(expr->id());
-
- right_block->SetJoinId(expr->RightId());
- set_current_block(right_block);
- CHECK_BAILOUT(VisitForEffect(expr->right()));
- right_block = current_block();
-
- HBasicBlock* join_block =
- CreateJoin(empty_block, right_block, expr->id());
- set_current_block(join_block);
- // We did not materialize any value in the predecessor environments,
- // so there is no need to handle it here.
- }
-}
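For reference, the value-context short-circuit above reduces to a single rule: when the left operand's boolean value is statically known, "l && r" keeps the right operand exactly when the left is true, and "l || r" keeps the right operand exactly when the left is false; otherwise the left operand itself is the result. A minimal standalone sketch of that rule (plain C++, no V8 types; the enum and function names are illustrative, not Crankshaft API):

#include <cassert>

// Which operand a statically decided logical expression reduces to.
enum class Keep { kLeft, kRight };

// Mirrors the four-line table in the comment above: for "l && r" with l
// known true we keep r, for "l || r" with l known true we keep l, and the
// other way around when l is known false.
Keep Simplify(bool is_logical_and, bool left_is_true) {
  return (is_logical_and == left_is_true) ? Keep::kRight : Keep::kLeft;
}

int main() {
  assert(Simplify(/*is_logical_and=*/true,  /*left_is_true=*/true)  == Keep::kRight);
  assert(Simplify(/*is_logical_and=*/true,  /*left_is_true=*/false) == Keep::kLeft);
  assert(Simplify(/*is_logical_and=*/false, /*left_is_true=*/true)  == Keep::kLeft);
  assert(Simplify(/*is_logical_and=*/false, /*left_is_true=*/false) == Keep::kRight);
  return 0;
}

The condition "is_logical_and == expr->left()->ToBooleanIsTrue()" in the builder is exactly the kRight case: the left value is dropped and only the right subexpression is evaluated for its value.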
-
-
-void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->left()));
- CHECK_ALIVE(VisitForValue(expr->right()));
- SetSourcePosition(expr->position());
- HValue* right = Pop();
- HValue* left = Pop();
- HValue* result =
- BuildBinaryOperation(expr, left, right,
- ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
- : PUSH_BEFORE_SIMULATE);
- return ast_context()->ReturnValue(result);
-}
-
-
-void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- Expression* sub_expr,
- Handle<String> check) {
- CHECK_ALIVE(VisitForTypeOf(sub_expr));
- SetSourcePosition(expr->position());
- HValue* value = Pop();
- HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
- return ast_context()->ReturnControl(instr, expr->id());
-}
-
-namespace {
-
-bool IsLiteralCompareStrict(Isolate* isolate, HValue* left, Token::Value op,
- HValue* right) {
- return op == Token::EQ_STRICT &&
- ((left->IsConstant() &&
- !HConstant::cast(left)->handle(isolate)->IsNumber() &&
- !HConstant::cast(left)->handle(isolate)->IsString()) ||
- (right->IsConstant() &&
- !HConstant::cast(right)->handle(isolate)->IsNumber() &&
- !HConstant::cast(right)->handle(isolate)->IsString()));
-}
-
-} // namespace
-
-void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
-
- if (!is_tracking_positions()) SetSourcePosition(expr->position());
-
- // Check for a few fast cases. The AST visiting behavior must be in sync
- // with the full codegen: We don't push both left and right values onto
- // the expression stack when one side is a special-case literal.
- Expression* sub_expr = NULL;
- Literal* literal;
- if (expr->IsLiteralCompareTypeof(&sub_expr, &literal)) {
- return HandleLiteralCompareTypeof(expr, sub_expr,
- Handle<String>::cast(literal->value()));
- }
- if (expr->IsLiteralCompareUndefined(&sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
- }
- if (expr->IsLiteralCompareNull(&sub_expr)) {
- return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
- }
-
- if (IsClassOfTest(expr)) {
- CallRuntime* call = expr->left()->AsCallRuntime();
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- Literal* literal = expr->right()->AsLiteral();
- Handle<String> rhs = Handle<String>::cast(literal->value());
- HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs);
- return ast_context()->ReturnControl(instr, expr->id());
- }
-
- AstType* left_type = bounds_.get(expr->left()).lower;
- AstType* right_type = bounds_.get(expr->right()).lower;
- AstType* combined_type = expr->combined_type();
-
- CHECK_ALIVE(VisitForValue(expr->left()));
- CHECK_ALIVE(VisitForValue(expr->right()));
-
- HValue* right = Pop();
- HValue* left = Pop();
- Token::Value op = expr->op();
-
- if (IsLiteralCompareStrict(isolate(), left, op, right)) {
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, expr->id());
- }
-
- if (op == Token::INSTANCEOF) {
- // Check to see if the rhs of the instanceof is a known function.
- if (right->IsConstant() &&
- HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
- // Make sure that the {function} already has a meaningful initial map
- // (i.e. we constructed at least one instance using the constructor
- // {function}), and has an instance as .prototype.
- if (function->has_initial_map() &&
- !function->map()->has_non_instance_prototype()) {
- // Lookup @@hasInstance on the {function}.
- Handle<Map> function_map(function->map(), isolate());
- PropertyAccessInfo has_instance(
- this, LOAD, function_map,
- isolate()->factory()->has_instance_symbol());
- // Check if we are using the Function.prototype[@@hasInstance].
- if (has_instance.CanAccessMonomorphic() &&
- has_instance.IsDataConstant() &&
- has_instance.constant().is_identical_to(
- isolate()->function_has_instance())) {
- // Add appropriate receiver map check and prototype chain
- // checks to guard the @@hasInstance lookup chain.
- AddCheckMap(right, function_map);
- if (has_instance.has_holder()) {
- Handle<JSObject> prototype(
- JSObject::cast(has_instance.map()->prototype()), isolate());
- BuildCheckPrototypeMaps(prototype, has_instance.holder());
- }
- // Perform the prototype chain walk.
- Handle<Map> initial_map(function->initial_map(), isolate());
- top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
- HInstruction* prototype =
- Add<HConstant>(handle(initial_map->prototype(), isolate()));
- HHasInPrototypeChainAndBranch* result =
- New<HHasInPrototypeChainAndBranch>(left, prototype);
- return ast_context()->ReturnControl(result, expr->id());
- }
- }
- }
-
- Callable callable = CodeFactory::InstanceOf(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {left, right};
- HCallWithDescriptor* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- result->set_type(HType::Boolean());
- return ast_context()->ReturnInstruction(result, expr->id());
-
- } else if (op == Token::IN) {
- Callable callable = CodeFactory::HasProperty(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {left, right};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
- return ast_context()->ReturnInstruction(result, expr->id());
- }
-
- PushBeforeSimulateBehavior push_behavior =
- ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
- : PUSH_BEFORE_SIMULATE;
- HControlInstruction* compare = BuildCompareInstruction(
- op, left, right, left_type, right_type, combined_type,
- ScriptPositionToSourcePosition(expr->left()->position()),
- ScriptPositionToSourcePosition(expr->right()->position()),
- push_behavior, expr->id());
- if (compare == NULL) return; // Bailed out.
- return ast_context()->ReturnControl(compare, expr->id());
-}
-
-HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
- Token::Value op, HValue* left, HValue* right, AstType* left_type,
- AstType* right_type, AstType* combined_type, SourcePosition left_position,
- SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
- BailoutId bailout_id) {
- // Cases handled below depend on collected type feedback. They should
- // soft deoptimize when there is no type feedback.
- if (!combined_type->IsInhabited()) {
- Add<HDeoptimize>(
- DeoptimizeReason::
- kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
- Deoptimizer::SOFT);
- combined_type = left_type = right_type = AstType::Any();
- }
-
- Representation left_rep = RepresentationFor(left_type);
- Representation right_rep = RepresentationFor(right_type);
- Representation combined_rep = RepresentationFor(combined_type);
-
- if (combined_type->Is(AstType::Receiver())) {
- if (Token::IsEqualityOp(op)) {
- // HCompareObjectEqAndBranch can only deal with objects, so
- // exclude numbers.
- if ((left->IsConstant() &&
- HConstant::cast(left)->HasNumberValue()) ||
- (right->IsConstant() &&
- HConstant::cast(right)->HasNumberValue())) {
- Add<HDeoptimize>(
- DeoptimizeReason::kTypeMismatchBetweenFeedbackAndConstant,
- Deoptimizer::SOFT);
- // The caller expects a branch instruction, so make it happy.
- return New<HBranch>(graph()->GetConstantTrue());
- }
- if (op == Token::EQ) {
- // For abstract equality we need to check both sides are receivers.
- if (combined_type->IsClass()) {
- Handle<Map> map = combined_type->AsClass()->Map();
- AddCheckMap(left, map);
- AddCheckMap(right, map);
- } else {
- BuildCheckHeapObject(left);
- Add<HCheckInstanceType>(left, HCheckInstanceType::IS_JS_RECEIVER);
- BuildCheckHeapObject(right);
- Add<HCheckInstanceType>(right, HCheckInstanceType::IS_JS_RECEIVER);
- }
- } else {
- // For strict equality we only need to check one side.
- HValue* operand_to_check =
- left->block()->block_id() < right->block()->block_id() ? left
- : right;
- if (combined_type->IsClass()) {
- Handle<Map> map = combined_type->AsClass()->Map();
- AddCheckMap(operand_to_check, map);
- } else {
- BuildCheckHeapObject(operand_to_check);
- Add<HCheckInstanceType>(operand_to_check,
- HCheckInstanceType::IS_JS_RECEIVER);
- }
- }
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return result;
- } else {
- if (combined_type->IsClass()) {
- // TODO(bmeurer): This is an optimized version of an x < y, x > y,
- // x <= y or x >= y, where both x and y are spec objects with the
- // same map. The CompareIC collects this map for us. So if we know
- // that there's no @@toPrimitive on the map (including the prototype
- // chain), and both valueOf and toString are the default initial
- // implementations (on the %ObjectPrototype%), then we can reduce
- // the comparison to map checks on x and y, because the comparison
- // will turn into a comparison of "[object CLASS]" to itself (the
- // default outcome of toString, since valueOf returns a spec object).
- // This is pretty much ad hoc, so in TurboFan we could do a lot better
- // and inline the interesting parts of ToPrimitive (actually we could
- // even do that in Crankshaft but we don't want to waste too much
- // time on this now).
- DCHECK(Token::IsOrderedRelationalCompareOp(op));
- Handle<Map> map = combined_type->AsClass()->Map();
- PropertyAccessInfo value_of(this, LOAD, map,
- isolate()->factory()->valueOf_string());
- PropertyAccessInfo to_primitive(
- this, LOAD, map, isolate()->factory()->to_primitive_symbol());
- PropertyAccessInfo to_string(this, LOAD, map,
- isolate()->factory()->toString_string());
- PropertyAccessInfo to_string_tag(
- this, LOAD, map, isolate()->factory()->to_string_tag_symbol());
- if (to_primitive.CanAccessMonomorphic() && !to_primitive.IsFound() &&
- to_string_tag.CanAccessMonomorphic() &&
- (!to_string_tag.IsFound() || to_string_tag.IsData() ||
- to_string_tag.IsDataConstant()) &&
- value_of.CanAccessMonomorphic() && value_of.IsDataConstant() &&
- value_of.constant().is_identical_to(isolate()->object_value_of()) &&
- to_string.CanAccessMonomorphic() && to_string.IsDataConstant() &&
- to_string.constant().is_identical_to(
- isolate()->object_to_string())) {
- // We depend on the prototype chain to stay the same, because we
- // also need to deoptimize when someone installs @@toPrimitive
- // or @@toStringTag somewhere in the prototype chain.
- Handle<Object> prototype(map->prototype(), isolate());
- if (prototype->IsJSObject()) {
- BuildCheckPrototypeMaps(Handle<JSObject>::cast(prototype),
- Handle<JSObject>::null());
- }
- AddCheckMap(left, map);
- AddCheckMap(right, map);
- // The caller expects a branch instruction, so make it happy.
- return New<HBranch>(
- graph()->GetConstantBool(op == Token::LTE || op == Token::GTE));
- }
- }
- Bailout(kUnsupportedNonPrimitiveCompare);
- return NULL;
- }
- } else if (combined_type->Is(AstType::InternalizedString()) &&
- Token::IsEqualityOp(op)) {
- // If we have a constant argument, it should be consistent with the type
- // feedback (otherwise we fail assertions in HCompareObjectEqAndBranch).
- if ((left->IsConstant() &&
- !HConstant::cast(left)->HasInternalizedStringValue()) ||
- (right->IsConstant() &&
- !HConstant::cast(right)->HasInternalizedStringValue())) {
- Add<HDeoptimize>(
- DeoptimizeReason::kTypeMismatchBetweenFeedbackAndConstant,
- Deoptimizer::SOFT);
- // The caller expects a branch instruction, so make it happy.
- return New<HBranch>(graph()->GetConstantTrue());
- }
- BuildCheckHeapObject(left);
- Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
- BuildCheckHeapObject(right);
- Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return result;
- } else if (combined_type->Is(AstType::String())) {
- BuildCheckHeapObject(left);
- Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
- BuildCheckHeapObject(right);
- Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
- HStringCompareAndBranch* result =
- New<HStringCompareAndBranch>(left, right, op);
- return result;
- } else if (combined_type->Is(AstType::Boolean())) {
- AddCheckMap(left, isolate()->factory()->boolean_map());
- AddCheckMap(right, isolate()->factory()->boolean_map());
- if (Token::IsEqualityOp(op)) {
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return result;
- }
- left = Add<HLoadNamedField>(
- left, nullptr,
- HObjectAccess::ForOddballToNumber(Representation::Smi()));
- right = Add<HLoadNamedField>(
- right, nullptr,
- HObjectAccess::ForOddballToNumber(Representation::Smi()));
- HCompareNumericAndBranch* result =
- New<HCompareNumericAndBranch>(left, right, op);
- return result;
- } else {
- if (op == Token::EQ) {
- if (left->IsConstant() &&
- HConstant::cast(left)->GetInstanceType() == ODDBALL_TYPE &&
- HConstant::cast(left)->IsUndetectable()) {
- return New<HIsUndetectableAndBranch>(right);
- }
-
- if (right->IsConstant() &&
- HConstant::cast(right)->GetInstanceType() == ODDBALL_TYPE &&
- HConstant::cast(right)->IsUndetectable()) {
- return New<HIsUndetectableAndBranch>(left);
- }
- }
-
- if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
- result->set_observed_input_representation(1, left_rep);
- result->set_observed_input_representation(2, right_rep);
- if (result->HasObservableSideEffects()) {
- if (push_sim_result == PUSH_BEFORE_SIMULATE) {
- Push(result);
- AddSimulate(bailout_id, REMOVABLE_SIMULATE);
- Drop(1);
- } else {
- AddSimulate(bailout_id, REMOVABLE_SIMULATE);
- }
- }
- // TODO(jkummerow): Can we make this more efficient?
- HBranch* branch = New<HBranch>(result);
- return branch;
- } else {
- HCompareNumericAndBranch* result =
- New<HCompareNumericAndBranch>(left, right, op);
- result->set_observed_input_representation(left_rep, right_rep);
- return result;
- }
- }
-}
-
-
-void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!is_tracking_positions()) SetSourcePosition(expr->position());
- CHECK_ALIVE(VisitForValue(sub_expr));
- HValue* value = Pop();
- HControlInstruction* instr;
- if (expr->op() == Token::EQ_STRICT) {
- HConstant* nil_constant = nil == kNullValue
- ? graph()->GetConstantNull()
- : graph()->GetConstantUndefined();
- instr = New<HCompareObjectEqAndBranch>(value, nil_constant);
- } else {
- DCHECK_EQ(Token::EQ, expr->op());
- instr = New<HIsUndetectableAndBranch>(value);
- }
- return ast_context()->ReturnControl(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitSpread(Spread* expr) { UNREACHABLE(); }
-
-
-void HOptimizedGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
- UNREACHABLE();
-}
-
-void HOptimizedGraphBuilder::VisitGetIterator(GetIterator* expr) {
- UNREACHABLE();
-}
-
-void HOptimizedGraphBuilder::VisitImportCallExpression(
- ImportCallExpression* expr) {
- UNREACHABLE();
-}
-
-HValue* HOptimizedGraphBuilder::AddThisFunction() {
- return AddInstruction(BuildThisFunction());
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
- // If we share optimized code between different closures, the
- // this-function is not a constant, except inside an inlined body.
- if (function_state()->outer() != NULL) {
- return New<HConstant>(
- function_state()->compilation_info()->closure());
- } else {
- return New<HThisFunction>();
- }
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
- Handle<JSObject> boilerplate_object,
- AllocationSiteUsageContext* site_context) {
- NoObservableSideEffectsScope no_effects(this);
- Handle<Map> initial_map(boilerplate_object->map());
- InstanceType instance_type = initial_map->instance_type();
- DCHECK(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
-
- HType type = instance_type == JS_ARRAY_TYPE
- ? HType::JSArray() : HType::JSObject();
- HValue* object_size_constant = Add<HConstant>(initial_map->instance_size());
-
- PretenureFlag pretenure_flag = NOT_TENURED;
- Handle<AllocationSite> top_site(*site_context->top(), isolate());
- if (FLAG_allocation_site_pretenuring) {
- pretenure_flag = top_site->GetPretenureMode();
- }
-
- Handle<AllocationSite> current_site(*site_context->current(), isolate());
- if (*top_site == *current_site) {
- // We install a dependency for pretenuring only on the outermost literal.
- top_info()->dependencies()->AssumeTenuringDecision(top_site);
- }
- top_info()->dependencies()->AssumeTransitionStable(current_site);
-
- HInstruction* object =
- Add<HAllocate>(object_size_constant, type, pretenure_flag, instance_type,
- graph()->GetConstant0(), top_site);
-
- // If allocation folding reaches kMaxRegularHeapObjectSize, the
- // elements array may not get folded into the object. Hence, we set the
- // elements pointer to the empty fixed array and let store elimination
- // remove this store in the folding case.
- HConstant* empty_fixed_array = Add<HConstant>(
- isolate()->factory()->empty_fixed_array());
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
-
- BuildEmitObjectHeader(boilerplate_object, object);
-
- // Similarly to the elements pointer, there is no guarantee that all
- // property allocations can get folded, so pre-initialize all in-object
- // properties to a safe value.
- BuildInitializeInobjectProperties(object, initial_map);
-
- // Copy in-object properties.
- if (initial_map->NumberOfFields() != 0 ||
- initial_map->unused_property_fields() > 0) {
- BuildEmitInObjectProperties(boilerplate_object, object, site_context,
- pretenure_flag);
- }
-
- // Copy elements.
- Handle<FixedArrayBase> elements(boilerplate_object->elements());
- int elements_size = (elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
- elements->Size() : 0;
-
- if (pretenure_flag == TENURED &&
- elements->map() == isolate()->heap()->fixed_cow_array_map() &&
- isolate()->heap()->InNewSpace(*elements)) {
- // If we would like to pretenure a fixed cow array, we must ensure that the
- // array is already in old space, otherwise we'll create too many old-to-
- // new-space pointers (overflowing the store buffer).
- elements = Handle<FixedArrayBase>(
- isolate()->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(elements)));
- boilerplate_object->set_elements(*elements);
- }
-
- HInstruction* object_elements = NULL;
- if (elements_size > 0) {
- HValue* object_elements_size = Add<HConstant>(elements_size);
- InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
- ? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
- object_elements = Add<HAllocate>(object_elements_size, HType::HeapObject(),
- pretenure_flag, instance_type,
- graph()->GetConstant0(), top_site);
- BuildEmitElements(boilerplate_object, elements, object_elements,
- site_context);
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- object_elements);
- } else {
- Handle<Object> elements_field =
- Handle<Object>(boilerplate_object->elements(), isolate());
- HInstruction* object_elements_cow = Add<HConstant>(elements_field);
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- object_elements_cow);
- }
-
- return object;
-}
-
-
-void HOptimizedGraphBuilder::BuildEmitObjectHeader(
- Handle<JSObject> boilerplate_object,
- HInstruction* object) {
- DCHECK(boilerplate_object->properties()->length() == 0);
-
- Handle<Map> boilerplate_object_map(boilerplate_object->map());
- AddStoreMapConstant(object, boilerplate_object_map);
-
- Handle<Object> properties_field =
- Handle<Object>(boilerplate_object->properties(), isolate());
- DCHECK(*properties_field == isolate()->heap()->empty_fixed_array());
- HInstruction* properties = Add<HConstant>(properties_field);
- HObjectAccess access = HObjectAccess::ForPropertiesPointer();
- Add<HStoreNamedField>(object, access, properties);
-
- if (boilerplate_object->IsJSArray()) {
- Handle<JSArray> boilerplate_array =
- Handle<JSArray>::cast(boilerplate_object);
- Handle<Object> length_field =
- Handle<Object>(boilerplate_array->length(), isolate());
- HInstruction* length = Add<HConstant>(length_field);
-
- DCHECK(boilerplate_array->length()->IsSmi());
- Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
- boilerplate_array->GetElementsKind()), length);
- }
-}
-
-
-void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
- Handle<JSObject> boilerplate_object,
- HInstruction* object,
- AllocationSiteUsageContext* site_context,
- PretenureFlag pretenure_flag) {
- Handle<Map> boilerplate_map(boilerplate_object->map());
- Handle<DescriptorArray> descriptors(boilerplate_map->instance_descriptors());
- int limit = boilerplate_map->NumberOfOwnDescriptors();
-
- int copied_fields = 0;
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- copied_fields++;
- FieldIndex field_index = FieldIndex::ForDescriptor(*boilerplate_map, i);
-
- int property_offset = field_index.offset();
- Handle<Name> name(descriptors->GetKey(i));
-
- // The access for the store depends on the type of the boilerplate.
- HObjectAccess access = boilerplate_object->IsJSArray() ?
- HObjectAccess::ForJSArrayOffset(property_offset) :
- HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
-
- if (boilerplate_object->IsUnboxedDoubleField(field_index)) {
- CHECK(!boilerplate_object->IsJSArray());
- double value = boilerplate_object->RawFastDoublePropertyAt(field_index);
- access = access.WithRepresentation(Representation::Double());
- Add<HStoreNamedField>(object, access, Add<HConstant>(value));
- continue;
- }
- Handle<Object> value(boilerplate_object->RawFastPropertyAt(field_index),
- isolate());
-
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<AllocationSite> current_site = site_context->EnterNewScope();
- HInstruction* result =
- BuildFastLiteral(value_object, site_context);
- site_context->ExitScope(current_site, value_object);
- Add<HStoreNamedField>(object, access, result);
- } else {
- Representation representation = details.representation();
- HInstruction* value_instruction;
-
- if (representation.IsDouble()) {
- // Allocate a HeapNumber box and store the value into it.
- HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
- HInstruction* double_box = Add<HAllocate>(
- heap_number_constant, HType::HeapObject(), pretenure_flag,
- MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
- AddStoreMapConstant(double_box,
- isolate()->factory()->mutable_heap_number_map());
- // Unwrap the mutable heap number from the boilerplate.
- HValue* double_value =
- Add<HConstant>(Handle<HeapNumber>::cast(value)->value());
- Add<HStoreNamedField>(
- double_box, HObjectAccess::ForHeapNumberValue(), double_value);
- value_instruction = double_box;
- } else if (representation.IsSmi()) {
- value_instruction = value->IsUninitialized(isolate())
- ? graph()->GetConstant0()
- : Add<HConstant>(value);
- // Ensure that the value is stored as a smi.
- access = access.WithRepresentation(representation);
- } else {
- value_instruction = Add<HConstant>(value);
- }
-
- Add<HStoreNamedField>(object, access, value_instruction);
- }
- }
-
- int inobject_properties = boilerplate_object->map()->GetInObjectProperties();
- HInstruction* value_instruction =
- Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
- for (int i = copied_fields; i < inobject_properties; i++) {
- DCHECK(boilerplate_object->IsJSObject());
- int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
- HObjectAccess access =
- HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
- Add<HStoreNamedField>(object, access, value_instruction);
- }
-}
-
-
-void HOptimizedGraphBuilder::BuildEmitElements(
- Handle<JSObject> boilerplate_object,
- Handle<FixedArrayBase> elements,
- HValue* object_elements,
- AllocationSiteUsageContext* site_context) {
- ElementsKind kind = boilerplate_object->map()->elements_kind();
- int elements_length = elements->length();
- HValue* object_elements_length = Add<HConstant>(elements_length);
- BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
-
- // Copy elements backing store content.
- if (elements->IsFixedDoubleArray()) {
- BuildEmitFixedDoubleArray(elements, kind, object_elements);
- } else if (elements->IsFixedArray()) {
- BuildEmitFixedArray(elements, kind, object_elements,
- site_context);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
- Handle<FixedArrayBase> elements,
- ElementsKind kind,
- HValue* object_elements) {
- HInstruction* boilerplate_elements = Add<HConstant>(elements);
- int elements_length = elements->length();
- for (int i = 0; i < elements_length; i++) {
- HValue* key_constant = Add<HConstant>(i);
- HInstruction* value_instruction =
- Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
- kind, ALLOW_RETURN_HOLE);
- HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
- value_instruction, nullptr, kind);
- store->SetFlag(HValue::kTruncatingToNumber);
- }
-}
-
-
-void HOptimizedGraphBuilder::BuildEmitFixedArray(
- Handle<FixedArrayBase> elements,
- ElementsKind kind,
- HValue* object_elements,
- AllocationSiteUsageContext* site_context) {
- HInstruction* boilerplate_elements = Add<HConstant>(elements);
- int elements_length = elements->length();
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- Handle<Object> value(fast_elements->get(i), isolate());
- HValue* key_constant = Add<HConstant>(i);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<AllocationSite> current_site = site_context->EnterNewScope();
- HInstruction* result =
- BuildFastLiteral(value_object, site_context);
- site_context->ExitScope(current_site, value_object);
- Add<HStoreKeyed>(object_elements, key_constant, result, nullptr, kind);
- } else {
- ElementsKind copy_kind =
- kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
- HInstruction* value_instruction =
- Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
- copy_kind, ALLOW_RETURN_HOLE);
- Add<HStoreKeyed>(object_elements, key_constant, value_instruction,
- nullptr, copy_kind);
- }
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- HInstruction* instr = BuildThisFunction();
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitSuperPropertyReference(
- SuperPropertyReference* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kSuperReference);
-}
-
-
-void HOptimizedGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
- DCHECK(!HasStackOverflow());
- DCHECK(current_block() != NULL);
- DCHECK(current_block()->HasPredecessor());
- return Bailout(kSuperReference);
-}
-
-void HOptimizedGraphBuilder::VisitDeclarations(
- Declaration::List* declarations) {
- DCHECK(globals_.is_empty());
- AstVisitor<HOptimizedGraphBuilder>::VisitDeclarations(declarations);
- if (!globals_.is_empty()) {
- Handle<FixedArray> array =
- isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
- for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags = current_info()->GetDeclareGlobalsFlags();
- Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
- Add<HDeclareGlobals>(array, flags, vector);
- globals_.Rewind(0);
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::UNALLOCATED: {
- DCHECK(!variable->binding_needs_init());
- globals_.Add(variable->name(), zone());
- FeedbackSlot slot = proxy->VariableFeedbackSlot();
- DCHECK(!slot.IsInvalid());
- globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
- globals_.Add(isolate()->factory()->undefined_value(), zone());
- globals_.Add(isolate()->factory()->undefined_value(), zone());
- return;
- }
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- if (variable->binding_needs_init()) {
- HValue* value = graph()->GetConstantHole();
- environment()->Bind(variable, value);
- }
- break;
- case VariableLocation::CONTEXT:
- if (variable->binding_needs_init()) {
- HValue* value = graph()->GetConstantHole();
- HValue* context = environment()->context();
- HStoreContextSlot* store = Add<HStoreContextSlot>(
- context, variable->index(), HStoreContextSlot::kNoCheck, value);
- if (store->HasObservableSideEffects()) {
- Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
- }
- }
- break;
- case VariableLocation::LOOKUP:
- return Bailout(kUnsupportedLookupSlotInDeclaration);
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::UNALLOCATED: {
- globals_.Add(variable->name(), zone());
- FeedbackSlot slot = proxy->VariableFeedbackSlot();
- DCHECK(!slot.IsInvalid());
- globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
-
- // We need the slot where the literals array lives, too.
- slot = declaration->fun()->LiteralFeedbackSlot();
- DCHECK(!slot.IsInvalid());
- globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
-
- Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
- declaration->fun(), current_info()->script(), top_info());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_.Add(function, zone());
- return;
- }
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL: {
- CHECK_ALIVE(VisitForValue(declaration->fun()));
- HValue* value = Pop();
- BindIfLive(variable, value);
- break;
- }
- case VariableLocation::CONTEXT: {
- CHECK_ALIVE(VisitForValue(declaration->fun()));
- HValue* value = Pop();
- HValue* context = environment()->context();
- HStoreContextSlot* store = Add<HStoreContextSlot>(
- context, variable->index(), HStoreContextSlot::kNoCheck, value);
- if (store->HasObservableSideEffects()) {
- Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
- }
- break;
- }
- case VariableLocation::LOOKUP:
- return Bailout(kUnsupportedLookupSlotInDeclaration);
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-}
-
-
-void HOptimizedGraphBuilder::VisitRewritableExpression(
- RewritableExpression* node) {
- CHECK_ALIVE(Visit(node->expression()));
-}
-
-
-// Generators for inline runtime functions.
-// Support for types.
-void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsJSReceiver(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value,
- FIRST_JS_RECEIVER_TYPE,
- LAST_JS_RECEIVER_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsTypedArray(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value, JS_TYPED_ARRAY_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* input = Pop();
- if (input->type().IsSmi()) {
- return ast_context()->ReturnValue(input);
- } else {
- Callable callable = CodeFactory::ToInteger(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {input};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- return ast_context()->ReturnInstruction(result, call->id());
- }
-}
-
-
-void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HValue* result = BuildToObject(value);
- return ast_context()->ReturnValue(result);
-}
-
-
-void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* input = Pop();
- if (input->type().IsString()) {
- return ast_context()->ReturnValue(input);
- } else {
- Callable callable = CodeFactory::ToString(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {input};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- return ast_context()->ReturnInstruction(result, call->id());
- }
-}
-
-
-void HOptimizedGraphBuilder::GenerateToLength(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- Callable callable = CodeFactory::ToLength(isolate());
- HValue* input = Pop();
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {input};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateToNumber(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- Callable callable = CodeFactory::ToNumber(isolate());
- HValue* input = Pop();
- HValue* result = BuildToNumber(input);
- if (result->HasObservableSideEffects()) {
- if (!ast_context()->IsEffect()) Push(result);
- Add<HSimulate>(call->id(), REMOVABLE_SIMULATE);
- if (!ast_context()->IsEffect()) result = Pop();
- }
- return ast_context()->ReturnValue(result);
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIfContinuation continuation;
- IfBuilder if_proxy(this);
-
- HValue* smicheck = if_proxy.IfNot<HIsSmiAndBranch>(value);
- if_proxy.And();
- HValue* map = Add<HLoadNamedField>(value, smicheck, HObjectAccess::ForMap());
- HValue* instance_type =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
- if_proxy.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(JS_PROXY_TYPE), Token::EQ);
-
- if_proxy.CaptureContinuation(&continuation);
- return ast_context()->ReturnContinuation(&continuation, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* object = Pop();
- HIfContinuation continuation(graph()->CreateBasicBlock(),
- graph()->CreateBasicBlock());
- IfBuilder if_not_smi(this);
- if_not_smi.IfNot<HIsSmiAndBranch>(object);
- if_not_smi.Then();
- {
- NoObservableSideEffectsScope no_effects(this);
-
- IfBuilder if_fast_packed(this);
- HValue* elements_kind = BuildGetElementsKind(object);
- if_fast_packed.If<HCompareNumericAndBranch>(
- elements_kind, Add<HConstant>(FAST_SMI_ELEMENTS), Token::EQ);
- if_fast_packed.Or();
- if_fast_packed.If<HCompareNumericAndBranch>(
- elements_kind, Add<HConstant>(FAST_ELEMENTS), Token::EQ);
- if_fast_packed.Or();
- if_fast_packed.If<HCompareNumericAndBranch>(
- elements_kind, Add<HConstant>(FAST_DOUBLE_ELEMENTS), Token::EQ);
- if_fast_packed.JoinContinuation(&continuation);
- }
- if_not_smi.JoinContinuation(&continuation);
- return ast_context()->ReturnContinuation(&continuation, call->id());
-}
-
-
-// Fast support for charCodeAt(n).
-void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* index = Pop();
- HValue* string = Pop();
- HInstruction* result = BuildStringCharCodeAt(string, index);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for SubString.
-void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
- DCHECK_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- Callable callable = CodeFactory::SubString(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* to = Pop();
- HValue* from = Pop();
- HValue* string = Pop();
- HValue* values[] = {string, from, to};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
- result->set_type(HType::String());
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for calls.
-void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
- DCHECK_LE(2, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- CallTrampolineDescriptor descriptor(isolate());
- PushArgumentsFromEnvironment(call->arguments()->length() - 1);
- HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
- HValue* target = Pop();
- HValue* values[] = {target, Add<HConstant>(call->arguments()->length() - 2)};
- HInstruction* result =
- New<HCallWithDescriptor>(trampoline, call->arguments()->length() - 1,
- descriptor, ArrayVector(values));
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* index = Pop();
- HValue* object = Pop();
- HInstruction* result = New<HLoadKeyed>(
- object, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateFixedArraySet(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* value = Pop();
- HValue* index = Pop();
- HValue* object = Pop();
- NoObservableSideEffectsScope no_effects(this);
- Add<HStoreKeyed>(object, index, value, nullptr, FAST_HOLEY_ELEMENTS);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::GenerateTheHole(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 0);
- return ast_context()->ReturnValue(graph()->GetConstantHole());
-}
-
-
-void HOptimizedGraphBuilder::GenerateCreateIterResultObject(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* done = Pop();
- HValue* value = Pop();
- HValue* result = BuildCreateIterResultObject(value, done);
- return ast_context()->ReturnValue(result);
-}
-
-
-void HOptimizedGraphBuilder::GenerateJSCollectionGetTable(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* receiver = Pop();
- HInstruction* result = New<HLoadNamedField>(
- receiver, nullptr, HObjectAccess::ForJSCollectionTable());
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateStringGetRawHashField(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* object = Pop();
- HInstruction* result = New<HLoadNamedField>(
- object, nullptr, HObjectAccess::ForStringHashField());
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-template <typename CollectionType>
-HValue* HOptimizedGraphBuilder::BuildAllocateOrderedHashTable() {
- static const int kCapacity = CollectionType::kMinCapacity;
- static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
- static const int kFixedArrayLength = CollectionType::kHashTableStartIndex +
- kBucketCount +
- (kCapacity * CollectionType::kEntrySize);
- static const int kSizeInBytes =
- FixedArray::kHeaderSize + (kFixedArrayLength * kPointerSize);
-
- // Allocate the table and add the proper map.
- HValue* table =
- Add<HAllocate>(Add<HConstant>(kSizeInBytes), HType::HeapObject(),
- NOT_TENURED, FIXED_ARRAY_TYPE, graph()->GetConstant0());
- AddStoreMapConstant(table, isolate()->factory()->ordered_hash_table_map());
-
- // Initialize the FixedArray...
- HValue* length = Add<HConstant>(kFixedArrayLength);
- Add<HStoreNamedField>(table, HObjectAccess::ForFixedArrayLength(), length);
-
- // ...and the OrderedHashTable fields.
- Add<HStoreNamedField>(
- table,
- HObjectAccess::ForOrderedHashTableNumberOfBuckets<CollectionType>(),
- Add<HConstant>(kBucketCount));
- Add<HStoreNamedField>(
- table,
- HObjectAccess::ForOrderedHashTableNumberOfElements<CollectionType>(),
- graph()->GetConstant0());
- Add<HStoreNamedField>(
- table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
- CollectionType>(),
- graph()->GetConstant0());
-
- // Fill the buckets with kNotFound.
- HValue* not_found = Add<HConstant>(CollectionType::kNotFound);
- for (int i = 0; i < kBucketCount; ++i) {
- Add<HStoreNamedField>(
- table, HObjectAccess::ForOrderedHashTableBucket<CollectionType>(i),
- not_found);
- }
-
- // Fill the data table with undefined.
- HValue* undefined = graph()->GetConstantUndefined();
- for (int i = 0; i < (kCapacity * CollectionType::kEntrySize); ++i) {
- Add<HStoreNamedField>(table,
- HObjectAccess::ForOrderedHashTableDataTableIndex<
- CollectionType, kBucketCount>(i),
- undefined);
- }
-
- return table;
-}
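The size computation above is pure constant arithmetic over the CollectionType traits. A standalone sketch of that arithmetic with assumed, illustrative values for the constants (they are stand-ins for the example, not the values from the V8 headers):

#include <cstdio>

// Illustrative stand-ins for the traits used by BuildAllocateOrderedHashTable.
// The concrete numbers here are assumptions, not taken from V8.
constexpr int kMinCapacity = 4;          // CollectionType::kMinCapacity
constexpr int kLoadFactor = 2;           // CollectionType::kLoadFactor
constexpr int kEntrySize = 2;            // CollectionType::kEntrySize
constexpr int kHashTableStartIndex = 3;  // CollectionType::kHashTableStartIndex
constexpr int kHeaderSize = 16;          // FixedArray::kHeaderSize (assumed, 64-bit)
constexpr int kPointerSize = 8;          // assumed 64-bit slot size

// Same formula as the deleted code: prefix fields + buckets + entry slots,
// then header plus one pointer-sized slot per array element.
constexpr int kCapacity = kMinCapacity;
constexpr int kBucketCount = kCapacity / kLoadFactor;
constexpr int kFixedArrayLength =
    kHashTableStartIndex + kBucketCount + (kCapacity * kEntrySize);
constexpr int kSizeInBytes = kHeaderSize + (kFixedArrayLength * kPointerSize);

int main() {
  // With the assumed constants: 3 + 2 + 4 * 2 = 13 slots, 16 + 13 * 8 = 120 bytes.
  std::printf("length=%d size=%d\n", kFixedArrayLength, kSizeInBytes);
  return 0;
}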
-
-
-void HOptimizedGraphBuilder::GenerateSetInitialize(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* receiver = Pop();
-
- NoObservableSideEffectsScope no_effects(this);
- HValue* table = BuildAllocateOrderedHashTable<OrderedHashSet>();
- Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(), table);
- return ast_context()->ReturnValue(receiver);
-}
-
-
-void HOptimizedGraphBuilder::GenerateMapInitialize(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* receiver = Pop();
-
- NoObservableSideEffectsScope no_effects(this);
- HValue* table = BuildAllocateOrderedHashTable<OrderedHashMap>();
- Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(), table);
- return ast_context()->ReturnValue(receiver);
-}
-
-
-template <typename CollectionType>
-void HOptimizedGraphBuilder::BuildOrderedHashTableClear(HValue* receiver) {
- HValue* old_table = Add<HLoadNamedField>(
- receiver, nullptr, HObjectAccess::ForJSCollectionTable());
- HValue* new_table = BuildAllocateOrderedHashTable<CollectionType>();
- Add<HStoreNamedField>(
- old_table, HObjectAccess::ForOrderedHashTableNextTable<CollectionType>(),
- new_table);
- Add<HStoreNamedField>(
- old_table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
- CollectionType>(),
- Add<HConstant>(CollectionType::kClearedTableSentinel));
- Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(),
- new_table);
-}
-
-
-void HOptimizedGraphBuilder::GenerateSetClear(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* receiver = Pop();
-
- NoObservableSideEffectsScope no_effects(this);
- BuildOrderedHashTableClear<OrderedHashSet>(receiver);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMapClear(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* receiver = Pop();
-
- NoObservableSideEffectsScope no_effects(this);
- BuildOrderedHashTableClear<OrderedHashMap>(receiver);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
- CallRuntime* call) {
- Add<HDebugBreak>();
- return ast_context()->ReturnValue(graph()->GetConstant0());
-}
-
-
-void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 0);
- HValue* ref =
- Add<HConstant>(ExternalReference::debug_is_active_address(isolate()));
- HValue* value =
- Add<HLoadNamedField>(ref, nullptr, HObjectAccess::ForExternalUInteger8());
- return ast_context()->ReturnValue(value);
-}
-
-#undef CHECK_BAILOUT
-#undef CHECK_ALIVE
-
-
-HEnvironment::HEnvironment(HEnvironment* outer,
- Scope* scope,
- Handle<JSFunction> closure,
- Zone* zone)
- : closure_(closure),
- values_(0, zone),
- frame_type_(JS_FUNCTION),
- parameter_count_(0),
- specials_count_(1),
- local_count_(0),
- outer_(outer),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(BailoutId::None()),
- zone_(zone) {
- DeclarationScope* declaration_scope = scope->GetDeclarationScope();
- Initialize(declaration_scope->num_parameters() + 1,
- declaration_scope->num_stack_slots(), 0);
-}
-
-
-HEnvironment::HEnvironment(Zone* zone, int parameter_count)
- : values_(0, zone),
- frame_type_(STUB),
- parameter_count_(parameter_count),
- specials_count_(1),
- local_count_(0),
- outer_(NULL),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(BailoutId::None()),
- zone_(zone) {
- Initialize(parameter_count, 0, 0);
-}
-
-
-HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
- : values_(0, zone),
- frame_type_(JS_FUNCTION),
- parameter_count_(0),
- specials_count_(0),
- local_count_(0),
- outer_(NULL),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(other->ast_id()),
- zone_(zone) {
- Initialize(other);
-}
-
-
-HEnvironment::HEnvironment(HEnvironment* outer,
- Handle<JSFunction> closure,
- FrameType frame_type,
- int arguments,
- Zone* zone)
- : closure_(closure),
- values_(arguments, zone),
- frame_type_(frame_type),
- parameter_count_(arguments),
- specials_count_(0),
- local_count_(0),
- outer_(outer),
- entry_(NULL),
- pop_count_(0),
- push_count_(0),
- ast_id_(BailoutId::None()),
- zone_(zone) {
-}
-
-
-void HEnvironment::Initialize(int parameter_count,
- int local_count,
- int stack_height) {
- parameter_count_ = parameter_count;
- local_count_ = local_count;
-
- // Avoid reallocating the temporaries' backing store on the first Push.
- int total = parameter_count + specials_count_ + local_count + stack_height;
- values_.Initialize(total + 4, zone());
- for (int i = 0; i < total; ++i) values_.Add(NULL, zone());
-}
-
-
-void HEnvironment::Initialize(const HEnvironment* other) {
- closure_ = other->closure();
- values_.AddAll(other->values_, zone());
- assigned_variables_.Union(other->assigned_variables_, zone());
- frame_type_ = other->frame_type_;
- parameter_count_ = other->parameter_count_;
- local_count_ = other->local_count_;
- if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
- entry_ = other->entry_;
- pop_count_ = other->pop_count_;
- push_count_ = other->push_count_;
- specials_count_ = other->specials_count_;
- ast_id_ = other->ast_id_;
-}
-
-
-void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
- DCHECK(!block->IsLoopHeader());
- DCHECK(values_.length() == other->values_.length());
-
- int length = values_.length();
- for (int i = 0; i < length; ++i) {
- HValue* value = values_[i];
- if (value != NULL && value->IsPhi() && value->block() == block) {
- // There is already a phi for the i'th value.
- HPhi* phi = HPhi::cast(value);
- // Assert index is correct and that we haven't missed an incoming edge.
- DCHECK(phi->merged_index() == i || !phi->HasMergedIndex());
- DCHECK(phi->OperandCount() == block->predecessors()->length());
- phi->AddInput(other->values_[i]);
- } else if (values_[i] != other->values_[i]) {
- // There is a fresh value on the incoming edge, a phi is needed.
- DCHECK(values_[i] != NULL && other->values_[i] != NULL);
- HPhi* phi = block->AddNewPhi(i);
- HValue* old_value = values_[i];
- for (int j = 0; j < block->predecessors()->length(); j++) {
- phi->AddInput(old_value);
- }
- phi->AddInput(other->values_[i]);
- this->values_[i] = phi;
- }
- }
-}
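The interesting case above is the final branch: any slot whose value differs between the two environments gets a phi over the predecessor values, while agreeing slots are left alone. A toy standalone model of that merge step (strings stand in for HValues; nothing here is V8 API):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Merge two per-slot value lists at a join point. Where the lists agree the
// value is kept; where they differ a "phi(a,b)" placeholder is introduced,
// analogous to block->AddNewPhi(i) plus the two AddInput calls above.
std::vector<std::string> Merge(const std::vector<std::string>& left,
                               const std::vector<std::string>& right) {
  assert(left.size() == right.size());
  std::vector<std::string> merged;
  merged.reserve(left.size());
  for (std::size_t i = 0; i < left.size(); ++i) {
    merged.push_back(left[i] == right[i]
                         ? left[i]
                         : "phi(" + left[i] + "," + right[i] + ")");
  }
  return merged;
}

int main() {
  std::vector<std::string> then_env = {"x0", "y1", "t3"};
  std::vector<std::string> else_env = {"x0", "y2", "t3"};
  std::vector<std::string> join = Merge(then_env, else_env);
  assert(join[0] == "x0");
  assert(join[1] == "phi(y1,y2)");  // only the slot that differs gets a phi
  assert(join[2] == "t3");
  return 0;
}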
-
-
-void HEnvironment::Bind(int index, HValue* value) {
- DCHECK(value != NULL);
- assigned_variables_.Add(index, zone());
- values_[index] = value;
-}
-
-
-bool HEnvironment::HasExpressionAt(int index) const {
- return index >= parameter_count_ + specials_count_ + local_count_;
-}
-
-
-bool HEnvironment::ExpressionStackIsEmpty() const {
- DCHECK(length() >= first_expression_index());
- return length() == first_expression_index();
-}
-
-
-void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
- int count = index_from_top + 1;
- int index = values_.length() - count;
- DCHECK(HasExpressionAt(index));
- // The push count must include at least the element in question or else
- // the new value will not be included in this environment's history.
- if (push_count_ < count) {
- // This is the same effect as popping then re-pushing 'count' elements.
- pop_count_ += (count - push_count_);
- push_count_ = count;
- }
- values_[index] = value;
-}
-
-
-HValue* HEnvironment::RemoveExpressionStackAt(int index_from_top) {
- int count = index_from_top + 1;
- int index = values_.length() - count;
- DCHECK(HasExpressionAt(index));
- // Simulate popping 'count' elements and then
- // pushing 'count - 1' elements back.
- pop_count_ += Max(count - push_count_, 0);
- push_count_ = Max(push_count_ - count, 0) + (count - 1);
- return values_.Remove(index);
-}
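The bookkeeping above records the removal as if 'count' elements had been popped and 'count - 1' of them pushed back, so the environment's pop/push history stays consistent for later simulates. A small standalone check of that arithmetic (illustrative names, not V8 API):

#include <algorithm>
#include <cassert>

// Toy model of the pop_count_/push_count_ bookkeeping in
// RemoveExpressionStackAt above.
struct History {
  int pop_count = 0;
  int push_count = 0;
};

// Removing the element 'index_from_top' slots below the top behaves like
// popping 'count' elements and pushing 'count - 1' of them back.
void RecordRemove(History* h, int index_from_top) {
  int count = index_from_top + 1;
  h->pop_count += std::max(count - h->push_count, 0);
  h->push_count = std::max(h->push_count - count, 0) + (count - 1);
}

int main() {
  History h;
  // From a clean history, remove the element two slots below the top:
  // count = 3, so pop_count grows by 3 and push_count becomes 2.
  RecordRemove(&h, 2);
  assert(h.pop_count == 3);
  assert(h.push_count == 2);
  return 0;
}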
-
-
-void HEnvironment::Drop(int count) {
- for (int i = 0; i < count; ++i) {
- Pop();
- }
-}
-
-
-void HEnvironment::Print() const {
- OFStream os(stdout);
- os << *this << "\n";
-}
-
-
-HEnvironment* HEnvironment::Copy() const {
- return new(zone()) HEnvironment(this, zone());
-}
-
-
-HEnvironment* HEnvironment::CopyWithoutHistory() const {
- HEnvironment* result = Copy();
- result->ClearHistory();
- return result;
-}
-
-
-HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
- HEnvironment* new_env = Copy();
- for (int i = 0; i < values_.length(); ++i) {
- HPhi* phi = loop_header->AddNewPhi(i);
- phi->AddInput(values_[i]);
- new_env->values_[i] = phi;
- }
- new_env->ClearHistory();
- return new_env;
-}
-
-
-HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
- Handle<JSFunction> target,
- FrameType frame_type,
- int arguments) const {
- HEnvironment* new_env =
- new(zone()) HEnvironment(outer, target, frame_type,
- arguments + 1, zone());
- for (int i = 0; i <= arguments; ++i) { // Include receiver.
- new_env->Push(ExpressionStackAt(arguments - i));
- }
- new_env->ClearHistory();
- return new_env;
-}
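The copy loop above reads ExpressionStackAt(arguments - i), so the receiver, which is the deepest of the pushed values, ends up first in the stub environment, followed by the arguments in call order. A toy standalone model of that indexing (plain C++; names are illustrative only, not V8 API):

#include <cassert>
#include <vector>

// The caller's expression stack holds receiver, arg1, ..., argN with argN on
// top; the stub environment receives them in the same order, receiver first.
std::vector<int> CopyArgsWithReceiver(const std::vector<int>& expr_stack,
                                      int arguments) {
  std::vector<int> stub_env;
  for (int i = 0; i <= arguments; ++i) {
    // ExpressionStackAt(k) reads the value k slots below the top of the stack.
    int from_top = arguments - i;
    stub_env.push_back(expr_stack[expr_stack.size() - 1 - from_top]);
  }
  return stub_env;
}

int main() {
  // Expression stack grows to the right: ..., receiver, a1, a2 (top).
  std::vector<int> expr_stack = {7, /*receiver*/ 10, /*a1*/ 11, /*a2*/ 12};
  std::vector<int> stub_env = CopyArgsWithReceiver(expr_stack, /*arguments=*/2);
  assert((stub_env == std::vector<int>{10, 11, 12}));  // receiver comes first
  return 0;
}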
-
-void HEnvironment::MarkAsTailCaller() {
- DCHECK_EQ(JS_FUNCTION, frame_type());
- frame_type_ = TAIL_CALLER_FUNCTION;
-}
-
-void HEnvironment::ClearTailCallerMark() {
- DCHECK_EQ(TAIL_CALLER_FUNCTION, frame_type());
- frame_type_ = JS_FUNCTION;
-}
-
-HEnvironment* HEnvironment::CopyForInlining(
- Handle<JSFunction> target, int arguments, FunctionLiteral* function,
- HConstant* undefined, InliningKind inlining_kind,
- TailCallMode syntactic_tail_call_mode) const {
- DCHECK_EQ(JS_FUNCTION, frame_type());
-
- // Outer environment is a copy of this one without the arguments.
- int arity = function->scope()->num_parameters();
-
- HEnvironment* outer = Copy();
- outer->Drop(arguments + 1); // Including receiver.
- outer->ClearHistory();
-
- if (syntactic_tail_call_mode == TailCallMode::kAllow) {
- DCHECK_EQ(NORMAL_RETURN, inlining_kind);
- outer->MarkAsTailCaller();
- }
-
- if (inlining_kind == CONSTRUCT_CALL_RETURN) {
- // Create an artificial constructor stub environment. The receiver should
- // actually be the constructor function, but we pass the newly allocated
- // object instead; DoComputeConstructStubFrame() relies on that.
- outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
- } else if (inlining_kind == GETTER_CALL_RETURN) {
- // We need an additional StackFrame::INTERNAL frame for restoring the
- // correct context.
- outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments);
- } else if (inlining_kind == SETTER_CALL_RETURN) {
- // We need an additional StackFrame::INTERNAL frame for temporarily saving
- // the argument of the setter; see StoreStubCompiler::CompileStoreViaSetter.
- outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments);
- }
-
- if (arity != arguments) {
- // Create artificial arguments adaptation environment.
- outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
- }
-
- HEnvironment* inner =
- new(zone()) HEnvironment(outer, function->scope(), target, zone());
- // Get the argument values from the original environment.
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = (i <= arguments) ?
- ExpressionStackAt(arguments - i) : undefined;
- inner->SetValueAt(i, push);
- }
- inner->SetValueAt(arity + 1, context());
- for (int i = arity + 2; i < inner->length(); ++i) {
- inner->SetValueAt(i, undefined);
- }
-
- inner->set_ast_id(BailoutId::FunctionEntry());
- return inner;
-}
-
-
-std::ostream& operator<<(std::ostream& os, const HEnvironment& env) {
- for (int i = 0; i < env.length(); i++) {
- if (i == 0) os << "parameters\n";
- if (i == env.parameter_count()) os << "specials\n";
- if (i == env.parameter_count() + env.specials_count()) os << "locals\n";
- if (i == env.parameter_count() + env.specials_count() + env.local_count()) {
- os << "expressions\n";
- }
- HValue* val = env.values()->at(i);
- os << i << ": ";
- if (val != NULL) {
- os << val;
- } else {
- os << "NULL";
- }
- os << "\n";
- }
- return os << "\n";
-}
-
-
-void HTracer::TraceCompilation(CompilationInfo* info) {
- Tag tag(this, "compilation");
- std::string name;
- if (info->parse_info()) {
- Object* source_name = info->script()->name();
- if (source_name->IsString()) {
- String* str = String::cast(source_name);
- if (str->length() > 0) {
- name.append(str->ToCString().get());
- name.append(":");
- }
- }
- }
- std::unique_ptr<char[]> method_name = info->GetDebugName();
- name.append(method_name.get());
- if (info->IsOptimizing()) {
- PrintStringProperty("name", name.c_str());
- PrintIndent();
- trace_.Add("method \"%s:%d\"\n", method_name.get(),
- info->optimization_id());
- } else {
- PrintStringProperty("name", name.c_str());
- PrintStringProperty("method", "stub");
- }
- PrintLongProperty("date",
- static_cast<int64_t>(base::OS::TimeCurrentMillis()));
-}
-
-
-void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- DCHECK(!chunk->isolate()->concurrent_recompilation_enabled());
- AllowHandleDereference allow_deref;
- AllowDeferredHandleDereference allow_deferred_deref;
- Trace(name, chunk->graph(), chunk);
-}
-
-
-void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- DCHECK(!graph->isolate()->concurrent_recompilation_enabled());
- AllowHandleDereference allow_deref;
- AllowDeferredHandleDereference allow_deferred_deref;
- Trace(name, graph, NULL);
-}
-
-
-void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
- Tag tag(this, "cfg");
- PrintStringProperty("name", name);
- const ZoneList<HBasicBlock*>* blocks = graph->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* current = blocks->at(i);
- Tag block_tag(this, "block");
- PrintBlockProperty("name", current->block_id());
- PrintIntProperty("from_bci", -1);
- PrintIntProperty("to_bci", -1);
-
- if (!current->predecessors()->is_empty()) {
- PrintIndent();
- trace_.Add("predecessors");
- for (int j = 0; j < current->predecessors()->length(); ++j) {
- trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
- }
- trace_.Add("\n");
- } else {
- PrintEmptyProperty("predecessors");
- }
-
- if (current->end()->SuccessorCount() == 0) {
- PrintEmptyProperty("successors");
- } else {
- PrintIndent();
- trace_.Add("successors");
- for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
- trace_.Add(" \"B%d\"", it.Current()->block_id());
- }
- trace_.Add("\n");
- }
-
- PrintEmptyProperty("xhandlers");
-
- {
- PrintIndent();
- trace_.Add("flags");
- if (current->IsLoopSuccessorDominator()) {
- trace_.Add(" \"dom-loop-succ\"");
- }
- if (current->IsUnreachable()) {
- trace_.Add(" \"dead\"");
- }
- if (current->is_osr_entry()) {
- trace_.Add(" \"osr\"");
- }
- trace_.Add("\n");
- }
-
- if (current->dominator() != NULL) {
- PrintBlockProperty("dominator", current->dominator()->block_id());
- }
-
- PrintIntProperty("loop_depth", current->LoopNestingDepth());
-
- if (chunk != NULL) {
- int first_index = current->first_instruction_index();
- int last_index = current->last_instruction_index();
- PrintIntProperty(
- "first_lir_id",
- LifetimePosition::FromInstructionIndex(first_index).Value());
- PrintIntProperty(
- "last_lir_id",
- LifetimePosition::FromInstructionIndex(last_index).Value());
- }
-
- {
- Tag states_tag(this, "states");
- Tag locals_tag(this, "locals");
- int total = current->phis()->length();
- PrintIntProperty("size", current->phis()->length());
- PrintStringProperty("method", "None");
- for (int j = 0; j < total; ++j) {
- HPhi* phi = current->phis()->at(j);
- PrintIndent();
- std::ostringstream os;
- os << phi->merged_index() << " " << NameOf(phi) << " " << *phi << "\n";
- trace_.Add(os.str().c_str());
- }
- }
-
- {
- Tag HIR_tag(this, "HIR");
- for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- int uses = instruction->UseCount();
- PrintIndent();
- std::ostringstream os;
- os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
- if (instruction->has_position()) {
- const SourcePosition pos = instruction->position();
- os << " pos:";
- if (pos.isInlined()) os << "inlining(" << pos.InliningId() << "),";
- os << pos.ScriptOffset();
- }
- os << " <|@\n";
- trace_.Add(os.str().c_str());
- }
- }
-
-
- if (chunk != NULL) {
- Tag LIR_tag(this, "LIR");
- int first_index = current->first_instruction_index();
- int last_index = current->last_instruction_index();
- if (first_index != -1 && last_index != -1) {
- const ZoneList<LInstruction*>* instructions = chunk->instructions();
- for (int i = first_index; i <= last_index; ++i) {
- LInstruction* linstr = instructions->at(i);
- if (linstr != NULL) {
- PrintIndent();
- trace_.Add("%d ",
- LifetimePosition::FromInstructionIndex(i).Value());
- linstr->PrintTo(&trace_);
- std::ostringstream os;
- os << " [hir:" << NameOf(linstr->hydrogen_value()) << "] <|@\n";
- trace_.Add(os.str().c_str());
- }
- }
- }
- }
- }
-}
-
-
-void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
- Tag tag(this, "intervals");
- PrintStringProperty("name", name);
-
- const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
- for (int i = 0; i < fixed_d->length(); ++i) {
- TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone());
- }
-
- const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
- for (int i = 0; i < fixed->length(); ++i) {
- TraceLiveRange(fixed->at(i), "fixed", allocator->zone());
- }
-
- const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
- for (int i = 0; i < live_ranges->length(); ++i) {
- TraceLiveRange(live_ranges->at(i), "object", allocator->zone());
- }
-}
-
-
-void HTracer::TraceLiveRange(LiveRange* range, const char* type,
- Zone* zone) {
- if (range != NULL && !range->IsEmpty()) {
- PrintIndent();
- trace_.Add("%d %s", range->id(), type);
- if (range->HasRegisterAssigned()) {
- LOperand* op = range->CreateAssignedOperand(zone);
- int assigned_reg = op->index();
- if (op->IsDoubleRegister()) {
- trace_.Add(" \"%s\"",
- GetRegConfig()->GetDoubleRegisterName(assigned_reg));
- } else {
- DCHECK(op->IsRegister());
- trace_.Add(" \"%s\"",
- GetRegConfig()->GetGeneralRegisterName(assigned_reg));
- }
- } else if (range->IsSpilled()) {
- LOperand* op = range->TopLevel()->GetSpillOperand();
- if (op->IsDoubleStackSlot()) {
- trace_.Add(" \"double_stack:%d\"", op->index());
- } else {
- DCHECK(op->IsStackSlot());
- trace_.Add(" \"stack:%d\"", op->index());
- }
- }
- int parent_index = -1;
- if (range->IsChild()) {
- parent_index = range->parent()->id();
- } else {
- parent_index = range->id();
- }
- LOperand* op = range->FirstHint();
- int hint_index = -1;
- if (op != NULL && op->IsUnallocated()) {
- hint_index = LUnallocated::cast(op)->virtual_register();
- }
- trace_.Add(" %d %d", parent_index, hint_index);
- UseInterval* cur_interval = range->first_interval();
- while (cur_interval != NULL && range->Covers(cur_interval->start())) {
- trace_.Add(" [%d, %d[",
- cur_interval->start().Value(),
- cur_interval->end().Value());
- cur_interval = cur_interval->next();
- }
-
- UsePosition* current_pos = range->first_pos();
- while (current_pos != NULL) {
- if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
- trace_.Add(" %d M", current_pos->pos().Value());
- }
- current_pos = current_pos->next();
- }
-
- trace_.Add(" \"\"\n");
- }
-}
-
-
-void HTracer::FlushToFile() {
- AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(),
- false);
- trace_.Reset();
-}
-
-
-void HStatistics::Initialize(CompilationInfo* info) {
- if (!info->has_shared_info()) return;
- source_size_ += info->shared_info()->SourceSize();
-}
-
-
-void HStatistics::Print() {
- PrintF(
- "\n"
- "----------------------------------------"
- "----------------------------------------\n"
- "--- Hydrogen timing results:\n"
- "----------------------------------------"
- "----------------------------------------\n");
- base::TimeDelta sum;
- for (int i = 0; i < times_.length(); ++i) {
- sum += times_[i];
- }
-
- for (int i = 0; i < names_.length(); ++i) {
- PrintF("%33s", names_[i]);
- double ms = times_[i].InMillisecondsF();
- double percent = times_[i].PercentOf(sum);
- PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
-
- size_t size = sizes_[i];
- double size_percent = static_cast<double>(size) * 100 / total_size_;
- PrintF(" %9zu bytes / %4.1f %%\n", size, size_percent);
- }
-
- PrintF(
- "----------------------------------------"
- "----------------------------------------\n");
- base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
- PrintF("%33s %8.3f ms / %4.1f %% \n", "Create graph",
- create_graph_.InMillisecondsF(), create_graph_.PercentOf(total));
- PrintF("%33s %8.3f ms / %4.1f %% \n", "Optimize graph",
- optimize_graph_.InMillisecondsF(), optimize_graph_.PercentOf(total));
- PrintF("%33s %8.3f ms / %4.1f %% \n", "Generate and install code",
- generate_code_.InMillisecondsF(), generate_code_.PercentOf(total));
- PrintF(
- "----------------------------------------"
- "----------------------------------------\n");
- PrintF("%33s %8.3f ms %9zu bytes\n", "Total",
- total.InMillisecondsF(), total_size_);
- PrintF("%33s (%.1f times slower than full code gen)\n", "",
- total.TimesOf(full_code_gen_));
-
- double source_size_in_kb = static_cast<double>(source_size_) / 1024;
- double normalized_time = source_size_in_kb > 0
- ? total.InMillisecondsF() / source_size_in_kb
- : 0;
- double normalized_size_in_kb =
- source_size_in_kb > 0
- ? static_cast<double>(total_size_) / 1024 / source_size_in_kb
- : 0;
- PrintF("%33s %8.3f ms %7.3f kB allocated\n",
- "Average per kB source", normalized_time, normalized_size_in_kb);
-}
-
-
-void HStatistics::SaveTiming(const char* name, base::TimeDelta time,
- size_t size) {
- total_size_ += size;
- for (int i = 0; i < names_.length(); ++i) {
- if (strcmp(names_[i], name) == 0) {
- times_[i] += time;
- sizes_[i] += size;
- return;
- }
- }
- names_.Add(name);
- times_.Add(time);
- sizes_.Add(size);
-}
-
-
-HPhase::~HPhase() {
- if (ShouldProduceTraceOutput()) {
- isolate()->GetHTracer()->TraceHydrogen(name(), graph_);
- }
-
-#ifdef DEBUG
- graph_->Verify(false); // No full verify.
-#endif
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
deleted file mode 100644
index 656bbf0e8e..0000000000
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ /dev/null
@@ -1,2996 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_H_
-#define V8_CRANKSHAFT_HYDROGEN_H_
-
-#include "src/accessors.h"
-#include "src/allocation.h"
-#include "src/ast/ast-type-bounds.h"
-#include "src/ast/scopes.h"
-#include "src/bailout-reason.h"
-#include "src/compilation-info.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/crankshaft/compilation-phase.h"
-#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/globals.h"
-#include "src/parsing/parse-info.h"
-#include "src/string-stream.h"
-#include "src/transitions.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class BitVector;
-class FunctionState;
-class HEnvironment;
-class HGraph;
-class HLoopInformation;
-class HOsrBuilder;
-class HTracer;
-class LAllocator;
-class LChunk;
-class LiveRange;
-
-class HCompilationJob final : public CompilationJob {
- public:
- explicit HCompilationJob(Handle<JSFunction> function)
- : CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
- parse_info_(handle(function->shared())),
- info_(parse_info_.zone(), &parse_info_, function->GetIsolate(),
- function),
- graph_(nullptr),
- chunk_(nullptr) {}
-
- protected:
- virtual Status PrepareJobImpl();
- virtual Status ExecuteJobImpl();
- virtual Status FinalizeJobImpl();
-
- private:
- ParseInfo parse_info_;
- CompilationInfo info_;
- HGraph* graph_;
- LChunk* chunk_;
-};
-
-class HBasicBlock final : public ZoneObject {
- public:
- explicit HBasicBlock(HGraph* graph);
- ~HBasicBlock() { }
-
- // Simple accessors.
- int block_id() const { return block_id_; }
- void set_block_id(int id) { block_id_ = id; }
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const;
- const ZoneList<HPhi*>* phis() const { return &phis_; }
- HInstruction* first() const { return first_; }
- HInstruction* last() const { return last_; }
- void set_last(HInstruction* instr) { last_ = instr; }
- HControlInstruction* end() const { return end_; }
- HLoopInformation* loop_information() const { return loop_information_; }
- HLoopInformation* current_loop() const {
- return IsLoopHeader() ? loop_information()
- : (parent_loop_header() != NULL
- ? parent_loop_header()->loop_information() : NULL);
- }
- const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
- bool HasPredecessor() const { return predecessors_.length() > 0; }
- const ZoneList<HBasicBlock*>* dominated_blocks() const {
- return &dominated_blocks_;
- }
- const ZoneList<int>* deleted_phis() const {
- return &deleted_phis_;
- }
- void RecordDeletedPhi(int merge_index) {
- deleted_phis_.Add(merge_index, zone());
- }
- HBasicBlock* dominator() const { return dominator_; }
- HEnvironment* last_environment() const { return last_environment_; }
- int argument_count() const { return argument_count_; }
- void set_argument_count(int count) { argument_count_ = count; }
- int first_instruction_index() const { return first_instruction_index_; }
- void set_first_instruction_index(int index) {
- first_instruction_index_ = index;
- }
- int last_instruction_index() const { return last_instruction_index_; }
- void set_last_instruction_index(int index) {
- last_instruction_index_ = index;
- }
- bool is_osr_entry() { return is_osr_entry_; }
- void set_osr_entry() { is_osr_entry_ = true; }
-
- void AttachLoopInformation();
- void DetachLoopInformation();
- bool IsLoopHeader() const { return loop_information() != NULL; }
- bool IsStartBlock() const { return block_id() == 0; }
- void PostProcessLoopHeader(IterationStatement* stmt);
-
- bool IsFinished() const { return end_ != NULL; }
- void AddPhi(HPhi* phi);
- void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, SourcePosition position);
- bool Dominates(HBasicBlock* other) const;
- bool EqualToOrDominates(HBasicBlock* other) const;
- int LoopNestingDepth() const;
-
- void SetInitialEnvironment(HEnvironment* env);
- void ClearEnvironment() {
- DCHECK(IsFinished());
- DCHECK(end()->SuccessorCount() == 0);
- last_environment_ = NULL;
- }
- bool HasEnvironment() const { return last_environment_ != NULL; }
- void UpdateEnvironment(HEnvironment* env);
- HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
-
- void set_parent_loop_header(HBasicBlock* block) {
- DCHECK(parent_loop_header_ == NULL);
- parent_loop_header_ = block;
- }
-
- bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
-
- void SetJoinId(BailoutId ast_id);
-
- int PredecessorIndexOf(HBasicBlock* predecessor) const;
- HPhi* AddNewPhi(int merged_index);
- HSimulate* AddNewSimulate(BailoutId ast_id, SourcePosition position,
- RemovableSimulate removable = FIXED_SIMULATE) {
- HSimulate* instr = CreateSimulate(ast_id, removable);
- AddInstruction(instr, position);
- return instr;
- }
- void AssignCommonDominator(HBasicBlock* other);
- void AssignLoopSuccessorDominators();
-
- // If a target block is tagged as an inline function return, all
- // predecessors should contain the inlined exit sequence:
- //
- // LeaveInlined
- // Simulate (caller's environment)
- // Goto (target block)
- bool IsInlineReturnTarget() const { return is_inline_return_target_; }
- void MarkAsInlineReturnTarget(HBasicBlock* inlined_entry_block) {
- is_inline_return_target_ = true;
- inlined_entry_block_ = inlined_entry_block;
- }
- HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
-
- bool IsDeoptimizing() const {
- return end() != NULL && end()->IsDeoptimize();
- }
-
- void MarkUnreachable();
- bool IsUnreachable() const { return !is_reachable_; }
- bool IsReachable() const { return is_reachable_; }
-
- bool IsLoopSuccessorDominator() const {
- return dominates_loop_successors_;
- }
- void MarkAsLoopSuccessorDominator() {
- dominates_loop_successors_ = true;
- }
-
- bool IsOrdered() const { return is_ordered_; }
- void MarkAsOrdered() { is_ordered_ = true; }
-
- void MarkSuccEdgeUnreachable(int succ);
-
- inline Zone* zone() const;
-
-#ifdef DEBUG
- void Verify();
-#endif
-
- protected:
- friend class HGraphBuilder;
-
- HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, SourcePosition position);
- void FinishExit(HControlInstruction* instruction, SourcePosition position);
- void Goto(HBasicBlock* block, SourcePosition position,
- FunctionState* state = NULL, bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, SourcePosition position) {
- Goto(block, position, NULL, false);
- }
-
- // Add the inlined function exit sequence, adding an HLeaveInlined
- // instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, FunctionState* state,
- SourcePosition position);
-
- private:
- void RegisterPredecessor(HBasicBlock* pred);
- void AddDominatedBlock(HBasicBlock* block);
-
- int block_id_;
- HGraph* graph_;
- ZoneList<HPhi*> phis_;
- HInstruction* first_;
- HInstruction* last_;
- HControlInstruction* end_;
- HLoopInformation* loop_information_;
- ZoneList<HBasicBlock*> predecessors_;
- HBasicBlock* dominator_;
- ZoneList<HBasicBlock*> dominated_blocks_;
- HEnvironment* last_environment_;
- // Outgoing parameter count at block exit, set during lithium translation.
- int argument_count_;
- // Instruction indices into the lithium code stream.
- int first_instruction_index_;
- int last_instruction_index_;
- ZoneList<int> deleted_phis_;
- HBasicBlock* parent_loop_header_;
- // For blocks marked as inline return target: the block with HEnterInlined.
- HBasicBlock* inlined_entry_block_;
- bool is_inline_return_target_ : 1;
- bool is_reachable_ : 1;
- bool dominates_loop_successors_ : 1;
- bool is_osr_entry_ : 1;
- bool is_ordered_ : 1;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const HBasicBlock& b);
-
-
-class HPredecessorIterator final BASE_EMBEDDED {
- public:
- explicit HPredecessorIterator(HBasicBlock* block)
- : predecessor_list_(block->predecessors()), current_(0) { }
-
- bool Done() { return current_ >= predecessor_list_->length(); }
- HBasicBlock* Current() { return predecessor_list_->at(current_); }
- void Advance() { current_++; }
-
- private:
- const ZoneList<HBasicBlock*>* predecessor_list_;
- int current_;
-};
-
-
-class HInstructionIterator final BASE_EMBEDDED {
- public:
- explicit HInstructionIterator(HBasicBlock* block)
- : instr_(block->first()) {
- next_ = Done() ? NULL : instr_->next();
- }
-
- inline bool Done() const { return instr_ == NULL; }
- inline HInstruction* Current() { return instr_; }
- inline void Advance() {
- instr_ = next_;
- next_ = Done() ? NULL : instr_->next();
- }
-
- private:
- HInstruction* instr_;
- HInstruction* next_;
-};
-
-
-class HLoopInformation final : public ZoneObject {
- public:
- HLoopInformation(HBasicBlock* loop_header, Zone* zone)
- : back_edges_(4, zone),
- loop_header_(loop_header),
- blocks_(8, zone),
- stack_check_(NULL) {
- blocks_.Add(loop_header, zone);
- }
- ~HLoopInformation() {}
-
- const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
- const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
- HBasicBlock* loop_header() const { return loop_header_; }
- HBasicBlock* GetLastBackEdge() const;
- void RegisterBackEdge(HBasicBlock* block);
-
- HStackCheck* stack_check() const { return stack_check_; }
- void set_stack_check(HStackCheck* stack_check) {
- stack_check_ = stack_check;
- }
-
- bool IsNestedInThisLoop(HLoopInformation* other) {
- while (other != NULL) {
- if (other == this) {
- return true;
- }
- other = other->parent_loop();
- }
- return false;
- }
- HLoopInformation* parent_loop() {
- HBasicBlock* parent_header = loop_header()->parent_loop_header();
- return parent_header != NULL ? parent_header->loop_information() : NULL;
- }
-
- private:
- void AddBlock(HBasicBlock* block);
-
- ZoneList<HBasicBlock*> back_edges_;
- HBasicBlock* loop_header_;
- ZoneList<HBasicBlock*> blocks_;
- HStackCheck* stack_check_;
-};
-
-class HGraph final : public ZoneObject {
- public:
- explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
-
- Isolate* isolate() const { return isolate_; }
- Zone* zone() const { return zone_; }
- CompilationInfo* info() const { return info_; }
- CallInterfaceDescriptor descriptor() const { return descriptor_; }
-
- const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
- const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
- HBasicBlock* entry_block() const { return entry_block_; }
- HEnvironment* start_environment() const { return start_environment_; }
-
- void FinalizeUniqueness();
- void OrderBlocks();
- void AssignDominators();
- void RestoreActualValues();
-
- // Returns false if there are phi-uses of the arguments-object
- // which are not supported by the optimizing compiler.
- bool CheckArgumentsPhiUses();
-
- // Returns false if there are phi-uses of an uninitialized const
- // which are not supported by the optimizing compiler.
- bool CheckConstPhiUses();
-
- void CollectPhis();
-
- HConstant* GetConstantUndefined();
- HConstant* GetConstant0();
- HConstant* GetConstant1();
- HConstant* GetConstantMinus1();
- HConstant* GetConstantTrue();
- HConstant* GetConstantFalse();
- HConstant* GetConstantBool(bool value);
- HConstant* GetConstantHole();
- HConstant* GetConstantNull();
- HConstant* GetConstantOptimizedOut();
- HConstant* GetInvalidContext();
-
- bool IsConstantUndefined(HConstant* constant);
- bool IsConstant0(HConstant* constant);
- bool IsConstant1(HConstant* constant);
- bool IsConstantMinus1(HConstant* constant);
- bool IsConstantTrue(HConstant* constant);
- bool IsConstantFalse(HConstant* constant);
- bool IsConstantHole(HConstant* constant);
- bool IsConstantNull(HConstant* constant);
- bool IsStandardConstant(HConstant* constant);
-
- HBasicBlock* CreateBasicBlock();
-
- int GetMaximumValueID() const { return values_.length(); }
- int GetNextBlockID() { return next_block_id_++; }
- int GetNextValueID(HValue* value) {
- DCHECK(!disallow_adding_new_values_);
- values_.Add(value, zone());
- return values_.length() - 1;
- }
- HValue* LookupValue(int id) const {
- if (id >= 0 && id < values_.length()) return values_[id];
- return NULL;
- }
- void DisallowAddingNewValues() {
- disallow_adding_new_values_ = true;
- }
-
- bool Optimize(BailoutReason* bailout_reason);
-
-#ifdef DEBUG
- void Verify(bool do_full_verify) const;
-#endif
-
- bool has_osr() {
- return osr_ != NULL;
- }
-
- void set_osr(HOsrBuilder* osr) {
- osr_ = osr;
- }
-
- HOsrBuilder* osr() {
- return osr_;
- }
-
- int update_type_change_checksum(int delta) {
- type_change_checksum_ += delta;
- return type_change_checksum_;
- }
-
- void update_maximum_environment_size(int environment_size) {
- if (environment_size > maximum_environment_size_) {
- maximum_environment_size_ = environment_size;
- }
- }
- int maximum_environment_size() { return maximum_environment_size_; }
-
- bool allow_code_motion() const { return allow_code_motion_; }
- void set_allow_code_motion(bool value) { allow_code_motion_ = value; }
-
- bool use_optimistic_licm() const { return use_optimistic_licm_; }
- void set_use_optimistic_licm(bool value) { use_optimistic_licm_ = value; }
-
- void MarkDependsOnEmptyArrayProtoElements() {
- // Add map dependency if not already added.
- if (depends_on_empty_array_proto_elements_) return;
- info()->dependencies()->AssumePropertyCell(
- isolate()->factory()->array_protector());
- depends_on_empty_array_proto_elements_ = true;
- }
-
- bool depends_on_empty_array_proto_elements() {
- return depends_on_empty_array_proto_elements_;
- }
-
- void MarkDependsOnStringLengthOverflow() {
- if (depends_on_string_length_overflow_) return;
- info()->dependencies()->AssumePropertyCell(
- isolate()->factory()->string_length_protector());
- depends_on_string_length_overflow_ = true;
- }
-
- bool has_uint32_instructions() {
- DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
- return uint32_instructions_ != NULL;
- }
-
- ZoneList<HInstruction*>* uint32_instructions() {
- DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
- return uint32_instructions_;
- }
-
- void RecordUint32Instruction(HInstruction* instr) {
- DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
- if (uint32_instructions_ == NULL) {
- uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
- }
- uint32_instructions_->Add(instr, zone());
- }
-
- void IncrementInNoSideEffectsScope() { no_side_effects_scope_count_++; }
- void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
- bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
-
- private:
- HConstant* ReinsertConstantIfNecessary(HConstant* constant);
- HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
- int32_t integer_value);
-
- template<class Phase>
- void Run() {
- Phase phase(this);
- phase.Run();
- }
-
- Isolate* isolate_;
- int next_block_id_;
- HBasicBlock* entry_block_;
- HEnvironment* start_environment_;
- ZoneList<HBasicBlock*> blocks_;
- ZoneList<HValue*> values_;
- ZoneList<HPhi*>* phi_list_;
- ZoneList<HInstruction*>* uint32_instructions_;
- SetOncePointer<HConstant> constant_undefined_;
- SetOncePointer<HConstant> constant_0_;
- SetOncePointer<HConstant> constant_1_;
- SetOncePointer<HConstant> constant_minus1_;
- SetOncePointer<HConstant> constant_true_;
- SetOncePointer<HConstant> constant_false_;
- SetOncePointer<HConstant> constant_the_hole_;
- SetOncePointer<HConstant> constant_null_;
- SetOncePointer<HConstant> constant_optimized_out_;
- SetOncePointer<HConstant> constant_invalid_context_;
-
- HOsrBuilder* osr_;
-
- CompilationInfo* info_;
- CallInterfaceDescriptor descriptor_;
- Zone* zone_;
-
- bool allow_code_motion_;
- bool use_optimistic_licm_;
- bool depends_on_empty_array_proto_elements_;
- bool depends_on_string_length_overflow_;
- int type_change_checksum_;
- int maximum_environment_size_;
- int no_side_effects_scope_count_;
- bool disallow_adding_new_values_;
-
- DISALLOW_COPY_AND_ASSIGN(HGraph);
-};
-
-
-Zone* HBasicBlock::zone() const { return graph_->zone(); }
-
-
-// Type of stack frame an environment might refer to.
-enum FrameType {
- JS_FUNCTION,
- JS_CONSTRUCT,
- JS_GETTER,
- JS_SETTER,
- ARGUMENTS_ADAPTOR,
- TAIL_CALLER_FUNCTION,
- STUB
-};
-
-class HEnvironment final : public ZoneObject {
- public:
- HEnvironment(HEnvironment* outer,
- Scope* scope,
- Handle<JSFunction> closure,
- Zone* zone);
-
- HEnvironment(Zone* zone, int parameter_count);
-
- HEnvironment* arguments_environment() {
- return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
- }
-
- // Simple accessors.
- Handle<JSFunction> closure() const { return closure_; }
- const ZoneList<HValue*>* values() const { return &values_; }
- const GrowableBitVector* assigned_variables() const {
- return &assigned_variables_;
- }
- FrameType frame_type() const { return frame_type_; }
- int parameter_count() const { return parameter_count_; }
- int specials_count() const { return specials_count_; }
- int local_count() const { return local_count_; }
- HEnvironment* outer() const { return outer_; }
- int pop_count() const { return pop_count_; }
- int push_count() const { return push_count_; }
-
- BailoutId ast_id() const { return ast_id_; }
- void set_ast_id(BailoutId id) { ast_id_ = id; }
-
- HEnterInlined* entry() const { return entry_; }
- void set_entry(HEnterInlined* entry) { entry_ = entry; }
-
- int length() const { return values_.length(); }
-
- int first_expression_index() const {
- return parameter_count() + specials_count() + local_count();
- }
-
- int first_local_index() const {
- return parameter_count() + specials_count();
- }
-
- void Bind(Variable* variable, HValue* value) {
- Bind(IndexFor(variable), value);
- }
-
- void Bind(int index, HValue* value);
-
- void BindContext(HValue* value) {
- Bind(parameter_count(), value);
- }
-
- HValue* Lookup(Variable* variable) const {
- return Lookup(IndexFor(variable));
- }
-
- HValue* Lookup(int index) const {
- HValue* result = values_[index];
- DCHECK(result != NULL);
- return result;
- }
-
- HValue* context() const {
- // Return first special.
- return Lookup(parameter_count());
- }
-
- void Push(HValue* value) {
- DCHECK(value != NULL);
- ++push_count_;
- values_.Add(value, zone());
- }
-
- HValue* Pop() {
- DCHECK(!ExpressionStackIsEmpty());
- if (push_count_ > 0) {
- --push_count_;
- } else {
- ++pop_count_;
- }
- return values_.RemoveLast();
- }
-
- void Drop(int count);
-
- HValue* Top() const { return ExpressionStackAt(0); }
-
- bool ExpressionStackIsEmpty() const;
-
- HValue* ExpressionStackAt(int index_from_top) const {
- int index = length() - index_from_top - 1;
- DCHECK(HasExpressionAt(index));
- return values_[index];
- }
-
- void SetExpressionStackAt(int index_from_top, HValue* value);
- HValue* RemoveExpressionStackAt(int index_from_top);
-
- void Print() const;
-
- HEnvironment* Copy() const;
- HEnvironment* CopyWithoutHistory() const;
- HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
-
- // Create an "inlined version" of this environment, where the original
- // environment is the outer environment but the top expression stack
- // elements are moved to an inner environment as parameters.
- HEnvironment* CopyForInlining(Handle<JSFunction> target, int arguments,
- FunctionLiteral* function, HConstant* undefined,
- InliningKind inlining_kind,
- TailCallMode syntactic_tail_call_mode) const;
-
- HEnvironment* DiscardInlined(bool drop_extra) {
- HEnvironment* outer = outer_;
- while (outer->frame_type() != JS_FUNCTION &&
- outer->frame_type() != TAIL_CALLER_FUNCTION) {
- outer = outer->outer_;
- }
- if (drop_extra) outer->Drop(1);
- if (outer->frame_type() == TAIL_CALLER_FUNCTION) {
- outer->ClearTailCallerMark();
- }
- return outer;
- }
-
- void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
-
- void ClearHistory() {
- pop_count_ = 0;
- push_count_ = 0;
- assigned_variables_.Clear();
- }
-
- void SetValueAt(int index, HValue* value) {
- DCHECK(index < length());
- values_[index] = value;
- }
-
-  // Map a variable to an environment index. Parameter indices are shifted
-  // by 1 (the receiver is parameter index -1 but environment index 0).
-  // Stack-allocated local indices are shifted by the number of parameters
-  // plus the specials count.
- int IndexFor(Variable* variable) const {
- DCHECK(variable->IsStackAllocated());
- int shift = variable->IsParameter()
- ? 1
- : parameter_count_ + specials_count_;
- return variable->index() + shift;
- }
-
- bool is_local_index(int i) const {
- return i >= first_local_index() && i < first_expression_index();
- }
-
- bool is_parameter_index(int i) const {
- return i >= 0 && i < parameter_count();
- }
-
- bool is_special_index(int i) const {
- return i >= parameter_count() && i < parameter_count() + specials_count();
- }
-
- Zone* zone() const { return zone_; }
-
- private:
- HEnvironment(const HEnvironment* other, Zone* zone);
-
- HEnvironment(HEnvironment* outer,
- Handle<JSFunction> closure,
- FrameType frame_type,
- int arguments,
- Zone* zone);
-
- // Create an artificial stub environment (e.g. for argument adaptor or
- // constructor stub).
- HEnvironment* CreateStubEnvironment(HEnvironment* outer,
- Handle<JSFunction> target,
- FrameType frame_type,
- int arguments) const;
-
-  // Marks the current environment as a tail caller by setting its frame type
-  // to TAIL_CALLER_FUNCTION.
- void MarkAsTailCaller();
- void ClearTailCallerMark();
-
- // True if index is included in the expression stack part of the environment.
- bool HasExpressionAt(int index) const;
-
- void Initialize(int parameter_count, int local_count, int stack_height);
- void Initialize(const HEnvironment* other);
-
- Handle<JSFunction> closure_;
- // Value array [parameters] [specials] [locals] [temporaries].
- ZoneList<HValue*> values_;
- GrowableBitVector assigned_variables_;
- FrameType frame_type_;
- int parameter_count_;
- int specials_count_;
- int local_count_;
- HEnvironment* outer_;
- HEnterInlined* entry_;
- int pop_count_;
- int push_count_;
- BailoutId ast_id_;
- Zone* zone_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const HEnvironment& env);
-
-
-class HOptimizedGraphBuilder;
-
-enum ArgumentsAllowedFlag {
- ARGUMENTS_NOT_ALLOWED,
- ARGUMENTS_ALLOWED,
- ARGUMENTS_FAKED
-};
-
-
-class HIfContinuation;
-
-// This class is not BASE_EMBEDDED because our inlining implementation uses
-// new and delete.
-class AstContext {
- public:
- bool IsEffect() const { return kind_ == Expression::kEffect; }
- bool IsValue() const { return kind_ == Expression::kValue; }
- bool IsTest() const { return kind_ == Expression::kTest; }
-
- // 'Fill' this context with a hydrogen value. The value is assumed to
- // have already been inserted in the instruction stream (or not need to
- // be, e.g., HPhi). Call this function in tail position in the Visit
- // functions for expressions.
- virtual void ReturnValue(HValue* value) = 0;
-
- // Add a hydrogen instruction to the instruction stream (recording an
- // environment simulation if necessary) and then fill this context with
- // the instruction as value.
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id) = 0;
-
-  // Finishes the current basic block and materializes a boolean for a
-  // value context, nothing for an effect context, and generates a branch
-  // for a test context. Call this function in tail position in the Visit
-  // functions for expressions.
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id) = 0;
-
-  // Finishes the current basic block and materializes a boolean for a
-  // value context, nothing for an effect context, and generates a branch
-  // for a test context. Call this function in tail position in the Visit
-  // functions for expressions that use an IfBuilder.
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) = 0;
-
- void set_typeof_mode(TypeofMode typeof_mode) { typeof_mode_ = typeof_mode; }
- TypeofMode typeof_mode() { return typeof_mode_; }
-
- protected:
- AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
- virtual ~AstContext();
-
- HOptimizedGraphBuilder* owner() const { return owner_; }
-
- inline Zone* zone() const;
-
- // We want to be able to assert, in a context-specific way, that the stack
- // height makes sense when the context is filled.
-#ifdef DEBUG
- int original_length_;
-#endif
-
- private:
- HOptimizedGraphBuilder* owner_;
- Expression::Context kind_;
- AstContext* outer_;
- TypeofMode typeof_mode_;
-};
-
-
-class EffectContext final : public AstContext {
- public:
- explicit EffectContext(HOptimizedGraphBuilder* owner)
- : AstContext(owner, Expression::kEffect) {
- }
- ~EffectContext() override;
-
- void ReturnValue(HValue* value) override;
- void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
- void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
- void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
-};
-
-
-class ValueContext final : public AstContext {
- public:
- ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
- : AstContext(owner, Expression::kValue), flag_(flag) {
- }
- ~ValueContext() override;
-
- void ReturnValue(HValue* value) override;
- void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
- void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
- void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
-
- bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
-
- private:
- ArgumentsAllowedFlag flag_;
-};
-
-
-class TestContext final : public AstContext {
- public:
- TestContext(HOptimizedGraphBuilder* owner,
- Expression* condition,
- HBasicBlock* if_true,
- HBasicBlock* if_false)
- : AstContext(owner, Expression::kTest),
- condition_(condition),
- if_true_(if_true),
- if_false_(if_false) {
- }
-
- void ReturnValue(HValue* value) override;
- void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
- void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
- void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
-
- static TestContext* cast(AstContext* context) {
- DCHECK(context->IsTest());
- return reinterpret_cast<TestContext*>(context);
- }
-
- Expression* condition() const { return condition_; }
- HBasicBlock* if_true() const { return if_true_; }
- HBasicBlock* if_false() const { return if_false_; }
-
- private:
- // Build the shared core part of the translation unpacking a value into
- // control flow.
- void BuildBranch(HValue* value);
-
- Expression* condition_;
- HBasicBlock* if_true_;
- HBasicBlock* if_false_;
-};
-
-
-class FunctionState final {
- public:
- FunctionState(HOptimizedGraphBuilder* owner, CompilationInfo* info,
- InliningKind inlining_kind, int inlining_id,
- TailCallMode tail_call_mode);
- ~FunctionState();
-
- CompilationInfo* compilation_info() { return compilation_info_; }
- AstContext* call_context() { return call_context_; }
- InliningKind inlining_kind() const { return inlining_kind_; }
- HBasicBlock* function_return() { return function_return_; }
- TestContext* test_context() { return test_context_; }
- void ClearInlinedTestContext() {
- delete test_context_;
- test_context_ = NULL;
- }
-
- FunctionState* outer() { return outer_; }
-
- TailCallMode ComputeTailCallMode(TailCallMode tail_call_mode) const {
- if (tail_call_mode_ == TailCallMode::kDisallow) return tail_call_mode_;
- return tail_call_mode;
- }
-
- HEnterInlined* entry() { return entry_; }
- void set_entry(HEnterInlined* entry) { entry_ = entry; }
-
- HArgumentsObject* arguments_object() { return arguments_object_; }
- void set_arguments_object(HArgumentsObject* arguments_object) {
- arguments_object_ = arguments_object;
- }
-
- HArgumentsElements* arguments_elements() { return arguments_elements_; }
- void set_arguments_elements(HArgumentsElements* arguments_elements) {
- arguments_elements_ = arguments_elements;
- }
-
- bool arguments_pushed() { return arguments_elements() != NULL; }
-
- int inlining_id() const { return inlining_id_; }
-
- void IncrementInDoExpressionScope() { do_expression_scope_count_++; }
- void DecrementInDoExpressionScope() { do_expression_scope_count_--; }
- bool IsInsideDoExpressionScope() { return do_expression_scope_count_ > 0; }
-
- private:
- HOptimizedGraphBuilder* owner_;
-
- CompilationInfo* compilation_info_;
-
-  // During function inlining, the expression context of the call being
-  // inlined. NULL when not inlining.
- AstContext* call_context_;
-
- // The kind of call which is currently being inlined.
- InliningKind inlining_kind_;
-
- // Defines whether the calls with TailCallMode::kAllow in the function body
- // can be generated as tail calls.
- TailCallMode tail_call_mode_;
-
- // When inlining in an effect or value context, this is the return block.
-  // It is NULL otherwise. When inlining in a test context, there is a
-  // pair of return blocks in the context. When not inlining, there is no
- // local return point.
- HBasicBlock* function_return_;
-
- // When inlining a call in a test context, a context containing a pair of
- // return blocks. NULL in all other cases.
- TestContext* test_context_;
-
-  // When inlining, the HEnterInlined instruction corresponding to the
-  // function entry.
- HEnterInlined* entry_;
-
- HArgumentsObject* arguments_object_;
- HArgumentsElements* arguments_elements_;
-
- int inlining_id_;
- SourcePosition outer_source_position_;
-
- int do_expression_scope_count_;
-
- FunctionState* outer_;
-};
-
-
-class HIfContinuation final {
- public:
- HIfContinuation()
- : continuation_captured_(false),
- true_branch_(NULL),
- false_branch_(NULL) {}
- HIfContinuation(HBasicBlock* true_branch,
- HBasicBlock* false_branch)
- : continuation_captured_(true), true_branch_(true_branch),
- false_branch_(false_branch) {}
- ~HIfContinuation() { DCHECK(!continuation_captured_); }
-
- void Capture(HBasicBlock* true_branch,
- HBasicBlock* false_branch) {
- DCHECK(!continuation_captured_);
- true_branch_ = true_branch;
- false_branch_ = false_branch;
- continuation_captured_ = true;
- }
-
- void Continue(HBasicBlock** true_branch,
- HBasicBlock** false_branch) {
- DCHECK(continuation_captured_);
- *true_branch = true_branch_;
- *false_branch = false_branch_;
- continuation_captured_ = false;
- }
-
- bool IsTrueReachable() { return true_branch_ != NULL; }
- bool IsFalseReachable() { return false_branch_ != NULL; }
- bool TrueAndFalseReachable() {
- return IsTrueReachable() || IsFalseReachable();
- }
-
- HBasicBlock* true_branch() const { return true_branch_; }
- HBasicBlock* false_branch() const { return false_branch_; }
-
- private:
- bool continuation_captured_;
- HBasicBlock* true_branch_;
- HBasicBlock* false_branch_;
-};
-
-
-class HAllocationMode final BASE_EMBEDDED {
- public:
- explicit HAllocationMode(Handle<AllocationSite> feedback_site)
- : current_site_(NULL), feedback_site_(feedback_site),
- pretenure_flag_(NOT_TENURED) {}
- explicit HAllocationMode(HValue* current_site)
- : current_site_(current_site), pretenure_flag_(NOT_TENURED) {}
- explicit HAllocationMode(PretenureFlag pretenure_flag)
- : current_site_(NULL), pretenure_flag_(pretenure_flag) {}
- HAllocationMode()
- : current_site_(NULL), pretenure_flag_(NOT_TENURED) {}
-
- HValue* current_site() const { return current_site_; }
- Handle<AllocationSite> feedback_site() const { return feedback_site_; }
-
- bool CreateAllocationMementos() const WARN_UNUSED_RESULT {
- return current_site() != NULL;
- }
-
- PretenureFlag GetPretenureMode() const WARN_UNUSED_RESULT {
- if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode();
- return pretenure_flag_;
- }
-
- private:
- HValue* current_site_;
- Handle<AllocationSite> feedback_site_;
- PretenureFlag pretenure_flag_;
-};
-
-
-class HGraphBuilder {
- public:
- explicit HGraphBuilder(CompilationInfo* info,
- CallInterfaceDescriptor descriptor,
- bool track_positions)
- : info_(info),
- descriptor_(descriptor),
- graph_(NULL),
- current_block_(NULL),
- scope_(info->scope()),
- position_(SourcePosition::Unknown()),
- track_positions_(track_positions) {}
- virtual ~HGraphBuilder() {}
-
- Scope* scope() const { return scope_; }
- void set_scope(Scope* scope) { scope_ = scope; }
-
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
- Zone* zone() const { return info_->zone(); }
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const { return graph_->isolate(); }
- CompilationInfo* top_info() { return info_; }
-
- HGraph* CreateGraph();
-
- // Bailout environment manipulation.
- void Push(HValue* value) { environment()->Push(value); }
- HValue* Pop() { return environment()->Pop(); }
-
- virtual HValue* context() = 0;
-
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void FinishCurrentBlock(HControlInstruction* last);
- void FinishExitCurrentBlock(HControlInstruction* instruction);
-
- void Goto(HBasicBlock* from,
- HBasicBlock* target,
- FunctionState* state = NULL,
- bool add_simulate = true) {
- from->Goto(target, source_position(), state, add_simulate);
- }
- void Goto(HBasicBlock* target,
- FunctionState* state = NULL,
- bool add_simulate = true) {
- Goto(current_block(), target, state, add_simulate);
- }
- void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) {
- Goto(from, target, NULL, false);
- }
- void GotoNoSimulate(HBasicBlock* target) {
- Goto(target, NULL, false);
- }
- void AddLeaveInlined(HBasicBlock* block,
- HValue* return_value,
- FunctionState* state) {
- block->AddLeaveInlined(return_value, state, source_position());
- }
- void AddLeaveInlined(HValue* return_value, FunctionState* state) {
- return AddLeaveInlined(current_block(), return_value, state);
- }
-
- template <class I>
- HInstruction* NewUncasted() {
- return I::New(isolate(), zone(), context());
- }
-
- template <class I>
- I* New() {
- return I::New(isolate(), zone(), context());
- }
-
- template<class I>
- HInstruction* AddUncasted() { return AddInstruction(NewUncasted<I>());}
-
- template<class I>
- I* Add() { return AddInstructionTyped(New<I>());}
-
- template<class I, class P1>
- HInstruction* NewUncasted(P1 p1) {
- return I::New(isolate(), zone(), context(), p1);
- }
-
- template <class I, class P1>
- I* New(P1 p1) {
- return I::New(isolate(), zone(), context(), p1);
- }
-
- template<class I, class P1>
- HInstruction* AddUncasted(P1 p1) {
- HInstruction* result = AddInstruction(NewUncasted<I>(p1));
-    // Specializations must have their parameters properly cast
- // to avoid landing here.
- DCHECK(!result->IsReturn() && !result->IsSimulate() &&
- !result->IsDeoptimize());
- return result;
- }
-
- template<class I, class P1>
- I* Add(P1 p1) {
- I* result = AddInstructionTyped(New<I>(p1));
-    // Specializations must have their parameters properly cast
- // to avoid landing here.
- DCHECK(!result->IsReturn() && !result->IsSimulate() &&
- !result->IsDeoptimize());
- return result;
- }
-
- template<class I, class P1, class P2>
- HInstruction* NewUncasted(P1 p1, P2 p2) {
- return I::New(isolate(), zone(), context(), p1, p2);
- }
-
- template<class I, class P1, class P2>
- I* New(P1 p1, P2 p2) {
- return I::New(isolate(), zone(), context(), p1, p2);
- }
-
- template<class I, class P1, class P2>
- HInstruction* AddUncasted(P1 p1, P2 p2) {
- HInstruction* result = AddInstruction(NewUncasted<I>(p1, p2));
-    // Specializations must have their parameters properly cast
- // to avoid landing here.
- DCHECK(!result->IsSimulate());
- return result;
- }
-
- template<class I, class P1, class P2>
- I* Add(P1 p1, P2 p2) {
- I* result = AddInstructionTyped(New<I>(p1, p2));
-    // Specializations must have their parameters properly cast
- // to avoid landing here.
- DCHECK(!result->IsSimulate());
- return result;
- }
-
- template<class I, class P1, class P2, class P3>
- HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3) {
- return I::New(isolate(), zone(), context(), p1, p2, p3);
- }
-
- template<class I, class P1, class P2, class P3>
- I* New(P1 p1, P2 p2, P3 p3) {
- return I::New(isolate(), zone(), context(), p1, p2, p3);
- }
-
- template<class I, class P1, class P2, class P3>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3));
- }
-
- template<class I, class P1, class P2, class P3>
- I* Add(P1 p1, P2 p2, P3 p3) {
- return AddInstructionTyped(New<I>(p1, p2, p3));
- }
-
- template<class I, class P1, class P2, class P3, class P4>
- HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4);
- }
-
- template<class I, class P1, class P2, class P3, class P4>
- I* New(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4);
- }
-
- template<class I, class P1, class P2, class P3, class P4>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4));
- }
-
- template<class I, class P1, class P2, class P3, class P4>
- I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
- return AddInstructionTyped(New<I>(p1, p2, p3, p4));
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5>
- HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5);
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5>
- I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5);
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5));
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5>
- I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5));
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
- HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6);
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
- I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6);
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6));
- }
-
- template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
- I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6));
- }
-
- template<class I, class P1, class P2, class P3, class P4,
- class P5, class P6, class P7>
- HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7);
- }
-
- template<class I, class P1, class P2, class P3, class P4,
- class P5, class P6, class P7>
- I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7);
- }
-
- template<class I, class P1, class P2, class P3,
- class P4, class P5, class P6, class P7>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7));
- }
-
- template<class I, class P1, class P2, class P3,
- class P4, class P5, class P6, class P7>
- I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7));
- }
-
- template<class I, class P1, class P2, class P3, class P4,
- class P5, class P6, class P7, class P8>
- HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
- P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
- }
-
- template<class I, class P1, class P2, class P3, class P4,
- class P5, class P6, class P7, class P8>
- I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
- }
-
- template<class I, class P1, class P2, class P3, class P4,
- class P5, class P6, class P7, class P8>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
- P5 p5, P6 p6, P7 p7, P8 p8) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8));
- }
-
- template<class I, class P1, class P2, class P3, class P4,
- class P5, class P6, class P7, class P8>
- I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
- }
-
- template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
- class P7, class P8, class P9>
- I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
- return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8,
- p9);
- }
-
- template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
- class P7, class P8, class P9>
- HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7,
- P8 p8, P9 p9) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
- }
-
- template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
- class P7, class P8, class P9>
- I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {
- return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
- }
-
- void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
-
-  // When initializing arrays, we'll unroll the loop if the number of elements
- // is known at compile time and is <= kElementLoopUnrollThreshold.
- static const int kElementLoopUnrollThreshold = 8;
-
- protected:
- virtual bool BuildGraph() = 0;
-
- HBasicBlock* CreateBasicBlock(HEnvironment* env);
- HBasicBlock* CreateLoopHeaderBlock();
-
- template <class BitFieldClass>
- HValue* BuildDecodeField(HValue* encoded_field) {
- HValue* mask_value = Add<HConstant>(static_cast<int>(BitFieldClass::kMask));
- HValue* masked_field =
- AddUncasted<HBitwise>(Token::BIT_AND, encoded_field, mask_value);
- return AddUncasted<HShr>(masked_field,
- Add<HConstant>(static_cast<int>(BitFieldClass::kShift)));
- }
-
- HValue* BuildGetElementsKind(HValue* object);
-
- HValue* BuildEnumLength(HValue* map);
-
- HValue* BuildCheckHeapObject(HValue* object);
- HValue* BuildCheckString(HValue* string);
- HValue* BuildWrapReceiver(HValue* object, HValue* function);
-
- // Building common constructs
- HValue* BuildCheckForCapacityGrow(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array,
- PropertyAccessType access_type);
-
- HValue* BuildCheckAndGrowElementsCapacity(HValue* object, HValue* elements,
- ElementsKind kind, HValue* length,
- HValue* capacity, HValue* key);
-
- HValue* BuildCopyElementsOnWrite(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length);
-
- HValue* BuildNumberToString(HValue* object, AstType* type);
- HValue* BuildToNumber(HValue* input);
- HValue* BuildToObject(HValue* receiver);
-
- // ES6 section 7.4.7 CreateIterResultObject ( value, done )
- HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
-
-  // Allocates a new object according to the given allocation properties.
- HAllocate* BuildAllocate(HValue* object_size,
- HType type,
- InstanceType instance_type,
- HAllocationMode allocation_mode);
- // Computes the sum of two string lengths, taking care of overflow handling.
- HValue* BuildAddStringLengths(HValue* left_length, HValue* right_length);
- // Creates a cons string using the two input strings.
- HValue* BuildCreateConsString(HValue* length,
- HValue* left,
- HValue* right,
- HAllocationMode allocation_mode);
- // Copies characters from one sequential string to another.
- void BuildCopySeqStringChars(HValue* src,
- HValue* src_offset,
- String::Encoding src_encoding,
- HValue* dst,
- HValue* dst_offset,
- String::Encoding dst_encoding,
- HValue* length);
-
-  // Aligns an object size to the object alignment boundary.
- HValue* BuildObjectSizeAlignment(HValue* unaligned_size, int header_size);
-
- // Both operands are non-empty strings.
- HValue* BuildUncheckedStringAdd(HValue* left,
- HValue* right,
- HAllocationMode allocation_mode);
-  // Adds two strings using the allocation mode, validating type feedback.
- HValue* BuildStringAdd(HValue* left,
- HValue* right,
- HAllocationMode allocation_mode);
-
- HInstruction* BuildUncheckedMonomorphicElementAccess(
- HValue* checked_object,
- HValue* key,
- HValue* val,
- bool is_js_array,
- ElementsKind elements_kind,
- PropertyAccessType access_type,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode);
-
- HInstruction* AddElementAccess(
- HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
- HValue* backing_store_owner, ElementsKind elements_kind,
- PropertyAccessType access_type,
- LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
-
- HInstruction* AddLoadStringInstanceType(HValue* string);
- HInstruction* AddLoadStringLength(HValue* string);
- HInstruction* BuildLoadStringLength(HValue* string);
- HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map) {
- return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
- Add<HConstant>(map));
- }
- HLoadNamedField* AddLoadMap(HValue* object,
- HValue* dependency = NULL);
- HLoadNamedField* AddLoadElements(HValue* object,
- HValue* dependency = NULL);
-
- bool MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount);
-
- HValue* BuildBinaryOperation(Token::Value op, HValue* left, HValue* right,
- AstType* left_type, AstType* right_type,
- AstType* result_type, Maybe<int> fixed_right_arg,
- HAllocationMode allocation_mode,
- BailoutId opt_id = BailoutId::None());
-
- HLoadNamedField* AddLoadFixedArrayLength(HValue *object,
- HValue *dependency = NULL);
-
- HLoadNamedField* AddLoadArrayLength(HValue *object,
- ElementsKind kind,
- HValue *dependency = NULL);
-
- HValue* EnforceNumberType(HValue* number, AstType* expected);
- HValue* TruncateToNumber(HValue* value, AstType** expected);
-
- void FinishExitWithHardDeoptimization(DeoptimizeReason reason);
-
- void AddIncrementCounter(StatsCounter* counter);
-
- class IfBuilder final {
- public:
- // If using this constructor, Initialize() must be called explicitly!
- IfBuilder();
-
- explicit IfBuilder(HGraphBuilder* builder);
- IfBuilder(HGraphBuilder* builder,
- HIfContinuation* continuation);
-
- ~IfBuilder() {
- if (!finished_) End();
- }
-
- void Initialize(HGraphBuilder* builder);
-
- template<class Condition>
- Condition* If(HValue *p) {
- Condition* compare = builder()->New<Condition>(p);
- AddCompare(compare);
- return compare;
- }
-
- template<class Condition, class P2>
- Condition* If(HValue* p1, P2 p2) {
- Condition* compare = builder()->New<Condition>(p1, p2);
- AddCompare(compare);
- return compare;
- }
-
- template<class Condition, class P2, class P3>
- Condition* If(HValue* p1, P2 p2, P3 p3) {
- Condition* compare = builder()->New<Condition>(p1, p2, p3);
- AddCompare(compare);
- return compare;
- }
-
- template<class Condition>
- Condition* IfNot(HValue* p) {
- Condition* compare = If<Condition>(p);
- compare->Not();
- return compare;
- }
-
- template<class Condition, class P2>
- Condition* IfNot(HValue* p1, P2 p2) {
- Condition* compare = If<Condition>(p1, p2);
- compare->Not();
- return compare;
- }
-
- template<class Condition, class P2, class P3>
- Condition* IfNot(HValue* p1, P2 p2, P3 p3) {
- Condition* compare = If<Condition>(p1, p2, p3);
- compare->Not();
- return compare;
- }
-
- template<class Condition>
- Condition* OrIf(HValue *p) {
- Or();
- return If<Condition>(p);
- }
-
- template<class Condition, class P2>
- Condition* OrIf(HValue* p1, P2 p2) {
- Or();
- return If<Condition>(p1, p2);
- }
-
- template<class Condition, class P2, class P3>
- Condition* OrIf(HValue* p1, P2 p2, P3 p3) {
- Or();
- return If<Condition>(p1, p2, p3);
- }
-
- template<class Condition>
- Condition* AndIf(HValue *p) {
- And();
- return If<Condition>(p);
- }
-
- template<class Condition, class P2>
- Condition* AndIf(HValue* p1, P2 p2) {
- And();
- return If<Condition>(p1, p2);
- }
-
- template<class Condition, class P2, class P3>
- Condition* AndIf(HValue* p1, P2 p2, P3 p3) {
- And();
- return If<Condition>(p1, p2, p3);
- }
-
- void Or();
- void And();
-
- // Captures the current state of this IfBuilder in the specified
- // continuation and ends this IfBuilder.
- void CaptureContinuation(HIfContinuation* continuation);
-
- // Joins the specified continuation from this IfBuilder and ends this
- // IfBuilder. This appends a Goto instruction from the true branch of
- // this IfBuilder to the true branch of the continuation unless the
- // true branch of this IfBuilder is already finished. And vice versa
- // for the false branch.
- //
- // The basic idea is as follows: You have several nested IfBuilders
- // that you want to join based on two possible outcomes (i.e. success
- // and failure, or whatever). You can do this easily using this method
- // now, for example:
- //
- // HIfContinuation cont(graph()->CreateBasicBlock(),
- // graph()->CreateBasicBlock());
- // ...
- // IfBuilder if_whatever(this);
- // if_whatever.If<Condition>(arg);
- // if_whatever.Then();
- // ...
- // if_whatever.Else();
- // ...
- // if_whatever.JoinContinuation(&cont);
- // ...
- // IfBuilder if_something(this);
- // if_something.If<Condition>(arg1, arg2);
- // if_something.Then();
- // ...
- // if_something.Else();
- // ...
- // if_something.JoinContinuation(&cont);
- // ...
- // IfBuilder if_finally(this, &cont);
- // if_finally.Then();
- // // continues after then code of if_whatever or if_something.
- // ...
- // if_finally.Else();
- // // continues after else code of if_whatever or if_something.
- // ...
- // if_finally.End();
- void JoinContinuation(HIfContinuation* continuation);
-
- void Then();
- void Else();
- void End();
- void EndUnreachable();
-
- void Deopt(DeoptimizeReason reason);
- void ThenDeopt(DeoptimizeReason reason) {
- Then();
- Deopt(reason);
- }
- void ElseDeopt(DeoptimizeReason reason) {
- Else();
- Deopt(reason);
- }
-
- void Return(HValue* value);
-
- private:
- void InitializeDontCreateBlocks(HGraphBuilder* builder);
-
- HControlInstruction* AddCompare(HControlInstruction* compare);
-
- HGraphBuilder* builder() const {
- DCHECK(builder_ != NULL); // Have you called "Initialize"?
- return builder_;
- }
-
- void AddMergeAtJoinBlock(bool deopt);
-
- void Finish();
- void Finish(HBasicBlock** then_continuation,
- HBasicBlock** else_continuation);
-
- class MergeAtJoinBlock : public ZoneObject {
- public:
- MergeAtJoinBlock(HBasicBlock* block,
- bool deopt,
- MergeAtJoinBlock* next)
- : block_(block),
- deopt_(deopt),
- next_(next) {}
- HBasicBlock* block_;
- bool deopt_;
- MergeAtJoinBlock* next_;
- };
-
- HGraphBuilder* builder_;
- bool finished_ : 1;
- bool did_then_ : 1;
- bool did_else_ : 1;
- bool did_else_if_ : 1;
- bool did_and_ : 1;
- bool did_or_ : 1;
- bool captured_ : 1;
- bool needs_compare_ : 1;
- bool pending_merge_block_ : 1;
- HBasicBlock* first_true_block_;
- HBasicBlock* first_false_block_;
- HBasicBlock* split_edge_merge_block_;
- MergeAtJoinBlock* merge_at_join_blocks_;
- int normal_merge_at_join_block_count_;
- int deopt_merge_at_join_block_count_;
- };
-
- class LoopBuilder final {
- public:
- enum Direction {
- kPreIncrement,
- kPostIncrement,
- kPreDecrement,
- kPostDecrement,
- kWhileTrue
- };
-
- explicit LoopBuilder(HGraphBuilder* builder); // while (true) {...}
- LoopBuilder(HGraphBuilder* builder,
- HValue* context,
- Direction direction);
- LoopBuilder(HGraphBuilder* builder,
- HValue* context,
- Direction direction,
- HValue* increment_amount);
-
- ~LoopBuilder() {
- DCHECK(finished_);
- }
-
- HValue* BeginBody(
- HValue* initial,
- HValue* terminating,
- Token::Value token);
-
- void BeginBody(int drop_count);
-
- void Break();
-
- void EndBody();
-
- private:
- void Initialize(HGraphBuilder* builder, HValue* context,
- Direction direction, HValue* increment_amount);
- Zone* zone() { return builder_->zone(); }
-
- HGraphBuilder* builder_;
- HValue* context_;
- HValue* increment_amount_;
- HInstruction* increment_;
- HPhi* phi_;
- HBasicBlock* header_block_;
- HBasicBlock* body_block_;
- HBasicBlock* exit_block_;
- HBasicBlock* exit_trampoline_block_;
- Direction direction_;
- bool finished_;
- };
-
- HValue* BuildNewElementsCapacity(HValue* old_capacity);
-
- HValue* BuildCalculateElementsSize(ElementsKind kind,
- HValue* capacity);
- HAllocate* AllocateJSArrayObject(AllocationSiteMode mode);
- HConstant* EstablishElementsAllocationSize(ElementsKind kind, int capacity);
-
- HAllocate* BuildAllocateElements(ElementsKind kind, HValue* size_in_bytes);
-
- void BuildInitializeElementsHeader(HValue* elements,
- ElementsKind kind,
- HValue* capacity);
-
- // Build allocation and header initialization code for the respective successor
- // of FixedArrayBase.
- HValue* BuildAllocateAndInitializeArray(ElementsKind kind, HValue* capacity);
-
- // |array| must have been allocated with enough room for
- // 1) the JSArray and 2) an AllocationMemento if mode requires it.
- // If the |elements| value provided is NULL, then the array elements storage
- // is initialized with the empty array.
- void BuildJSArrayHeader(HValue* array,
- HValue* array_map,
- HValue* elements,
- AllocationSiteMode mode,
- ElementsKind elements_kind,
- HValue* allocation_site_payload,
- HValue* length_field);
-
- HValue* BuildGrowElementsCapacity(HValue* object,
- HValue* elements,
- ElementsKind kind,
- ElementsKind new_kind,
- HValue* length,
- HValue* new_capacity);
-
- void BuildFillElementsWithValue(HValue* elements,
- ElementsKind elements_kind,
- HValue* from,
- HValue* to,
- HValue* value);
-
- void BuildFillElementsWithHole(HValue* elements,
- ElementsKind elements_kind,
- HValue* from,
- HValue* to);
-
- void BuildCopyProperties(HValue* from_properties, HValue* to_properties,
- HValue* length, HValue* capacity);
-
- void BuildCopyElements(HValue* from_elements,
- ElementsKind from_elements_kind,
- HValue* to_elements,
- ElementsKind to_elements_kind,
- HValue* length,
- HValue* capacity);
-
- void BuildCreateAllocationMemento(HValue* previous_object,
- HValue* previous_object_size,
- HValue* payload);
-
- HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
- bool ensure_no_elements = false);
- HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder,
- bool ensure_no_elements = false);
-
- HInstruction* BuildGetNativeContext();
-
- HValue* BuildArrayBufferViewFieldAccessor(HValue* object,
- HValue* checked_object,
- FieldIndex index);
-
-
- protected:
- void SetSourcePosition(int position) {
- if (position != kNoSourcePosition) {
- position_.SetScriptOffset(position);
- }
- // Otherwise position remains unknown.
- }
-
- void EnterInlinedSource(int inlining_id) {
- if (is_tracking_positions()) {
- position_.SetInliningId(inlining_id);
- }
- }
-
- // Convert the given absolute offset from the start of the script to
- // the SourcePosition assuming that this position corresponds to the
- // same function as position_.
- SourcePosition ScriptPositionToSourcePosition(int position) {
- if (position == kNoSourcePosition) {
- return SourcePosition::Unknown();
- }
- return SourcePosition(position, position_.InliningId());
- }
-
- SourcePosition source_position() { return position_; }
- void set_source_position(SourcePosition position) { position_ = position; }
-
- bool is_tracking_positions() { return track_positions_; }
-
- HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
- template <typename ViewClass>
- void BuildArrayBufferViewInitialization(HValue* obj,
- HValue* buffer,
- HValue* byte_offset,
- HValue* byte_length);
-
- private:
- HGraphBuilder();
-
- template <class I>
- I* AddInstructionTyped(I* instr) {
- return I::cast(AddInstruction(instr));
- }
-
- CompilationInfo* info_;
- CallInterfaceDescriptor descriptor_;
- HGraph* graph_;
- HBasicBlock* current_block_;
- Scope* scope_;
- SourcePosition position_;
- bool track_positions_;
-};
-
-template <>
-inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
- DeoptimizeReason reason, Deoptimizer::BailoutType type) {
- if (type == Deoptimizer::SOFT) {
- isolate()->counters()->soft_deopts_requested()->Increment();
- if (FLAG_always_opt) return NULL;
- }
- if (current_block()->IsDeoptimizing()) return NULL;
- HBasicBlock* after_deopt_block = CreateBasicBlock(
- current_block()->last_environment());
- HDeoptimize* instr = New<HDeoptimize>(reason, type, after_deopt_block);
- if (type == Deoptimizer::SOFT) {
- isolate()->counters()->soft_deopts_inserted()->Increment();
- }
- FinishCurrentBlock(instr);
- set_current_block(after_deopt_block);
- return instr;
-}
-
-template <>
-inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
- DeoptimizeReason reason, Deoptimizer::BailoutType type) {
- return Add<HDeoptimize>(reason, type);
-}
-
-
-template<>
-inline HSimulate* HGraphBuilder::Add<HSimulate>(
- BailoutId id,
- RemovableSimulate removable) {
- HSimulate* instr = current_block()->CreateSimulate(id, removable);
- AddInstruction(instr);
- return instr;
-}
-
-
-template<>
-inline HSimulate* HGraphBuilder::Add<HSimulate>(
- BailoutId id) {
- return Add<HSimulate>(id, FIXED_SIMULATE);
-}
-
-
-template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
- return Add<HSimulate>(id, FIXED_SIMULATE);
-}
-
-
-template<>
-inline HReturn* HGraphBuilder::Add<HReturn>(HValue* value) {
- int num_parameters = graph()->info()->num_parameters();
- HValue* params = AddUncasted<HConstant>(num_parameters);
- HReturn* return_instruction = New<HReturn>(value, params);
- FinishExitCurrentBlock(return_instruction);
- return return_instruction;
-}
-
-
-template<>
-inline HReturn* HGraphBuilder::Add<HReturn>(HConstant* value) {
- return Add<HReturn>(static_cast<HValue*>(value));
-}
-
-template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
- return Add<HReturn>(value);
-}
-
-
-template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
- return Add<HReturn>(value);
-}
-
-
-template<>
-inline HCallRuntime* HGraphBuilder::Add<HCallRuntime>(
- const Runtime::Function* c_function,
- int argument_count) {
- HCallRuntime* instr = New<HCallRuntime>(c_function, argument_count);
- if (graph()->info()->IsStub()) {
- // When compiling code stubs, we don't want to save all double registers
- // upon entry to the stub, but instead have the call runtime instruction
- // save the double registers only on-demand (in the fallback case).
- instr->set_save_doubles(kSaveFPRegs);
- }
- AddInstruction(instr);
- return instr;
-}
-
-
-template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
- Handle<String> name,
- const Runtime::Function* c_function,
- int argument_count) {
- return Add<HCallRuntime>(c_function, argument_count);
-}
-
-
-template <>
-inline HParameter* HGraphBuilder::New<HParameter>(unsigned index) {
- return HParameter::New(isolate(), zone(), nullptr, index);
-}
-
-
-template <>
-inline HParameter* HGraphBuilder::New<HParameter>(
- unsigned index, HParameter::ParameterKind kind) {
- return HParameter::New(isolate(), zone(), nullptr, index, kind);
-}
-
-
-template <>
-inline HParameter* HGraphBuilder::New<HParameter>(
- unsigned index, HParameter::ParameterKind kind, Representation r) {
- return HParameter::New(isolate(), zone(), nullptr, index, kind, r);
-}
-
-
-template <>
-inline HPrologue* HGraphBuilder::New<HPrologue>() {
- return HPrologue::New(zone());
-}
-
-
-template <>
-inline HContext* HGraphBuilder::New<HContext>() {
- return HContext::New(zone());
-}
-
-// This AstVisitor is not final, and provides the AstVisitor methods as virtual
-// methods so they can be specialized by subclasses.
-class HOptimizedGraphBuilder : public HGraphBuilder,
- public AstVisitor<HOptimizedGraphBuilder> {
- public:
- // A class encapsulating (lazily-allocated) break and continue blocks for
- // a breakable statement. Separated from BreakAndContinueScope so that it
- // can have a separate lifetime.
- class BreakAndContinueInfo final BASE_EMBEDDED {
- public:
- explicit BreakAndContinueInfo(BreakableStatement* target,
- Scope* scope,
- int drop_extra = 0)
- : target_(target),
- break_block_(NULL),
- continue_block_(NULL),
- scope_(scope),
- drop_extra_(drop_extra) {
- }
-
- BreakableStatement* target() { return target_; }
- HBasicBlock* break_block() { return break_block_; }
- void set_break_block(HBasicBlock* block) { break_block_ = block; }
- HBasicBlock* continue_block() { return continue_block_; }
- void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
- Scope* scope() { return scope_; }
- int drop_extra() { return drop_extra_; }
-
- private:
- BreakableStatement* target_;
- HBasicBlock* break_block_;
- HBasicBlock* continue_block_;
- Scope* scope_;
- int drop_extra_;
- };
-
- // A helper class to maintain a stack of current BreakAndContinueInfo
- // structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope final BASE_EMBEDDED {
- public:
- BreakAndContinueScope(BreakAndContinueInfo* info,
- HOptimizedGraphBuilder* owner)
- : info_(info), owner_(owner), next_(owner->break_scope()) {
- owner->set_break_scope(this);
- }
-
- ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
-
- BreakAndContinueInfo* info() { return info_; }
- HOptimizedGraphBuilder* owner() { return owner_; }
- BreakAndContinueScope* next() { return next_; }
-
- // Search the break stack for a break or continue target.
- enum BreakType { BREAK, CONTINUE };
- HBasicBlock* Get(BreakableStatement* stmt, BreakType type,
- Scope** scope, int* drop_extra);
-
- private:
- BreakAndContinueInfo* info_;
- HOptimizedGraphBuilder* owner_;
- BreakAndContinueScope* next_;
- };
-
- explicit HOptimizedGraphBuilder(CompilationInfo* info, bool track_positions);
-
- bool BuildGraph() override;
-
- // Simple accessors.
- BreakAndContinueScope* break_scope() const { return break_scope_; }
- void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
-
- HValue* context() override { return environment()->context(); }
-
- HOsrBuilder* osr() const { return osr_; }
-
- void Bailout(BailoutReason reason);
-
- HBasicBlock* CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id);
-
- FunctionState* function_state() const { return function_state_; }
-
- void VisitDeclarations(Declaration::List* declarations);
-
- AstTypeBounds* bounds() { return &bounds_; }
-
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
- void operator delete(void* pointer, Zone* zone) { }
- void operator delete(void* pointer) { }
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- protected:
- // Forward declarations for inner scope classes.
- class SubgraphScope;
-
- static const int kMaxCallPolymorphism = 4;
- static const int kMaxLoadPolymorphism = 4;
- static const int kMaxStorePolymorphism = 4;
-
- // Even in the 'unlimited' case we have to have some limit in order not to
- // overflow the stack.
- static const int kUnlimitedMaxInlinedSourceSize = 100000;
- static const int kUnlimitedMaxInlinedNodes = 10000;
- static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
-
- // Maximum depth and total number of elements and properties for literal
- // graphs to be considered for fast deep-copying. The limit is chosen to
- // match the maximum number of inobject properties, to ensure that the
- // performance of using object literals is not worse than using constructor
- // functions; see crbug.com/v8/6211 for details.
- static const int kMaxFastLiteralDepth = 3;
- static const int kMaxFastLiteralProperties =
- (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
-
- // Simple accessors.
- void set_function_state(FunctionState* state) { function_state_ = state; }
-
- AstContext* ast_context() const { return ast_context_; }
- void set_ast_context(AstContext* context) { ast_context_ = context; }
-
- // Accessors forwarded to the function state.
- CompilationInfo* current_info() const {
- return function_state()->compilation_info();
- }
- AstContext* call_context() const {
- return function_state()->call_context();
- }
- HBasicBlock* function_return() const {
- return function_state()->function_return();
- }
- TestContext* inlined_test_context() const {
- return function_state()->test_context();
- }
- Handle<JSFunction> current_closure() const {
- return current_info()->closure();
- }
- Handle<SharedFunctionInfo> current_shared_info() const {
- return current_info()->shared_info();
- }
- FeedbackVector* current_feedback_vector() const {
- return current_closure()->feedback_vector();
- }
- void ClearInlinedTestContext() {
- function_state()->ClearInlinedTestContext();
- }
- LanguageMode function_language_mode() {
- return function_state()->compilation_info()->parse_info()->language_mode();
- }
-
-#define FOR_EACH_HYDROGEN_INTRINSIC(F) \
- F(IsSmi) \
- F(IsArray) \
- F(IsTypedArray) \
- F(IsJSProxy) \
- F(Call) \
- F(ToInteger) \
- F(ToObject) \
- F(ToString) \
- F(ToLength) \
- F(ToNumber) \
- F(IsJSReceiver) \
- F(DebugBreakInOptimizedCode) \
- F(StringCharCodeAt) \
- F(SubString) \
- F(DebugIsActive) \
- /* Typed Arrays */ \
- F(MaxSmi) \
- F(TypedArrayMaxSizeInHeap) \
- F(ArrayBufferViewGetByteLength) \
- F(ArrayBufferViewGetByteOffset) \
- F(ArrayBufferViewWasNeutered) \
- F(TypedArrayGetLength) \
- /* ArrayBuffer */ \
- F(ArrayBufferGetByteLength) \
- /* ES6 Collections */ \
- F(MapClear) \
- F(MapInitialize) \
- F(SetClear) \
- F(SetInitialize) \
- F(FixedArrayGet) \
- F(FixedArraySet) \
- F(JSCollectionGetTable) \
- F(StringGetRawHashField) \
- F(TheHole) \
- /* ES6 Iterators */ \
- F(CreateIterResultObject) \
- /* Arrays */ \
- F(HasFastPackedElements)
-
-#define GENERATOR_DECLARATION(Name) void Generate##Name(CallRuntime* call);
- FOR_EACH_HYDROGEN_INTRINSIC(GENERATOR_DECLARATION)
-#undef GENERATOR_DECLARATION
-
- void VisitDelete(UnaryOperation* expr);
- void VisitVoid(UnaryOperation* expr);
- void VisitTypeof(UnaryOperation* expr);
- void VisitNot(UnaryOperation* expr);
-
- void VisitComma(BinaryOperation* expr);
- void VisitLogicalExpression(BinaryOperation* expr);
- void VisitArithmeticExpression(BinaryOperation* expr);
-
- void VisitLoopBody(IterationStatement* stmt, BailoutId stack_check_id,
- HBasicBlock* loop_entry);
-
- void BuildForInBody(ForInStatement* stmt, Variable* each_var,
- HValue* enumerable);
-
- // Create a back edge in the flow graph. body_exit is the predecessor
- // block and loop_entry is the successor block. loop_successor is the
- // block where control flow exits the loop normally (e.g., via failure of
- // the condition) and break_block is the block where control flow breaks
- // from the loop. All blocks except loop_entry can be NULL. The return
- // value is the new successor block which is the join of loop_successor
- // and break_block, or NULL.
- HBasicBlock* CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block);
-
- // Build a loop entry
- HBasicBlock* BuildLoopEntry();
-
- // Builds a loop entry that respects OSR requirements.
- HBasicBlock* BuildLoopEntry(IterationStatement* statement);
-
- HBasicBlock* JoinContinue(IterationStatement* statement,
- BailoutId continue_id, HBasicBlock* exit_block,
- HBasicBlock* continue_block);
-
- HValue* Top() const { return environment()->Top(); }
- void Drop(int n) { environment()->Drop(n); }
- void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
- bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
- int index,
- HEnvironment* env) {
- if (!FLAG_analyze_environment_liveness) return false;
- // Zapping parameters isn't safe because function.arguments can inspect them
- // at any time.
- return env->is_local_index(index);
- }
- void BindIfLive(Variable* var, HValue* value) {
- HEnvironment* env = environment();
- int index = env->IndexFor(var);
- env->Bind(index, value);
- if (IsEligibleForEnvironmentLivenessAnalysis(var, index, env)) {
- HEnvironmentMarker* bind =
- Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
- USE(bind);
-#ifdef DEBUG
- bind->set_closure(env->closure());
-#endif
- }
- }
- HValue* LookupAndMakeLive(Variable* var) {
- HEnvironment* env = environment();
- int index = env->IndexFor(var);
- if (IsEligibleForEnvironmentLivenessAnalysis(var, index, env)) {
- HEnvironmentMarker* lookup =
- Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
- USE(lookup);
-#ifdef DEBUG
- lookup->set_closure(env->closure());
-#endif
- }
- return env->Lookup(index);
- }
-
- // The value of the arguments object is allowed in some but not most value
- // contexts. (It's allowed in all effect contexts and disallowed in all
- // test contexts.)
- void VisitForValue(Expression* expr,
- ArgumentsAllowedFlag flag = ARGUMENTS_NOT_ALLOWED);
- void VisitForTypeOf(Expression* expr);
- void VisitForEffect(Expression* expr);
- void VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block);
-
- // Visit a list of expressions from left to right, each in a value context.
- void VisitExpressions(ZoneList<Expression*>* exprs);
- void VisitExpressions(ZoneList<Expression*>* exprs,
- ArgumentsAllowedFlag flag);
-
- // Remove the arguments from the bailout environment and emit instructions
- // to push them as outgoing parameters.
- template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
- void PushArgumentsFromEnvironment(int count);
-
- void SetUpScope(DeclarationScope* scope);
- void VisitStatements(ZoneList<Statement*>* statements);
-
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- bool CanInlineGlobalPropertyAccess(Variable* var, LookupIterator* it,
- PropertyAccessType access_type);
-
- bool CanInlineGlobalPropertyAccess(LookupIterator* it,
- PropertyAccessType access_type);
-
- void InlineGlobalPropertyLoad(LookupIterator* it, BailoutId ast_id);
- HInstruction* InlineGlobalPropertyStore(LookupIterator* it, HValue* value,
- BailoutId ast_id);
-
- void EnsureArgumentsArePushedForAccess();
- bool TryArgumentsAccess(Property* expr);
-
- // Shared code for .call and .apply optimizations.
- void HandleIndirectCall(Call* expr, HValue* function, int arguments_count);
- // Try to optimize indirect calls such as fun.apply(receiver, arguments)
- // or fun.call(...).
- bool TryIndirectCall(Call* expr);
- void BuildFunctionApply(Call* expr);
- void BuildFunctionCall(Call* expr);
-
- template <class T>
- bool TryHandleArrayCall(T* expr, HValue* function);
-
- enum ArrayIndexOfMode { kFirstIndexOf, kLastIndexOf };
- HValue* BuildArrayIndexOf(HValue* receiver,
- HValue* search_element,
- ElementsKind kind,
- ArrayIndexOfMode mode);
-
- HValue* ImplicitReceiverFor(HValue* function,
- Handle<JSFunction> target);
-
- int InliningAstSize(Handle<JSFunction> target);
- bool TryInline(Handle<JSFunction> target, int arguments_count,
- HValue* implicit_return_value, BailoutId ast_id,
- BailoutId return_id, InliningKind inlining_kind,
- TailCallMode syntactic_tail_call_mode);
-
- bool TryInlineCall(Call* expr);
- bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
- bool TryInlineGetter(Handle<Object> getter, Handle<Map> receiver_map,
- BailoutId ast_id, BailoutId return_id);
- bool TryInlineSetter(Handle<Object> setter, Handle<Map> receiver_map,
- BailoutId id, BailoutId assignment_id,
- HValue* implicit_return_value);
- bool TryInlineIndirectCall(Handle<JSFunction> function, Call* expr,
- int arguments_count);
- bool TryInlineBuiltinGetterCall(Handle<JSFunction> function,
- Handle<Map> receiver_map, BailoutId ast_id);
- bool TryInlineBuiltinMethodCall(Handle<JSFunction> function,
- Handle<Map> receiver_map, BailoutId ast_id,
- int args_count_no_receiver);
- bool TryInlineBuiltinFunctionCall(Call* expr);
- enum ApiCallType {
- kCallApiFunction,
- kCallApiMethod,
- kCallApiGetter,
- kCallApiSetter
- };
- bool TryInlineApiMethodCall(Call* expr,
- HValue* receiver,
- SmallMapList* receiver_types);
- bool TryInlineApiFunctionCall(Call* expr, HValue* receiver);
- bool TryInlineApiGetter(Handle<Object> function, Handle<Map> receiver_map,
- BailoutId ast_id);
- bool TryInlineApiSetter(Handle<Object> function, Handle<Map> receiver_map,
- BailoutId ast_id);
- bool TryInlineApiCall(Handle<Object> function, HValue* receiver,
- SmallMapList* receiver_maps, int argc, BailoutId ast_id,
- ApiCallType call_type,
- TailCallMode syntactic_tail_call_mode);
- static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
- static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
- static bool NoElementsInPrototypeChain(Handle<Map> receiver_map);
-
- // If --trace-inlining, print a line of the inlining trace. Inlining
- // succeeded if the reason string is NULL and failed if there is a
- // non-NULL reason string.
- void TraceInline(Handle<JSFunction> target, Handle<JSFunction> caller,
- const char* failure_reason,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
-
- void HandleGlobalVariableAssignment(Variable* var, HValue* value,
- FeedbackSlot slot, BailoutId ast_id);
-
- void HandlePropertyAssignment(Assignment* expr);
- void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
- Expression* expr, FeedbackSlot slot,
- BailoutId ast_id, BailoutId return_id,
- HValue* object, HValue* value,
- SmallMapList* types,
- Handle<Name> name);
-
- HValue* BuildAllocateExternalElements(
- ExternalArrayType array_type,
- bool is_zero_byte_offset,
- HValue* buffer, HValue* byte_offset, HValue* length);
- HValue* BuildAllocateFixedTypedArray(ExternalArrayType array_type,
- size_t element_size,
- ElementsKind fixed_elements_kind,
- HValue* byte_length, HValue* length,
- bool initialize);
-
- // TODO(adamk): Move all OrderedHashTable functions to their own class.
- HValue* BuildOrderedHashTableHashToBucket(HValue* hash, HValue* num_buckets);
- template <typename CollectionType>
- HValue* BuildOrderedHashTableHashToEntry(HValue* table, HValue* hash,
- HValue* num_buckets);
- template <typename CollectionType>
- HValue* BuildOrderedHashTableEntryToIndex(HValue* entry, HValue* num_buckets);
- template <typename CollectionType>
- HValue* BuildOrderedHashTableFindEntry(HValue* table, HValue* key,
- HValue* hash);
- template <typename CollectionType>
- HValue* BuildOrderedHashTableAddEntry(HValue* table, HValue* key,
- HValue* hash,
- HIfContinuation* join_continuation);
- template <typename CollectionType>
- HValue* BuildAllocateOrderedHashTable();
- template <typename CollectionType>
- void BuildOrderedHashTableClear(HValue* receiver);
- template <typename CollectionType>
- void BuildJSCollectionDelete(CallRuntime* call,
- const Runtime::Function* c_function);
- template <typename CollectionType>
- void BuildJSCollectionHas(CallRuntime* call,
- const Runtime::Function* c_function);
- HValue* BuildStringHashLoadIfIsStringAndHashComputed(
- HValue* object, HIfContinuation* continuation);
-
- Handle<JSFunction> array_function() {
- return handle(isolate()->native_context()->array_function());
- }
-
- bool TryInlineArrayCall(Expression* expression, int argument_count,
- Handle<AllocationSite> site);
-
- void BuildInitializeInobjectProperties(HValue* receiver,
- Handle<Map> initial_map);
-
- class PropertyAccessInfo {
- public:
- PropertyAccessInfo(HOptimizedGraphBuilder* builder,
- PropertyAccessType access_type, Handle<Map> map,
- Handle<Name> name)
- : builder_(builder),
- access_type_(access_type),
- map_(map),
- name_(isolate()->factory()->InternalizeName(name)),
- field_type_(HType::Tagged()),
- access_(HObjectAccess::ForMap()),
- lookup_type_(NOT_FOUND),
- details_(PropertyDetails::Empty()),
- store_mode_(STORE_TO_INITIALIZED_ENTRY) {}
-
- // Ensure the full store is performed.
- void MarkAsInitializingStore() {
- DCHECK_EQ(STORE, access_type_);
- store_mode_ = INITIALIZING_STORE;
- }
-
- StoreFieldOrKeyedMode StoreMode() {
- DCHECK_EQ(STORE, access_type_);
- return store_mode_;
- }
-
- // Checks whether this PropertyAccessInfo can be handled as a monomorphic
- // named load. It additionally fills in the fields necessary to generate the
- // lookup code.
- bool CanAccessMonomorphic();
-
- // Checks whether all types behave uniformly when loading name. If all maps
- // behave the same, a single monomorphic load instruction can be emitted,
- // guarded by a single map-checks instruction that checks whether the
- // receiver is an instance of any of the types.
- // This method skips the first type in types, assuming that this
- // PropertyAccessInfo is built for types->first().
- bool CanAccessAsMonomorphic(SmallMapList* types);
-
- bool NeedsWrappingFor(Handle<JSFunction> target) const;
-
- Handle<Map> map();
- Handle<Name> name() const { return name_; }
-
- bool IsJSObjectFieldAccessor() {
- int offset; // unused
- return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
- }
-
- bool GetJSObjectFieldAccess(HObjectAccess* access) {
- int offset;
- if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
- if (IsStringType()) {
- DCHECK(Name::Equals(isolate()->factory()->length_string(), name_));
- *access = HObjectAccess::ForStringLength();
- } else if (IsArrayType()) {
- DCHECK(Name::Equals(isolate()->factory()->length_string(), name_));
- *access = HObjectAccess::ForArrayLength(map_->elements_kind());
- } else {
- *access = HObjectAccess::ForMapAndOffset(map_, offset);
- }
- return true;
- }
- return false;
- }
-
- bool has_holder() { return !holder_.is_null(); }
- bool IsLoad() const { return access_type_ == LOAD; }
-
- Isolate* isolate() const { return builder_->isolate(); }
- Handle<JSObject> holder() { return holder_; }
- Handle<Object> accessor() { return accessor_; }
- Handle<Object> constant() { return constant_; }
- Handle<Map> transition() { return transition_; }
- SmallMapList* field_maps() { return &field_maps_; }
- HType field_type() const { return field_type_; }
- HObjectAccess access() { return access_; }
-
- bool IsFound() const { return lookup_type_ != NOT_FOUND; }
- bool IsProperty() const { return IsFound() && !IsTransition(); }
- bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
- // TODO(ishell): rename to IsDataConstant() once constant field tracking
- // is done.
- bool IsDataConstantField() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
- details_.location() == kField && details_.constness() == kConst;
- }
- bool IsData() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
- details_.location() == kField;
- }
- bool IsDataConstant() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
- details_.location() == kDescriptor;
- }
- bool IsAccessorConstant() const {
- return !IsTransition() && details_.kind() == kAccessor &&
- details_.location() == kDescriptor;
- }
- bool IsConfigurable() const { return details_.IsConfigurable(); }
- bool IsReadOnly() const { return details_.IsReadOnly(); }
-
- bool IsStringType() { return map_->instance_type() < FIRST_NONSTRING_TYPE; }
- bool IsNumberType() { return map_->instance_type() == HEAP_NUMBER_TYPE; }
- bool IsValueWrapped() { return IsStringType() || IsNumberType(); }
- bool IsArrayType() { return map_->instance_type() == JS_ARRAY_TYPE; }
-
- private:
- Handle<Object> GetConstantFromMap(Handle<Map> map) const {
- DCHECK_EQ(DESCRIPTOR_TYPE, lookup_type_);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return handle(map->instance_descriptors()->GetValue(number_), isolate());
- }
- Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
- return GetConstantFromMap(map);
- }
- Handle<FieldType> GetFieldTypeFromMap(Handle<Map> map) const;
- Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
- DCHECK(IsFound());
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return handle(map->FindFieldOwner(number_));
- }
- int GetLocalFieldIndexFromMap(Handle<Map> map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- int field_index = map->instance_descriptors()->GetFieldIndex(number_);
- return field_index - map->GetInObjectProperties();
- }
-
- void LookupDescriptor(Map* map, Name* name) {
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(isolate(), name, map);
- if (number == DescriptorArray::kNotFound) return NotFound();
- lookup_type_ = DESCRIPTOR_TYPE;
- details_ = descriptors->GetDetails(number);
- number_ = number;
- }
- void LookupTransition(Map* map, Name* name, PropertyAttributes attributes) {
- Map* target =
- TransitionArray::SearchTransition(map, kData, name, attributes);
- if (target == NULL) return NotFound();
- lookup_type_ = TRANSITION_TYPE;
- transition_ = handle(target);
- number_ = transition_->LastAdded();
- details_ = transition_->instance_descriptors()->GetDetails(number_);
- MarkAsInitializingStore();
- }
- void NotFound() {
- lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails::Empty();
- }
- Representation representation() const {
- DCHECK(IsFound());
- return details_.representation();
- }
- bool IsTransitionToData() const {
- return IsTransition() && details_.kind() == kData &&
- details_.location() == kField;
- }
-
- Zone* zone() { return builder_->zone(); }
- CompilationInfo* top_info() { return builder_->top_info(); }
- CompilationInfo* current_info() { return builder_->current_info(); }
-
- bool LoadResult(Handle<Map> map);
- bool LoadFieldMaps(Handle<Map> map);
- bool LookupDescriptor();
- bool LookupInPrototypes();
- bool IsIntegerIndexedExotic();
- bool IsCompatible(PropertyAccessInfo* other);
-
- void GeneralizeRepresentation(Representation r) {
- access_ = access_.WithRepresentation(
- access_.representation().generalize(r));
- }
-
- HOptimizedGraphBuilder* builder_;
- PropertyAccessType access_type_;
- Handle<Map> map_;
- Handle<Name> name_;
- Handle<JSObject> holder_;
- Handle<Object> accessor_;
- Handle<JSObject> api_holder_;
- Handle<Object> constant_;
- SmallMapList field_maps_;
- HType field_type_;
- HObjectAccess access_;
-
- enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
- Handle<Map> transition_;
- int number_;
- PropertyDetails details_;
- StoreFieldOrKeyedMode store_mode_;
- };
-
- HValue* BuildMonomorphicAccess(PropertyAccessInfo* info, HValue* object,
- HValue* checked_object, HValue* value,
- BailoutId ast_id, BailoutId return_id,
- bool can_inline_accessor = true);
-
- HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
- BailoutId return_id, Expression* expr,
- FeedbackSlot slot, HValue* object, Handle<Name> name,
- HValue* value, bool is_uninitialized = false);
-
- void HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name);
- void HandleLiteralCompareTypeof(CompareOperation* expr,
- Expression* sub_expr,
- Handle<String> check);
- void HandleLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil);
-
- enum PushBeforeSimulateBehavior {
- PUSH_BEFORE_SIMULATE,
- NO_PUSH_BEFORE_SIMULATE
- };
-
- HControlInstruction* BuildCompareInstruction(
- Token::Value op, HValue* left, HValue* right, AstType* left_type,
- AstType* right_type, AstType* combined_type, SourcePosition left_position,
- SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
- BailoutId bailout_id);
-
- HInstruction* BuildStringCharCodeAt(HValue* string,
- HValue* index);
-
- HValue* BuildBinaryOperation(
- BinaryOperation* expr,
- HValue* left,
- HValue* right,
- PushBeforeSimulateBehavior push_sim_result);
- HInstruction* BuildIncrement(CountOperation* expr);
- HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
- Expression* expr, FeedbackSlot slot,
- HValue* object, HValue* key, HValue* value);
-
- HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
- HValue* key,
- HValue* val,
- SmallMapList* maps);
-
- LoadKeyedHoleMode BuildKeyedHoleMode(Handle<Map> map);
-
- HInstruction* BuildMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode);
-
- HValue* HandlePolymorphicElementAccess(Expression* expr, FeedbackSlot slot,
- HValue* object, HValue* key,
- HValue* val, SmallMapList* maps,
- PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode,
- bool* has_side_effects);
-
- HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
- Expression* expr, FeedbackSlot slot,
- BailoutId ast_id, BailoutId return_id,
- PropertyAccessType access_type,
- bool* has_side_effects);
-
- HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
- FeedbackSlot slot, HValue* object,
- Handle<Name> name, HValue* value,
- bool is_uninitialized = false);
-
- HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
-
- void BuildLoad(Property* property,
- BailoutId ast_id);
- void PushLoad(Property* property,
- HValue* object,
- HValue* key);
-
- void BuildStoreForEffect(Expression* expression, Property* prop,
- FeedbackSlot slot, BailoutId ast_id,
- BailoutId return_id, HValue* object, HValue* key,
- HValue* value);
-
- void BuildStore(Expression* expression, Property* prop, FeedbackSlot slot,
- BailoutId ast_id, BailoutId return_id,
- bool is_uninitialized = false);
-
- HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
- HValue* checked_object);
- HValue* BuildStoreNamedField(PropertyAccessInfo* info, HValue* checked_object,
- HValue* value);
-
- HValue* BuildContextChainWalk(Variable* var);
-
- HValue* AddThisFunction();
- HInstruction* BuildThisFunction();
-
- HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
- AllocationSiteUsageContext* site_context);
-
- void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
- HInstruction* object);
-
- void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
- HInstruction* object,
- AllocationSiteUsageContext* site_context,
- PretenureFlag pretenure_flag);
-
- void BuildEmitElements(Handle<JSObject> boilerplate_object,
- Handle<FixedArrayBase> elements,
- HValue* object_elements,
- AllocationSiteUsageContext* site_context);
-
- void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
- ElementsKind kind,
- HValue* object_elements);
-
- void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
- ElementsKind kind,
- HValue* object_elements,
- AllocationSiteUsageContext* site_context);
-
- void AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map);
-
- void BuildEnsureCallable(HValue* object);
-
- HInstruction* NewCallFunction(HValue* function, int argument_count,
- TailCallMode syntactic_tail_call_mode,
- ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode);
-
- HInstruction* NewCallFunctionViaIC(HValue* function, int argument_count,
- TailCallMode syntactic_tail_call_mode,
- ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode,
- FeedbackSlot slot);
-
- HInstruction* NewCallConstantFunction(Handle<JSFunction> target,
- int argument_count,
- TailCallMode syntactic_tail_call_mode,
- TailCallMode tail_call_mode);
-
- bool CanBeFunctionApplyArguments(Call* expr);
-
- bool IsAnyParameterContextAllocated();
-
- // The translation state of the currently-being-translated function.
- FunctionState* function_state_;
-
- // The base of the function state stack.
- FunctionState initial_function_state_;
-
- // Expression context of the currently visited subexpression. NULL when
- // visiting statements.
- AstContext* ast_context_;
-
- // A stack of breakable statements entered.
- BreakAndContinueScope* break_scope_;
-
- int inlined_count_;
- ZoneList<Handle<Object> > globals_;
-
- bool inline_bailout_;
-
- HOsrBuilder* osr_;
-
- AstTypeBounds bounds_;
-
- friend class FunctionState; // Pushes and pops the state stack.
- friend class AstContext; // Pushes and pops the AST context stack.
- friend class HOsrBuilder;
-
- DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
-};
-
-
-Zone* AstContext::zone() const { return owner_->zone(); }
-
-
-class HStatistics final : public Malloced {
- public:
- HStatistics()
- : times_(5),
- names_(5),
- sizes_(5),
- total_size_(0),
- source_size_(0) { }
-
- void Initialize(CompilationInfo* info);
- void Print();
- void SaveTiming(const char* name, base::TimeDelta time, size_t size);
-
- void IncrementFullCodeGen(base::TimeDelta full_code_gen) {
- full_code_gen_ += full_code_gen;
- }
-
- void IncrementCreateGraph(base::TimeDelta delta) { create_graph_ += delta; }
-
- void IncrementOptimizeGraph(base::TimeDelta delta) {
- optimize_graph_ += delta;
- }
-
- void IncrementGenerateCode(base::TimeDelta delta) { generate_code_ += delta; }
-
- void IncrementSubtotals(base::TimeDelta create_graph,
- base::TimeDelta optimize_graph,
- base::TimeDelta generate_code) {
- IncrementCreateGraph(create_graph);
- IncrementOptimizeGraph(optimize_graph);
- IncrementGenerateCode(generate_code);
- }
-
- private:
- List<base::TimeDelta> times_;
- List<const char*> names_;
- List<size_t> sizes_;
- base::TimeDelta create_graph_;
- base::TimeDelta optimize_graph_;
- base::TimeDelta generate_code_;
- size_t total_size_;
- base::TimeDelta full_code_gen_;
- double source_size_;
-};
-
-
-class HPhase : public CompilationPhase {
- public:
- HPhase(const char* name, HGraph* graph)
- : CompilationPhase(name, graph->info()),
- graph_(graph) { }
- ~HPhase();
-
- protected:
- HGraph* graph() const { return graph_; }
-
- private:
- HGraph* graph_;
-
- DISALLOW_COPY_AND_ASSIGN(HPhase);
-};
-
-
-class HTracer final : public Malloced {
- public:
- explicit HTracer(int isolate_id)
- : trace_(&string_allocator_), indent_(0) {
- if (FLAG_trace_hydrogen_file == NULL) {
- SNPrintF(filename_,
- "hydrogen-%d-%d.cfg",
- base::OS::GetCurrentProcessId(),
- isolate_id);
- } else {
- StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
- }
- WriteChars(filename_.start(), "", 0, false);
- }
-
- void TraceCompilation(CompilationInfo* info);
- void TraceHydrogen(const char* name, HGraph* graph);
- void TraceLithium(const char* name, LChunk* chunk);
- void TraceLiveRanges(const char* name, LAllocator* allocator);
-
- private:
- class Tag final BASE_EMBEDDED {
- public:
- Tag(HTracer* tracer, const char* name) {
- name_ = name;
- tracer_ = tracer;
- tracer->PrintIndent();
- tracer->trace_.Add("begin_%s\n", name);
- tracer->indent_++;
- }
-
- ~Tag() {
- tracer_->indent_--;
- tracer_->PrintIndent();
- tracer_->trace_.Add("end_%s\n", name_);
- DCHECK(tracer_->indent_ >= 0);
- tracer_->FlushToFile();
- }
-
- private:
- HTracer* tracer_;
- const char* name_;
- };
-
- void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
- void Trace(const char* name, HGraph* graph, LChunk* chunk);
- void FlushToFile();
-
- void PrintEmptyProperty(const char* name) {
- PrintIndent();
- trace_.Add("%s\n", name);
- }
-
- void PrintStringProperty(const char* name, const char* value) {
- PrintIndent();
- trace_.Add("%s \"%s\"\n", name, value);
- }
-
- void PrintLongProperty(const char* name, int64_t value) {
- PrintIndent();
- trace_.Add("%s %d000\n", name, static_cast<int>(value / 1000));
- }
-
- void PrintBlockProperty(const char* name, int block_id) {
- PrintIndent();
- trace_.Add("%s \"B%d\"\n", name, block_id);
- }
-
- void PrintIntProperty(const char* name, int value) {
- PrintIndent();
- trace_.Add("%s %d\n", name, value);
- }
-
- void PrintIndent() {
- for (int i = 0; i < indent_; i++) {
- trace_.Add(" ");
- }
- }
-
- EmbeddedVector<char, 64> filename_;
- HeapStringAllocator string_allocator_;
- StringStream trace_;
- int indent_;
-};
-
-
-class NoObservableSideEffectsScope final {
- public:
- explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
- builder_(builder) {
- builder_->graph()->IncrementInNoSideEffectsScope();
- }
- ~NoObservableSideEffectsScope() {
- builder_->graph()->DecrementInNoSideEffectsScope();
- }
-
- private:
- HGraphBuilder* builder_;
-};
-
-class DoExpressionScope final {
- public:
- explicit DoExpressionScope(HOptimizedGraphBuilder* builder)
- : builder_(builder) {
- builder_->function_state()->IncrementInDoExpressionScope();
- }
- ~DoExpressionScope() {
- builder_->function_state()->DecrementInDoExpressionScope();
- }
-
- private:
- HOptimizedGraphBuilder* builder_;
-};
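
Both NoObservableSideEffectsScope and DoExpressionScope above are small RAII guards: the constructor bumps a counter on the graph or function state and the destructor restores it, so nested scopes compose naturally. The following is a minimal standalone sketch of that pattern with hypothetical names (CounterScope is illustrative, not a V8 class):

    #include <cassert>

    // Minimal RAII counter guard in the spirit of NoObservableSideEffectsScope:
    // increment on construction, decrement on destruction.
    class CounterScope {
     public:
      explicit CounterScope(int* counter) : counter_(counter) { ++*counter_; }
      ~CounterScope() { --*counter_; }

     private:
      int* counter_;
    };

    int main() {
      int in_scope = 0;
      {
        CounterScope outer(&in_scope);
        CounterScope inner(&in_scope);  // nesting just increments again
        assert(in_scope == 2);
      }
      assert(in_scope == 0);  // both destructors have run
      return 0;
    }
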
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_H_
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
deleted file mode 100644
index 1c9c1999d0..0000000000
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ /dev/null
@@ -1,5155 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/deoptimizer.h"
-#include "src/ia32/frames-ia32.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) {}
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
- }
-}
-
-
-#ifdef _MSC_VER
-void LCodeGen::MakeSureStackPagesMapped(int offset) {
- const int kPageSize = 4 * KB;
- for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
- }
-}
-#endif
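
On MSVC builds the helper above walks the freshly reserved stack area one 4 KB page at a time, starting at the offset nearest the old stack pointer and moving down, so every page is touched (and therefore committed past Windows' stack guard page) before the slots are used. A standalone sketch of which offsets the loop visits, with the store replaced by a print (the names and the page size mirror the code above; nothing here is V8 API):

    #include <cstdio>

    // Same loop structure as MakeSureStackPagesMapped: start one page below
    // `offset` and step down a page at a time while the offset stays positive.
    void PrintTouchedOffsets(int offset) {
      const int kPageSize = 4 * 1024;
      for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
        std::printf("touch esp+%d\n", offset);  // stands in for the mov to eax
      }
    }

    int main() {
      PrintTouchedOffsets(10000);  // prints "touch esp+5904" then "touch esp+1808"
      return 0;
    }
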
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movsd(MemOperand(esp, count * kDoubleSize),
- XMMRegister::from_code(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movsd(XMMRegister::from_code(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- DCHECK(!frame_is_built_);
- frame_is_built_ = true;
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- }
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- DCHECK(slots != 0 || !info()->IsOptimizing());
- if (slots > 0) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- if (FLAG_debug_code) {
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- }
-
- if (info()->saves_caller_doubles()) SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info_->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is still in edi.
- int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(edi);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
- Immediate(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(edi);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in eax. It replaces the context passed to us.
- // It's saved in the stack and kept live in esi.
- __ mov(esi, eax);
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
-
- // Copy parameters into context if necessary.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ sub(esp, Immediate(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
-
-
-bool LCodeGen::GenerateJumpTable() {
- if (!jump_table_.length()) return !is_aborted();
-
- Label needs_frame;
- Comment(";;; -------------------- Jump table --------------------");
-
- for (int i = 0; i < jump_table_.length(); i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- __ call(&needs_frame);
- } else {
- if (info()->saves_caller_doubles()) RestoreCallerDoubles();
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- /* stack layout
- 3: entry address
- 2: return address <-- esp
- 1: garbage
- 0: garbage
- */
- __ push(MemOperand(esp, 0)); // Copy return address.
- __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address.
-
- /* stack layout
- 3: entry address
- 2: return address
- 1: return address
- 0: entry address <-- esp
- */
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp.
- // Fill ebp with the right stack frame address.
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
-
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ mov(MemOperand(esp, 2 * kPointerSize),
- Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
-
- /* stack layout
- 3: old ebp
- 2: stub marker
- 1: return address
- 0: entry address <-- esp
- */
- __ ret(0); // Call the continuation without clobbering registers.
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that esi isn't trashed.
- __ push(ebp); // Caller's frame pointer.
- __ push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- __ bind(code->done());
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- frame_is_built_ = false;
- __ mov(esp, ebp);
- __ pop(ebp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
- while (masm()->pc_offset() < target_offset) {
- masm()->nop();
- }
- }
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int code) const {
- return Register::from_code(code);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int code) const {
- return XMMRegister::from_code(code);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- if (r.IsExternal()) {
- return reinterpret_cast<int32_t>(
- constant->ExternalReferenceValue().address());
- }
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<int32_t>(Smi::FromInt(value));
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasExternalReferenceValue());
- return constant->ExternalReferenceValue();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
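- // Without an eager frame the incoming arguments sit just above the return
- // address, so map the negative slot index to an esp-relative offset
- // (kPCOnStackSize skips the return address).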
- return -(index + 1) * kPointerSize + kPCOnStackSize;
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- if (op->IsRegister()) return Operand(ToRegister(op));
- if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return Operand(ebp, FrameSlotToFPOffset(op->index()));
- } else {
- // There is no eager stack frame, so retrieve the parameter relative to
- // the stack pointer.
- return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-Operand LCodeGen::HighOperand(LOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- } else {
- // There is no eager stack frame, so retrieve the parameter relative to
- // the stack pointer.
- return Operand(
- esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
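- // The captured object's field values follow the regular environment
- // values; translate each field recursively.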
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Emit a nop to signal that we don't inline smi code before these stubs
- // in the optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
- DCHECK(instr->HasPointerMap());
-
- __ CallRuntime(fun, argc, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-
- DCHECK(info()->is_calling());
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- if (!ToRegister(context).is(esi)) {
- __ mov(esi, ToRegister(context));
- }
- } else if (context->IsStackSlot()) {
- __ mov(esi, ToOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
-
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-
- DCHECK(info()->is_calling());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(
- LEnvironment* environment, Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (DeoptEveryNTimes()) {
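- // Stress mode: decrement the global stress counter and force a
- // deoptimization whenever it reaches zero, then reset it to
- // FLAG_deopt_every_n_times.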
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
- __ pushfd();
- __ push(eax);
- __ mov(eax, Operand::StaticVariable(count));
- __ sub(eax, Immediate(1));
- __ j(not_zero, &no_deopt, Label::kNear);
- if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(FLAG_deopt_every_n_times));
- __ mov(Operand::StaticVariable(count), eax);
- __ pop(eax);
- __ popfd();
- DCHECK(frame_is_built_);
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&no_deopt);
- __ mov(Operand::StaticVariable(count), eax);
- __ pop(eax);
- __ popfd();
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- Label done;
- if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- if (cc == no_condition && frame_is_built_) {
- DeoptComment(deopt_info);
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
- // We often have several deopts to the same entry; reuse the last
- // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint =
- safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ test(dividend, dividend);
- __ j(not_sign, &dividend_is_not_negative, Label::kNear);
- // Note that this is correct even for kMinInt operands.
- __ neg(dividend);
- __ and_(dividend, mask);
- __ neg(dividend);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&dividend_is_not_negative);
- __ and_(dividend, mask);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(eax));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
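- // Compute the remainder as dividend - (dividend / |divisor|) * |divisor|;
- // TruncatingDiv leaves the truncated quotient in edx.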
- __ TruncatingDiv(dividend, Abs(divisor));
- __ imul(edx, edx, Abs(divisor));
- __ mov(eax, dividend);
- __ sub(eax, edx);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ j(not_zero, &remainder_not_zero, Label::kNear);
- __ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
-
- Register left_reg = ToRegister(instr->left());
- DCHECK(left_reg.is(eax));
- Register right_reg = ToRegister(instr->right());
- DCHECK(!right_reg.is(eax));
- DCHECK(!right_reg.is(edx));
- Register result_reg = ToRegister(instr->result());
- DCHECK(result_reg.is(edx));
-
- Label done;
- // Check for x % 0; idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1; idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ cmp(left_reg, kMinInt);
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ cmp(right_reg, -1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Move(result_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
-
- // Sign extend dividend in eax into edx:eax.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
- __ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- __ jmp(&done, Label::kNear);
- __ bind(&positive_left);
- }
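- // idiv divides edx:eax by right_reg, leaving the quotient in eax and the
- // remainder in edx (the result register).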
- __ idiv(right_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
- __ Move(result, dividend);
- int32_t shift = WhichPowerOf2Abs(divisor);
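- // For a negative dividend, add a bias of |divisor| - 1 (built from the
- // sign bits via sar/shr) before the arithmetic shift so that the shift
- // rounds the quotient towards zero instead of towards negative infinity.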
- if (shift > 0) {
- // The arithmetic shift is always OK; the 'if' is an optimization only.
- if (shift > 1) __ sar(result, 31);
- __ shr(result, 32 - shift);
- __ add(result, dividend);
- __ sar(result, shift);
- }
- if (divisor < 0) __ neg(result);
-}
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(edx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
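- // Multiply the quotient back by the divisor and compare against the
- // dividend; any difference means the division was inexact, so deopt.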
- __ mov(eax, edx);
- __ imul(eax, eax, divisor);
- __ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- DCHECK(dividend.is(eax));
- DCHECK(remainder.is(edx));
- DCHECK(ToRegister(instr->result()).is(eax));
- DCHECK(!divisor.is(eax));
- DCHECK(!divisor.is(edx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ test(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ test(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmp(dividend, kMinInt);
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to edx (= remainder).
- __ cdq();
- __ idiv(divisor);
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- // Deoptimize if remainder is not 0.
- __ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- if (divisor == 1) return;
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ sar(dividend, shift);
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ sar(dividend, shift);
- return;
- }
-
- Label not_kmin_int, done;
- __ j(no_overflow, &not_kmin_int, Label::kNear);
- __ mov(dividend, Immediate(kMinInt / divisor));
- __ jmp(&done, Label::kNear);
- __ bind(&not_kmin_int);
- __ sar(dividend, shift);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(edx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
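- // When dividend and divisor have opposite signs, the truncated quotient
- // is one too large whenever the remainder is nonzero; biasing the
- // dividend one unit towards zero and decrementing the truncated quotient
- // yields the floored result in all cases.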
- Register temp = ToRegister(instr->temp3());
- DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
- Label needs_adjustment, done;
- __ cmp(dividend, Immediate(0));
- __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- __ jmp(&done, Label::kNear);
- __ bind(&needs_adjustment);
- __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
- __ TruncatingDiv(temp, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- __ dec(edx);
- __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
- DCHECK(dividend.is(eax));
- DCHECK(remainder.is(edx));
- DCHECK(result.is(eax));
- DCHECK(!divisor.is(eax));
- DCHECK(!divisor.is(edx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ test(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ test(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmp(dividend, kMinInt);
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to edx (= remainder).
- __ cdq();
- __ idiv(divisor);
-
- Label done;
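- // Truncating and flooring division agree unless the remainder is nonzero
- // and its sign differs from the divisor's; xor/sar produce -1 exactly in
- // that case, which is then added to the quotient.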
- __ test(remainder, remainder);
- __ j(zero, &done, Label::kNear);
- __ xor_(remainder, divisor);
- __ sar(remainder, 31);
- __ add(result, remainder);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->temp()), left);
- }
-
- if (right->IsConstantOperand()) {
- // Try strength reductions on the multiplication.
- // All replacement instructions are at most as long as the imul
- // and have better latency.
- int constant = ToInteger32(LConstantOperand::cast(right));
- if (constant == -1) {
- __ neg(left);
- } else if (constant == 0) {
- __ xor_(left, Operand(left));
- } else if (constant == 2) {
- __ add(left, Operand(left));
- } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // If we know that the multiplication can't overflow, it's safe to
- // use instructions that don't set the overflow flag for the
- // multiplication.
- switch (constant) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ lea(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shl(left, 2);
- break;
- case 5:
- __ lea(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shl(left, 3);
- break;
- case 9:
- __ lea(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shl(left, 4);
- break;
- default:
- __ imul(left, left, constant);
- break;
- }
- } else {
- __ imul(left, left, constant);
- }
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(left);
- }
- __ imul(left, ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ test(left, Operand(left));
- __ j(not_zero, &done, Label::kNear);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- }
- } else {
- // Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- DCHECK(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int32_t right_operand =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->representation());
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), right_operand);
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), right_operand);
- break;
- case Token::BIT_XOR:
- if (right_operand == int32_t(~0)) {
- __ not_(ToRegister(left));
- } else {
- __ xor_(ToRegister(left), right_operand);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- DCHECK(left->IsRegister());
- if (right->IsRegister()) {
- DCHECK(ToRegister(right).is(ecx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ ror_cl(ToRegister(left));
- break;
- case Token::SAR:
- __ sar_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shr_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case Token::SHL:
- __ shl_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
- } else {
- __ ror(ToRegister(left), shift_count);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sar(ToRegister(left), shift_count);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ shr(ToRegister(left), shift_count);
- } else if (instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- if (instr->hydrogen_value()->representation().IsSmi() &&
- instr->can_deopt()) {
- if (shift_count != 1) {
- __ shl(ToRegister(left), shift_count - 1);
- }
- __ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- } else {
- __ shl(ToRegister(left), shift_count);
- }
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ sub(ToOperand(left),
- ToImmediate(right, instr->hydrogen()->representation()));
- } else {
- __ sub(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Move(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Move(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- uint64_t const bits = instr->bits();
- uint32_t const lower = static_cast<uint32_t>(bits);
- uint32_t const upper = static_cast<uint32_t>(bits >> 32);
- DCHECK(instr->result()->IsDoubleRegister());
-
- XMMRegister result = ToDoubleRegister(instr->result());
- if (bits == 0u) {
- __ xorps(result, result);
- } else {
- Register temp = ToRegister(instr->temp());
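- // ia32 cannot materialize a 64-bit immediate directly, so build the
- // double from its two 32-bit halves: insert the upper half with pinsrd
- // (SSE4.1) or shift it into place and OR in the lower half.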
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope2(masm(), SSE4_1);
- if (lower != 0) {
- __ Move(temp, Immediate(lower));
- __ movd(result, Operand(temp));
- __ Move(temp, Immediate(upper));
- __ pinsrd(result, Operand(temp), 1);
- } else {
- __ xorps(result, result);
- __ Move(temp, Immediate(upper));
- __ pinsrd(result, Operand(temp), 1);
- }
- } else {
- __ Move(temp, Immediate(upper));
- __ movd(result, Operand(temp));
- __ psllq(result, 32);
- if (lower != 0u) {
- XMMRegister xmm_scratch = double_scratch0();
- __ Move(temp, Immediate(lower));
- __ movd(xmm_scratch, Operand(temp));
- __ orps(result, xmm_scratch);
- }
- }
- }
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Register reg = ToRegister(instr->result());
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ LoadObject(reg, object);
-}
-
-
-Operand LCodeGen::BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToRepresentation(LConstantOperand::cast(index),
- Representation::Integer32());
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldOperand(string, SeqString::kHeaderSize + offset);
- }
- return FieldOperand(
- string, ToRegister(index),
- encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
- SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register result = ToRegister(instr->result());
- Register string = ToRegister(instr->string());
-
- if (FLAG_debug_code) {
- __ push(string);
- __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
-
- __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, kUnexpectedStringType);
- __ pop(string);
- }
-
- Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movzx_b(result, operand);
- } else {
- __ movzx_w(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
-
- if (FLAG_debug_code) {
- Register value = ToRegister(instr->value());
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
- }
-
- Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (instr->value()->IsConstantOperand()) {
- int value = ToRepresentation(LConstantOperand::cast(instr->value()),
- Representation::Integer32());
- DCHECK_LE(0, value);
- if (encoding == String::ONE_BYTE_ENCODING) {
- DCHECK_LE(value, String::kMaxOneByteCharCode);
- __ mov_b(operand, static_cast<int8_t>(value));
- } else {
- DCHECK_LE(value, String::kMaxUtf16CodeUnit);
- __ mov_w(operand, static_cast<int16_t>(value));
- }
- } else {
- Register value = ToRegister(instr->value());
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ mov_b(operand, value);
- } else {
- __ mov_w(operand, value);
- }
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
-
- if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
- if (right->IsConstantOperand()) {
- int32_t offset = ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->representation());
- __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
- } else {
- Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- __ lea(ToRegister(instr->result()), address);
- }
- } else {
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left),
- ToImmediate(right, instr->hydrogen()->representation()));
- } else {
- __ add(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- if (right->IsConstantOperand()) {
- Operand left_op = ToOperand(left);
- Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
- instr->hydrogen()->representation());
- __ cmp(left_op, immediate);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_op, immediate);
- } else {
- Register left_reg = ToRegister(left);
- Operand right_op = ToOperand(right);
- __ cmp(left_reg, right_op);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- XMMRegister left_reg = ToDoubleRegister(left);
- XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
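- // The minimum of +0 and -0 is -0, so OR-ing the bit patterns propagates
- // the sign bit.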
- __ orpd(left_reg, right_reg);
- } else {
- // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- __ addsd(left_reg, right_reg);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
- __ bind(&return_right);
- __ movaps(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vaddsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ addsd(left, right);
- }
- break;
- case Token::SUB:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vsubsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ subsd(left, right);
- }
- break;
- case Token::MUL:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vmulsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ mulsd(left, right);
- }
- break;
- case Token::DIV:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vdivsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ divsd(left, right);
- }
- // Don't delete this mov. It may improve performance on some CPUs
- // when there is a (v)mulsd depending on the result.
- __ movaps(result, result);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), left);
- __ movsd(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(edx));
- DCHECK(ToRegister(instr->right()).is(eax));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
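- // Fall through whenever one of the targets is the next emitted block and
- // emit a branch only on the other edge.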
- if (right_block == left_block || cc == no_condition) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
- int true_block = instr->TrueDestination(chunk_);
- if (cc == no_condition) {
- __ jmp(chunk_->GetAssemblyLabel(true_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(true_block));
- }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
- int false_block = instr->FalseDestination(chunk_);
- if (cc == no_condition) {
- __ jmp(chunk_->GetAssemblyLabel(false_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(false_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32()) {
- Register reg = ToRegister(instr->value());
- __ test(reg, Operand(reg));
- EmitBranch(instr, not_zero);
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- XMMRegister reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(reg, xmm_scratch);
- EmitBranch(instr, not_equal);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ cmp(reg, factory()->true_value());
- EmitBranch(instr, equal);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- __ test(reg, Operand(reg));
- EmitBranch(instr, not_equal);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, no_condition);
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- EmitBranch(instr, not_equal);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- EmitBranch(instr, not_equal);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // true -> true.
- __ cmp(reg, factory()->true_value());
- __ j(equal, instr->TrueLabel(chunk_));
- // false -> false.
- __ cmp(reg, factory()->false_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ cmp(reg, factory()->null_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all others -> true.
- __ test(reg, Operand(reg));
- __ j(equal, instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
- }
-
- Register map = no_reg; // Keep the compiler happy.
- if (expected & ToBooleanHint::kNeedsMap) {
- map = ToRegister(instr->temp());
- DCHECK(!map.is(reg));
- __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, instr->FalseLabel(chunk_));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, instr->TrueLabel(chunk_));
- __ jmp(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CmpInstanceType(map, SYMBOL_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, instr->FalseLabel(chunk_));
- __ jmp(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = not_equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->is_double() ||
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cc = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ j(parity_even, instr->FalseLabel(chunk_));
- } else {
- if (right->IsConstantOperand()) {
- __ cmp(ToOperand(left),
- ToImmediate(right, instr->hydrogen()->representation()));
- } else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right),
- ToImmediate(left, instr->hydrogen()->representation()));
- // We commuted the operands, so commute the condition.
- cc = CommuteCondition(cc);
- } else {
- __ cmp(ToRegister(left), ToOperand(right));
- }
- }
- EmitBranch(instr, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- if (instr->right()->IsConstantOperand()) {
- Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
- __ CmpObject(left, right);
- } else {
- Operand right = ToOperand(instr->right());
- __ cmp(left, right);
- }
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ cmp(input_reg, factory()->the_hole_value());
- EmitBranch(instr, equal);
- return;
- }
-
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ ucomisd(input_reg, input_reg);
- EmitFalseBranch(instr, parity_odd);
-
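- // The hole is a NaN with a distinguished bit pattern; spill the value and
- // compare its upper 32 bits against kHoleNanUpper32.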
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(MemOperand(esp, 0), input_reg);
-
- __ add(esp, Immediate(kDoubleSize));
- int offset = sizeof(kHoleNanUpper32);
- __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
- EmitBranch(instr, equal);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
-
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- Condition true_cond = EmitIsString(
- reg, temp, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->value());
-
- __ test(input, Immediate(kSmiTagMask));
- EmitBranch(instr, zero);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(instr, not_zero);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(edx));
- DCHECK(ToRegister(instr->right()).is(eax));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(eax, Heap::kTrueValueRootIndex);
- EmitBranch(instr, equal);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
- __ JumpIfSmi(input, is_false);
-
- __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ j(above_equal, is_true);
- } else {
- __ j(above_equal, is_false);
- }
-
- // The object's instance type is below FIRST_FUNCTION_TYPE, so it is not
- // a function. Check if the constructor in the map is a function.
- __ GetMapConstructor(temp, temp, temp2);
- // Objects with a non-function constructor have class 'Object'.
- __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
- if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ mov(temp,
- FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, equal);
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = ToRegister(instr->scratch());
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ test(object, Immediate(kSmiTagMask));
- EmitFalseBranch(instr, zero);
- }
-
- // Loop through the {object}'s prototype chain looking for the {prototype}.
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
- // Deoptimize for proxies.
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
-
- __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, factory()->null_value());
- EmitFalseBranch(instr, equal);
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, equal);
- __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
- __ jmp(&loop);
-}
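A minimal C++ sketch of the loop above -- not V8 code; JSObjectLike, its prototype field, and HasInPrototypeChain are hypothetical stand-ins -- showing the prototype-chain walk: answer true when {prototype} is found, false once null terminates the chain. The access-check and proxy deoptimizations are elided.

struct JSObjectLike {
  const JSObjectLike* prototype;  // null terminates the chain
};

bool HasInPrototypeChain(const JSObjectLike* object,
                         const JSObjectLike* prototype) {
  for (const JSObjectLike* p = object->prototype; p != nullptr;
       p = p->prototype) {
    if (p == prototype) return true;
  }
  return false;
}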
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(condition, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-void LCodeGen::EmitReturn(LReturn* instr) {
- int extra_value_count = 1;
-
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiUntag(reg);
- Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
-
- // emit code to restore stack based on instr->parameter_count()
- __ pop(return_addr_reg); // save return address
- __ shl(reg, kPointerSizeLog2);
- __ add(esp, reg);
- __ jmp(return_addr_reg);
- }
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime call
- // to return the value in the same register. We're leaving the code
- // managed by the register allocator and tearing down the frame, it's
- // safe to write to the context register.
- __ push(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) RestoreCallerDoubles();
- if (NeedsEagerFrame()) {
- __ mov(esp, ebp);
- __ pop(ebp);
- }
-
- EmitReturn(instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, ContextOperand(context, instr->slot_index()));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ mov(result, factory()->undefined_value());
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Label skip_assignment;
-
- Operand target = ContextOperand(context, instr->slot_index());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(target, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- } else {
- __ j(not_equal, &skip_assignment, Label::kNear);
- }
- }
-
- __ mov(target, value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Register temp = ToRegister(instr->temp());
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = instr->object()->IsConstantOperand()
- ? MemOperand::StaticVariable(ToExternalReference(
- LConstantOperand::cast(instr->object())))
- : MemOperand(ToRegister(instr->object()), offset);
- __ Load(result, operand, access.representation());
- return;
- }
-
- Register object = ToRegister(instr->object());
- if (instr->hydrogen()->representation().IsDouble()) {
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, FieldOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
- __ Load(result, FieldOperand(object, offset), access.representation());
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- DCHECK(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- AllowDeferredHandleDereference smi_check;
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
- } else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
- } else {
- __ push(ToOperand(operand));
- }
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, temp);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ mov(result, Operand(arguments, index * kPointerSize));
- } else {
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
- // There are two words between the frame pointer and the last argument.
- // Subtracting the index from the length accounts for one of them; add one
- // more for the other.
- __ sub(length, index);
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- instr->base_offset()));
- if (elements_kind == FLOAT32_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ movsx_b(result, operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ movzx_b(result, operand);
- break;
- case INT16_ELEMENTS:
- __ movsx_w(result, operand);
- break;
- case UINT16_ELEMENTS:
- __ movzx_w(result, operand);
- break;
- case INT32_ELEMENTS:
- __ mov(result, operand);
- break;
- case UINT32_ELEMENTS:
- __ mov(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ test(result, Operand(result));
- DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset() + sizeof(kHoleNanLower32));
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset());
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, double_load_operand);
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ mov(result,
- BuildFastArrayOperand(instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS, instr->base_offset()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
- } else {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ cmp(result, factory()->the_hole_value());
- __ j(not_equal, &done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid.
- // Otherwise it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
- }
- __ mov(result, isolate()->factory()->undefined_value());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t base_offset) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int element_shift_size = ElementsKindToShiftSize(elements_kind);
- int shift_size = element_shift_size;
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- return Operand(elements_pointer_reg,
- ((constant_value) << shift_size)
- + base_offset);
- } else {
- // Take the tag bit into account while computing the shift size.
- if (key_representation.IsSmi() && (shift_size >= 1)) {
- shift_size -= kSmiTagSize;
- }
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- base_offset);
- }
-}
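As a hedged illustration of the operand built above (not V8 code; ElementByteOffset is a hypothetical helper): the element's byte offset is base_offset plus the key shifted by the element size, and a Smi-tagged key already carries a factor of two in its tag bit, so the scale is reduced by kSmiTagSize rather than untagging the key first.

#include <cstdint>

uint32_t ElementByteOffset(uint32_t key, bool key_is_smi_tagged,
                           int element_shift_size, uint32_t base_offset) {
  int shift = element_shift_size;
  // On ia32 kSmiTagSize == 1; the tag bit already multiplies the key by two.
  if (key_is_smi_tagged && shift >= 1) shift -= 1;
  return base_offset + (key << shift);
}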
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(esp, -2 * kPointerSize));
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result,
- Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(Operand(result),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
- } else {
- __ mov(result, Operand(ebp));
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ cmp(ebp, elem);
- __ mov(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label receiver_ok, global_object;
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- Register scratch = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions.
- __ mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
-
- // Do not transform the receiver to object for builtins.
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object, dist);
- __ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object, dist);
-
- // The receiver should be a JS object.
- __ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
- __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
- DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ jmp(&receiver_ok, dist);
- __ bind(&global_object);
- __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- DCHECK(receiver.is(eax)); // Used for parameter count.
- DCHECK(function.is(edi)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
-
- __ push(receiver);
- __ mov(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ test(length, Operand(length));
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ dec(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(eax);
- // It is safe to use ebx, ecx and edx as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) ebx (expected arguments count) and edx (new.target) will be
- // initialized below.
- PrepareForTailCall(actual, ebx, ecx, edx);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(eax);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ int3();
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in esi.
- DCHECK(result.is(esi));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- __ push(Immediate(instr->hydrogen()->declarations()));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- __ push(Immediate(instr->hydrogen()->feedback_vector()));
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = edi;
-
- if (can_invoke_directly) {
- // Change context.
- __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ mov(edx, factory()->undefined_value());
- __ mov(eax, arity);
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function directly.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
- if (is_tail_call) {
- __ jmp(target);
- } else {
- __ call(target);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- // We need to adapt arguments.
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(eax));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ leave();
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ jmp(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(target);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(Operand(target)));
- __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(target);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
-
- Label slow, allocated, done;
- uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
- available_regs &= ~input_reg.bit();
- if (instr->context()->IsRegister()) {
- // Make sure that the context isn't overwritten in the AllocateHeapNumber
- // macro below.
- available_regs &= ~ToRegister(instr->context()).bit();
- }
-
- Register tmp =
- Register::from_code(base::bits::CountTrailingZeros32(available_regs));
- available_regs &= ~tmp.bit();
- Register tmp2 =
- Register::from_code(base::bits::CountTrailingZeros32(available_regs));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done, Label::kNear);
-
- __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated, Label::kNear);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
- instr, instr->context());
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(eax)) __ mov(tmp, eax);
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(tmp2, ~HeapNumber::kSignMask);
- __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- DCHECK(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- XMMRegister scratch = double_scratch0();
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ andps(input_reg, scratch);
- } else if (r.IsSmiOrInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
- XMMRegister output_reg = ToDoubleRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- CpuFeatureScope scope(masm(), SSE4_1);
- __ roundsd(output_reg, input_reg, kRoundDown);
-}
-
-void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
- XMMRegister xmm_scratch = double_scratch0();
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope(masm(), SSE4_1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Deoptimize on negative zero.
- Label non_zero;
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- __ j(not_equal, &non_zero, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- __ bind(&non_zero);
- }
- __ roundsd(xmm_scratch, input_reg, kRoundDown);
- __ cvttsd2si(output_reg, Operand(xmm_scratch));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- } else {
- Label negative_sign, done;
- // Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
- __ j(below, &negative_sign, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- Label positive_sign;
- __ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- __ Move(output_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- __ bind(&positive_sign);
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, Operand(input_reg));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- __ jmp(&done, Label::kNear);
-
- // Non-zero negative reaches here.
- __ bind(&negative_sign);
- // Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, Operand(input_reg));
- __ Cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-
- __ bind(&done);
- }
-}
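A hedged sketch of the non-SSE4.1 path above (not V8 code; FloorViaTruncation is a hypothetical helper): cvttsd2si truncates toward zero, which already equals floor() for non-negative input, so negative non-integral inputs are compensated downward by one after a compare. The NaN, minus-zero, and overflow deopts are omitted.

#include <cstdint>

int32_t FloorViaTruncation(double x) {
  int32_t truncated = static_cast<int32_t>(x);  // like cvttsd2si: toward zero
  // Toward-zero rounding overshoots floor() by one for negative non-integers.
  if (x < 0.0 && static_cast<double>(truncated) != x) truncated -= 1;
  return truncated;
}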
-
-void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
- XMMRegister xmm_scratch = double_scratch0();
- XMMRegister output_reg = ToDoubleRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- CpuFeatureScope scope(masm(), SSE4_1);
- Label done;
- __ roundsd(output_reg, input_reg, kRoundUp);
- __ Move(xmm_scratch, -0.5);
- __ addsd(xmm_scratch, output_reg);
- __ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &done, Label::kNear);
- __ Move(xmm_scratch, 1.0);
- __ subsd(output_reg, xmm_scratch);
- __ bind(&done);
-}
-
-void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- XMMRegister input_temp = ToDoubleRegister(instr->temp());
- ExternalReference one_half = ExternalReference::address_of_one_half();
- ExternalReference minus_one_half =
- ExternalReference::address_of_minus_one_half();
-
- Label done, round_to_zero, below_one_half, do_not_compensate;
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-
- __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
- __ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half, Label::kNear);
-
- // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, Operand(xmm_scratch));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- __ jmp(&done, dist);
-
- __ bind(&below_one_half);
- __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
- __ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero, Label::kNear);
-
- // CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then
- // compare and compensate.
- __ movaps(input_temp, input_reg); // Do not alter input_reg.
- __ subsd(input_temp, xmm_scratch);
- __ cvttsd2si(output_reg, Operand(input_temp));
- // Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-
- __ Cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done, dist);
- __ sub(output_reg, Immediate(1));
- // No overflow because we already ruled out minint.
- __ jmp(&done, dist);
-
- __ bind(&round_to_zero);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // If the sign is positive, we return +0.
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ Move(output_reg, Immediate(0));
- __ bind(&done);
-}
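A hedged sketch of the rounding identity used above (not V8 code; RoundHalfUp is a hypothetical helper): Math.round(x) is computed as floor(x + 0.5), realized with a truncating conversion plus a one-off compensation for negative inputs. The minint overflow and minus-zero deopts are omitted.

#include <cstdint>

int32_t RoundHalfUp(double x) {
  double shifted = x + 0.5;
  int32_t truncated = static_cast<int32_t>(shifted);  // truncates toward zero
  // Truncation lands one above floor() for negative non-integral values.
  if (static_cast<double>(truncated) > shifted) truncated -= 1;
  return truncated;
}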
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister output_reg = ToDoubleRegister(instr->result());
- __ cvtsd2ss(output_reg, input_reg);
- __ cvtss2sd(output_reg, output_reg);
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- Operand input = ToOperand(instr->value());
- XMMRegister output = ToDoubleRegister(instr->result());
- __ sqrtsd(output, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = double_scratch0();
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. According to IEEE-754, single-precision
- // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000);
- __ movd(xmm_scratch, scratch);
- __ cvtss2sd(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &sqrt, Label::kNear);
- __ j(carry, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
- __ bind(&done);
-}
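A hedged sketch of the semantics implemented above (not V8 code; PowHalf is a hypothetical helper): ECMA-262 requires Math.pow(-Infinity, 0.5) to be +Infinity even though sqrt(-Infinity) is NaN, and -0 must map to +0, so the -Infinity base is filtered out before taking the square root.

#include <cmath>
#include <limits>

double PowHalf(double base) {
  if (base == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  return std::sqrt(base + 0.0);  // adding +0 folds -0 into +0 before sqrt
}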
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(xmm1));
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
- DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- DCHECK(!ecx.is(tagged_exponent));
- __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), input);
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ Lzcnt(result, input);
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), input);
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), input);
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), input);
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(ebp, scratch2);
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count.
- __ mov(caller_args_count_reg,
- Immediate(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
- ReturnAddressState::kNotOnStack, 0);
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use ebx, ecx and edx as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) ebx (expected arguments count) and edx (new.target) will be
- // initialized below.
- PrepareForTailCall(actual, ebx, ecx, edx);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(edi, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->constructor()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- __ Move(eax, Immediate(instr->arity()));
- __ mov(ebx, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
- // A single argument is the array length; a non-zero length creates holes,
- // so look at the first argument to choose between packed and holey stubs.
- __ mov(ecx, Operand(esp, 0));
- __ test(ecx, ecx);
- __ j(zero, &packed_case, Label::kNear);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ jmp(&done, Label::kNear);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->hydrogen()->field_representation();
-
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- MemOperand operand = instr->object()->IsConstantOperand()
- ? MemOperand::StaticVariable(
- ToExternalReference(LConstantOperand::cast(instr->object())))
- : MemOperand(ToRegister(instr->object()), offset);
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- __ mov(operand, Immediate(ToInteger32(operand_value)));
- } else {
- Register value = ToRegister(instr->value());
- __ Store(value, operand, representation);
- }
- return;
- }
-
- Register object = ToRegister(instr->object());
- __ AssertNotSmi(object);
-
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
- if (representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(object, offset), value);
- return;
- }
-
- if (instr->hydrogen()->has_transition()) {
- Handle<Map> transition = instr->hydrogen()->transition_map();
- AddDeprecationDependency(transition);
- __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- Register temp_map = ToRegister(instr->temp_map());
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register write_register = object;
- if (!access.IsInobject()) {
- write_register = ToRegister(instr->temp());
- __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
- }
-
- MemOperand operand = FieldOperand(write_register, offset);
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (operand_value->IsRegister()) {
- Register value = ToRegister(operand_value);
- __ Store(value, operand, representation);
- } else if (representation.IsInteger32() || representation.IsExternal()) {
- Immediate immediate = ToImmediate(operand_value, representation);
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(operand, immediate);
- } else {
- Handle<Object> handle_value = ToHandle(operand_value);
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(operand, handle_value);
- }
- } else {
- Register value = ToRegister(instr->value());
- __ Store(value, operand, representation);
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register value = ToRegister(instr->value());
- Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(write_register,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
- if (instr->index()->IsConstantOperand()) {
- __ cmp(ToOperand(instr->length()),
- ToImmediate(LConstantOperand::cast(instr->index()),
- instr->hydrogen()->length()->representation()));
- cc = CommuteCondition(cc);
- } else if (instr->length()->IsConstantOperand()) {
- __ cmp(ToOperand(instr->index()),
- ToImmediate(LConstantOperand::cast(instr->length()),
- instr->hydrogen()->index()->representation()));
- } else {
- __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- instr->base_offset()));
- if (elements_kind == FLOAT32_ELEMENTS) {
- XMMRegister xmm_scratch = double_scratch0();
- __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm_scratch);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value = ToRegister(instr->value());
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ mov_b(operand, value);
- break;
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- __ mov_w(operand, value);
- break;
- case UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- __ mov(operand, value);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset());
-
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- XMMRegister xmm_scratch = double_scratch0();
- // Turn potential sNaN value into qNaN.
- __ xorps(xmm_scratch, xmm_scratch);
- __ subsd(value, xmm_scratch);
- }
-
- __ movsd(double_store_operand, value);
-}
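A hedged sketch of the canonicalization above (not V8 code; CanonicalizeNaN is a hypothetical helper): subtracting +0 leaves every ordinary double, including -0, unchanged, but any arithmetic on a signaling NaN yields a quiet NaN, which is what the xorps/subsd pair achieves.

double CanonicalizeNaN(double value) {
  return value - 0.0;  // identity for ordinary values; an sNaN becomes a qNaN
}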
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
- Operand operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- instr->base_offset());
- if (instr->value()->IsRegister()) {
- __ mov(operand, ToRegister(instr->value()));
- } else {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsSmi(operand_value)) {
- Immediate immediate = ToImmediate(operand_value, Representation::Smi());
- __ mov(operand, immediate);
- } else {
- DCHECK(!IsInteger32(operand_value));
- Handle<Object> handle_value = ToHandle(operand_value);
- __ mov(operand, handle_value);
- }
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- DCHECK(instr->value()->IsRegister());
- Register value = ToRegister(instr->value());
- DCHECK(!instr->key()->IsConstantOperand());
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ lea(key, operand);
- __ RecordWrite(elements,
- key,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed,
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // Dispatch by case: external (typed array), fast double, or fast elements.
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = eax;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ jmp(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ cmp(ToOperand(current_capacity), Immediate(constant_key));
- __ j(less_equal, deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ cmp(ToRegister(key), Immediate(constant_capacity));
- __ j(greater_equal, deferred->entry());
- } else {
- __ cmp(ToRegister(key), ToRegister(current_capacity));
- __ j(greater_equal, deferred->entry());
- }
-
- __ mov(result, ToOperand(instr->elements()));
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = eax;
- __ Move(result, Immediate(0));
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ Move(result, ToRegister(instr->object()));
- } else {
- __ mov(result, ToOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ mov(ebx, Immediate(Smi::FromInt(int_key)));
- } else {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- Label is_smi;
- __ Move(ebx, ToRegister(key));
- __ SmiTag(ebx);
- // Deopt if the key is outside Smi range. The stub expects Smi and would
- // bump the elements into dictionary mode (and trigger a deopt) anyway.
- __ j(no_overflow, &is_smi);
- __ PopSafepointRegisters();
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_smi);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- bool is_simple_map_transition =
- IsSimpleMapChangeTransition(from_kind, to_kind);
- Label::Distance branch_distance =
- is_simple_map_transition ? Label::kNear : Label::kFar;
- __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable, branch_distance);
- if (is_simple_map_transition) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(to_map));
- // Write barrier.
- DCHECK_NOT_NULL(instr->temp());
- __ RecordWriteForMap(object_reg, to_map, new_map_reg,
- ToRegister(instr->temp()),
- kDontSaveFPRegs);
- } else {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(object_reg.is(eax));
- PushSafepointRegistersScope scope(this);
- __ mov(ebx, to_map);
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen,
- LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- factory(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
- Representation::Smi());
- __ push(immediate);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
- instr, instr->context());
- __ AssertSmi(eax);
- __ SmiUntag(eax);
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen,
- LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- DCHECK(!char_code.is(result));
-
- __ cmp(char_code, String::kMaxOneByteCharCode);
- __ j(above, deferred->entry());
- __ Move(result, Immediate(factory()->single_character_string_cache()));
- __ mov(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, factory()->undefined_value());
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(edx));
- DCHECK(ToRegister(instr->right()).is(eax));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- DCHECK(output->IsDoubleRegister());
- __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen,
- LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(
- instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagI* deferred =
- new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTag(reg);
- __ j(overflow, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(
- instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred =
- new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ SmiTag(reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp,
- IntegerSignedness signedness) {
- Label done, slow;
- Register reg = ToRegister(value);
- Register tmp = ToRegister(temp);
- XMMRegister xmm_scratch = double_scratch0();
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- __ Cvtsi2sd(xmm_scratch, Operand(reg));
- } else {
- __ LoadUint32(xmm_scratch, reg);
- }
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ Move(reg, Immediate(0));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(esi)) {
- __ Move(esi, Immediate(0));
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, eax);
- }
-
- // Done. Put the value in xmm_scratch into the value of the allocated heap
- // number.
- __ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- Register reg = ToRegister(instr->result());
-
- DeferredNumberTagD* deferred =
- new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- Register tmp = ToRegister(instr->temp());
- __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(esi)) {
- __ Move(esi, Immediate(0));
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, eax);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
- }
- __ SmiTag(input);
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->value();
- Register result = ToRegister(input);
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- if (instr->needs_check()) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
- } else {
- __ AssertSmi(result);
- }
- __ SmiUntag(result);
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- Register temp_reg, XMMRegister result_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Label convert, load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (can_convert_undefined_to_nan) {
- __ j(not_equal, &convert, Label::kNear);
- } else {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- }
-
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(result_reg, xmm_scratch);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
-
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
-
- // Convert undefined to NaN.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr,
- DeoptimizeReason::kNotAHeapNumberUndefined);
-
- __ xorpd(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
- }
- } else {
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- __ bind(&load_smi);
- // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
- // input register since we avoid dependencies.
- __ mov(temp_reg, input_reg);
- __ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ Cvtsi2sd(result_reg, Operand(temp_reg));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
- Register input_reg = ToRegister(instr->value());
-
- // The input was optimistically untagged; revert it.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
-
- if (instr->truncating()) {
- Label truncate;
- Label::Distance truncate_distance =
- DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &truncate, truncate_distance);
- __ push(input_reg);
- __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
- __ pop(input_reg);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ bind(&truncate);
- __ TruncateHeapNumberToI(input_reg, input_reg);
- } else {
- XMMRegister scratch = ToDoubleRegister(instr->temp());
- DCHECK(!scratch.is(xmm0));
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ Cvtsi2sd(scratch, Operand(input_reg));
- __ ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
- if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- }
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register input_reg = ToRegister(input);
- DCHECK(input_reg.is(ToRegister(instr->result())));
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ SmiUntag(input_reg);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ j(carry, deferred->entry());
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* temp = instr->temp();
- DCHECK(temp->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- Register temp_reg = ToRegister(temp);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsDoubleRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsRegister());
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- XMMRegister input_reg = ToDoubleRegister(input);
- __ TruncateDoubleToI(result_reg, input_reg);
- } else {
- Label lost_precision, is_nan, minus_zero, done;
- XMMRegister input_reg = ToDoubleRegister(input);
- XMMRegister xmm_scratch = double_scratch0();
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
- &is_nan, &minus_zero, dist);
- __ jmp(&done, dist);
- __ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsDoubleRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsRegister());
- Register result_reg = ToRegister(result);
-
- Label lost_precision, is_nan, minus_zero, done;
- XMMRegister input_reg = ToDoubleRegister(input);
- XMMRegister xmm_scratch = double_scratch0();
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
- &minus_zero, dist);
- __ jmp(&done, dist);
- __ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- __ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = ToRegister(instr->scratch());
-
- __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
- __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
- Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
- DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- DeoptimizeReason::kWrongInstanceType);
- } else {
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ and_(temp, mask);
- __ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- if (instr->hydrogen()->object_in_new_space()) {
- Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ cmp(reg, Operand::ForCell(cell));
- } else {
- Operand operand = ToOperand(instr->value());
- __ cmp(operand, object);
- }
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Label deopt, done;
- // If the map is not deprecated the migration attempt does not make sense.
- __ push(object);
- __ mov(object, FieldOperand(object, HeapObject::kMapOffset));
- __ test(FieldOperand(object, Map::kBitField3Offset),
- Immediate(Map::Deprecated::kMask));
- __ pop(object);
- __ j(zero, &deopt);
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-
- __ test(eax, Immediate(kSmiTagMask));
- }
- __ j(not_zero, &done);
-
- __ bind(&deopt);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(reg, map);
- __ j(equal, &success, Label::kNear);
- }
-
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(reg, map);
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ j(not_equal, deferred->entry());
- } else {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
- XMMRegister xmm_scratch = double_scratch0();
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- DCHECK(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- DCHECK(instr->unclamped()->Equals(instr->result()));
- Register input_reg = ToRegister(instr->unclamped());
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
- XMMRegister xmm_scratch = double_scratch0();
- Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ SmiUntag(input_reg);
- __ ClampUint8(input_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ mov(temp, (size / kPointerSize) - 1);
- } else {
- temp = ToRegister(instr->size());
- __ shr(temp, kPointerSizeLog2);
- __ dec(temp);
- }
- Label loop;
- __ bind(&loop);
- __ mov(FieldOperand(result, temp, times_pointer_size, 0),
- isolate()->factory()->one_pointer_filler_map());
- __ dec(temp);
- __ j(not_zero, &loop);
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, temp, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, temp, flags);
- }
-}
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Immediate(Smi::kZero));
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(ToRegister(instr->size()));
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size >= 0 && size <= Smi::kMaxValue) {
- __ push(Immediate(Smi::FromInt(size)));
- } else {
- // We should never get here at runtime => abort
- __ int3();
- return;
- }
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ push(Immediate(Smi::FromInt(flags)));
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- // If the allocation folding dominator allocate triggered a GC, allocation
- // happened in the runtime. We have to reset the top pointer to virtually
- // undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- __ sub(eax, Immediate(kHeapObjectTag));
- __ mov(Operand::StaticVariable(allocation_top), eax);
- __ add(eax, Immediate(kHeapObjectTag));
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->value()).is(ebx));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ mov(eax, Immediate(isolate()->factory()->number_string()));
- __ jmp(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Condition final_branch_condition = EmitTypeofIs(instr, input);
- if (final_branch_condition != no_condition) {
- EmitBranch(instr, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Handle<String> type_name = instr->type_literal();
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
- int next_block = GetNextEmittedBlock();
-
- Label::Distance true_distance = left_block == next_block ? Label::kNear
- : Label::kFar;
- Label::Distance false_distance = right_block == next_block ? Label::kNear
- : Label::kFar;
- Condition final_branch_condition = no_condition;
- if (String::Equals(type_name, factory()->number_string())) {
- __ JumpIfSmi(input, true_label, true_distance);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- factory()->heap_number_map());
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->string_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- final_branch_condition = below;
-
- } else if (String::Equals(type_name, factory()->symbol_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->boolean_string())) {
- __ cmp(input, factory()->true_value());
- __ j(equal, true_label, true_distance);
- __ cmp(input, factory()->false_value());
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->undefined_string())) {
- __ cmp(input, factory()->null_value());
- __ j(equal, false_label, false_distance);
- __ JumpIfSmi(input, false_label, false_distance);
- // Check for undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (String::Equals(type_name, factory()->function_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- // Check for callable and not undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
- __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
- __ cmp(input, 1 << Map::kIsCallable);
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->object_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ cmp(input, factory()->null_value());
- __ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
- __ j(below, false_label, false_distance);
- // Check for callable or undetectable objects => false.
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- final_branch_condition = zero;
- } else {
- __ jmp(false_label, false_distance);
- }
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
- DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done, Label::kNear);
-
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(esi));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Immediate(Smi::kZero));
- __ j(not_equal, &load_cache, Label::kNear);
- __ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ mov(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- __ test(result, result);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmp(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ push(index);
- __ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(object, eax);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register object,
- Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, object, index);
-
- Label out_of_object, done;
- __ test(index, Immediate(Smi::FromInt(1)));
- __ j(not_zero, deferred->entry());
-
- __ sar(index, 1);
-
- __ cmp(index, Immediate(0));
- __ j(less, &out_of_object, Label::kNear);
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ neg(index);
- // Index is now equal to out of object property index plus 1.
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
deleted file mode 100644
index 133b8b99a0..0000000000
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "src/ast/scopes.h"
-#include "src/base/logging.h"
-#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
-#include "src/crankshaft/ia32/lithium-ia32.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class LGapNode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Operand ToOperand(LOperand* op) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Immediate ToImmediate(LOperand* op, const Representation& r) const {
- return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
- }
- double ToDouble(LConstantOperand* op) const;
-
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // The operand denoting the second word (the one with a higher address) of
- // a double stack slot.
- Operand HighOperand(LOperand* op);
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register object,
- Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- void EnsureRelocSpaceForDeoptimization();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- XMMRegister double_scratch0() const { return xmm0; }
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- void GenerateBodyInstructionPost(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void LoadContextFromDeferred(LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason);
-
- bool DeoptEveryNTimes() {
- return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
- }
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- ExternalReference ToExternalReference(LConstantOperand* op) const;
-
- Operand BuildFastArrayOperand(LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t base_offset);
-
- Operand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition cc);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition cc);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
- XMMRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- void EmitReturn(LReturn* instr);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- friend class LGapResolver;
-
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write an arbitrary value to each page in range
- // esp + offset - page_size .. esp in turn.
- void MakeSureStackPagesMapped(int offset);
-#endif
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- DCHECK(codegen_->info()->is_calling());
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- Label done_;
- int instruction_index_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
deleted file mode 100644
index be8251cffb..0000000000
--- a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
+++ /dev/null
@@ -1,490 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
-#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
-#include "src/register-configuration.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- source_uses_(),
- destination_uses_(),
- spilled_register_(-1) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(HasBeenReset());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- Finish();
- DCHECK(HasBeenReset());
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) AddMove(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved on the side.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- RemoveMove(index);
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::AddMove(LMoveOperands move) {
- LOperand* source = move.source();
- if (source->IsRegister()) ++source_uses_[source->index()];
-
- LOperand* destination = move.destination();
- if (destination->IsRegister()) ++destination_uses_[destination->index()];
-
- moves_.Add(move, cgen_->zone());
-}
-
-
-void LGapResolver::RemoveMove(int index) {
- LOperand* source = moves_[index].source();
- if (source->IsRegister()) {
- --source_uses_[source->index()];
- DCHECK(source_uses_[source->index()] >= 0);
- }
-
- LOperand* destination = moves_[index].destination();
- if (destination->IsRegister()) {
- --destination_uses_[destination->index()];
- DCHECK(destination_uses_[destination->index()] >= 0);
- }
-
- moves_[index].Eliminate();
-}
-
-
-int LGapResolver::CountSourceUses(LOperand* operand) {
- int count = 0;
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
- ++count;
- }
- }
- return count;
-}
-
-
-Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : reg.code();
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
- int code = config->GetAllocatableGeneralCode(i);
- if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
- code != skip_index) {
- return Register::from_code(code);
- }
- }
- return no_reg;
-}
-
-
-bool LGapResolver::HasBeenReset() {
- if (!moves_.is_empty()) return false;
- if (spilled_register_ >= 0) return false;
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
- int code = config->GetAllocatableGeneralCode(i);
- if (source_uses_[code] != 0) return false;
- if (destination_uses_[code] != 0) return false;
- }
- return true;
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Finish() {
- if (spilled_register_ >= 0) {
- __ pop(Register::from_code(spilled_register_));
- spilled_register_ = -1;
- }
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::EnsureRestored(LOperand* operand) {
- if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::from_code(spilled_register_));
- spilled_register_ = -1;
- }
-}
-
-
-Register LGapResolver::EnsureTempRegister() {
- // 1. We may have already spilled to create a temp register.
- if (spilled_register_ >= 0) {
- return Register::from_code(spilled_register_);
- }
-
- // 2. We may have a free register that we can use without spilling.
- Register free = GetFreeRegisterNot(no_reg);
- if (!free.is(no_reg)) return free;
-
- // 3. Prefer to spill a register that is not used in any remaining move
- // because it will not need to be restored until the end.
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
- int code = config->GetAllocatableGeneralCode(i);
- if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
- Register scratch = Register::from_code(code);
- __ push(scratch);
- spilled_register_ = code;
- return scratch;
- }
- }
-
- // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- spilled_register_ = config->GetAllocatableGeneralCode(0);
- Register scratch = Register::from_code(spilled_register_);
- __ push(scratch);
- return scratch;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = cgen_->ToRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
-
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ mov(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = EnsureTempRegister();
- Operand dst = cgen_->ToOperand(destination);
- __ mov(tmp, src);
- __ mov(dst, tmp);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ Move(dst, cgen_->ToImmediate(constant_source, r));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = bit_cast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- if (int_val == 0) {
- __ xorps(dst, dst);
- } else {
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- __ movsd(dst, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ Move(dst, cgen_->ToImmediate(constant_source, r));
- } else {
- Register tmp = EnsureTempRegister();
- __ LoadObject(tmp, cgen_->ToHandle(constant_source));
- __ mov(dst, tmp);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(dst, src);
- }
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movsd(dst, src);
- } else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
- }
- } else {
- UNREACHABLE();
- }
-
- RemoveMove(index);
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ push(src);
- __ mov(src, dst);
- __ pop(dst);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Register-memory. Use a free register as a temp if possible. Do not
- // spill on demand because the simple spill implementation cannot avoid
- // spilling src at this point.
- Register tmp = GetFreeRegisterNot(no_reg);
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- if (tmp.is(no_reg)) {
- __ xor_(reg, mem);
- __ xor_(mem, reg);
- __ xor_(reg, mem);
- } else {
- __ mov(tmp, mem);
- __ mov(mem, reg);
- __ mov(reg, tmp);
- }
-
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory. Spill on demand to use a temporary. If there is a
- // free register after that, use it as a second temporary.
- Register tmp0 = EnsureTempRegister();
- Register tmp1 = GetFreeRegisterNot(tmp0);
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- if (tmp1.is(no_reg)) {
- // Only one temp register available to us.
- __ mov(tmp0, dst);
- __ xor_(tmp0, src);
- __ xor_(src, tmp0);
- __ xor_(tmp0, src);
- __ mov(dst, tmp0);
- } else {
- __ mov(tmp0, dst);
- __ mov(tmp1, src);
- __ mov(dst, tmp1);
- __ mov(src, tmp0);
- }
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // XMM register-register swap. We rely on having xmm0
- // available as a fixed scratch register.
- XMMRegister src = cgen_->ToDoubleRegister(source);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, src);
- __ movaps(src, dst);
- __ movaps(dst, xmm0);
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // XMM register-memory swap. We rely on having xmm0
- // available as a fixed scratch register.
- DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- Operand other =
- cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movsd(xmm0, other);
- __ movsd(other, reg);
- __ movaps(reg, xmm0);
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- // Double-width memory-to-memory. Spill on demand to use a general
- // purpose temporary register and also rely on having xmm0 available as
- // a fixed scratch register.
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ movsd(xmm0, dst0); // Save destination in xmm0.
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- __ movsd(src0, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- RemoveMove(index);
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have its source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-
- // In addition to swapping the actual uses as sources, we need to update
- // the use counts.
- if (source->IsRegister() && destination->IsRegister()) {
- int temp = source_uses_[source->index()];
- source_uses_[source->index()] = source_uses_[destination->index()];
- source_uses_[destination->index()] = temp;
- } else if (source->IsRegister()) {
- // We don't have use counts for non-register operands like destination.
- // Compute those counts now.
- source_uses_[source->index()] = CountSourceUses(source);
- } else if (destination->IsRegister()) {
- source_uses_[destination->index()] = CountSourceUses(destination);
- }
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
deleted file mode 100644
index 687087feb3..0000000000
--- a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit any code necessary at the end of a gap move.
- void Finish();
-
- // Add or delete a move from the move graph without emitting any code.
- // Used to build up the graph and remove trivial moves.
- void AddMove(LMoveOperands move);
- void RemoveMove(int index);
-
- // Report the count of uses of operand as a source in a not-yet-performed
- // move. Used to rebuild use counts.
- int CountSourceUses(LOperand* operand);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Ensure that the given operand is not spilled.
- void EnsureRestored(LOperand* operand);
-
- // Return a register that can be used as a temp register, spilling
- // something if necessary.
- Register EnsureTempRegister();
-
- // Return a known free register different from the given one (which could
- // be no_reg---returning any free register), or no_reg if there is no such
- // register.
- Register GetFreeRegisterNot(Register reg);
-
- // Verify that the state is the initial one, ready to resolve a single
- // parallel move.
- bool HasBeenReset();
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- // Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumRegisters];
- int destination_uses_[DoubleRegister::kMaxNumRegisters];
-
- // If we had to spill on demand, the currently spilled register's
- // allocation index.
- int spilled_register_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
deleted file mode 100644
index 14d8e41ebe..0000000000
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ /dev/null
@@ -1,2467 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/ia32/lithium-ia32.h"
-
-#include <sstream>
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
-#include "src/crankshaft/lithium-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Input operands must use a fixed register, a use-at-start policy, or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-bool LInstruction::HasDoubleRegisterResult() {
- return HasResult() && result()->IsDoubleRegister();
-}
-
-
-bool LInstruction::HasDoubleRegisterInput() {
- for (int i = 0; i < InputCount(); i++) {
- LOperand* op = InputAt(i);
- if (op != NULL && op->IsDoubleRegister()) {
- return true;
- }
- }
- return false;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip a slot if allocating a double-width slot.
- if (kind == DOUBLE_REGISTERS) {
- current_frame_slots_++;
- current_frame_slots_ |= 1;
- num_double_slots_++;
- }
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-static inline bool CanBeImmediateConstant(HValue* value) {
- return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
- Register fixed_register) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseFixed(value, fixed_register);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If the instruction does not have side effects, lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach an environment to this call even if
- // the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseFixed(right_value, ecx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return MarkAsCall(DefineSameAsFirst(result), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
- : DefineSameAsFirst(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left_operand = UseFixed(left, edx);
- LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap)
- ? TempRegister()
- : NULL;
- LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LOperand* temp = TempRegister();
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegister(instr->function());
- LOperand* temp = TempRegister();
- LWrapReceiver* result =
- new(zone()) LWrapReceiver(receiver, function, temp);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* receiver = UseFixed(instr->receiver(), eax);
- LOperand* length = UseFixed(instr->length(), ebx);
- LOperand* elements = UseFixed(instr->elements(), ecx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = UseAny(instr->argument(i));
- AddInstruction(new(zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, esi);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), esi);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
- descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathCos:
- return DoMathCos(instr);
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- case kMathSin:
- return DoMathSin(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- if (instr->representation().IsInteger32()) {
- LMathFloorI* result = new (zone()) LMathFloorI(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathFloorD* result = new (zone()) LMathFloorD(input);
- return DefineAsRegister(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- if (instr->representation().IsInteger32()) {
- LOperand* temp = FixedTemp(xmm4);
- LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathRoundD* result = new (zone()) LMathRoundD(input);
- return DefineAsRegister(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context()); // Deferred use.
- LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LMathAbs(context, input));
- Representation r = instr->value()->representation();
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new(zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMathSqrt(input));
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(eax);
- LOperand* temp2 = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
- dividend, divisor, temp1, temp2), edx);
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LDivI(
- dividend, divisor, temp), eax);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanOverflow) ||
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(eax);
- LOperand* temp2 = FixedTemp(edx);
- LOperand* temp3 =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result =
- DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
- divisor,
- temp1,
- temp2,
- temp3),
- edx);
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
- dividend, divisor, temp), eax);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(eax);
- LOperand* temp2 = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LModByConstI(
- dividend, divisor, temp1, temp2), eax);
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LModI(
- dividend, divisor, temp), edx);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- HValue* h_right = instr->BetterRightOperand();
- LOperand* right = UseOrConstant(h_right);
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- temp = TempRegister();
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- int constant_value =
- h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
- // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
- // |DeoptimizeIf|.
- bool needs_environment =
- instr->CheckFlag(HValue::kCanOverflow) ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (!right->IsConstantOperand() || constant_value <= 0));
- if (needs_environment) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- // Check to see if it would be advantageous to use an lea instruction rather
- // than an add. This is the case when no overflow check is needed and there
- // are multiple uses of the add's inputs, so using a 3-register add will
- // preserve all input values for later uses.
- bool use_lea = LAddI::UseLea(instr);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- HValue* right_candidate = instr->BetterRightOperand();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
- LAddI* add = new(zone()) LAddI(left, right);
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
- if (can_overflow) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- bool use_lea = LAddI::UseLea(instr);
- LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_candidate = instr->right();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
- return result;
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right =
- exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), xmm1)
- : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsSmiOrTagged());
- DCHECK(instr->right()->representation().IsSmiOrTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (CanBeImmediateConstant(instr->left()) &&
- CanBeImmediateConstant(instr->right())) {
- // The code generator requires either both inputs to be constant
- // operands, or neither.
- left = UseConstant(instr->left());
- right = UseConstant(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
-
- LStringCompareAndBranch* result = new(zone())
- LStringCompareAndBranch(context, left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasInstanceTypeAndBranch(
- UseRegisterAtStart(instr->value()),
- TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
- TempRegister(), TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
- if (instr->encoding() == String::ONE_BYTE_ENCODING) {
- if (FLAG_debug_code) {
- return UseFixed(instr->value(), eax);
- } else {
- return UseFixedOrConstant(instr->value(), eax);
- }
- } else {
- if (FLAG_debug_code) {
- return UseRegisterAtStart(instr->value());
- } else {
- return UseRegisterOrConstantAtStart(instr->value());
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = GetSeqStringSetCharOperand(instr);
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
- LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
- index, value);
- if (FLAG_debug_code) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseOrConstantAtStart(instr->length())
- : UseAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LOperand* temp = TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp = !truncating ? FixedTemp(xmm1) : NULL;
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = !truncating;
- LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val);
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- LInstruction* result =
- DefineAsRegister(new(zone()) LDoubleToI(value, temp));
- if (!truncating) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = TempRegister();
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignPointerMap(DefineSameAsFirst(result));
- } else {
- LOperand* temp = TempRegister();
- LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
- return AssignPointerMap(DefineSameAsFirst(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LOperand* scratch = TempRegister();
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view, scratch);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- // If the object is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the object gets promoted before we
- // emit code, we will still get the register but will do an immediate
- // compare instead of the cell compare. This is safe.
- LOperand* value = instr->object_in_new_space()
- ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- if (input_rep.IsDouble()) {
- LOperand* reg = UseRegister(value);
- return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
- } else if (input_rep.IsInteger32()) {
- LOperand* reg = UseFixed(value, eax);
- return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- LOperand* reg = UseFixed(value, eax);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(
- UseFixed(instr->value(), eax), context, parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- uint64_t const bits = instr->DoubleValueAsBits();
- LOperand* temp = bits ? TempRegister() : nullptr;
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* value;
- LOperand* temp;
- LOperand* context = UseRegister(instr->context());
- if (instr->NeedsWriteBarrier()) {
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = (instr->access().IsExternalMemory() &&
- instr->access().offset() == 0)
- ? UseRegisterOrConstantAtStart(instr->object())
- : UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK(
- (instr->representation().IsInteger32() &&
- !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
- (instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
- // Determine if we need a byte register in this case for the value.
- bool val_is_fixed_register =
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS;
- if (val_is_fixed_register) {
- return UseFixed(instr->value(), eax);
- }
-
- return UseRegister(instr->value());
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
-
- if (instr->value()->representation().IsDouble()) {
- LOperand* object = UseRegisterAtStart(instr->elements());
-      LOperand* val = UseRegisterAtStart(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- } else {
- DCHECK(instr->value()->representation().IsSmiOrTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = UseRegister(instr->elements());
- LOperand* val;
- LOperand* key;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- val = UseRegisterOrConstantAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- return new (zone()) LStoreKeyed(obj, key, val, nullptr);
- }
- }
-
- ElementsKind elements_kind = instr->elements_kind();
- DCHECK(
- (instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- DCHECK(instr->elements()->representation().IsExternal());
-
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- LOperand* val = GetStoreKeyedValueOperand(instr);
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL,
- new_map_reg, temp_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), eax);
- LOperand* context = UseFixed(instr->context(), esi);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, eax);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool is_external_location = instr->access().IsExternalMemory() &&
- instr->access().offset() == 0;
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else if (is_external_location) {
- DCHECK(!is_in_object);
- DCHECK(!needs_write_barrier);
- DCHECK(!needs_write_barrier_for_map);
- obj = UseRegisterOrConstant(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- bool can_be_constant = instr->value()->IsConstant() &&
- HConstant::cast(instr->value())->NotInNewSpace() &&
- !instr->field_representation().IsDouble();
-
- LOperand* val;
- if (instr->field_representation().IsInteger8() ||
- instr->field_representation().IsUInteger8()) {
- // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
- // Just force the value to be in eax and we're safe here.
- val = UseFixed(instr->value(), eax);
- } else if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (can_be_constant) {
- val = UseRegisterOrConstant(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!is_in_object || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
- return MarkAsCall(DefineFixed(string_add, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
- : UseRegister(instr->size());
- if (instr->IsAllocationFolded()) {
- LOperand* temp = TempRegister();
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocate* result = new (zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kNotEnoughSpillSlotsForOsr);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = Use(instr->index());
- }
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(), ebx);
- LTypeof* result = new(zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->enumerable(), eax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
deleted file mode 100644
index ce30e1d0cc..0000000000
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ /dev/null
@@ -1,2514 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
-#define V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-namespace compiler {
-class RCodeVisualizer;
-}
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(LoadRoot) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathCos) \
- V(MathExp) \
- V(MathFloorD) \
- V(MathFloorI) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRoundD) \
- V(MathRoundI) \
- V(MathSin) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- }
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- bool HasDoubleRegisterResult();
- bool HasDoubleRegisterInput();
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator support.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block) : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const final { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return false;
- }
-
- bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 1> {
- public:
- LWrapReceiver(LOperand* receiver,
- LOperand* function,
- LOperand* temp) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- temps_[0] = temp;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LModByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LDivByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> {
- public:
- LFlooringDivByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-// Math.floor with a double result.
-class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.floor with an integer result.
-class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.round with a double result.
-class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.round with an integer result.
-class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRoundI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathPowHalf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- LOperand* context() { return inputs_[0]; }
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
- LOperand* scratch) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- temps_[0] = scratch;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
- LOperand* scratch() const { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-
- Token::Value op() const { return hydrogen()->op(); }
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- Token::Value op() const { return op_; }
- bool can_deopt() const { return can_deopt_; }
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 1> {
- public:
- LBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- static bool UseLea(HAdd* add) {
- return !add->CheckFlag(HValue::kCanOverflow) &&
- add->BetterLeftOperand()->UseCount() > 1;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LReturn(LOperand* value,
- LOperand* context,
- LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
- DECLARE_HYDROGEN_ACCESSOR(Return)
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
- bool key_is_smi() {
- return hydrogen()->key()->representation().IsTagged();
- }
-};
-
-
-inline static bool ExternalArrayOpRequiresTemp(
- Representation key_representation,
- ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return key_representation.IsSmi() &&
- (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
-}
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberUntagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change);
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
- public:
- LStoreNamedField(LOperand* obj,
- LOperand* val,
- LOperand* temp,
- LOperand* temp_map) {
- inputs_[0] = obj;
- inputs_[1] = val;
- temps_[0] = temp;
- temps_[1] = temp_map;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp_map() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
- LOperand* backing_store_owner) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view, LOperand* scratch) {
- inputs_[0] = view;
- temps_[0] = scratch;
- }
-
- LOperand* view() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> {
- public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value = NULL) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
- inputs_[0] = value;
- temps_[0] = temp_xmm;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp_xmm() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 1> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
- temps_[0] = temp;
- }
-
- LOperand* size() const { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph),
- num_double_slots_(0) { }
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-
- int num_double_slots() const { return num_double_slots_; }
-
- private:
- int num_double_slots_;
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // Operand created by UseRegister is guaranteed to be live until the end of
- // instruction. This means that the register allocator will not reuse its
- // register for any other operand inside the instruction.
- // Operand created by UseRegisterAtStart is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a fixed register or a constant operand.
- MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
- Register fixed_register);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- XMMRegister reg);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
diff --git a/deps/v8/src/crankshaft/lithium-allocator-inl.h b/deps/v8/src/crankshaft/lithium-allocator-inl.h
deleted file mode 100644
index 631af6024b..0000000000
--- a/deps/v8/src/crankshaft/lithium-allocator-inl.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
-#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
-
-#include "src/crankshaft/lithium-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
-
-
-LInstruction* LAllocator::InstructionAt(int index) {
- return chunk_->instructions()->at(index);
-}
-
-
-LGap* LAllocator::GapAt(int index) {
- return chunk_->GetGapAt(index);
-}
-
-
-void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
- if (range->Kind() == DOUBLE_REGISTERS) {
- assigned_double_registers_->Add(reg);
- } else {
- DCHECK(range->Kind() == GENERAL_REGISTERS);
- assigned_registers_->Add(reg);
- }
- range->set_assigned_register(reg, chunk()->zone());
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
diff --git a/deps/v8/src/crankshaft/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
deleted file mode 100644
index 201c6062a8..0000000000
--- a/deps/v8/src/crankshaft/lithium-allocator.cc
+++ /dev/null
@@ -1,2192 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/lithium-allocator.h"
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium-allocator-inl.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
-#include "src/string-stream.h"
-
-namespace v8 {
-namespace internal {
-
-const auto GetRegConfig = RegisterConfiguration::Crankshaft;
-
-static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
- return a.Value() < b.Value() ? a : b;
-}
-
-
-static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
- return a.Value() > b.Value() ? a : b;
-}
-
-
-UsePosition::UsePosition(LifetimePosition pos,
- LOperand* operand,
- LOperand* hint)
- : operand_(operand),
- hint_(hint),
- pos_(pos),
- next_(NULL),
- requires_reg_(false),
- register_beneficial_(true) {
- if (operand_ != NULL && operand_->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy() ||
- unalloc->HasDoubleRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
- }
- DCHECK(pos_.IsValid());
-}
-
-
-bool UsePosition::HasHint() const {
- return hint_ != NULL && !hint_->IsUnallocated();
-}
-
-
-bool UsePosition::RequiresRegister() const {
- return requires_reg_;
-}
-
-
-bool UsePosition::RegisterIsBeneficial() const {
- return register_beneficial_;
-}
-
-
-void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
- DCHECK(Contains(pos) && pos.Value() != start().Value());
- UseInterval* after = new(zone) UseInterval(pos, end_);
- after->next_ = next_;
- next_ = after;
- end_ = pos;
-}
-
-
-#ifdef DEBUG
-
-
-void LiveRange::Verify() const {
- UsePosition* cur = first_pos_;
- while (cur != NULL) {
- DCHECK(Start().Value() <= cur->pos().Value() &&
- cur->pos().Value() <= End().Value());
- cur = cur->next();
- }
-}
-
-
-bool LiveRange::HasOverlap(UseInterval* target) const {
- UseInterval* current_interval = first_interval_;
- while (current_interval != NULL) {
- // Intervals overlap if the start of one is contained in the other.
- if (current_interval->Contains(target->start()) ||
- target->Contains(current_interval->start())) {
- return true;
- }
- current_interval = current_interval->next();
- }
- return false;
-}
-
-
-#endif
-
-
-LiveRange::LiveRange(int id, Zone* zone)
- : id_(id),
- spilled_(false),
- kind_(UNALLOCATED_REGISTERS),
- assigned_register_(kInvalidAssignment),
- last_interval_(NULL),
- first_interval_(NULL),
- first_pos_(NULL),
- parent_(NULL),
- next_(NULL),
- current_interval_(NULL),
- last_processed_use_(NULL),
- current_hint_operand_(NULL),
- spill_operand_(new (zone) LOperand()),
- spill_start_index_(kMaxInt) {}
-
-
-void LiveRange::set_assigned_register(int reg, Zone* zone) {
- DCHECK(!HasRegisterAssigned() && !IsSpilled());
- assigned_register_ = reg;
- ConvertOperands(zone);
-}
-
-
-void LiveRange::MakeSpilled(Zone* zone) {
- DCHECK(!IsSpilled());
- DCHECK(TopLevel()->HasAllocatedSpillOperand());
- spilled_ = true;
- assigned_register_ = kInvalidAssignment;
- ConvertOperands(zone);
-}
-
-
-bool LiveRange::HasAllocatedSpillOperand() const {
- DCHECK(spill_operand_ != NULL);
- return !spill_operand_->IsIgnored();
-}
-
-
-void LiveRange::SetSpillOperand(LOperand* operand) {
- DCHECK(!operand->IsUnallocated());
- DCHECK(spill_operand_ != NULL);
- DCHECK(spill_operand_->IsIgnored());
- spill_operand_->ConvertTo(operand->kind(), operand->index());
-}
-
-
-UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
- UsePosition* use_pos = last_processed_use_;
- if (use_pos == NULL) use_pos = first_pos();
- while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
- use_pos = use_pos->next();
- }
- last_processed_use_ = use_pos;
- return use_pos;
-}
-
-
-UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
- LifetimePosition start) {
- UsePosition* pos = NextUsePosition(start);
- while (pos != NULL && !pos->RegisterIsBeneficial()) {
- pos = pos->next();
- }
- return pos;
-}
-
-
-UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
- LifetimePosition start) {
- UsePosition* pos = first_pos();
- UsePosition* prev = NULL;
- while (pos != NULL && pos->pos().Value() < start.Value()) {
- if (pos->RegisterIsBeneficial()) prev = pos;
- pos = pos->next();
- }
- return prev;
-}
-
-
-UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
- UsePosition* pos = NextUsePosition(start);
- while (pos != NULL && !pos->RequiresRegister()) {
- pos = pos->next();
- }
- return pos;
-}
-
-
-bool LiveRange::CanBeSpilled(LifetimePosition pos) {
- // We cannot spill a live range that has a use requiring a register
- // at the current or the immediate next position.
- UsePosition* use_pos = NextRegisterPosition(pos);
- if (use_pos == NULL) return true;
- return
- use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
-}
-
-
-LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
- LOperand* op = NULL;
- if (HasRegisterAssigned()) {
- DCHECK(!IsSpilled());
- switch (Kind()) {
- case GENERAL_REGISTERS:
- op = LRegister::Create(assigned_register(), zone);
- break;
- case DOUBLE_REGISTERS:
- op = LDoubleRegister::Create(assigned_register(), zone);
- break;
- default:
- UNREACHABLE();
- }
- } else if (IsSpilled()) {
- DCHECK(!HasRegisterAssigned());
- op = TopLevel()->GetSpillOperand();
- DCHECK(!op->IsUnallocated());
- } else {
- LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
- unalloc->set_virtual_register(id_);
- op = unalloc;
- }
- return op;
-}
-
-
-UseInterval* LiveRange::FirstSearchIntervalForPosition(
- LifetimePosition position) const {
- if (current_interval_ == NULL) return first_interval_;
- if (current_interval_->start().Value() > position.Value()) {
- current_interval_ = NULL;
- return first_interval_;
- }
- return current_interval_;
-}
-
-
-void LiveRange::AdvanceLastProcessedMarker(
- UseInterval* to_start_of, LifetimePosition but_not_past) const {
- if (to_start_of == NULL) return;
- if (to_start_of->start().Value() > but_not_past.Value()) return;
- LifetimePosition start =
- current_interval_ == NULL ? LifetimePosition::Invalid()
- : current_interval_->start();
- if (to_start_of->start().Value() > start.Value()) {
- current_interval_ = to_start_of;
- }
-}
-
-
-void LiveRange::SplitAt(LifetimePosition position,
- LiveRange* result,
- Zone* zone) {
- DCHECK(Start().Value() < position.Value());
- DCHECK(result->IsEmpty());
- // Find the last interval that ends before the position. If the
- // position is contained in one of the intervals in the chain, we
- // split that interval and use the first part.
- UseInterval* current = FirstSearchIntervalForPosition(position);
-
- // If the split position coincides with the beginning of a use interval
- // we need to split use positions in a special way.
- bool split_at_start = false;
-
- if (current->start().Value() == position.Value()) {
- // When splitting at start we need to locate the previous use interval.
- current = first_interval_;
- }
-
- while (current != NULL) {
- if (current->Contains(position)) {
- current->SplitAt(position, zone);
- break;
- }
- UseInterval* next = current->next();
- if (next->start().Value() >= position.Value()) {
- split_at_start = (next->start().Value() == position.Value());
- break;
- }
- current = next;
- }
-
- // Partition original use intervals to the two live ranges.
- UseInterval* before = current;
- UseInterval* after = before->next();
- result->last_interval_ = (last_interval_ == before)
- ? after // Only interval in the range after split.
- : last_interval_; // Last interval of the original range.
- result->first_interval_ = after;
- last_interval_ = before;
-
- // Find the last use position before the split and the first use
- // position after it.
- UsePosition* use_after = first_pos_;
- UsePosition* use_before = NULL;
- if (split_at_start) {
- // The split position coincides with the beginning of a use interval (the
- // end of a lifetime hole). A use at this position should be attributed to
- // the split child because the split child owns the use interval covering it.
- while (use_after != NULL && use_after->pos().Value() < position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
- }
- } else {
- while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
- use_before = use_after;
- use_after = use_after->next();
- }
- }
-
- // Partition original use positions to the two live ranges.
- if (use_before != NULL) {
- use_before->next_ = NULL;
- } else {
- first_pos_ = NULL;
- }
- result->first_pos_ = use_after;
-
- // Discard cached iteration state. It might be pointing
- // to the use that no longer belongs to this live range.
- last_processed_use_ = NULL;
- current_interval_ = NULL;
-
- // Link the new live range in the chain before any of the other
- // ranges linked from the range before the split.
- result->parent_ = (parent_ == NULL) ? this : parent_;
- result->kind_ = result->parent_->kind_;
- result->next_ = next_;
- next_ = result;
-
-#ifdef DEBUG
- Verify();
- result->Verify();
-#endif
-}
-
-
- // This implements an ordering on live ranges by their start positions.
- // This is needed for the correctness of the register allocation
- // algorithm. If two live ranges start at the same offset then the tie is
- // broken by where the value is first used. This part of the ordering is
- // merely a heuristic.
-bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
- LifetimePosition start = Start();
- LifetimePosition other_start = other->Start();
- if (start.Value() == other_start.Value()) {
- UsePosition* pos = first_pos();
- if (pos == NULL) return false;
- UsePosition* other_pos = other->first_pos();
- if (other_pos == NULL) return true;
- return pos->pos().Value() < other_pos->pos().Value();
- }
- return start.Value() < other_start.Value();
-}
-
-
-void LiveRange::ShortenTo(LifetimePosition start) {
- LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
- DCHECK(first_interval_ != NULL);
- DCHECK(first_interval_->start().Value() <= start.Value());
- DCHECK(start.Value() < first_interval_->end().Value());
- first_interval_->set_start(start);
-}
-
-
-void LiveRange::EnsureInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone) {
- LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
- id_,
- start.Value(),
- end.Value());
- LifetimePosition new_end = end;
- while (first_interval_ != NULL &&
- first_interval_->start().Value() <= end.Value()) {
- if (first_interval_->end().Value() > end.Value()) {
- new_end = first_interval_->end();
- }
- first_interval_ = first_interval_->next();
- }
-
- UseInterval* new_interval = new(zone) UseInterval(start, new_end);
- new_interval->next_ = first_interval_;
- first_interval_ = new_interval;
- if (new_interval->next() == NULL) {
- last_interval_ = new_interval;
- }
-}
-
-
-void LiveRange::AddUseInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone) {
- LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
- id_,
- start.Value(),
- end.Value());
- if (first_interval_ == NULL) {
- UseInterval* interval = new(zone) UseInterval(start, end);
- first_interval_ = interval;
- last_interval_ = interval;
- } else {
- if (end.Value() == first_interval_->start().Value()) {
- first_interval_->set_start(start);
- } else if (end.Value() < first_interval_->start().Value()) {
- UseInterval* interval = new(zone) UseInterval(start, end);
- interval->set_next(first_interval_);
- first_interval_ = interval;
- } else {
- // The order of instruction processing (see ProcessInstructions) guarantees
- // that each new use interval either precedes or intersects with the
- // last added interval.
- DCHECK(start.Value() < first_interval_->end().Value());
- first_interval_->start_ = Min(start, first_interval_->start_);
- first_interval_->end_ = Max(end, first_interval_->end_);
- }
- }
-}
-
-
-void LiveRange::AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- LOperand* hint,
- Zone* zone) {
- LAllocator::TraceAlloc("Add to live range %d use position %d\n",
- id_,
- pos.Value());
- UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
- UsePosition* prev_hint = NULL;
- UsePosition* prev = NULL;
- UsePosition* current = first_pos_;
- while (current != NULL && current->pos().Value() < pos.Value()) {
- prev_hint = current->HasHint() ? current : prev_hint;
- prev = current;
- current = current->next();
- }
-
- if (prev == NULL) {
- use_pos->set_next(first_pos_);
- first_pos_ = use_pos;
- } else {
- use_pos->next_ = prev->next_;
- prev->next_ = use_pos;
- }
-
- if (prev_hint == NULL && use_pos->HasHint()) {
- current_hint_operand_ = hint;
- }
-}
-
-
-void LiveRange::ConvertOperands(Zone* zone) {
- LOperand* op = CreateAssignedOperand(zone);
- UsePosition* use_pos = first_pos();
- while (use_pos != NULL) {
- DCHECK(Start().Value() <= use_pos->pos().Value() &&
- use_pos->pos().Value() <= End().Value());
-
- if (use_pos->HasOperand()) {
- DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
- use_pos->operand()->ConvertTo(op->kind(), op->index());
- }
- use_pos = use_pos->next();
- }
-}
-
-
-bool LiveRange::CanCover(LifetimePosition position) const {
- if (IsEmpty()) return false;
- return Start().Value() <= position.Value() &&
- position.Value() < End().Value();
-}
-
-
-bool LiveRange::Covers(LifetimePosition position) {
- if (!CanCover(position)) return false;
- UseInterval* start_search = FirstSearchIntervalForPosition(position);
- for (UseInterval* interval = start_search;
- interval != NULL;
- interval = interval->next()) {
- DCHECK(interval->next() == NULL ||
- interval->next()->start().Value() >= interval->start().Value());
- AdvanceLastProcessedMarker(interval, position);
- if (interval->Contains(position)) return true;
- if (interval->start().Value() > position.Value()) return false;
- }
- return false;
-}
-
-
-LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
- UseInterval* b = other->first_interval();
- if (b == NULL) return LifetimePosition::Invalid();
- LifetimePosition advance_last_processed_up_to = b->start();
- UseInterval* a = FirstSearchIntervalForPosition(b->start());
- while (a != NULL && b != NULL) {
- if (a->start().Value() > other->End().Value()) break;
- if (b->start().Value() > End().Value()) break;
- LifetimePosition cur_intersection = a->Intersect(b);
- if (cur_intersection.IsValid()) {
- return cur_intersection;
- }
- if (a->start().Value() < b->start().Value()) {
- a = a->next();
- if (a == NULL || a->start().Value() > other->End().Value()) break;
- AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
- } else {
- b = b->next();
- }
- }
- return LifetimePosition::Invalid();
-}
-
-LAllocator::LAllocator(int num_values, HGraph* graph)
- : zone_(graph->isolate()->allocator(), ZONE_NAME),
- chunk_(NULL),
- live_in_sets_(graph->blocks()->length(), zone()),
- live_ranges_(num_values * 2, zone()),
- fixed_live_ranges_(NULL),
- fixed_double_live_ranges_(NULL),
- unhandled_live_ranges_(num_values * 2, zone()),
- active_live_ranges_(8, zone()),
- inactive_live_ranges_(8, zone()),
- reusable_slots_(8, zone()),
- next_virtual_register_(num_values),
- first_artificial_register_(num_values),
- mode_(UNALLOCATED_REGISTERS),
- num_registers_(-1),
- graph_(graph),
- has_osr_entry_(false),
- allocation_ok_(true) {}
-
-void LAllocator::InitializeLivenessAnalysis() {
- // Initialize the live_in sets for each block to NULL.
- int block_count = graph_->blocks()->length();
- live_in_sets_.Initialize(block_count, zone());
- live_in_sets_.AddBlock(NULL, block_count, zone());
-}
-
-
-BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
- // Compute live out for the given block, not including contributions from
- // backward successor edges.
- BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
-
- // Process all successor blocks.
- for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
- // Add values live on entry to the successor. Note the successor's
- // live_in will not be computed yet for backwards edges.
- HBasicBlock* successor = it.Current();
- BitVector* live_in = live_in_sets_[successor->block_id()];
- if (live_in != NULL) live_out->Union(*live_in);
-
- // All phi input operands corresponding to this successor edge are live
- // out from this block.
- int index = successor->PredecessorIndexOf(block);
- const ZoneList<HPhi*>* phis = successor->phis();
- for (int i = 0; i < phis->length(); ++i) {
- HPhi* phi = phis->at(i);
- if (!phi->OperandAt(index)->IsConstant()) {
- live_out->Add(phi->OperandAt(index)->id());
- }
- }
- }
-
- return live_out;
-}
-
-
-void LAllocator::AddInitialIntervals(HBasicBlock* block,
- BitVector* live_out) {
- // Add an interval that includes the entire block to the live range for
- // each live_out value.
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- LifetimePosition end = LifetimePosition::FromInstructionIndex(
- block->last_instruction_index()).NextInstruction();
- BitVector::Iterator iterator(live_out);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- LiveRange* range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end, zone());
- iterator.Advance();
- }
-}
-
-
-int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kNumRegisters;
-}
-
-
-LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
- int pos,
- bool is_tagged) {
- TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
- DCHECK(operand->HasFixedPolicy());
- if (operand->HasFixedSlotPolicy()) {
- operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
- } else if (operand->HasFixedRegisterPolicy()) {
- int reg_index = operand->fixed_register_index();
- operand->ConvertTo(LOperand::REGISTER, reg_index);
- } else if (operand->HasFixedDoubleRegisterPolicy()) {
- int reg_index = operand->fixed_register_index();
- operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
- } else {
- UNREACHABLE();
- }
- if (is_tagged) {
- TraceAlloc("Fixed reg is tagged at %d\n", pos);
- LInstruction* instr = InstructionAt(pos);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand, chunk()->zone());
- }
- }
- return operand;
-}
-
-
-LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- DCHECK(index < Register::kNumRegisters);
- LiveRange* result = fixed_live_ranges_[index];
- if (result == NULL) {
- result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
- DCHECK(result->IsFixed());
- result->kind_ = GENERAL_REGISTERS;
- SetLiveRangeAssignedRegister(result, index);
- fixed_live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < DoubleRegister::kMaxNumRegisters);
- LiveRange* result = fixed_double_live_ranges_[index];
- if (result == NULL) {
- result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
- chunk()->zone());
- DCHECK(result->IsFixed());
- result->kind_ = DOUBLE_REGISTERS;
- SetLiveRangeAssignedRegister(result, index);
- fixed_double_live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LiveRange* LAllocator::LiveRangeFor(int index) {
- if (index >= live_ranges_.length()) {
- live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
- }
- LiveRange* result = live_ranges_[index];
- if (result == NULL) {
- result = new(zone()) LiveRange(index, chunk()->zone());
- live_ranges_[index] = result;
- }
- return result;
-}
-
-
-LGap* LAllocator::GetLastGap(HBasicBlock* block) {
- int last_instruction = block->last_instruction_index();
- int index = chunk_->NearestGapPos(last_instruction);
- return GapAt(index);
-}
-
-
-HPhi* LAllocator::LookupPhi(LOperand* operand) const {
- if (!operand->IsUnallocated()) return NULL;
- int index = LUnallocated::cast(operand)->virtual_register();
- HValue* instr = graph_->LookupValue(index);
- if (instr != NULL && instr->IsPhi()) {
- return HPhi::cast(instr);
- }
- return NULL;
-}
-
-
-LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
- if (operand->IsUnallocated()) {
- return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
- } else if (operand->IsRegister()) {
- return FixedLiveRangeFor(operand->index());
- } else if (operand->IsDoubleRegister()) {
- return FixedDoubleLiveRangeFor(operand->index());
- } else {
- return NULL;
- }
-}
-
-
-void LAllocator::Define(LifetimePosition position,
- LOperand* operand,
- LOperand* hint) {
- LiveRange* range = LiveRangeFor(operand);
- if (range == NULL) return;
-
- if (range->IsEmpty() || range->Start().Value() > position.Value()) {
- // This can happen if there is a definition without a use.
- range->AddUseInterval(position, position.NextInstruction(), zone());
- range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
- } else {
- range->ShortenTo(position);
- }
-
- if (operand->IsUnallocated()) {
- LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, zone());
- }
-}
-
-
-void LAllocator::Use(LifetimePosition block_start,
- LifetimePosition position,
- LOperand* operand,
- LOperand* hint) {
- LiveRange* range = LiveRangeFor(operand);
- if (range == NULL) return;
- if (operand->IsUnallocated()) {
- LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, zone());
- }
- range->AddUseInterval(block_start, position, zone());
-}
-
-
-void LAllocator::AddConstraintsGapMove(int index,
- LOperand* from,
- LOperand* to) {
- LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
- chunk()->zone());
- if (from->IsUnallocated()) {
- const ZoneList<LMoveOperands>* move_operands = move->move_operands();
- for (int i = 0; i < move_operands->length(); ++i) {
- LMoveOperands cur = move_operands->at(i);
- LOperand* cur_to = cur.destination();
- if (cur_to->IsUnallocated()) {
- if (LUnallocated::cast(cur_to)->virtual_register() ==
- LUnallocated::cast(from)->virtual_register()) {
- move->AddMove(cur.source(), to, chunk()->zone());
- return;
- }
- }
- }
- }
- move->AddMove(from, to, chunk()->zone());
-}
-
-
-void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
- int start = block->first_instruction_index();
- int end = block->last_instruction_index();
- if (start == -1) return;
- for (int i = start; i <= end; ++i) {
- if (IsGapAt(i)) {
- LInstruction* instr = NULL;
- LInstruction* prev_instr = NULL;
- if (i < end) instr = InstructionAt(i + 1);
- if (i > start) prev_instr = InstructionAt(i - 1);
- MeetConstraintsBetween(prev_instr, instr, i);
- if (!AllocationOk()) return;
- }
- }
-}
-
-
-void LAllocator::MeetConstraintsBetween(LInstruction* first,
- LInstruction* second,
- int gap_index) {
- // Handle fixed temporaries.
- if (first != NULL) {
- for (TempIterator it(first); !it.Done(); it.Advance()) {
- LUnallocated* temp = LUnallocated::cast(it.Current());
- if (temp->HasFixedPolicy()) {
- AllocateFixed(temp, gap_index - 1, false);
- }
- }
- }
-
- // Handle fixed output operand.
- if (first != NULL && first->Output() != NULL) {
- LUnallocated* first_output = LUnallocated::cast(first->Output());
- LiveRange* range = LiveRangeFor(first_output->virtual_register());
- bool assigned = false;
- if (first_output->HasFixedPolicy()) {
- LUnallocated* output_copy = first_output->CopyUnconstrained(
- chunk()->zone());
- bool is_tagged = HasTaggedValue(first_output->virtual_register());
- AllocateFixed(first_output, gap_index, is_tagged);
-
- // This value is produced on the stack, so we never need to spill it.
- if (first_output->IsStackSlot()) {
- range->SetSpillOperand(first_output);
- range->SetSpillStartIndex(gap_index - 1);
- assigned = true;
- }
- chunk_->AddGapMove(gap_index, first_output, output_copy);
- }
-
- if (!assigned) {
- range->SetSpillStartIndex(gap_index);
-
- // This move to spill operand is not a real use. Liveness analysis
- // and splitting of live ranges do not account for it.
- // Thus it should be inserted at a lifetime position corresponding to
- // the instruction end.
- LGap* gap = GapAt(gap_index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
- chunk()->zone());
- move->AddMove(first_output, range->GetSpillOperand(),
- chunk()->zone());
- }
- }
-
- // Handle fixed input operands of second instruction.
- if (second != NULL) {
- for (UseIterator it(second); !it.Done(); it.Advance()) {
- LUnallocated* cur_input = LUnallocated::cast(it.Current());
- if (cur_input->HasFixedPolicy()) {
- LUnallocated* input_copy = cur_input->CopyUnconstrained(
- chunk()->zone());
- bool is_tagged = HasTaggedValue(cur_input->virtual_register());
- AllocateFixed(cur_input, gap_index + 1, is_tagged);
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
- } else if (cur_input->HasWritableRegisterPolicy()) {
- // The live range of writable input registers always goes until the end
- // of the instruction.
- DCHECK(!cur_input->IsUsedAtStart());
-
- LUnallocated* input_copy = cur_input->CopyUnconstrained(
- chunk()->zone());
- int vreg = GetVirtualRegister();
- if (!AllocationOk()) return;
- cur_input->set_virtual_register(vreg);
-
- if (RequiredRegisterKind(input_copy->virtual_register()) ==
- DOUBLE_REGISTERS) {
- double_artificial_registers_.Add(
- cur_input->virtual_register() - first_artificial_register_,
- zone());
- }
-
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
- }
- }
- }
-
- // Handle "output same as input" for second instruction.
- if (second != NULL && second->Output() != NULL) {
- LUnallocated* second_output = LUnallocated::cast(second->Output());
- if (second_output->HasSameAsInputPolicy()) {
- LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
- int output_vreg = second_output->virtual_register();
- int input_vreg = cur_input->virtual_register();
-
- LUnallocated* input_copy = cur_input->CopyUnconstrained(
- chunk()->zone());
- cur_input->set_virtual_register(second_output->virtual_register());
- AddConstraintsGapMove(gap_index, input_copy, cur_input);
-
- if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
- int index = gap_index + 1;
- LInstruction* instr = InstructionAt(index);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
- }
- } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
- }
- }
- }
-}
-
-
-void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
- int block_start = block->first_instruction_index();
- int index = block->last_instruction_index();
-
- LifetimePosition block_start_position =
- LifetimePosition::FromInstructionIndex(block_start);
-
- while (index >= block_start) {
- LifetimePosition curr_position =
- LifetimePosition::FromInstructionIndex(index);
-
- if (IsGapAt(index)) {
- // We have a gap at this position.
- LGap* gap = GapAt(index);
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
- chunk()->zone());
- const ZoneList<LMoveOperands>* move_operands = move->move_operands();
- for (int i = 0; i < move_operands->length(); ++i) {
- LMoveOperands* cur = &move_operands->at(i);
- if (cur->IsIgnored()) continue;
- LOperand* from = cur->source();
- LOperand* to = cur->destination();
- HPhi* phi = LookupPhi(to);
- LOperand* hint = to;
- if (phi != NULL) {
- // This is a phi resolving move.
- if (!phi->block()->IsLoopHeader()) {
- hint = LiveRangeFor(phi->id())->current_hint_operand();
- }
- } else {
- if (to->IsUnallocated()) {
- if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
- Define(curr_position, to, from);
- live->Remove(LUnallocated::cast(to)->virtual_register());
- } else {
- cur->Eliminate();
- continue;
- }
- } else {
- Define(curr_position, to, from);
- }
- }
- Use(block_start_position, curr_position, from, hint);
- if (from->IsUnallocated()) {
- live->Add(LUnallocated::cast(from)->virtual_register());
- }
- }
- } else {
- DCHECK(!IsGapAt(index));
- LInstruction* instr = InstructionAt(index);
-
- if (instr != NULL) {
- LOperand* output = instr->Output();
- if (output != NULL) {
- if (output->IsUnallocated()) {
- live->Remove(LUnallocated::cast(output)->virtual_register());
- }
- Define(curr_position, output, NULL);
- }
-
- if (instr->ClobbersRegisters()) {
- for (int i = 0; i < Register::kNumRegisters; ++i) {
- if (GetRegConfig()->IsAllocatableGeneralCode(i)) {
- if (output == NULL || !output->IsRegister() ||
- output->index() != i) {
- LiveRange* range = FixedLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(), zone());
- }
- }
- }
- }
-
- if (instr->ClobbersDoubleRegisters(isolate())) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- if (GetRegConfig()->IsAllocatableDoubleCode(i)) {
- if (output == NULL || !output->IsDoubleRegister() ||
- output->index() != i) {
- LiveRange* range = FixedDoubleLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(), zone());
- }
- }
- }
- }
-
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LOperand* input = it.Current();
-
- LifetimePosition use_pos;
- if (input->IsUnallocated() &&
- LUnallocated::cast(input)->IsUsedAtStart()) {
- use_pos = curr_position;
- } else {
- use_pos = curr_position.InstructionEnd();
- }
-
- Use(block_start_position, use_pos, input, NULL);
- if (input->IsUnallocated()) {
- live->Add(LUnallocated::cast(input)->virtual_register());
- }
- }
-
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LOperand* temp = it.Current();
- if (instr->ClobbersTemps()) {
- if (temp->IsRegister()) continue;
- if (temp->IsUnallocated()) {
- LUnallocated* temp_unalloc = LUnallocated::cast(temp);
- if (temp_unalloc->HasFixedPolicy()) {
- continue;
- }
- }
- }
- Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
- Define(curr_position, temp, NULL);
-
- if (temp->IsUnallocated()) {
- LUnallocated* temp_unalloc = LUnallocated::cast(temp);
- if (temp_unalloc->HasDoubleRegisterPolicy()) {
- double_artificial_registers_.Add(
- temp_unalloc->virtual_register() - first_artificial_register_,
- zone());
- }
- }
- }
- }
- }
-
- index = index - 1;
- }
-}
-
-
-void LAllocator::ResolvePhis(HBasicBlock* block) {
- const ZoneList<HPhi*>* phis = block->phis();
- for (int i = 0; i < phis->length(); ++i) {
- HPhi* phi = phis->at(i);
- LUnallocated* phi_operand =
- new (chunk()->zone()) LUnallocated(LUnallocated::NONE);
- phi_operand->set_virtual_register(phi->id());
- for (int j = 0; j < phi->OperandCount(); ++j) {
- HValue* op = phi->OperandAt(j);
- LOperand* operand = NULL;
- if (op->IsConstant() && op->EmitAtUses()) {
- HConstant* constant = HConstant::cast(op);
- operand = chunk_->DefineConstantOperand(constant);
- } else {
- DCHECK(!op->EmitAtUses());
- LUnallocated* unalloc =
- new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
- unalloc->set_virtual_register(op->id());
- operand = unalloc;
- }
- HBasicBlock* cur_block = block->predecessors()->at(j);
- // The gap move must be added without the special processing done in
- // AddConstraintsGapMove.
- chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
- operand,
- phi_operand);
-
- // We are going to insert a move before the branch instruction.
- // Some branch instructions (e.g. loops' back edges)
- // can potentially cause a GC so they have a pointer map.
- // By inserting a move we essentially create a copy of a
- // value which is invisible to PopulatePointerMaps(), because we store
- // it into a location different from the operand of a live range
- // covering a branch instruction.
- // Thus we need to manually record a pointer.
- LInstruction* branch =
- InstructionAt(cur_block->last_instruction_index());
- if (branch->HasPointerMap()) {
- if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
- branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
- } else if (!phi->representation().IsDouble()) {
- branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());
- }
- }
- }
-
- LiveRange* live_range = LiveRangeFor(phi->id());
- LLabel* label = chunk_->GetLabel(phi->block()->block_id());
- label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
- AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
- live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
- }
-}
-
-
-bool LAllocator::Allocate(LChunk* chunk) {
- DCHECK(chunk_ == NULL);
- chunk_ = static_cast<LPlatformChunk*>(chunk);
- assigned_registers_ =
- new (chunk->zone()) BitVector(Register::kNumRegisters, chunk->zone());
- assigned_double_registers_ = new (chunk->zone())
- BitVector(DoubleRegister::kMaxNumRegisters, chunk->zone());
- MeetRegisterConstraints();
- if (!AllocationOk()) return false;
- ResolvePhis();
- BuildLiveRanges();
- AllocateGeneralRegisters();
- if (!AllocationOk()) return false;
- AllocateDoubleRegisters();
- if (!AllocationOk()) return false;
- PopulatePointerMaps();
- ConnectRanges();
- ResolveControlFlow();
- return true;
-}
-
-
-void LAllocator::MeetRegisterConstraints() {
- LAllocatorPhase phase("L_Register constraints", this);
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int i = 0; i < blocks->length(); ++i) {
- HBasicBlock* block = blocks->at(i);
- MeetRegisterConstraints(block);
- if (!AllocationOk()) return;
- }
-}
-
-
-void LAllocator::ResolvePhis() {
- LAllocatorPhase phase("L_Resolve phis", this);
-
- // Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = blocks->at(block_id);
- ResolvePhis(block);
- }
-}
-
-
-void LAllocator::ResolveControlFlow(LiveRange* range,
- HBasicBlock* block,
- HBasicBlock* pred) {
- LifetimePosition pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
- LifetimePosition cur_start =
- LifetimePosition::FromInstructionIndex(block->first_instruction_index());
- LiveRange* pred_cover = NULL;
- LiveRange* cur_cover = NULL;
- LiveRange* cur_range = range;
- while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
- if (cur_range->CanCover(cur_start)) {
- DCHECK(cur_cover == NULL);
- cur_cover = cur_range;
- }
- if (cur_range->CanCover(pred_end)) {
- DCHECK(pred_cover == NULL);
- pred_cover = cur_range;
- }
- cur_range = cur_range->next();
- }
-
- if (cur_cover->IsSpilled()) return;
- DCHECK(pred_cover != NULL && cur_cover != NULL);
- if (pred_cover != cur_cover) {
- LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
- LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
- if (!pred_op->Equals(cur_op)) {
- LGap* gap = NULL;
- if (block->predecessors()->length() == 1) {
- gap = GapAt(block->first_instruction_index());
- } else {
- DCHECK(pred->end()->SecondSuccessor() == NULL);
- gap = GetLastGap(pred);
-
- // We are going to insert a move before the branch instruction.
- // Some branch instructions (e.g. loops' back edges)
- // can potentially cause a GC so they have a pointer map.
- // By inserting a move we essentially create a copy of a
- // value which is invisible to PopulatePointerMaps(), because we store
- // it into a location different from the operand of a live range
- // covering a branch instruction.
- // Thus we need to manually record a pointer.
- LInstruction* branch = InstructionAt(pred->last_instruction_index());
- if (branch->HasPointerMap()) {
- if (HasTaggedValue(range->id())) {
- branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
- } else if (!cur_op->IsDoubleStackSlot() &&
- !cur_op->IsDoubleRegister()) {
- branch->pointer_map()->RemovePointer(cur_op);
- }
- }
- }
- gap->GetOrCreateParallelMove(
- LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,
- chunk()->zone());
- }
- }
-}
-
-
-LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
- int index = pos.InstructionIndex();
- if (IsGapAt(index)) {
- LGap* gap = GapAt(index);
- return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone());
- }
- int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone());
-}
-
-
-HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
- LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
- return gap->block();
-}
-
-
-void LAllocator::ConnectRanges() {
- LAllocatorPhase phase("L_Connect ranges", this);
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* first_range = live_ranges()->at(i);
- if (first_range == NULL || first_range->parent() != NULL) continue;
-
- LiveRange* second_range = first_range->next();
- while (second_range != NULL) {
- LifetimePosition pos = second_range->Start();
-
- if (!second_range->IsSpilled()) {
- // Add gap move if the two live ranges touch and there is no block
- // boundary.
- if (first_range->End().Value() == pos.Value()) {
- bool should_insert = true;
- if (IsBlockBoundary(pos)) {
- should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
- }
- if (should_insert) {
- LParallelMove* move = GetConnectingParallelMove(pos);
- LOperand* prev_operand = first_range->CreateAssignedOperand(
- chunk()->zone());
- LOperand* cur_operand = second_range->CreateAssignedOperand(
- chunk()->zone());
- move->AddMove(prev_operand, cur_operand,
- chunk()->zone());
- }
- }
- }
-
- first_range = second_range;
- second_range = second_range->next();
- }
- }
-}
-
-
-bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
- if (block->predecessors()->length() != 1) return false;
- return block->predecessors()->first()->block_id() == block->block_id() - 1;
-}
-
-
-void LAllocator::ResolveControlFlow() {
- LAllocatorPhase phase("L_Resolve control flow", this);
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = 1; block_id < blocks->length(); ++block_id) {
- HBasicBlock* block = blocks->at(block_id);
- if (CanEagerlyResolveControlFlow(block)) continue;
- BitVector* live = live_in_sets_[block->block_id()];
- BitVector::Iterator iterator(live);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- for (int i = 0; i < block->predecessors()->length(); ++i) {
- HBasicBlock* cur = block->predecessors()->at(i);
- LiveRange* cur_range = LiveRangeFor(operand_index);
- ResolveControlFlow(cur_range, block, cur);
- }
- iterator.Advance();
- }
- }
-}
-
-
-void LAllocator::BuildLiveRanges() {
- LAllocatorPhase phase("L_Build live ranges", this);
- InitializeLivenessAnalysis();
- // Process the blocks in reverse order.
- const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
- for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
- HBasicBlock* block = blocks->at(block_id);
- BitVector* live = ComputeLiveOut(block);
- // Initially consider all live_out values live for the entire block. We
- // will shorten these intervals if necessary.
- AddInitialIntervals(block, live);
-
- // Process the instructions in reverse order, generating and killing
- // live values.
- ProcessInstructions(block, live);
- // All phi output operands are killed by this block.
- const ZoneList<HPhi*>* phis = block->phis();
- for (int i = 0; i < phis->length(); ++i) {
- // The live range interval already ends at the first instruction of the
- // block.
- HPhi* phi = phis->at(i);
- live->Remove(phi->id());
-
- LOperand* hint = NULL;
- LOperand* phi_operand = NULL;
- LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
- LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
- chunk()->zone());
- for (int j = 0; j < move->move_operands()->length(); ++j) {
- LOperand* to = move->move_operands()->at(j).destination();
- if (to->IsUnallocated() &&
- LUnallocated::cast(to)->virtual_register() == phi->id()) {
- hint = move->move_operands()->at(j).source();
- phi_operand = to;
- break;
- }
- }
- DCHECK(hint != NULL);
-
- LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- Define(block_start, phi_operand, hint);
- }
-
- // Now live is live_in for this block, except that it does not include
- // values that are live out on backward successor edges.
- live_in_sets_[block_id] = live;
-
- // If this block is a loop header go back and patch up the necessary
- // predecessor blocks.
- if (block->IsLoopHeader()) {
- // TODO(kmillikin): Need to be able to get the last block of the loop
- // in the loop information. Add a live range stretching from the first
- // loop instruction to the last for each value live on entry to the
- // header.
- HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
- BitVector::Iterator iterator(live);
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- LifetimePosition end = LifetimePosition::FromInstructionIndex(
- back_edge->last_instruction_index()).NextInstruction();
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- LiveRange* range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end, zone());
- iterator.Advance();
- }
-
- for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
- live_in_sets_[i]->Union(*live);
- }
- }
-
-#ifdef DEBUG
- if (block_id == 0) {
- BitVector::Iterator iterator(live);
- bool found = false;
- while (!iterator.Done()) {
- found = true;
- int operand_index = iterator.Current();
- {
- AllowHandleDereference allow_deref;
- PrintF("Function: %s\n", chunk_->info()->GetDebugName().get());
- }
- PrintF("Value %d used before first definition!\n", operand_index);
- LiveRange* range = LiveRangeFor(operand_index);
- PrintF("First use is at %d\n", range->first_pos()->pos().Value());
- iterator.Advance();
- }
- DCHECK(!found);
- }
-#endif
- }
-
- for (int i = 0; i < live_ranges_.length(); ++i) {
- if (live_ranges_[i] != NULL) {
- live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
- }
- }
-}
-
-
-bool LAllocator::SafePointsAreInOrder() const {
- const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
- int safe_point = 0;
- for (int i = 0; i < pointer_maps->length(); ++i) {
- LPointerMap* map = pointer_maps->at(i);
- if (safe_point > map->lithium_position()) return false;
- safe_point = map->lithium_position();
- }
- return true;
-}
-
-
-void LAllocator::PopulatePointerMaps() {
- LAllocatorPhase phase("L_Populate pointer maps", this);
- const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
-
- DCHECK(SafePointsAreInOrder());
-
- // Iterate over all safe point positions and record a pointer
- // for all spilled live ranges at this point.
- int first_safe_point_index = 0;
- int last_range_start = 0;
- for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
- LiveRange* range = live_ranges()->at(range_idx);
- if (range == NULL) continue;
- // Iterate over the first parts of multi-part live ranges.
- if (range->parent() != NULL) continue;
- // Skip non-pointer values.
- if (!HasTaggedValue(range->id())) continue;
- // Skip empty live ranges.
- if (range->IsEmpty()) continue;
-
- // Find the extent of the range and its children.
- int start = range->Start().InstructionIndex();
- int end = 0;
- for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
- LifetimePosition this_end = cur->End();
- if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
- DCHECK(cur->Start().InstructionIndex() >= start);
- }
-
- // Most of the ranges are in order, but not all. Keep an eye on when
- // they step backwards and reset the first_safe_point_index so we don't
- // miss any safe points.
- if (start < last_range_start) {
- first_safe_point_index = 0;
- }
- last_range_start = start;
-
- // Step across all the safe points that are before the start of this range,
- // recording how far we step so we do not redo this work for the next range.
- while (first_safe_point_index < pointer_maps->length()) {
- LPointerMap* map = pointer_maps->at(first_safe_point_index);
- int safe_point = map->lithium_position();
- if (safe_point >= start) break;
- first_safe_point_index++;
- }
-
- // Step through the safe points to see whether they are in the range.
- for (int safe_point_index = first_safe_point_index;
- safe_point_index < pointer_maps->length();
- ++safe_point_index) {
- LPointerMap* map = pointer_maps->at(safe_point_index);
- int safe_point = map->lithium_position();
-
- // The safe points are sorted so we can stop searching here.
- if (safe_point - 1 > end) break;
-
- // Advance to the next active range that covers the current
- // safe point position.
- LifetimePosition safe_point_pos =
- LifetimePosition::FromInstructionIndex(safe_point);
- LiveRange* cur = range;
- while (cur != NULL && !cur->Covers(safe_point_pos)) {
- cur = cur->next();
- }
- if (cur == NULL) continue;
-
- // Check if the live range is spilled and the safe point is after
- // the spill position.
- if (range->HasAllocatedSpillOperand() &&
- safe_point >= range->spill_start_index()) {
- TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand(), chunk()->zone());
- }
-
- if (!cur->IsSpilled()) {
- TraceAlloc("Pointer in register for range %d (start at %d) "
- "at safe point %d\n",
- cur->id(), cur->Start().Value(), safe_point);
- LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
- DCHECK(!operand->IsStackSlot());
- map->RecordPointer(operand, chunk()->zone());
- }
- }
- }
-}
-
-
-void LAllocator::AllocateGeneralRegisters() {
- LAllocatorPhase phase("L_Allocate general registers", this);
- num_registers_ = GetRegConfig()->num_allocatable_general_registers();
- allocatable_register_codes_ = GetRegConfig()->allocatable_general_codes();
- mode_ = GENERAL_REGISTERS;
- AllocateRegisters();
-}
-
-
-void LAllocator::AllocateDoubleRegisters() {
- LAllocatorPhase phase("L_Allocate double registers", this);
- num_registers_ = GetRegConfig()->num_allocatable_double_registers();
- allocatable_register_codes_ = GetRegConfig()->allocatable_double_codes();
- mode_ = DOUBLE_REGISTERS;
- AllocateRegisters();
-}
-
-
-void LAllocator::AllocateRegisters() {
- DCHECK(unhandled_live_ranges_.is_empty());
-
- for (int i = 0; i < live_ranges_.length(); ++i) {
- if (live_ranges_[i] != NULL) {
- if (live_ranges_[i]->Kind() == mode_) {
- AddToUnhandledUnsorted(live_ranges_[i]);
- }
- }
- }
- SortUnhandled();
- DCHECK(UnhandledIsSorted());
-
- DCHECK(reusable_slots_.is_empty());
- DCHECK(active_live_ranges_.is_empty());
- DCHECK(inactive_live_ranges_.is_empty());
-
- if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
- LiveRange* current = fixed_double_live_ranges_.at(i);
- if (current != NULL) {
- AddToInactive(current);
- }
- }
- } else {
- DCHECK(mode_ == GENERAL_REGISTERS);
- for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
- LiveRange* current = fixed_live_ranges_.at(i);
- if (current != NULL) {
- AddToInactive(current);
- }
- }
- }
-
- while (!unhandled_live_ranges_.is_empty()) {
- DCHECK(UnhandledIsSorted());
- LiveRange* current = unhandled_live_ranges_.RemoveLast();
- DCHECK(UnhandledIsSorted());
- LifetimePosition position = current->Start();
-#ifdef DEBUG
- allocation_finger_ = position;
-#endif
- TraceAlloc("Processing interval %d start=%d\n",
- current->id(),
- position.Value());
-
- if (current->HasAllocatedSpillOperand()) {
- TraceAlloc("Live range %d already has a spill operand\n", current->id());
- LifetimePosition next_pos = position;
- if (IsGapAt(next_pos.InstructionIndex())) {
- next_pos = next_pos.NextInstruction();
- }
- UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == NULL) {
- Spill(current);
- continue;
- } else if (pos->pos().Value() >
- current->Start().NextInstruction().Value()) {
- // Do not spill the live range eagerly if the use position that can
- // benefit from the register is too close to the start of the live range.
- SpillBetween(current, current->Start(), pos->pos());
- if (!AllocationOk()) return;
- DCHECK(UnhandledIsSorted());
- continue;
- }
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* cur_active = active_live_ranges_.at(i);
- if (cur_active->End().Value() <= position.Value()) {
- ActiveToHandled(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- } else if (!cur_active->Covers(position)) {
- ActiveToInactive(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- if (cur_inactive->End().Value() <= position.Value()) {
- InactiveToHandled(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- } else if (cur_inactive->Covers(position)) {
- InactiveToActive(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- }
- }
-
- DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
-
- bool result = TryAllocateFreeReg(current);
- if (!AllocationOk()) return;
-
- if (!result) AllocateBlockedReg(current);
- if (!AllocationOk()) return;
-
- if (current->HasRegisterAssigned()) {
- AddToActive(current);
- }
- }
-
- reusable_slots_.Rewind(0);
- active_live_ranges_.Rewind(0);
- inactive_live_ranges_.Rewind(0);
-}
-
-
-const char* LAllocator::RegisterName(int allocation_index) {
- if (mode_ == GENERAL_REGISTERS) {
- return GetRegConfig()->GetGeneralRegisterName(allocation_index);
- } else {
- return GetRegConfig()->GetDoubleRegisterName(allocation_index);
- }
-}
-
-
-void LAllocator::TraceAlloc(const char* msg, ...) {
- if (FLAG_trace_alloc) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-bool LAllocator::HasTaggedValue(int virtual_register) const {
- HValue* value = graph_->LookupValue(virtual_register);
- if (value == NULL) return false;
- return value->representation().IsTagged() && !value->type().IsSmi();
-}
-
-
-RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
- if (virtual_register < first_artificial_register_) {
- HValue* value = graph_->LookupValue(virtual_register);
- if (value != NULL && value->representation().IsDouble()) {
- return DOUBLE_REGISTERS;
- }
- } else if (double_artificial_registers_.Contains(
- virtual_register - first_artificial_register_)) {
- return DOUBLE_REGISTERS;
- }
-
- return GENERAL_REGISTERS;
-}
-
-
-void LAllocator::AddToActive(LiveRange* range) {
- TraceAlloc("Add live range %d to active\n", range->id());
- active_live_ranges_.Add(range, zone());
-}
-
-
-void LAllocator::AddToInactive(LiveRange* range) {
- TraceAlloc("Add live range %d to inactive\n", range->id());
- inactive_live_ranges_.Add(range, zone());
-}
-
-
-void LAllocator::AddToUnhandledSorted(LiveRange* range) {
- if (range == NULL || range->IsEmpty()) return;
- DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
- DCHECK(allocation_finger_.Value() <= range->Start().Value());
- for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
- LiveRange* cur_range = unhandled_live_ranges_.at(i);
- if (range->ShouldBeAllocatedBefore(cur_range)) {
- TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
- unhandled_live_ranges_.InsertAt(i + 1, range, zone());
- DCHECK(UnhandledIsSorted());
- return;
- }
- }
- TraceAlloc("Add live range %d to unhandled at start\n", range->id());
- unhandled_live_ranges_.InsertAt(0, range, zone());
- DCHECK(UnhandledIsSorted());
-}
-
-
-void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
- if (range == NULL || range->IsEmpty()) return;
- DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
- TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
- unhandled_live_ranges_.Add(range, zone());
-}
-
-
-static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
- DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
- !(*b)->ShouldBeAllocatedBefore(*a));
- if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
- if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
- return (*a)->id() - (*b)->id();
-}
-
-
-// Sort the unhandled live ranges so that the ranges to be processed first are
-// at the end of the array list. This is convenient for the register allocation
-// algorithm because it is efficient to remove elements from the end.
-void LAllocator::SortUnhandled() {
- TraceAlloc("Sort unhandled\n");
- unhandled_live_ranges_.Sort(&UnhandledSortHelper);
-}
-
-
-bool LAllocator::UnhandledIsSorted() {
- int len = unhandled_live_ranges_.length();
- for (int i = 1; i < len; i++) {
- LiveRange* a = unhandled_live_ranges_.at(i - 1);
- LiveRange* b = unhandled_live_ranges_.at(i);
- if (a->Start().Value() < b->Start().Value()) return false;
- }
- return true;
-}
-
-
-void LAllocator::FreeSpillSlot(LiveRange* range) {
- // Check that we are the last range.
- if (range->next() != NULL) return;
-
- if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
-
- int index = range->TopLevel()->GetSpillOperand()->index();
- if (index >= 0) {
- reusable_slots_.Add(range, zone());
- }
-}
-
-
-LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
- if (reusable_slots_.is_empty()) return NULL;
- if (reusable_slots_.first()->End().Value() >
- range->TopLevel()->Start().Value()) {
- return NULL;
- }
- LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
- reusable_slots_.Remove(0);
- return result;
-}
-
-
-void LAllocator::ActiveToHandled(LiveRange* range) {
- DCHECK(active_live_ranges_.Contains(range));
- active_live_ranges_.RemoveElement(range);
- TraceAlloc("Moving live range %d from active to handled\n", range->id());
- FreeSpillSlot(range);
-}
-
-
-void LAllocator::ActiveToInactive(LiveRange* range) {
- DCHECK(active_live_ranges_.Contains(range));
- active_live_ranges_.RemoveElement(range);
- inactive_live_ranges_.Add(range, zone());
- TraceAlloc("Moving live range %d from active to inactive\n", range->id());
-}
-
-
-void LAllocator::InactiveToHandled(LiveRange* range) {
- DCHECK(inactive_live_ranges_.Contains(range));
- inactive_live_ranges_.RemoveElement(range);
- TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
- FreeSpillSlot(range);
-}
-
-
-void LAllocator::InactiveToActive(LiveRange* range) {
- DCHECK(inactive_live_ranges_.Contains(range));
- inactive_live_ranges_.RemoveElement(range);
- active_live_ranges_.Add(range, zone());
- TraceAlloc("Moving live range %d from inactive to active\n", range->id());
-}
-
-
-bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- DCHECK(DoubleRegister::kMaxNumRegisters >= Register::kNumRegisters);
-
- LifetimePosition free_until_pos[DoubleRegister::kMaxNumRegisters];
-
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- free_until_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* cur_active = active_live_ranges_.at(i);
- free_until_pos[cur_active->assigned_register()] =
- LifetimePosition::FromInstructionIndex(0);
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- DCHECK(cur_inactive->End().Value() > current->Start().Value());
- LifetimePosition next_intersection =
- cur_inactive->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = cur_inactive->assigned_register();
- free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
- }
-
- LOperand* hint = current->FirstHint();
- if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index),
- free_until_pos[register_index].Value(),
- current->id(),
- current->End().Value());
-
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index),
- current->id());
- SetLiveRangeAssignedRegister(current, register_index);
- return true;
- }
- }
-
- // Find the register which stays free for the longest time.
- int reg = allocatable_register_codes_[0];
- for (int i = 1; i < RegisterCount(); ++i) {
- int code = allocatable_register_codes_[i];
- if (free_until_pos[code].Value() > free_until_pos[reg].Value()) {
- reg = code;
- }
- }
-
- LifetimePosition pos = free_until_pos[reg];
-
- if (pos.Value() <= current->Start().Value()) {
- // All registers are blocked.
- return false;
- }
-
- if (pos.Value() < current->End().Value()) {
- // Register reg is available at the range start but becomes blocked before
- // the range end. Split current at position where it becomes blocked.
- LiveRange* tail = SplitRangeAt(current, pos);
- if (!AllocationOk()) return false;
- AddToUnhandledSorted(tail);
- }
-
-
- // Register reg is available at the range start and is free until
- // the range end.
- DCHECK(pos.Value() >= current->End().Value());
- TraceAlloc("Assigning free reg %s to live range %d\n",
- RegisterName(reg),
- current->id());
- SetLiveRangeAssignedRegister(current, reg);
-
- return true;
-}
-
-
-void LAllocator::AllocateBlockedReg(LiveRange* current) {
- UsePosition* register_use = current->NextRegisterPosition(current->Start());
- if (register_use == NULL) {
- // There is no use in the current live range that requires a register.
- // We can just spill it.
- Spill(current);
- return;
- }
-
-
- LifetimePosition use_pos[DoubleRegister::kMaxNumRegisters];
- LifetimePosition block_pos[DoubleRegister::kMaxNumRegisters];
-
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* range = active_live_ranges_[i];
- int cur_reg = range->assigned_register();
- if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
- block_pos[cur_reg] = use_pos[cur_reg] =
- LifetimePosition::FromInstructionIndex(0);
- } else {
- UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
- current->Start());
- if (next_use == NULL) {
- use_pos[cur_reg] = range->End();
- } else {
- use_pos[cur_reg] = next_use->pos();
- }
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* range = inactive_live_ranges_.at(i);
- DCHECK(range->End().Value() > current->Start().Value());
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = range->assigned_register();
- if (range->IsFixed()) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
- } else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
- }
- }
-
- int reg = allocatable_register_codes_[0];
- for (int i = 1; i < RegisterCount(); ++i) {
- int code = allocatable_register_codes_[i];
- if (use_pos[code].Value() > use_pos[reg].Value()) {
- reg = code;
- }
- }
-
- LifetimePosition pos = use_pos[reg];
-
- if (pos.Value() < register_use->pos().Value()) {
- // All registers are blocked before the first use that requires a register.
-    // Spill the starting part of the live range up to that use.
- SpillBetween(current, current->Start(), register_use->pos());
- return;
- }
-
- if (block_pos[reg].Value() < current->End().Value()) {
- // Register becomes blocked before the current range end. Split before that
- // position.
- LiveRange* tail = SplitBetween(current,
- current->Start(),
- block_pos[reg].InstructionStart());
- if (!AllocationOk()) return;
- AddToUnhandledSorted(tail);
- }
-
- // Register reg is not blocked for the whole range.
- DCHECK(block_pos[reg].Value() >= current->End().Value());
- TraceAlloc("Assigning blocked reg %s to live range %d\n",
- RegisterName(reg),
- current->id());
- SetLiveRangeAssignedRegister(current, reg);
-
-  // This register was not free. Thus we need to find and spill
-  // parts of active and inactive live ranges that use the same register
-  // at the same lifetime positions as current.
- SplitAndSpillIntersecting(current);
-}
-
-
-LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range,
- LifetimePosition pos) {
- HBasicBlock* block = GetBlock(pos.InstructionStart());
- HBasicBlock* loop_header =
- block->IsLoopHeader() ? block : block->parent_loop_header();
-
- if (loop_header == NULL) return pos;
-
- UsePosition* prev_use =
- range->PreviousUsePositionRegisterIsBeneficial(pos);
-
- while (loop_header != NULL) {
-    // We are going to spill the live range inside the loop.
-    // If possible, try to move the spilling position backwards to the loop
-    // header. This will reduce the number of memory moves on the back edge.
- LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
- loop_header->first_instruction_index());
-
- if (range->Covers(loop_start)) {
- if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
-        // No register-beneficial use inside the loop before this position.
- pos = loop_start;
- }
- }
-
- // Try hoisting out to an outer loop.
- loop_header = loop_header->parent_loop_header();
- }
-
- return pos;
-}
-
-
-void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
- DCHECK(current->HasRegisterAssigned());
- int reg = current->assigned_register();
- LifetimePosition split_pos = current->Start();
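-  // Evict every other range that currently holds |reg| where it overlaps with
-  // the current range: conflicting active ranges are spilled (or split and
-  // spilled), and overlapping non-fixed inactive ranges assigned to |reg| are
-  // split around the intersection.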
- for (int i = 0; i < active_live_ranges_.length(); ++i) {
- LiveRange* range = active_live_ranges_[i];
- if (range->assigned_register() == reg) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
- if (next_pos == NULL) {
- SpillAfter(range, spill_pos);
- } else {
-        // When spilling between spill_pos and next_pos, ensure that the range
-        // remains spilled at least until the start of the current live range.
-        // This guarantees that we will not introduce new unhandled ranges that
-        // start before the current range, as that would violate the allocation
-        // invariant and lead to an inconsistent state of the active and
-        // inactive live ranges: ranges are allocated in order of their start
-        // positions, and are retired from active/inactive once the start of
-        // the current live range is past their end.
- SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
- }
- if (!AllocationOk()) return;
- ActiveToHandled(range);
- --i;
- }
- }
-
- for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
- LiveRange* range = inactive_live_ranges_[i];
- DCHECK(range->End().Value() > current->Start().Value());
- if (range->assigned_register() == reg && !range->IsFixed()) {
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == NULL) {
- SpillAfter(range, split_pos);
- } else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
- }
- if (!AllocationOk()) return;
- InactiveToHandled(range);
- --i;
- }
- }
- }
-}
-
-
-bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
- return pos.IsInstructionStart() &&
- InstructionAt(pos.InstructionIndex())->IsLabel();
-}
-
-
-LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
- DCHECK(!range->IsFixed());
- TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
-
- if (pos.Value() <= range->Start().Value()) return range;
-
-  // We can't properly connect live ranges if the split occurred at the end
-  // of a control instruction.
- DCHECK(pos.IsInstructionStart() ||
- !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
-
- int vreg = GetVirtualRegister();
- if (!AllocationOk()) return NULL;
- LiveRange* result = LiveRangeFor(vreg);
- range->SplitAt(pos, result, zone());
- return result;
-}
-
-
-LiveRange* LAllocator::SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- DCHECK(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
- range->id(),
- start.Value(),
- end.Value());
-
- LifetimePosition split_pos = FindOptimalSplitPos(start, end);
- DCHECK(split_pos.Value() >= start.Value());
- return SplitRangeAt(range, split_pos);
-}
-
-
-LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end) {
- int start_instr = start.InstructionIndex();
- int end_instr = end.InstructionIndex();
- DCHECK(start_instr <= end_instr);
-
- // We have no choice
- if (start_instr == end_instr) return end;
-
- HBasicBlock* start_block = GetBlock(start);
- HBasicBlock* end_block = GetBlock(end);
-
- if (end_block == start_block) {
- // The interval is split in the same basic block. Split at the latest
- // possible position.
- return end;
- }
-
- HBasicBlock* block = end_block;
- // Find header of outermost loop.
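-  // Preferring such a loop header keeps the location change at the loop
-  // boundary rather than inside the loop, which is intended to avoid extra
-  // moves in the loop body.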
- while (block->parent_loop_header() != NULL &&
- block->parent_loop_header()->block_id() > start_block->block_id()) {
- block = block->parent_loop_header();
- }
-
- // We did not find any suitable outer loop. Split at the latest possible
- // position unless end_block is a loop header itself.
- if (block == end_block && !end_block->IsLoopHeader()) return end;
-
- return LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
-}
-
-
-void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- LiveRange* second_part = SplitRangeAt(range, pos);
- if (!AllocationOk()) return;
- Spill(second_part);
-}
-
-
-void LAllocator::SpillBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- SpillBetweenUntil(range, start, start, end);
-}
-
-
-void LAllocator::SpillBetweenUntil(LiveRange* range,
- LifetimePosition start,
- LifetimePosition until,
- LifetimePosition end) {
- CHECK(start.Value() < end.Value());
- LiveRange* second_part = SplitRangeAt(range, start);
- if (!AllocationOk()) return;
-
- if (second_part->Start().Value() < end.Value()) {
-    // The split result intersects with [start, end[.
-    // Split it at a position in ]start+1, end[, spill the middle part
-    // and put the rest back into the unhandled list.
- LiveRange* third_part = SplitBetween(
- second_part,
- Max(second_part->Start().InstructionEnd(), until),
- end.PrevInstruction().InstructionEnd());
- if (!AllocationOk()) return;
-
- DCHECK(third_part != second_part);
-
- Spill(second_part);
- AddToUnhandledSorted(third_part);
- } else {
-    // The split result does not intersect with [start, end[.
-    // Nothing to spill. Just put it back into the unhandled list as a whole.
- AddToUnhandledSorted(second_part);
- }
-}
-
-
-void LAllocator::Spill(LiveRange* range) {
- DCHECK(!range->IsSpilled());
- TraceAlloc("Spilling live range %d\n", range->id());
- LiveRange* first = range->TopLevel();
-
- if (!first->HasAllocatedSpillOperand()) {
- LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
- first->SetSpillOperand(op);
- }
- range->MakeSpilled(chunk()->zone());
-}
-
-
-int LAllocator::RegisterCount() const {
- return num_registers_;
-}
-
-
-#ifdef DEBUG
-
-
-void LAllocator::Verify() const {
- for (int i = 0; i < live_ranges()->length(); ++i) {
- LiveRange* current = live_ranges()->at(i);
- if (current != NULL) current->Verify();
- }
-}
-
-
-#endif
-
-
-LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
- : CompilationPhase(name, allocator->graph()->info()),
- allocator_(allocator) {
- if (FLAG_hydrogen_stats) {
- allocator_zone_start_allocation_size_ =
- allocator->zone()->allocation_size();
- }
-}
-
-
-LAllocatorPhase::~LAllocatorPhase() {
- if (FLAG_hydrogen_stats) {
- size_t size = allocator_->zone()->allocation_size() -
- allocator_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
- }
-
- if (ShouldProduceTraceOutput()) {
- isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());
- isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_);
- }
-
-#ifdef DEBUG
- if (allocator_ != NULL) allocator_->Verify();
-#endif
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/lithium-allocator.h b/deps/v8/src/crankshaft/lithium-allocator.h
deleted file mode 100644
index d28ad7f9e7..0000000000
--- a/deps/v8/src/crankshaft/lithium-allocator.h
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
-#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
-
-#include "src/allocation.h"
-#include "src/base/compiler-specific.h"
-#include "src/crankshaft/compilation-phase.h"
-#include "src/crankshaft/lithium.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HBasicBlock;
-class HGraph;
-class HPhi;
-class HTracer;
-class HValue;
-class BitVector;
-class StringStream;
-
-class LPlatformChunk;
-class LOperand;
-class LUnallocated;
-class LGap;
-class LParallelMove;
-class LPointerMap;
-
-
-// This class represents a single point of a LOperand's lifetime.
-// For each lithium instruction there are exactly two lifetime positions:
-// the beginning and the end of the instruction. Lifetime positions for
-// different lithium instructions are disjoint.
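-// With kStep == 2 this gives instruction i the start position 2 * i and the
-// end position 2 * i + 1; for example, instruction 3 occupies positions 6
-// and 7, and NextInstruction() of either returns position 8.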
-class LifetimePosition {
- public:
- // Return the lifetime position that corresponds to the beginning of
- // the instruction with the given index.
- static LifetimePosition FromInstructionIndex(int index) {
- return LifetimePosition(index * kStep);
- }
-
- // Returns a numeric representation of this lifetime position.
- int Value() const {
- return value_;
- }
-
- // Returns the index of the instruction to which this lifetime position
- // corresponds.
- int InstructionIndex() const {
- DCHECK(IsValid());
- return value_ / kStep;
- }
-
- // Returns true if this lifetime position corresponds to the instruction
- // start.
- bool IsInstructionStart() const {
- return (value_ & (kStep - 1)) == 0;
- }
-
- // Returns the lifetime position for the start of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionStart() const {
- DCHECK(IsValid());
- return LifetimePosition(value_ & ~(kStep - 1));
- }
-
- // Returns the lifetime position for the end of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionEnd() const {
- DCHECK(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep/2);
- }
-
- // Returns the lifetime position for the beginning of the next instruction.
- LifetimePosition NextInstruction() const {
- DCHECK(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep);
- }
-
- // Returns the lifetime position for the beginning of the previous
- // instruction.
- LifetimePosition PrevInstruction() const {
- DCHECK(IsValid());
- DCHECK(value_ > 1);
- return LifetimePosition(InstructionStart().Value() - kStep);
- }
-
-  // Constructs an invalid lifetime position which does not correspond to any
-  // instruction.
- LifetimePosition() : value_(-1) {}
-
-  // Returns true if this lifetime position corresponds to some
-  // instruction.
- bool IsValid() const { return value_ != -1; }
-
- static inline LifetimePosition Invalid() { return LifetimePosition(); }
-
- static inline LifetimePosition MaxPosition() {
-    // We have to use this kind of getter instead of a static member due to
-    // a crash bug in GDB.
- return LifetimePosition(kMaxInt);
- }
-
- private:
- static const int kStep = 2;
-
- // Code relies on kStep being a power of two.
- STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
-
- explicit LifetimePosition(int value) : value_(value) { }
-
- int value_;
-};
-
-
-// Representation of the non-empty interval [start,end[.
-class UseInterval: public ZoneObject {
- public:
- UseInterval(LifetimePosition start, LifetimePosition end)
- : start_(start), end_(end), next_(NULL) {
- DCHECK(start.Value() < end.Value());
- }
-
- LifetimePosition start() const { return start_; }
- LifetimePosition end() const { return end_; }
- UseInterval* next() const { return next_; }
-
-  // Split this interval at the given position without affecting the
-  // live range that owns it. The interval must contain the position.
- void SplitAt(LifetimePosition pos, Zone* zone);
-
-  // If this interval intersects with the other, returns the smallest position
-  // that belongs to both of them.
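-  // Because intervals are half-open, the two intervals intersect exactly when
-  // the later-starting one begins before the earlier-starting one ends, and
-  // that later start is then the first common position.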
- LifetimePosition Intersect(const UseInterval* other) const {
- if (other->start().Value() < start_.Value()) return other->Intersect(this);
- if (other->start().Value() < end_.Value()) return other->start();
- return LifetimePosition::Invalid();
- }
-
- bool Contains(LifetimePosition point) const {
- return start_.Value() <= point.Value() && point.Value() < end_.Value();
- }
-
- private:
- void set_start(LifetimePosition start) { start_ = start; }
- void set_next(UseInterval* next) { next_ = next; }
-
- LifetimePosition start_;
- LifetimePosition end_;
- UseInterval* next_;
-
- friend class LiveRange; // Assigns to start_.
-};
-
-// Representation of a use position.
-class UsePosition: public ZoneObject {
- public:
- UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
-
- LOperand* operand() const { return operand_; }
- bool HasOperand() const { return operand_ != NULL; }
-
- LOperand* hint() const { return hint_; }
- bool HasHint() const;
- bool RequiresRegister() const;
- bool RegisterIsBeneficial() const;
-
- LifetimePosition pos() const { return pos_; }
- UsePosition* next() const { return next_; }
-
- private:
- void set_next(UsePosition* next) { next_ = next; }
-
- LOperand* const operand_;
- LOperand* const hint_;
- LifetimePosition const pos_;
- UsePosition* next_;
- bool requires_reg_;
- bool register_beneficial_;
-
- friend class LiveRange;
-};
-
-// Representation of SSA values' live ranges as a collection of (continuous)
-// intervals over the instruction ordering.
-class LiveRange: public ZoneObject {
- public:
- static const int kInvalidAssignment = 0x7fffffff;
-
- LiveRange(int id, Zone* zone);
-
- UseInterval* first_interval() const { return first_interval_; }
- UsePosition* first_pos() const { return first_pos_; }
- LiveRange* parent() const { return parent_; }
- LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
- LiveRange* next() const { return next_; }
- bool IsChild() const { return parent() != NULL; }
- int id() const { return id_; }
- bool IsFixed() const { return id_ < 0; }
- bool IsEmpty() const { return first_interval() == NULL; }
- LOperand* CreateAssignedOperand(Zone* zone);
- int assigned_register() const { return assigned_register_; }
- int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, Zone* zone);
- void MakeSpilled(Zone* zone);
-
- // Returns use position in this live range that follows both start
- // and last processed use position.
- // Modifies internal state of live range!
- UsePosition* NextUsePosition(LifetimePosition start);
-
- // Returns use position for which register is required in this live
- // range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextRegisterPosition(LifetimePosition start);
-
- // Returns use position for which register is beneficial in this live
- // range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
-
- // Returns use position for which register is beneficial in this live
- // range and which precedes start.
- UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
-
-  // Returns whether this live range can be spilled at the given position.
- bool CanBeSpilled(LifetimePosition pos);
-
- // Split this live range at the given position which must follow the start of
- // the range.
- // All uses following the given position will be moved from this
- // live range to the result live range.
- void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
-
- RegisterKind Kind() const { return kind_; }
- bool HasRegisterAssigned() const {
- return assigned_register_ != kInvalidAssignment;
- }
- bool IsSpilled() const { return spilled_; }
-
- LOperand* current_hint_operand() const {
- DCHECK(current_hint_operand_ == FirstHint());
- return current_hint_operand_;
- }
- LOperand* FirstHint() const {
- UsePosition* pos = first_pos_;
- while (pos != NULL && !pos->HasHint()) pos = pos->next();
- if (pos != NULL) return pos->hint();
- return NULL;
- }
-
- LifetimePosition Start() const {
- DCHECK(!IsEmpty());
- return first_interval()->start();
- }
-
- LifetimePosition End() const {
- DCHECK(!IsEmpty());
- return last_interval_->end();
- }
-
- bool HasAllocatedSpillOperand() const;
- LOperand* GetSpillOperand() const { return spill_operand_; }
- void SetSpillOperand(LOperand* operand);
-
- void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
- }
-
- bool ShouldBeAllocatedBefore(const LiveRange* other) const;
- bool CanCover(LifetimePosition position) const;
- bool Covers(LifetimePosition position);
- LifetimePosition FirstIntersection(LiveRange* other);
-
- // Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone);
- void AddUseInterval(LifetimePosition start,
- LifetimePosition end,
- Zone* zone);
- void AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- LOperand* hint,
- Zone* zone);
-
- // Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
-
-#ifdef DEBUG
- // True if target overlaps an existing interval.
- bool HasOverlap(UseInterval* target) const;
- void Verify() const;
-#endif
-
- private:
- void ConvertOperands(Zone* zone);
- UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
- void AdvanceLastProcessedMarker(UseInterval* to_start_of,
- LifetimePosition but_not_past) const;
-
- int id_;
- bool spilled_;
- RegisterKind kind_;
- int assigned_register_;
- UseInterval* last_interval_;
- UseInterval* first_interval_;
- UsePosition* first_pos_;
- LiveRange* parent_;
- LiveRange* next_;
-  // This is used as a cache; it doesn't affect correctness.
- mutable UseInterval* current_interval_;
- UsePosition* last_processed_use_;
-  // This is used as a cache; it's invalid outside of BuildLiveRanges.
- LOperand* current_hint_operand_;
- LOperand* spill_operand_;
- int spill_start_index_;
-
- friend class LAllocator; // Assigns to kind_.
-};
-
-
-class LAllocator BASE_EMBEDDED {
- public:
- LAllocator(int first_virtual_register, HGraph* graph);
-
- static PRINTF_FORMAT(1, 2) void TraceAlloc(const char* msg, ...);
-
- // Checks whether the value of a given virtual register is tagged.
- bool HasTaggedValue(int virtual_register) const;
-
- // Returns the register kind required by the given virtual register.
- RegisterKind RequiredRegisterKind(int virtual_register) const;
-
- bool Allocate(LChunk* chunk);
-
- const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
- const Vector<LiveRange*>* fixed_live_ranges() const {
- return &fixed_live_ranges_;
- }
- const Vector<LiveRange*>* fixed_double_live_ranges() const {
- return &fixed_double_live_ranges_;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const { return graph_->isolate(); }
- Zone* zone() { return &zone_; }
-
- int GetVirtualRegister() {
- if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
- allocation_ok_ = false;
- // Maintain the invariant that we return something below the maximum.
- return 0;
- }
- return next_virtual_register_++;
- }
-
- bool AllocationOk() { return allocation_ok_; }
-
- void MarkAsOsrEntry() {
- // There can be only one.
- DCHECK(!has_osr_entry_);
-    // Simply set a flag to find and process the instruction later.
- has_osr_entry_ = true;
- }
-
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- BitVector* assigned_registers() {
- return assigned_registers_;
- }
- BitVector* assigned_double_registers() {
- return assigned_double_registers_;
- }
-
- private:
- void MeetRegisterConstraints();
- void ResolvePhis();
- void BuildLiveRanges();
- void AllocateGeneralRegisters();
- void AllocateDoubleRegisters();
- void ConnectRanges();
- void ResolveControlFlow();
- void PopulatePointerMaps();
- void AllocateRegisters();
- bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
- inline bool SafePointsAreInOrder() const;
-
- // Liveness analysis support.
- void InitializeLivenessAnalysis();
- BitVector* ComputeLiveOut(HBasicBlock* block);
- void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
- void ProcessInstructions(HBasicBlock* block, BitVector* live);
- void MeetRegisterConstraints(HBasicBlock* block);
- void MeetConstraintsBetween(LInstruction* first,
- LInstruction* second,
- int gap_index);
- void ResolvePhis(HBasicBlock* block);
-
- // Helper methods for building intervals.
- LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
- LiveRange* LiveRangeFor(LOperand* operand);
- void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
- void Use(LifetimePosition block_start,
- LifetimePosition position,
- LOperand* operand,
- LOperand* hint);
- void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
-
-  // Helper methods for updating the live range lists.
- void AddToActive(LiveRange* range);
- void AddToInactive(LiveRange* range);
- void AddToUnhandledSorted(LiveRange* range);
- void AddToUnhandledUnsorted(LiveRange* range);
- void SortUnhandled();
- bool UnhandledIsSorted();
- void ActiveToHandled(LiveRange* range);
- void ActiveToInactive(LiveRange* range);
- void InactiveToHandled(LiveRange* range);
- void InactiveToActive(LiveRange* range);
- void FreeSpillSlot(LiveRange* range);
- LOperand* TryReuseSpillSlot(LiveRange* range);
-
- // Helper methods for allocating registers.
- bool TryAllocateFreeReg(LiveRange* range);
- void AllocateBlockedReg(LiveRange* range);
-
- // Live range splitting helpers.
-
- // Split the given range at the given position.
- // If range starts at or after the given position then the
- // original range is returned.
- // Otherwise returns the live range that starts at pos and contains
- // all uses from the original range that follow pos. Uses at pos will
- // still be owned by the original range after splitting.
- LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
-
-  // Split the given range at a position in the interval [start, end].
- LiveRange* SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
-
-  // Find a lifetime position in the interval [start, end] which
-  // is optimal for splitting: it is either the header of the outermost
-  // loop covered by this interval or the latest possible position.
- LifetimePosition FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end);
-
-  // Spill the given live range after position pos.
- void SpillAfter(LiveRange* range, LifetimePosition pos);
-
-  // Spill the given live range after position [start] and up to position [end].
- void SpillBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
-
-  // Spill the given live range after position [start] and up to position [end].
-  // The range is guaranteed to be spilled at least until position [until].
- void SpillBetweenUntil(LiveRange* range,
- LifetimePosition start,
- LifetimePosition until,
- LifetimePosition end);
-
- void SplitAndSpillIntersecting(LiveRange* range);
-
-  // If we are trying to spill a range inside a loop, try to
-  // hoist the spill position out to the point just before the loop.
- LifetimePosition FindOptimalSpillingPos(LiveRange* range,
- LifetimePosition pos);
-
- void Spill(LiveRange* range);
- bool IsBlockBoundary(LifetimePosition pos);
-
- // Helper methods for resolving control flow.
- void ResolveControlFlow(LiveRange* range,
- HBasicBlock* block,
- HBasicBlock* pred);
-
- inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
-
- // Return parallel move that should be used to connect ranges split at the
- // given position.
- LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
-
-  // Return the block which contains the given lifetime position.
- HBasicBlock* GetBlock(LifetimePosition pos);
-
- // Helper methods for the fixed registers.
- int RegisterCount() const;
- static int FixedLiveRangeID(int index) { return -index - 1; }
- static int FixedDoubleLiveRangeID(int index);
- LiveRange* FixedLiveRangeFor(int index);
- LiveRange* FixedDoubleLiveRangeFor(int index);
- LiveRange* LiveRangeFor(int index);
- HPhi* LookupPhi(LOperand* operand) const;
- LGap* GetLastGap(HBasicBlock* block);
-
- const char* RegisterName(int allocation_index);
-
- inline bool IsGapAt(int index);
-
- inline LInstruction* InstructionAt(int index);
-
- inline LGap* GapAt(int index);
-
- Zone zone_;
-
- LPlatformChunk* chunk_;
-
- // During liveness analysis keep a mapping from block id to live_in sets
- // for blocks already analyzed.
- ZoneList<BitVector*> live_in_sets_;
-
- // Liveness analysis results.
- ZoneList<LiveRange*> live_ranges_;
-
- // Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kNumRegisters> fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumRegisters>
- fixed_double_live_ranges_;
- ZoneList<LiveRange*> unhandled_live_ranges_;
- ZoneList<LiveRange*> active_live_ranges_;
- ZoneList<LiveRange*> inactive_live_ranges_;
- ZoneList<LiveRange*> reusable_slots_;
-
- // Next virtual register number to be assigned to temporaries.
- int next_virtual_register_;
- int first_artificial_register_;
- GrowableBitVector double_artificial_registers_;
-
- RegisterKind mode_;
- int num_registers_;
- const int* allocatable_register_codes_;
-
- BitVector* assigned_registers_;
- BitVector* assigned_double_registers_;
-
- HGraph* graph_;
-
- bool has_osr_entry_;
-
- // Indicates success or failure during register allocation.
- bool allocation_ok_;
-
-#ifdef DEBUG
- LifetimePosition allocation_finger_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(LAllocator);
-};
-
-
-class LAllocatorPhase : public CompilationPhase {
- public:
- LAllocatorPhase(const char* name, LAllocator* allocator);
- ~LAllocatorPhase();
-
- private:
- LAllocator* allocator_;
- size_t allocator_zone_start_allocation_size_;
-
- DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
deleted file mode 100644
index 71dba3e47b..0000000000
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/lithium-codegen.h"
-
-#include <sstream>
-
-#include "src/objects-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
-#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
-#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
-#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
-#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
-#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
-#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
-#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
-#else
-#error Unsupported target architecture.
-#endif
-
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-
-HGraph* LCodeGenBase::graph() const {
- return chunk()->graph();
-}
-
-LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
- CompilationInfo* info)
- : chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- zone_(info->zone()),
- status_(UNUSED),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- translations_(info->zone()),
- inlined_function_count_(0),
- last_lazy_deopt_pc_(0),
- osr_pc_offset_(-1),
- source_position_table_builder_(info->zone(),
- info->SourcePositionRecordingMode()) {}
-
-Isolate* LCodeGenBase::isolate() const { return info_->isolate(); }
-
-bool LCodeGenBase::GenerateBody() {
- DCHECK(is_generating());
- bool emit_instructions = true;
- LCodeGen* codegen = static_cast<LCodeGen*>(this);
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement() &&
- (!FLAG_unreachable_code_elimination ||
- instr->hydrogen_value()->block()->IsReachable());
- if (FLAG_code_comments && !emit_instructions) {
- Comment(
- ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) "
- "--------------------",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->hydrogen_value()->block()->block_id());
- }
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(codegen)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- GenerateBodyInstructionPre(instr);
-
- HValue* value = instr->hydrogen_value();
- if (value->position().IsKnown()) {
- RecordAndWritePosition(value->position());
- }
-
- instr->CompileToNative(codegen);
-
- GenerateBodyInstructionPost(instr);
- }
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- return !is_aborted();
-}
-
-
-void LCodeGenBase::CheckEnvironmentUsage() {
-#ifdef DEBUG
- bool dead_block = false;
- for (int i = 0; i < instructions_->length(); i++) {
- LInstruction* instr = instructions_->at(i);
- HValue* hval = instr->hydrogen_value();
- if (instr->IsLabel()) dead_block = LLabel::cast(instr)->HasReplacement();
- if (dead_block || !hval->block()->IsReachable()) continue;
-
- HInstruction* hinstr = HInstruction::cast(hval);
- if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) {
- V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)",
- hinstr->Mnemonic(), instr->Mnemonic());
- }
-
- if (instr->HasEnvironment() && !instr->environment()->has_been_used()) {
- V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)",
- hinstr->Mnemonic(), instr->Mnemonic());
- }
- }
-#endif
-}
-
-void LCodeGenBase::RecordAndWritePosition(SourcePosition pos) {
- if (!pos.IsKnown()) return;
- source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
-}
-
-void LCodeGenBase::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, arraysize(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
- MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
- SourcePosition position = deopt_info.position;
- int deopt_id = deopt_info.deopt_id;
- if (masm()->isolate()->NeedsSourcePositionsForProfiling()) {
- masm()->RecordDeoptReason(deopt_info.deopt_reason, position, deopt_id);
- }
-}
-
-
-int LCodeGenBase::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!graph()->blocks()->at(i)->IsReachable()) continue;
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGenBase::Abort(BailoutReason reason) {
- info()->AbortOptimization(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGenBase::Retry(BailoutReason reason) {
- info()->RetryOptimization(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) {
- if (map->is_deprecated()) return Retry(kMapBecameDeprecated);
- chunk_->AddDeprecationDependency(map);
-}
-
-
-void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
- if (!map->is_stable()) return Retry(kMapBecameUnstable);
- chunk_->AddStabilityDependency(map);
-}
-
-
-int LCodeGenBase::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
- Translation* translation) {
- int translation_size = environment->translation_size();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
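-  // Every case below begins a frame of the matching type in the translation
-  // and records the frame's function: directly as the current JS frame
-  // function when it is the closure being compiled, otherwise as a
-  // deoptimization literal. The STUB case has no function to record.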
- switch (environment->frame_type()) {
- case JS_FUNCTION: {
- int shared_id = DefineDeoptimizationLiteral(
- environment->entry() ? environment->entry()->shared()
- : info()->shared_info());
- translation->BeginJSFrame(environment->ast_id(), shared_id, height);
- if (info()->closure().is_identical_to(environment->closure())) {
- translation->StoreJSFrameFunction();
- } else {
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->StoreLiteral(closure_id);
- }
- break;
- }
- case JS_CONSTRUCT: {
- int shared_id = DefineDeoptimizationLiteral(
- environment->entry() ? environment->entry()->shared()
- : info()->shared_info());
- translation->BeginConstructStubFrame(BailoutId::ConstructStubInvoke(),
- shared_id, translation_size);
- if (info()->closure().is_identical_to(environment->closure())) {
- translation->StoreJSFrameFunction();
- } else {
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->StoreLiteral(closure_id);
- }
- break;
- }
- case JS_GETTER: {
- DCHECK_EQ(1, translation_size);
- DCHECK_EQ(0, height);
- int shared_id = DefineDeoptimizationLiteral(
- environment->entry() ? environment->entry()->shared()
- : info()->shared_info());
- translation->BeginGetterStubFrame(shared_id);
- if (info()->closure().is_identical_to(environment->closure())) {
- translation->StoreJSFrameFunction();
- } else {
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->StoreLiteral(closure_id);
- }
- break;
- }
- case JS_SETTER: {
- DCHECK_EQ(2, translation_size);
- DCHECK_EQ(0, height);
- int shared_id = DefineDeoptimizationLiteral(
- environment->entry() ? environment->entry()->shared()
- : info()->shared_info());
- translation->BeginSetterStubFrame(shared_id);
- if (info()->closure().is_identical_to(environment->closure())) {
- translation->StoreJSFrameFunction();
- } else {
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->StoreLiteral(closure_id);
- }
- break;
- }
- case TAIL_CALLER_FUNCTION: {
- DCHECK_EQ(0, translation_size);
- int shared_id = DefineDeoptimizationLiteral(
- environment->entry() ? environment->entry()->shared()
- : info()->shared_info());
- translation->BeginTailCallerFrame(shared_id);
- if (info()->closure().is_identical_to(environment->closure())) {
- translation->StoreJSFrameFunction();
- } else {
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->StoreLiteral(closure_id);
- }
- break;
- }
- case ARGUMENTS_ADAPTOR: {
- int shared_id = DefineDeoptimizationLiteral(
- environment->entry() ? environment->entry()->shared()
- : info()->shared_info());
- translation->BeginArgumentsAdaptorFrame(shared_id, translation_size);
- if (info()->closure().is_identical_to(environment->closure())) {
- translation->StoreJSFrameFunction();
- } else {
- int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->StoreLiteral(closure_id);
- }
- break;
- }
- case STUB:
- translation->BeginCompiledStubFrame(translation_size);
- break;
- }
-}
-
-namespace {
-
-Handle<PodArray<InliningPosition>> CreateInliningPositions(
- CompilationInfo* info) {
- const CompilationInfo::InlinedFunctionList& inlined_functions =
- info->inlined_functions();
- if (inlined_functions.size() == 0) {
- return Handle<PodArray<InliningPosition>>::cast(
- info->isolate()->factory()->empty_byte_array());
- }
- Handle<PodArray<InliningPosition>> inl_positions =
- PodArray<InliningPosition>::New(
- info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
- for (size_t i = 0; i < inlined_functions.size(); ++i) {
- inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
- }
- return inl_positions;
-}
-
-} // namespace
-
-void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::kZero);
- }
- data->SetWeakCellCache(Smi::kZero);
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- {
- AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info_);
- data->SetInliningPositions(*inl_pos);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (CompilationInfo::InlinedFunctionHolder& inlined :
- info()->inlined_functions()) {
- if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
- int index = DefineDeoptimizationLiteral(inlined.shared_info);
- inlined.RegisterInlinedFunctionId(index);
- }
- }
- inlined_function_count_ = deoptimization_literals_.length();
-
- // Define deoptimization literals for all unoptimized code objects of inlined
- // functions. This ensures unoptimized code is kept alive by optimized code.
- for (const CompilationInfo::InlinedFunctionHolder& inlined :
- info()->inlined_functions()) {
- if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
- DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
- }
- }
-}
-
-Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
- LInstruction* instr, DeoptimizeReason deopt_reason, int deopt_id) {
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
- deopt_reason, deopt_id);
- return deopt_info;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/lithium-codegen.h b/deps/v8/src/crankshaft/lithium-codegen.h
deleted file mode 100644
index 03ece53bf4..0000000000
--- a/deps/v8/src/crankshaft/lithium-codegen.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
-#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
-
-#include "src/bailout-reason.h"
-#include "src/deoptimizer.h"
-#include "src/source-position-table.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-class HGraph;
-class LChunk;
-class LEnvironment;
-class LInstruction;
-class LPlatformChunk;
-
-class LCodeGenBase BASE_EMBEDDED {
- public:
- LCodeGenBase(LChunk* chunk,
- MacroAssembler* assembler,
- CompilationInfo* info);
- virtual ~LCodeGenBase() {}
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const;
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
- LPlatformChunk* chunk() const { return chunk_; }
- HGraph* graph() const;
- SourcePositionTableBuilder* source_position_table_builder() {
- return &source_position_table_builder_;
- }
-
- void PRINTF_FORMAT(2, 3) Comment(const char* format, ...);
- void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
- static Deoptimizer::DeoptInfo MakeDeoptInfo(LInstruction* instr,
- DeoptimizeReason deopt_reason,
- int deopt_id);
-
- bool GenerateBody();
- virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
- virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
-
- virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
- void RecordAndWritePosition(SourcePosition position);
-
- int GetNextEmittedBlock() const;
-
- void WriteTranslationFrame(LEnvironment* environment,
- Translation* translation);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationData(Handle<Code> code);
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- // Check that an environment assigned via AssignEnvironment is actually being
- // used. Redundant assignments keep things alive longer than necessary, and
- // consequently lead to worse code, so it's important to minimize this.
- void CheckEnvironmentUsage();
-
- protected:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
- Zone* zone_;
- Status status_;
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- TranslationBuffer translations_;
- int inlined_function_count_;
- int last_lazy_deopt_pc_;
- int osr_pc_offset_;
- SourcePositionTableBuilder source_position_table_builder_;
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(BailoutReason reason);
- void Retry(BailoutReason reason);
-
- // Methods for code dependencies.
- void AddDeprecationDependency(Handle<Map> map);
- void AddStabilityDependency(Handle<Map> map);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
diff --git a/deps/v8/src/crankshaft/lithium-inl.h b/deps/v8/src/crankshaft/lithium-inl.h
deleted file mode 100644
index 938588e396..0000000000
--- a/deps/v8/src/crankshaft/lithium-inl.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_LITHIUM_INL_H_
-#define V8_CRANKSHAFT_LITHIUM_INL_H_
-
-#include "src/crankshaft/lithium.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-TempIterator::TempIterator(LInstruction* instr)
- : instr_(instr), limit_(instr->TempCount()), current_(0) {
- SkipUninteresting();
-}
-
-
-bool TempIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* TempIterator::Current() {
- DCHECK(!Done());
- return instr_->TempAt(current_);
-}
-
-
-void TempIterator::SkipUninteresting() {
- while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
-}
-
-
-void TempIterator::Advance() {
- ++current_;
- SkipUninteresting();
-}
-
-
-InputIterator::InputIterator(LInstruction* instr)
- : instr_(instr), limit_(instr->InputCount()), current_(0) {
- SkipUninteresting();
-}
-
-
-bool InputIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* InputIterator::Current() {
- DCHECK(!Done());
- DCHECK(instr_->InputAt(current_) != NULL);
- return instr_->InputAt(current_);
-}
-
-
-void InputIterator::Advance() {
- ++current_;
- SkipUninteresting();
-}
-
-
-void InputIterator::SkipUninteresting() {
- while (current_ < limit_) {
- LOperand* current = instr_->InputAt(current_);
- if (current != NULL && !current->IsConstantOperand()) break;
- ++current_;
- }
-}
-
-
-UseIterator::UseIterator(LInstruction* instr)
- : input_iterator_(instr), env_iterator_(instr->environment()) {}
-
-
-bool UseIterator::Done() {
- return input_iterator_.Done() && env_iterator_.Done();
-}
-
-
-LOperand* UseIterator::Current() {
- DCHECK(!Done());
- LOperand* result = input_iterator_.Done() ? env_iterator_.Current()
- : input_iterator_.Current();
- DCHECK(result != NULL);
- return result;
-}
-
-
-void UseIterator::Advance() {
- input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
-}
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_LITHIUM_INL_H_
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
deleted file mode 100644
index 5f0e9e386d..0000000000
--- a/deps/v8/src/crankshaft/lithium.cc
+++ /dev/null
@@ -1,730 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/lithium.h"
-
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-#include "src/objects-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
-#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
-#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
-#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
-#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
-#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
-#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
-#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
-#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
-#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
-#else
-#error "Unknown architecture."
-#endif
-
-namespace v8 {
-namespace internal {
-
-const auto GetRegConfig = RegisterConfiguration::Crankshaft;
-
-void LOperand::PrintTo(StringStream* stream) {
- LUnallocated* unalloc = NULL;
- switch (kind()) {
- case INVALID:
- stream->Add("(0)");
- break;
- case UNALLOCATED:
- unalloc = LUnallocated::cast(this);
- stream->Add("v%d", unalloc->virtual_register());
- if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
- stream->Add("(=%dS)", unalloc->fixed_slot_index());
- break;
- }
- switch (unalloc->extended_policy()) {
- case LUnallocated::NONE:
- break;
- case LUnallocated::FIXED_REGISTER: {
- int reg_index = unalloc->fixed_register_index();
- if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
- stream->Add("(=invalid_reg#%d)", reg_index);
- } else {
- const char* register_name =
- GetRegConfig()->GetGeneralRegisterName(reg_index);
- stream->Add("(=%s)", register_name);
- }
- break;
- }
- case LUnallocated::FIXED_DOUBLE_REGISTER: {
- int reg_index = unalloc->fixed_register_index();
- if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
- stream->Add("(=invalid_double_reg#%d)", reg_index);
- } else {
- const char* double_register_name =
- GetRegConfig()->GetDoubleRegisterName(reg_index);
- stream->Add("(=%s)", double_register_name);
- }
- break;
- }
- case LUnallocated::MUST_HAVE_REGISTER:
- stream->Add("(R)");
- break;
- case LUnallocated::MUST_HAVE_DOUBLE_REGISTER:
- stream->Add("(D)");
- break;
- case LUnallocated::WRITABLE_REGISTER:
- stream->Add("(WR)");
- break;
- case LUnallocated::SAME_AS_FIRST_INPUT:
- stream->Add("(1)");
- break;
- case LUnallocated::ANY:
- stream->Add("(-)");
- break;
- }
- break;
- case CONSTANT_OPERAND:
- stream->Add("[constant:%d]", index());
- break;
- case STACK_SLOT:
- stream->Add("[stack:%d]", index());
- break;
- case DOUBLE_STACK_SLOT:
- stream->Add("[double_stack:%d]", index());
- break;
- case REGISTER: {
- int reg_index = index();
- if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
- stream->Add("(=invalid_reg#%d|R)", reg_index);
- } else {
- stream->Add("[%s|R]",
- GetRegConfig()->GetGeneralRegisterName(reg_index));
- }
- break;
- }
- case DOUBLE_REGISTER: {
- int reg_index = index();
- if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
- stream->Add("(=invalid_double_reg#%d|R)", reg_index);
- } else {
- stream->Add("[%s|R]", GetRegConfig()->GetDoubleRegisterName(reg_index));
- }
- break;
- }
- }
-}
-
-
-template<LOperand::Kind kOperandKind, int kNumCachedOperands>
-LSubKindOperand<kOperandKind, kNumCachedOperands>*
-LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
-
-
-template<LOperand::Kind kOperandKind, int kNumCachedOperands>
-void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
- if (cache) return;
- cache = new LSubKindOperand[kNumCachedOperands];
- for (int i = 0; i < kNumCachedOperands; i++) {
- cache[i].ConvertTo(kOperandKind, i);
- }
-}
-
-
-template<LOperand::Kind kOperandKind, int kNumCachedOperands>
-void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
- delete[] cache;
- cache = NULL;
-}
-
-
-void LOperand::SetUpCaches() {
-#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
- LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
-#undef LITHIUM_OPERAND_SETUP
-}
-
-
-void LOperand::TearDownCaches() {
-#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
- LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
-#undef LITHIUM_OPERAND_TEARDOWN
-}
-
-
-bool LParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
- }
- return true;
-}
-
-
-void LParallelMove::PrintDataTo(StringStream* stream) const {
- bool first = true;
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsEliminated()) {
- LOperand* source = move_operands_[i].source();
- LOperand* destination = move_operands_[i].destination();
- if (!first) stream->Add(" ");
- first = false;
- if (source->Equals(destination)) {
- destination->PrintTo(stream);
- } else {
- destination->PrintTo(stream);
- stream->Add(" = ");
- source->PrintTo(stream);
- }
- stream->Add(";");
- }
- }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) {
- stream->Add("[id=%d|", ast_id().ToInt());
- if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
- stream->Add("deopt_id=%d|", deoptimization_index());
- }
- stream->Add("parameters=%d|", parameter_count());
- stream->Add("arguments_stack_height=%d|", arguments_stack_height());
- for (int i = 0; i < values_.length(); ++i) {
- if (i != 0) stream->Add(";");
- if (values_[i] == NULL) {
- stream->Add("[hole]");
- } else {
- values_[i]->PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
-void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op, zone);
-}
-
-
-void LPointerMap::RemovePointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (pointer_operands_[i]->Equals(op)) {
- pointer_operands_.Remove(i);
- --i;
- }
- }
-}
-
-
-void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- untagged_operands_.Add(op, zone);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) {
- stream->Add("{");
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (i != 0) stream->Add(";");
- pointer_operands_[i]->PrintTo(stream);
- }
- stream->Add("}");
-}
-
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : base_frame_slots_(info->IsStub()
- ? TypedFrameConstants::kFixedSlotCount
- : StandardFrameConstants::kFixedSlotCount),
- current_frame_slots_(base_frame_slots_),
- info_(info),
- graph_(graph),
- instructions_(32, info->zone()),
- pointer_maps_(8, info->zone()),
- deprecation_dependencies_(32, info->zone()),
- stability_dependencies_(8, info->zone()) {}
-
-LLabel* LChunk::GetLabel(int block_id) const {
- HBasicBlock* block = graph_->blocks()->at(block_id);
- int first_instruction = block->first_instruction_index();
- return LLabel::cast(instructions_[first_instruction]);
-}
-
-
-int LChunk::LookupDestination(int block_id) const {
- LLabel* cur = GetLabel(block_id);
- while (cur->replacement() != NULL) {
- cur = cur->replacement();
- }
- return cur->block_id();
-}
-
-Label* LChunk::GetAssemblyLabel(int block_id) const {
- LLabel* label = GetLabel(block_id);
- DCHECK(!label->HasReplacement());
- return label->label();
-}
-
-
-void LChunk::MarkEmptyBlocks() {
- LPhase phase("L_Mark empty blocks", this);
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- int first = block->first_instruction_index();
- int last = block->last_instruction_index();
- LInstruction* first_instr = instructions()->at(first);
- LInstruction* last_instr = instructions()->at(last);
-
- LLabel* label = LLabel::cast(first_instr);
- if (last_instr->IsGoto()) {
- LGoto* goto_instr = LGoto::cast(last_instr);
- if (label->IsRedundant() &&
- !label->is_loop_header()) {
- bool can_eliminate = true;
- for (int i = first + 1; i < last && can_eliminate; ++i) {
- LInstruction* cur = instructions()->at(i);
- if (cur->IsGap()) {
- LGap* gap = LGap::cast(cur);
- if (!gap->IsRedundant()) {
- can_eliminate = false;
- }
- } else {
- can_eliminate = false;
- }
- }
- if (can_eliminate) {
- label->set_replacement(GetLabel(goto_instr->block_id()));
- }
- }
- }
- }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new (zone()) LInstructionGap(block);
- gap->set_hydrogen_value(instr->hydrogen_value());
- int index = -1;
- if (instr->IsControl()) {
- instructions_.Add(gap, zone());
- index = instructions_.length();
- instructions_.Add(instr, zone());
- } else {
- index = instructions_.length();
- instructions_.Add(instr, zone());
- instructions_.Add(gap, zone());
- }
- if (instr->HasPointerMap()) {
- pointer_maps_.Add(instr->pointer_map(), zone());
- instr->pointer_map()->set_lithium_position(index);
- }
-}
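A sketch of the instruction-stream layout produced by AddInstruction (editorial illustration; `index` is the value recorded via set_lithium_position):

// non-control instruction:  ..., instr (at index), LInstructionGap, ...
// control instruction:      ..., LInstructionGap, instr (at index), ...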
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
- return LConstantOperand::Create(constant->id(), zone());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
- // The receiver is at index 0, the first parameter at index 1, so we
- // shift all parameter indexes down by the number of parameters, and
- // make sure they end up negative so they are distinguishable from
- // spill slots.
- int result = index - info()->num_parameters() - 1;
-
- DCHECK(result < 0);
- return result;
-}
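A worked example of the index arithmetic above (editorial sketch; assumes a hypothetical function with two declared parameters, so num_parameters() == 2):

// GetParameterStackSlot(0) == 0 - 2 - 1 == -3   (the receiver)
// GetParameterStackSlot(1) == 1 - 2 - 1 == -2   (first parameter)
// GetParameterStackSlot(2) == 2 - 2 - 1 == -1   (second parameter)
// All results are negative, so they cannot collide with spill slots (>= 0).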
-
-
-// A parameter relative to the frame pointer in the arguments stub.
-int LChunk::ParameterAt(int index) {
- DCHECK(-1 <= index); // -1 is the receiver.
- return (1 + info()->scope()->num_parameters() - index) *
- kPointerSize;
-}
-
-
-LGap* LChunk::GetGapAt(int index) const {
- return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
- return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
- while (!IsGapAt(index)) index--;
- return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
- GetGapAt(index)->GetOrCreateParallelMove(
- LGap::START, zone())->AddMove(from, to, zone());
-}
-
-
-HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
- return HConstant::cast(graph_->LookupValue(operand->index()));
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
- LConstantOperand* operand) const {
- return graph_->LookupValue(operand->index())->representation();
-}
-
-
-void LChunk::CommitDependencies(Handle<Code> code) const {
- if (!code->is_optimized_code()) return;
- HandleScope scope(isolate());
-
- for (Handle<Map> map : deprecation_dependencies_) {
- DCHECK(!map->is_deprecated());
- DCHECK(map->CanBeDeprecated());
- Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
- }
-
- for (Handle<Map> map : stability_dependencies_) {
- DCHECK(map->is_stable());
- DCHECK(map->CanTransition());
- Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
- }
-
- info_->dependencies()->Commit(code);
-}
-
-
-LChunk* LChunk::NewChunk(HGraph* graph) {
- DisallowHandleAllocation no_handles;
- DisallowHeapAllocation no_gc;
- graph->DisallowAddingNewValues();
- int values = graph->GetMaximumValueID();
- CompilationInfo* info = graph->info();
- if (values > LUnallocated::kMaxVirtualRegisters) {
- info->AbortOptimization(kNotEnoughVirtualRegistersForValues);
- return NULL;
- }
- LAllocator allocator(values, graph);
- LChunkBuilder builder(info, graph, &allocator);
- LChunk* chunk = builder.Build();
- if (chunk == NULL) return NULL;
-
- if (!allocator.Allocate(chunk)) {
- info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
- return NULL;
- }
-
- chunk->set_allocated_double_registers(
- allocator.assigned_double_registers());
-
- return chunk;
-}
-
-
-Handle<Code> LChunk::Codegen() {
- MacroAssembler assembler(info()->isolate(), NULL, 0,
- CodeObjectRequired::kYes);
- // Code serializer only takes unoptimized code.
- DCHECK(!info()->will_serialize());
- LCodeGen generator(this, &assembler, info());
-
- MarkEmptyBlocks();
-
- if (generator.GenerateCode()) {
- generator.CheckEnvironmentUsage();
- CodeGenerator::MakeCodePrologue(info(), "optimized");
- Handle<Code> code = CodeGenerator::MakeCodeEpilogue(
- &assembler, nullptr, info(), assembler.CodeObject());
- generator.FinishCode(code);
- CommitDependencies(code);
- Handle<ByteArray> source_positions =
- generator.source_position_table_builder()->ToSourcePositionTable(
- info()->isolate(), Handle<AbstractCode>::cast(code));
- code->set_source_position_table(*source_positions);
- code->set_is_crankshafted(true);
-
- CodeGenerator::PrintCode(code, info());
- return code;
- }
- assembler.AbortedCodeGeneration();
- return Handle<Code>::null();
-}
-
-
-void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
- allocated_double_registers_ = allocated_registers;
- BitVector* doubles = allocated_double_registers();
- BitVector::Iterator iterator(doubles);
- while (!iterator.Done()) {
- if (info()->saves_caller_doubles()) {
- if (kDoubleSize == kPointerSize * 2) {
- current_frame_slots_ += 2;
- } else {
- current_frame_slots_++;
- }
- }
- iterator.Advance();
- }
-}
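A worked example of the slot accounting above (editorial sketch; the register counts are hypothetical):

// 32-bit target (kDoubleSize == 2 * kPointerSize), saves_caller_doubles(),
// three allocated double registers:
//   current_frame_slots_ += 3 * 2;   // 6 extra slots
// 64-bit target, same three registers:
//   current_frame_slots_ += 3;       // one slot per double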
-
-
-void LChunkBuilderBase::Abort(BailoutReason reason) {
- info()->AbortOptimization(reason);
- status_ = ABORTED;
-}
-
-
-void LChunkBuilderBase::Retry(BailoutReason reason) {
- info()->RetryOptimization(reason);
- status_ = ABORTED;
-}
-
-void LChunkBuilderBase::CreateLazyBailoutForCall(HBasicBlock* current_block,
- LInstruction* instr,
- HInstruction* hydrogen_val) {
- if (!instr->IsCall()) return;
-
- HEnvironment* hydrogen_env = current_block->last_environment();
- HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- DCHECK_NOT_NULL(hydrogen_env);
- if (instr->IsSyntacticTailCall()) {
- // If it was a syntactic tail call we need to drop the current frame and
- // all the frames on top of it that are either an arguments adaptor frame
- // or a tail caller frame.
- hydrogen_env = hydrogen_env->outer();
- while (hydrogen_env != nullptr &&
- (hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR ||
- hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION)) {
- hydrogen_env = hydrogen_env->outer();
- }
- if (hydrogen_env != nullptr) {
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
-        // If the outer frame is a function frame we have to replay the
-        // environment manually because
-        // 1) it does not contain the result of the inlined function yet, and
-        // 2) we can't find the proper simulate that corresponds to the point
-        //    after the inlined call to do a ReplayEnvironment() on.
-        // So we push the return value on top of the outer environment.
-        // For JS_GETTER/JS_SETTER/JS_CONSTRUCT nothing has to be done here;
-        // the deoptimizer ensures that the result of the callee is correctly
-        // propagated to the result register during deoptimization.
- hydrogen_env = hydrogen_env->Copy();
- hydrogen_env->Push(hydrogen_val);
- }
- } else {
- // Although we don't need this lazy bailout for normal execution
- // (because when we tail call from the outermost function we should pop
-      // its frame) we still need it when the debugger is on.
- hydrogen_env = current_block->last_environment();
- }
- } else {
- if (hydrogen_val->HasObservableSideEffects()) {
- HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- sim->ReplayEnvironment(hydrogen_env);
- hydrogen_value_for_lazy_bailout = sim;
- }
- }
- LInstruction* bailout = LChunkBuilderBase::AssignEnvironment(
- new (zone()) LLazyBailout(), hydrogen_env);
- bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block);
-}
-
-LInstruction* LChunkBuilderBase::AssignEnvironment(LInstruction* instr,
- HEnvironment* hydrogen_env) {
- int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
- DCHECK_NE(TAIL_CALLER_FUNCTION, hydrogen_env->frame_type());
- instr->set_environment(CreateEnvironment(
- hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
- return instr;
-}
-
-LEnvironment* LChunkBuilderBase::CreateEnvironment(
- HEnvironment* hydrogen_env, int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
- if (hydrogen_env == NULL) return NULL;
-
- BailoutId ast_id = hydrogen_env->ast_id();
- DCHECK(!ast_id.IsNone() ||
- (hydrogen_env->frame_type() != JS_FUNCTION &&
- hydrogen_env->frame_type() != TAIL_CALLER_FUNCTION));
-
- if (hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION) {
- // Skip potential outer arguments adaptor frame.
- HEnvironment* outer_hydrogen_env = hydrogen_env->outer();
- if (outer_hydrogen_env != nullptr &&
- outer_hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR) {
- outer_hydrogen_env = outer_hydrogen_env->outer();
- }
- LEnvironment* outer = CreateEnvironment(
- outer_hydrogen_env, argument_index_accumulator, objects_to_materialize);
- return new (zone())
- LEnvironment(hydrogen_env->closure(), hydrogen_env->frame_type(),
- ast_id, 0, 0, 0, outer, hydrogen_env->entry(), zone());
- }
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
- objects_to_materialize);
-
- int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
- ? 0
- : hydrogen_env->specials_count();
-
- int value_count = hydrogen_env->length() - omitted_count;
- LEnvironment* result =
- new(zone()) LEnvironment(hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
-
- // Store the environment description into the environment
- // (with holes for nested objects)
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i) &&
- hydrogen_env->frame_type() != JS_FUNCTION) {
- continue;
- }
- LOperand* op;
- HValue* value = hydrogen_env->values()->at(i);
- CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- op = LEnvironment::materialization_marker();
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- // Recursively store the nested objects into the environment
- for (int i = 0; i < hydrogen_env->length(); ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- AddObjectToMaterialize(value, objects_to_materialize, result);
- }
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-// Add an object to the supplied environment and object materialization list.
-//
-// Notes:
-//
-// We are building three lists here:
-//
-// 1. In the result->object_mapping_ list (added to by the
-// LEnvironment::Add*Object methods), we store the lengths (number
-// of fields) of the captured objects in depth-first traversal order, or
-// in case of duplicated objects, we store the index to the duplicate object
-// (with a tag to differentiate between captured and duplicated objects).
-//
-// 2. The object fields are stored in the result->values_ list
-//    (added to by the LEnvironment::AddValue method) sequentially as lists
-//    of fields with holes for nested objects (the holes will be expanded
-//    later by LCodegen::AddToTranslation according to the
-//    LEnvironment::object_mapping_ list).
-//
-// 3. The auxiliary objects_to_materialize array stores the hydrogen values
-// in the same order as result->object_mapping_ list. This is used
-// to detect duplicate values and calculate the corresponding object index.
-void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
- ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
- int object_index = objects_to_materialize->length();
- // Store the hydrogen value into the de-duplication array
- objects_to_materialize->Add(value, zone());
- // Find out whether we are storing a duplicated value
- int previously_materialized_object = -1;
- for (int prev = 0; prev < object_index; ++prev) {
- if (objects_to_materialize->at(prev) == value) {
- previously_materialized_object = prev;
- break;
- }
- }
- // Store the captured object length (or duplicated object index)
- // into the environment. For duplicated objects, we stop here.
- int length = value->OperandCount();
- bool is_arguments = value->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- return;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- // Store the captured object's fields into the environment
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* arg_value = value->OperandAt(i);
- if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
- // Insert a hole for nested objects
- op = LEnvironment::materialization_marker();
- } else {
- DCHECK(!arg_value->IsPushArguments());
- // For ordinary values, tell the register allocator we need the value
- // to be alive here
- op = UseAny(arg_value);
- }
- result->AddValue(op,
- arg_value->representation(),
- arg_value->CheckFlag(HInstruction::kUint32));
- }
- // Recursively store all the nested captured objects into the environment
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- HValue* arg_value = value->OperandAt(i);
- if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
- AddObjectToMaterialize(arg_value, objects_to_materialize, result);
- }
- }
-}
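A worked trace of the three lists described above (editorial sketch; O1 and O2 are hypothetical captured objects, O1 = {a, O2}, O2 = {b}, and O1 occurs twice in the environment):

// First occurrence of O1:
//   objects_to_materialize = [O1]
//   AddNewObject(2, false)            -> object_mapping_ = [len 2]
//   AddValue(a), AddValue(hole)       -> values_ += [a, <hole for O2>]
//   recurse into O2:
//     objects_to_materialize = [O1, O2]
//     AddNewObject(1, false)          -> object_mapping_ = [len 2, len 1]
//     AddValue(b)                     -> values_ += [b]
// Second occurrence of O1:
//   objects_to_materialize = [O1, O2, O1], duplicate found at index 0
//   AddDuplicateObject(0)             -> object_mapping_ = [len 2, len 1, dupe 0]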
-
-
-LPhase::~LPhase() {
- if (ShouldProduceTraceOutput()) {
- isolate()->GetHTracer()->TraceLithium(name(), chunk_);
- }
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
deleted file mode 100644
index d67c4908eb..0000000000
--- a/deps/v8/src/crankshaft/lithium.h
+++ /dev/null
@@ -1,847 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_LITHIUM_H_
-#define V8_CRANKSHAFT_LITHIUM_H_
-
-#include <set>
-
-#include "src/allocation.h"
-#include "src/bailout-reason.h"
-#include "src/crankshaft/compilation-phase.h"
-#include "src/crankshaft/hydrogen.h"
-#include "src/safepoint-table.h"
-#include "src/zone/zone-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND, 128) \
- V(StackSlot, STACK_SLOT, 128) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
- V(Register, REGISTER, 16) \
- V(DoubleRegister, DOUBLE_REGISTER, 16)
-
-class LOperand : public ZoneObject {
- public:
- enum Kind {
- INVALID,
- UNALLOCATED,
- CONSTANT_OPERAND,
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER
- };
-
- LOperand() : value_(KindField::encode(INVALID)) { }
-
- Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
- bool Is##name() const { return kind() == type; }
- LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
- LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
- LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
-#undef LITHIUM_OPERAND_PREDICATE
- bool Equals(LOperand* other) const { return value_ == other->value_; }
-
- void PrintTo(StringStream* stream);
- void ConvertTo(Kind kind, int index) {
- if (kind == REGISTER) DCHECK(index >= 0);
- value_ = KindField::encode(kind);
- value_ |= index << kKindFieldWidth;
- DCHECK(this->index() == index);
- }
-
- // Calls SetUpCache()/TearDownCache() for each subclass.
- static void SetUpCaches();
- static void TearDownCaches();
-
- protected:
- static const int kKindFieldWidth = 3;
- class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
-
- LOperand(Kind kind, int index) { ConvertTo(kind, index); }
-
- unsigned value_;
-};
-
-
-class LUnallocated : public LOperand {
- public:
- enum BasicPolicy {
- FIXED_SLOT,
- EXTENDED_POLICY
- };
-
- enum ExtendedPolicy {
- NONE,
- ANY,
- FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
- MUST_HAVE_REGISTER,
- MUST_HAVE_DOUBLE_REGISTER,
- WRITABLE_REGISTER,
- SAME_AS_FIRST_INPUT
- };
-
- // Lifetime of operand inside the instruction.
- enum Lifetime {
-    // A USED_AT_START operand is guaranteed to be live only at the
-    // instruction start. The register allocator is free to assign the same
-    // register to some other operand used inside the instruction (i.e. a
-    // temporary or an output).
-    USED_AT_START,
-
-    // A USED_AT_END operand is treated as live until the end of the
-    // instruction. This means that the register allocator will not reuse its
-    // register for any other operand inside the instruction.
- USED_AT_END
- };
-
- explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
- value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
- value_ |= ExtendedPolicyField::encode(policy);
- value_ |= LifetimeField::encode(USED_AT_END);
- }
-
- LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
- DCHECK(policy == FIXED_SLOT);
- value_ |= BasicPolicyField::encode(policy);
- value_ |= index << FixedSlotIndexField::kShift;
- DCHECK(this->fixed_slot_index() == index);
- }
-
- LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
- DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
- value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
- value_ |= ExtendedPolicyField::encode(policy);
- value_ |= LifetimeField::encode(USED_AT_END);
- value_ |= FixedRegisterField::encode(index);
- }
-
- LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
- : LOperand(UNALLOCATED, 0) {
- value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
- value_ |= ExtendedPolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- }
-
- LUnallocated* CopyUnconstrained(Zone* zone) {
- LUnallocated* result = new(zone) LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
- }
-
- static LUnallocated* cast(LOperand* op) {
- DCHECK(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
- }
-
- // The encoding used for LUnallocated operands depends on the policy that is
- // stored within the operand. The FIXED_SLOT policy uses a compact encoding
-  // because it accommodates a larger payload.
- //
- // For FIXED_SLOT policy:
- // +------------------------------------------+
- // | slot_index | vreg | 0 | 001 |
- // +------------------------------------------+
- //
- // For all other (extended) policies:
- // +------------------------------------------+
- // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
- // +------------------------------------------+ P ... Policy
- //
- // The slot index is a signed value which requires us to decode it manually
- // instead of using the BitField utility class.
-
- // The superclass has a KindField.
- STATIC_ASSERT(kKindFieldWidth == 3);
-
- // BitFields for all unallocated operands.
- class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
- class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
-
- // BitFields specific to BasicPolicy::FIXED_SLOT.
- class FixedSlotIndexField : public BitField<int, 22, 10> {};
-
- // BitFields specific to BasicPolicy::EXTENDED_POLICY.
- class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
- class LifetimeField : public BitField<Lifetime, 25, 1> {};
- class FixedRegisterField : public BitField<int, 26, 6> {};
-
- static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
- static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
- static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
- static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
-
- // Predicates for the operand policy.
- bool HasAnyPolicy() const {
- return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == ANY;
- }
- bool HasFixedPolicy() const {
- return basic_policy() == FIXED_SLOT ||
- extended_policy() == FIXED_REGISTER ||
- extended_policy() == FIXED_DOUBLE_REGISTER;
- }
- bool HasRegisterPolicy() const {
- return basic_policy() == EXTENDED_POLICY && (
- extended_policy() == WRITABLE_REGISTER ||
- extended_policy() == MUST_HAVE_REGISTER);
- }
- bool HasDoubleRegisterPolicy() const {
- return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == MUST_HAVE_DOUBLE_REGISTER;
- }
- bool HasSameAsInputPolicy() const {
- return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == SAME_AS_FIRST_INPUT;
- }
- bool HasFixedSlotPolicy() const {
- return basic_policy() == FIXED_SLOT;
- }
- bool HasFixedRegisterPolicy() const {
- return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == FIXED_REGISTER;
- }
- bool HasFixedDoubleRegisterPolicy() const {
- return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == FIXED_DOUBLE_REGISTER;
- }
- bool HasWritableRegisterPolicy() const {
- return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == WRITABLE_REGISTER;
- }
-
- // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
- BasicPolicy basic_policy() const {
- return BasicPolicyField::decode(value_);
- }
-
- // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
- ExtendedPolicy extended_policy() const {
- DCHECK(basic_policy() == EXTENDED_POLICY);
- return ExtendedPolicyField::decode(value_);
- }
-
- // [fixed_slot_index]: Only for FIXED_SLOT.
- int fixed_slot_index() const {
- DCHECK(HasFixedSlotPolicy());
- return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
- }
-
- // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
- int fixed_register_index() const {
- DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
- return FixedRegisterField::decode(value_);
- }
-
- // [virtual_register]: The virtual register ID for this operand.
- int virtual_register() const {
- return VirtualRegisterField::decode(value_);
- }
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
- }
-
- // [lifetime]: Only for non-FIXED_SLOT.
- bool IsUsedAtStart() {
- DCHECK(basic_policy() == EXTENDED_POLICY);
- return LifetimeField::decode(value_) == USED_AT_START;
- }
-
- static bool TooManyParameters(int num_parameters) {
- const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
- return num_parameters + 1 > parameter_limit;
- }
-
- static bool TooManyParametersOrStackSlots(int num_parameters,
- int num_stack_slots) {
- const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
- return num_parameters + 1 + num_stack_slots > locals_limit;
- }
-};
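A sketch of the operand encoding described in the comment above (editorial example, following the BitField layouts in this class):

// LUnallocated(FIXED_SLOT, -2) with virtual_register == 7:
//   kind         UNALLOCATED  -> bits 0..2  == 001
//   basic policy FIXED_SLOT   -> bit  3     == 0
//   vreg         7            -> bits 4..21
//   slot index   -2           -> bits 22..31 (signed; hence the manual
//                                arithmetic-shift decode in fixed_slot_index())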
-
-
-class LMoveOperands final BASE_EMBEDDED {
- public:
- LMoveOperands(LOperand* source, LOperand* destination)
- : source_(source), destination_(destination) {
- }
-
- LOperand* source() const { return source_; }
- void set_source(LOperand* operand) { source_ = operand; }
-
- LOperand* destination() const { return destination_; }
- void set_destination(LOperand* operand) { destination_ = operand; }
-
- // The gap resolver marks moves as "in-progress" by clearing the
- // destination (but not the source).
- bool IsPending() const {
- return destination_ == NULL && source_ != NULL;
- }
-
-  // True if this move blocks a move into the given destination operand.
- bool Blocks(LOperand* operand) const {
- return !IsEliminated() && source()->Equals(operand);
- }
-
- // A move is redundant if it's been eliminated, if its source and
- // destination are the same, or if its destination is unneeded or constant.
- bool IsRedundant() const {
- return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
- (destination_ != NULL && destination_->IsConstantOperand());
- }
-
- bool IsIgnored() const {
- return destination_ != NULL && destination_->IsIgnored();
- }
-
-  // We clear both operands to indicate a move that's been eliminated.
- void Eliminate() { source_ = destination_ = NULL; }
- bool IsEliminated() const {
- DCHECK(source_ != NULL || destination_ == NULL);
- return source_ == NULL;
- }
-
- private:
- LOperand* source_;
- LOperand* destination_;
-};
-
-
-template <LOperand::Kind kOperandKind, int kNumCachedOperands>
-class LSubKindOperand final : public LOperand {
- public:
- static LSubKindOperand* Create(int index, Zone* zone) {
- DCHECK(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LSubKindOperand(index);
- }
-
- static LSubKindOperand* cast(LOperand* op) {
- DCHECK(op->kind() == kOperandKind);
- return reinterpret_cast<LSubKindOperand*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static LSubKindOperand* cache;
-
- LSubKindOperand() : LOperand() { }
- explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
-};
-
-
-#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
-typedef LSubKindOperand<LOperand::type, number> L##name;
-LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
-#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
-
-
-class LParallelMove final : public ZoneObject {
- public:
- explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
-
- void AddMove(LOperand* from, LOperand* to, Zone* zone) {
- move_operands_.Add(LMoveOperands(from, to), zone);
- }
-
- bool IsRedundant() const;
-
- ZoneList<LMoveOperands>* move_operands() { return &move_operands_; }
-
- void PrintDataTo(StringStream* stream) const;
-
- private:
- ZoneList<LMoveOperands> move_operands_;
-};
-
-
-class LPointerMap final : public ZoneObject {
- public:
- explicit LPointerMap(Zone* zone)
- : pointer_operands_(8, zone),
- untagged_operands_(0, zone),
- lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* GetNormalizedOperands() {
- for (int i = 0; i < untagged_operands_.length(); ++i) {
- RemovePointer(untagged_operands_[i]);
- }
- untagged_operands_.Clear();
- return &pointer_operands_;
- }
- int lithium_position() const { return lithium_position_; }
-
- void set_lithium_position(int pos) {
- DCHECK(lithium_position_ == -1);
- lithium_position_ = pos;
- }
-
- void RecordPointer(LOperand* op, Zone* zone);
- void RemovePointer(LOperand* op);
- void RecordUntagged(LOperand* op, Zone* zone);
- void PrintTo(StringStream* stream);
-
- private:
- ZoneList<LOperand*> pointer_operands_;
- ZoneList<LOperand*> untagged_operands_;
- int lithium_position_;
-};
-
-
-class LEnvironment final : public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- FrameType frame_type,
- BailoutId ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer,
- HEnterInlined* entry,
- Zone* zone)
- : closure_(closure),
- frame_type_(frame_type),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- translation_size_(value_count),
- parameter_count_(parameter_count),
- pc_offset_(-1),
- values_(value_count, zone),
- is_tagged_(value_count, zone),
- is_uint32_(value_count, zone),
- object_mapping_(0, zone),
- outer_(outer),
- entry_(entry),
- zone_(zone),
- has_been_used_(false) { }
-
- Handle<JSFunction> closure() const { return closure_; }
- FrameType frame_type() const { return frame_type_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- BailoutId ast_id() const { return ast_id_; }
- int translation_size() const { return translation_size_; }
- int parameter_count() const { return parameter_count_; }
- int pc_offset() const { return pc_offset_; }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
- HEnterInlined* entry() { return entry_; }
- Zone* zone() const { return zone_; }
-
- bool has_been_used() const { return has_been_used_; }
- void set_has_been_used() { has_been_used_ = true; }
-
- void AddValue(LOperand* operand,
- Representation representation,
- bool is_uint32) {
- values_.Add(operand, zone());
- if (representation.IsSmiOrTagged()) {
- DCHECK(!is_uint32);
- is_tagged_.Add(values_.length() - 1, zone());
- }
-
- if (is_uint32) {
- is_uint32_.Add(values_.length() - 1, zone());
- }
- }
-
- bool HasTaggedValueAt(int index) const {
- return is_tagged_.Contains(index);
- }
-
- bool HasUint32ValueAt(int index) const {
- return is_uint32_.Contains(index);
- }
-
- void AddNewObject(int length, bool is_arguments) {
- uint32_t encoded = LengthOrDupeField::encode(length) |
- IsArgumentsField::encode(is_arguments) |
- IsDuplicateField::encode(false);
- object_mapping_.Add(encoded, zone());
- }
-
- void AddDuplicateObject(int dupe_of) {
- uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
- IsDuplicateField::encode(true);
- object_mapping_.Add(encoded, zone());
- }
-
- int ObjectDuplicateOfAt(int index) {
- DCHECK(ObjectIsDuplicateAt(index));
- return LengthOrDupeField::decode(object_mapping_[index]);
- }
-
- int ObjectLengthAt(int index) {
- DCHECK(!ObjectIsDuplicateAt(index));
- return LengthOrDupeField::decode(object_mapping_[index]);
- }
-
- bool ObjectIsArgumentsAt(int index) {
- DCHECK(!ObjectIsDuplicateAt(index));
- return IsArgumentsField::decode(object_mapping_[index]);
- }
-
- bool ObjectIsDuplicateAt(int index) {
- return IsDuplicateField::decode(object_mapping_[index]);
- }
-
- void Register(int deoptimization_index,
- int translation_index,
- int pc_offset) {
- DCHECK(!HasBeenRegistered());
- deoptimization_index_ = deoptimization_index;
- translation_index_ = translation_index;
- pc_offset_ = pc_offset;
- }
- bool HasBeenRegistered() const {
- return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
- }
-
- void PrintTo(StringStream* stream);
-
- // Marker value indicating a de-materialized object.
- static LOperand* materialization_marker() { return NULL; }
-
- // Encoding used for the object_mapping map below.
- class LengthOrDupeField : public BitField<int, 0, 30> { };
- class IsArgumentsField : public BitField<bool, 30, 1> { };
- class IsDuplicateField : public BitField<bool, 31, 1> { };
-
- private:
- Handle<JSFunction> closure_;
- FrameType frame_type_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- BailoutId ast_id_;
- int translation_size_;
- int parameter_count_;
- int pc_offset_;
-
- // Value array: [parameters] [locals] [expression stack] [de-materialized].
- // |>--------- translation_size ---------<|
- ZoneList<LOperand*> values_;
- GrowableBitVector is_tagged_;
- GrowableBitVector is_uint32_;
-
- // Map with encoded information about materialization_marker operands.
- ZoneList<uint32_t> object_mapping_;
-
- LEnvironment* outer_;
- HEnterInlined* entry_;
- Zone* zone_;
- bool has_been_used_;
-};
-
-
-// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator final BASE_EMBEDDED {
- public:
- explicit ShallowIterator(LEnvironment* env)
- : env_(env),
- limit_(env != NULL ? env->values()->length() : 0),
- current_(0) {
- SkipUninteresting();
- }
-
- bool Done() { return current_ >= limit_; }
-
- LOperand* Current() {
- DCHECK(!Done());
- DCHECK(env_->values()->at(current_) != NULL);
- return env_->values()->at(current_);
- }
-
- void Advance() {
- DCHECK(!Done());
- ++current_;
- SkipUninteresting();
- }
-
- LEnvironment* env() { return env_; }
-
- private:
- bool ShouldSkip(LOperand* op) {
- return op == NULL || op->IsConstantOperand();
- }
-
- // Skip until something interesting, beginning with and including current_.
- void SkipUninteresting() {
- while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
- ++current_;
- }
- }
-
- LEnvironment* env_;
- int limit_;
- int current_;
-};
-
-
-// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator final BASE_EMBEDDED {
- public:
- explicit DeepIterator(LEnvironment* env)
- : current_iterator_(env) {
- SkipUninteresting();
- }
-
- bool Done() { return current_iterator_.Done(); }
-
- LOperand* Current() {
- DCHECK(!current_iterator_.Done());
- DCHECK(current_iterator_.Current() != NULL);
- return current_iterator_.Current();
- }
-
- void Advance() {
- current_iterator_.Advance();
- SkipUninteresting();
- }
-
- private:
- void SkipUninteresting() {
- while (current_iterator_.env() != NULL && current_iterator_.Done()) {
- current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
- }
- }
-
- ShallowIterator current_iterator_;
-};
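An illustrative use of the iterators above (editorial sketch; env is assumed to be the LEnvironment attached to some instruction):

for (DeepIterator it(env); !it.Done(); it.Advance()) {
  LOperand* op = it.Current();
  // op is a non-null, non-constant operand from this environment or any of
  // its outer environments, e.g. a value the register allocator must keep
  // live across the deopt point.
}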
-
-
-class LPlatformChunk;
-class LGap;
-class LLabel;
-
-// Superclass providing data and behavior common to all the
-// arch-specific LPlatformChunk classes.
-class LChunk : public ZoneObject {
- public:
- static LChunk* NewChunk(HGraph* graph);
-
- void AddInstruction(LInstruction* instruction, HBasicBlock* block);
- LConstantOperand* DefineConstantOperand(HConstant* constant);
- HConstant* LookupConstant(LConstantOperand* operand) const;
- Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
-
- int ParameterAt(int index);
- int GetParameterStackSlot(int index) const;
- bool HasAllocatedStackSlots() const {
- return current_frame_slots_ != base_frame_slots_;
- }
- int GetSpillSlotCount() const {
- return current_frame_slots_ - base_frame_slots_;
- }
- int GetTotalFrameSlotCount() const { return current_frame_slots_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Isolate* isolate() const { return graph_->isolate(); }
- const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
- void AddGapMove(int index, LOperand* from, LOperand* to);
- LGap* GetGapAt(int index) const;
- bool IsGapAt(int index) const;
- int NearestGapPos(int index) const;
- void MarkEmptyBlocks();
- const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- LLabel* GetLabel(int block_id) const;
- int LookupDestination(int block_id) const;
- Label* GetAssemblyLabel(int block_id) const;
-
- void AddDeprecationDependency(Handle<Map> map) {
- DCHECK(!map->is_deprecated());
- if (!map->CanBeDeprecated()) return;
- DCHECK(!info_->IsStub());
- deprecation_dependencies_.Add(map, zone());
- }
-
- void AddStabilityDependency(Handle<Map> map) {
- DCHECK(map->is_stable());
- if (!map->CanTransition()) return;
- DCHECK(!info_->IsStub());
- stability_dependencies_.Add(map, zone());
- }
-
- Zone* zone() const { return info_->zone(); }
-
- Handle<Code> Codegen();
-
- void set_allocated_double_registers(BitVector* allocated_registers);
- BitVector* allocated_double_registers() {
- return allocated_double_registers_;
- }
-
- protected:
- LChunk(CompilationInfo* info, HGraph* graph);
-
- int base_frame_slots_;
- int current_frame_slots_;
-
- private:
- void CommitDependencies(Handle<Code> code) const;
-
- CompilationInfo* info_;
- HGraph* const graph_;
- BitVector* allocated_double_registers_;
- ZoneList<LInstruction*> instructions_;
- ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<Map>> deprecation_dependencies_;
- ZoneList<Handle<Map>> stability_dependencies_;
-};
-
-
-class LChunkBuilderBase BASE_EMBEDDED {
- public:
- explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph)
- : argument_count_(0),
- chunk_(NULL),
- info_(info),
- graph_(graph),
- status_(UNUSED),
- zone_(graph->zone()) {}
-
- virtual ~LChunkBuilderBase() { }
-
- void Abort(BailoutReason reason);
- void Retry(BailoutReason reason);
-
- protected:
- enum Status { UNUSED, BUILDING, DONE, ABORTED };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- int argument_count() const { return argument_count_; }
- Isolate* isolate() const { return graph_->isolate(); }
- Heap* heap() const { return isolate()->heap(); }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
-  // An input operand in a register, a stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
-
-  // Constructs a proper environment for a lazy bailout point after a call,
-  // creates an LLazyBailout instruction and adds it to the current block.
- void CreateLazyBailoutForCall(HBasicBlock* current_block, LInstruction* instr,
- HInstruction* hydrogen_val);
-
-  // Assigns the given environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr,
- HEnvironment* hydrogen_env);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
- void AddObjectToMaterialize(HValue* value,
- ZoneList<HValue*>* objects_to_materialize,
- LEnvironment* result);
-
- Zone* zone() const { return zone_; }
-
- int argument_count_;
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Status status_;
-
- private:
- Zone* zone_;
-};
-
-
-enum NumberUntagDMode {
- NUMBER_CANDIDATE_IS_SMI,
- NUMBER_CANDIDATE_IS_ANY_TAGGED
-};
-
-
-class LPhase : public CompilationPhase {
- public:
- LPhase(const char* name, LChunk* chunk)
- : CompilationPhase(name, chunk->info()),
- chunk_(chunk) { }
- ~LPhase();
-
- private:
- LChunk* chunk_;
-
- DISALLOW_COPY_AND_ASSIGN(LPhase);
-};
-
-
-// A register-allocator view of a Lithium instruction. It contains the id of
-// the output operand and a list of input operand uses.
-
-enum RegisterKind {
- UNALLOCATED_REGISTERS,
- GENERAL_REGISTERS,
- DOUBLE_REGISTERS
-};
-
-// Iterator for non-null temp operands.
-class TempIterator BASE_EMBEDDED {
- public:
- inline explicit TempIterator(LInstruction* instr);
- inline bool Done();
- inline LOperand* Current();
- inline void Advance();
-
- private:
- inline void SkipUninteresting();
- LInstruction* instr_;
- int limit_;
- int current_;
-};
-
-
-// Iterator for non-constant input operands.
-class InputIterator BASE_EMBEDDED {
- public:
- inline explicit InputIterator(LInstruction* instr);
- inline bool Done();
- inline LOperand* Current();
- inline void Advance();
-
- private:
- inline void SkipUninteresting();
- LInstruction* instr_;
- int limit_;
- int current_;
-};
-
-
-class UseIterator BASE_EMBEDDED {
- public:
- inline explicit UseIterator(LInstruction* instr);
- inline bool Done();
- inline LOperand* Current();
- inline void Advance();
-
- private:
- InputIterator input_iterator_;
- DeepIterator env_iterator_;
-};
-
-class LInstruction;
-class LCodeGen;
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_LITHIUM_H_
diff --git a/deps/v8/src/crankshaft/mips/OWNERS b/deps/v8/src/crankshaft/mips/OWNERS
deleted file mode 100644
index 3f8fbfc7c8..0000000000
--- a/deps/v8/src/crankshaft/mips/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
deleted file mode 100644
index 942baa08c0..0000000000
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ /dev/null
@@ -1,5417 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/crankshaft/mips/lithium-codegen-mips.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
- LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
-}
-
-LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-}
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ Sdc1(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ Ldc1(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- // a1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
-    // ra: Caller's pc.
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ Subu(sp, sp, Operand(slots * kPointerSize));
- __ Push(a0, a1);
- __ Addu(a0, sp, Operand(slots * kPointerSize));
- __ li(a1, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ Subu(a0, a0, Operand(kPointerSize));
- __ sw(a1, MemOperand(a0, 2 * kPointerSize));
- __ Branch(&loop, ne, a0, Operand(sp));
- __ Pop(a0, a1);
- } else {
- __ Subu(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info()->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is in a1.
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(a1);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
- Operand(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ push(a1);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
-    // Context is returned in v0. It replaces the context passed to us.
- // It's saved in the stack and kept live in cp.
- __ mov(cp, v0);
- __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ lw(a0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ sw(a0, target);
- // Update the write barrier. This clobbers a3 and a0.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, a0, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ Subu(sp, sp, Operand(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ PushCommonFrame(scratch0());
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- __ PopCommonFrame(scratch0());
- frame_is_built_ = false;
- }
- __ jmp(code->exit());
- }
- }
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- if (jump_table_.length() > 0) {
- Label needs_frame, call_deopt_entry;
-
- Comment(";;; -------------------- Jump table --------------------");
- Address base = jump_table_[0].address;
-
- Register entry_offset = t9;
-
- int length = jump_table_.length();
- for (int i = 0; i < length; i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
-
- DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
-
- // Second-level deopt table entries are contiguous and small, so instead
- // of loading the full, absolute address of each one, load an immediate
- // offset which will be added to the base address later.
- __ li(entry_offset, Operand(entry - base));
-
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- __ PushCommonFrame();
- __ Call(&needs_frame);
- } else {
- __ Call(&call_deopt_entry);
- }
- }
-
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ push(at);
- DCHECK(info()->IsStub());
- }
-
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
-
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
-
- // Add the base address to the offset previously loaded in entry_offset.
- __ Jump(entry_offset, Operand(ExternalReference::ForDeoptEntry(base)));
- }
- __ RecordComment("]");
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::from_code(index);
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::from_code(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- AllowDeferredHandleDereference get_number;
- DCHECK(literal->IsNumber());
- __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
- } else if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
- } else if (r.IsDouble()) {
- Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
- } else {
- DCHECK(r.IsSmiOrTagged());
- __ li(scratch, literal);
- }
- return scratch;
- } else if (op->IsStackSlot()) {
- __ lw(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- FloatRegister flt_scratch,
- DoubleRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- DCHECK(literal->IsNumber());
- __ li(at, Operand(static_cast<int32_t>(literal->Number())));
- __ mtc1(at, flt_scratch);
- __ cvt_d_w(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort(kUnsupportedDoubleImmediate);
- } else if (r.IsTagged()) {
- Abort(kUnsupportedTaggedImmediate);
- }
- } else if (op->IsStackSlot()) {
- MemOperand mem_op = ToMemOperand(op);
- __ Ldc1(dbl_scratch, mem_op);
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<int32_t>(Smi::FromInt(value));
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- DCHECK(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand(0);
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand(0);
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize;
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()));
- } else {
- // No eager frame was built, so retrieve the parameter relative to the
- // stack pointer.
- return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- } else {
- // No eager frame was built, so retrieve the parameter relative to the
- // stack pointer.
- return MemOperand(
- sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- }
-}
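
For reference, a minimal standalone sketch (not part of the patch) of the offset arithmetic used by ArgumentsOffsetWithoutFrame and the two ToMemOperand helpers above: with no eager frame, a negative slot index becomes a non-negative sp-relative byte offset. The 4-byte pointer size is an assumption for 32-bit MIPS.

#include <cassert>

namespace example {

constexpr int kPointerSize = 4;  // assumed pointer size on 32-bit MIPS

// Mirrors the helper above: index -1 maps to offset 0, index -2 to
// kPointerSize, and so on.
int ArgumentsOffsetWithoutFrame(int index) {
  assert(index < 0);
  return -(index + 1) * kPointerSize;
}

}  // namespace example

int main() {
  assert(example::ArgumentsOffsetWithoutFrame(-1) == 0);
  assert(example::ArgumentsOffsetWithoutFrame(-2) == 4);
  assert(example::ArgumentsOffsetWithoutFrame(-3) == 8);
  return 0;
}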
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Move(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ lw(cp, ToMemOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
- Register src1, const Operand& src2) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Register scratch = scratch0();
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
- __ Push(a1, scratch);
- __ li(scratch, Operand(count));
- __ lw(a1, MemOperand(scratch));
- __ Subu(a1, a1, Operand(1));
- __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
- __ li(a1, Operand(FLAG_deopt_every_n_times));
- __ sw(a1, MemOperand(scratch));
- __ Pop(a1, scratch);
-
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&no_deopt);
- __ sw(a1, MemOperand(scratch));
- __ Pop(a1, scratch);
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- Label skip;
- if (condition != al) {
- __ Branch(&skip, NegateCondition(condition), src1, src2);
- }
- __ stop("trap_on_deopt");
- __ bind(&skip);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to handle condition, build frame, or
- // restore caller doubles.
- if (condition == al && frame_is_built_ &&
- !info()->saves_caller_doubles()) {
- DeoptComment(deopt_info);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
- // We often have several deopts to the same entry; reuse the last
- // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- __ Branch(&jump_table_.last().label, condition, src1, src2);
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason, Register src1,
- const Operand& src2) {
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
-
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
- // Note: The code below even works when right contains kMinInt.
- __ subu(dividend, zero_reg, dividend);
- __ And(dividend, dividend, Operand(mask));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(dividend, zero_reg, dividend);
- }
-
- __ bind(&dividend_is_not_negative);
- __ And(dividend, dividend, Operand(mask));
- __ bind(&done);
-}
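
A minimal sketch (not part of the patch) of the remainder computation the branching code above performs for negative dividends: negate, mask with divisor - 1, negate back. The unsigned negation below stands in for the wrapping subu instruction, so the kMinInt case behaves the same way; it is an illustration, not the generated code.

#include <cassert>
#include <cstdint>

// dividend % (mask + 1), where mask = 2^k - 1, computed the way the
// branching sequence above does it.
int32_t ModPowerOf2(int32_t dividend, uint32_t mask) {
  if (dividend >= 0) {
    return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
  }
  // Negate in uint32_t so kMinInt wraps like the MIPS subu instruction.
  uint32_t negated = 0u - static_cast<uint32_t>(dividend);
  return -static_cast<int32_t>(negated & mask);
}

int main() {
  assert(ModPowerOf2(13, 7) == 5);         //  13 % 8
  assert(ModPowerOf2(-13, 7) == -5);       // -13 % 8, truncated toward zero
  assert(ModPowerOf2(INT32_MIN, 7) == 0);  // the wrapping negation still works
  return 0;
}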
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr);
- return;
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- __ Mul(result, result, Operand(Abs(divisor)));
- __ Subu(result, dividend, Operand(result));
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- __ bind(&remainder_not_zero);
- }
-}
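
A sketch (not part of the patch) of the multiply-and-subtract sequence above that recovers the remainder from a truncating division by a constant; plain C++ division stands in for the TruncatingDiv magic-number multiply, so this only illustrates the arithmetic.

#include <cassert>
#include <cstdlib>

int ModByConst(int dividend, int divisor) {
  int quotient = dividend / std::abs(divisor);  // TruncatingDiv(result, dividend, Abs(divisor))
  int product = quotient * std::abs(divisor);   // Mul(result, result, Abs(divisor))
  return dividend - product;                    // Subu(result, dividend, result)
}

int main() {
  assert(ModByConst(14, 5) == 4);
  assert(ModByConst(-14, 5) == -4);  // negative remainder: the -0 check above matters
  assert(ModByConst(14, -5) == 4);
  return 0;
}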
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
- const Register left_reg = ToRegister(instr->left());
- const Register right_reg = ToRegister(instr->right());
- const Register result_reg = ToRegister(instr->result());
-
- // div runs in the background while we check for special cases.
- __ Mod(result_reg, left_reg, right_reg);
-
- Label done;
- // Check for x % 0; we have to deopt in this case because we can't return a
- // NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
- Operand(zero_reg));
- }
-
- // Check for kMinInt % -1; div will return kMinInt, which is not what we
- // want. We have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
- Operand(-1));
- } else {
- __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
- __ Branch(USE_DELAY_SLOT, &done);
- __ mov(result_reg, zero_reg);
- }
- __ bind(&no_overflow_possible);
- }
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- __ Branch(&done, ge, left_reg, Operand(zero_reg));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
- Operand(zero_reg));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
- Operand(kMinInt));
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
- Operand(zero_reg));
- }
-
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ Subu(result, zero_reg, dividend);
- return;
- }
- uint16_t shift = WhichPowerOf2Abs(divisor);
- if (shift == 0) {
- __ Move(result, dividend);
- } else if (shift == 1) {
- __ srl(result, dividend, 31);
- __ Addu(result, dividend, Operand(result));
- } else {
- __ sra(result, dividend, 31);
- __ srl(result, result, 32 - shift);
- __ Addu(result, dividend, Operand(result));
- }
- if (shift > 0) __ sra(result, result, shift);
- if (divisor < 0) __ Subu(result, zero_reg, result);
-}
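
A sketch (not part of the patch) of the bias-and-shift sequence above: an arithmetic shift alone rounds toward minus infinity, so 2^shift - 1 is added to negative dividends first to get a truncating division. Unsigned casts stand in for the wrapping register arithmetic, and only the shift >= 1, positive-divisor path is shown (the original handles shift == 0 and negative divisors separately).

#include <cassert>
#include <cstdint>

int32_t DivByPowerOf2(int32_t dividend, int shift) {
  assert(shift >= 1 && shift <= 31);
  uint32_t bits = static_cast<uint32_t>(dividend);
  // sra(result, dividend, 31): all-ones for negative dividends, zero otherwise.
  uint32_t sign_mask = dividend < 0 ? 0xFFFFFFFFu : 0u;
  // srl(result, result, 32 - shift): keep the low `shift` bits, i.e. the bias
  // 2^shift - 1 for negative dividends (for shift == 1 the srl of the dividend
  // itself yields the same single bias bit).
  uint32_t bias = sign_mask >> (32 - shift);
  // Addu + sra: add the bias, then arithmetic-shift right by `shift`.
  return static_cast<int32_t>(bits + bias) >> shift;
}

int main() {
  assert(DivByPowerOf2(7, 2) == 1);    //  7 / 4
  assert(DivByPowerOf2(-7, 2) == -1);  // -7 / 4, truncated toward zero
  assert(DivByPowerOf2(-8, 3) == -1);
  return 0;
}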
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Subu(result, zero_reg, result);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ Mul(scratch0(), result, Operand(divisor));
- __ Subu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
- Operand(zero_reg));
- }
-}
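
A sketch (not part of the patch) of the exactness check emitted above when the quotient has non-truncating uses: multiply the quotient back by the divisor and compare with the dividend. Plain division stands in for TruncatingDiv, and small operands keep the multiply in range.

#include <cassert>
#include <cstdlib>

// Returns true and stores the quotient when the division is exact; otherwise
// returns false, the case in which the generated code deoptimizes
// (DeoptimizeReason::kLostPrecision).
bool ExactDivByConst(int dividend, int divisor, int* quotient) {
  int result = dividend / std::abs(divisor);       // TruncatingDiv
  if (divisor < 0) result = -result;               // Subu(result, zero_reg, result)
  if (result * divisor != dividend) return false;  // Mul + Subu + DeoptimizeIf(ne, ...)
  *quotient = result;
  return true;
}

int main() {
  int q = 0;
  assert(ExactDivByConst(15, 5, &q) && q == 3);
  assert(!ExactDivByConst(14, 5, &q));  // 14 / 5 leaves a remainder -> deopt
  assert(ExactDivByConst(-15, -5, &q) && q == 3);
  return 0;
}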
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- const Register result = ToRegister(instr->result());
- Register remainder = ToRegister(instr->temp());
-
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ Div(remainder, result, dividend, divisor);
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
- Operand(zero_reg));
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
- Operand(zero_reg));
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) &&
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
- __ bind(&left_not_min_int);
- }
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
- Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DoubleRegister addend = ToDoubleRegister(instr->addend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- DCHECK(addend.is(ToDoubleRegister(instr->result())));
-
- __ madd_d(addend, addend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- Register result = ToRegister(instr->result());
- int32_t divisor = instr->divisor();
- Register scratch = result.is(dividend) ? scratch0() : dividend;
- DCHECK(!result.is(dividend) || !scratch.is(dividend));
-
- // If the divisor is 1, return the dividend.
- if (divisor == 1) {
- __ Move(result, dividend);
- return;
- }
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- uint16_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ sra(result, dividend, shift);
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
-
- // The dividend can be the same register as the result, so save its value
- // for the overflow check.
- __ Move(scratch, dividend);
-
- __ Subu(result, zero_reg, dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- __ Xor(scratch, scratch, result);
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(zero_reg));
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ sra(result, result, shift);
- return;
- }
-
- Label no_overflow, done;
- __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
- __ li(result, Operand(kMinInt / divisor));
- __ Branch(&done);
- __ bind(&no_overflow);
- __ sra(result, result, shift);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Subu(result, zero_reg, result);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp());
- DCHECK(!temp.is(dividend) && !temp.is(result));
- Label needs_adjustment, done;
- __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
- dividend, Operand(zero_reg));
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Subu(result, zero_reg, result);
- __ jmp(&done);
- __ bind(&needs_adjustment);
- __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
- __ TruncatingDiv(result, temp, Abs(divisor));
- if (divisor < 0) __ Subu(result, zero_reg, result);
- __ Subu(result, result, Operand(1));
- __ bind(&done);
-}
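
A sketch (not part of the patch) of the pre/post adjustment above that turns a truncating constant division into a flooring one when the dividend's sign can differ from the divisor's; plain division stands in for TruncatingDiv.

#include <cassert>
#include <cstdlib>

int FlooringDivByConst(int dividend, int divisor) {
  // Signs match (the "easy case" above): truncation already floors.
  if ((divisor > 0 && dividend >= 0) || (divisor < 0 && dividend <= 0)) {
    int result = dividend / std::abs(divisor);
    return divisor < 0 ? -result : result;
  }
  // Signs differ: bias the dividend toward zero by one, divide, subtract one.
  int temp = dividend + (divisor > 0 ? 1 : -1);
  int result = temp / std::abs(divisor);
  if (divisor < 0) result = -result;
  return result - 1;
}

int main() {
  assert(FlooringDivByConst(-7, 2) == -4);   // floor(-3.5)
  assert(FlooringDivByConst(7, -2) == -4);   // floor(-3.5)
  assert(FlooringDivByConst(7, 2) == 3);
  assert(FlooringDivByConst(-6, -2) == 3);   // exact division also stays correct
  return 0;
}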
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- const Register result = ToRegister(instr->result());
- Register remainder = scratch0();
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ Div(remainder, result, dividend, divisor);
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
- Operand(zero_reg));
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
- Operand(zero_reg));
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) &&
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
- __ bind(&left_not_min_int);
- }
-
- // We performed a truncating division. Correct the result if necessary.
- Label done;
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(remainder, remainder, Operand(divisor));
- __ Branch(&done, ge, remainder, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
-}
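
A sketch (not part of the patch) of the remainder-sign correction above that turns the MIPS truncating div into a flooring division: subtract one from the quotient exactly when there is a remainder whose sign differs from the divisor's.

#include <cassert>

int FlooringDiv(int dividend, int divisor) {
  int result = dividend / divisor;     // Div writes both quotient and remainder
  int remainder = dividend % divisor;
  // Xor + branch above: correct only when there is a remainder and it has the
  // opposite sign of the divisor (i.e. the exact quotient was negative).
  if (remainder != 0 && (remainder ^ divisor) < 0) result -= 1;
  return result;
}

int main() {
  assert(FlooringDiv(-7, 2) == -4);
  assert(FlooringDiv(7, -2) == -4);
  assert(FlooringDiv(-8, 2) == -4);  // exact division needs no correction
  return 0;
}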
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (right_op->IsConstantOperand()) {
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a zero constant is handled separately below.
- // If the constant is negative and left is zero, the result should be -0.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
- Operand(zero_reg));
- }
-
- switch (constant) {
- case -1:
- if (overflow) {
- Label no_overflow;
- __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow);
- } else {
- __ Subu(result, zero_reg, left);
- }
- break;
- case 0:
- if (bailout_on_minus_zero) {
- // If left is strictly negative and the constant is zero, the
- // result is -0. Deoptimize if required; otherwise return 0.
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
- Operand(zero_reg));
- }
- __ mov(result, zero_reg);
- break;
- case 1:
- // Nothing to do.
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (base::bits::IsPowerOfTwo32(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ sll(result, left, shift);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Subu(result, zero_reg, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ Lsa(result, left, left, shift);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Subu(result, zero_reg, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(scratch, left, shift);
- __ Subu(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Subu(result, zero_reg, result);
- } else {
- // Generate standard code.
- __ li(at, constant);
- __ Mul(result, left, at);
- }
- }
-
- } else {
- DCHECK(right_op->IsRegister());
- Register right = ToRegister(right_op);
-
- if (overflow) {
- // hi:lo = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ Mul(scratch, result, result, right);
- } else {
- __ Mul(scratch, result, left, right);
- }
- __ sra(at, result, 31);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(at));
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ Mul(result, result, right);
- } else {
- __ Mul(result, left, right);
- }
- }
-
- if (bailout_on_minus_zero) {
- Label done;
- __ Xor(at, left, right);
- __ Branch(&done, ge, at, Operand(zero_reg));
- // Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
-}
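
A sketch (not part of the patch) of the three strength-reduction shapes the constant-multiply case above chooses from; only the positive-constant form is shown (the generated code additionally negates the result for negative constants), and small operands keep the shifts clear of signed overflow.

#include <cassert>
#include <cstdint>

int32_t MulByPowerOf2(int32_t left, int shift) {        // sll
  return left << shift;
}
int32_t MulByPowerOf2Plus1(int32_t left, int shift) {   // Lsa: left + (left << shift)
  return left + (left << shift);
}
int32_t MulByPowerOf2Minus1(int32_t left, int shift) {  // sll + Subu
  return (left << shift) - left;
}

int main() {
  assert(MulByPowerOf2(5, 3) == 5 * 8);
  assert(MulByPowerOf2Plus1(5, 3) == 5 * 9);
  assert(MulByPowerOf2Minus1(5, 3) == 5 * 7);
  return 0;
}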
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- DCHECK(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot()) {
- right = Operand(EmitLoadRegister(right_op, at));
- } else {
- DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_OR:
- __ Or(result, left, right);
- break;
- case Token::BIT_XOR:
- if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
- __ Nor(result, zero_reg, left);
- } else {
- __ Xor(result, left, right);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- if (right_op->IsRegister()) {
- // No need to mask the right operand on MIPS; the masking is built into the
- // variable shift instructions.
- switch (instr->op()) {
- case Token::ROR:
- __ Ror(result, left, Operand(ToRegister(right_op)));
- break;
- case Token::SAR:
- __ srav(result, left, ToRegister(right_op));
- break;
- case Token::SHR:
- __ srlv(result, left, ToRegister(right_op));
- if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
- Operand(zero_reg));
- }
- break;
- case Token::SHL:
- __ sllv(result, left, ToRegister(right_op));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ Ror(result, left, Operand(shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sra(result, left, shift_count);
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ srl(result, left, shift_count);
- } else {
- if (instr->can_deopt()) {
- __ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
- Operand(zero_reg));
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- if (instr->hydrogen_value()->representation().IsSmi() &&
- instr->can_deopt()) {
- if (shift_count != 1) {
- __ sll(result, left, shift_count - 1);
- __ SmiTagCheckOverflow(result, result, scratch);
- } else {
- __ SmiTagCheckOverflow(result, left, scratch);
- }
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(zero_reg));
- } else {
- __ sll(result, left, shift_count);
- }
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, at);
- __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
- }
- } else { // can_overflow.
- Register scratch = scratch0();
- Label no_overflow_label;
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, scratch);
- __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
- Operand(right_reg), &no_overflow_label);
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
- &no_overflow_label, scratch);
- }
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow_label);
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
- double v = instr->value();
- __ Move(result, v);
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ li(ToRegister(instr->result()), object);
-}
-
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
- Register scratch = scratch0();
- DCHECK(!scratch.is(string));
- DCHECK(!scratch.is(ToRegister(index)));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Addu(scratch, string, ToRegister(index));
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- __ sll(scratch, ToRegister(index), 1);
- __ Addu(scratch, string, scratch);
- }
- return FieldMemOperand(scratch, SeqString::kHeaderSize);
-}
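
A sketch (not part of the patch) of the byte-offset arithmetic the helper above performs for a character index in a sequential string: one byte per character for one-byte strings, kUC16Size bytes for two-byte strings, plus the header. The header size used here is a stand-in value, not the real SeqString::kHeaderSize.

#include <cassert>

constexpr int kUC16Size = 2;            // two-byte (UC16) characters
constexpr int kSeqStringHeaderSize = 12;  // placeholder for SeqString::kHeaderSize

int SeqStringByteOffset(int index, bool one_byte) {
  int offset = one_byte ? index : index * kUC16Size;
  return kSeqStringHeaderSize + offset;
}

int main() {
  assert(SeqStringByteOffset(3, true) == 15);   // one-byte string: 1 byte per char
  assert(SeqStringByteOffset(3, false) == 18);  // two-byte string: 2 bytes per char
  return 0;
}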
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- __ And(scratch, scratch,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ lbu(result, operand);
- } else {
- __ lhu(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ sb(value, operand);
- } else {
- __ sh(value, operand);
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, at);
- __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
- }
- } else { // can_overflow.
- Register scratch = scratch1();
- Label no_overflow_label;
- if (right->IsStackSlot()) {
- Register right_reg = EmitLoadRegister(right, scratch);
- __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
- Operand(right_reg), &no_overflow_label);
- } else {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
- &no_overflow_label, scratch);
- }
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow_label);
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Register scratch = scratch1();
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- Register left_reg = ToRegister(left);
- Register right_reg = EmitLoadRegister(right, scratch0());
- Register result_reg = ToRegister(instr->result());
- Label return_right, done;
- __ Slt(scratch, left_reg, Operand(right_reg));
- if (condition == ge) {
- __ Movz(result_reg, left_reg, scratch);
- __ Movn(result_reg, right_reg, scratch);
- } else {
- DCHECK(condition == le);
- __ Movn(result_reg, left_reg, scratch);
- __ Movz(result_reg, right_reg, scratch);
- }
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- FPURegister left_reg = ToDoubleRegister(left);
- FPURegister right_reg = ToDoubleRegister(right);
- FPURegister result_reg = ToDoubleRegister(instr->result());
-
- Label nan, done;
- if (operation == HMathMinMax::kMathMax) {
- __ Float64Max(result_reg, left_reg, right_reg, &nan);
- } else {
- DCHECK(operation == HMathMinMax::kMathMin);
- __ Float64Min(result_reg, left_reg, right_reg, &nan);
- }
- __ Branch(&done);
-
- __ bind(&nan);
- __ add_d(result_reg, left_reg, right_reg);
-
- __ bind(&done);
- }
-}
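
A sketch (not part of the patch) of the branchless integer select above: Slt produces a 0/1 flag and Movz/Movn conditionally move on it. The max case is shown; min just swaps which operand moves on which flag value.

#include <cassert>

int MathMax(int left, int right) {
  int scratch = (left < right) ? 1 : 0;  // Slt(scratch, left_reg, right_reg)
  int result = 0;
  if (scratch == 0) result = left;       // Movz(result_reg, left_reg, scratch)
  if (scratch != 0) result = right;      // Movn(result_reg, right_reg, scratch)
  return result;
}

int main() {
  assert(MathMax(3, 7) == 7);
  assert(MathMax(7, 3) == 7);
  assert(MathMax(-1, -5) == -1);
  return 0;
}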
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ add_d(result, left, right);
- break;
- case Token::SUB:
- __ sub_d(result, left, right);
- break;
- case Token::MUL:
- __ mul_d(result, left, right);
- break;
- case Token::DIV:
- __ div_d(result, left, right);
- break;
- case Token::MOD: {
- // Save a0-a3 on the stack.
- RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- __ MultiPush(saved_regs);
-
- __ PrepareCallCFunction(0, 2, scratch0());
- __ MovToFloatParameters(left, right);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- // Move the result into the double result register.
- __ MovFromFloatResult(result);
-
- // Restore the saved registers.
- __ MultiPop(saved_regs);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(a1));
- DCHECK(ToRegister(instr->right()).is(a0));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- // Other architectures use a nop here to signal that there is no inlined
- // patchable code. MIPS does not need the nop, since our marker
- // instruction (andi zero_reg) will never be used in normal code.
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
- if (right_block == left_block || condition == al) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(right_block),
- NegateCondition(condition), src1, src2);
- } else if (right_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
- } else {
- __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
- __ Branch(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2) {
- int right_block = instr->FalseDestination(chunk_);
- int left_block = instr->TrueDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateFpuCondition(condition), src1, src2);
- } else if (right_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
- condition, src1, src2);
- } else {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
- condition, src1, src2);
- __ Branch(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
- Register src1, const Operand& src2) {
- int true_block = instr->TrueDestination(chunk_);
- __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
- Register src1, const Operand& src2) {
- int false_block = instr->FalseDestination(chunk_);
- __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitFalseBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2) {
- int false_block = instr->FalseDestination(chunk_);
- __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
- condition, src1, src2);
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ stop("LDebugBreak");
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32() || r.IsSmi()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- EmitBranch(instr, ne, reg, Operand(zero_reg));
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- DoubleRegister reg = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- EmitBranchF(instr, ogl, reg, kDoubleRegZero);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq, reg, Operand(at));
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, ne, reg, Operand(zero_reg));
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- DoubleRegister dbl_scratch = double_scratch0();
- __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
- EmitBranch(instr, ne, at, Operand(zero_reg));
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // Boolean -> its value.
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all others -> true.
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
- }
-
- const Register map = scratch0();
- if (expected & ToBooleanHint::kNeedsMap) {
- __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
- __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
- __ Branch(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- const Register scratch = scratch1();
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- ne, dbl_scratch, kDoubleRegZero);
- // Falls through if dbl_scratch == 0.
- __ Branch(instr->FalseLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
- Operand(zero_reg));
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cond = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- FPURegister left_reg = ToDoubleRegister(left);
- FPURegister right_reg = ToDoubleRegister(right);
-
- // If a NaN is involved, i.e. the result is unordered,
- // jump to the false block label.
- __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
- left_reg, right_reg);
-
- EmitBranchF(instr, cond, left_reg, right_reg);
- } else {
- Register cmp_left;
- Operand cmp_right = Operand(0);
-
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- cmp_left = ToRegister(left);
- cmp_right = Operand(Smi::FromInt(value));
- } else {
- cmp_left = ToRegister(left);
- cmp_right = Operand(value);
- }
- } else if (left->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- cmp_left = ToRegister(right);
- cmp_right = Operand(Smi::FromInt(value));
- } else {
- cmp_left = ToRegister(right);
- cmp_right = Operand(value);
- }
- // We commuted the operands, so commute the condition.
- cond = CommuteCondition(cond);
- } else {
- cmp_left = ToRegister(left);
- cmp_right = Operand(ToRegister(right));
- }
-
- EmitBranch(instr, cond, cmp_left, cmp_right);
- }
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- EmitBranch(instr, eq, left, Operand(right));
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ li(at, Operand(factory()->the_hole_value()));
- EmitBranch(instr, eq, input_reg, Operand(at));
- return;
- }
-
- DoubleRegister input_reg = ToDoubleRegister(instr->object());
- EmitFalseBranchF(instr, eq, input_reg, input_reg);
-
- Register scratch = scratch0();
- __ FmoveHigh(scratch, input_reg);
- EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ GetObjectType(input, temp1, temp1);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond, temp1,
- Operand(FIRST_NONSTRING_TYPE));
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), at);
- __ And(at, input_reg, kSmiTagMask);
- EmitBranch(instr, eq, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ And(at, temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(instr, ne, at, Operand(zero_reg));
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(a1));
- DCHECK(ToRegister(instr->right()).is(a0));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq, v0, Operand(at));
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ GetObjectType(input, scratch, scratch);
- EmitBranch(instr,
- BranchCondition(instr->hydrogen()),
- scratch,
- Operand(TestType(instr->hydrogen())));
-}
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
- __ GetObjectType(input, temp, temp2);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
- } else {
- __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
- }
-
- // Check if the constructor in the map is a function.
- Register instance_type = scratch1();
- DCHECK(!instance_type.is(temp));
- __ GetMapConstructor(temp, temp, temp2, instance_type);
-
- // Objects with a non-function constructor have class 'Object'.
- if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
- } else {
- __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ lw(temp,
- FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
-
- // End with the address of this class_name instance in temp register.
- // On MIPS, the caller must do the comparison with Handle<String> class_name.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, eq, temp, Operand(class_name));
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- EmitBranch(instr, eq, temp, Operand(instr->map()));
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = scratch0();
- Register const object_instance_type = scratch1();
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ SmiTst(object, at);
- EmitFalseBranch(instr, eq, at, Operand(zero_reg));
- }
-
- // Loop through the {object}'s prototype chain looking for the {prototype}.
- __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ And(object_instance_type, object_instance_type,
- Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
- Operand(zero_reg));
- // Deoptimize for proxies.
- __ lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
- Operand(JS_PROXY_TYPE));
-
- __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- EmitFalseBranch(instr, eq, object_prototype, Operand(at));
- EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
- __ Branch(USE_DELAY_SLOT, &loop);
- __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // On MIPS there is no need for a "no inlined smi code" marker (nop).
-
- Condition condition = ComputeCompareCondition(op);
- // A minor optimization that relies on LoadRoot always emitting one
- // instruction.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- Label done, check;
- __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
- __ bind(&check);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0. We're leaving the code
- // managed by the register allocator and tearing down the frame, so it's
- // safe to write to the context register.
- __ push(v0);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
- if (NeedsEagerFrame()) {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- }
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (sp_delta != 0) {
- __ Addu(sp, sp, Operand(sp_delta));
- }
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiUntag(reg);
- __ Lsa(sp, sp, reg, kPointerSizeLog2);
- }
-
- __ Jump(ra);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
-
- __ lw(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
- } else {
- Label is_not_hole;
- __ Branch(&is_not_hole, ne, result, Operand(at));
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, target);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
- } else {
- __ Branch(&skip_assignment, ne, scratch, Operand(at));
- }
- }
-
- __ sw(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch0(),
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = MemOperand(object, offset);
- __ Load(result, operand, access.representation());
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Ldc1(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
- MemOperand operand = FieldMemOperand(object, offset);
- __ Load(result, operand, access.representation());
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ lw(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ GetObjectType(result, scratch, scratch);
- __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them; add one more.
- if (instr->length()->IsConstantOperand()) {
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int index = (const_length - const_index) + 1;
- __ lw(result, MemOperand(arguments, index * kPointerSize));
- } else {
- Register index = ToRegister(instr->index());
- __ li(at, Operand(const_length + 1));
- __ Subu(result, at, index);
- __ Lsa(at, arguments, result, kPointerSizeLog2);
- __ lw(result, MemOperand(at));
- }
- } else if (instr->index()->IsConstantOperand()) {
- Register length = ToRegister(instr->length());
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int loc = const_index - 1;
- if (loc != 0) {
- __ Subu(result, length, Operand(loc));
- __ Lsa(at, arguments, result, kPointerSizeLog2);
- __ lw(result, MemOperand(at));
- } else {
- __ Lsa(at, arguments, length, kPointerSizeLog2);
- __ lw(result, MemOperand(at));
- }
- } else {
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- __ Subu(result, length, index);
- __ Addu(result, result, 1);
- __ Lsa(at, arguments, result, kPointerSizeLog2);
- __ lw(result, MemOperand(at));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
- } else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), base_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == FLOAT64_ELEMENTS
- __ Ldc1(result, MemOperand(scratch0(), base_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size, base_offset);
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ lb(result, mem_operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ lbu(result, mem_operand);
- break;
- case INT16_ELEMENTS:
- __ lh(result, mem_operand);
- break;
- case UINT16_ELEMENTS:
- __ lhu(result, mem_operand);
- break;
- case INT32_ELEMENTS:
- __ lw(result, mem_operand);
- break;
- case UINT32_ELEMENTS:
- __ lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
- result, Operand(0x80000000));
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-
- int base_offset = instr->base_offset();
- if (key_is_constant) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- base_offset += constant_key * kDoubleSize;
- }
- __ Addu(scratch, elements, Operand(base_offset));
-
- if (!key_is_constant) {
- key = ToRegister(instr->key());
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ Lsa(scratch, scratch, key, shift_size);
- }
-
- __ Ldc1(result, MemOperand(scratch));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
- Operand(kHoleNanUpper32));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- Register key = ToRegister(instr->key());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
- } else {
- __ Lsa(scratch, elements, key, kPointerSizeLog2);
- }
- }
- __ lw(result, MemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
- Operand(zero_reg));
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
- Operand(scratch));
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Branch(&done, ne, result, Operand(scratch));
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ lw(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int base_offset) {
- if (key_is_constant) {
- return MemOperand(base, (constant_key << element_size) + base_offset);
- }
-
- if (base_offset == 0) {
- if (shift_size >= 0) {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- } else {
- DCHECK_EQ(-1, shift_size);
- __ srl(scratch0(), key, 1);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- }
- }
-
- if (shift_size >= 0) {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0(), base_offset);
- } else {
- DCHECK_EQ(-1, shift_size);
- __ sra(scratch0(), key, 1);
- __ Addu(scratch0(), base, scratch0());
- return MemOperand(scratch0(), base_offset);
- }
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register temp = scratch1();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ Subu(result, sp, 2 * kPointerSize);
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result,
- MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Xor(temp, result,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
- __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
- } else {
- __ mov(result, fp);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
- __ Branch(&done, eq, fp, Operand(elem));
-
- // Arguments adaptor frame present. Get argument length from there.
- __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, result_in_receiver;
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions.
- __ lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
-
- // Do not transform the receiver to object for builtins.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
- __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ Branch(&global_object, eq, receiver, Operand(scratch));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Branch(&global_object, eq, receiver, Operand(scratch));
-
- // Deoptimize if the receiver is not a JS object.
- __ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
-
- __ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
- Operand(FIRST_JS_RECEIVER_TYPE));
-
- __ Branch(&result_in_receiver);
- __ bind(&global_object);
- __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
-
- if (result.is(receiver)) {
- __ bind(&result_in_receiver);
- } else {
- Label result_ok;
- __ Branch(&result_ok);
- __ bind(&result_in_receiver);
- __ mov(result, receiver);
- __ bind(&result_ok);
- }
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DCHECK(receiver.is(a0)); // Used for parameter count.
- DCHECK(function.is(a1)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
- Operand(kArgumentsLimit));
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ Move(receiver, length);
- // The arguments are at a one pointer size offset from elements.
- __ Addu(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
- __ sll(scratch, length, 2);
- __ bind(&loop);
- __ Addu(scratch, elements, scratch);
- __ lw(scratch, MemOperand(scratch));
- __ push(scratch);
- __ Subu(length, length, Operand(1));
- __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
- __ sll(scratch, length, 2);
-
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(a0);
- // It is safe to use t0, t1 and t2 as scratch registers here given that
- // we are not going to return to caller function anyway.
- PrepareForTailCall(actual, t0, t1, t2);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver which is a0, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- Register argument_reg = EmitLoadRegister(argument, at);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- DCHECK(result.is(cp));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- __ li(scratch0(), instr->hydrogen()->declarations());
- __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- __ Push(scratch0(), scratch1());
- __ li(scratch0(), instr->hydrogen()->feedback_vector());
- __ Push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = a1;
- LPointerMap* pointers = instr->pointer_map();
-
- if (can_invoke_directly) {
- // Change context.
- __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ li(a0, Operand(arity));
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- if (is_tail_call) {
- __ Jump(at);
- } else {
- __ Call(at);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- DCHECK(instr->context() != NULL);
- DCHECK(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
- Operand(at));
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ Move(result, input);
- __ And(at, exponent, Operand(HeapNumber::kSignMask));
- __ Branch(&done, eq, at, Operand(zero_reg));
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(a1) ? a0 : a1;
- Register tmp2 = input.is(a2) ? a0 : a2;
- Register tmp3 = input.is(a3) ? a0 : a3;
- Register tmp4 = input.is(t0) ? a0 : t0;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ Branch(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
- // Set the pointer to the new heap number in tmp1.
- if (!tmp1.is(v0))
- __ mov(tmp1, v0);
- // Restore input after the call to the runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
- __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Label done;
- __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
- __ mov(result, input);
- __ subu(result, zero_reg, input);
- // Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
- Operand(zero_reg));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- FPURegister input = ToDoubleRegister(instr->value());
- FPURegister result = ToDoubleRegister(instr->result());
- __ abs_d(result, input);
- } else if (r.IsSmiOrInteger32()) {
- EmitIntegerMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register except_flag = ToRegister(instr->temp());
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result,
- input,
- scratch1,
- double_scratch0(),
- except_flag);
-
- // Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- __ Mfhc1(scratch1, input);
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
- Register scratch = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ Mfhc1(result, input);
- __ Ext(scratch,
- result,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- Label skip1;
- __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, zero_reg);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Branch(&check_sign_on_zero);
- } else {
- __ Branch(&done);
- }
- __ bind(&skip1);
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(HeapNumber::kExponentBias + 32));
-
- // Save the original sign for later comparison.
- __ And(scratch, result, Operand(HeapNumber::kSignMask));
-
- __ Move(double_scratch0(), 0.5);
- __ add_d(double_scratch0(), input, double_scratch0());
-
- // Check sign of the result: if the sign changed, the input
- // value was in [-0.5, 0[ and the result should be -0.
- __ Mfhc1(result, double_scratch0());
- __ Xor(result, result, Operand(scratch));
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- } else {
- Label skip2;
- // ARM uses 'mi' here, which is 'lt'
- // Negating it results in 'ge'
- __ Branch(&skip2, ge, result, Operand(zero_reg));
- __ mov(result, zero_reg);
- __ Branch(&done);
- __ bind(&skip2);
- }
-
- Register except_flag = scratch;
- __ EmitFPUTruncate(kRoundToMinusInf,
- result,
- double_scratch0(),
- at,
- double_scratch1,
- except_flag);
-
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ Branch(&done, ne, result, Operand(zero_reg));
- __ bind(&check_sign_on_zero);
- __ Mfhc1(scratch, input);
- __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
- Operand(zero_reg));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ cvt_s_d(result.low(), input);
- __ cvt_d_s(result, result.low());
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ sqrt_d(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
-
- DCHECK(!input.is(result));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done;
- __ Move(temp, static_cast<double>(-V8_INFINITY));
- // Set up Infinity.
- __ Neg_d(result, temp);
- // result is overwritten if the branch is not taken.
- __ BranchF(&done, NULL, eq, temp, input);
-
- // Add +0 to convert -0 to +0.
- __ add_d(result, input, kDoubleRegZero);
- __ sqrt_d(result, result);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(f4));
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(f2));
- DCHECK(ToDoubleRegister(instr->result()).is(f0));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- DCHECK(!t3.is(tagged_exponent));
- __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, t3, Operand(at));
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ Clz(result, input);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ lw(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ Branch(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use t0, t1 and t2 as scratch registers here given that
- // we are not going to return to caller function anyway.
- PrepareForTailCall(actual, t0, t1, t2);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(a1, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(v0));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ Jump(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ Jump(target, Code::kHeaderSize - kHeapObjectTag);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Call(target, Code::kHeaderSize - kHeapObjectTag);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- __ li(a2, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
- // We might need the holey variant of the elements kind;
- // look at the first argument.
- __ lw(t1, MemOperand(sp, 0));
- __ Branch(&packed_case, eq, t1, Operand(zero_reg));
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ jmp(&done);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ Addu(code_object, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(code_object,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ Addu(result, base, Operand(ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ Addu(result, base, offset);
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- Register scratch = scratch0();
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = MemOperand(object, offset);
- __ Store(value, operand, representation);
- return;
- }
-
- __ AssertNotSmi(object);
-
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
- if (representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ Sdc1(value, FieldMemOperand(object, offset));
- return;
- }
-
- if (instr->hydrogen()->has_transition()) {
- Handle<Map> transition = instr->hydrogen()->transition_map();
- AddDeprecationDependency(transition);
- __ li(scratch, Operand(transition));
- __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object,
- scratch,
- temp,
- GetRAState(),
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register value = ToRegister(instr->value());
- if (access.IsInobject()) {
- MemOperand operand = FieldMemOperand(object, offset);
- __ Store(value, operand, representation);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- scratch,
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
- } else {
- __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- MemOperand operand = FieldMemOperand(scratch, offset);
- __ Store(value, operand, representation);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(scratch,
- offset,
- value,
- object,
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
- Operand operand(0);
- Register reg;
- if (instr->index()->IsConstantOperand()) {
- operand = ToOperand(instr->index());
- reg = ToRegister(instr->length());
- cc = CommuteCondition(cc);
- } else {
- reg = ToRegister(instr->index());
- operand = ToOperand(instr->length());
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ Branch(&done, NegateCondition(cc), reg, operand);
- __ stop("eliminated bounds check failed");
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- Register address = scratch0();
- FPURegister value(ToDoubleRegister(instr->value()));
- if (key_is_constant) {
- if (constant_key != 0) {
- __ Addu(address, external_pointer,
- Operand(constant_key << element_size_shift));
- } else {
- address = external_pointer;
- }
- } else {
- __ Lsa(address, external_pointer, key, shift_size);
- }
-
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(address, base_offset));
- } else { // Storing doubles, not floats.
- __ Sdc1(value, MemOperand(address, base_offset));
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- base_offset);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- __ sb(value, mem_operand);
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ sh(value, mem_operand);
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ sw(value, mem_operand);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- Register scratch_1 = scratch1();
- DoubleRegister double_scratch = double_scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int base_offset = instr->base_offset();
- Label not_nan, done;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- if (key_is_constant) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- __ Addu(scratch, elements,
- Operand((constant_key << element_size_shift) + base_offset));
- } else {
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ Addu(scratch, elements, Operand(base_offset));
- __ sll(at, ToRegister(instr->key()), shift_size);
- __ Addu(scratch, scratch, at);
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load the canonical NaN if the comparison above detected a NaN.
- __ bind(&is_nan);
- __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
- __ Ldc1(double_scratch,
- FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
- __ Sdc1(double_scratch, MemOperand(scratch, 0));
- __ Branch(&done);
- }
-
- __ bind(&not_nan);
- __ Sdc1(value, MemOperand(scratch, 0));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- // Even though the HStoreKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
- } else {
- __ Lsa(scratch, elements, key, kPointerSizeLog2);
- }
- }
- __ sw(value, MemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset));
- __ RecordWrite(elements,
- key,
- value,
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed,
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external (typed) array, fast double array, fast array.
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = v0;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ jmp(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ Branch(deferred->entry(), le, ToRegister(current_capacity),
- Operand(constant_key));
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ Branch(deferred->entry(), ge, ToRegister(key),
- Operand(constant_capacity));
- } else {
- __ Branch(deferred->entry(), ge, ToRegister(key),
- Operand(ToRegister(current_capacity)));
- }
-
- if (instr->elements()->IsRegister()) {
- __ mov(result, ToRegister(instr->elements()));
- } else {
- __ lw(result, ToMemOperand(instr->elements()));
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = v0;
- __ mov(result, zero_reg);
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ mov(result, ToRegister(instr->object()));
- } else {
- __ lw(result, ToMemOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ li(a3, Operand(Smi::FromInt(int_key)));
- } else {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- Label is_smi;
- __ SmiTagCheckOverflow(a3, ToRegister(key), at);
- // Deopt if the key is outside Smi range. The stub expects Smi and would
-      // bump the elements into dictionary mode (and trigger a deopt) anyway.
- __ BranchOnNoOverflow(&is_smi, at);
- RestoreRegistersStateStub stub(isolate());
- __ push(ra);
- __ CallStub(&stub);
- DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_smi);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ mov(a0, result);
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ SmiTst(result, at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ Branch(&not_applicable, ne, scratch, Operand(from_map));
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ li(new_map_reg, Operand(to_map));
- __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteForMap(object_reg,
- new_map_reg,
- scratch,
- GetRAState(),
- kDontSaveFPRegs);
- } else {
- DCHECK(object_reg.is(a0));
- DCHECK(ToRegister(instr->context()).is(cp));
- PushSafepointRegistersScope scope(this);
- __ li(a1, Operand(to_map));
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(al, instr);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(a1));
- DCHECK(ToRegister(instr->right()).is(a0));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
- instr->context());
- __ AssertSmi(v0);
- __ SmiUntag(v0);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- DCHECK(!char_code.is(result));
-
- __ Branch(deferred->entry(), hi,
- char_code, Operand(String::kMaxOneByteCharCode));
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ Lsa(result, result, char_code, kPointerSizeLog2);
- __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Branch(deferred->entry(), eq, result, Operand(scratch));
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- DCHECK(output->IsDoubleRegister());
- FPURegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ lw(scratch, ToMemOperand(input));
- __ mtc1(scratch, single_scratch);
- } else {
- __ mtc1(ToRegister(input), single_scratch);
- }
- __ cvt_d_w(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2(),
- SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
- Register overflow = scratch0();
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTagCheckOverflow(dst, src, overflow);
- __ BranchOnOverflow(deferred->entry(), overflow);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2(),
- UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
- __ SmiTag(result, input);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness) {
- Label done, slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- Register tmp1 = scratch0();
- Register tmp2 = ToRegister(temp1);
- Register tmp3 = ToRegister(temp2);
- DoubleRegister dbl_scratch = double_scratch0();
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ Xor(src, src, Operand(0x80000000));
- }
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
- } else {
- __ Cvt_d_uw(dbl_scratch, src, f22);
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
- __ Branch(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ mov(dst, zero_reg);
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!dst.is(cp)) {
- __ mov(cp, zero_reg);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, dst);
- }
-
-  // Done. Store the value in dbl_scratch into the value field of the
-  // allocated heap number.
- __ bind(&done);
- __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
- } else {
- __ Branch(deferred->entry());
- }
- __ bind(deferred->exit());
- __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-  // Now that we have finished with the object's real address, tag it.
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(cp)) {
- __ mov(cp, zero_reg);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ And(at, input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
- }
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- __ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
- } else {
- __ SmiTag(output, input);
- }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
-    // If the input is a HeapObject, the value of scratch won't be zero.
- __ And(scratch, input, Operand(kHeapObjectTag));
- __ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
- Operand(zero_reg));
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- DoubleRegister result_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Register scratch = scratch0();
- Label convert, load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- // Heap number map check.
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (can_convert_undefined_to_nan) {
- __ Branch(&convert, ne, scratch, Operand(at));
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
- Operand(at));
- }
- // Load heap number.
- __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ mfc1(at, result_reg.low());
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ Mfhc1(scratch, result_reg);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
- Operand(HeapNumber::kSignMask));
- }
- __ Branch(&done);
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
- input_reg, Operand(at));
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ Branch(&done);
- }
- } else {
- __ SmiUntag(scratch, input_reg);
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ mtc1(scratch, result_reg);
- __ cvt_d_w(result_reg, result_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
-
- DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // The input is a tagged HeapObject.
- // Heap number map check.
- __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- // This 'at' value and scratch1 map value are used for tests in both clauses
- // of the if.
-
- if (instr->truncating()) {
- Label truncate;
- __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
- __ mov(scratch2, input_reg); // In delay slot.
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
- Operand(ODDBALL_TYPE));
- __ bind(&truncate);
- __ TruncateHeapNumberToI(input_reg, scratch2);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
- Operand(at));
-
- // Load the double value.
- __ Ldc1(double_scratch,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- input_reg,
- double_scratch,
- scratch1,
- double_scratch2,
- except_flag,
- kCheckForInexactConversion);
-
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Branch(&done, ne, input_reg, Operand(zero_reg));
-
- __ Mfhc1(scratch1, double_scratch);
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- DCHECK(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagD(instr, input_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- Register except_flag = LCodeGen::scratch1();
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch0(),
- except_flag,
- kCheckForInexactConversion);
-
- // Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ Mfhc1(scratch1, double_input);
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = LCodeGen::scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- Register except_flag = LCodeGen::scratch1();
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch0(),
- except_flag,
- kCheckForInexactConversion);
-
- // Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ Mfhc1(scratch1, double_input);
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
- __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch1,
- Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = scratch0();
-
- __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
- __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
- Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ GetObjectType(input, scratch, scratch);
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
-    // If there is only one type in the interval, check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(first));
- } else {
- DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(first));
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(last));
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr,
- DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
- } else {
- __ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(tag));
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ li(at, Operand(cell));
- __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
- Operand(object));
- }
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Label deopt, done;
- // If the map is not deprecated the migration attempt does not make sense.
- __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ lw(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
- __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
- __ Branch(&deopt, eq, at, Operand(zero_reg));
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, scratch0());
- }
- __ SmiTst(scratch0(), at);
- __ Branch(&done, ne, at, Operand(zero_reg));
-
- __ bind(&deopt);
-  // In case of the "al" condition the operands are not used, so just pass
-  // zero_reg there.
- DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
- Operand(zero_reg));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- Register map_reg = scratch0();
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
- __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
- }
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ Branch(deferred->entry(), ne, map_reg, Operand(map));
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
-
- // Check for heap number
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
- Operand(factory()->undefined_value()));
- __ mov(result_reg, zero_reg);
- __ jmp(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ Ldc1(double_scratch0(),
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ ClampUint8(result_reg, scratch);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ li(scratch, Operand(size - kHeapObjectTag));
- } else {
- __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
- }
- __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- Label loop;
- __ bind(&loop);
- __ Subu(scratch, scratch, Operand(kPointerSize));
- __ Addu(at, result, Operand(scratch));
- __ sw(scratch2, MemOperand(at));
- __ Branch(&loop, ge, scratch, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(size);
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size >= 0 && size <= Smi::kMaxValue) {
- __ Push(Smi::FromInt(size));
- } else {
- // We should never get here at runtime => abort
- __ stop("invalid allocation size");
- return;
- }
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Push(Smi::FromInt(flags));
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(v0, result);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-    // If the allocation folding dominator allocation triggered a GC, the
-    // allocation happened in the runtime. We have to reset the top pointer
-    // to virtually undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- Register top_address = scratch0();
- __ Subu(v0, v0, Operand(kHeapObjectTag));
- __ li(top_address, Operand(allocation_top));
- __ sw(v0, MemOperand(top_address));
- __ Addu(v0, v0, Operand(kHeapObjectTag));
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->value()).is(a3));
- DCHECK(ToRegister(instr->result()).is(v0));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ li(v0, Operand(isolate()->factory()->number_string()));
- __ jmp(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- Register cmp1 = no_reg;
- Operand cmp2 = Operand(no_reg);
-
- Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
- instr->FalseLabel(chunk_),
- input,
- instr->type_literal(),
- &cmp1,
- &cmp2);
-
- DCHECK(cmp1.is_valid());
- DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
-
- if (final_branch_condition != kNoCondition) {
- EmitBranch(instr, final_branch_condition, cmp1, cmp2);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register* cmp1,
- Operand* cmp2) {
- // This function utilizes the delay slot heavily. This is used to load
- // values that are always usable without depending on the type of the input
- // register.
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- *cmp1 = input;
- *cmp2 = Operand(at);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ GetObjectType(input, input, scratch);
- *cmp1 = scratch;
- *cmp2 = Operand(FIRST_NONSTRING_TYPE);
- final_branch_condition = lt;
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- __ JumpIfSmi(input, false_label);
- __ GetObjectType(input, input, scratch);
- *cmp1 = scratch;
- *cmp2 = Operand(SYMBOL_TYPE);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- *cmp1 = at;
- *cmp2 = Operand(input);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
- // The first instruction of JumpIfSmi is an And - it is safe in the delay
- // slot.
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- *cmp1 = at;
- *cmp2 = Operand(zero_reg);
- final_branch_condition = ne;
-
- } else if (String::Equals(type_name, factory->function_string())) {
- __ JumpIfSmi(input, false_label);
- __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- *cmp1 = scratch;
- *cmp2 = Operand(1 << Map::kIsCallable);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->object_string())) {
- __ JumpIfSmi(input, false_label);
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
- // Check for callable or undetectable objects => false.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- *cmp1 = at;
- *cmp2 = Operand(zero_reg);
- final_branch_condition = eq;
-
- } else {
- *cmp1 = at;
- *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
- __ Branch(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
-
- DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
- Operand(zero_reg));
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&done, hs, sp, Operand(at));
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Register result = ToRegister(instr->result());
- Register object = ToRegister(instr->object());
-
- Label use_cache, call_runtime;
- DCHECK(object.is(a0));
- __ CheckEnumCache(&call_runtime);
-
- __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Branch(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(object);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
- __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ lw(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ lw(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
- Operand(zero_reg));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
- Operand(scratch0()));
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object, index);
- __ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- result_(result),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register result_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, result, object, index);
-
- Label out_of_object, done;
-
- __ And(scratch, index, Operand(Smi::FromInt(1)));
- __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
- __ sra(index, index, 1);
-
- __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
- __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
-
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ Addu(scratch, object, scratch);
- __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ Branch(&done);
-
- __ bind(&out_of_object);
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-  // Index is equal to the negated out-of-object property index plus 1.
- __ Subu(scratch, result, scratch);
- __ lw(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
deleted file mode 100644
index 7d471ebbb8..0000000000
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
-
-#include "src/ast/scopes.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
-#include "src/crankshaft/mips/lithium-mips.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- RAStatus GetRAState() const {
- return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- FloatRegister flt_scratch,
- DoubleRegister dbl_scratch);
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int base_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- Register scratch0() { return kLithiumScratchReg; }
- Register scratch1() { return kLithiumScratchReg2; }
- DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
-
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in a1.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- MemOperand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2);
- template<class InstrType>
- void EmitBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
- const Operand& src2);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
- const Operand& src2);
- template<class InstrType>
- void EmitFalseBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- DoubleRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- // Returns two registers in cmp1 and cmp2 that can be used in the
- // Branch instruction after EmitTypeofIs.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register* cmp1,
- Operand* cmp2);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen);
-
- ~PushSafepointRegistersScope();
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
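The tail of the deleted codegen header above declares the LDeferredCode helper: out-of-line (slow-path) code objects that the fast path registers via AddDeferredCode and that are generated after the main instruction stream, linked by entry/exit labels. Purely as an illustration of that general pattern, and using made-up names rather than the V8 classes, a stripped-down version in standard C++ could look like the sketch below.

#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// One piece of out-of-line code: where the fast path jumps in, and a callback
// that emits the slow-path body once the main stream is finished.
struct DeferredBlock {
  std::string entry_label;
  std::function<void()> generate;
};

class Emitter {
 public:
  // Called while emitting the fast path: record the slow path for later.
  void AddDeferred(std::string entry_label, std::function<void()> body) {
    deferred_.push_back({std::move(entry_label), std::move(body)});
  }

  // After the main instruction stream: emit every recorded slow-path body.
  void EmitDeferredCode() {
    for (const DeferredBlock& block : deferred_) {
      std::cout << block.entry_label << ":\n";
      block.generate();
    }
  }

 private:
  std::vector<DeferredBlock> deferred_;
};

int main() {
  Emitter emitter;
  // Fast path: a guarded operation whose rare case branches out of line.
  std::cout << "  add_with_overflow_check -> branch deferred_0 on overflow\n";
  emitter.AddDeferred("deferred_0", [] {
    std::cout << "  call runtime fallback; jump back to the fast path\n";
  });
  // Once the main stream is done, the deferred bodies are appended at the end.
  emitter.EmitDeferredCode();
  return 0;
}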
diff --git a/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc
deleted file mode 100644
index 12e1ae77e9..0000000000
--- a/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
-
-#include "src/crankshaft/mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- root_index_(0),
- in_cycle_(false),
- saved_destination_(NULL) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // When doing a depth-first traversal of moves, a cycle can only be found
- // by encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move it must be moves_[root_index_]
- // and all other moves with the same source as moves_[root_index_] are
- // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
- DCHECK(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- DCHECK(in_cycle_);
- DCHECK(saved_destination_ != NULL);
-
- // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
- } else if (saved_destination_->IsStackSlot()) {
- __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
- kLithiumScratchDouble);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- DCHECK(destination->IsStackSlot());
- __ sw(source_register, cgen_->ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ lw(cgen_->ToRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsInt16Encodable()) {
- // 'at' is overwritten while saving the value to the destination.
- // Therefore we can't use 'at'. It is OK if the read from the source
- // destroys 'at', since that happens before the value is read.
- // This uses only a single reg of the double reg-pair.
- __ lwc1(kLithiumScratchDouble, source_operand);
- __ swc1(kLithiumScratchDouble, destination_operand);
- } else {
- __ lw(at, source_operand);
- __ sw(at, destination_operand);
- }
- } else {
- __ lw(kLithiumScratchReg, source_operand);
- __ sw(kLithiumScratchReg, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
- } else {
- __ li(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DoubleRegister result = cgen_->ToDoubleRegister(destination);
- double v = cgen_->ToDouble(constant_source);
- __ Move(result, v);
- } else {
- DCHECK(destination->IsStackSlot());
- DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ li(kLithiumScratchReg,
- Operand(cgen_->ToRepresentation(constant_source, r)));
- } else {
- __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
- }
- __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ Sdc1(source_register, destination_operand);
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kLithiumScratchDouble was used to break the cycle,
- // but kLithiumScratchReg is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ lw(kLithiumScratchReg, source_operand);
- __ sw(kLithiumScratchReg, destination_operand);
- __ lw(kLithiumScratchReg, source_high_operand);
- __ sw(kLithiumScratchReg, destination_high_operand);
- } else {
- __ Ldc1(kLithiumScratchDouble, source_operand);
- __ Sdc1(kLithiumScratchDouble, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h b/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h
deleted file mode 100644
index 6c5fd037a3..0000000000
--- a/deps/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
deleted file mode 100644
index da6e78de16..0000000000
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ /dev/null
@@ -1,2345 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/mips/lithium-mips.h"
-
-#include <sstream>
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Input operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sll-t";
- case Token::SAR: return "sra-t";
- case Token::SHR: return "srl-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip an extra slot for a double-width slot.
- if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateResultInstruction<1>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If the instruction does not have side effects, lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach an environment to this call even if
- // the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LUnallocated* LChunkBuilder::TempDoubleRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = UseFixedDouble(instr->right(), f4);
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use a fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- return MarkAsCall(DefineFixedDouble(result, f2), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, a1);
- LOperand* right_operand = UseFixed(right, a0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it were just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(
- new(zone()) LArgumentsLength(UseRegister(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* receiver = UseFixed(instr->receiver(), a0);
- LOperand* length = UseFixed(instr->length(), a2);
- LOperand* elements = UseFixed(instr->elements(), a3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = Use(instr->argument(i));
- AddInstruction(new(zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, cp);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), cp);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
- descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), a1);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathCos:
- return DoMathCos(instr);
- case kMathSin:
- return DoMathSin(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new(zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
- LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = TempDoubleRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
- return DefineFixedDouble(result, f4);
-}
-
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- Representation r = instr->value()->representation();
- LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
- ? NULL
- : UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LMathAbs(context, input));
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempRegister();
- LMathFloor* result = new(zone()) LMathFloor(input, temp);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempDoubleRegister();
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
- dividend, divisor));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
- (!instr->IsMathFloorOfDiv() &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
- dividend, divisor));
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result = DefineAsRegister(new(zone()) LModI(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- HValue* left = instr->BetterLeftOperand();
- HValue* right = instr->BetterRightOperand();
- LOperand* left_op;
- LOperand* right_op;
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-
- int32_t constant_value = 0;
- if (right->IsConstant()) {
- HConstant* constant = HConstant::cast(right);
- constant_value = constant->Integer32Value();
- // Constants -1, 0 and 1 can be optimized if the result can overflow.
- // For other constants, it can be optimized only without overflow.
- if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
- left_op = UseRegisterAtStart(left);
- right_op = UseConstant(right);
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- LMulI* mul = new(zone()) LMulI(left_op, right_op);
- if (right_op->IsConstantOperand()
- ? ((can_overflow && constant_value == -1) ||
- (bailout_on_minus_zero && constant_value <= 0))
- : (can_overflow || bailout_on_minus_zero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- if (IsMipsArchVariant(kMips32r2)) {
- if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
- HAdd* add = HAdd::cast(instr->uses().value());
- if (instr == add->left()) {
- // This mul is the lhs of an add. The add and mul will be folded
- // into a multiply-add.
- return NULL;
- }
- if (instr == add->right() && !add->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add.
- return NULL;
- }
- }
- }
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- return result;
- } else if (instr->representation().IsDouble()) {
- if (IsMipsArchVariant(kMips32r2)) {
- if (instr->left()->IsMul())
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
-
- if (instr->right()->IsMul()) {
- DCHECK(!instr->left()->IsMul());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
- }
- return DoArithmeticD(Token::ADD, instr);
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right =
- exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), f4)
- : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, f0),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
- temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone())
- LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- return new(zone()) LSeqStringSetChar(context, string, index, value);
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseRegisterOrConstantAtStart(instr->length())
- : UseRegisterAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempDoubleRegister();
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- // Revisit this decision, here and 8 lines below.
- return DefineAsRegister(new(zone()) LClampDToUint8(reg,
- TempDoubleRegister()));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- LClampTToUint8* result =
- new(zone()) LClampTToUint8(reg, TempDoubleRegister());
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), cp)
- : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
- parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseRegister(instr->elements());
- } else {
- DCHECK(instr->representation().IsSmiOrTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK(
- (instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* val = NULL;
- LOperand* key = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- key = UseRegisterOrConstantAtStart(instr->key());
- val = UseRegister(instr->value());
- } else {
- DCHECK(instr->value()->representation().IsSmiOrTagged());
- if (needs_write_barrier) {
- object = UseTempRegister(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- }
-
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- }
-
- DCHECK(
- (instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK(instr->elements()->representation().IsExternal());
- LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, v0);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- return MarkAsCall(
- DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- if (instr->IsAllocationFolded()) {
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseRegisterOrConstantAtStart(instr->length());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), a3);
- LTypeof* result = new (zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->enumerable(), a0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
deleted file mode 100644
index c7fbfafa2e..0000000000
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ /dev/null
@@ -1,2450 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
-#define V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadRoot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(MathAbs) \
- V(MathCos) \
- V(MathSin) \
- V(MathExp) \
- V(MathClz32) \
- V(MathFloor) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- }
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator interface.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const final { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 3> {
- public:
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathFloor(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRound(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathPowHalf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const final { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- const CallInterfaceDescriptor descriptor() { return descriptor_; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
- // Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value = NULL) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* context,
- LOperand* size,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only at
- // the instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LUnallocated* TempDoubleRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach an environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
diff --git a/deps/v8/src/crankshaft/mips64/OWNERS b/deps/v8/src/crankshaft/mips64/OWNERS
deleted file mode 100644
index 3f8fbfc7c8..0000000000
--- a/deps/v8/src/crankshaft/mips64/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-ivica.bogosavljevic@imgtec.com
-Miran.Karic@imgtec.com
-dusan.simicic@imgtec.com
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
deleted file mode 100644
index 38d04cfd39..0000000000
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ /dev/null
@@ -1,5609 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
-
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
- LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
-}
-
-LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-}
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ Sdc1(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ Ldc1(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- // a1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ Dsubu(sp, sp, Operand(slots * kPointerSize));
- __ Push(a0, a1);
- __ Daddu(a0, sp, Operand(slots * kPointerSize));
- __ li(a1, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Sd(a1, MemOperand(a0, 2 * kPointerSize));
- __ Branch(&loop, ne, a0, Operand(sp));
- __ Pop(a0, a1);
- } else {
- __ Dsubu(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info()->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is in a1.
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(a1);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
- Operand(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ push(a1);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in v0. It replaces the context passed to us.
- // It's saved on the stack and kept live in cp.
- __ mov(cp, v0);
- __ Sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ Ld(a0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ Sd(a0, target);
- // Update the write barrier. This clobbers a3 and a0.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, a0, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ Dsubu(sp, sp, Operand(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ PushCommonFrame(scratch0());
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- __ PopCommonFrame(scratch0());
- frame_is_built_ = false;
- }
- __ jmp(code->exit());
- }
- }
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- if (jump_table_.length() > 0) {
- Comment(";;; -------------------- Jump table --------------------");
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Label table_start, call_deopt_entry;
-
- __ bind(&table_start);
- Label needs_frame;
- Address base = jump_table_[0]->address;
- for (int i = 0; i < jump_table_.length(); i++) {
- Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
- __ bind(&table_entry->label);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
-
- // Second-level deopt table entries are contiguous and small, so instead
- // of loading the full, absolute address of each one, load the base
- // address and add an immediate offset.
- if (is_int16(entry - base)) {
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- __ PushCommonFrame();
- __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
- __ li(t9, Operand(entry - base));
- } else {
- __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
- __ li(t9, Operand(entry - base));
- }
-
- } else {
- __ li(t9, Operand(entry - base));
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- __ PushCommonFrame();
- __ BranchAndLink(&needs_frame);
- } else {
- __ BranchAndLink(&call_deopt_entry);
- }
- }
- }
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ push(at);
- DCHECK(info()->IsStub());
- }
-
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
-
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
-
- __ li(at,
- Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
- __ Daddu(t9, t9, Operand(at));
- __ Jump(t9);
- }
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
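
The loop above relies on the second-level deopt entries being contiguous, so each jump only loads the 16-bit distance from the first entry and the shared tail adds the table base once. A minimal editorial sketch of that addressing scheme, using hypothetical addresses and entry size (not taken from the original):

#include <cassert>
#include <cstdint>

bool FitsInt16(int64_t delta) { return delta >= -32768 && delta <= 32767; }

int main() {
  const int64_t base = 0x12340000;   // Address of entry 0 (hypothetical).
  const int64_t entry_size = 16;     // Entries are small and contiguous.
  for (int64_t i = 0; i < 100; ++i) {
    int64_t entry = base + i * entry_size;
    int64_t delta = entry - base;
    assert(FitsInt16(delta));        // li t9, delta ... later: Daddu t9, t9, base.
    assert(base + delta == entry);   // Jump target reconstructed from the base.
  }
  return 0;
}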
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::from_code(index);
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::from_code(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- AllowDeferredHandleDereference get_number;
- DCHECK(literal->IsNumber());
- __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
- } else if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
- } else if (r.IsDouble()) {
- Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
- } else {
- DCHECK(r.IsSmiOrTagged());
- __ li(scratch, literal);
- }
- return scratch;
- } else if (op->IsStackSlot()) {
- __ Ld(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- FloatRegister flt_scratch,
- DoubleRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- DCHECK(literal->IsNumber());
- __ li(at, Operand(static_cast<int32_t>(literal->Number())));
- __ mtc1(at, flt_scratch);
- __ cvt_d_w(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort(kUnsupportedDoubleImmediate);
- } else if (r.IsTagged()) {
- Abort(kUnsupportedTaggedImmediate);
- }
- } else if (op->IsStackSlot()) {
- MemOperand mem_op = ToMemOperand(op);
- __ Ldc1(dbl_scratch, mem_op);
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- // return ToRepresentation(op, Representation::Integer32());
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<int64_t>(Smi::FromInt(value));
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- DCHECK(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand((int64_t)0);
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand((int64_t)0);
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize;
-}
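
A quick worked example of the formula above, assuming the 8-byte pointers used on mips64 (editorial sketch, not part of the original file): parameter slots carry negative indices, and without an eager frame they are addressed relative to sp.

#include <cassert>

int main() {
  const int kPointerSize = 8;  // mips64 pointer size.
  auto offset = [&](int index) { return -(index + 1) * kPointerSize; };
  assert(offset(-1) == 0);     // First parameter sits at [sp + 0].
  assert(offset(-2) == 8);     // Each further parameter is one pointer higher.
  assert(offset(-3) == 16);
  return 0;
}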
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()));
- } else {
-    // Without an eager frame, retrieve the parameter relative to the
-    // stack pointer.
- return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
- } else {
-    // Without an eager frame, retrieve the parameter relative to the
-    // stack pointer.
- // return MemOperand(
- // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- return MemOperand(
- sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Move(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ Ld(cp, ToMemOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
- Register src1, const Operand& src2) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Register scratch = scratch0();
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
- __ Push(a1, scratch);
- __ li(scratch, Operand(count));
- __ Lw(a1, MemOperand(scratch));
- __ Subu(a1, a1, Operand(1));
- __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
- __ li(a1, Operand(FLAG_deopt_every_n_times));
- __ Sw(a1, MemOperand(scratch));
- __ Pop(a1, scratch);
-
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&no_deopt);
- __ Sw(a1, MemOperand(scratch));
- __ Pop(a1, scratch);
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- Label skip;
- if (condition != al) {
- __ Branch(&skip, NegateCondition(condition), src1, src2);
- }
- __ stop("trap_on_deopt");
- __ bind(&skip);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to handle condition, build frame, or
- // restore caller doubles.
- if (condition == al && frame_is_built_ &&
- !info()->saves_caller_doubles()) {
- DeoptComment(deopt_info);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
- } else {
- Deoptimizer::JumpTableEntry* table_entry =
- new (zone()) Deoptimizer::JumpTableEntry(
- entry, deopt_info, bailout_type, !frame_is_built_);
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry->IsEquivalentTo(*jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- __ Branch(&jump_table_.last()->label, condition, src1, src2);
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason, Register src1,
- const Operand& src2) {
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
-
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
- // Note: The code below even works when right contains kMinInt.
- __ dsubu(dividend, zero_reg, dividend);
- __ And(dividend, dividend, Operand(mask));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ dsubu(dividend, zero_reg, dividend);
- }
-
- __ bind(&dividend_is_not_negative);
- __ And(dividend, dividend, Operand(mask));
- __ bind(&done);
-}
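
A minimal C++ sketch of the remainder computation emitted above (editorial, not part of the original file): the mask is |divisor| - 1, and negative dividends are negated, masked, and negated back so the result keeps the dividend's sign. Unsigned arithmetic is used here to sidestep signed-overflow issues on the extreme inputs the assembly tolerates.

#include <cassert>
#include <cstdint>

int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  // mask = |divisor| - 1; for a negative divisor, -(divisor + 1) == ~divisor.
  uint32_t mask = divisor < 0 ? ~static_cast<uint32_t>(divisor)
                              : static_cast<uint32_t>(divisor) - 1;
  if (dividend >= 0) return static_cast<int32_t>(dividend & mask);
  uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);  // |dividend|
  return -static_cast<int32_t>(magnitude & mask);             // Sign of dividend.
}

int main() {
  assert(ModByPowerOf2(13, 8) == 5);
  assert(ModByPowerOf2(-13, 8) == -5);  // Result follows the dividend's sign.
  assert(ModByPowerOf2(13, -8) == 5);   // The divisor's sign is irrelevant.
  return 0;
}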
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- __ Dmul(result, result, Operand(Abs(divisor)));
- __ Dsubu(result, dividend, Operand(result));
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- __ bind(&remainder_not_zero);
- }
-}
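
The sequence above computes the remainder from the truncating quotient: remainder = dividend - trunc(dividend / |divisor|) * |divisor|, which is exactly what TruncatingDiv, Dmul and Dsubu produce. A small editorial sketch of the identity (assumes the divisor is neither 0 nor kMinInt; the zero case deoptimizes above):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int32_t ModByConst(int32_t dividend, int32_t divisor) {
  int32_t abs_divisor = std::abs(divisor);
  int32_t quotient = dividend / abs_divisor;   // Truncating division.
  return dividend - quotient * abs_divisor;    // Remainder, sign of dividend.
}

int main() {
  assert(ModByConst(13, 5) == 3);
  assert(ModByConst(-13, 5) == -3);  // A truncating remainder follows the dividend.
  assert(ModByConst(13, -5) == 3);   // Only |divisor| matters.
  return 0;
}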
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
- const Register left_reg = ToRegister(instr->left());
- const Register right_reg = ToRegister(instr->right());
- const Register result_reg = ToRegister(instr->result());
-
- // div runs in the background while we check for special cases.
- __ Dmod(result_reg, left_reg, right_reg);
-
- Label done;
- // Check for x % 0, we have to deopt in this case because we can't return a
- // NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
- Operand(zero_reg));
- }
-
- // Check for kMinInt % -1, div will return kMinInt, which is not what we
- // want. We have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
- Operand(-1));
- } else {
- __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
- __ Branch(USE_DELAY_SLOT, &done);
- __ mov(result_reg, zero_reg);
- }
- __ bind(&no_overflow_possible);
- }
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- __ Branch(&done, ge, left_reg, Operand(zero_reg));
-
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
- Operand(zero_reg));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
- Operand(kMinInt));
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
- Operand(zero_reg));
- }
-
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ Dsubu(result, zero_reg, dividend);
- return;
- }
- uint16_t shift = WhichPowerOf2Abs(divisor);
- if (shift == 0) {
- __ Move(result, dividend);
- } else if (shift == 1) {
- __ dsrl32(result, dividend, 31);
- __ Daddu(result, dividend, Operand(result));
- } else {
- __ dsra32(result, dividend, 31);
- __ dsrl32(result, result, 32 - shift);
- __ Daddu(result, dividend, Operand(result));
- }
- if (shift > 0) __ dsra(result, result, shift);
- if (divisor < 0) __ Dsubu(result, zero_reg, result);
-}
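
The shift sequence above implements truncating division by a power of two: negative dividends are biased by 2^shift - 1 before the arithmetic right shift so the result rounds toward zero, and the sign is fixed afterwards for negative divisors. A small editorial sketch of the same arithmetic (assumes the kMinInt / -1 case has already been rejected, as the overflow deopt above ensures when needed):

#include <cassert>
#include <cstdint>

int32_t DivByPowerOf2(int32_t dividend, int shift, bool divisor_negative) {
  uint32_t sign = static_cast<uint32_t>(dividend >> 31);   // 0 or 0xFFFFFFFF.
  uint32_t bias = shift == 0 ? 0 : sign >> (32 - shift);   // 0 or 2^shift - 1.
  int32_t result = (dividend + static_cast<int32_t>(bias)) >> shift;
  return divisor_negative ? -result : result;
}

int main() {
  assert(DivByPowerOf2(-7, 2, false) == -1);  // trunc(-7 / 4) == -1, not -2.
  assert(DivByPowerOf2(7, 2, true) == -1);    // trunc(7 / -4) == -1.
  assert(DivByPowerOf2(-8, 3, false) == -1);  // Exact quotients are unaffected.
  return 0;
}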
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Subu(result, zero_reg, result);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ Dmul(scratch0(), result, Operand(divisor));
- __ Dsubu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
- Operand(zero_reg));
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- const Register result = ToRegister(instr->result());
-
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ Div(result, dividend, divisor);
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
- Operand(zero_reg));
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
- Operand(zero_reg));
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) &&
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
- __ bind(&left_not_min_int);
- }
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- // Calculate remainder.
- Register remainder = ToRegister(instr->temp());
- if (kArchVariant != kMips64r6) {
- __ mfhi(remainder);
- } else {
- __ dmod(remainder, dividend, divisor);
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
- Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DoubleRegister addend = ToDoubleRegister(instr->addend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- DCHECK(addend.is(ToDoubleRegister(instr->result())));
-
- __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- Register result = ToRegister(instr->result());
- int32_t divisor = instr->divisor();
- Register scratch = result.is(dividend) ? scratch0() : dividend;
- DCHECK(!result.is(dividend) || !scratch.is(dividend));
-
-  // If the divisor is 1, return the dividend.
-  if (divisor == 1) {
- __ Move(result, dividend);
- return;
- }
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- uint16_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ dsra(result, dividend, shift);
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- // Dividend can be the same register as result so save the value of it
- // for checking overflow.
- __ Move(scratch, dividend);
-
- __ Dsubu(result, zero_reg, dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- }
-
- __ Xor(scratch, scratch, result);
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result,
- Operand(kMaxInt));
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ dsra(result, result, shift);
- return;
- }
-
- Label no_overflow, done;
- __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
- __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
- __ Branch(&done);
- __ bind(&no_overflow);
- __ dsra(result, result, shift);
- __ bind(&done);
-}
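
For flooring division the arithmetic right shift already rounds toward minus infinity, so a positive power-of-two divisor needs only the shift; a negative divisor negates the dividend first, which is why the code above watches for the kMinInt overflow. A compact editorial sketch using a 64-bit intermediate so the negation stays well-defined (the kMinInt corner cases the original patches or deoptimizes are outside this sketch):

#include <cassert>
#include <cstdint>

int32_t FlooringDivByPowerOf2(int32_t dividend, int shift, bool divisor_negative) {
  int64_t n = divisor_negative ? -static_cast<int64_t>(dividend) : dividend;
  return static_cast<int32_t>(n >> shift);  // Arithmetic shift == floor division.
}

int main() {
  assert(FlooringDivByPowerOf2(-7, 2, false) == -2);  // floor(-7 / 4)  == -2.
  assert(FlooringDivByPowerOf2(7, 2, true) == -2);    // floor( 7 / -4) == -2.
  assert(FlooringDivByPowerOf2(8, 2, false) == 2);
  return 0;
}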
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
- Operand(zero_reg));
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Dsubu(result, zero_reg, result);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp());
- DCHECK(!temp.is(dividend) && !temp.is(result));
- Label needs_adjustment, done;
- __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
- dividend, Operand(zero_reg));
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ Dsubu(result, zero_reg, result);
- __ jmp(&done);
- __ bind(&needs_adjustment);
- __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
- __ TruncatingDiv(result, temp, Abs(divisor));
- if (divisor < 0) __ Dsubu(result, zero_reg, result);
- __ Dsubu(result, result, Operand(1));
- __ bind(&done);
-}
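
The adjustment above turns a truncating division into a flooring one: when the operands have opposite signs, nudge the dividend one unit in the direction of the divisor's sign, truncate, and subtract one; this is correct whether or not the division is exact. A brief editorial sketch of that rule (assumes divisor != 0):

#include <cassert>
#include <cstdint>

int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
  bool needs_adjustment = divisor > 0 ? dividend < 0 : dividend > 0;
  if (!needs_adjustment) return dividend / divisor;   // Truncation == floor here.
  int32_t temp = dividend + (divisor > 0 ? 1 : -1);
  return temp / divisor - 1;
}

int main() {
  assert(FlooringDivByConst(-7, 4) == -2);  // floor(-1.75) == -2.
  assert(FlooringDivByConst(-8, 4) == -2);  // Exact quotient still correct.
  assert(FlooringDivByConst(7, -4) == -2);
  return 0;
}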
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- const Register result = ToRegister(instr->result());
-
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ Ddiv(result, dividend, divisor);
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
- Operand(zero_reg));
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
- Operand(zero_reg));
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) &&
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
- __ bind(&left_not_min_int);
- }
-
- // We performed a truncating division. Correct the result if necessary.
- Label done;
- Register remainder = scratch0();
- if (kArchVariant != kMips64r6) {
- __ mfhi(remainder);
- } else {
- __ dmod(remainder, dividend, divisor);
- }
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(remainder, remainder, Operand(divisor));
- __ Branch(&done, ge, remainder, Operand(zero_reg));
- __ Dsubu(result, result, Operand(1));
- __ bind(&done);
-}
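
The tail above corrects the truncating quotient produced by Ddiv: if the division was inexact and the remainder's sign differs from the divisor's, the truncated quotient is one too large for flooring semantics, which is what the Xor-and-branch checks. An editorial sketch of the same correction (assumes divisor != 0 and not the kMinInt / -1 case, both of which deoptimize above):

#include <cassert>
#include <cstdint>

int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;   // Truncating, as Ddiv produces.
  int32_t remainder = dividend % divisor;
  if (remainder != 0 && ((remainder ^ divisor) < 0)) quotient -= 1;
  return quotient;
}

int main() {
  assert(FlooringDiv(-7, 2) == -4);  // trunc gives -3, remainder -1 -> adjust.
  assert(FlooringDiv(7, -2) == -4);
  assert(FlooringDiv(-8, 2) == -4);  // Exact: no adjustment.
  return 0;
}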
-
-
-void LCodeGen::DoMulS(LMulS* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (right_op->IsConstantOperand()) {
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
-      // The case of a zero constant is handled separately below.
-      // If the constant is negative and left is zero, the result should be -0.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
- Operand(zero_reg));
- }
-
- switch (constant) {
- case -1:
- if (overflow) {
- Label no_overflow;
- __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow);
- } else {
- __ Dsubu(result, zero_reg, left);
- }
- break;
- case 0:
- if (bailout_on_minus_zero) {
-          // If left is strictly negative and the constant is zero, the
-          // result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
- Operand(zero_reg));
- }
- __ mov(result, zero_reg);
- break;
- case 1:
- // Nothing to do.
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (base::bits::IsPowerOfTwo32(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ dsll(result, left, shift);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Dsubu(result, zero_reg, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ Dlsa(result, left, left, shift);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Dsubu(result, zero_reg, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ dsll(scratch, left, shift);
- __ Dsubu(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Dsubu(result, zero_reg, result);
- } else {
- // Generate standard code.
- __ li(at, constant);
- __ Dmul(result, left, at);
- }
- }
- } else {
- DCHECK(right_op->IsRegister());
- Register right = ToRegister(right_op);
-
- if (overflow) {
- // hi:lo = left * right.
- __ Dmulh(result, left, right);
- __ dsra32(scratch, result, 0);
- __ sra(at, result, 31);
- __ SmiTag(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(at));
- } else {
- __ SmiUntag(result, left);
- __ dmul(result, result, right);
- }
-
- if (bailout_on_minus_zero) {
- Label done;
- __ Xor(at, left, right);
- __ Branch(&done, ge, at, Operand(zero_reg));
- // Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (right_op->IsConstantOperand()) {
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
-      // The case of a zero constant is handled separately below.
-      // If the constant is negative and left is zero, the result should be -0.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
- Operand(zero_reg));
- }
-
- switch (constant) {
- case -1:
- if (overflow) {
- Label no_overflow;
- __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow);
- } else {
- __ Subu(result, zero_reg, left);
- }
- break;
- case 0:
- if (bailout_on_minus_zero) {
-          // If left is strictly negative and the constant is zero, the
-          // result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
- Operand(zero_reg));
- }
- __ mov(result, zero_reg);
- break;
- case 1:
- // Nothing to do.
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (base::bits::IsPowerOfTwo32(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ sll(result, left, shift);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Subu(result, zero_reg, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ Lsa(result, left, left, shift);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Subu(result, zero_reg, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(scratch, left, shift);
- __ Subu(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ Subu(result, zero_reg, result);
- } else {
- // Generate standard code.
- __ li(at, constant);
- __ Mul(result, left, at);
- }
- }
-
- } else {
- DCHECK(right_op->IsRegister());
- Register right = ToRegister(right_op);
-
- if (overflow) {
- // hi:lo = left * right.
- __ Dmul(result, left, right);
- __ dsra32(scratch, result, 0);
- __ sra(at, result, 31);
-
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(at));
- } else {
- __ mul(result, left, right);
- }
-
- if (bailout_on_minus_zero) {
- Label done;
- __ Xor(at, left, right);
- __ Branch(&done, ge, at, Operand(zero_reg));
- // Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
-}
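
Both DoMulS and DoMulI above strength-reduce multiplication by a constant: take |constant| with the branchless (x + mask) ^ mask trick, use shifts (plus at most one add or subtract) when |constant| is 2^k, 2^k + 1 or 2^k - 1, and fix the sign at the end. A compact editorial sketch of the decomposition, with overflow checking omitted and |constant| assumed to be between 2 and kMaxInt (smaller constants are special-cased above):

#include <cassert>
#include <cstdint>

int32_t MulByConstant(int32_t left, int32_t constant) {
  int32_t mask = constant >> 31;                          // 0 or -1.
  uint32_t abs_c = static_cast<uint32_t>((constant + mask) ^ mask);
  uint32_t l = static_cast<uint32_t>(left);
  auto ctz = [](uint32_t x) { int s = 0; while (!(x & 1u)) { x >>= 1; ++s; } return s; };
  uint32_t product;
  if ((abs_c & (abs_c - 1)) == 0) {                       // |c| == 2^k
    product = l << ctz(abs_c);
  } else if (((abs_c - 1) & (abs_c - 2)) == 0) {          // |c| == 2^k + 1
    product = (l << ctz(abs_c - 1)) + l;
  } else if ((abs_c & (abs_c + 1)) == 0) {                // |c| == 2^k - 1
    product = (l << ctz(abs_c + 1)) - l;
  } else {
    product = l * abs_c;                                  // General case.
  }
  int32_t result = static_cast<int32_t>(product);
  return constant < 0 ? -result : result;
}

int main() {
  assert(MulByConstant(7, 8) == 56);    // 2^3: single shift.
  assert(MulByConstant(7, 9) == 63);    // 2^3 + 1: shift + add.
  assert(MulByConstant(7, -7) == -49);  // -(2^3 - 1): shift + subtract, then negate.
  return 0;
}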
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- DCHECK(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot()) {
- right = Operand(EmitLoadRegister(right_op, at));
- } else {
- DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_OR:
- __ Or(result, left, right);
- break;
- case Token::BIT_XOR:
- if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
- __ Nor(result, zero_reg, left);
- } else {
- __ Xor(result, left, right);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- if (right_op->IsRegister()) {
- // No need to mask the right operand on MIPS, it is built into the variable
- // shift instructions.
- switch (instr->op()) {
- case Token::ROR:
- __ Ror(result, left, Operand(ToRegister(right_op)));
- break;
- case Token::SAR:
- __ srav(result, left, ToRegister(right_op));
- break;
- case Token::SHR:
- __ srlv(result, left, ToRegister(right_op));
- if (instr->can_deopt()) {
- // TODO(yy): (-1) >>> 0. anything else?
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
- Operand(zero_reg));
- DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result,
- Operand(kMaxInt));
- }
- break;
- case Token::SHL:
- __ sllv(result, left, ToRegister(right_op));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ Ror(result, left, Operand(shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sra(result, left, shift_count);
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ srl(result, left, shift_count);
- } else {
- if (instr->can_deopt()) {
- __ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
- Operand(zero_reg));
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ dsll(result, left, shift_count);
- } else {
- __ sll(result, left, shift_count);
- }
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
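
The constant-shift path above masks the count to five bits, matching JavaScript's shift semantics, and the unsigned (>>>) case deoptimizes when the result does not fit in a signed 32-bit value, the classic (-1) >>> 0 example from the TODO. A small editorial sketch of those two rules:

#include <cassert>
#include <cstdint>

int32_t ShiftRightLogical(int32_t left, int32_t right, bool* needs_deopt) {
  uint8_t shift_count = static_cast<uint8_t>(right & 0x1F);  // Only low 5 bits count.
  uint32_t result = static_cast<uint32_t>(left) >> shift_count;
  *needs_deopt = result > 0x7FFFFFFFu;  // Not representable as int32 -> deopt.
  return static_cast<int32_t>(result);
}

int main() {
  bool deopt = false;
  assert(ShiftRightLogical(1, 33, &deopt) == 0 && !deopt);  // 33 & 0x1F == 1.
  ShiftRightLogical(-1, 0, &deopt);
  assert(deopt);                                            // (-1) >>> 0 == 4294967295.
  return 0;
}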
-
-
-void LCodeGen::DoSubS(LSubS* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
- } else { // can_overflow.
- Register scratch = scratch0();
- Label no_overflow_label;
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ DsubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
- &no_overflow_label, scratch);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow_label);
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
- } else { // can_overflow.
- Register scratch = scratch0();
- Label no_overflow_label;
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
- &no_overflow_label, scratch);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow_label);
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
- double v = instr->value();
- __ Move(result, v);
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ li(ToRegister(instr->result()), object);
-}
-
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
- Register scratch = scratch0();
- DCHECK(!scratch.is(string));
- DCHECK(!scratch.is(ToRegister(index)));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Daddu(scratch, string, ToRegister(index));
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- __ dsll(scratch, ToRegister(index), 1);
- __ Daddu(scratch, string, scratch);
- }
- return FieldMemOperand(scratch, SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- __ Ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- __ And(scratch, scratch,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Lbu(result, operand);
- } else {
- __ Lhu(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Sb(value, operand);
- } else {
- __ Sh(value, operand);
- }
-}
-
-
-void LCodeGen::DoAddE(LAddE* instr) {
- LOperand* result = instr->result();
- LOperand* left = instr->left();
- LOperand* right = instr->right();
-
- DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
-}
-
-
-void LCodeGen::DoAddS(LAddS* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
- } else { // can_overflow.
- Label no_overflow_label;
- Register scratch = scratch1();
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ DaddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
- &no_overflow_label, scratch);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow_label);
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (!can_overflow) {
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
- } else { // can_overflow.
- Label no_overflow_label;
- Register scratch = scratch1();
- DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
- &no_overflow_label, scratch);
- DeoptimizeIf(al, instr);
- __ bind(&no_overflow_label);
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Register scratch = scratch1();
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- Register left_reg = ToRegister(left);
- Register right_reg = EmitLoadRegister(right, scratch0());
- Register result_reg = ToRegister(instr->result());
- Label return_right, done;
- __ Slt(scratch, left_reg, Operand(right_reg));
- if (condition == ge) {
- __ Movz(result_reg, left_reg, scratch);
- __ Movn(result_reg, right_reg, scratch);
- } else {
- DCHECK(condition == le);
- __ Movn(result_reg, left_reg, scratch);
- __ Movz(result_reg, right_reg, scratch);
- }
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- FPURegister left_reg = ToDoubleRegister(left);
- FPURegister right_reg = ToDoubleRegister(right);
- FPURegister result_reg = ToDoubleRegister(instr->result());
- Label nan, done;
- if (operation == HMathMinMax::kMathMax) {
- __ Float64Max(result_reg, left_reg, right_reg, &nan);
- } else {
- DCHECK(operation == HMathMinMax::kMathMin);
- __ Float64Min(result_reg, left_reg, right_reg, &nan);
- }
- __ Branch(&done);
-
- __ bind(&nan);
- __ add_d(result_reg, left_reg, right_reg);
-
- __ bind(&done);
- }
-}
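
The integer path above is branchless: Slt materializes the comparison into a flag and Movz/Movn pick between the two operands based on that flag. An editorial sketch of the same selection for the min case (max just flips which operand each move picks):

#include <cassert>
#include <cstdint>

int32_t MathMinInt32(int32_t left, int32_t right) {
  int32_t flag = left < right ? 1 : 0;  // Slt scratch, left, right.
  int32_t result = right;               // Movz: take right when flag == 0.
  if (flag != 0) result = left;         // Movn: take left when flag != 0.
  return result;
}

int main() {
  assert(MathMinInt32(3, 7) == 3);
  assert(MathMinInt32(-1, 1) == -1);
  assert(MathMinInt32(5, 5) == 5);      // Ties can take either operand.
  return 0;
}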
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ add_d(result, left, right);
- break;
- case Token::SUB:
- __ sub_d(result, left, right);
- break;
- case Token::MUL:
- __ mul_d(result, left, right);
- break;
- case Token::DIV:
- __ div_d(result, left, right);
- break;
- case Token::MOD: {
- // Save a0-a3 on the stack.
- RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- __ MultiPush(saved_regs);
-
- __ PrepareCallCFunction(0, 2, scratch0());
- __ MovToFloatParameters(left, right);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- // Move the result in the double result register.
- __ MovFromFloatResult(result);
-
- // Restore saved register.
- __ MultiPop(saved_regs);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(a1));
- DCHECK(ToRegister(instr->right()).is(a0));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-  // Other architectures use a nop here to signal that there is no inlined
-  // patchable code. MIPS does not need the nop, since our marker
-  // instruction (andi zero_reg) will never be used in normal code.
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
- if (right_block == left_block || condition == al) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(right_block),
- NegateCondition(condition), src1, src2);
- } else if (right_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
- } else {
- __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
- __ Branch(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2) {
- int right_block = instr->FalseDestination(chunk_);
- int left_block = instr->TrueDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateFpuCondition(condition), src1, src2);
- } else if (right_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
- condition, src1, src2);
- } else {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
- condition, src1, src2);
- __ Branch(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
- Register src1, const Operand& src2) {
- int true_block = instr->TrueDestination(chunk_);
- __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
- Register src1, const Operand& src2) {
- int false_block = instr->FalseDestination(chunk_);
- __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitFalseBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2) {
- int false_block = instr->FalseDestination(chunk_);
- __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
- condition, src1, src2);
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ stop("LDebugBreak");
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32() || r.IsSmi()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- EmitBranch(instr, ne, reg, Operand(zero_reg));
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- DoubleRegister reg = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- EmitBranchF(instr, ogl, reg, kDoubleRegZero);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq, reg, Operand(at));
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, ne, reg, Operand(zero_reg));
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- DoubleRegister dbl_scratch = double_scratch0();
- __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ Ld(at, FieldMemOperand(reg, String::kLengthOffset));
- EmitBranch(instr, ne, at, Operand(zero_reg));
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // Boolean -> its value.
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
- }
-
- const Register map = scratch0();
- if (expected & ToBooleanHint::kNeedsMap) {
- __ Ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ Lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, Operand(1 << Map::kIsUndetectable));
- __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ Ld(at, FieldMemOperand(reg, String::kLengthOffset));
- __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
- __ Branch(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- const Register scratch = scratch1();
- __ Lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
- Label not_heap_number;
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, map, Operand(at));
- __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- ne, dbl_scratch, kDoubleRegZero);
- // Falls through if dbl_scratch == 0.
- __ Branch(instr->FalseLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
- Operand(zero_reg));
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cond = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- FPURegister left_reg = ToDoubleRegister(left);
- FPURegister right_reg = ToDoubleRegister(right);
-
- // If a NaN is involved, i.e. the result is unordered,
- // jump to false block label.
- __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
- left_reg, right_reg);
-
- EmitBranchF(instr, cond, left_reg, right_reg);
- } else {
- Register cmp_left;
- Operand cmp_right = Operand((int64_t)0);
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- cmp_left = ToRegister(left);
- cmp_right = Operand(Smi::FromInt(value));
- } else {
- cmp_left = ToRegister(left);
- cmp_right = Operand(value);
- }
- } else if (left->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- cmp_left = ToRegister(right);
- cmp_right = Operand(Smi::FromInt(value));
- } else {
- cmp_left = ToRegister(right);
- cmp_right = Operand(value);
- }
- // We commuted the operands, so commute the condition.
- cond = CommuteCondition(cond);
- } else {
- cmp_left = ToRegister(left);
- cmp_right = Operand(ToRegister(right));
- }
-
- EmitBranch(instr, cond, cmp_left, cmp_right);
- }
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- EmitBranch(instr, eq, left, Operand(right));
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ li(at, Operand(factory()->the_hole_value()));
- EmitBranch(instr, eq, input_reg, Operand(at));
- return;
- }
-
- DoubleRegister input_reg = ToDoubleRegister(instr->object());
- EmitFalseBranchF(instr, eq, input_reg, input_reg);
-
- Register scratch = scratch0();
- __ FmoveHigh(scratch, input_reg);
- EmitBranch(instr, eq, scratch,
- Operand(static_cast<int32_t>(kHoleNanUpper32)));
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ GetObjectType(input, temp1, temp1);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond, temp1,
- Operand(FIRST_NONSTRING_TYPE));
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), at);
- __ And(at, input_reg, kSmiTagMask);
- EmitBranch(instr, eq, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ Ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ And(at, temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(instr, ne, at, Operand(zero_reg));
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(a1));
- DCHECK(ToRegister(instr->right()).is(a0));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq, v0, Operand(at));
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ GetObjectType(input, scratch, scratch);
- EmitBranch(instr,
- BranchCondition(instr->hydrogen()),
- scratch,
- Operand(TestType(instr->hydrogen())));
-}
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- __ GetObjectType(input, temp, temp2);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
- } else {
- __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
- }
-
- // The object is now known not to be a function (its instance type is below
- // FIRST_FUNCTION_TYPE).
- // Check if the constructor in the map is a function.
- Register instance_type = scratch1();
- DCHECK(!instance_type.is(temp));
- __ GetMapConstructor(temp, temp, temp2, instance_type);
-
- // Objects with a non-function constructor have class 'Object'.
- if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
- } else {
- __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ Ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(temp,
- FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
-
- // End with the address of this class_name instance in temp register.
- // On MIPS, the caller must do the comparison with Handle<String> class_name.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, eq, temp, Operand(class_name));
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ Ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- EmitBranch(instr, eq, temp, Operand(instr->map()));
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = scratch0();
- Register const object_instance_type = scratch1();
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ SmiTst(object, at);
- EmitFalseBranch(instr, eq, at, Operand(zero_reg));
- }
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- __ Ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ Lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ And(object_instance_type, object_instance_type,
- Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
- Operand(zero_reg));
- __ Lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
- Operand(JS_PROXY_TYPE));
-
- __ Ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- EmitFalseBranch(instr, eq, object_prototype, Operand(at));
- EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
- __ Branch(&loop, USE_DELAY_SLOT);
- __ Ld(object_map, FieldMemOperand(object_prototype,
- HeapObject::kMapOffset)); // In delay slot.
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // On MIPS there is no need for a "no inlined smi code" marker (nop).
-
- Condition condition = ComputeCompareCondition(op);
- // A minor optimization that relies on LoadRoot always emitting one
- // instruction.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- Label done, check;
- __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
- __ bind(&check);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0. We're leaving the code
- // managed by the register allocator and tearing down the frame, it's
- // safe to write to the context register.
- __ push(v0);
- __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
- if (NeedsEagerFrame()) {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- }
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (sp_delta != 0) {
- __ Daddu(sp, sp, Operand(sp_delta));
- }
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi.
- __ SmiUntag(reg);
- __ Dlsa(sp, sp, reg, kPointerSizeLog2);
- }
-
- __ Jump(ra);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
-
- __ Ld(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
- } else {
- Label is_not_hole;
- __ Branch(&is_not_hole, ne, result, Operand(at));
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ Ld(scratch, target);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
- } else {
- __ Branch(&skip_assignment, ne, scratch, Operand(at));
- }
- }
-
- __ Sd(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch0(),
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = MemOperand(object, offset);
- __ Load(result, operand, access.representation());
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Ldc1(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
-
- Representation representation = access.representation();
- if (representation.IsSmi() && SmiValuesAre32Bits() &&
- instr->hydrogen()->representation().IsInteger32()) {
- if (FLAG_debug_code) {
- // Verify this is really an Smi.
- Register scratch = scratch0();
- __ Load(scratch, FieldMemOperand(object, offset), representation);
- __ AssertSmi(scratch);
- }
-
- // Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
- __ Load(result, FieldMemOperand(object, offset), representation);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ Ld(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ GetObjectType(result, scratch, scratch);
- __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
- // Get the prototype from the initial map.
- __ Ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting the index from the length accounts for one of them; add one
- // more for the other.
- if (instr->length()->IsConstantOperand()) {
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int index = (const_length - const_index) + 1;
- __ Ld(result, MemOperand(arguments, index * kPointerSize));
- } else {
- Register index = ToRegister(instr->index());
- __ li(at, Operand(const_length + 1));
- __ Dsubu(result, at, index);
- __ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ Ld(result, MemOperand(at));
- }
- } else if (instr->index()->IsConstantOperand()) {
- Register length = ToRegister(instr->length());
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int loc = const_index - 1;
- if (loc != 0) {
- __ Dsubu(result, length, Operand(loc));
- __ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ Ld(result, MemOperand(at));
- } else {
- __ Dlsa(at, arguments, length, kPointerSizeLog2);
- __ Ld(result, MemOperand(at));
- }
- } else {
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- __ Dsubu(result, length, index);
- __ Daddu(result, result, 1);
- __ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ Ld(result, MemOperand(at));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
- : element_size_shift;
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Daddu(scratch0(), external_pointer,
- constant_key << element_size_shift);
- } else {
- if (shift_size < 0) {
- if (shift_size == -32) {
- __ dsra32(scratch0(), key, 0);
- } else {
- __ dsra(scratch0(), key, -shift_size);
- }
- } else {
- __ dsll(scratch0(), key, shift_size);
- }
- __ Daddu(scratch0(), scratch0(), external_pointer);
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ Lwc1(result, MemOperand(scratch0(), base_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == FLOAT64_ELEMENTS
- __ Ldc1(result, MemOperand(scratch0(), base_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size, base_offset);
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ Lb(result, mem_operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ Lbu(result, mem_operand);
- break;
- case INT16_ELEMENTS:
- __ Lh(result, mem_operand);
- break;
- case UINT16_ELEMENTS:
- __ Lhu(result, mem_operand);
- break;
- case INT32_ELEMENTS:
- __ Lw(result, mem_operand);
- break;
- case UINT32_ELEMENTS:
- __ Lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
- result, Operand(0x80000000));
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-
- int base_offset = instr->base_offset();
- if (key_is_constant) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- base_offset += constant_key * kDoubleSize;
- }
- __ Daddu(scratch, elements, Operand(base_offset));
-
- if (!key_is_constant) {
- key = ToRegister(instr->key());
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
- : element_size_shift;
- if (shift_size > 0) {
- __ dsll(at, key, shift_size);
- } else if (shift_size == -32) {
- __ dsra32(at, key, 0);
- } else {
- __ dsra(at, key, -shift_size);
- }
- __ Daddu(scratch, scratch, at);
- }
-
- __ Ldc1(result, MemOperand(scratch));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ FmoveHigh(scratch, result);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
- Operand(static_cast<int32_t>(kHoleNanUpper32)));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- HLoadKeyed* hinstr = instr->hydrogen();
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- Register key = ToRegister(instr->key());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiScale(scratch, key, kPointerSizeLog2);
- __ daddu(scratch, elements, scratch);
- } else {
- __ Dlsa(scratch, elements, key, kPointerSizeLog2);
- }
- }
-
- Representation representation = hinstr->representation();
- if (representation.IsInteger32() && SmiValuesAre32Bits() &&
- hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
- DCHECK(!hinstr->RequiresHoleCheck());
- if (FLAG_debug_code) {
- Register temp = scratch1();
- __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
- __ AssertSmi(temp);
- }
-
- // Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
- offset = SmiWordOffset(offset);
- }
-
- __ Load(result, MemOperand(store_base, offset), representation);
-
- // Check for the hole value.
- if (hinstr->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
- Operand(zero_reg));
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
- Operand(scratch));
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Branch(&done, ne, result, Operand(scratch));
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- // The comparison only needs LS bits of value, which is a smi.
- __ Ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
- Operand(Smi::FromInt(Isolate::kProtectorValid)));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int base_offset) {
- if (key_is_constant) {
- return MemOperand(base, (constant_key << element_size) + base_offset);
- }
-
- if (base_offset == 0) {
- if (shift_size >= 0) {
- __ dsll(scratch0(), key, shift_size);
- __ Daddu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- } else {
- if (shift_size == -32) {
- __ dsra32(scratch0(), key, 0);
- } else {
- __ dsra(scratch0(), key, -shift_size);
- }
- __ Daddu(scratch0(), base, scratch0());
- return MemOperand(scratch0());
- }
- }
-
- if (shift_size >= 0) {
- __ dsll(scratch0(), key, shift_size);
- __ Daddu(scratch0(), base, scratch0());
- return MemOperand(scratch0(), base_offset);
- } else {
- if (shift_size == -32) {
- __ dsra32(scratch0(), key, 0);
- } else {
- __ dsra(scratch0(), key, -shift_size);
- }
- __ Daddu(scratch0(), base, scratch0());
- return MemOperand(scratch0(), base_offset);
- }
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register temp = scratch1();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ Dsubu(result, sp, 2 * kPointerSize);
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ Ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(result,
- MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Xor(temp, result,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
- __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
- } else {
- __ mov(result, fp);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
- __ Branch(&done, eq, fp, Operand(elem));
-
- // Arguments adaptor frame present. Get argument length from there.
- __ Ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, result_in_receiver;
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode functions.
- __ Ld(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-
- // Do not transform the receiver to object for builtins.
- int32_t strict_mode_function_mask =
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
- int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
-
- __ Lbu(at,
- FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
- __ And(at, at, Operand(strict_mode_function_mask));
- __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
- __ Lbu(at, FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, at, Operand(native_mask));
- __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ Branch(&global_object, eq, receiver, Operand(scratch));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Branch(&global_object, eq, receiver, Operand(scratch));
-
- // Deoptimize if the receiver is not a JS object.
- __ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
-
- __ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
- Operand(FIRST_JS_RECEIVER_TYPE));
- __ Branch(&result_in_receiver);
-
- __ bind(&global_object);
- __ Ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ Ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
-
- if (result.is(receiver)) {
- __ bind(&result_in_receiver);
- } else {
- Label result_ok;
- __ Branch(&result_ok);
- __ bind(&result_in_receiver);
- __ mov(result, receiver);
- __ bind(&result_ok);
- }
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DCHECK(receiver.is(a0)); // Used for parameter count.
- DCHECK(function.is(a1)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
- Operand(kArgumentsLimit));
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ Move(receiver, length);
- // The arguments are at a one pointer size offset from elements.
- __ Daddu(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
- __ dsll(scratch, length, kPointerSizeLog2);
- __ bind(&loop);
- __ Daddu(scratch, elements, scratch);
- __ Ld(scratch, MemOperand(scratch));
- __ push(scratch);
- __ Dsubu(length, length, Operand(1));
- __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
- __ dsll(scratch, length, kPointerSizeLog2);
-
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(a0);
- // It is safe to use t0, t1 and t2 as scratch registers here given that
- // we are not going to return to the caller function anyway.
- PrepareForTailCall(actual, t0, t1, t2);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver which is a0, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- Register argument_reg = EmitLoadRegister(argument, at);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ Ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ Ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- DCHECK(result.is(cp));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- __ li(scratch0(), instr->hydrogen()->declarations());
- __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- __ Push(scratch0(), scratch1());
- __ li(scratch0(), instr->hydrogen()->feedback_vector());
- __ Push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = a1;
- LPointerMap* pointers = instr->pointer_map();
-
- if (can_invoke_directly) {
- // Change context.
- __ Ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ li(a0, Operand(arity));
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- __ Ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- if (is_tail_call) {
- __ Jump(at);
- } else {
- __ Call(at);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- DCHECK(instr->context() != NULL);
- DCHECK(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
- Operand(at));
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ Move(result, input);
- __ And(at, exponent, Operand(HeapNumber::kSignMask));
- __ Branch(&done, eq, at, Operand(zero_reg));
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(a1) ? a0 : a1;
- Register tmp2 = input.is(a2) ? a0 : a2;
- Register tmp3 = input.is(a3) ? a0 : a3;
- Register tmp4 = input.is(a4) ? a0 : a4;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ Branch(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
- // Set the pointer to the new heap number in tmp1.
- if (!tmp1.is(v0))
- __ mov(tmp1, v0);
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
- __ Sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ Lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ Sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Label done;
- __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
- __ mov(result, input);
- __ subu(result, zero_reg, input);
- // Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
- Operand(zero_reg));
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Label done;
- __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
- __ mov(result, input);
- __ dsubu(result, zero_reg, input);
- // Overflow if result is still negative, i.e. 0x80000000 00000000.
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
- Operand(zero_reg));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- FPURegister input = ToDoubleRegister(instr->value());
- FPURegister result = ToDoubleRegister(instr->result());
- __ abs_d(result, input);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else if (r.IsSmi()) {
- EmitSmiMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitSmiMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register except_flag = ToRegister(instr->temp());
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result,
- input,
- scratch1,
- double_scratch0(),
- except_flag);
-
- // Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- __ mfhc1(scratch1, input); // Get exponent/sign bits.
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
- Register scratch = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ mfhc1(result, input);
- __ Ext(scratch,
- result,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- Label skip1;
- __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, zero_reg);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Branch(&check_sign_on_zero);
- } else {
- __ Branch(&done);
- }
- __ bind(&skip1);
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
- Operand(HeapNumber::kExponentBias + 32));
-
- // Save the original sign for later comparison.
- __ And(scratch, result, Operand(HeapNumber::kSignMask));
-
- __ Move(double_scratch0(), 0.5);
- __ add_d(double_scratch0(), input, double_scratch0());
-
- // Check sign of the result: if the sign changed, the input
- // value was in ]-0.5, 0[ and the result should be -0.
- __ mfhc1(result, double_scratch0());
- // mfhc1 sign-extends, clear the upper bits.
- __ dsll32(result, result, 0);
- __ dsrl32(result, result, 0);
- __ Xor(result, result, Operand(scratch));
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
- Operand(zero_reg));
- } else {
- Label skip2;
- // ARM uses 'mi' here, which is 'lt'
- // Negating it results in 'ge'
- __ Branch(&skip2, ge, result, Operand(zero_reg));
- __ mov(result, zero_reg);
- __ Branch(&done);
- __ bind(&skip2);
- }
-
- Register except_flag = scratch;
- __ EmitFPUTruncate(kRoundToMinusInf,
- result,
- double_scratch0(),
- at,
- double_scratch1,
- except_flag);
-
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ Branch(&done, ne, result, Operand(zero_reg));
- __ bind(&check_sign_on_zero);
- __ mfhc1(scratch, input); // Get exponent/sign bits.
- __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
- Operand(zero_reg));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ cvt_s_d(result, input);
- __ cvt_d_s(result, result);
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ sqrt_d(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
-
- DCHECK(!input.is(result));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done;
- __ Move(temp, static_cast<double>(-V8_INFINITY));
- // Set up Infinity.
- __ Neg_d(result, temp);
- // result is overwritten if the branch is not taken.
- __ BranchF(&done, NULL, eq, temp, input);
-
- // Add +0 to convert -0 to +0.
- __ add_d(result, input, kDoubleRegZero);
- __ sqrt_d(result, result);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(f4));
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(f2));
- DCHECK(ToDoubleRegister(instr->result()).is(f0));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- DCHECK(!a7.is(tagged_exponent));
- __ Lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ Clz(result, input);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ Branch(&no_arguments_adaptor, ne, scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(fp, scratch2);
- __ Ld(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ Branch(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(a1));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use t0, t1 and t2 as scratch registers here given that
- // we are not going to return to the caller function anyway.
- PrepareForTailCall(actual, t0, t1, t2);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(a1, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(v0));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ Jump(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(target);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- __ li(a2, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
- // We might need a change here; look at the first argument.
- __ Ld(a5, MemOperand(sp, 0));
- __ Branch(&packed_case, eq, a5, Operand(zero_reg));
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ jmp(&done);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ Daddu(code_object, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Sd(code_object, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ Daddu(result, base, Operand(ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ Daddu(result, base, offset);
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- Register scratch2 = scratch1();
- Register scratch1 = scratch0();
-
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- if (access.IsExternalMemory()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = MemOperand(object, offset);
- __ Store(value, operand, representation);
- return;
- }
-
- __ AssertNotSmi(object);
-
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
- if (!FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ Sdc1(value, FieldMemOperand(object, offset));
- return;
- }
-
- if (instr->hydrogen()->has_transition()) {
- Handle<Map> transition = instr->hydrogen()->transition_map();
- AddDeprecationDependency(transition);
- __ li(scratch1, Operand(transition));
- __ Sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object,
- scratch1,
- temp,
- GetRAState(),
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register destination = object;
- if (!access.IsInobject()) {
- destination = scratch1;
- __ Ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
- }
-
- if (representation.IsSmi() && SmiValuesAre32Bits() &&
- instr->hydrogen()->value()->representation().IsInteger32()) {
- DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- if (FLAG_debug_code) {
- __ Load(scratch2, FieldMemOperand(destination, offset), representation);
- __ AssertSmi(scratch2);
- }
- // Store int value directly to upper half of the smi.
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
- MemOperand operand = FieldMemOperand(destination, offset);
-
- if (FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ Sdc1(value, operand);
- } else {
- DCHECK(instr->value()->IsRegister());
- Register value = ToRegister(instr->value());
- __ Store(value, operand, representation);
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- Register value = ToRegister(instr->value());
- __ RecordWriteField(destination,
- offset,
- value,
- scratch2,
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
- Operand operand((int64_t)0);
- Register reg;
- if (instr->index()->IsConstantOperand()) {
- operand = ToOperand(instr->index());
- reg = ToRegister(instr->length());
- cc = CommuteCondition(cc);
- } else {
- reg = ToRegister(instr->index());
- operand = ToOperand(instr->length());
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ Branch(&done, NegateCondition(cc), reg, operand);
- __ stop("eliminated bounds check failed");
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
- : element_size_shift;
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- Register address = scratch0();
- FPURegister value(ToDoubleRegister(instr->value()));
- if (key_is_constant) {
- if (constant_key != 0) {
- __ Daddu(address, external_pointer,
- Operand(constant_key << element_size_shift));
- } else {
- address = external_pointer;
- }
- } else {
- if (shift_size < 0) {
- if (shift_size == -32) {
- __ dsra32(address, key, 0);
- } else {
- __ dsra(address, key, -shift_size);
- }
- } else {
- __ dsll(address, key, shift_size);
- }
- __ Daddu(address, external_pointer, address);
- }
-
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ cvt_s_d(double_scratch0(), value);
- __ Swc1(double_scratch0(), MemOperand(address, base_offset));
- } else { // Storing doubles, not floats.
- __ Sdc1(value, MemOperand(address, base_offset));
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- base_offset);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- __ Sb(value, mem_operand);
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ Sh(value, mem_operand);
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ Sw(value, mem_operand);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DoubleRegister double_scratch = double_scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int base_offset = instr->base_offset();
- Label not_nan, done;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- if (key_is_constant) {
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- __ Daddu(scratch, elements,
- Operand((constant_key << element_size_shift) + base_offset));
- } else {
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
- : element_size_shift;
- __ Daddu(scratch, elements, Operand(base_offset));
- DCHECK((shift_size == 3) || (shift_size == -29));
- if (shift_size == 3) {
- __ dsll(at, ToRegister(instr->key()), 3);
- } else if (shift_size == -29) {
- __ dsra(at, ToRegister(instr->key()), 29);
- }
- __ Daddu(scratch, scratch, at);
- }
-
- if (instr->NeedsCanonicalization()) {
- __ FPUCanonicalizeNaN(double_scratch, value);
- __ Sdc1(double_scratch, MemOperand(scratch, 0));
- } else {
- __ Sdc1(value, MemOperand(scratch, 0));
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
-    // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ SmiScale(scratch, key, kPointerSizeLog2);
- __ daddu(store_base, elements, scratch);
- } else {
- __ Dlsa(store_base, elements, key, kPointerSizeLog2);
- }
- }
-
- Representation representation = instr->hydrogen()->value()->representation();
- if (representation.IsInteger32() && SmiValuesAre32Bits()) {
- DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- if (FLAG_debug_code) {
- Register temp = scratch1();
- __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
- __ AssertSmi(temp);
- }
-
- // Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
-
- __ Store(value, MemOperand(store_base, offset), representation);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Daddu(key, store_base, Operand(offset));
- __ RecordWrite(elements,
- key,
- value,
- GetRAState(),
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed,
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = v0;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ jmp(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ Branch(deferred->entry(), le, ToRegister(current_capacity),
- Operand(constant_key));
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ Branch(deferred->entry(), ge, ToRegister(key),
- Operand(constant_capacity));
- } else {
- __ Branch(deferred->entry(), ge, ToRegister(key),
- Operand(ToRegister(current_capacity)));
- }
-
- if (instr->elements()->IsRegister()) {
- __ mov(result, ToRegister(instr->elements()));
- } else {
- __ Ld(result, ToMemOperand(instr->elements()));
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = v0;
- __ mov(result, zero_reg);
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ mov(result, ToRegister(instr->object()));
- } else {
- __ Ld(result, ToMemOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
- } else {
- __ mov(a3, ToRegister(key));
- __ SmiTag(a3);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ mov(a0, result);
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ SmiTst(result, at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ Ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ Branch(&not_applicable, ne, scratch, Operand(from_map));
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ li(new_map_reg, Operand(to_map));
- __ Sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteForMap(object_reg,
- new_map_reg,
- scratch,
- GetRAState(),
- kDontSaveFPRegs);
- } else {
- DCHECK(object_reg.is(a0));
- DCHECK(ToRegister(instr->context()).is(cp));
- PushSafepointRegistersScope scope(this);
- __ li(a1, Operand(to_map));
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(a1));
- DCHECK(ToRegister(instr->right()).is(a0));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
- instr->context());
- __ AssertSmi(v0);
- __ SmiUntag(v0);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- DCHECK(!char_code.is(result));
-
- __ Branch(deferred->entry(), hi,
- char_code, Operand(String::kMaxOneByteCharCode));
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ Dlsa(result, result, char_code, kPointerSizeLog2);
- __ Ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ Branch(deferred->entry(), eq, result, Operand(scratch));
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- DCHECK(output->IsDoubleRegister());
- FPURegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ Ld(scratch, ToMemOperand(input));
- __ mtc1(scratch, single_scratch);
- } else {
- __ mtc1(ToRegister(input), single_scratch);
- }
- __ cvt_d_w(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- FPURegister dbl_scratch = double_scratch0();
- __ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_,
- instr_->value(),
- instr_->temp1(),
- instr_->temp2(),
- UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
- __ SmiTag(result, input);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness) {
- Label done, slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- Register tmp1 = scratch0();
- Register tmp2 = ToRegister(temp1);
- Register tmp3 = ToRegister(temp2);
- DoubleRegister dbl_scratch = double_scratch0();
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ Xor(src, src, Operand(0x80000000));
- }
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
- } else {
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch);
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
- __ Branch(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ mov(dst, zero_reg);
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!dst.is(cp)) {
- __ mov(cp, zero_reg);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, dst);
- }
-
-  // Done. Store the value in dbl_scratch into the value field of the
-  // allocated heap number.
- __ bind(&done);
- __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
- } else {
- __ Branch(deferred->entry());
- }
- __ bind(deferred->exit());
- __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(cp)) {
- __ mov(cp, zero_reg);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ And(at, input, Operand(0x80000000));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
- }
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- __ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
- } else {
- __ SmiTag(output, input);
- }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
-    // If the input is a HeapObject, the value of scratch won't be zero.
- __ And(scratch, input, Operand(kHeapObjectTag));
- __ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
- Operand(zero_reg));
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- DoubleRegister result_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Register scratch = scratch0();
- Label convert, load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- // Heap number map check.
- __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (can_convert_undefined_to_nan) {
- __ Branch(&convert, ne, scratch, Operand(at));
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
- Operand(at));
- }
- // Load heap number.
- __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ mfc1(at, result_reg);
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ mfhc1(scratch, result_reg); // Get exponent/sign bits.
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
- Operand(HeapNumber::kSignMask));
- }
- __ Branch(&done);
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
- input_reg, Operand(at));
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ Branch(&done);
- }
- } else {
- __ SmiUntag(scratch, input_reg);
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ mtc1(scratch, result_reg);
- __ cvt_d_w(result_reg, result_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
-
- DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // The input is a tagged HeapObject.
- // Heap number map check.
- __ Ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- // This 'at' value and scratch1 map value are used for tests in both clauses
- // of the if.
-
- if (instr->truncating()) {
- Label truncate;
- __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
- __ mov(scratch2, input_reg); // In delay slot.
- __ Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
- Operand(ODDBALL_TYPE));
- __ bind(&truncate);
- __ TruncateHeapNumberToI(input_reg, scratch2);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
- Operand(at));
-
- // Load the double value.
- __ Ldc1(double_scratch,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- input_reg,
- double_scratch,
- scratch1,
- double_scratch2,
- except_flag,
- kCheckForInexactConversion);
-
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Branch(&done, ne, input_reg, Operand(zero_reg));
-
- __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- DCHECK(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagD(instr, input_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- Register except_flag = LCodeGen::scratch1();
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch0(),
- except_flag,
- kCheckForInexactConversion);
-
- // Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ mfhc1(scratch1, double_input); // Get exponent/sign bits.
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = LCodeGen::scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- Register except_flag = LCodeGen::scratch1();
-
- __ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch0(),
- except_flag,
- kCheckForInexactConversion);
-
- // Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
- Operand(zero_reg));
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ Branch(&done, ne, result_reg, Operand(zero_reg));
- __ mfhc1(scratch1, double_input); // Get exponent/sign bits.
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
- Operand(zero_reg));
- __ bind(&done);
- }
- }
- __ SmiTag(result_reg, result_reg);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- __ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = scratch0();
-
- __ Ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ Lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
- __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
- Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ GetObjectType(input, scratch, scratch);
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
-    // If there is only one type in the interval, check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(first));
- } else {
- DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(first));
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(last));
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr,
- DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
- } else {
- __ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
- Operand(tag));
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ li(at, Operand(cell));
- __ Ld(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
- Operand(object));
- }
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Label deopt, done;
- // If the map is not deprecated the migration attempt does not make sense.
- __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ Lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
- __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
- __ Branch(&deopt, eq, at, Operand(zero_reg));
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, scratch0());
- }
- __ SmiTst(scratch0(), at);
- __ Branch(&done, ne, at, Operand(zero_reg));
-
- __ bind(&deopt);
-  // In case of the "al" condition the operands are not used, so just pass
-  // zero_reg there.
- DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
- Operand(zero_reg));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- Register map_reg = scratch0();
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
- __ Ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
- }
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ Branch(deferred->entry(), ne, map_reg, Operand(map));
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
-
- // Check for heap number
- __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
- Operand(factory()->undefined_value()));
- __ mov(result_reg, zero_reg);
- __ jmp(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ Ldc1(double_scratch0(),
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ ClampUint8(result_reg, scratch);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ li(scratch, Operand(size - kHeapObjectTag));
- } else {
- __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
- }
- __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- Label loop;
- __ bind(&loop);
- __ Dsubu(scratch, scratch, Operand(kPointerSize));
- __ Daddu(at, result, Operand(scratch));
- __ Sd(scratch2, MemOperand(at));
- __ Branch(&loop, ge, scratch, Operand(zero_reg));
- }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, zero_reg);
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(size);
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size >= 0 && size <= Smi::kMaxValue) {
- __ li(v0, Operand(Smi::FromInt(size)));
- __ Push(v0);
- } else {
- // We should never get here at runtime => abort
- __ stop("invalid allocation size");
- return;
- }
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ li(v0, Operand(Smi::FromInt(flags)));
- __ Push(v0);
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(v0, result);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-    // If the allocation folding dominator's allocation triggered a GC, the
-    // allocation happened in the runtime. We have to reset the top pointer to
-    // virtually undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- Register top_address = scratch0();
- __ Dsubu(v0, v0, Operand(kHeapObjectTag));
- __ li(top_address, Operand(allocation_top));
- __ Sd(v0, MemOperand(top_address));
- __ Daddu(v0, v0, Operand(kHeapObjectTag));
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->value()).is(a3));
- DCHECK(ToRegister(instr->result()).is(v0));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ li(v0, Operand(isolate()->factory()->number_string()));
- __ jmp(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- Register cmp1 = no_reg;
- Operand cmp2 = Operand(no_reg);
-
- Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
- instr->FalseLabel(chunk_),
- input,
- instr->type_literal(),
- &cmp1,
- &cmp2);
-
- DCHECK(cmp1.is_valid());
- DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
-
- if (final_branch_condition != kNoCondition) {
- EmitBranch(instr, final_branch_condition, cmp1, cmp2);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register* cmp1,
- Operand* cmp2) {
- // This function utilizes the delay slot heavily. This is used to load
- // values that are always usable without depending on the type of the input
- // register.
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- *cmp1 = input;
- *cmp2 = Operand(at);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ GetObjectType(input, input, scratch);
- *cmp1 = scratch;
- *cmp2 = Operand(FIRST_NONSTRING_TYPE);
- final_branch_condition = lt;
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- __ JumpIfSmi(input, false_label);
- __ GetObjectType(input, input, scratch);
- *cmp1 = scratch;
- *cmp2 = Operand(SYMBOL_TYPE);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- *cmp1 = at;
- *cmp2 = Operand(input);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
- // The first instruction of JumpIfSmi is an And - it is safe in the delay
- // slot.
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- *cmp1 = at;
- *cmp2 = Operand(zero_reg);
- final_branch_condition = ne;
-
- } else if (String::Equals(type_name, factory->function_string())) {
- __ JumpIfSmi(input, false_label);
- __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(scratch, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- *cmp1 = scratch;
- *cmp2 = Operand(1 << Map::kIsCallable);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->object_string())) {
- __ JumpIfSmi(input, false_label);
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
- // Check for callable or undetectable objects => false.
- __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- *cmp1 = at;
- *cmp2 = Operand(zero_reg);
- final_branch_condition = eq;
-
- } else {
- *cmp1 = at;
- *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
- __ Branch(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
-
- DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
- Operand(zero_reg));
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&done, hs, sp, Operand(at));
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
-    // This will be done explicitly when emitting the call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Register result = ToRegister(instr->result());
- Register object = ToRegister(instr->object());
-
- Label use_cache, call_runtime;
- DCHECK(object.is(a0));
- __ CheckEnumCache(&call_runtime);
-
- __ Ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Branch(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(object);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
- __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ Ld(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ Ld(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
- Operand(zero_reg));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
- Operand(scratch0()));
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object, index);
- __ mov(cp, zero_reg);
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- result_(result),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register result_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, result, object, index);
-
- Label out_of_object, done;
-
- __ And(scratch, index, Operand(Smi::FromInt(1)));
- __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
- __ dsra(index, index, 1);
-
- __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
- __ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot.
- __ Daddu(scratch, object, scratch);
- __ Ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ Branch(&done);
-
- __ bind(&out_of_object);
- __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- // The index is equal to the negated out-of-object property index plus 1.
- __ Dsubu(scratch, result, scratch);
- __ Ld(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
deleted file mode 100644
index 58c907e602..0000000000
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ /dev/null
@@ -1,408 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
-#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
-
-#include "src/ast/scopes.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
-#include "src/crankshaft/mips64/lithium-mips64.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- RAStatus GetRAState() const {
- return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- FloatRegister flt_scratch,
- DoubleRegister dbl_scratch);
- int64_t ToRepresentation_donotuse(LConstantOperand* op,
- const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
-
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result,
- Register object,
- Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int base_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- Register scratch0() { return kLithiumScratchReg; }
- Register scratch1() { return kLithiumScratchReg2; }
- DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
-
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in a1.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
- Register src1 = zero_reg,
- const Operand& src2 = Operand(zero_reg));
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- MemOperand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
- void EmitSmiMathAbs(LMathAbs* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2);
- template<class InstrType>
- void EmitBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
- const Operand& src2);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
- const Operand& src2);
- template<class InstrType>
- void EmitFalseBranchF(InstrType instr,
- Condition condition,
- FPURegister src1,
- FPURegister src2);
- void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- DoubleRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- // Returns two registers in cmp1 and cmp2 that can be used in the
- // Branch instruction after EmitTypeofIs.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name,
- Register* cmp1,
- Operand* cmp2);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Turns a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen);
-
- ~PushSafepointRegistersScope();
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
deleted file mode 100644
index eb50d4b2f1..0000000000
--- a/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
-
-#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- root_index_(0),
- in_cycle_(false),
- saved_destination_(NULL) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i;  // Any cycle is found by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // We can only find a cycle, when doing a depth-first traversal of moves,
- // by encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one, so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move, it must be moves_[root_index_],
- // and all other moves with the same source as moves_[root_index_] are
- // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
- DCHECK(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ Ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- DCHECK(in_cycle_);
- DCHECK(saved_destination_ != NULL);
-
- // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
- } else if (saved_destination_->IsStackSlot()) {
- __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
- kLithiumScratchDouble);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- DCHECK(destination->IsStackSlot());
- __ Sd(source_register, cgen_->ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ Ld(cgen_->ToRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsInt16Encodable()) {
- // 'at' is overwritten while saving the value to the destination.
- // Therefore we can't use 'at'. It is OK if the read from the source
- // destroys 'at', since that happens before the value is stored.
- // This uses only a single reg of the double reg-pair.
- __ Ldc1(kLithiumScratchDouble, source_operand);
- __ Sdc1(kLithiumScratchDouble, destination_operand);
- } else {
- __ Ld(at, source_operand);
- __ Sd(at, destination_operand);
- }
- } else {
- __ Ld(kLithiumScratchReg, source_operand);
- __ Sd(kLithiumScratchReg, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ li(dst, Operand(cgen_->ToSmi(constant_source)));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ li(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DoubleRegister result = cgen_->ToDoubleRegister(destination);
- double v = cgen_->ToDouble(constant_source);
- __ Move(result, v);
- } else {
- DCHECK(destination->IsStackSlot());
- DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsSmi(constant_source)) {
- __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
- __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
- __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
- } else {
- __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
- __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ Sdc1(source_register, destination_operand);
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kLithiumScratchDouble was used to break the cycle,
- // but kLithiumScratchReg is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ Lw(kLithiumScratchReg, source_operand);
- __ Sw(kLithiumScratchReg, destination_operand);
- __ Lw(kLithiumScratchReg, source_high_operand);
- __ Sw(kLithiumScratchReg, destination_high_operand);
- } else {
- __ Ldc1(kLithiumScratchDouble, source_operand);
- __ Sdc1(kLithiumScratchDouble, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
deleted file mode 100644
index 85d8e2920c..0000000000
--- a/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
deleted file mode 100644
index 763e92963c..0000000000
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ /dev/null
@@ -1,2350 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/mips64/lithium-mips64.h"
-
-#include <sstream>
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Input operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sll-t";
- case Token::SAR: return "sra-t";
- case Token::SHR: return "srl-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip a slot for a double-width slot.
- if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateResultInstruction<1>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If the instruction does not have side effects, lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach an environment to this call even if
- // the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LUnallocated* LChunkBuilder::TempDoubleRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right = UseFixedDouble(instr->right(), f4);
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use a fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- return MarkAsCall(DefineFixedDouble(result, f2), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, a1);
- LOperand* right_operand = UseFixed(right, a0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment if it is later used again.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(
- new(zone()) LArgumentsLength(UseRegister(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), a1);
- LOperand* receiver = UseFixed(instr->receiver(), a0);
- LOperand* length = UseFixed(instr->length(), a2);
- LOperand* elements = UseFixed(instr->elements(), a3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = Use(instr->argument(i));
- AddInstruction(new(zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, cp);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), cp);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
- descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), a1);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathCos:
- return DoMathCos(instr);
- case kMathSin:
- return DoMathSin(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new(zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), f4);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
- LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = TempDoubleRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
- return DefineFixedDouble(result, f4);
-}
-
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- Representation r = instr->value()->representation();
- LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
- ? NULL
- : UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LMathAbs(context, input));
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempRegister();
- LMathFloor* result = new(zone()) LMathFloor(input, temp);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathSqrt* result = new(zone()) LMathSqrt(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempDoubleRegister();
- LMathRound* result = new(zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
- dividend, divisor));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
- ? NULL : TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
- (!instr->IsMathFloorOfDiv() &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
- dividend, divisor));
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result = DefineAsRegister(new(zone()) LModI(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- HValue* left = instr->BetterLeftOperand();
- HValue* right = instr->BetterRightOperand();
- LOperand* left_op;
- LOperand* right_op;
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-
- int32_t constant_value = 0;
- if (right->IsConstant()) {
- HConstant* constant = HConstant::cast(right);
- constant_value = constant->Integer32Value();
- // Constants -1, 0 and 1 can be optimized if the result can overflow.
- // For other constants, it can be optimized only without overflow.
- if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
- left_op = UseRegisterAtStart(left);
- right_op = UseConstant(right);
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- LInstruction* result =
- instr->representation().IsSmi()
- ? DefineAsRegister(new (zone()) LMulS(left_op, right_op))
- : DefineAsRegister(new (zone()) LMulI(left_op, right_op));
- if (right_op->IsConstantOperand()
- ? ((can_overflow && constant_value == -1) ||
- (bailout_on_minus_zero && constant_value <= 0))
- : (can_overflow || bailout_on_minus_zero)) {
- AssignEnvironment(result);
- }
- return result;
-
- } else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips64r2) {
- if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
- HAdd* add = HAdd::cast(instr->uses().value());
- if (instr == add->left()) {
- // This mul is the lhs of an add. The add and mul will be folded
- // into a multiply-add.
- return NULL;
- }
- if (instr == add->right() && !add->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add.
- return NULL;
- }
- }
- }
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- LInstruction* result =
- instr->representation().IsSmi()
- ? DefineAsRegister(new (zone()) LSubS(left, right))
- : DefineAsRegister(new (zone()) LSubI(left, right));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
- LInstruction* result =
- instr->representation().IsSmi()
- ? DefineAsRegister(new (zone()) LAddS(left, right))
- : DefineAsRegister(new (zone()) LAddI(left, right));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return DefineAsRegister(new (zone()) LAddE(left, right));
- } else if (instr->representation().IsDouble()) {
- if (kArchVariant == kMips64r2) {
- if (instr->left()->IsMul())
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
-
- if (instr->right()->IsMul()) {
- DCHECK(!instr->left()->IsMul());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
- }
- return DoArithmeticD(Token::ADD, instr);
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), f2);
- LOperand* right =
- exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), f4)
- : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, f0),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
- temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone())
- LClassOfTestAndBranch(UseRegister(instr->value()), TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- return new(zone()) LSeqStringSetChar(context, string, index, value);
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseRegisterOrConstantAtStart(instr->length())
- : UseRegisterAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempDoubleRegister();
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
- (kMaxInt == Smi::kMaxValue));
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new(zone()) LSmiTag(value));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- // Revisit this decision, here and 8 lines below.
- return DefineAsRegister(new(zone()) LClampDToUint8(reg,
- TempDoubleRegister()));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- LClampTToUint8* result =
- new(zone()) LClampTToUint8(reg, TempDoubleRegister());
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), cp)
- : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
- parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseRegister(instr->elements());
- } else {
- DCHECK(instr->representation().IsSmiOrTagged() ||
- instr->representation().IsInteger32());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK(
- (instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* val = NULL;
- LOperand* key = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- key = UseRegisterOrConstantAtStart(instr->key());
- val = UseRegister(instr->value());
- } else {
- DCHECK(instr->value()->representation().IsSmiOrTagged() ||
- instr->value()->representation().IsInteger32());
- if (needs_write_barrier) {
- object = UseTempRegister(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- }
-
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- }
-
- DCHECK(
- (instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK(instr->elements()->representation().IsExternal());
- LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), a0);
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, v0);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- return MarkAsCall(
- DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- if (instr->IsAllocationFolded()) {
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseRegisterOrConstantAtStart(instr->length());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), a3);
- LTypeof* result = new (zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->enumerable(), a0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
deleted file mode 100644
index c75959a248..0000000000
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ /dev/null
@@ -1,2496 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
-#define V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddE) \
- V(AddI) \
- V(AddS) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadRoot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(MathAbs) \
- V(MathCos) \
- V(MathSin) \
- V(MathExp) \
- V(MathClz32) \
- V(MathFloor) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(MulS) \
- V(MultiplyAddD) \
- V(NumberTagD) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(SubS) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- }
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator interface.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const final { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 3> {
- public:
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathFloor(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRound(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathPowHalf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LSubS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddE final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddE(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LAddS final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddS(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const final { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- const CallInterfaceDescriptor descriptor() { return descriptor_; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value = NULL) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* context,
- LOperand* size,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only at
- // the start of the instruction. The register allocator is free to assign the
- // same register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand that must be a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in a register, a stack slot, or a constant operand.
- // It will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LUnallocated* TempDoubleRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly, and we do not attach an environment to the
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
diff --git a/deps/v8/src/crankshaft/ppc/OWNERS b/deps/v8/src/crankshaft/ppc/OWNERS
deleted file mode 100644
index 752e8e3d81..0000000000
--- a/deps/v8/src/crankshaft/ppc/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-jyan@ca.ibm.com
-dstence@us.ibm.com
-joransiu@ca.ibm.com
-mbrandy@us.ibm.com
-michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
deleted file mode 100644
index 877d62ceaa..0000000000
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ /dev/null
@@ -1,5688 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
- LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
-}
-
-LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-}
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
- if (FLAG_enable_embedded_constant_pool && !rc) {
- masm()->AbortConstantPoolBuilding();
- }
- return rc;
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ stfd(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ lfd(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- // r4: Callee's JS function.
- // cp: Callee's context.
- // pp: Callee's constant pool pointer (if enabled)
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
- // ip: Our own function entry (required by the prologue)
- }
-
- int prologue_offset = masm_->pc_offset();
-
- if (prologue_offset) {
- // Prologue logic requires its starting address in ip and the
- // corresponding offset from the function entry.
- prologue_offset += Instruction::kInstrSize;
- __ addi(ip, ip, Operand(prologue_offset));
- }
- info()->set_prologue_offset(prologue_offset);
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- __ subi(sp, sp, Operand(slots * kPointerSize));
- if (FLAG_debug_code) {
- __ Push(r3, r4);
- __ li(r0, Operand(slots));
- __ mtctr(r0);
- __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
- __ mov(r4, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ StorePU(r4, MemOperand(r3, -kPointerSize));
- __ bdnz(&loop);
- __ Pop(r3, r4);
- }
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info()->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is in r4.
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(r4);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
- Operand(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r4);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in both r3 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ mr(cp, r3);
- __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ LoadP(r3, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ StoreP(r3, target, r0);
- // Update the write barrier. This clobbers r6 and r3.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
- GetLinkRegisterState(), kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, r3, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ subi(sp, sp, Operand(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(
- ";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(), code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ PushCommonFrame(scratch0());
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- __ PopCommonFrame(scratch0());
- frame_is_built_ = false;
- }
- __ b(code->exit());
- }
- }
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 24-bit signed
- // immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table. We also don't consider the pc load delta.
- // Each entry in the jump table generates one instruction and inlines one
- // 32-bit data after it.
- if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- jump_table_.length() * 7)) {
- Abort(kGeneratedCodeIsTooLarge);
- }
-
- if (jump_table_.length() > 0) {
- Label needs_frame, call_deopt_entry;
-
- Comment(";;; -------------------- Jump table --------------------");
- Address base = jump_table_[0].address;
-
- Register entry_offset = scratch0();
-
- int length = jump_table_.length();
- for (int i = 0; i < length; i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
-
- DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
-
- // Second-level deopt table entries are contiguous and small, so instead
- // of loading the full, absolute address of each one, load an immediate
- // offset which will be added to the base address later.
- __ mov(entry_offset, Operand(entry - base));
-
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- __ PushCommonFrame();
- __ b(&needs_frame, SetLK);
- } else {
- __ b(&call_deopt_entry, SetLK);
- }
- }
-
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ push(ip);
- DCHECK(info()->IsStub());
- }
-
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
-
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
-
- // Add the base address to the offset previously loaded in entry_offset.
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
- __ add(ip, entry_offset, ip);
- __ Jump(ip);
- }
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int code) const {
- return Register::from_code(code);
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
- return DoubleRegister::from_code(code);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- AllowDeferredHandleDereference get_number;
- DCHECK(literal->IsNumber());
- __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
- } else if (r.IsDouble()) {
- Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
- } else {
- DCHECK(r.IsSmiOrTagged());
- __ Move(scratch, literal);
- }
- return scratch;
- } else if (op->IsStackSlot()) {
- __ LoadP(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
- Register dst) {
- DCHECK(IsInteger32(const_op));
- HConstant* constant = chunk_->LookupConstant(const_op);
- int32_t value = constant->Integer32Value();
- if (IsSmi(const_op)) {
- __ LoadSmiLiteral(dst, Smi::FromInt(value));
- } else {
- __ LoadIntLiteral(dst, value);
- }
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-
-intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<intptr_t>(Smi::FromInt(value));
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- DCHECK(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand::Zero();
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand::Zero();
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize;
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()));
- } else {
- // Retrieve parameter without eager stack-frame relative to the
- // stack-pointer.
- return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- } else {
- // Retrieve parameter without eager stack-frame relative to the
- // stack-pointer.
- return MemOperand(sp,
- ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation, LOperand* op,
- bool is_tagged, bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment, translation, value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer, dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
- LInstruction* instr, SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Move(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ LoadP(cp, ToMemOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
- LInstruction* instr, LOperand* context) {
- LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(instr->pointer_map(), argc,
- Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index, translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
- CRegister cr) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- CRegister alt_cr = cr6;
- Register scratch = scratch0();
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
- DCHECK(!alt_cr.is(cr));
- __ Push(r4, scratch);
- __ mov(scratch, Operand(count));
- __ lwz(r4, MemOperand(scratch));
- __ subi(r4, r4, Operand(1));
- __ cmpi(r4, Operand::Zero(), alt_cr);
- __ bne(&no_deopt, alt_cr);
- __ li(r4, Operand(FLAG_deopt_every_n_times));
- __ stw(r4, MemOperand(scratch));
- __ Pop(r4, scratch);
-
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&no_deopt);
- __ stw(r4, MemOperand(scratch));
- __ Pop(r4, scratch);
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to handle condition, build frame, or
- // restore caller doubles.
- if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
- DeoptComment(deopt_info);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- __ b(cond, &jump_table_.last().label, cr);
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason, CRegister cr) {
- Deoptimizer::BailoutType bailout_type =
- info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode deopt_mode) {
- DCHECK(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint =
- safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_, label->hydrogen_value()->id(),
- label->block_id(), LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t shift = WhichPowerOf2Abs(divisor);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ cmpwi(dividend, Operand::Zero());
- __ bge(&dividend_is_not_negative);
- if (shift) {
- // Note that this is correct even for kMinInt operands.
- __ neg(dividend, dividend);
- __ ExtractBitRange(dividend, dividend, shift - 1, 0);
- __ neg(dividend, dividend, LeaveOE, SetRC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
- }
- } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ li(dividend, Operand::Zero());
- } else {
- DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
- }
- __ b(&done);
- }
-
- __ bind(&dividend_is_not_negative);
- if (shift) {
- __ ExtractBitRange(dividend, dividend, shift - 1, 0);
- } else {
- __ li(dividend, Operand::Zero());
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- __ mov(ip, Operand(Abs(divisor)));
- __ mullw(result, result, ip);
- __ sub(result, dividend, result, LeaveOE, SetRC);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ bne(&remainder_not_zero, cr0);
- __ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
- Register scratch = scratch0();
- bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
- Label done;
-
- if (can_overflow) {
- __ li(r0, Operand::Zero()); // clear xer
- __ mtxer(r0);
- }
-
- __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
-
- // Check for x % 0.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmpwi(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1, divw will return undefined, which is not what we
- // want. We have to deopt if we care about -0, because we can't return that.
- if (can_overflow) {
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0);
- } else {
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(overflow, result_reg, r0, result_reg, cr0);
- __ boverflow(&done, cr0);
- } else {
- Label no_overflow_possible;
- __ bnooverflow(&no_overflow_possible, cr0);
- __ li(result_reg, Operand::Zero());
- __ b(&done);
- __ bind(&no_overflow_possible);
- }
- }
- }
-
- __ mullw(scratch, right_reg, scratch);
- __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ bne(&done, cr0);
- __ cmpwi(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
- __ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-
- int32_t shift = WhichPowerOf2Abs(divisor);
-
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
- __ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
- }
-
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ neg(result, dividend);
- return;
- }
- if (shift == 0) {
- __ mr(result, dividend);
- } else {
- if (shift == 1) {
- __ srwi(result, dividend, Operand(31));
- } else {
- __ srawi(result, dividend, 31);
- __ srwi(result, result, Operand(32 - shift));
- }
- __ add(result, dividend, result);
- __ srawi(result, result, shift);
- }
- if (divisor < 0) __ neg(result, result);
-}
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ neg(result, result);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- Register scratch = scratch0();
- __ mov(ip, Operand(divisor));
- __ mullw(scratch, result, ip);
- __ cmpw(scratch, dividend);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- const Register dividend = ToRegister(instr->dividend());
- const Register divisor = ToRegister(instr->divisor());
- Register result = ToRegister(instr->result());
- bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
-
- DCHECK(!dividend.is(result));
- DCHECK(!divisor.is(result));
-
- if (can_overflow) {
- __ li(r0, Operand::Zero()); // clear xer
- __ mtxer(r0);
- }
-
- __ divw(result, dividend, divisor, SetOE, SetRC);
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ cmpwi(dividend, Operand::Zero());
- __ bne(&dividend_not_zero);
- __ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (can_overflow) {
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
- } else {
- // When truncating, we want kMinInt / -1 = kMinInt.
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(overflow, result, dividend, result, cr0);
- } else {
- Label no_overflow_possible;
- __ bnooverflow(&no_overflow_possible, cr0);
- __ mr(result, dividend);
- __ bind(&no_overflow_possible);
- }
- }
- }
-
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result);
-#endif
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- // Deoptimize if remainder is not 0.
- Register scratch = scratch0();
- __ mullw(scratch, divisor, result);
- __ cmpw(dividend, scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register result = ToRegister(instr->result());
- int32_t divisor = instr->divisor();
- bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 0) {
- if (shift || !result.is(dividend)) {
- __ srawi(result, dividend, shift);
- }
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- OEBit oe = LeaveOE;
-#if V8_TARGET_ARCH_PPC64
- if (divisor == -1 && can_overflow) {
- __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
- __ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-#else
- if (can_overflow) {
- __ li(r0, Operand::Zero()); // clear xer
- __ mtxer(r0);
- oe = SetOE;
- }
-#endif
-
- __ neg(result, dividend, oe, SetRC);
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
- }
-
-// If the negation could not overflow, simply shifting is OK.
-#if !V8_TARGET_ARCH_PPC64
- if (!can_overflow) {
-#endif
- if (shift) {
- __ ShiftRightArithImm(result, result, shift);
- }
- return;
-#if !V8_TARGET_ARCH_PPC64
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
- return;
- }
-
- Label overflow, done;
- __ boverflow(&overflow, cr0);
- __ srawi(result, result, shift);
- __ b(&done);
- __ bind(&overflow);
- __ mov(result, Operand(kMinInt / divisor));
- __ bind(&done);
-#endif
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ neg(result, result);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp());
- DCHECK(!temp.is(dividend) && !temp.is(result));
- Label needs_adjustment, done;
- __ cmpwi(dividend, Operand::Zero());
- __ b(divisor > 0 ? lt : gt, &needs_adjustment);
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ neg(result, result);
- __ b(&done);
- __ bind(&needs_adjustment);
- __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
- __ TruncatingDiv(result, temp, Abs(divisor));
- if (divisor < 0) __ neg(result, result);
- __ subi(result, result, Operand(1));
- __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- const Register dividend = ToRegister(instr->dividend());
- const Register divisor = ToRegister(instr->divisor());
- Register result = ToRegister(instr->result());
- bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
-
- DCHECK(!dividend.is(result));
- DCHECK(!divisor.is(result));
-
- if (can_overflow) {
- __ li(r0, Operand::Zero()); // clear xer
- __ mtxer(r0);
- }
-
- __ divw(result, dividend, divisor, SetOE, SetRC);
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ cmpwi(dividend, Operand::Zero());
- __ bne(&dividend_not_zero);
- __ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (can_overflow) {
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
- } else {
- // When truncating, we want kMinInt / -1 = kMinInt.
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(overflow, result, dividend, result, cr0);
- } else {
- Label no_overflow_possible;
- __ bnooverflow(&no_overflow_possible, cr0);
- __ mr(result, dividend);
- __ bind(&no_overflow_possible);
- }
- }
- }
-
- Label done;
- Register scratch = scratch0();
-// If both operands have the same sign then we are done.
-#if V8_TARGET_ARCH_PPC64
- __ xor_(scratch, dividend, divisor);
- __ cmpwi(scratch, Operand::Zero());
- __ bge(&done);
-#else
- __ xor_(scratch, dividend, divisor, SetRC);
- __ bge(&done, cr0);
-#endif
-
- // If there is no remainder then we are done.
- __ mullw(scratch, divisor, result);
- __ cmpw(dividend, scratch);
- __ beq(&done);
-
- // We performed a truncating division. Correct the result.
- __ subi(result, result, Operand(1));
- __ bind(&done);
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result);
-#endif
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DoubleRegister addend = ToDoubleRegister(instr->addend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- __ fmadd(result, multiplier, multiplicand, addend);
-}
-
-
-void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
- DoubleRegister minuend = ToDoubleRegister(instr->minuend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- __ fmsub(result, multiplier, multiplicand, minuend);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (right_op->IsConstantOperand()) {
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a zero constant will be handled separately.
- // If the constant is negative and left is zero, the result should be -0.
- __ cmpi(left, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- switch (constant) {
- case -1:
- if (can_overflow) {
-#if V8_TARGET_ARCH_PPC64
- if (instr->hydrogen()->representation().IsSmi()) {
-#endif
- __ li(r0, Operand::Zero()); // clear xer
- __ mtxer(r0);
- __ neg(result, left, SetOE, SetRC);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
-#if V8_TARGET_ARCH_PPC64
- } else {
- __ neg(result, left);
- __ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- }
-#endif
- } else {
- __ neg(result, left);
- }
- break;
- case 0:
- if (bailout_on_minus_zero) {
-// If left is strictly negative and the constant is zero, the
-// result is -0. Deoptimize if required, otherwise return 0.
-#if V8_TARGET_ARCH_PPC64
- if (instr->hydrogen()->representation().IsSmi()) {
-#endif
- __ cmpi(left, Operand::Zero());
-#if V8_TARGET_ARCH_PPC64
- } else {
- __ cmpwi(left, Operand::Zero());
- }
-#endif
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- __ li(result, Operand::Zero());
- break;
- case 1:
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (base::bits::IsPowerOfTwo32(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ ShiftLeftImm(result, left, Operand(shift));
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ neg(result, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ ShiftLeftImm(scratch, left, Operand(shift));
- __ add(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ neg(result, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ ShiftLeftImm(scratch, left, Operand(shift));
- __ sub(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ neg(result, result);
- } else {
- // Generate standard code.
- __ mov(ip, Operand(constant));
- __ Mul(result, left, ip);
- }
- }
-
- } else {
- DCHECK(right_op->IsRegister());
- Register right = ToRegister(right_op);
-
- if (can_overflow) {
-#if V8_TARGET_ARCH_PPC64
- // result = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ SmiUntag(scratch, right);
- __ Mul(result, result, scratch);
- } else {
- __ Mul(result, left, right);
- }
- __ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiTag(result);
- }
-#else
- // scratch:result = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ mulhw(scratch, result, right);
- __ mullw(result, result, right);
- } else {
- __ mulhw(scratch, left, right);
- __ mullw(result, left, right);
- }
- __ TestIfInt32(scratch, result, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
-#endif
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ Mul(result, result, right);
- } else {
- __ Mul(result, left, right);
- }
- }
-
- if (bailout_on_minus_zero) {
- Label done;
-#if V8_TARGET_ARCH_PPC64
- if (instr->hydrogen()->representation().IsSmi()) {
-#endif
- __ xor_(r0, left, right, SetRC);
- __ bge(&done, cr0);
-#if V8_TARGET_ARCH_PPC64
- } else {
- __ xor_(r0, left, right);
- __ cmpwi(r0, Operand::Zero());
- __ bge(&done);
- }
-#endif
- // Bail out if the result is minus zero.
- __ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- DCHECK(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot()) {
- right = Operand(EmitLoadRegister(right_op, ip));
- } else {
- DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
-
- if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andi(result, left, right);
- break;
- case Token::BIT_OR:
- __ ori(result, left, right);
- break;
- case Token::BIT_XOR:
- __ xori(result, left, right);
- break;
- default:
- UNREACHABLE();
- break;
- }
- return;
- }
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_OR:
- __ Or(result, left, right);
- break;
- case Token::BIT_XOR:
- if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
- __ notx(result, left);
- } else {
- __ Xor(result, left, right);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (right_op->IsRegister()) {
- // Mask the right_op operand.
- __ andi(scratch, ToRegister(right_op), Operand(0x1F));
- switch (instr->op()) {
- case Token::ROR:
- // rotate_right(a, b) == rotate_left(a, 32 - b)
- __ subfic(scratch, scratch, Operand(32));
- __ rotlw(result, left, scratch);
- break;
- case Token::SAR:
- __ sraw(result, left, scratch);
- break;
- case Token::SHR:
- if (instr->can_deopt()) {
- __ srw(result, left, scratch, SetRC);
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result, SetRC);
-#endif
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
- } else {
- __ srw(result, left, scratch);
- }
- break;
- case Token::SHL:
- __ slw(result, left, scratch);
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result);
-#endif
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ rotrwi(result, left, shift_count);
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ srawi(result, left, shift_count);
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ srwi(result, left, Operand(shift_count));
- } else {
- if (instr->can_deopt()) {
- __ cmpwi(left, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
-#if V8_TARGET_ARCH_PPC64
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ sldi(result, left, Operand(shift_count));
-#else
- if (instr->hydrogen_value()->representation().IsSmi() &&
- instr->can_deopt()) {
- if (shift_count != 1) {
- __ slwi(result, left, Operand(shift_count - 1));
- __ SmiTagCheckOverflow(result, result, scratch);
- } else {
- __ SmiTagCheckOverflow(result, left, scratch);
- }
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
-#endif
- } else {
- __ slwi(result, left, Operand(shift_count));
-#if V8_TARGET_ARCH_PPC64
- __ extsw(result, result);
-#endif
- }
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* right = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#if V8_TARGET_ARCH_PPC64
- const bool isInteger = !instr->hydrogen()->representation().IsSmi();
-#else
- const bool isInteger = false;
-#endif
- if (!can_overflow || isInteger) {
- if (right->IsConstantOperand()) {
- __ Add(result, left, -(ToOperand(right).immediate()), r0);
- } else {
- __ sub(result, left, EmitLoadRegister(right, ip));
- }
- if (can_overflow) {
-#if V8_TARGET_ARCH_PPC64
- __ TestIfInt32(result, r0);
-#else
- __ TestIfInt32(scratch0(), result, r0);
-#endif
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- }
-
- } else {
- if (right->IsConstantOperand()) {
- __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
- scratch0(), r0);
- } else {
- __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
- scratch0(), r0);
- }
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
- }
-}
-
-
-void LCodeGen::DoRSubI(LRSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
-
- DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
- right->IsConstantOperand());
-
- Operand right_operand = ToOperand(right);
- if (is_int16(right_operand.immediate())) {
- __ subfic(ToRegister(result), ToRegister(left), right_operand);
- } else {
- __ mov(r0, right_operand);
- __ sub(ToRegister(result), r0, ToRegister(left));
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
-#if V8_HOST_ARCH_IA32
- // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
- // builds.
- uint64_t bits = instr->bits();
- if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
- V8_UINT64_C(0x7FF0000000000000)) {
- uint32_t lo = static_cast<uint32_t>(bits);
- uint32_t hi = static_cast<uint32_t>(bits >> 32);
- __ mov(ip, Operand(lo));
- __ mov(scratch0(), Operand(hi));
- __ MovInt64ToDouble(result, scratch0(), ip);
- return;
- }
-#endif
- double v = instr->value();
- __ LoadDoubleLiteral(result, v, scratch0());
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ Move(ToRegister(instr->result()), object);
-}
-
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
- Register scratch = scratch0();
- DCHECK(!scratch.is(string));
- DCHECK(!scratch.is(ToRegister(index)));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ add(scratch, string, ToRegister(index));
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
- __ add(scratch, string, scratch);
- }
- return FieldMemOperand(scratch, SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- __ andi(scratch, scratch,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpi(scratch,
- Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
- : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ lbz(result, operand);
- } else {
- __ lhz(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
-
- if (FLAG_debug_code) {
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type
- : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ stb(value, operand);
- } else {
- __ sth(value, operand);
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* right = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#if V8_TARGET_ARCH_PPC64
- const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
- instr->hydrogen()->representation().IsExternal());
-#else
- const bool isInteger = false;
-#endif
-
- if (!can_overflow || isInteger) {
- if (right->IsConstantOperand()) {
- __ Add(result, left, ToOperand(right).immediate(), r0);
- } else {
- __ add(result, left, EmitLoadRegister(right, ip));
- }
-#if V8_TARGET_ARCH_PPC64
- if (can_overflow) {
- __ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- }
-#endif
- } else {
- if (right->IsConstantOperand()) {
- __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
- scratch0(), r0);
- } else {
- __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
- scratch0(), r0);
- }
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Register left_reg = ToRegister(left);
- Register right_reg = EmitLoadRegister(right, ip);
- Register result_reg = ToRegister(instr->result());
- Label return_left, done;
-#if V8_TARGET_ARCH_PPC64
- if (instr->hydrogen_value()->representation().IsSmi()) {
-#endif
- __ cmp(left_reg, right_reg);
-#if V8_TARGET_ARCH_PPC64
- } else {
- __ cmpw(left_reg, right_reg);
- }
-#endif
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(cond, result_reg, left_reg, right_reg);
- } else {
- __ b(cond, &return_left);
- __ Move(result_reg, right_reg);
- __ b(&done);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
- }
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- DoubleRegister left_reg = ToDoubleRegister(left);
- DoubleRegister right_reg = ToDoubleRegister(right);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ fcmpu(left_reg, right_reg);
- __ bunordered(&check_nan_left);
- __ beq(&check_zero);
- __ b(cond, &return_left);
- __ b(&return_right);
-
- __ bind(&check_zero);
- __ fcmpu(left_reg, kDoubleRegZero);
- __ bne(&return_left); // left == right != 0.
-
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- // Min: The algorithm is -((-L) + (-R)), which, when L and R are in
- // different registers, is most efficiently expressed as -((-L) - R).
- __ fneg(left_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ fadd(result_reg, left_reg, right_reg);
- } else {
- __ fsub(result_reg, left_reg, right_reg);
- }
- __ fneg(result_reg, result_reg);
- } else {
- // Max: The following works because +0 + -0 == +0
- __ fadd(result_reg, left_reg, right_reg);
- }
- __ b(&done);
-
- __ bind(&check_nan_left);
- __ fcmpu(left_reg, left_reg);
- __ bunordered(&return_left); // left == NaN.
-
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ fmr(result_reg, right_reg);
- }
- __ b(&done);
-
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ fmr(result_reg, left_reg);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- if (CpuFeatures::IsSupported(VSX)) {
- __ xsadddp(result, left, right);
- } else {
- __ fadd(result, left, right);
- }
- break;
- case Token::SUB:
- if (CpuFeatures::IsSupported(VSX)) {
- __ xssubdp(result, left, right);
- } else {
- __ fsub(result, left, right);
- }
- break;
- case Token::MUL:
- if (CpuFeatures::IsSupported(VSX)) {
- __ xsmuldp(result, left, right);
- } else {
- __ fmul(result, left, right);
- }
- break;
- case Token::DIV:
- if (CpuFeatures::IsSupported(VSX)) {
- __ xsdivdp(result, left, right);
- } else {
- __ fdiv(result, left, right);
- }
- break;
- case Token::MOD: {
- __ PrepareCallCFunction(0, 2, scratch0());
- __ MovToFloatParameters(left, right);
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- // Move the result into the double result register.
- __ MovFromFloatResult(result);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r4));
- DCHECK(ToRegister(instr->right()).is(r3));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
- if (right_block == left_block || cond == al) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
- } else if (right_block == next_block) {
- __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
- } else {
- __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
- __ b(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
- int true_block = instr->TrueDestination(chunk_);
- __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
- int false_block = instr->FalseDestination(chunk_);
- __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- DoubleRegister dbl_scratch = double_scratch0();
- const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
- 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
-
- if (r.IsInteger32()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ cmpwi(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (r.IsSmi()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ cmpi(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- DoubleRegister reg = ToDoubleRegister(instr->value());
- // Test the double value. Zero and NaN are false.
- __ fcmpu(reg, kDoubleRegZero, cr7);
- __ mfcr(r0);
- __ andi(r0, r0, Operand(crZOrNaNBits));
- EmitBranch(instr, eq, cr0);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- __ cmpi(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, al);
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
- __ mfcr(r0);
- __ andi(r0, r0, Operand(crZOrNaNBits));
- EmitBranch(instr, eq, cr0);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmpi(ip, Operand::Zero());
- EmitBranch(instr, ne);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ beq(instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // Boolean -> its value.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ beq(instr->TrueLabel(chunk_));
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ beq(instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ beq(instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ cmpi(reg, Operand::Zero());
- __ beq(instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ TestIfSmi(reg, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
- }
-
- const Register map = scratch0();
- if (expected & ToBooleanHint::kNeedsMap) {
- __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ TestBit(ip, Map::kIsUndetectable, r0);
- __ bne(instr->FalseLabel(chunk_), cr0);
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
- __ bge(instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ bge(&not_string);
- __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmpi(ip, Operand::Zero());
- __ bne(instr->TrueLabel(chunk_));
- __ b(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CompareInstanceType(map, ip, SYMBOL_TYPE);
- __ beq(instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ bne(&not_heap_number);
- __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
- __ mfcr(r0);
- __ andi(r0, r0, Operand(crZOrNaNBits));
- __ bne(instr->FalseLabel(chunk_), cr0);
- __ b(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cond = TokenToCondition(instr->op());
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Compare left and right operands as doubles and load the
- // resulting flags into the normal status register.
- __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered,
- // jump to the false block label.
- __ bunordered(instr->FalseLabel(chunk_));
- } else {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- if (is_unsigned) {
- __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
- } else {
- __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
- }
- } else {
- if (is_unsigned) {
- __ Cmplwi(ToRegister(left), Operand(value), r0);
- } else {
- __ Cmpwi(ToRegister(left), Operand(value), r0);
- }
- }
- } else if (left->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- if (is_unsigned) {
- __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
- } else {
- __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
- }
- } else {
- if (is_unsigned) {
- __ Cmplwi(ToRegister(right), Operand(value), r0);
- } else {
- __ Cmpwi(ToRegister(right), Operand(value), r0);
- }
- }
- // We commuted the operands, so commute the condition.
- cond = CommuteCondition(cond);
- } else if (instr->hydrogen_value()->representation().IsSmi()) {
- if (is_unsigned) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmp(ToRegister(left), ToRegister(right));
- }
- } else {
- if (is_unsigned) {
- __ cmplw(ToRegister(left), ToRegister(right));
- } else {
- __ cmpw(ToRegister(left), ToRegister(right));
- }
- }
- }
- EmitBranch(instr, cond);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- __ cmp(left, right);
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ mov(ip, Operand(factory()->the_hole_value()));
- __ cmp(input_reg, ip);
- EmitBranch(instr, eq);
- return;
- }
-
- DoubleRegister input_reg = ToDoubleRegister(instr->object());
- __ fcmpu(input_reg, input_reg);
- EmitFalseBranch(instr, ordered);
-
- Register scratch = scratch0();
- __ MovDoubleHighToInt(scratch, input_reg);
- __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- EmitBranch(instr, eq);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input, Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK
- : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ TestIfSmi(input_reg, r0);
- EmitBranch(instr, eq, cr0);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ TestBit(temp, Map::kIsUndetectable, r0);
- EmitBranch(instr, ne, cr0);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r4));
- DCHECK(ToRegister(instr->right()).is(r3));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(r3, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return ge;
- if (from == FIRST_TYPE) return le;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ bge(is_true);
- } else {
- __ bge(is_false);
- }
-
- // Check if the constructor in the map is a function.
- Register instance_type = ip;
- __ GetMapConstructor(temp, temp, temp2, instance_type);
-
- // Objects with a non-function constructor have class 'Object'.
- __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
- if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
- __ bne(is_true);
- } else {
- __ bne(is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(temp,
- FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ Cmpi(temp, Operand(class_name), r0);
- // End with the answer in flags.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, eq);
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ Cmpi(temp, Operand(instr->map()), r0);
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = scratch0();
- Register const object_instance_type = ip;
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ TestIfSmi(object, r0);
- EmitFalseBranch(instr, eq, cr0);
- }
-
- // Loop through the {object}'s prototype chain looking for the {prototype}.
- __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ lbz(object_instance_type,
- FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
- // Deoptimize for proxies.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
- __ LoadP(object_prototype,
- FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- EmitFalseBranch(instr, eq);
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, eq);
- __ LoadP(object_map,
- FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ b(&loop);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined
- __ cmpi(r3, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r4, Heap::kTrueValueRootIndex);
- __ LoadRoot(r5, Heap::kFalseValueRootIndex);
- __ isel(condition, ToRegister(instr->result()), r4, r5);
- } else {
- Label true_value, done;
-
- __ b(condition, &true_value);
-
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r3. Since we're leaving the
- // code managed by the register allocator and tearing down the frame, it's
- // safe to write to the context register.
- __ push(r3);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (NeedsEagerFrame()) {
- masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
- } else if (sp_delta != 0) {
- __ addi(sp, sp, Operand(sp_delta));
- }
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi.
- if (NeedsEagerFrame()) {
- masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
- }
- __ SmiToPtrArrayOffset(r0, reg);
- __ add(sp, sp, r0);
- }
-
- __ blr();
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- } else {
- if (CpuFeatures::IsSupported(ISELECT)) {
- Register scratch = scratch0();
- __ mov(scratch, Operand(factory()->undefined_value()));
- __ cmp(result, ip);
- __ isel(eq, result, scratch, result);
- } else {
- Label skip;
- __ cmp(result, ip);
- __ bne(&skip);
- __ mov(result, Operand(factory()->undefined_value()));
- __ bind(&skip);
- }
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadP(scratch, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- } else {
- __ bne(&skip_assignment);
- }
- }
-
- __ StoreP(value, target, r0);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK
- : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context, target.offset(), value, scratch,
- GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = MemOperand(object, offset);
- __ LoadRepresentation(result, operand, access.representation(), r0);
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- DCHECK(access.IsInobject());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ lfd(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
-
- Representation representation = access.representation();
-
-#if V8_TARGET_ARCH_PPC64
- // 64-bit Smi optimization
- if (representation.IsSmi() &&
- instr->hydrogen()->representation().IsInteger32()) {
- // Read int value directly from upper half of the smi.
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
-#endif
-
- __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
- r0);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ LoadP(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- if (CpuFeatures::IsSupported(ISELECT)) {
- // Get the prototype from the initial map (optimistic).
- __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ isel(eq, result, ip, result);
- } else {
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ bne(&done);
-
- // Get the prototype from the initial map.
- __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them; add one more.
- if (instr->length()->IsConstantOperand()) {
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int index = (const_length - const_index) + 1;
- __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
- } else {
- Register index = ToRegister(instr->index());
- __ subfic(result, index, Operand(const_length + 1));
- __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
- __ LoadPX(result, MemOperand(arguments, result));
- }
- } else if (instr->index()->IsConstantOperand()) {
- Register length = ToRegister(instr->length());
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int loc = const_index - 1;
- if (loc != 0) {
- __ subi(result, length, Operand(loc));
- __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
- __ LoadPX(result, MemOperand(arguments, result));
- } else {
- __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
- __ LoadPX(result, MemOperand(arguments, result));
- }
- } else {
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- __ sub(result, length, index);
- __ addi(result, result, Operand(1));
- __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
- __ LoadPX(result, MemOperand(arguments, result));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
- r0);
- } else {
- __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
- __ add(scratch0(), external_pointer, r0);
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ lfs(result, MemOperand(scratch0(), base_offset));
- } else { // i.e. elements_kind == FLOAT64_ELEMENTS
- __ lfd(result, MemOperand(scratch0(), base_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand =
- PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
- constant_key, element_size_shift, base_offset);
- switch (elements_kind) {
- case INT8_ELEMENTS:
- if (key_is_constant) {
- __ LoadByte(result, mem_operand, r0);
- } else {
- __ lbzx(result, mem_operand);
- }
- __ extsb(result, result);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- if (key_is_constant) {
- __ LoadByte(result, mem_operand, r0);
- } else {
- __ lbzx(result, mem_operand);
- }
- break;
- case INT16_ELEMENTS:
- if (key_is_constant) {
- __ LoadHalfWordArith(result, mem_operand, r0);
- } else {
- __ lhax(result, mem_operand);
- }
- break;
- case UINT16_ELEMENTS:
- if (key_is_constant) {
- __ LoadHalfWord(result, mem_operand, r0);
- } else {
- __ lhzx(result, mem_operand);
- }
- break;
- case INT32_ELEMENTS:
- if (key_is_constant) {
- __ LoadWordArith(result, mem_operand, r0);
- } else {
- __ lwax(result, mem_operand);
- }
- break;
- case UINT32_ELEMENTS:
- if (key_is_constant) {
- __ LoadWord(result, mem_operand, r0);
- } else {
- __ lwzx(result, mem_operand);
- }
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
- __ cmplw(result, r0);
- DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- int base_offset = instr->base_offset() + constant_key * kDoubleSize;
- if (!key_is_constant) {
- __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
- __ add(scratch, elements, r0);
- elements = scratch;
- }
- if (!is_int16(base_offset)) {
- __ Add(scratch, elements, base_offset, r0);
- base_offset = 0;
- elements = scratch;
- }
- __ lfd(result, MemOperand(elements, base_offset));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (is_int16(base_offset + Register::kExponentOffset)) {
- __ lwz(scratch,
- MemOperand(elements, base_offset + Register::kExponentOffset));
- } else {
- __ addi(scratch, elements, Operand(base_offset));
- __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
- }
- __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- HLoadKeyed* hinstr = instr->hydrogen();
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
- Register key = ToRegister(instr->key());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (hinstr->key()->representation().IsSmi()) {
- __ SmiToPtrArrayOffset(r0, key);
- } else {
- __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
- }
- __ add(scratch, elements, r0);
- }
-
- bool requires_hole_check = hinstr->RequiresHoleCheck();
- Representation representation = hinstr->representation();
-
-#if V8_TARGET_ARCH_PPC64
- // 64-bit Smi optimization
- if (representation.IsInteger32() &&
- hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
- DCHECK(!requires_hole_check);
- // Read int value directly from upper half of the smi.
- offset = SmiWordOffset(offset);
- }
-#endif
-
- __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
- r0);
-
- // Check for the hole value.
- if (requires_hole_check) {
- if (IsFastSmiElementsKind(hinstr->elements_kind())) {
- __ TestIfSmi(result, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- __ bne(&done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
- bool key_is_constant, bool key_is_smi,
- int constant_key,
- int element_size_shift,
- int base_offset) {
- Register scratch = scratch0();
-
- if (key_is_constant) {
- return MemOperand(base, (constant_key << element_size_shift) + base_offset);
- }
-
- bool needs_shift =
- (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
-
- if (!(base_offset || needs_shift)) {
- return MemOperand(base, key);
- }
-
- if (needs_shift) {
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
- key = scratch;
- }
-
- if (base_offset) {
- __ Add(scratch, key, base_offset, r0);
- }
-
- return MemOperand(base, scratch);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ subi(result, sp, Operand(2 * kPointerSize));
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check if the calling frame is an arguments adaptor frame.
- __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- result,
- MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(result,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(eq, result, scratch, fp);
- } else {
- Label done, adapted;
- __ beq(&adapted);
- __ mr(result, fp);
- __ b(&done);
-
- __ bind(&adapted);
- __ mr(result, scratch);
- __ bind(&done);
- }
- } else {
- __ mr(result, fp);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If there is no arguments adaptor frame, the number of arguments is fixed.
- __ cmp(fp, elem);
- __ mov(result, Operand(scope()->num_parameters()));
- __ beq(&done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, result_in_receiver;
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions or builtins.
- __ LoadP(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
- __ bne(&result_in_receiver, cr0);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ cmp(receiver, scratch);
- __ beq(&global_object);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(receiver, scratch);
- __ beq(&global_object);
-
- // Deoptimize if the receiver is not a JS object.
- __ TestIfSmi(receiver, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ b(&result_in_receiver);
- __ bind(&global_object);
- __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
-
- if (result.is(receiver)) {
- __ bind(&result_in_receiver);
- } else {
- Label result_ok;
- __ b(&result_ok);
- __ bind(&result_in_receiver);
- __ mr(result, receiver);
- __ bind(&result_ok);
- }
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DCHECK(receiver.is(r3)); // Used for parameter count.
- DCHECK(function.is(r4)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(r3));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpli(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ mr(receiver, length);
- // The arguments are at a one pointer size offset from elements.
- __ addi(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ cmpi(length, Operand::Zero());
- __ beq(&invoke);
- __ mtctr(length);
- __ bind(&loop);
- __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
- __ LoadPX(scratch, MemOperand(elements, r0));
- __ push(scratch);
- __ addi(length, length, Operand(-1));
- __ bdnz(&loop);
-
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(r3);
- // It is safe to use r6, r7 and r8 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) r6 (new.target) will be initialized below.
- PrepareForTailCall(actual, r6, r7, r8);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver which is r3, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- Register argument_reg = EmitLoadRegister(argument, ip);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- DCHECK(result.is(cp));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- __ Move(scratch0(), instr->hydrogen()->declarations());
- __ push(scratch0());
- __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
- __ push(scratch0());
- __ Move(scratch0(), instr->hydrogen()->feedback_vector());
- __ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = r4;
-
- LPointerMap* pointers = instr->pointer_map();
-
- if (can_invoke_directly) {
- // Change context.
- __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ mov(r3, Operand(arity));
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- if (is_tail_call) {
- __ JumpToJSEntry(ip);
- } else {
- __ CallJSEntry(ip);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- DCHECK(instr->context() != NULL);
- DCHECK(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ cmpwi(exponent, Operand::Zero());
- // Move the input to the result if necessary.
- __ Move(result, input);
- __ bge(&done);
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r4) ? r3 : r4;
- Register tmp2 = input.is(r5) ? r3 : r5;
- Register tmp3 = input.is(r6) ? r3 : r6;
- Register tmp4 = input.is(r7) ? r3 : r7;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
- // Set the pointer to the new heap number in tmp1.
- if (!tmp1.is(r3)) __ mr(tmp1, r3);
- // Restore input after the call to the runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
- __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitMathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Label done;
- __ cmpi(input, Operand::Zero());
- __ Move(result, input);
- __ bge(&done);
- __ li(r0, Operand::Zero()); // clear xer
- __ mtxer(r0);
- __ neg(result, result, SetOE, SetRC);
- // Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
- __ bind(&done);
-}
-
-
-#if V8_TARGET_ARCH_PPC64
-void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Label done;
- __ cmpwi(input, Operand::Zero());
- __ Move(result, input);
- __ bge(&done);
-
- // Deoptimize on overflow.
- __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
- __ cmpw(input, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
-
- __ neg(result, result);
- __ bind(&done);
-}
-#endif
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ fabs(result, input);
-#if V8_TARGET_ARCH_PPC64
- } else if (r.IsInteger32()) {
- EmitInteger32MathAbs(instr);
- } else if (r.IsSmi()) {
-#else
- } else if (r.IsSmiOrInteger32()) {
-#endif
- EmitMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- DoubleRegister output_reg = ToDoubleRegister(instr->result());
- __ frim(output_reg, input_reg);
-}
-
-void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register input_high = scratch0();
- Register scratch = ip;
- Label done, exact;
-
- __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
- &exact);
- DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- __ bind(&exact);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ cmpi(result, Operand::Zero());
- __ bne(&done);
- __ cmpwi(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
-}
-
-void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- DoubleRegister output_reg = ToDoubleRegister(instr->result());
- DoubleRegister dot_five = double_scratch0();
- Label done;
-
- __ frin(output_reg, input_reg);
- __ fcmpu(input_reg, kDoubleRegZero);
- __ bge(&done);
- __ fcmpu(output_reg, input_reg);
- __ beq(&done);
-
- // Negative, non-integer case
- __ LoadDoubleLiteral(dot_five, 0.5, r0);
- __ fadd(output_reg, input_reg, dot_five);
- __ frim(output_reg, output_reg);
- // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
- __ fabs(output_reg, output_reg);
- __ fneg(output_reg, output_reg);
-
- __ bind(&done);
-}
-
-void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
- DoubleRegister input_plus_dot_five = double_scratch1;
- Register scratch1 = scratch0();
- Register scratch2 = ip;
- DoubleRegister dot_five = double_scratch0();
- Label convert, done;
-
- __ LoadDoubleLiteral(dot_five, 0.5, r0);
- __ fabs(double_scratch1, input);
- __ fcmpu(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- // If input is in [-0.5, -0], the result is -0.
- // If input is in [+0, +0.5[, the result is +0.
- // If the input is +0.5, the result is 1.
- __ bgt(&convert); // Out of [-0.5, +0.5].
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // [-0.5, -0] (negative) yields minus zero.
- __ TestDoubleSign(input, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- __ fcmpu(input, dot_five);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ li(result, Operand(1));
- __ isel(lt, result, r0, result);
- __ b(&done);
- } else {
- Label return_zero;
- __ bne(&return_zero);
- __ li(result, Operand(1)); // +0.5.
- __ b(&done);
- // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
- // flag kBailoutOnMinusZero.
- __ bind(&return_zero);
- __ li(result, Operand::Zero());
- __ b(&done);
- }
-
- __ bind(&convert);
- __ fadd(input_plus_dot_five, input, dot_five);
- // Reuse dot_five (double_scratch0) as we no longer need this value.
- __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
- double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- DoubleRegister output_reg = ToDoubleRegister(instr->result());
- __ frsp(output_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ fsqrt(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = double_scratch0();
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label skip, done;
-
- __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
- __ fcmpu(input, temp);
- __ bne(&skip);
- __ fneg(result, temp);
- __ b(&done);
-
- // Add +0 to convert -0 to +0.
- __ bind(&skip);
- __ fadd(result, input, kDoubleRegZero);
- __ fsqrt(result, result);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
-  // Having marked this as a call, we can use any registers.
-  // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d2));
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(d1));
- DCHECK(ToDoubleRegister(instr->result()).is(d3));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- DCHECK(!r10.is(tagged_exponent));
- __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r10, ip);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ cntlzw(result, input);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ cmpi(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mr(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r4));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use r6, r7 and r8 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) r6 (new.target) will be initialized below.
- PrepareForTailCall(actual, r6, r7, r8);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(r4, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(r3));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ Jump(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallJSEntry(ip);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- __ mov(r3, Operand(instr->arity()));
- __ Move(r5, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-      // The single argument is the requested length: a zero length keeps the
-      // packed elements kind, while a non-zero length needs the holey
-      // variant, so inspect the first argument.
- __ LoadP(r8, MemOperand(sp, 0));
- __ cmpi(r8, Operand::Zero());
- __ beq(&packed_case);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ b(&done);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ addi(code_object, code_object,
- Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(code_object,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ Add(result, base, ToInteger32(offset), r0);
- } else {
- Register offset = ToRegister(instr->offset());
- __ add(result, base, offset);
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- HStoreNamedField* hinstr = instr->hydrogen();
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- Register scratch = scratch0();
- HObjectAccess access = hinstr->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = MemOperand(object, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- return;
- }
-
- __ AssertNotSmi(object);
-
-#if V8_TARGET_ARCH_PPC64
- DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
- IsInteger32(LConstantOperand::cast(instr->value())));
-#else
- DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
-#endif
- if (!FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!hinstr->has_transition());
- DCHECK(!hinstr->NeedsWriteBarrier());
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ stfd(value, FieldMemOperand(object, offset));
- return;
- }
-
- if (hinstr->has_transition()) {
- Handle<Map> transition = hinstr->transition_map();
- AddDeprecationDependency(transition);
- __ mov(scratch, Operand(transition));
- __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
- if (hinstr->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register record_dest = object;
- Register record_value = no_reg;
- Register record_scratch = scratch;
-#if V8_TARGET_ARCH_PPC64
- if (FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ stfd(value, FieldMemOperand(object, offset));
- if (hinstr->NeedsWriteBarrier()) {
- record_value = ToRegister(instr->value());
- }
- } else {
- if (representation.IsSmi() &&
- hinstr->value()->representation().IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- // 64-bit Smi optimization
- // Store int value directly to upper half of the smi.
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
-#endif
- if (access.IsInobject()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = FieldMemOperand(object, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- record_value = value;
- } else {
- Register value = ToRegister(instr->value());
- __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- MemOperand operand = FieldMemOperand(scratch, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- record_dest = scratch;
- record_value = value;
- record_scratch = object;
- }
-#if V8_TARGET_ARCH_PPC64
- }
-#endif
-
- if (hinstr->NeedsWriteBarrier()) {
- __ RecordWriteField(record_dest, offset, record_value, record_scratch,
- GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Representation representation = instr->hydrogen()->length()->representation();
- DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
- DCHECK(representation.IsSmiOrInteger32());
-
- Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
- if (instr->length()->IsConstantOperand()) {
- int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
- Register index = ToRegister(instr->index());
- if (representation.IsSmi()) {
- __ CmplSmiLiteral(index, Smi::FromInt(length), r0);
- } else {
- __ Cmplwi(index, Operand(length), r0);
- }
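-    // The comparison above has its operands swapped (index vs. length), so
-    // commute the condition before it is used for the deopt check below.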
- cc = CommuteCondition(cc);
- } else if (instr->index()->IsConstantOperand()) {
- int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
- Register length = ToRegister(instr->length());
- if (representation.IsSmi()) {
- __ CmplSmiLiteral(length, Smi::FromInt(index), r0);
- } else {
- __ Cmplwi(length, Operand(index), r0);
- }
- } else {
- Register index = ToRegister(instr->index());
- Register length = ToRegister(instr->length());
- if (representation.IsSmi()) {
- __ cmpl(length, index);
- } else {
- __ cmplw(length, index);
- }
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ b(NegateCondition(cc), &done);
- __ stop("eliminated bounds check failed");
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- Register address = scratch0();
- DoubleRegister value(ToDoubleRegister(instr->value()));
- if (key_is_constant) {
- if (constant_key != 0) {
- __ Add(address, external_pointer, constant_key << element_size_shift,
- r0);
- } else {
- address = external_pointer;
- }
- } else {
- __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
- __ add(address, external_pointer, r0);
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
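-      // Single-precision store: round the double value to float32 before
-      // storing it.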
- __ frsp(double_scratch0(), value);
- __ stfs(double_scratch0(), MemOperand(address, base_offset));
- } else { // Storing doubles, not floats.
- __ stfd(value, MemOperand(address, base_offset));
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand =
- PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
- constant_key, element_size_shift, base_offset);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- if (key_is_constant) {
- __ StoreByte(value, mem_operand, r0);
- } else {
- __ stbx(value, mem_operand);
- }
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- if (key_is_constant) {
- __ StoreHalfWord(value, mem_operand, r0);
- } else {
- __ sthx(value, mem_operand);
- }
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- if (key_is_constant) {
- __ StoreWord(value, mem_operand, r0);
- } else {
- __ stwx(value, mem_operand);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- DoubleRegister double_scratch = double_scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- int base_offset = instr->base_offset() + constant_key * kDoubleSize;
- if (!key_is_constant) {
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
- __ add(scratch, elements, scratch);
- elements = scratch;
- }
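-  // A large base offset does not fit the 16-bit displacement of the store
-  // instruction, so fold it into the base register first.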
- if (!is_int16(base_offset)) {
- __ Add(scratch, elements, base_offset, r0);
- base_offset = 0;
- elements = scratch;
- }
-
- if (instr->NeedsCanonicalization()) {
- // Turn potential sNaN value into qNaN.
- __ CanonicalizeNaN(double_scratch, value);
- __ stfd(double_scratch, MemOperand(elements, base_offset));
- } else {
- __ stfd(value, MemOperand(elements, base_offset));
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- HStoreKeyed* hinstr = instr->hydrogen();
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = instr->base_offset();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- DCHECK(!hinstr->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- store_base = elements;
- } else {
-    // Even though the HStoreKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
-    // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (hinstr->key()->representation().IsSmi()) {
- __ SmiToPtrArrayOffset(scratch, key);
- } else {
- __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
- }
- __ add(scratch, elements, scratch);
- }
-
- Representation representation = hinstr->value()->representation();
-
-#if V8_TARGET_ARCH_PPC64
- // 64-bit Smi optimization
- if (representation.IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
- // Store int value directly to upper half of the smi.
- offset = SmiWordOffset(offset);
- }
-#endif
-
- __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
- r0);
-
- if (hinstr->NeedsWriteBarrier()) {
- SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK
- : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Add(key, store_base, offset, r0);
- __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed,
- hinstr->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
-  // Dispatch by case: external/typed array, fast double array, or fast
-  // fixed array.
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = r3;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ b(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
- __ ble(deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
- __ bge(deferred->entry());
- } else {
- __ cmpw(ToRegister(key), ToRegister(current_capacity));
- __ bge(deferred->entry());
- }
-
- if (instr->elements()->IsRegister()) {
- __ Move(result, ToRegister(instr->elements()));
- } else {
- __ LoadP(result, ToMemOperand(instr->elements()));
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = r3;
- __ li(result, Operand::Zero());
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ Move(result, ToRegister(instr->object()));
- } else {
- __ LoadP(result, ToMemOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ LoadSmiLiteral(r6, Smi::FromInt(int_key));
- } else {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- Label is_smi;
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(r6, ToRegister(key));
-#else
- // Deopt if the key is outside Smi range. The stub expects Smi and would
-      // bump the elements into dictionary mode (and trigger a deopt) anyway.
- __ SmiTagCheckOverflow(r6, ToRegister(key), r0);
- __ BranchOnNoOverflow(&is_smi);
- __ PopSafepointRegisters();
- DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
- __ bind(&is_smi);
-#endif
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ TestIfSmi(result, r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ Cmpi(scratch, Operand(from_map), r0);
- __ bne(&not_applicable);
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, Operand(to_map));
- __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
- r0);
- // Write barrier.
- __ RecordWriteForMap(object_reg, new_map_reg, scratch,
- GetLinkRegisterState(), kDontSaveFPRegs);
- } else {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(object_reg.is(r3));
- PushSafepointRegistersScope scope(this);
- __ Move(r4, to_map);
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kLazyDeopt);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r4));
- DCHECK(ToRegister(instr->right()).is(r3));
- StringAddStub stub(isolate(), instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new (zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(
- masm(), ToRegister(instr->string()), ToRegister(instr->index()),
- ToRegister(instr->result()), deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ li(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
- instr->context());
- __ AssertSmi(r3);
- __ SmiUntag(r3);
- __ StoreToSafepointRegisterSlot(r3, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new (zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- DCHECK(!char_code.is(result));
-
- __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
- __ bgt(deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
- __ add(result, result, r0);
- __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
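-  // An undefined cache entry means the character is not in the
-  // single-character string cache; build the string in deferred code.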
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result, ip);
- __ beq(deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ li(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(r3, result);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- DCHECK(output->IsDoubleRegister());
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ LoadP(scratch, ToMemOperand(input));
- __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
- } else {
- __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2(), SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
-
- DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(dst, src);
-#else
- __ SmiTagCheckOverflow(dst, src, r0);
- __ BranchOnOverflow(deferred->entry());
-#endif
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2(), UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
- __ Cmpli(input, Operand(Smi::kMaxValue), r0);
- __ bgt(deferred->entry());
- __ SmiTag(result, input);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
- LOperand* temp1, LOperand* temp2,
- IntegerSignedness signedness) {
- Label done, slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- Register tmp1 = scratch0();
- Register tmp2 = ToRegister(temp1);
- Register tmp3 = ToRegister(temp2);
- DoubleRegister dbl_scratch = double_scratch0();
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
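-      // Tagging overflowed and flipped the sign bit of the value now in dst;
-      // untag and XOR the sign bit back to recover the original integer.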
- __ SmiUntag(src, dst);
- __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
- }
- __ ConvertIntToDouble(src, dbl_scratch);
- } else {
- __ ConvertUnsignedIntToDouble(src, dbl_scratch);
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
- __ b(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ li(dst, Operand::Zero());
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!dst.is(cp)) {
- __ li(cp, Operand::Zero());
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r3, dst);
- }
-
-  // Done. Store the value in dbl_scratch into the value field of the
-  // allocated heap number.
- __ bind(&done);
- __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
- } else {
- __ b(deferred->entry());
- }
- __ bind(deferred->exit());
- __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ li(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(cp)) {
- __ li(cp, Operand::Zero());
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r3, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
- }
-#if !V8_TARGET_ARCH_PPC64
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- __ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
- } else {
-#endif
- __ SmiTag(output, input);
-#if !V8_TARGET_ARCH_PPC64
- }
-#endif
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
-    // If the input is a HeapObject, the value of scratch won't be zero.
- __ andi(scratch, input, Operand(kHeapObjectTag));
- __ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- DoubleRegister result_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Register scratch = scratch0();
- DCHECK(!result_reg.is(double_scratch0()));
-
- Label convert, load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- if (can_convert_undefined_to_nan) {
- __ bne(&convert);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- }
-    // Load the heap number value.
- __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ TestDoubleIsMinusZero(result_reg, scratch, ip);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- __ b(&done);
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ b(&done);
- }
- } else {
- __ SmiUntag(scratch, input_reg);
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ ConvertIntToDouble(scratch, result_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
-
- DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // Heap number map check.
- __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, ip);
-
- if (instr->truncating()) {
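-    // When truncating, oddballs (undefined, booleans, null) are accepted as
-    // well; anything that is neither a heap number nor an oddball deopts.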
- Label truncate;
- __ beq(&truncate);
- __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ bind(&truncate);
- __ mr(scratch2, input_reg);
- __ TruncateHeapNumberToI(input_reg, scratch2);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
- __ lfd(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      // Preserve the heap number pointer in scratch2 for the minus-zero
-      // check below.
- __ mr(scratch2, input_reg);
- }
- __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
- double_scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmpi(input_reg, Operand::Zero());
- __ bne(&done);
- __ TestHeapNumberSign(scratch2, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- DCHECK(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
-
- // Branch to deferred code if the input is a HeapObject.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI
- : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagD(instr, input_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
- DoubleRegister double_scratch = double_scratch0();
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
- double_scratch);
-    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ cmpi(result_reg, Operand::Zero());
- __ bne(&done);
- __ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
- DoubleRegister double_scratch = double_scratch0();
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
- double_scratch);
-    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ cmpi(result_reg, Operand::Zero());
- __ bne(&done);
- __ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(result_reg);
-#else
- __ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
-#endif
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- __ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = scratch0();
-
- __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
- __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpli(scratch, Operand(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpli(scratch, Operand(last));
- DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ andi(r0, scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr,
- DeoptimizeReason::kWrongInstanceType, cr0);
- } else {
- __ andi(scratch, scratch, Operand(mask));
- __ cmpi(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ mov(ip, Operand(cell));
- __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
- __ cmp(reg, ip);
- } else {
- __ Cmpi(reg, Operand(object), r0);
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Register temp = ToRegister(instr->temp());
- Label deopt, done;
-  // If the map is not deprecated, the migration attempt does not make sense.
- __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- __ lwz(temp, FieldMemOperand(temp, Map::kBitField3Offset));
- __ TestBitMask(temp, Map::Deprecated::kMask, r0);
- __ beq(&deopt, cr0);
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ li(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(instr->pointer_map(), 1,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r3, temp);
- }
- __ TestIfSmi(temp, r0);
- __ bne(&done, cr0);
-
- __ bind(&deopt);
-  // For the "al" condition the cr operand is not used, so just pass cr0.
- DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- Register object = ToRegister(instr->value());
- Register map_reg = ToRegister(instr->temp());
-
- __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new (zone()) DeferredCheckMaps(this, instr, object);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(map_reg, map, &success);
- __ beq(&success);
- }
-
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(map_reg, map, &success);
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ bne(deferred->entry());
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
-
- // Check for heap number
- __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
- __ beq(&heap_number);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ li(result_reg, Operand::Zero());
- __ b(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
- __ b(&done);
-
- // smi
- __ bind(&is_smi);
- __ ClampUint8(result_reg, result_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
-
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ LoadIntLiteral(scratch, size - kHeapObjectTag);
- } else {
- __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
- }
- __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- Label loop;
- __ bind(&loop);
- __ subi(scratch, scratch, Operand(kPointerSize));
- __ StorePX(scratch2, MemOperand(result, scratch));
- __ cmpi(scratch, Operand::Zero());
- __ bge(&loop);
- }
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ LoadSmiLiteral(result, Smi::kZero);
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(size);
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-#if !V8_TARGET_ARCH_PPC64
- if (size >= 0 && size <= Smi::kMaxValue) {
-#endif
- __ Push(Smi::FromInt(size));
-#if !V8_TARGET_ARCH_PPC64
- } else {
- // We should never get here at runtime => abort
- __ stop("invalid allocation size");
- return;
- }
-#endif
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Push(Smi::FromInt(flags));
-
- CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(r3, result);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- // If the allocation folding dominator allocate triggered a GC, allocation
- // happend in the runtime. We have to reset the top pointer to virtually
- // undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- Register top_address = scratch0();
- __ subi(r3, r3, Operand(kHeapObjectTag));
- __ mov(top_address, Operand(allocation_top));
- __ StoreP(r3, MemOperand(top_address));
- __ addi(r3, r3, Operand(kHeapObjectTag));
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->value()).is(r6));
- DCHECK(ToRegister(instr->result()).is(r3));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ mov(r3, Operand(isolate()->factory()->number_string()));
- __ b(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
- instr->type_literal());
- if (final_branch_condition != kNoCondition) {
- EmitBranch(instr, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name) {
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
- final_branch_condition = lt;
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ beq(true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ beq(false_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ ExtractBit(r0, scratch, Map::kIsUndetectable);
- __ cmpi(r0, Operand::Zero());
- final_branch_condition = ne;
-
- } else if (String::Equals(type_name, factory->function_string())) {
- __ JumpIfSmi(input, false_label);
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ andi(scratch, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ cmpi(scratch, Operand(1 << Map::kIsCallable));
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->object_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ beq(true_label);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
- __ blt(false_label);
- // Check for callable or undetectable objects => false.
- __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ andi(r0, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ cmpi(r0, Operand::Zero());
- final_branch_condition = eq;
-
- } else {
- __ b(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
-
- DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bge(&done);
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new (zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ blt(deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
-    // This will be done explicitly when emitting the call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r3);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ CmpSmiLiteral(result, Smi::kZero, r0);
- __ bne(&load_cache);
- __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ b(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ LoadP(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result, Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object, index);
- __ li(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(instr->pointer_map(), 2,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r3, result);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
- Register result, Register object, Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- result_(result),
- object_(object),
- index_(index) {}
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register result_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- DeferredLoadMutableDouble* deferred;
- deferred = new (zone())
- DeferredLoadMutableDouble(this, instr, result, object, index);
-
- Label out_of_object, done;
-
- __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
- __ bne(deferred->entry(), cr0);
- __ ShiftRightArithImm(index, index, 1);
-
- __ cmpi(index, Operand::Zero());
- __ blt(&out_of_object);
-
- __ SmiToPtrArrayOffset(r0, index);
- __ add(scratch, object, r0);
- __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ b(&done);
-
- __ bind(&out_of_object);
- __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-  // The index is equal to the negated out-of-object property index plus 1.
- __ SmiToPtrArrayOffset(r0, index);
- __ sub(scratch, result, r0);
- __ LoadP(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
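
Note: the EnsureSpaceForLazyDeopt helper deleted above pads the instruction stream with nops so that at least space_needed bytes separate the previous lazy-bailout point from the current PC, leaving room to patch in a lazy deoptimization call. A minimal standalone sketch of that padding arithmetic (illustrative only, and assuming the fixed 4-byte PPC instruction size in place of Assembler::kInstrSize):

// How many nops are needed so current_pc ends up at least space_needed
// bytes past last_lazy_deopt_pc (assumes fixed 4-byte instructions).
constexpr int kInstrSize = 4;
int PaddingNops(int last_lazy_deopt_pc, int space_needed, int current_pc) {
  if (current_pc >= last_lazy_deopt_pc + space_needed) return 0;
  int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
  // The deleted code asserts padding_size is a whole number of instructions.
  return padding_size / kInstrSize;
}
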
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
deleted file mode 100644
index 32b9e18487..0000000000
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
-#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
-
-#include "src/ast/scopes.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
-#include "src/crankshaft/ppc/lithium-ppc.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen : public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- LinkRegisterStatus GetLinkRegisterState() const {
- return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LConstantOperand must be an Integer32 or Smi
- void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- intptr_t ToRepresentation(LConstantOperand* op,
- const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
- LOperand* temp1, LOperand* temp2,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
- Register object, Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key, Register base,
- bool key_is_constant, bool key_is_tagged,
- int constant_key, int element_size_shift,
- int base_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
-// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- Register scratch0() { return kLithiumScratch; }
- DoubleRegister double_scratch0() { return kScratchDoubleReg; }
-
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
- LInstruction* instr, SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function, int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
- LInstruction* instr, LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in r4.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason, CRegister cr = cr7);
-
- void AddToTranslation(LEnvironment* environment, Translation* translation,
- LOperand* op, bool is_tagged, bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- MemOperand BuildSeqStringOperand(Register string, LOperand* index,
- String::Encoding encoding);
-
- void EmitMathAbs(LMathAbs* instr);
-#if V8_TARGET_ARCH_PPC64
- void EmitInteger32MathAbs(LMathAbs* instr);
-#endif
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template <class InstrType>
- void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition,
- CRegister cr = cr7);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- DoubleRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
-  // the true and false labels should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
-  // the true and false labels should be made, to optimize fallthrough.
- Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
- int* offset, AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
-  // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen);
-
- ~PushSafepointRegistersScope();
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
diff --git a/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
deleted file mode 100644
index 4e249808f7..0000000000
--- a/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
-
-#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
-
-namespace v8 {
-namespace internal {
-
-static const Register kSavedValueRegister = {11};
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- root_index_(0),
- in_cycle_(false),
- saved_destination_(NULL) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
-      root_index_ = i; // Any cycle is found by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // We can only find a cycle, when doing a depth-first traversal of moves,
-  // by encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
-  // offsets (more than 1K or 4K) require us to spill the saved value to
- // the stack, to free up the register.
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
-      // If there is a blocking, pending move, it must be moves_[root_index_]
-      // and all other moves with the same source as moves_[root_index_] are
-      // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
- DCHECK(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mr(kSavedValueRegister, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ fmr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ lfd(kScratchDoubleReg, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- DCHECK(in_cycle_);
- DCHECK(saved_destination_ != NULL);
-
-  // The spilled value is in kSavedValueRegister or kScratchDoubleReg.
- if (saved_destination_->IsRegister()) {
- __ mr(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
- } else if (saved_destination_->IsStackSlot()) {
- __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ fmr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ stfd(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mr(cgen_->ToRegister(destination), source_register);
- } else {
- DCHECK(destination->IsStackSlot());
- __ StoreP(source_register, cgen_->ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ LoadP(cgen_->ToRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- __ LoadP(ip, source_operand);
- __ StoreP(ip, destination_operand);
- } else {
- __ LoadP(kSavedValueRegister, source_operand);
- __ StoreP(kSavedValueRegister, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- cgen_->EmitLoadIntegerConstant(constant_source, dst);
- } else {
- __ Move(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DoubleRegister result = cgen_->ToDoubleRegister(destination);
- double v = cgen_->ToDouble(constant_source);
- __ LoadDoubleLiteral(result, v, ip);
- } else {
- DCHECK(destination->IsStackSlot());
- DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
- cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
- } else {
- __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
- }
- __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ fmr(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ stfd(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ lfd(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
-// kScratchDoubleReg was used to break the cycle,
-// but kSavedValueRegister is free.
-#if V8_TARGET_ARCH_PPC64
- __ ld(kSavedValueRegister, source_operand);
- __ std(kSavedValueRegister, destination_operand);
-#else
- MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ lwz(kSavedValueRegister, source_operand);
- __ stw(kSavedValueRegister, destination_operand);
- __ lwz(kSavedValueRegister, source_high_operand);
- __ stw(kSavedValueRegister, destination_high_operand);
-#endif
- } else {
- __ lfd(kScratchDoubleReg, source_operand);
- __ stfd(kScratchDoubleReg, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
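
Note: the gap resolver deleted above handles a cycle of parallel moves by spilling the source of the move that closes the cycle into a scratch register (BreakCycle), emitting the remaining moves of the cycle, and finally writing the spilled value to its saved destination (RestoreValue). A minimal sketch of the same idea for the two-move cycle {a <- b, b <- a}, written as plain C++ rather than emitted PPC code (illustrative only):

#include <cassert>

int main() {
  int a = 1, b = 2;
  int scratch = a;  // BreakCycle: spill the source of the cycle-closing move.
  a = b;            // The other move in the cycle is now unblocked.
  b = scratch;      // RestoreValue: write the spilled value to its destination.
  assert(a == 2 && b == 1);
  return 0;
}
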
diff --git a/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
deleted file mode 100644
index 6eeea5eee5..0000000000
--- a/deps/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-#define V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
deleted file mode 100644
index 42ec45242f..0000000000
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ /dev/null
@@ -1,2368 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/ppc/lithium-ppc.h"
-
-#include <sstream>
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD:
- return "add-d";
- case Token::SUB:
- return "sub-d";
- case Token::MUL:
- return "mul-d";
- case Token::DIV:
- return "div-d";
- case Token::MOD:
- return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD:
- return "add-t";
- case Token::SUB:
- return "sub-t";
- case Token::MUL:
- return "mul-t";
- case Token::MOD:
- return "mod-t";
- case Token::DIV:
- return "div-t";
- case Token::BIT_AND:
- return "bit-and-t";
- case Token::BIT_OR:
- return "bit-or-t";
- case Token::BIT_XOR:
- return "bit-xor-t";
- case Token::ROR:
- return "ror-t";
- case Token::SHL:
- return "shl-t";
- case Token::SAR:
- return "sar-t";
- case Token::SHR:
- return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(), true_block_id(),
- false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
-  // Skip an extra slot for a double-width slot.
- if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new (zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value,
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new (zone())
- LUnallocated(LUnallocated::NONE, LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
-  // If the instruction does not have side effects, lazy deoptimization
-  // after the call will try to deoptimize to the point before the call.
-  // Thus we still need to attach an environment to this call even if
-  // the call sequence cannot deoptimize eagerly.
- bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new (zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LUnallocated* LChunkBuilder::TempDoubleRegister() {
- LUnallocated* operand =
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new (zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new (zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
- // We call a C function for double modulo. It can't trigger a GC. We need
-    // to use a fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, r4);
- LOperand* right_operand = UseFixed(right, r3);
- LArithmeticT* result =
- new (zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
-    // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
-    // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new (zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new (zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new (zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
-  // register constraint. In this case, the register allocator won't see an
-  // interference between the split child and the use-at-start (it would if
-  // it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new (zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LInstruction* branch = new (zone()) LBranch(UseRegister(value));
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new (zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new (zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new (zone()) LArgumentsLength(value));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new (zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), r4);
- LOperand* receiver = UseFixed(instr->receiver(), r3);
- LOperand* length = UseFixed(instr->length(), r5);
- LOperand* elements = UseFixed(instr->elements(), r6);
- LApplyArguments* result =
- new (zone()) LApplyArguments(function, receiver, length, elements);
- return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = Use(instr->argument(i));
- AddInstruction(new (zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new (zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(new (zone())
- LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses() ? NULL
- : DefineAsRegister(new (zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new (zone()) LContext, cp);
- }
-
- return DefineAsRegister(new (zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), cp);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result =
- new (zone()) LCallWithDescriptor(descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r4);
- LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathCos:
- return DoMathCos(instr);
- case kMathSin:
- return DoMathSin(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- if (instr->representation().IsInteger32()) {
- LMathFloorI* result = new (zone()) LMathFloorI(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathFloorD* result = new (zone()) LMathFloorD(input);
- return DefineAsRegister(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- if (instr->representation().IsInteger32()) {
- LOperand* temp = TempDoubleRegister();
- LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathRoundD* result = new (zone()) LMathRoundD(input);
- return DefineAsRegister(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- Representation r = instr->value()->representation();
- LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
- ? NULL
- : UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LInstruction* result =
- DefineAsRegister(new (zone()) LMathAbs(context, input));
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new (zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathSqrt* result = new (zone()) LMathSqrt(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathPowHalf* result = new (zone()) LMathPowHalf(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r4);
- LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineAsRegister(new (zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LDivI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
- (!instr->IsMathFloorOfDiv() &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
- ? NULL
- : TempRegister();
- LInstruction* result = DefineAsRegister(
- new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LModI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- HValue* left = instr->BetterLeftOperand();
- HValue* right = instr->BetterRightOperand();
- LOperand* left_op;
- LOperand* right_op;
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-
- int32_t constant_value = 0;
- if (right->IsConstant()) {
- HConstant* constant = HConstant::cast(right);
- constant_value = constant->Integer32Value();
-      // Constants -1, 0 and 1 can be optimized even if the result can
-      // overflow. Other constants can be optimized only when overflow is
-      // impossible.
- if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
- left_op = UseRegisterAtStart(left);
- right_op = UseConstant(right);
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- LMulI* mul = new (zone()) LMulI(left_op, right_op);
- if (right_op->IsConstantOperand()
- ? ((can_overflow && constant_value == -1) ||
- (bailout_on_minus_zero && constant_value <= 0))
- : (can_overflow || bailout_on_minus_zero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- if (instr->left()->IsConstant() &&
- !instr->CheckFlag(HValue::kCanOverflow)) {
- // If lhs is constant, do reverse subtraction instead.
- return DoRSub(instr);
- }
-
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new (zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
-
- // Note: The lhs of the subtraction becomes the rhs of the
- // reverse-subtraction.
- LOperand* left = UseRegisterAtStart(instr->right());
- LOperand* right = UseOrConstantAtStart(instr->left());
- LRSubI* rsb = new (zone()) LRSubI(left, right);
- LInstruction* result = DefineAsRegister(rsb);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(
- new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
- LOperand* minuend_op = UseRegisterAtStart(minuend);
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
-
- return DefineSameAsFirst(
- new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- LAddI* add = new (zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LAddI* add = new (zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new (zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
-  // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right =
- exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), d2)
- : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- LPower* result = new (zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r4);
- LOperand* right = UseFixed(instr->right(), r3);
- LCmpT* result = new (zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new (zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new (zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new (zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new (zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r4);
- LOperand* right = UseFixed(instr->right(), r3);
- LStringCompareAndBranch* result =
- new (zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LHasInstanceTypeAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return new (zone()) LClassOfTestAndBranch(value, TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- return new (zone()) LSeqStringSetChar(context, string, index, value);
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseRegisterOrConstantAtStart(instr->length())
- : UseRegisterAtStart(instr->length());
- LInstruction* result = new (zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new (zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineAsRegister(new (zone()) LNumberUntagD(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new (zone()) LDummyUse(value));
- }
- return AssignEnvironment(
- DefineSameAsFirst(new (zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new (zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempDoubleRegister();
- LInstruction* result =
- DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new (zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new (zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new (zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new (zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new (zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new (zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LInstruction* result =
- AssignEnvironment(new (zone()) LCheckMaps(value, temp));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new (zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new (zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- LClampTToUint8* result =
- new (zone()) LClampTToUint8(reg, TempDoubleRegister());
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new (zone())
- LReturn(UseFixed(instr->value(), r3), context, parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new (zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new (zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new (zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new (zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new (zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new (zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new (zone()) LStoreContextSlot(context, value);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new (zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new (zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseRegister(instr->elements());
- } else {
- obj = UseRegisterAtStart(instr->elements());
- }
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK((instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- if (needs_write_barrier) {
- object = UseTempRegister(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- }
-
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- }
-
- DCHECK((instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK(instr->elements()->representation().IsExternal());
- LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), r3);
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new (zone()) LTransitionElementsKind(object, context, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LTrapAllocationMemento* result =
- new (zone()) LTrapAllocationMemento(object, temp1, temp2);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, r3);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map =
- instr->has_transition() && instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
-  // We need a temporary register for the write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new (zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r4);
- LOperand* right = UseFixed(instr->right(), r3);
- return MarkAsCall(
- DefineFixed(new (zone()) LStringAdd(context, left, right), r3), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new (zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new (zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- if (instr->IsAllocationFolded()) {
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new (zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new (zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseRegisterOrConstantAtStart(instr->length());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), r6);
- LTypeof* result = new (zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new (zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new (zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
-  // Only replay the binding of the arguments object if it wasn't removed
-  // from the graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new (zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer =
- current_block_->last_environment()->DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->enumerable(), r3);
- LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(
- DefineAsRegister(new (zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
deleted file mode 100644
index a504c67e48..0000000000
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ /dev/null
@@ -1,2415 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
-#define V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadRoot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathCos) \
- V(MathSin) \
- V(MathExp) \
- V(MathFloorD) \
- V(MathFloorI) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRoundD) \
- V(MathRoundI) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(MultiplySubD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(RSubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {}
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
-// Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
-// Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator support.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits : public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template <int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template <int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block) : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const override { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new (zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) {}
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) {}
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template <int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LModI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplySubD(LOperand* minuend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = minuend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* minuend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const { return hydrogen()->representation().IsDouble(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-// Math.floor with a double result.
-class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.floor with an integer result.
-class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.round with a double result.
-class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.round with an integer result.
-class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRoundI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LRSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LRSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) {}
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- const CallInterfaceDescriptor descriptor() { return descriptor_; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- bool NeedsCanonicalization() {
- if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
- hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
- return false;
- }
- return hydrogen()->NeedsCanonicalization();
- }
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
- public:
- LTransitionElementsKind(LOperand* object, LOperand* context,
- LOperand* new_map_temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
- public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = object;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
-// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
- LInstruction* DoRSub(HSub* instr);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only at
- // the instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LUnallocated* TempDoubleRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach an environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr, HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
diff --git a/deps/v8/src/crankshaft/s390/OWNERS b/deps/v8/src/crankshaft/s390/OWNERS
deleted file mode 100644
index 752e8e3d81..0000000000
--- a/deps/v8/src/crankshaft/s390/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-jyan@ca.ibm.com
-dstence@us.ibm.com
-joransiu@ca.ibm.com
-mbrandy@us.ibm.com
-michael_dawson@ca.ibm.com
-bjaideep@ca.ibm.com
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
deleted file mode 100644
index 91bb03e647..0000000000
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ /dev/null
@@ -1,5616 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/s390/lithium-codegen-s390.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
- LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
-}
-
-LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-}
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
- GenerateJumpTable() && GenerateSafepointTable();
-}
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ StoreDouble(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ LoadDouble(DoubleRegister::from_code(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- // r3: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
- // ip: Our own function entry (required by the prologue)
- }
-
- int prologue_offset = masm_->pc_offset();
-
- if (prologue_offset) {
- // Prologue logic requires its starting address in ip and the
- // corresponding offset from the function entry. Need to add
- // 4 bytes for the size of AHI/AGHI that AddP expands into.
- prologue_offset += sizeof(FourByteInstr);
- __ AddP(ip, ip, Operand(prologue_offset));
- }
- info()->set_prologue_offset(prologue_offset);
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
- if (FLAG_debug_code) {
- __ Push(r2, r3);
- __ mov(r2, Operand(slots * kPointerSize));
- __ mov(r3, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
- __ lay(r2, MemOperand(r2, -kPointerSize));
- __ CmpP(r2, Operand::Zero());
- __ bne(&loop);
- __ Pop(r2, r3);
- }
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- return !is_aborted();
-}
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info()->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is in r3.
- int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(r3);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
- Operand(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r3);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in both r2 and cp. It replaces the context
- // passed to us. It's saved on the stack and kept live in cp.
- __ LoadRR(cp, r2);
- __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ LoadP(r2, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextMemOperand(cp, var->index());
- __ StoreP(r2, target);
- // Update the write barrier. This clobbers r5 and r2.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
- GetLinkRegisterState(), kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(cp, r2, &done);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ lay(sp, MemOperand(sp, -slots * kPointerSize));
-}
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(
- ";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(), code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- __ Load(scratch0(),
- Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ PushCommonFrame(scratch0());
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- __ PopCommonFrame(scratch0());
- frame_is_built_ = false;
- }
- __ b(code->exit());
- }
- }
-
- return !is_aborted();
-}
-
-bool LCodeGen::GenerateJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets in halfwords to the table can be encoded in the
- // 32-bit signed immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table. We also don't consider the pc load delta.
- // Each entry in the jump table generates one instruction and inlines one
- // 32-bit data after it.
- // TODO(joransiu): The Int24 condition can likely be relaxed for S390
- if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
- Abort(kGeneratedCodeIsTooLarge);
- }
-
- if (jump_table_.length() > 0) {
- Label needs_frame, call_deopt_entry;
-
- Comment(";;; -------------------- Jump table --------------------");
- Address base = jump_table_[0].address;
-
- Register entry_offset = scratch0();
-
- int length = jump_table_.length();
- for (int i = 0; i < length; i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
-
- DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
-
- // Second-level deopt table entries are contiguous and small, so instead
- // of loading the full, absolute address of each one, load an immediate
- // offset which will be added to the base address later.
- __ mov(entry_offset, Operand(entry - base));
-
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- Comment(";;; call deopt with frame");
- __ PushCommonFrame();
- __ b(r14, &needs_frame);
- } else {
- __ b(r14, &call_deopt_entry);
- }
- }
-
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ Load(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ push(ip);
- DCHECK(info()->IsStub());
- }
-
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
-
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
-
- // Add the base address to the offset previously loaded in entry_offset.
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
- __ AddP(ip, entry_offset, ip);
- __ Jump(ip);
- }
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-Register LCodeGen::ToRegister(int code) const {
- return Register::from_code(code);
-}
-
-DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
- return DoubleRegister::from_code(code);
-}
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle(isolate());
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- AllowDeferredHandleDereference get_number;
- DCHECK(literal->IsNumber());
- __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
- } else if (r.IsDouble()) {
- Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
- } else {
- DCHECK(r.IsSmiOrTagged());
- __ Move(scratch, literal);
- }
- return scratch;
- } else if (op->IsStackSlot()) {
- __ LoadP(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
- Register dst) {
- DCHECK(IsInteger32(const_op));
- HConstant* constant = chunk_->LookupConstant(const_op);
- int32_t value = constant->Integer32Value();
- if (IsSmi(const_op)) {
- __ LoadSmiLiteral(dst, Smi::FromInt(value));
- } else {
- __ LoadIntLiteral(dst, value);
- }
-}
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<intptr_t>(Smi::FromInt(value));
-}
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsSmi()) {
- DCHECK(constant->HasSmiValue());
- return Operand(Smi::FromInt(constant->Integer32Value()));
- } else if (r.IsInteger32()) {
- DCHECK(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
- }
- DCHECK(r.IsTagged());
- return Operand(constant->handle(isolate()));
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
- return Operand::Zero();
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand::Zero();
-}
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize;
-}
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()));
- } else {
- // Retrieve the parameter relative to the stack pointer, since there
- // is no eager stack frame.
- return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- } else {
- // Retrieve the parameter relative to the stack pointer, since there
- // is no eager stack frame.
- return MemOperand(sp,
- ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- }
-}
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation, LOperand* op,
- bool is_tagged, bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment, translation, value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer, dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
- __ Call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
- LInstruction* instr, SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- __ Move(cp, ToRegister(context));
- } else if (context->IsStackSlot()) {
- __ LoadP(cp, ToMemOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
- LInstruction* instr, LOperand* context) {
- LoadContextFromDeferred(context);
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(instr->pointer_map(), argc,
- Safepoint::kNoLazyDeopt);
-}
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index, translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type,
- CRegister cr) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Register scratch = scratch0();
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
-
- // Store the condition on the stack if necessary
- if (cond != al) {
- Label done;
- __ LoadImmP(scratch, Operand::Zero());
- __ b(NegateCondition(cond), &done, Label::kNear);
- __ LoadImmP(scratch, Operand(1));
- __ bind(&done);
- __ push(scratch);
- }
-
- Label done;
- __ Push(r3);
- __ mov(scratch, Operand(count));
- __ LoadW(r3, MemOperand(scratch));
- __ Sub32(r3, r3, Operand(1));
- __ Cmp32(r3, Operand::Zero());
- __ bne(&no_deopt, Label::kNear);
-
- __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
- __ StoreW(r3, MemOperand(scratch));
- __ Pop(r3);
-
- if (cond != al) {
- // Clean up the stack before the deoptimizer call
- __ pop(scratch);
- }
-
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-
- __ b(&done);
-
- __ bind(&no_deopt);
- __ StoreW(r3, MemOperand(scratch));
- __ Pop(r3);
-
- if (cond != al) {
- // Clean up the stack before the deoptimizer call
- __ pop(scratch);
- }
-
- __ bind(&done);
-
- if (cond != al) {
- cond = ne;
- __ CmpP(scratch, Operand::Zero());
- }
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to handle condition, build frame, or
- // restore caller doubles.
- if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
- // We often have several deopts to the same entry; reuse the last
- // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- __ b(cond, &jump_table_.last().label /*, cr*/);
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- DeoptimizeReason deopt_reason, CRegister cr) {
- Deoptimizer::BailoutType bailout_type =
- info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
-}
-
-void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kLazyDeopt);
- }
-}
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode deopt_mode) {
- DCHECK(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint =
- safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_, label->hydrogen_value()->id(),
- label->block_id(), LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t shift = WhichPowerOf2Abs(divisor);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ CmpP(dividend, Operand::Zero());
- __ bge(&dividend_is_not_negative, Label::kNear);
- if (shift) {
- // Note that this is correct even for kMinInt operands.
- __ LoadComplementRR(dividend, dividend);
- __ ExtractBitRange(dividend, dividend, shift - 1, 0);
- __ LoadComplementRR(dividend, dividend);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(dividend, Operand::Zero());
- } else {
- DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
- }
- __ b(&done, Label::kNear);
- }
-
- __ bind(&dividend_is_not_negative);
- if (shift) {
- __ ExtractBitRange(dividend, dividend, shift - 1, 0);
- } else {
- __ mov(dividend, Operand::Zero());
- }
- __ bind(&done);
-}
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- __ mov(ip, Operand(Abs(divisor)));
- __ Mul(result, result, ip);
- __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
- __ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
- Label done;
-
- // Check for x % 0.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ Cmp32(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1; dr would return undefined, which is not what we
- // want. We have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ Cmp32(left_reg, Operand(kMinInt));
- __ bne(&no_overflow_possible, Label::kNear);
- __ Cmp32(right_reg, Operand(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ b(ne, &no_overflow_possible, Label::kNear);
- __ mov(result_reg, Operand::Zero());
- __ b(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
-
- // The divide instruction dr will implicitly use the register pair
- // r0 & r1 below.
- DCHECK(!left_reg.is(r1));
- DCHECK(!right_reg.is(r1));
- DCHECK(!result_reg.is(r1));
- __ LoadRR(r0, left_reg);
- __ srda(r0, Operand(32));
- __ dr(r0, right_reg); // R0:R1 = R1 / divisor - R0 remainder
-
- __ LoadAndTestP_ExtendSrc(result_reg, r0); // Copy remainder to result register
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ bne(&done, Label::kNear);
- __ Cmp32(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ bind(&done);
-}
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ Cmp32(dividend, Operand(0x80000000));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-
- int32_t shift = WhichPowerOf2Abs(divisor);
-
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
- __ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
- }
-
- if (divisor == -1) { // Nice shortcut, not needed for correctness.
- __ LoadComplementRR(result, dividend);
- return;
- }
- if (shift == 0) {
- __ LoadRR(result, dividend);
- } else {
- if (shift == 1) {
- __ ShiftRight(result, dividend, Operand(31));
- } else {
- __ ShiftRightArith(result, dividend, Operand(31));
- __ ShiftRight(result, result, Operand(32 - shift));
- }
- __ AddP(result, dividend, result);
- __ ShiftRightArith(result, result, Operand(shift));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- }
- if (divisor < 0) __ LoadComplementRR(result, result);
-}
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ LoadComplementRR(result, result);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- Register scratch = scratch0();
- __ mov(ip, Operand(divisor));
- __ Mul(scratch, result, ip);
- __ Cmp32(scratch, dividend);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- const Register dividend = ToRegister(instr->dividend());
- const Register divisor = ToRegister(instr->divisor());
- Register result = ToRegister(instr->result());
-
- DCHECK(!dividend.is(result));
- DCHECK(!divisor.is(result));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ Cmp32(dividend, Operand::Zero());
- __ bne(&dividend_not_zero, Label::kNear);
- __ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ Cmp32(dividend, Operand(kMinInt));
- __ bne(&dividend_not_min_int, Label::kNear);
- __ Cmp32(divisor, Operand(-1));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- __ LoadRR(r0, dividend);
- __ srda(r0, Operand(32));
- __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
-
- __ LoadAndTestP_ExtendSrc(result, r1); // Move quotient to result register
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- // Deoptimize if remainder is not 0.
- __ Cmp32(r0, Operand::Zero());
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register result = ToRegister(instr->result());
- int32_t divisor = instr->divisor();
- bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 0) {
- if (shift || !result.is(dividend)) {
- __ ShiftRightArith(result, dividend, Operand(shift));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- }
- return;
- }
-
-// If the divisor is negative, we have to negate and handle edge cases.
-#if V8_TARGET_ARCH_S390X
- if (divisor == -1 && can_overflow) {
- __ Cmp32(dividend, Operand(0x80000000));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- }
-#endif
-
- __ LoadComplementRR(result, dividend);
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
- }
-
-// If the negation could not overflow, simply shifting is OK.
-#if !V8_TARGET_ARCH_S390X
- if (!can_overflow) {
-#endif
- if (shift) {
- __ ShiftRightArithP(result, result, Operand(shift));
- }
- return;
-#if !V8_TARGET_ARCH_S390X
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
- return;
- }
-
- Label overflow_label, done;
- __ b(overflow, &overflow_label, Label::kNear);
- __ ShiftRightArith(result, result, Operand(shift));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- __ b(&done, Label::kNear);
- __ bind(&overflow_label);
- __ mov(result, Operand(kMinInt / divisor));
- __ bind(&done);
-#endif
-}
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(!dividend.is(result));
-
- if (divisor == 0) {
- DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ LoadComplementRR(result, result);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp());
- DCHECK(!temp.is(dividend) && !temp.is(result));
- Label needs_adjustment, done;
- __ Cmp32(dividend, Operand::Zero());
- __ b(divisor > 0 ? lt : gt, &needs_adjustment);
- __ TruncatingDiv(result, dividend, Abs(divisor));
- if (divisor < 0) __ LoadComplementRR(result, result);
- __ b(&done, Label::kNear);
- __ bind(&needs_adjustment);
- __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
- __ TruncatingDiv(result, temp, Abs(divisor));
- if (divisor < 0) __ LoadComplementRR(result, result);
- __ SubP(result, result, Operand(1));
- __ bind(&done);
-}
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- const Register dividend = ToRegister(instr->dividend());
- const Register divisor = ToRegister(instr->divisor());
- Register result = ToRegister(instr->result());
-
- DCHECK(!dividend.is(result));
- DCHECK(!divisor.is(result));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ Cmp32(dividend, Operand::Zero());
- __ bne(&dividend_not_zero, Label::kNear);
- __ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ Cmp32(dividend, Operand(kMinInt));
- __ bne(&no_overflow_possible, Label::kNear);
- __ Cmp32(divisor, Operand(-1));
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
- } else {
- __ bne(&no_overflow_possible, Label::kNear);
- __ LoadRR(result, dividend);
- }
- __ bind(&no_overflow_possible);
- }
-
- __ LoadRR(r0, dividend);
- __ srda(r0, Operand(32));
- __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
-
- __ lr(result, r1); // Move quotient to result register
-
- Label done;
- Register scratch = scratch0();
- // If both operands have the same sign then we are done.
- __ Xor(scratch, dividend, divisor);
- __ ltr(scratch, scratch); // use 32 bit version LoadAndTestRR even in 64 bit
- __ bge(&done, Label::kNear);
-
- // If there is no remainder then we are done.
- if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
- __ msrkc(scratch, result, divisor);
- } else {
- __ lr(scratch, result);
- __ msr(scratch, divisor);
- }
- __ Cmp32(dividend, scratch);
- __ beq(&done, Label::kNear);
-
- // We performed a truncating division. Correct the result.
- __ Sub32(result, result, Operand(1));
- __ bind(&done);
-}
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DoubleRegister addend = ToDoubleRegister(instr->addend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- // Unable to use madbr as the intermediate value is not rounded
- // to proper precision
- __ ldr(result, multiplier);
- __ mdbr(result, multiplicand);
- __ adbr(result, addend);
-}
-
-void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
- DoubleRegister minuend = ToDoubleRegister(instr->minuend());
- DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
- DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
- DoubleRegister result = ToDoubleRegister(instr->result());
-
- // Unable to use msdbr as the intermediate value is not rounded
- // to proper precision
- __ ldr(result, multiplier);
- __ mdbr(result, multiplicand);
- __ sdbr(result, minuend);
-}
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
- if (right_op->IsConstantOperand()) {
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a zero constant will be handled separately.
- // If the constant is negative and left is zero, the result should be -0.
- __ CmpP(left, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
-
- switch (constant) {
- case -1:
- if (can_overflow) {
-#if V8_TARGET_ARCH_S390X
- if (instr->hydrogen()->representation().IsSmi()) {
-#endif
- __ LoadComplementRR(result, left);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-#if V8_TARGET_ARCH_S390X
- } else {
- __ LoadComplementRR(result, left);
- __ TestIfInt32(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- }
-#endif
- } else {
- __ LoadComplementRR(result, left);
- }
- break;
- case 0:
- if (bailout_on_minus_zero) {
-// If left is strictly negative and the constant is zero, the
-// result is -0. Deoptimize if required, otherwise return 0.
-#if V8_TARGET_ARCH_S390X
- if (instr->hydrogen()->representation().IsSmi()) {
-#endif
- __ Cmp32(left, Operand::Zero());
-#if V8_TARGET_ARCH_S390X
- } else {
- __ Cmp32(left, Operand::Zero());
- }
-#endif
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- __ LoadImmP(result, Operand::Zero());
- break;
- case 1:
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
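-        // Compute |constant| without a branch: (x + (x >> 31)) ^ (x >> 31).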
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
-
- if (base::bits::IsPowerOfTwo32(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ ShiftLeftP(result, left, Operand(shift));
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ LoadComplementRR(result, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ ShiftLeftP(scratch, left, Operand(shift));
- __ AddP(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ LoadComplementRR(result, result);
- } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ ShiftLeftP(scratch, left, Operand(shift));
- __ SubP(result, scratch, left);
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ LoadComplementRR(result, result);
- } else {
- // Generate standard code.
- __ Move(result, left);
- __ MulP(result, Operand(constant));
- }
- }
-
- } else {
- DCHECK(right_op->IsRegister());
- Register right = ToRegister(right_op);
-
- if (can_overflow) {
- if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
- // result = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(scratch, right);
- __ MulPWithCondition(result, left, scratch);
- } else {
- __ msrkc(result, left, right);
- __ LoadW(result, result);
- }
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- } else {
-#if V8_TARGET_ARCH_S390X
- // result = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ SmiUntag(scratch, right);
- __ msgr(result, scratch);
- } else {
- __ LoadRR(result, left);
- __ msgr(result, right);
- }
- __ TestIfInt32(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiTag(result);
- }
-#else
-        // Widen to 64 bits: result = left * right, then verify the product fits in an int32.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ lgfr(result, result);
- __ msgfr(result, right);
- } else {
-        // result = left * right as a 64-bit product.
- __ lgfr(result, left);
- __ msgfr(result, right);
- }
- __ TestIfInt32(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
-#endif
- }
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ Mul(result, result, right);
- } else {
- __ Mul(result, left, right);
- }
- }
-
- if (bailout_on_minus_zero) {
- Label done;
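-      // A -0 result is only possible when the operands have opposite signs and the product is zero.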
-#if V8_TARGET_ARCH_S390X
- if (instr->hydrogen()->representation().IsSmi()) {
-#endif
- __ XorP(r0, left, right);
- __ LoadAndTestRR(r0, r0);
- __ bge(&done, Label::kNear);
-#if V8_TARGET_ARCH_S390X
- } else {
- __ XorP(r0, left, right);
- __ Cmp32(r0, Operand::Zero());
- __ bge(&done, Label::kNear);
- }
-#endif
- // Bail out if the result is minus zero.
- __ CmpP(result, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-}
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- DCHECK(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
-
- if (right_op->IsConstantOperand()) {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ AndP(result, left, Operand(ToOperand(right_op)));
- break;
- case Token::BIT_OR:
- __ OrP(result, left, Operand(ToOperand(right_op)));
- break;
- case Token::BIT_XOR:
- __ XorP(result, left, Operand(ToOperand(right_op)));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (right_op->IsStackSlot()) {
-    // The reg-mem form clobbers its destination operand, so copy src to dst first.
- if (!left.is(result)) __ LoadRR(result, left);
- switch (instr->op()) {
- case Token::BIT_AND:
- __ AndP(result, ToMemOperand(right_op));
- break;
- case Token::BIT_OR:
- __ OrP(result, ToMemOperand(right_op));
- break;
- case Token::BIT_XOR:
- __ XorP(result, ToMemOperand(right_op));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- DCHECK(right_op->IsRegister());
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ AndP(result, left, ToRegister(right_op));
- break;
- case Token::BIT_OR:
- __ OrP(result, left, ToRegister(right_op));
- break;
- case Token::BIT_XOR:
- __ XorP(result, left, ToRegister(right_op));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (right_op->IsRegister()) {
- // Mask the right_op operand.
- __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
- switch (instr->op()) {
- case Token::ROR:
- // rotate_right(a, b) == rotate_left(a, 32 - b)
- __ LoadComplementRR(scratch, scratch);
- __ rll(result, left, scratch, Operand(32));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- break;
- case Token::SAR:
- __ ShiftRightArith(result, left, scratch);
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- break;
- case Token::SHR:
- __ ShiftRight(result, left, scratch);
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- if (instr->can_deopt()) {
-#if V8_TARGET_ARCH_S390X
- __ ltgfr(result, result /*, SetRC*/);
-#else
- __ ltr(result, result); // Set the <,==,> condition
-#endif
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
- }
- break;
- case Token::SHL:
- __ ShiftLeft(result, left, scratch);
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ rll(result, left, Operand(32 - shift_count));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ ShiftRightArith(result, left, Operand(shift_count));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ ShiftRight(result, left, Operand(shift_count));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- } else {
- if (instr->can_deopt()) {
- __ Cmp32(left, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
-#if V8_TARGET_ARCH_S390X
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ ShiftLeftP(result, left, Operand(shift_count));
-#else
- if (instr->hydrogen_value()->representation().IsSmi() &&
- instr->can_deopt()) {
- if (shift_count != 1) {
- __ ShiftLeft(result, left, Operand(shift_count - 1));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- __ SmiTagCheckOverflow(result, result, scratch);
- } else {
- __ SmiTagCheckOverflow(result, left, scratch);
- }
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
-#endif
- } else {
- __ ShiftLeft(result, left, Operand(shift_count));
-#if V8_TARGET_ARCH_S390X
- __ lgfr(result, result);
-#endif
- }
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
-
- bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
- instr->hydrogen()->representation().IsExternal());
-
-#if V8_TARGET_ARCH_S390X
- // The overflow detection needs to be tested on the lower 32-bits.
- // As a result, on 64-bit, we need to force 32-bit arithmetic operations
- // to set the CC overflow bit properly. The result is then sign-extended.
- bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#else
- bool checkOverflow = true;
-#endif
-
- if (right->IsConstantOperand()) {
- if (!isInteger || !checkOverflow) {
- __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
- } else {
- // -(MinInt) will overflow
- if (ToInteger32(LConstantOperand::cast(right)) == kMinInt) {
- __ Load(scratch0(), ToOperand(right));
- __ Sub32(ToRegister(result), ToRegister(left), scratch0());
- } else {
- __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
- }
- }
- } else if (right->IsRegister()) {
- if (!isInteger)
- __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
- else if (!checkOverflow)
- __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
- ToRegister(right));
- else
- __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
- } else {
- if (!left->Equals(instr->result()))
- __ LoadRR(ToRegister(result), ToRegister(left));
-
- MemOperand mem = ToMemOperand(right);
- if (!isInteger) {
- __ SubP(ToRegister(result), mem);
- } else {
-#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
- // We want to read the 32-bits directly from memory
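-      // On big-endian, the low-order 32 bits of the 64-bit slot are at offset + 4.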
- MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
-#else
- MemOperand Upper32Mem = ToMemOperand(right);
-#endif
- if (checkOverflow) {
- __ Sub32(ToRegister(result), Upper32Mem);
- } else {
- __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
- }
- }
- }
-
-#if V8_TARGET_ARCH_S390X
- if (isInteger && checkOverflow)
- __ lgfr(ToRegister(result), ToRegister(result));
-#endif
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- Register dst = ToRegister(instr->result());
- if (instr->value() == 0)
- __ XorP(dst, dst);
- else
- __ Load(dst, Operand(instr->value()));
-}
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
-}
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- DCHECK(instr->result()->IsDoubleRegister());
- DoubleRegister result = ToDoubleRegister(instr->result());
- uint64_t bits = instr->bits();
- __ LoadDoubleLiteral(result, bits, scratch0());
-}
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ Move(ToRegister(instr->result()), object);
-}
-
-MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldMemOperand(string, SeqString::kHeaderSize + offset);
- }
- Register scratch = scratch0();
- DCHECK(!scratch.is(string));
- DCHECK(!scratch.is(ToRegister(index)));
- // TODO(joransiu) : Fold Add into FieldMemOperand
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ AddP(scratch, string, ToRegister(index));
- } else {
- STATIC_ASSERT(kUC16Size == 2);
- __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
- __ AddP(scratch, string, scratch);
- }
- return FieldMemOperand(scratch, SeqString::kHeaderSize);
-}
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- if (FLAG_debug_code) {
- Register scratch = scratch0();
- __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- __ AndP(scratch, scratch,
- Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ CmpP(scratch,
- Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
- : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ llc(result, operand);
- } else {
- __ llh(result, operand);
- }
-}
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
- Register value = ToRegister(instr->value());
-
- if (FLAG_debug_code) {
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type
- : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
- }
-
- MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ stc(value, operand);
- } else {
- __ sth(value, operand);
- }
-}
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
- instr->hydrogen()->representation().IsExternal());
-#if V8_TARGET_ARCH_S390X
- // The overflow detection needs to be tested on the lower 32-bits.
- // As a result, on 64-bit, we need to force 32-bit arithmetic operations
- // to set the CC overflow bit properly. The result is then sign-extended.
- bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#else
- bool checkOverflow = true;
-#endif
-
- if (right->IsConstantOperand()) {
- if (!isInteger || !checkOverflow)
- __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
- else
- __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
- } else if (right->IsRegister()) {
- if (!isInteger)
- __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
- else if (!checkOverflow)
- __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
- ToRegister(right));
- else
- __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
- } else {
- if (!left->Equals(instr->result()))
- __ LoadRR(ToRegister(result), ToRegister(left));
-
- MemOperand mem = ToMemOperand(right);
- if (!isInteger) {
- __ AddP(ToRegister(result), mem);
- } else {
-#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
- // We want to read the 32-bits directly from memory
- MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
-#else
- MemOperand Upper32Mem = ToMemOperand(right);
-#endif
- if (checkOverflow) {
- __ Add32(ToRegister(result), Upper32Mem);
- } else {
- __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
- }
- }
- }
-
-#if V8_TARGET_ARCH_S390X
- if (isInteger && checkOverflow)
- __ lgfr(ToRegister(result), ToRegister(result));
-#endif
-  // Deoptimize on overflow.
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Register left_reg = ToRegister(left);
- Register right_reg = EmitLoadRegister(right, ip);
- Register result_reg = ToRegister(instr->result());
- Label return_left, done;
-#if V8_TARGET_ARCH_S390X
- if (instr->hydrogen_value()->representation().IsSmi()) {
-#endif
- __ CmpP(left_reg, right_reg);
-#if V8_TARGET_ARCH_S390X
- } else {
- __ Cmp32(left_reg, right_reg);
- }
-#endif
- __ b(cond, &return_left, Label::kNear);
- __ Move(result_reg, right_reg);
- __ b(&done, Label::kNear);
- __ bind(&return_left);
- __ Move(result_reg, left_reg);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- DoubleRegister left_reg = ToDoubleRegister(left);
- DoubleRegister right_reg = ToDoubleRegister(right);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ cdbr(left_reg, right_reg);
- __ bunordered(&check_nan_left, Label::kNear);
- __ beq(&check_zero);
- __ b(cond, &return_left, Label::kNear);
- __ b(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- __ lzdr(kDoubleRegZero);
- __ cdbr(left_reg, kDoubleRegZero);
- __ bne(&return_left, Label::kNear); // left == right != 0.
-
- // At this point, both left and right are either 0 or -0.
- // N.B. The following works because +0 + -0 == +0
- if (operation == HMathMinMax::kMathMin) {
- // For min we want logical-or of sign bit: -(-L + -R)
- __ lcdbr(left_reg, left_reg);
- __ ldr(result_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ adbr(result_reg, right_reg);
- } else {
- __ sdbr(result_reg, right_reg);
- }
- __ lcdbr(result_reg, result_reg);
- } else {
- // For max we want logical-and of sign bit: (L + R)
- __ ldr(result_reg, left_reg);
- __ adbr(result_reg, right_reg);
- }
- __ b(&done, Label::kNear);
-
- __ bind(&check_nan_left);
- __ cdbr(left_reg, left_reg);
- __ bunordered(&return_left, Label::kNear); // left == NaN.
-
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ ldr(result_reg, right_reg);
- }
- __ b(&done, Label::kNear);
-
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ ldr(result_reg, left_reg);
- }
- __ bind(&done);
- }
-}
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
- __ vfa(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ adbr(result, right);
- }
- break;
- case Token::SUB:
- if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
- __ vfs(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ sdbr(result, right);
- }
- break;
- case Token::MUL:
- if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
- __ vfm(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ mdbr(result, right);
- }
- break;
- case Token::DIV:
- if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
- __ vfd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ ddbr(result, right);
- }
- break;
- case Token::MOD: {
- __ PrepareCallCFunction(0, 2, scratch0());
- __ MovToFloatParameters(left, right);
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
- // Move the result in the double result register.
- __ MovFromFloatResult(result);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r3));
- DCHECK(ToRegister(instr->right()).is(r2));
- DCHECK(ToRegister(instr->result()).is(r2));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-template <class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
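-  // Branch only when the target block is not the next emitted block; otherwise fall through.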
- if (right_block == left_block || cond == al) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ b(cond, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ b(cond, chunk_->GetAssemblyLabel(left_block));
- __ b(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
- int true_block = instr->TrueDestination(chunk_);
- __ b(cond, chunk_->GetAssemblyLabel(true_block));
-}
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
- int false_block = instr->FalseDestination(chunk_);
- __ b(cond, chunk_->GetAssemblyLabel(false_block));
-}
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- DoubleRegister dbl_scratch = double_scratch0();
-
- if (r.IsInteger32()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ Cmp32(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (r.IsSmi()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ CmpP(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- DoubleRegister reg = ToDoubleRegister(instr->value());
- __ lzdr(kDoubleRegZero);
- __ cdbr(reg, kDoubleRegZero);
- // Test the double value. Zero and NaN are false.
- Condition lt_gt = static_cast<Condition>(lt | gt);
-
- EmitBranch(instr, lt_gt);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- __ CmpP(reg, Operand::Zero());
- EmitBranch(instr, ne);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, al);
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- __ LoadDouble(dbl_scratch,
- FieldMemOperand(reg, HeapNumber::kValueOffset));
- // Test the double value. Zero and NaN are false.
- __ lzdr(kDoubleRegZero);
- __ cdbr(dbl_scratch, kDoubleRegZero);
- Condition lt_gt = static_cast<Condition>(lt | gt);
- EmitBranch(instr, lt_gt);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ CmpP(ip, Operand::Zero());
- EmitBranch(instr, ne);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ beq(instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // Boolean -> its value.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ beq(instr->TrueLabel(chunk_));
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ beq(instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ beq(instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ CmpP(reg, Operand::Zero());
- __ beq(instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ TestIfSmi(reg);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
- }
-
- const Register map = scratch0();
- if (expected & ToBooleanHint::kNeedsMap) {
- __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
- Operand(1 << Map::kIsUndetectable));
- __ bne(instr->FalseLabel(chunk_));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
- __ bge(instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ bge(&not_string, Label::kNear);
- __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ CmpP(ip, Operand::Zero());
- __ bne(instr->TrueLabel(chunk_));
- __ b(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CompareInstanceType(map, ip, SYMBOL_TYPE);
- __ beq(instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ bne(&not_heap_number, Label::kNear);
- __ LoadDouble(dbl_scratch,
- FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ lzdr(kDoubleRegZero);
- __ cdbr(dbl_scratch, kDoubleRegZero);
- __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false.
- __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false.
- __ b(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
-
-Condition LCodeGen::TokenToCondition(Token::Value op) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = ne;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cond = TokenToCondition(instr->op());
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Compare left and right operands as doubles and load the
- // resulting flags into the normal status register.
- __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered,
- // jump to false block label.
- __ bunordered(instr->FalseLabel(chunk_));
- } else {
- if (right->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- if (is_unsigned) {
- __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
- } else {
- __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
- }
- } else {
- if (is_unsigned) {
- __ CmpLogical32(ToRegister(left), ToOperand(right));
- } else {
- __ Cmp32(ToRegister(left), ToOperand(right));
- }
- }
- } else if (left->IsConstantOperand()) {
- int32_t value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- if (is_unsigned) {
- __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
- } else {
- __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
- }
- } else {
- if (is_unsigned) {
- __ CmpLogical32(ToRegister(right), ToOperand(left));
- } else {
- __ Cmp32(ToRegister(right), ToOperand(left));
- }
- }
- // We commuted the operands, so commute the condition.
- cond = CommuteCondition(cond);
- } else if (instr->hydrogen_value()->representation().IsSmi()) {
- if (is_unsigned) {
- __ CmpLogicalP(ToRegister(left), ToRegister(right));
- } else {
- __ CmpP(ToRegister(left), ToRegister(right));
- }
- } else {
- if (is_unsigned) {
- __ CmpLogical32(ToRegister(left), ToRegister(right));
- } else {
- __ Cmp32(ToRegister(left), ToRegister(right));
- }
- }
- }
- EmitBranch(instr, cond);
- }
-}
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
-
- __ CmpP(left, right);
- EmitBranch(instr, eq);
-}
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ CmpP(input_reg, Operand(factory()->the_hole_value()));
- EmitBranch(instr, eq);
- return;
- }
-
- DoubleRegister input_reg = ToDoubleRegister(instr->object());
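-  // The hole is encoded as a NaN; only a NaN compares unordered with itself.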
- __ cdbr(input_reg, input_reg);
- EmitFalseBranch(instr, ordered);
-
- Register scratch = scratch0();
- // Convert to GPR and examine the upper 32 bits
- __ lgdr(scratch, input_reg);
- __ srlg(scratch, scratch, Operand(32));
- __ Cmp32(scratch, Operand(kHoleNanUpper32));
- EmitBranch(instr, eq);
-}
-
-Condition LCodeGen::EmitIsString(Register input, Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK
- : INLINE_SMI_CHECK;
- Condition true_cond =
- EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ TestIfSmi(input_reg);
- EmitBranch(instr, eq);
-}
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
- Operand(1 << Map::kIsUndetectable));
- EmitBranch(instr, ne);
-}
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r3));
- DCHECK(ToRegister(instr->right()).is(r2));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(r2, Heap::kTrueValueRootIndex);
- EmitBranch(instr, eq);
-}
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return ge;
- if (from == FIRST_TYPE) return le;
- UNREACHABLE();
- return eq;
-}
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ bge(is_true);
- } else {
- __ bge(is_false);
- }
-
- // Check if the constructor in the map is a function.
- Register instance_type = ip;
- __ GetMapConstructor(temp, temp, temp2, instance_type);
-
- // Objects with a non-function constructor have class 'Object'.
- __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
- if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
- __ bne(is_true);
- } else {
- __ bne(is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(temp,
- FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ CmpP(temp, Operand(class_name));
- // End with the answer in flags.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, eq);
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ mov(temp, Operand(instr->map()));
- __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- EmitBranch(instr, eq);
-}
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = scratch0();
- Register const object_instance_type = ip;
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ TestIfSmi(object);
- EmitFalseBranch(instr, eq);
- }
- // Loop through the {object}s prototype chain looking for the {prototype}.
- __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ LoadlB(object_instance_type,
- FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
- // Deoptimize for proxies.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
- __ LoadP(object_prototype,
- FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- EmitFalseBranch(instr, eq);
- __ CmpP(object_prototype, prototype);
- EmitTrueBranch(instr, eq);
- __ LoadP(object_map,
- FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ b(&loop);
-}
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined
- __ CmpP(r2, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
-
- __ b(condition, &true_value, Label::kNear);
-
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ b(&done, Label::kNear);
-
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r2. We're leaving the code
-    // managed by the register allocator and tearing down the frame, so it's
- // safe to write to the context register.
- __ push(r2);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- int32_t sp_delta = (parameter_count + 1) * kPointerSize;
- if (NeedsEagerFrame()) {
- masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
- } else if (sp_delta != 0) {
- // TODO(joransiu): Clean this up into Macro Assembler
- if (sp_delta >= 0 && sp_delta < 4096)
- __ la(sp, MemOperand(sp, sp_delta));
- else
- __ lay(sp, MemOperand(sp, sp_delta));
- }
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- if (NeedsEagerFrame()) {
- masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
- }
- __ SmiToPtrArrayOffset(r0, reg);
- __ AddP(sp, sp, r0);
- }
-
- __ Ret();
-}
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- } else {
- Label skip;
- __ bne(&skip, Label::kNear);
- __ mov(result, Operand(factory()->undefined_value()));
- __ bind(&skip);
- }
- }
-}
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextMemOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadP(scratch, target);
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- } else {
- __ bne(&skip_assignment);
- }
- }
-
- __ StoreP(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK
- : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context, target.offset(), value, scratch,
- GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
- Register object = ToRegister(instr->object());
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = MemOperand(object, offset);
- __ LoadRepresentation(result, operand, access.representation(), r0);
- return;
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- DCHECK(access.IsInobject());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ LoadDouble(result, FieldMemOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
-
- Representation representation = access.representation();
-
-#if V8_TARGET_ARCH_S390X
- // 64-bit Smi optimization
- if (representation.IsSmi() &&
- instr->hydrogen()->representation().IsInteger32()) {
- // Read int value directly from upper half of the smi.
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
-#endif
-
- __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
- r0);
-}
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ LoadP(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ bne(&done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
-  // Subtracting from the length accounts for one of them; add one for the other.
- if (instr->length()->IsConstantOperand()) {
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int index = (const_length - const_index) + 1;
- __ LoadP(result, MemOperand(arguments, index * kPointerSize));
- } else {
- Register index = ToRegister(instr->index());
- __ SubP(result, index, Operand(const_length + 1));
- __ LoadComplementRR(result, result);
- __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
- __ LoadP(result, MemOperand(arguments, result));
- }
- } else if (instr->index()->IsConstantOperand()) {
- Register length = ToRegister(instr->length());
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int loc = const_index - 1;
- if (loc != 0) {
- __ SubP(result, length, Operand(loc));
- __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
- __ LoadP(result, MemOperand(arguments, result));
- } else {
- __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
- __ LoadP(result, MemOperand(arguments, result));
- }
- } else {
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- __ SubP(result, length, index);
- __ AddP(result, result, Operand(1));
- __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
- __ LoadP(result, MemOperand(arguments, result));
- }
-}
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
- int base_offset = instr->base_offset();
- bool use_scratch = false;
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- base_offset += constant_key << element_size_shift;
- if (!is_int20(base_offset)) {
- __ mov(scratch0(), Operand(base_offset));
- base_offset = 0;
- use_scratch = true;
- }
- } else {
- __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi,
- keyMaybeNegative);
- use_scratch = true;
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
- if (!use_scratch) {
- __ ldeb(result, MemOperand(external_pointer, base_offset));
- } else {
- __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
- }
-    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
- if (!use_scratch) {
- __ LoadDouble(result, MemOperand(external_pointer, base_offset));
- } else {
- __ LoadDouble(result,
- MemOperand(scratch0(), external_pointer, base_offset));
- }
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand =
- PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
- constant_key, element_size_shift, base_offset,
- keyMaybeNegative);
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ LoadB(result, mem_operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ LoadlB(result, mem_operand);
- break;
- case INT16_ELEMENTS:
- __ LoadHalfWordP(result, mem_operand);
- break;
- case UINT16_ELEMENTS:
- __ LoadLogicalHalfWordP(result, mem_operand);
- break;
- case INT32_ELEMENTS:
- __ LoadW(result, mem_operand, r0);
- break;
- case UINT32_ELEMENTS:
- __ LoadlW(result, mem_operand, r0);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ CmpLogical32(result, Operand(0x80000000));
- DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DoubleRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- bool use_scratch = false;
- intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
- if (!key_is_constant) {
- use_scratch = true;
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
- keyMaybeNegative);
- }
-
- // Memory references support up to 20-bits signed displacement in RXY form
- // Include Register::kExponentOffset in check, so we are guaranteed not to
- // overflow displacement later.
- if (!is_int20(base_offset + Register::kExponentOffset)) {
- use_scratch = true;
- if (key_is_constant) {
- __ mov(scratch, Operand(base_offset));
- } else {
- __ AddP(scratch, Operand(base_offset));
- }
- base_offset = 0;
- }
-
- if (!use_scratch) {
- __ LoadDouble(result, MemOperand(elements, base_offset));
- } else {
- __ LoadDouble(result, MemOperand(scratch, elements, base_offset));
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (!use_scratch) {
- __ LoadlW(r0,
- MemOperand(elements, base_offset + Register::kExponentOffset));
- } else {
- __ LoadlW(r0, MemOperand(scratch, elements,
- base_offset + Register::kExponentOffset));
- }
- __ Cmp32(r0, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- }
-}
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- HLoadKeyed* hinstr = instr->hydrogen();
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- int offset = instr->base_offset();
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- } else {
- Register key = ToRegister(instr->key());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (hinstr->key()->representation().IsSmi()) {
- __ SmiToPtrArrayOffset(scratch, key);
- } else {
- __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
- }
- }
-
- bool requires_hole_check = hinstr->RequiresHoleCheck();
- Representation representation = hinstr->representation();
-
-#if V8_TARGET_ARCH_S390X
- // 64-bit Smi optimization
- if (representation.IsInteger32() &&
- hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
- DCHECK(!requires_hole_check);
- // Read int value directly from upper half of the smi.
- offset = SmiWordOffset(offset);
- }
-#endif
-
- if (instr->key()->IsConstantOperand()) {
- __ LoadRepresentation(result, MemOperand(elements, offset), representation,
- r1);
- } else {
- __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
- representation, r1);
- }
-
- // Check for the hole value.
- if (requires_hole_check) {
- if (IsFastSmiElementsKind(hinstr->elements_kind())) {
- __ TestIfSmi(result);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ CmpP(result, scratch);
- __ bne(&done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- }
-}
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
- bool key_is_constant, bool key_is_smi,
- int constant_key,
- int element_size_shift,
- int base_offset,
- bool keyMaybeNegative) {
- Register scratch = scratch0();
-
- if (key_is_constant) {
- int offset = (base_offset + (constant_key << element_size_shift));
- if (!is_int20(offset)) {
- __ mov(scratch, Operand(offset));
- return MemOperand(base, scratch);
- } else {
- return MemOperand(base,
- (constant_key << element_size_shift) + base_offset);
- }
- }
-
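-  // A Smi key already carries an implicit shift of kSmiTagSize + kSmiShiftSize, so the offset only needs rescaling when the element shift differs.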
- bool needs_shift =
- (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
-
- if (needs_shift) {
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
- keyMaybeNegative);
- } else {
- scratch = key;
- }
-
- if (!is_int20(base_offset)) {
- __ AddP(scratch, Operand(base_offset));
- base_offset = 0;
- }
- return MemOperand(scratch, base, base_offset);
-}
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lay(result, MemOperand(sp, -2 * kPointerSize));
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- result,
- MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(result,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ beq(&adapted, Label::kNear);
- __ LoadRR(result, fp);
- __ b(&done, Label::kNear);
-
- __ bind(&adapted);
- __ LoadRR(result, scratch);
- __ bind(&done);
- } else {
- __ LoadRR(result, fp);
- }
-}
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
-  // If there is no arguments adaptor frame, the number of arguments is fixed.
- __ CmpP(fp, elem);
- __ mov(result, Operand(scope()->num_parameters()));
- __ beq(&done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, result_in_receiver;
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions or builtins.
- __ LoadP(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(scratch, FieldMemOperand(
- scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
- __ bne(&result_in_receiver, Label::kNear);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ beq(&global_object, Label::kNear);
- __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ beq(&global_object, Label::kNear);
-
- // Deoptimize if the receiver is not a JS object.
- __ TestIfSmi(receiver);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ b(&result_in_receiver, Label::kNear);
- __ bind(&global_object);
- __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
-
- if (result.is(receiver)) {
- __ bind(&result_in_receiver);
- } else {
- Label result_ok;
- __ b(&result_ok, Label::kNear);
- __ bind(&result_in_receiver);
- __ LoadRR(result, receiver);
- __ bind(&result_ok);
- }
-}
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- DCHECK(receiver.is(r2)); // Used for parameter count.
- DCHECK(function.is(r3)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(r2));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ CmpLogicalP(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ LoadRR(receiver, length);
- // The arguments are at a one pointer size offset from elements.
- __ AddP(elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ CmpP(length, Operand::Zero());
- __ beq(&invoke, Label::kNear);
- __ bind(&loop);
- __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
- __ LoadP(scratch, MemOperand(elements, r1));
- __ push(scratch);
- __ BranchOnCount(length, &loop);
-
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(r2);
- // It is safe to use r5, r6 and r7 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) r5 (new.target) will be initialized below.
- PrepareForTailCall(actual, r5, r6, r7);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver which is r2, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
- } else {
- Register argument_reg = EmitLoadRegister(argument, ip);
- __ push(argument_reg);
- }
-}
-
-void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in cp.
- DCHECK(result.is(cp));
- }
-}
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- __ Move(scratch0(), instr->hydrogen()->declarations());
- __ push(scratch0());
- __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
- __ push(scratch0());
- __ Move(scratch0(), instr->hydrogen()->feedback_vector());
- __ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = r3;
-
- LPointerMap* pointers = instr->pointer_map();
-
- if (can_invoke_directly) {
- // Change context.
- __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ mov(r2, Operand(arity));
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
- if (is_tail_call) {
- __ JumpToJSEntry(ip);
- } else {
- __ CallJSEntry(ip);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- DCHECK(instr->context() != NULL);
- DCHECK(ToRegister(instr->context()).is(cp));
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ Cmp32(exponent, Operand::Zero());
- // Move the input to the result if necessary.
- __ Move(result, input);
- __ bge(&done);
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r3) ? r2 : r3;
- Register tmp2 = input.is(r4) ? r2 : r4;
- Register tmp3 = input.is(r5) ? r2 : r5;
- Register tmp4 = input.is(r6) ? r2 : r6;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
- instr->context());
-    // Set the pointer to the new heap number in tmp1.
- if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
-
- // Clear the sign bit.
- __ nilf(exponent, Operand(~HeapNumber::kSignMask));
- __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-void LCodeGen::EmitMathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ LoadPositiveP(result, input);
- // Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
-}
-
-#if V8_TARGET_ARCH_S390X
-void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ LoadPositive32(result, input);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-}
-#endif
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ lpdbr(result, input);
-#if V8_TARGET_ARCH_S390X
- } else if (r.IsInteger32()) {
- EmitInteger32MathAbs(instr);
- } else if (r.IsSmi()) {
-#else
- } else if (r.IsSmiOrInteger32()) {
-#endif
- EmitMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register input_high = scratch0();
- Register scratch = ip;
- Label done, exact;
-
- __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
- &exact);
- DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- __ bind(&exact);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ CmpP(result, Operand::Zero());
- __ bne(&done, Label::kNear);
- __ Cmp32(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
-}
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
- DoubleRegister input_plus_dot_five = double_scratch1;
- Register scratch1 = scratch0();
- Register scratch2 = ip;
- DoubleRegister dot_five = double_scratch0();
- Label convert, done;
-
- __ LoadDoubleLiteral(dot_five, 0.5, r0);
- __ lpdbr(double_scratch1, input);
- __ cdbr(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- // If input is in [-0.5, -0], the result is -0.
- // If input is in [+0, +0.5[, the result is +0.
- // If the input is +0.5, the result is 1.
- __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5].
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // [-0.5, -0] (negative) yields minus zero.
- __ TestDoubleSign(input, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- Label return_zero;
- __ cdbr(input, dot_five);
- __ bne(&return_zero, Label::kNear);
- __ LoadImmP(result, Operand(1)); // +0.5.
- __ b(&done, Label::kNear);
- // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
- // flag kBailoutOnMinusZero.
- __ bind(&return_zero);
- __ LoadImmP(result, Operand::Zero());
- __ b(&done, Label::kNear);
-
- __ bind(&convert);
- __ ldr(input_plus_dot_five, input);
- __ adbr(input_plus_dot_five, dot_five);
- // Reuse dot_five (double_scratch0) as we no longer need this value.
- __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
- double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- __ bind(&done);
-}
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- DoubleRegister output_reg = ToDoubleRegister(instr->result());
-
- // Round double to float
- __ ledbr(output_reg, input_reg);
- // Extend from float to double
- __ ldebr(output_reg, output_reg);
-}
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister result = ToDoubleRegister(instr->result());
- LOperand* input = instr->value();
- if (input->IsDoubleRegister()) {
- __ Sqrt(result, ToDoubleRegister(instr->value()));
- } else {
- __ Sqrt(result, ToMemOperand(input));
- }
-}
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = double_scratch0();
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label skip, done;
-
- __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
- __ cdbr(input, temp);
- __ bne(&skip, Label::kNear);
- __ lcdbr(result, temp);
- __ b(&done, Label::kNear);
-
- // Add +0 to convert -0 to +0.
- __ bind(&skip);
- __ ldr(result, input);
- __ lzdr(kDoubleRegZero);
- __ adbr(result, kDoubleRegZero);
- __ sqdbr(result, result);
- __ bind(&done);
-}
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d2));
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(ToDoubleRegister(instr->left()).is(d1));
- DCHECK(ToDoubleRegister(instr->result()).is(d3));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt);
- __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- __ PrepareCallCFunction(0, 1, scratch0());
- __ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
- __ MovFromFloatResult(ToDoubleRegister(instr->result()));
-}
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Label done;
- __ llgfr(result, input);
- __ flogr(r0, result);
- __ LoadRR(result, r0);
- __ CmpP(r0, Operand::Zero());
- __ beq(&done, Label::kNear);
- __ SubP(result, Operand(32));
- __ bind(&done);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(scratch3,
- MemOperand(scratch2, StandardFrameConstants::kContextOffset));
- __ CmpP(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ LoadRR(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
-
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->function()).is(r3));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use r5, r6 and r7 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) r5 (new.target) will be initialized below.
- PrepareForTailCall(actual, r5, r6, r7);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(r3, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(r2));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ Jump(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallJSEntry(ip);
- }
- generator.AfterCall();
- }
-}
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r3));
- DCHECK(ToRegister(instr->result()).is(r2));
-
- __ mov(r2, Operand(instr->arity()));
- __ Move(r4, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
- // We might need a change here
- // look at the first argument
- __ LoadP(r7, MemOperand(sp, 0));
- __ CmpP(r7, Operand::Zero());
- __ beq(&packed_case, Label::kNear);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ b(&done, Label::kNear);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ lay(code_object,
- MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(code_object,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
-}
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lay(result, MemOperand(base, ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ lay(result, MemOperand(base, offset));
- }
-}
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- HStoreNamedField* hinstr = instr->hydrogen();
- Representation representation = instr->representation();
-
- Register object = ToRegister(instr->object());
- Register scratch = scratch0();
- HObjectAccess access = hinstr->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = MemOperand(object, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- return;
- }
-
- __ AssertNotSmi(object);
-
-#if V8_TARGET_ARCH_S390X
- DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
- IsInteger32(LConstantOperand::cast(instr->value())));
-#else
- DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
-#endif
- if (!FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!hinstr->has_transition());
- DCHECK(!hinstr->NeedsWriteBarrier());
- DoubleRegister value = ToDoubleRegister(instr->value());
- DCHECK(offset >= 0);
- __ StoreDouble(value, FieldMemOperand(object, offset));
- return;
- }
-
- if (hinstr->has_transition()) {
- Handle<Map> transition = hinstr->transition_map();
- AddDeprecationDependency(transition);
- __ mov(scratch, Operand(transition));
- __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
- if (hinstr->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register record_dest = object;
- Register record_value = no_reg;
- Register record_scratch = scratch;
-#if V8_TARGET_ARCH_S390X
- if (FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ StoreDouble(value, FieldMemOperand(object, offset));
- if (hinstr->NeedsWriteBarrier()) {
- record_value = ToRegister(instr->value());
- }
- } else {
- if (representation.IsSmi() &&
- hinstr->value()->representation().IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- // 64-bit Smi optimization
- // Store int value directly to upper half of the smi.
- offset = SmiWordOffset(offset);
- representation = Representation::Integer32();
- }
-#endif
- if (access.IsInobject()) {
- Register value = ToRegister(instr->value());
- MemOperand operand = FieldMemOperand(object, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- record_value = value;
- } else {
- Register value = ToRegister(instr->value());
- __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- MemOperand operand = FieldMemOperand(scratch, offset);
- __ StoreRepresentation(value, operand, representation, r0);
- record_dest = scratch;
- record_value = value;
- record_scratch = object;
- }
-#if V8_TARGET_ARCH_S390X
- }
-#endif
-
- if (hinstr->NeedsWriteBarrier()) {
- __ RecordWriteField(record_dest, offset, record_value, record_scratch,
- GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
- }
-}
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Representation representation = instr->hydrogen()->length()->representation();
- DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
- DCHECK(representation.IsSmiOrInteger32());
- Register temp = scratch0();
-
- Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
- if (instr->length()->IsConstantOperand()) {
- int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
- Register index = ToRegister(instr->index());
- if (representation.IsSmi()) {
- __ CmpLogicalSmiLiteral(index, Smi::FromInt(length), temp);
- } else {
- __ CmpLogical32(index, Operand(length));
- }
- cc = CommuteCondition(cc);
- } else if (instr->index()->IsConstantOperand()) {
- int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
- Register length = ToRegister(instr->length());
- if (representation.IsSmi()) {
- __ CmpLogicalSmiLiteral(length, Smi::FromInt(index), temp);
- } else {
- __ CmpLogical32(length, Operand(index));
- }
- } else {
- Register index = ToRegister(instr->index());
- Register length = ToRegister(instr->length());
- if (representation.IsSmi()) {
- __ CmpLogicalP(length, index);
- } else {
- __ CmpLogical32(length, index);
- }
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ b(NegateCondition(cc), &done, Label::kNear);
- __ stop("eliminated bounds check failed");
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
- int base_offset = instr->base_offset();
-
- if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
- Register address = scratch0();
- DoubleRegister value(ToDoubleRegister(instr->value()));
- if (key_is_constant) {
- if (constant_key != 0) {
- base_offset += constant_key << element_size_shift;
- if (!is_int20(base_offset)) {
- __ mov(address, Operand(base_offset));
- __ AddP(address, external_pointer);
- } else {
- __ AddP(address, external_pointer, Operand(base_offset));
- }
- base_offset = 0;
- } else {
- address = external_pointer;
- }
- } else {
- __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi,
- keyMaybeNegative);
- __ AddP(address, external_pointer);
- }
- if (elements_kind == FLOAT32_ELEMENTS) {
- __ ledbr(double_scratch0(), value);
- __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
- } else { // Storing doubles, not floats.
- __ StoreDouble(value, MemOperand(address, base_offset));
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand =
- PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
- constant_key, element_size_shift, base_offset,
- keyMaybeNegative);
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- case INT8_ELEMENTS:
- if (key_is_constant) {
- __ StoreByte(value, mem_operand, r0);
- } else {
- __ StoreByte(value, mem_operand);
- }
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- if (key_is_constant) {
- __ StoreHalfWord(value, mem_operand, r0);
- } else {
- __ StoreHalfWord(value, mem_operand);
- }
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- if (key_is_constant) {
- __ StoreW(value, mem_operand, r0);
- } else {
- __ StoreW(value, mem_operand);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- DoubleRegister double_scratch = double_scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
- bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
- int base_offset = instr->base_offset() + constant_key * kDoubleSize;
- bool use_scratch = false;
- intptr_t address_offset = base_offset;
-
- if (key_is_constant) {
-    // Memory references support up to 20-bit signed displacement in RXY form
- if (!is_int20((address_offset))) {
- __ mov(scratch, Operand(address_offset));
- address_offset = 0;
- use_scratch = true;
- }
- } else {
- use_scratch = true;
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
- keyMaybeNegative);
-    // Memory references support up to 20-bit signed displacement in RXY form
- if (!is_int20((address_offset))) {
- __ AddP(scratch, Operand(address_offset));
- address_offset = 0;
- }
- }
-
- if (instr->NeedsCanonicalization()) {
- // Turn potential sNaN value into qNaN.
- __ CanonicalizeNaN(double_scratch, value);
- DCHECK(address_offset >= 0);
- if (use_scratch)
- __ StoreDouble(double_scratch,
- MemOperand(scratch, elements, address_offset));
- else
- __ StoreDouble(double_scratch, MemOperand(elements, address_offset));
- } else {
- if (use_scratch)
- __ StoreDouble(value, MemOperand(scratch, elements, address_offset));
- else
- __ StoreDouble(value, MemOperand(elements, address_offset));
- }
-}
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- HStoreKeyed* hinstr = instr->hydrogen();
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- Register scratch = scratch0();
- int offset = instr->base_offset();
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- DCHECK(!hinstr->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset += ToInteger32(const_operand) * kPointerSize;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (hinstr->key()->representation().IsSmi()) {
- __ SmiToPtrArrayOffset(scratch, key);
- } else {
- if (instr->hydrogen()->IsDehoisted() ||
- !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
-#if V8_TARGET_ARCH_S390X
- // If array access is dehoisted, the key, being an int32, can contain
-          // a negative value, and needs to be sign-extended to 64-bit for
- // memory access.
- __ lgfr(key, key);
-#endif
- __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
- } else {
- // Small optimization to reduce pathlength. After Bounds Check,
- // the key is guaranteed to be non-negative. Leverage RISBG,
- // which also performs zero-extension.
- __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
- Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
- true);
- }
- }
- }
-
- Representation representation = hinstr->value()->representation();
-
-#if V8_TARGET_ARCH_S390X
- // 64-bit Smi optimization
- if (representation.IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
- // Store int value directly to upper half of the smi.
- offset = SmiWordOffset(offset);
- }
-#endif
-
- if (instr->key()->IsConstantOperand()) {
- __ StoreRepresentation(value, MemOperand(elements, offset), representation,
- scratch);
- } else {
- __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
- representation, r0);
- }
-
- if (hinstr->NeedsWriteBarrier()) {
- SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK
- : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- if (instr->key()->IsConstantOperand()) {
- __ lay(key, MemOperand(elements, offset));
- } else {
- __ lay(key, MemOperand(scratch, elements, offset));
- }
- __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed,
- hinstr->PointersToHereCheckForValue());
- }
-}
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = r2;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ b(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
- __ ble(deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ Cmp32(ToRegister(key), Operand(constant_capacity));
- __ bge(deferred->entry());
- } else {
- __ Cmp32(ToRegister(key), ToRegister(current_capacity));
- __ bge(deferred->entry());
- }
-
- if (instr->elements()->IsRegister()) {
- __ Move(result, ToRegister(instr->elements()));
- } else {
- __ LoadP(result, ToMemOperand(instr->elements()));
- }
-
- __ bind(deferred->exit());
-}
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = r2;
- __ LoadImmP(result, Operand::Zero());
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ Move(result, ToRegister(instr->object()));
- } else {
- __ LoadP(result, ToMemOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
- } else {
- Abort(kArrayIndexConstantValueTooBig);
- }
- } else {
- Label is_smi;
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(r5, ToRegister(key));
-#else
- // Deopt if the key is outside Smi range. The stub expects Smi and would
-      // bump the elements into dictionary mode (and trigger a deopt) anyway.
- __ Add32(r5, ToRegister(key), ToRegister(key));
- __ b(nooverflow, &is_smi);
- __ PopSafepointRegisters();
- DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
- __ bind(&is_smi);
-#endif
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ TestIfSmi(result);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
-}
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ CmpP(scratch, Operand(from_map));
- __ bne(&not_applicable);
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, Operand(to_map));
- __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteForMap(object_reg, new_map_reg, scratch,
- GetLinkRegisterState(), kDontSaveFPRegs);
- } else {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(object_reg.is(r2));
- PushSafepointRegistersScope scope(this);
- __ Move(r3, to_map);
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kLazyDeopt);
- }
- __ bind(&not_applicable);
-}
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r3));
- DCHECK(ToRegister(instr->right()).is(r2));
- StringAddStub stub(isolate(), instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new (zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(
- masm(), ToRegister(instr->string()), ToRegister(instr->index()),
- ToRegister(instr->result()), deferred->entry());
- __ bind(deferred->exit());
-}
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ LoadImmP(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
- instr->context());
- __ AssertSmi(r2);
- __ SmiUntag(r2);
- __ StoreToSafepointRegisterSlot(r2, result);
-}
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new (zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- DCHECK(!char_code.is(result));
-
- __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
- __ bgt(deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
- __ AddP(result, r0);
- __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ beq(deferred->entry());
- __ bind(deferred->exit());
-}
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ LoadImmP(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(r2, result);
-}
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- DCHECK(output->IsDoubleRegister());
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ LoadP(scratch, ToMemOperand(input));
- __ ConvertIntToDouble(ToDoubleRegister(output), scratch);
- } else {
- __ ConvertIntToDouble(ToDoubleRegister(output), ToRegister(input));
- }
-}
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- __ ConvertUnsignedIntToDouble(ToDoubleRegister(output), ToRegister(input));
-}
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2(), SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
-
- DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(dst, src);
-#else
-  // Add src to itself to detect SMI overflow.
- __ Add32(dst, src, src);
- __ b(overflow, deferred->entry());
-#endif
- __ bind(deferred->exit());
-}
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2(), UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
- __ CmpLogicalP(input, Operand(Smi::kMaxValue));
- __ bgt(deferred->entry());
- __ SmiTag(result, input);
- __ bind(deferred->exit());
-}
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
- LOperand* temp1, LOperand* temp2,
- IntegerSignedness signedness) {
- Label done, slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- Register tmp1 = scratch0();
- Register tmp2 = ToRegister(temp1);
- Register tmp3 = ToRegister(temp2);
- DoubleRegister dbl_scratch = double_scratch0();
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ xilf(src, Operand(HeapNumber::kSignMask));
- }
- __ ConvertIntToDouble(dbl_scratch, src);
- } else {
- __ ConvertUnsignedIntToDouble(dbl_scratch, src);
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
- __ b(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ LoadImmP(dst, Operand::Zero());
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!dst.is(cp)) {
- __ LoadImmP(cp, Operand::Zero());
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r2, dst);
- }
-
-  // Done. Store the value in dbl_scratch into the allocated heap number.
- __ bind(&done);
- __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-}
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
- } else {
- __ b(deferred->entry());
- }
- __ bind(deferred->exit());
- __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-}
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ LoadImmP(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(cp)) {
- __ LoadImmP(cp, Operand::Zero());
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r2, reg);
-}
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
- }
-#if !V8_TARGET_ARCH_S390X
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- __ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
- } else {
-#endif
- __ SmiTag(output, input);
-#if !V8_TARGET_ARCH_S390X
- }
-#endif
-}
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- __ tmll(input, Operand(kHeapObjectTag));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
- __ SmiUntag(result, input);
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- DoubleRegister result_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Register scratch = scratch0();
- DCHECK(!result_reg.is(double_scratch0()));
-
- Label convert, load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
-
- if (can_convert_undefined_to_nan) {
- __ bne(&convert, Label::kNear);
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
- }
- // load heap number
- __ LoadDouble(result_reg,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ TestDoubleIsMinusZero(result_reg, scratch, ip);
- DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
- }
- __ b(&done, Label::kNear);
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
- // Convert undefined (and hole) to NaN.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ LoadDouble(result_reg,
- FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ b(&done, Label::kNear);
- }
- } else {
- __ SmiUntag(scratch, input_reg);
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ ConvertIntToDouble(result_reg, scratch);
- __ bind(&done);
-}
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
-
- DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // Heap number map check.
- __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
-
- if (instr->truncating()) {
- Label truncate;
- __ beq(&truncate);
- __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ bind(&truncate);
- __ LoadRR(scratch2, input_reg);
- __ TruncateHeapNumberToI(input_reg, scratch2);
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
-
- __ LoadDouble(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // preserve heap number pointer in scratch2 for minus zero check below
- __ LoadRR(scratch2, input_reg);
- }
- __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
- double_scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ CmpP(input_reg, Operand::Zero());
- __ bne(&done, Label::kNear);
- __ TestHeapNumberSign(scratch2, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- }
- }
- __ bind(&done);
-}
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- DCHECK(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
-
- // Branch to deferred code if the input is a HeapObject.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
- }
-}
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI
- : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagD(instr, input_reg, result_reg, mode);
-}
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
- DoubleRegister double_scratch = double_scratch0();
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
- double_scratch);
-    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ CmpP(result_reg, Operand::Zero());
- __ bne(&done, Label::kNear);
- __ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-}
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- DoubleRegister double_input = ToDoubleRegister(instr->value());
- DoubleRegister double_scratch = double_scratch0();
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, double_input);
- } else {
- __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
- double_scratch);
-    // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- __ CmpP(result_reg, Operand::Zero());
- __ bne(&done, Label::kNear);
- __ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
- }
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(result_reg);
-#else
- __ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
-#endif
-}
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- if (input->IsRegister()) {
- __ TestIfSmi(ToRegister(input));
- } else if (input->IsStackSlot()) {
- MemOperand value = ToMemOperand(input);
-#if !V8_TARGET_LITTLE_ENDIAN
-#if V8_TARGET_ARCH_S390X
- __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
-#else
- __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
-#endif
-#else
- __ TestIfSmi(value);
-#endif
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
-}
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- if (input->IsRegister()) {
- __ TestIfSmi(ToRegister(input));
- } else if (input->IsStackSlot()) {
- MemOperand value = ToMemOperand(input);
-#if !V8_TARGET_LITTLE_ENDIAN
-#if V8_TARGET_ARCH_S390X
- __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
-#else
- __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
-#endif
-#else
- __ TestIfSmi(value);
-#endif
- } else {
- UNIMPLEMENTED();
- }
- DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
- }
-}
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = scratch0();
-
- __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
- __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
-}
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
- Operand(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
- Operand(last));
- DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ AndP(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr,
- DeoptimizeReason::kWrongInstanceType);
- } else {
- __ AndP(scratch, Operand(mask));
- __ CmpP(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*object)) {
- Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ mov(ip, Operand(cell));
- __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
- } else {
- __ CmpP(reg, Operand(object));
- }
- DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
-}
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Register temp = ToRegister(instr->temp());
- Label deopt, done;
- // If the map is not deprecated the migration attempt does not make sense.
- __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- __ LoadlW(temp, FieldMemOperand(temp, Map::kBitField3Offset));
- __ TestBitMask(temp, Map::Deprecated::kMask, r0);
- __ beq(&deopt);
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ LoadImmP(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(instr->pointer_map(), 1,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r2, temp);
- }
- __ TestIfSmi(temp);
- __ bne(&done);
-
- __ bind(&deopt);
- // In case of "al" condition the operand is not used so just pass cr0 there.
- DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
-
- __ bind(&done);
-}
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(reg, map, &success);
- __ beq(&success);
- }
-
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(reg, map, &success);
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ bne(deferred->entry());
- } else {
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ bind(&success);
-}
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
-}
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
-
- // Check for heap number
- __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ CmpP(scratch, Operand(factory()->heap_number_map()));
- __ beq(&heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ CmpP(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ LoadImmP(result_reg, Operand::Zero());
- __ b(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ LoadDouble(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
- __ b(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ ClampUint8(result_reg, result_reg);
-
- __ bind(&done);
-}
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
-
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ LoadIntLiteral(scratch, size);
- } else {
- scratch = ToRegister(instr->size());
- }
- __ lay(scratch, MemOperand(scratch, -kPointerSize));
- Label loop;
- __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- __ bind(&loop);
- __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
-#if V8_TARGET_ARCH_S390X
- __ lay(scratch, MemOperand(scratch, -kPointerSize));
-#else
- // TODO(joransiu): Improve the following sequence.
-    // Need to use AHI instead of LAY, as the top nibble is not set with LAY,
-    // causing an incorrect result with the signed compare.
- __ AddP(scratch, Operand(-kPointerSize));
-#endif
- __ CmpP(scratch, Operand::Zero());
- __ bge(&loop);
- }
-}
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ LoadSmiLiteral(result, Smi::kZero);
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(size);
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-#if !V8_TARGET_ARCH_S390X
- if (size >= 0 && size <= Smi::kMaxValue) {
-#endif
- __ Push(Smi::FromInt(size));
-#if !V8_TARGET_ARCH_S390X
- } else {
- // We should never get here at runtime => abort
- __ stop("invalid allocation size");
- return;
- }
-#endif
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Push(Smi::FromInt(flags));
-
- CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(r2, result);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-    // If the allocation folding dominator allocation triggered a GC, allocation
-    // happened in the runtime. We have to reset the top pointer to virtually
- // undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- Register top_address = scratch0();
- __ SubP(r2, r2, Operand(kHeapObjectTag));
- __ mov(top_address, Operand(allocation_top));
- __ StoreP(r2, MemOperand(top_address));
- __ AddP(r2, r2, Operand(kHeapObjectTag));
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register scratch1 = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, scratch1, scratch2, flags);
- }
-}
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->value()).is(r5));
- DCHECK(ToRegister(instr->result()).is(r2));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ mov(r2, Operand(isolate()->factory()->number_string()));
- __ b(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
- instr->type_literal());
- if (final_branch_condition != kNoCondition) {
- EmitBranch(instr, final_branch_condition);
- }
-}
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
- Register input, Handle<String> type_name) {
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
- final_branch_condition = lt;
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ beq(true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ beq(false_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ ExtractBit(r0, scratch, Map::kIsUndetectable);
- __ CmpP(r0, Operand::Zero());
- final_branch_condition = ne;
-
- } else if (String::Equals(type_name, factory->function_string())) {
- __ JumpIfSmi(input, false_label);
- __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ AndP(scratch, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ CmpP(scratch, Operand(1 << Map::kIsCallable));
- final_branch_condition = eq;
-
- } else if (String::Equals(type_name, factory->object_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ beq(true_label);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
- __ blt(false_label);
- // Check for callable or undetectable objects => false.
- __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ AndP(r0, scratch,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ CmpP(r0, Operand::Zero());
- final_branch_condition = eq;
-
- } else {
- __ b(false_label);
- }
-
- return final_branch_condition;
-}
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % 2);
- while (padding_size > 0) {
- __ nop();
- padding_size -= 2;
- }
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
-
- DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
-}
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ bge(&done, Label::kNear);
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(cp));
- CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new (zone()) DeferredStackCheck(this, instr);
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ blt(deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r2);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ CmpSmiLiteral(result, Smi::kZero, r0);
- __ bne(&load_cache, Label::kNear);
- __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ b(&done, Label::kNear);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ LoadP(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ CmpP(result, Operand::Zero());
- DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
-
- __ bind(&done);
-}
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ CmpP(map, scratch0());
- DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
-}
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register result, Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object, index);
- __ LoadImmP(cp, Operand::Zero());
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(instr->pointer_map(), 2,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r2, result);
-}
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
- Register result, Register object, Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- result_(result),
- object_(object),
- index_(index) {}
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register result_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- DeferredLoadMutableDouble* deferred;
- deferred = new (zone())
- DeferredLoadMutableDouble(this, instr, result, object, index);
-
- Label out_of_object, done;
-
- __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
- __ bne(deferred->entry());
- __ ShiftRightArithP(index, index, Operand(1));
-
- __ CmpP(index, Operand::Zero());
- __ blt(&out_of_object, Label::kNear);
-
- __ SmiToPtrArrayOffset(r0, index);
- __ AddP(scratch, object, r0);
- __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ b(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-  // Index is equal to the negated out-of-object property index plus 1.
- __ SmiToPtrArrayOffset(r0, index);
- __ SubP(scratch, result, r0);
- __ LoadP(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
deleted file mode 100644
index a8d59ff5b1..0000000000
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
-#define V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
-
-#include "src/ast/scopes.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
-#include "src/crankshaft/s390/lithium-s390.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen : public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- LinkRegisterStatus GetLinkRegisterState() const {
- return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LConstantOperand must be an Integer32 or Smi
- void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- intptr_t ToRepresentation(LConstantOperand* op,
- const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
- LOperand* temp1, LOperand* temp2,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
- Register object, Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key, Register base,
- bool key_is_constant, bool key_is_tagged,
- int constant_key, int element_size_shift,
- int base_offset,
- bool keyMaybeNegative = true);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
-// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- Register scratch0() { return kLithiumScratch; }
- DoubleRegister double_scratch0() { return kScratchDoubleReg; }
-
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
- LInstruction* instr, SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* function, int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void LoadContextFromDeferred(LOperand* context);
- void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
- LInstruction* instr, LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in r4.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
- void DeoptimizeIf(Condition condition, LInstruction* instr,
- DeoptimizeReason deopt_reason, CRegister cr = cr7);
-
- void AddToTranslation(LEnvironment* environment, Translation* translation,
- LOperand* op, bool is_tagged, bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
-
- MemOperand BuildSeqStringOperand(Register string, LOperand* index,
- String::Encoding encoding);
-
- void EmitMathAbs(LMathAbs* instr);
-#if V8_TARGET_ARCH_S390X
- void EmitInteger32MathAbs(LMathAbs* instr);
-#endif
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template <class InstrType>
- void EmitBranch(InstrType instr, Condition condition);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition condition);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- DoubleRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
- int* offset, AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiler from a set of parallel moves to a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen);
-
- ~PushSafepointRegistersScope();
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_S390_LITHIUM_CODEGEN_S390_H_
diff --git a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc
deleted file mode 100644
index cffcede226..0000000000
--- a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.cc
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
-
-#include "src/crankshaft/s390/lithium-codegen-s390.h"
-
-namespace v8 {
-namespace internal {
-
-static const Register kSavedValueRegister = {1};
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- root_index_(0),
- in_cycle_(false),
- saved_destination_(NULL) {}
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
-      root_index_ = i;  // Any cycle is found by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
-  // We can only find a cycle when doing a depth-first traversal of moves,
-  // by encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack allocated local. Multiple moves can
- // be pending because this function is recursive.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move it must be moves_[root_index_]
- // and all other moves with the same source as moves_[root_index_] are
-      // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
- DCHECK(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ LoadRR(kSavedValueRegister, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- __ ldr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- __ LoadDouble(kScratchDoubleReg, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-void LGapResolver::RestoreValue() {
- DCHECK(in_cycle_);
- DCHECK(saved_destination_ != NULL);
-
-  // Spilled value is in kSavedValueRegister or kScratchDoubleReg.
- if (saved_destination_->IsRegister()) {
- __ LoadRR(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
- } else if (saved_destination_->IsStackSlot()) {
- __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- __ ldr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- __ StoreDouble(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ LoadRR(cgen_->ToRegister(destination), source_register);
- } else {
- DCHECK(destination->IsStackSlot());
- __ StoreP(source_register, cgen_->ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ LoadP(cgen_->ToRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- __ LoadP(ip, source_operand);
- __ StoreP(ip, destination_operand);
- } else {
- __ LoadP(kSavedValueRegister, source_operand);
- __ StoreP(kSavedValueRegister, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- cgen_->EmitLoadIntegerConstant(constant_source, dst);
- } else {
- __ Move(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- DoubleRegister result = cgen_->ToDoubleRegister(destination);
- double v = cgen_->ToDouble(constant_source);
- __ LoadDoubleLiteral(result, v, ip);
- } else {
- DCHECK(destination->IsStackSlot());
- DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
- cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
- } else {
- __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
- }
- __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ ldr(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ StoreDouble(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ LoadDouble(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
-// kScratchDoubleReg was used to break the cycle,
-// but kSavedValueRegister is free.
-#if V8_TARGET_ARCH_S390X
- __ lg(kSavedValueRegister, source_operand);
- __ stg(kSavedValueRegister, destination_operand);
-#else
- MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ LoadlW(kSavedValueRegister, source_operand);
- __ StoreW(kSavedValueRegister, destination_operand);
- __ LoadlW(kSavedValueRegister, source_high_operand);
- __ StoreW(kSavedValueRegister, destination_high_operand);
-#endif
- } else {
- __ LoadDouble(kScratchDoubleReg, source_operand);
- __ StoreDouble(kScratchDoubleReg, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h b/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h
deleted file mode 100644
index 087224c861..0000000000
--- a/deps/v8/src/crankshaft/s390/lithium-gap-resolver-s390.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
-#define V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_S390_LITHIUM_GAP_RESOLVER_S390_H_
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
deleted file mode 100644
index 79868f5579..0000000000
--- a/deps/v8/src/crankshaft/s390/lithium-s390.cc
+++ /dev/null
@@ -1,2156 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/s390/lithium-s390.h"
-
-#include <sstream>
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/s390/lithium-codegen-s390.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
- }
-}
-#endif
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD:
- return "add-d";
- case Token::SUB:
- return "sub-d";
- case Token::MUL:
- return "mul-d";
- case Token::DIV:
- return "div-d";
- case Token::MOD:
- return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD:
- return "add-t";
- case Token::SUB:
- return "sub-t";
- case Token::MUL:
- return "mul-t";
- case Token::MOD:
- return "mod-t";
- case Token::DIV:
- return "div-t";
- case Token::BIT_AND:
- return "bit-and-t";
- case Token::BIT_OR:
- return "bit-or-t";
- case Token::BIT_XOR:
- return "bit-xor-t";
- case Token::ROR:
- return "ror-t";
- case Token::SHL:
- return "shl-t";
- case Token::SAR:
- return "sar-t";
- case Token::SHR:
- return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(), true_block_id(),
- false_block_id());
-}
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip an extra slot when allocating a double-width spill slot.
- if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
- return current_frame_slots_++;
-}
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new (zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value,
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
-}
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new (zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
-}
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr, int index) {
- return Define(instr,
- new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If the instruction does not have side effects, lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach an environment to this call even if
- // the call sequence cannot deoptimize eagerly.
- bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new (zone()) LPointerMap(zone()));
- return instr;
-}
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-LUnallocated* LChunkBuilder::TempDoubleRegister() {
- LUnallocated* operand =
- new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new (zone()) LLabel(instr->block());
-}
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
-}
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new (zone()) LDeoptimize);
-}
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use a fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
- return CpuFeatures::IsSupported(VECTOR_FACILITY)
- ? DefineAsRegister(result)
- : DefineSameAsFirst(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left_operand = UseFixed(left, r3);
- LOperand* right_operand = UseFixed(right, r2);
- LArithmeticT* result =
- new (zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new (zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new (zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new (zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it were just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new (zone()) LGoto(instr->FirstSuccessor());
-}
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LInstruction* branch = new (zone()) LBranch(UseRegister(value));
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new (zone()) LDebugBreak();
-}
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
- return new (zone()) LCmpMapAndBranch(value, temp);
-}
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new (zone()) LArgumentsLength(value));
-}
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new (zone()) LArgumentsElements);
-}
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
- return AssignEnvironment(result);
-}
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), r3);
- LOperand* receiver = UseFixed(instr->receiver(), r2);
- LOperand* length = UseFixed(instr->length(), r4);
- LOperand* elements = UseFixed(instr->elements(), r5);
- LApplyArguments* result =
- new (zone()) LApplyArguments(function, receiver, length, elements);
- return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = Use(instr->argument(i));
- AddInstruction(new (zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new (zone()) LStoreCodeEntry(function, code_object);
-}
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(new (zone())
- LInnerAllocatedObject(base_object, offset));
-}
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses() ? NULL
- : DefineAsRegister(new (zone()) LThisFunction);
-}
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new (zone()) LContext, cp);
- }
-
- return DefineAsRegister(new (zone()) LContext);
-}
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
-}
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), cp);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result =
- new (zone()) LCallWithDescriptor(descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseFixed(instr->function(), r3);
- LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, r2), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathCos:
- return DoMathCos(instr);
- case kMathSin:
- return DoMathSin(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFloor* result = new (zone()) LMathFloor(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempDoubleRegister();
- LMathRound* result = new (zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- Representation r = instr->value()->representation();
- LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
- ? NULL
- : UseFixed(instr->context(), cp);
- LOperand* input = UseRegister(instr->value());
- LInstruction* result =
- DefineAsRegister(new (zone()) LMathAbs(context, input));
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new (zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseAtStart(instr->value());
- LMathSqrt* result = new (zone()) LMathSqrt(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathPowHalf* result = new (zone()) LMathPowHalf(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r3);
- LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r2), instr);
-}
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineAsRegister(new (zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LDivI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
- (!instr->IsMathFloorOfDiv() &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
- ? NULL
- : TempRegister();
- LInstruction* result = DefineAsRegister(
- new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- (instr->CheckFlag(HValue::kCanOverflow) &&
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result =
- DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result =
- DefineAsRegister(new (zone()) LModI(dividend, divisor));
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- HValue* left = instr->BetterLeftOperand();
- HValue* right = instr->BetterRightOperand();
- LOperand* left_op;
- LOperand* right_op;
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-
- int32_t constant_value = 0;
- if (right->IsConstant()) {
- HConstant* constant = HConstant::cast(right);
- constant_value = constant->Integer32Value();
- // Constants -1, 0 and 1 can use a constant operand even if the result
- // can overflow; other constants can do so only when overflow is impossible.
- if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
- left_op = UseRegisterAtStart(left);
- right_op = UseConstant(right);
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- } else {
- if (bailout_on_minus_zero) {
- left_op = UseRegister(left);
- } else {
- left_op = UseRegisterAtStart(left);
- }
- right_op = UseRegister(right);
- }
- LMulI* mul = new (zone()) LMulI(left_op, right_op);
- if (right_op->IsConstantOperand()
- ? ((can_overflow && constant_value == -1) ||
- (bailout_on_minus_zero && constant_value <= 0))
- : (can_overflow || bailout_on_minus_zero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
-
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new (zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegister(mul->left());
- LOperand* multiplicand_op = UseRegister(mul->right());
- LOperand* addend_op = UseRegister(addend);
- return DefineAsRegister(
- new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
-}
-
-LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
- LOperand* minuend_op = UseRegister(minuend);
- LOperand* multiplier_op = UseRegister(mul->left());
- LOperand* multiplicand_op = UseRegister(mul->right());
-
- return DefineAsRegister(
- new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
-}
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- LAddI* add = new (zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LAddI* add = new (zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegister(instr->left());
- right = UseRegister(instr->right());
- }
- return DefineAsRegister(new (zone()) LMathMinMax(left, right));
-}
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), d2)
- : UseFixed(instr->right(), r4);
- LPower* result = new (zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r3);
- LOperand* right = UseFixed(instr->right(), r2);
- LCmpT* result = new (zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new (zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new (zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new (zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LCmpHoleAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new (zone()) LIsStringAndBranch(value, temp);
-}
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r3);
- LOperand* right = UseFixed(instr->right(), r2);
- LStringCompareAndBranch* result =
- new (zone()) LStringCompareAndBranch(context, left, right);
- return MarkAsCall(result, instr);
-}
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LHasInstanceTypeAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return new (zone()) LClassOfTestAndBranch(value, TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
-}
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
- return new (zone()) LSeqStringSetChar(context, string, index, value);
-}
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseRegisterOrConstantAtStart(instr->length())
- : UseRegisterAtStart(instr->length());
- LInstruction* result = new (zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new (zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineAsRegister(new (zone()) LNumberUntagD(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new (zone()) LDummyUse(value));
- }
- return AssignEnvironment(
- DefineSameAsFirst(new (zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new (zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempDoubleRegister();
- LInstruction* result =
- DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new (zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- LOperand* value = UseRegisterAtStart(val);
- return DefineAsRegister(new (zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- } else {
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseAtStart(instr->value());
- LInstruction* result = new (zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new (zone()) LCheckSmi(value));
-}
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new (zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new (zone()) LCheckValue(value));
-}
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LInstruction* result =
- AssignEnvironment(new (zone()) LCheckMaps(value, temp));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new (zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new (zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- LClampTToUint8* result =
- new (zone()) LClampTToUint8(reg, TempDoubleRegister());
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new (zone())
- LReturn(UseFixed(instr->value(), r2), context, parameter_count);
-}
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new (zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new (zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new (zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new (zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new (zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new (zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new (zone()) LStoreContextSlot(context, value);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new (zone()) LLoadNamedField(obj));
-}
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new (zone()) LLoadRoot);
-}
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseRegister(instr->elements());
- } else {
- obj = UseRegisterAtStart(instr->elements());
- }
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK((instr->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- if (needs_write_barrier) {
- object = UseTempRegister(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- }
-
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- }
-
- DCHECK((instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK(instr->elements()->representation().IsExternal());
- LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), r2);
- LOperand* context = UseFixed(instr->context(), cp);
- LTransitionElementsKind* result =
- new (zone()) LTransitionElementsKind(object, context, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LTrapAllocationMemento* result =
- new (zone()) LTrapAllocationMemento(object, temp1, temp2);
- return AssignEnvironment(result);
-}
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, r2);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map =
- instr->has_transition() && instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new (zone()) LStoreNamedField(obj, val, temp);
-}
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* left = UseFixed(instr->left(), r3);
- LOperand* right = UseFixed(instr->right(), r2);
- return MarkAsCall(
- DefineFixed(new (zone()) LStringAdd(context, left, right), r2), instr);
-}
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new (zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new (zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = UseRegisterOrConstant(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- if (instr->IsAllocationFolded()) {
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new (zone()) LOsrEntry);
-}
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new (zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
-}
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseRegisterOrConstantAtStart(instr->length());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
-}
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(), r5);
- LTypeof* result = new (zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
-}
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(new (zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new (zone()) LStackCheck(context)));
- }
-}
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new (zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer =
- current_block_->last_environment()->DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object = UseFixed(instr->enumerable(), r2);
- LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, r2), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(
- DefineAsRegister(new (zone()) LForInCacheArray(map)));
-}
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
-}
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
deleted file mode 100644
index f9710b1092..0000000000
--- a/deps/v8/src/crankshaft/s390/lithium-s390.h
+++ /dev/null
@@ -1,2248 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_S390_LITHIUM_S390_H_
-#define V8_CRANKSHAFT_S390_LITHIUM_S390_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadRoot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathCos) \
- V(MathSin) \
- V(MathExp) \
- V(MathFloor) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(MultiplySubD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {}
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
-// Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
-// Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator support.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits : public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-// R = number of result operands (0 or 1).
-template <int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template <int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block) : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const override { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new (zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) {}
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) {}
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-template <int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-class LModI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LModI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByConstI(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-class LDivI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplySubD(LOperand* minuend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = minuend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* minuend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
-};
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const { return hydrogen()->representation().IsDouble(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRound(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-class LBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) {}
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : descriptor_(descriptor),
- inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- const CallInterfaceDescriptor descriptor() { return descriptor_; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- CallInterfaceDescriptor descriptor_;
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- bool NeedsCanonicalization() {
- if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
- hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
- return false;
- }
- return hydrogen()->NeedsCanonicalization();
- }
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
- public:
- LTransitionElementsKind(LOperand* object, LOperand* context,
- LOperand* new_map_temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
- public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = object;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
-};
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-class LAllocate final : public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
-};
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-};
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
-// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only at
- // the instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand that must be a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LUnallocated* TempDoubleRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly and we do not attach environment to this
- // instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr, HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_S390_LITHIUM_S390_H_
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
deleted file mode 100644
index c82a3ca637..0000000000
--- a/deps/v8/src/crankshaft/typing.cc
+++ /dev/null
@@ -1,802 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/typing.h"
-
-#include "src/ast/compile-time-value.h"
-#include "src/ast/scopes.h"
-#include "src/ast/variables.h"
-#include "src/frames-inl.h"
-#include "src/frames.h"
-#include "src/ostreams.h"
-#include "src/splay-tree-inl.h"
-
-namespace v8 {
-namespace internal {
-
-AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
- DeclarationScope* scope, BailoutId osr_ast_id,
- FunctionLiteral* root, AstTypeBounds* bounds)
- : isolate_(isolate),
- zone_(zone),
- closure_(closure),
- scope_(scope),
- osr_ast_id_(osr_ast_id),
- root_(root),
- oracle_(isolate, zone, handle(closure->shared()->code()),
- handle(closure->feedback_vector()),
- handle(closure->context()->native_context())),
- store_(zone),
- bounds_(bounds) {
- InitializeAstVisitor(isolate);
-}
-
-
-#ifdef OBJECT_PRINT
-static void PrintObserved(Variable* var, Object* value, AstType* type) {
- OFStream os(stdout);
- os << " observed " << (var->IsParameter() ? "param" : "local") << " ";
- var->name()->Print(os);
- os << " : " << Brief(value) << " -> ";
- type->PrintTo(os);
- os << std::endl;
-}
-#endif // OBJECT_PRINT
-
-
-Effect AstTyper::ObservedOnStack(Object* value) {
- AstType* lower = AstType::NowOf(value, zone());
- return Effect(AstBounds(lower, AstType::Any()));
-}
-
-
-void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
- if (stmt->OsrEntryId() != osr_ast_id_) return;
-
- DisallowHeapAllocation no_gc;
- JavaScriptFrameIterator it(isolate_);
- JavaScriptFrame* frame = it.frame();
-
- // Assert that the frame on the stack belongs to the function we want to OSR.
- DCHECK_EQ(*closure_, frame->function());
-
- int params = scope_->num_parameters();
- int locals = scope_->StackLocalCount();
-
- // Use sequential composition to achieve desired narrowing.
- // The receiver is a parameter with index -1.
- store_.Seq(parameter_index(-1), ObservedOnStack(frame->receiver()));
- for (int i = 0; i < params; i++) {
- store_.Seq(parameter_index(i), ObservedOnStack(frame->GetParameter(i)));
- }
-
- for (int i = 0; i < locals; i++) {
- store_.Seq(stack_local_index(i), ObservedOnStack(frame->GetExpression(i)));
- }
-
-#ifdef OBJECT_PRINT
- if (FLAG_trace_osr && FLAG_print_scopes) {
- PrintObserved(scope_->receiver(), frame->receiver(),
- store_.LookupBounds(parameter_index(-1)).lower);
-
- for (int i = 0; i < params; i++) {
- PrintObserved(scope_->parameter(i), frame->GetParameter(i),
- store_.LookupBounds(parameter_index(i)).lower);
- }
-
- int local_index = 0;
- for (Variable* var : *scope_->locals()) {
- if (var->IsStackLocal()) {
- PrintObserved(
- var, frame->GetExpression(local_index),
- store_.LookupBounds(stack_local_index(local_index)).lower);
- local_index++;
- }
- }
- }
-#endif // OBJECT_PRINT
-}
-
-
-#define RECURSE(call) \
- do { \
- DCHECK(!HasStackOverflow()); \
- call; \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-void AstTyper::Run() {
- RECURSE(VisitDeclarations(scope_->declarations()));
- RECURSE(VisitStatements(root_->body()));
-}
-
-
-void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0; i < stmts->length(); ++i) {
- Statement* stmt = stmts->at(i);
- RECURSE(Visit(stmt));
- if (stmt->IsJump()) break;
- }
-}
-
-
-void AstTyper::VisitBlock(Block* stmt) {
- RECURSE(VisitStatements(stmt->statements()));
- if (stmt->labels() != NULL) {
- store_.Forget(); // Control may transfer here via 'break l'.
- }
-}
-
-
-void AstTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
- RECURSE(Visit(stmt->expression()));
-}
-
-
-void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
-}
-
-
-void AstTyper::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* stmt) {
- Visit(stmt->statement());
-}
-
-
-void AstTyper::VisitIfStatement(IfStatement* stmt) {
- // Collect type feedback.
- if (!stmt->condition()->ToBooleanIsTrue() &&
- !stmt->condition()->ToBooleanIsFalse()) {
- stmt->condition()->RecordToBooleanTypeFeedback(oracle());
- }
-
- RECURSE(Visit(stmt->condition()));
- Effects then_effects = EnterEffects();
- RECURSE(Visit(stmt->then_statement()));
- ExitEffects();
- Effects else_effects = EnterEffects();
- RECURSE(Visit(stmt->else_statement()));
- ExitEffects();
- then_effects.Alt(else_effects);
- store_.Seq(then_effects);
-}
-
-
-void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
- // TODO(rossberg): is it worth having a non-termination effect?
-}
-
-
-void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
- // TODO(rossberg): is it worth having a non-termination effect?
-}
-
-
-void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
- // Collect type feedback.
- // TODO(rossberg): we only need this for inlining into test contexts...
- stmt->expression()->RecordToBooleanTypeFeedback(oracle());
-
- RECURSE(Visit(stmt->expression()));
- // TODO(rossberg): is it worth having a non-termination effect?
-}
-
-
-void AstTyper::VisitWithStatement(WithStatement* stmt) {
- RECURSE(Visit(stmt->expression()));
- RECURSE(Visit(stmt->statement()));
-}
-
-
-void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
- RECURSE(Visit(stmt->tag()));
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- Effects local_effects(zone());
- bool complex_effects = false; // True for label effects or fall-through.
-
- for (int i = 0; i < clauses->length(); ++i) {
- CaseClause* clause = clauses->at(i);
-
- Effects clause_effects = EnterEffects();
-
- if (!clause->is_default()) {
- Expression* label = clause->label();
- // Collect type feedback.
- AstType* tag_type;
- AstType* label_type;
- AstType* combined_type;
- oracle()->CompareType(clause->CompareId(),
- clause->CompareOperationFeedbackSlot(), &tag_type,
- &label_type, &combined_type);
- NarrowLowerType(stmt->tag(), tag_type);
- NarrowLowerType(label, label_type);
- clause->set_compare_type(combined_type);
-
- RECURSE(Visit(label));
- if (!clause_effects.IsEmpty()) complex_effects = true;
- }
-
- ZoneList<Statement*>* stmts = clause->statements();
- RECURSE(VisitStatements(stmts));
- ExitEffects();
- if (stmts->is_empty() || stmts->last()->IsJump()) {
- local_effects.Alt(clause_effects);
- } else {
- complex_effects = true;
- }
- }
-
- if (complex_effects) {
- store_.Forget(); // Reached this in unknown state.
- } else {
- store_.Seq(local_effects);
- }
-}
-
-
-void AstTyper::VisitCaseClause(CaseClause* clause) {
- UNREACHABLE();
-}
-
-
-void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
- // Collect type feedback.
- if (!stmt->cond()->ToBooleanIsTrue()) {
- stmt->cond()->RecordToBooleanTypeFeedback(oracle());
- }
-
- // TODO(rossberg): refine the unconditional Forget (here and elsewhere) by
- // computing the set of variables assigned in only some of the origins of the
- // control transfer (such as the loop body here).
- store_.Forget(); // Control may transfer here via looping or 'continue'.
- ObserveTypesAtOsrEntry(stmt);
- RECURSE(Visit(stmt->body()));
- RECURSE(Visit(stmt->cond()));
- store_.Forget(); // Control may transfer here via 'break'.
-}
-
-
-void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
- // Collect type feedback.
- if (!stmt->cond()->ToBooleanIsTrue()) {
- stmt->cond()->RecordToBooleanTypeFeedback(oracle());
- }
-
- store_.Forget(); // Control may transfer here via looping or 'continue'.
- RECURSE(Visit(stmt->cond()));
- ObserveTypesAtOsrEntry(stmt);
- RECURSE(Visit(stmt->body()));
- store_.Forget(); // Control may transfer here via termination or 'break'.
-}
-
-
-void AstTyper::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) {
- RECURSE(Visit(stmt->init()));
- }
- store_.Forget(); // Control may transfer here via looping.
- if (stmt->cond() != NULL) {
- // Collect type feedback.
- stmt->cond()->RecordToBooleanTypeFeedback(oracle());
-
- RECURSE(Visit(stmt->cond()));
- }
- ObserveTypesAtOsrEntry(stmt);
- RECURSE(Visit(stmt->body()));
- if (stmt->next() != NULL) {
- store_.Forget(); // Control may transfer here via 'continue'.
- RECURSE(Visit(stmt->next()));
- }
- store_.Forget(); // Control may transfer here via termination or 'break'.
-}
-
-
-void AstTyper::VisitForInStatement(ForInStatement* stmt) {
- // Collect type feedback.
- stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
- oracle()->ForInType(stmt->ForInFeedbackSlot())));
-
- RECURSE(Visit(stmt->enumerable()));
- store_.Forget(); // Control may transfer here via looping or 'continue'.
- ObserveTypesAtOsrEntry(stmt);
- RECURSE(Visit(stmt->body()));
- store_.Forget(); // Control may transfer here via 'break'.
-}
-
-void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {}
-
-void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Effects try_effects = EnterEffects();
- RECURSE(Visit(stmt->try_block()));
- ExitEffects();
- Effects catch_effects = EnterEffects();
- store_.Forget(); // Control may transfer here via 'throw'.
- RECURSE(Visit(stmt->catch_block()));
- ExitEffects();
- try_effects.Alt(catch_effects);
- store_.Seq(try_effects);
- // At this point, only variables that were reassigned in the catch block are
- // still remembered.
-}
-
-
-void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- RECURSE(Visit(stmt->try_block()));
- store_.Forget(); // Control may transfer here via 'throw'.
- RECURSE(Visit(stmt->finally_block()));
-}
-
-
-void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
- store_.Forget(); // May do whatever.
-}
-
-
-void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {}
-
-
-void AstTyper::VisitClassLiteral(ClassLiteral* expr) {}
-
-
-void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
-}
-
-
-void AstTyper::VisitDoExpression(DoExpression* expr) {
- RECURSE(VisitBlock(expr->block()));
- RECURSE(VisitVariableProxy(expr->result()));
- NarrowType(expr, bounds_->get(expr->result()));
-}
-
-
-void AstTyper::VisitConditional(Conditional* expr) {
- // Collect type feedback.
- expr->condition()->RecordToBooleanTypeFeedback(oracle());
-
- RECURSE(Visit(expr->condition()));
- Effects then_effects = EnterEffects();
- RECURSE(Visit(expr->then_expression()));
- ExitEffects();
- Effects else_effects = EnterEffects();
- RECURSE(Visit(expr->else_expression()));
- ExitEffects();
- then_effects.Alt(else_effects);
- store_.Seq(then_effects);
-
- NarrowType(expr,
- AstBounds::Either(bounds_->get(expr->then_expression()),
- bounds_->get(expr->else_expression()), zone()));
-}
-
-
-void AstTyper::VisitVariableProxy(VariableProxy* expr) {
- Variable* var = expr->var();
- if (var->IsStackAllocated()) {
- NarrowType(expr, store_.LookupBounds(variable_index(var)));
- }
-}
-
-
-void AstTyper::VisitLiteral(Literal* expr) {
- AstType* type = AstType::Constant(expr->value(), zone());
- NarrowType(expr, AstBounds(type));
-}
-
-
-void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
- // TODO(rossberg): Reintroduce RegExp type.
- NarrowType(expr, AstBounds(AstType::Object()));
-}
-
-
-void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
- for (int i = 0; i < properties->length(); ++i) {
- ObjectLiteral::Property* prop = properties->at(i);
-
- // Collect type feedback.
- if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
- !CompileTimeValue::IsCompileTimeValue(prop->value())) ||
- prop->kind() == ObjectLiteral::Property::COMPUTED) {
- if (!prop->is_computed_name() &&
- prop->key()->AsLiteral()->value()->IsInternalizedString() &&
- prop->emit_store()) {
- // Record type feed back for the property.
- FeedbackSlot slot = prop->GetSlot();
- SmallMapList maps;
- oracle()->CollectReceiverTypes(slot, &maps);
- prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
- : Handle<Map>::null());
- }
- }
-
- RECURSE(Visit(prop->value()));
- }
-
- NarrowType(expr, AstBounds(AstType::Object()));
-}
-
-
-void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* values = expr->values();
- for (int i = 0; i < values->length(); ++i) {
- Expression* value = values->at(i);
- RECURSE(Visit(value));
- }
-
- NarrowType(expr, AstBounds(AstType::Object()));
-}
-
-
-void AstTyper::VisitAssignment(Assignment* expr) {
- // Collect type feedback.
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- FeedbackSlot slot = expr->AssignmentSlot();
- expr->set_is_uninitialized(oracle()->StoreIsUninitialized(slot));
- if (!expr->IsUninitialized()) {
- SmallMapList* receiver_types = expr->GetReceiverTypes();
- if (prop->key()->IsPropertyName()) {
- Literal* lit_key = prop->key()->AsLiteral();
- DCHECK(lit_key != NULL && lit_key->value()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
- } else {
- KeyedAccessStoreMode store_mode;
- IcCheckType key_type;
- oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
- &store_mode, &key_type);
- expr->set_store_mode(store_mode);
- expr->set_key_type(key_type);
- }
- }
- }
-
- Expression* rhs =
- expr->is_compound() ? expr->binary_operation() : expr->value();
- RECURSE(Visit(expr->target()));
- RECURSE(Visit(rhs));
- NarrowType(expr, bounds_->get(rhs));
-
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsStackAllocated()) {
- store_.Seq(variable_index(proxy->var()), Effect(bounds_->get(expr)));
- }
-}
-
-void AstTyper::VisitSuspend(Suspend* expr) {
- RECURSE(Visit(expr->generator_object()));
- RECURSE(Visit(expr->expression()));
-
- // We don't know anything about the result type.
-}
-
-
-void AstTyper::VisitThrow(Throw* expr) {
- RECURSE(Visit(expr->exception()));
- // TODO(rossberg): is it worth having a non-termination effect?
-
- NarrowType(expr, AstBounds(AstType::None()));
-}
-
-
-void AstTyper::VisitProperty(Property* expr) {
- // Collect type feedback.
- FeedbackSlot slot = expr->PropertyFeedbackSlot();
- expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
-
- if (!expr->IsUninitialized()) {
- if (expr->key()->IsPropertyName()) {
- Literal* lit_key = expr->key()->AsLiteral();
- DCHECK(lit_key != NULL && lit_key->value()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle()->PropertyReceiverTypes(slot, name, expr->GetReceiverTypes());
- } else {
- bool is_string;
- IcCheckType key_type;
- oracle()->KeyedPropertyReceiverTypes(slot, expr->GetReceiverTypes(),
- &is_string, &key_type);
- expr->set_is_string_access(is_string);
- expr->set_key_type(key_type);
- }
- }
-
- RECURSE(Visit(expr->obj()));
- RECURSE(Visit(expr->key()));
-
- // We don't know anything about the result type.
-}
-
-
-void AstTyper::VisitCall(Call* expr) {
- // Collect type feedback.
- RECURSE(Visit(expr->expression()));
- FeedbackSlot slot = expr->CallFeedbackICSlot();
- bool is_uninitialized = oracle()->CallIsUninitialized(slot);
- if (!expr->expression()->IsProperty() && oracle()->CallIsMonomorphic(slot)) {
- expr->set_target(oracle()->GetCallTarget(slot));
- Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
- expr->set_allocation_site(site);
- }
-
- expr->set_is_uninitialized(is_uninitialized);
-
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
- }
-
- if (expr->is_possibly_eval()) {
- store_.Forget(); // Eval could do whatever to local variables.
- }
-
- // We don't know anything about the result type.
-}
-
-
-void AstTyper::VisitCallNew(CallNew* expr) {
- // Collect type feedback.
- FeedbackSlot allocation_site_feedback_slot = expr->CallNewFeedbackSlot();
- expr->set_allocation_site(
- oracle()->GetCallNewAllocationSite(allocation_site_feedback_slot));
- bool monomorphic =
- oracle()->CallNewIsMonomorphic(expr->CallNewFeedbackSlot());
- expr->set_is_monomorphic(monomorphic);
- if (monomorphic) {
- expr->set_target(oracle()->GetCallNewTarget(expr->CallNewFeedbackSlot()));
- }
-
- RECURSE(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
- }
-
- NarrowType(expr, AstBounds(AstType::None(), AstType::Receiver()));
-}
-
-
-void AstTyper::VisitCallRuntime(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
- }
-
- // We don't know anything about the result type.
-}
-
-
-void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
- // Collect type feedback.
- if (expr->op() == Token::NOT) {
- // TODO(rossberg): only do in test or value context.
- expr->expression()->RecordToBooleanTypeFeedback(oracle());
- }
-
- RECURSE(Visit(expr->expression()));
-
- switch (expr->op()) {
- case Token::NOT:
- case Token::DELETE:
- NarrowType(expr, AstBounds(AstType::Boolean()));
- break;
- case Token::VOID:
- NarrowType(expr, AstBounds(AstType::Undefined()));
- break;
- case Token::TYPEOF:
- NarrowType(expr, AstBounds(AstType::InternalizedString()));
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void AstTyper::VisitCountOperation(CountOperation* expr) {
- // Collect type feedback.
- FeedbackSlot slot = expr->CountSlot();
- KeyedAccessStoreMode store_mode;
- IcCheckType key_type;
- oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
- oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
- expr->set_store_mode(store_mode);
- expr->set_key_type(key_type);
- expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId(),
- expr->CountBinaryOpFeedbackSlot()));
- // TODO(rossberg): merge the count type with the generic expression type.
-
- RECURSE(Visit(expr->expression()));
-
- NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
-
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsStackAllocated()) {
- store_.Seq(variable_index(proxy->var()), Effect(bounds_->get(expr)));
- }
-}
-
-void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
- // Collect type feedback.
- AstType* type;
- AstType* left_type;
- AstType* right_type;
- Maybe<int> fixed_right_arg = Nothing<int>();
- Handle<AllocationSite> allocation_site;
- oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- expr->BinaryOperationFeedbackSlot(), &left_type,
- &right_type, &type, &fixed_right_arg, &allocation_site,
- expr->op());
-
- NarrowLowerType(expr, type);
- NarrowLowerType(expr->left(), left_type);
- NarrowLowerType(expr->right(), right_type);
- expr->set_allocation_site(allocation_site);
- expr->set_fixed_right_arg(fixed_right_arg);
- if (expr->op() == Token::OR || expr->op() == Token::AND) {
- expr->left()->RecordToBooleanTypeFeedback(oracle());
- }
-
- switch (expr->op()) {
- case Token::COMMA:
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- NarrowType(expr, bounds_->get(expr->right()));
- break;
- case Token::OR:
- case Token::AND: {
- Effects left_effects = EnterEffects();
- RECURSE(Visit(expr->left()));
- ExitEffects();
- Effects right_effects = EnterEffects();
- RECURSE(Visit(expr->right()));
- ExitEffects();
- left_effects.Alt(right_effects);
- store_.Seq(left_effects);
-
- NarrowType(expr, AstBounds::Either(bounds_->get(expr->left()),
- bounds_->get(expr->right()), zone()));
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND: {
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- AstType* upper =
- AstType::Union(bounds_->get(expr->left()).upper,
- bounds_->get(expr->right()).upper, zone());
- if (!upper->Is(AstType::Signed32())) upper = AstType::Signed32();
- AstType* lower =
- AstType::Intersect(AstType::SignedSmall(), upper, zone());
- NarrowType(expr, AstBounds(lower, upper));
- break;
- }
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SAR:
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Signed32()));
- break;
- case Token::SHR:
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- // TODO(rossberg): The upper bound would be Unsigned32, but since there
- // is no 'positive Smi' type for the lower bound, we use the smallest
- // union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
- break;
- case Token::ADD: {
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- AstBounds l = bounds_->get(expr->left());
- AstBounds r = bounds_->get(expr->right());
- AstType* lower =
- !l.lower->IsInhabited() || !r.lower->IsInhabited()
- ? AstType::None()
- : l.lower->Is(AstType::String()) || r.lower->Is(AstType::String())
- ? AstType::String()
- : l.lower->Is(AstType::Number()) &&
- r.lower->Is(AstType::Number())
- ? AstType::SignedSmall()
- : AstType::None();
- AstType* upper =
- l.upper->Is(AstType::String()) || r.upper->Is(AstType::String())
- ? AstType::String()
- : l.upper->Is(AstType::Number()) && r.upper->Is(AstType::Number())
- ? AstType::Number()
- : AstType::NumberOrString();
- NarrowType(expr, AstBounds(lower, upper));
- break;
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- NarrowType(expr, AstBounds(AstType::SignedSmall(), AstType::Number()));
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void AstTyper::VisitCompareOperation(CompareOperation* expr) {
- // Collect type feedback.
- AstType* left_type;
- AstType* right_type;
- AstType* combined_type;
- oracle()->CompareType(expr->CompareOperationFeedbackId(),
- expr->CompareOperationFeedbackSlot(), &left_type,
- &right_type, &combined_type);
- NarrowLowerType(expr->left(), left_type);
- NarrowLowerType(expr->right(), right_type);
- expr->set_combined_type(combined_type);
-
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
-
- NarrowType(expr, AstBounds(AstType::Boolean()));
-}
-
-
-void AstTyper::VisitSpread(Spread* expr) { UNREACHABLE(); }
-
-
-void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
- UNREACHABLE();
-}
-
-void AstTyper::VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
-
-void AstTyper::VisitImportCallExpression(ImportCallExpression* expr) {
- UNREACHABLE();
-}
-
-void AstTyper::VisitThisFunction(ThisFunction* expr) {}
-
-
-void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
-
-
-void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
-
-
-void AstTyper::VisitRewritableExpression(RewritableExpression* expr) {
- Visit(expr->expression());
-}
-
-int AstTyper::variable_index(Variable* var) {
- // Stack locals have the range [0 .. l]
- // Parameters have the range [-1 .. p]
- // We map this to [-p-2 .. -1, 0 .. l]
- return var->IsStackLocal()
- ? stack_local_index(var->index())
- : var->IsParameter() ? parameter_index(var->index()) : kNoVar;
-}
-
-void AstTyper::VisitDeclarations(Declaration::List* decls) {
- for (Declaration* decl : *decls) {
- RECURSE(Visit(decl));
- }
-}
-
-
-void AstTyper::VisitVariableDeclaration(VariableDeclaration* declaration) {
-}
-
-
-void AstTyper::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
- RECURSE(Visit(declaration->fun()));
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/typing.h b/deps/v8/src/crankshaft/typing.h
deleted file mode 100644
index add457bfe3..0000000000
--- a/deps/v8/src/crankshaft/typing.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_TYPING_H_
-#define V8_CRANKSHAFT_TYPING_H_
-
-#include "src/allocation.h"
-#include "src/ast/ast-type-bounds.h"
-#include "src/ast/ast-types.h"
-#include "src/ast/ast.h"
-#include "src/ast/variables.h"
-#include "src/effects.h"
-#include "src/type-info.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class DeclarationScope;
-class Isolate;
-class FunctionLiteral;
-
-class AstTyper final : public AstVisitor<AstTyper> {
- public:
- AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
- DeclarationScope* scope, BailoutId osr_ast_id, FunctionLiteral* root,
- AstTypeBounds* bounds);
- void Run();
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- Effect ObservedOnStack(Object* value);
- void ObserveTypesAtOsrEntry(IterationStatement* stmt);
-
- static const int kNoVar = INT_MIN;
- typedef v8::internal::Effects<int, kNoVar> Effects;
- typedef v8::internal::NestedEffects<int, kNoVar> Store;
-
- Isolate* isolate_;
- Zone* zone_;
- Handle<JSFunction> closure_;
- DeclarationScope* scope_;
- BailoutId osr_ast_id_;
- FunctionLiteral* root_;
- TypeFeedbackOracle oracle_;
- Store store_;
- AstTypeBounds* bounds_;
-
- Zone* zone() const { return zone_; }
- TypeFeedbackOracle* oracle() { return &oracle_; }
-
- void NarrowType(Expression* e, AstBounds b) {
- bounds_->set(e, AstBounds::Both(bounds_->get(e), b, zone()));
- }
- void NarrowLowerType(Expression* e, AstType* t) {
- bounds_->set(e, AstBounds::NarrowLower(bounds_->get(e), t, zone()));
- }
-
- Effects EnterEffects() {
- store_ = store_.Push();
- return store_.Top();
- }
- void ExitEffects() { store_ = store_.Pop(); }
-
- int parameter_index(int index) { return -index - 2; }
- int stack_local_index(int index) { return index; }
-
- int variable_index(Variable* var);
-
- void VisitDeclarations(Declaration::List* declarations);
- void VisitStatements(ZoneList<Statement*>* statements);
-
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(AstTyper);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_TYPING_H_
diff --git a/deps/v8/src/crankshaft/unique.h b/deps/v8/src/crankshaft/unique.h
deleted file mode 100644
index 4c6a0976f8..0000000000
--- a/deps/v8/src/crankshaft/unique.h
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_UNIQUE_H_
-#define V8_CRANKSHAFT_UNIQUE_H_
-
-#include <ostream> // NOLINT(readability/streams)
-
-#include "src/assert-scope.h"
-#include "src/base/functional.h"
-#include "src/handles.h"
-#include "src/utils.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-
-template <typename T>
-class UniqueSet;
-
-
-// Represents a handle to an object on the heap, but with the additional
-// ability of checking for equality and hashing without accessing the heap.
-//
-// Creating a Unique<T> requires first dereferencing the handle to obtain
-// the address of the object, which is used as the hashcode and the basis for
-// comparison. The object can be moved later by the GC, but comparison
-// and hashing use the old address of the object, without dereferencing it.
-//
-// Careful! Comparison of two Uniques is only correct if both were created
-// in the same "era" of GC or if at least one is a non-movable object.
-template <typename T>
-class Unique final {
- public:
- Unique<T>() : raw_address_(NULL) {}
-
- // TODO(titzer): make private and introduce a uniqueness scope.
- explicit Unique(Handle<T> handle) {
- if (handle.is_null()) {
- raw_address_ = NULL;
- } else {
- // This is a best-effort check to prevent comparing Unique<T>'s created
- // in different GC eras; we require heap allocation to be disallowed at
- // creation time.
- // NOTE: we currently consider maps to be non-movable, so no special
- // assurance is required for creating a Unique<Map>.
- // TODO(titzer): other immortal immovable objects are also fine.
- DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
- raw_address_ = reinterpret_cast<Address>(*handle);
- DCHECK_NOT_NULL(raw_address_); // Non-null should imply non-zero address.
- }
- handle_ = handle;
- }
-
- // Constructor for handling automatic upcasting.
- // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected.
- template <class S> Unique(Unique<S> uniq) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
-#endif
- raw_address_ = uniq.raw_address_;
- handle_ = uniq.handle_;
- }
-
- template <typename U>
- inline bool operator==(const Unique<U>& other) const {
- DCHECK(IsInitialized() && other.IsInitialized());
- return raw_address_ == other.raw_address_;
- }
-
- template <typename U>
- inline bool operator!=(const Unique<U>& other) const {
- DCHECK(IsInitialized() && other.IsInitialized());
- return raw_address_ != other.raw_address_;
- }
-
- friend inline size_t hash_value(Unique<T> const& unique) {
- DCHECK(unique.IsInitialized());
- return base::hash<void*>()(unique.raw_address_);
- }
-
- inline intptr_t Hashcode() const {
- DCHECK(IsInitialized());
- return reinterpret_cast<intptr_t>(raw_address_);
- }
-
- inline bool IsNull() const {
- DCHECK(IsInitialized());
- return raw_address_ == NULL;
- }
-
- inline bool IsKnownGlobal(void* global) const {
- DCHECK(IsInitialized());
- return raw_address_ == reinterpret_cast<Address>(global);
- }
-
- inline Handle<T> handle() const {
- return handle_;
- }
-
- template <class S> static Unique<T> cast(Unique<S> that) {
- // Allow fetching location() to unsafe-cast the handle. This is necessary
- // since we can't concurrently safe-cast. Safe-casting requires looking at
- // the heap which may be moving concurrently to the compiler thread.
- AllowHandleDereference allow_deref;
- return Unique<T>(that.raw_address_,
- Handle<T>(reinterpret_cast<T**>(that.handle_.location())));
- }
-
- inline bool IsInitialized() const {
- return raw_address_ != NULL || handle_.is_null();
- }
-
- // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
- static Unique<T> CreateUninitialized(Handle<T> handle) {
- return Unique<T>(NULL, handle);
- }
-
- static Unique<T> CreateImmovable(Handle<T> handle) {
- return Unique<T>(reinterpret_cast<Address>(*handle), handle);
- }
-
- private:
- Unique(Address raw_address, Handle<T> handle)
- : raw_address_(raw_address), handle_(handle) {}
-
- Address raw_address_;
- Handle<T> handle_;
-
- friend class UniqueSet<T>; // Uses internal details for speed.
- template <class U>
- friend class Unique; // For comparing raw_address values.
-};
-
-template <typename T>
-inline std::ostream& operator<<(std::ostream& os, Unique<T> uniq) {
- return os << Brief(*uniq.handle());
-}
-
-
-template <typename T>
-class UniqueSet final : public ZoneObject {
- public:
- // Constructor. A new set will be empty.
- UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
-
- // Capacity constructor. A new set will be empty.
- UniqueSet(int capacity, Zone* zone)
- : size_(0), capacity_(capacity),
- array_(zone->NewArray<Unique<T> >(capacity)) {
- DCHECK(capacity <= kMaxCapacity);
- }
-
- // Singleton constructor.
- UniqueSet(Unique<T> uniq, Zone* zone)
- : size_(1), capacity_(1), array_(zone->NewArray<Unique<T> >(1)) {
- array_[0] = uniq;
- }
-
- // Add a new element to this unique set. Mutates this set. O(|this|).
- void Add(Unique<T> uniq, Zone* zone) {
- DCHECK(uniq.IsInitialized());
- // Keep the set sorted by the {raw_address} of the unique elements.
- for (int i = 0; i < size_; i++) {
- if (array_[i] == uniq) return;
- if (array_[i].raw_address_ > uniq.raw_address_) {
- // Insert in the middle.
- Grow(size_ + 1, zone);
- for (int j = size_ - 1; j >= i; j--) array_[j + 1] = array_[j];
- array_[i] = uniq;
- size_++;
- return;
- }
- }
- // Append the element to the end.
- Grow(size_ + 1, zone);
- array_[size_++] = uniq;
- }
-
- // Remove an element from this set. Mutates this set. O(|this|)
- void Remove(Unique<T> uniq) {
- for (int i = 0; i < size_; i++) {
- if (array_[i] == uniq) {
- while (++i < size_) array_[i - 1] = array_[i];
- size_--;
- return;
- }
- }
- }
-
- // Compare this set against another set. O(|this|).
- bool Equals(const UniqueSet<T>* that) const {
- if (that->size_ != this->size_) return false;
- for (int i = 0; i < this->size_; i++) {
- if (this->array_[i] != that->array_[i]) return false;
- }
- return true;
- }
-
- // Check whether this set contains the given element. O(|this|)
- // TODO(titzer): use binary search for large sets to make this O(log|this|)
- template <typename U>
- bool Contains(const Unique<U> elem) const {
- for (int i = 0; i < this->size_; ++i) {
- Unique<T> cand = this->array_[i];
- if (cand.raw_address_ >= elem.raw_address_) {
- return cand.raw_address_ == elem.raw_address_;
- }
- }
- return false;
- }
-
- // Check if this set is a subset of the given set. O(|this| + |that|).
- bool IsSubset(const UniqueSet<T>* that) const {
- if (that->size_ < this->size_) return false;
- int j = 0;
- for (int i = 0; i < this->size_; i++) {
- Unique<T> sought = this->array_[i];
- while (true) {
- if (sought == that->array_[j++]) break;
- // Fail whenever there are more elements in {this} than {that}.
- if ((this->size_ - i) > (that->size_ - j)) return false;
- }
- }
- return true;
- }
-
- // Returns a new set representing the intersection of this set and the other.
- // O(|this| + |that|).
- UniqueSet<T>* Intersect(const UniqueSet<T>* that, Zone* zone) const {
- if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
-
- UniqueSet<T>* out = new(zone) UniqueSet<T>(
- Min(this->size_, that->size_), zone);
-
- int i = 0, j = 0, k = 0;
- while (i < this->size_ && j < that->size_) {
- Unique<T> a = this->array_[i];
- Unique<T> b = that->array_[j];
- if (a == b) {
- out->array_[k++] = a;
- i++;
- j++;
- } else if (a.raw_address_ < b.raw_address_) {
- i++;
- } else {
- j++;
- }
- }
-
- out->size_ = k;
- return out;
- }
-
- // Returns a new set representing the union of this set and the other.
- // O(|this| + |that|).
- UniqueSet<T>* Union(const UniqueSet<T>* that, Zone* zone) const {
- if (that->size_ == 0) return this->Copy(zone);
- if (this->size_ == 0) return that->Copy(zone);
-
- UniqueSet<T>* out = new(zone) UniqueSet<T>(
- this->size_ + that->size_, zone);
-
- int i = 0, j = 0, k = 0;
- while (i < this->size_ && j < that->size_) {
- Unique<T> a = this->array_[i];
- Unique<T> b = that->array_[j];
- if (a == b) {
- out->array_[k++] = a;
- i++;
- j++;
- } else if (a.raw_address_ < b.raw_address_) {
- out->array_[k++] = a;
- i++;
- } else {
- out->array_[k++] = b;
- j++;
- }
- }
-
- while (i < this->size_) out->array_[k++] = this->array_[i++];
- while (j < that->size_) out->array_[k++] = that->array_[j++];
-
- out->size_ = k;
- return out;
- }
-
- // Returns a new set representing all elements from this set which are not in
- // that set. O(|this| * |that|).
- UniqueSet<T>* Subtract(const UniqueSet<T>* that, Zone* zone) const {
- if (that->size_ == 0) return this->Copy(zone);
-
- UniqueSet<T>* out = new(zone) UniqueSet<T>(this->size_, zone);
-
- int i = 0, j = 0;
- while (i < this->size_) {
- Unique<T> cand = this->array_[i];
- if (!that->Contains(cand)) {
- out->array_[j++] = cand;
- }
- i++;
- }
-
- out->size_ = j;
- return out;
- }
-
- // Makes an exact copy of this set. O(|this|).
- UniqueSet<T>* Copy(Zone* zone) const {
- UniqueSet<T>* copy = new(zone) UniqueSet<T>(this->size_, zone);
- copy->size_ = this->size_;
- memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>));
- return copy;
- }
-
- void Clear() {
- size_ = 0;
- }
-
- inline int size() const {
- return size_;
- }
-
- inline Unique<T> at(int index) const {
- DCHECK(index >= 0 && index < size_);
- return array_[index];
- }
-
- private:
- // These sets should be small, since operations are implemented with simple
- // linear algorithms. Enforce a maximum size.
- static const int kMaxCapacity = 65535;
-
- uint16_t size_;
- uint16_t capacity_;
- Unique<T>* array_;
-
- // Grow the size of internal storage to be at least {size} elements.
- void Grow(int size, Zone* zone) {
- CHECK(size < kMaxCapacity); // Enforce maximum size.
- if (capacity_ < size) {
- int new_capacity = 2 * capacity_ + size;
- if (new_capacity > kMaxCapacity) new_capacity = kMaxCapacity;
- Unique<T>* new_array = zone->NewArray<Unique<T> >(new_capacity);
- if (size_ > 0) {
- memcpy(new_array, array_, size_ * sizeof(Unique<T>));
- }
- capacity_ = new_capacity;
- array_ = new_array;
- }
- }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_UNIQUE_H_
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
deleted file mode 100644
index 3eddd47bc4..0000000000
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ /dev/null
@@ -1,5436 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/crankshaft/x64/lithium-codegen-x64.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
-}
-
-
-#ifdef _MSC_VER
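-// On Windows the stack is committed one guard page at a time, so after
-// reserving a large frame in a single subp we touch every page of the new
-// area, starting nearest the already-committed region, so that later stores
-// into the frame land on committed memory instead of faulting.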
-void LCodeGen::MakeSureStackPagesMapped(int offset) {
- const int kPageSize = 4 * KB;
- for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ movp(Operand(rsp, offset), rax);
- }
-}
-#endif
-
-
-void LCodeGen::SaveCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ Movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::from_code(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-void LCodeGen::RestoreCallerDoubles() {
- DCHECK(info()->saves_caller_doubles());
- DCHECK(NeedsEagerFrame());
- Comment(";;; Restore clobbered callee double registers");
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ Movsd(XMMRegister::from_code(save_iterator.Current()),
- MemOperand(rsp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- DCHECK(!frame_is_built_);
- frame_is_built_ = true;
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- }
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ subp(rsp, Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- __ Push(rax);
- __ Set(rax, slots);
- __ Set(kScratchRegister, kSlotsZapValue);
- Label loop;
- __ bind(&loop);
- __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
- kScratchRegister);
- __ decl(rax);
- __ j(not_zero, &loop);
- __ Pop(rax);
- } else {
- __ subp(rsp, Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- }
-
- if (info()->saves_caller_doubles()) {
- SaveCallerDoubles();
- }
- }
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info_->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is still in rdi.
- int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ Push(rdi);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of FastNewFunctionContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(rdi);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in rax. It replaces the context passed to us.
- // It's saved in the stack and kept live in rsi.
- __ movp(rsi, rax);
- __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
-
- // Copy any necessary parameters into the context.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movp(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ movp(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ subp(rsp, Immediate(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
-}
-
-
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
- if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
- instr->hydrogen_value()->representation().IsInteger32() &&
- instr->result()->IsRegister()) {
- __ AssertZeroExtended(ToRegister(instr->result()));
- }
-
- if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
- // We sign extend the dehoisted key at the definition point when the pointer
- // size is 64-bit. For the x32 port, we sign extend the dehoisted key at the
- // use points and MustSignExtendResult is always false. We can't use
- // STATIC_ASSERT here as the pointer size is 32-bit for x32.
- DCHECK(kPointerSize == kInt64Size);
- if (instr->result()->IsRegister()) {
- Register result_reg = ToRegister(instr->result());
- __ movsxlq(result_reg, result_reg);
- } else {
- // Sign extend the 32bit result in the stack slots.
- DCHECK(instr->result()->IsStackSlot());
- Operand src = ToOperand(instr->result());
- __ movsxlq(kScratchRegister, src);
- __ movq(src, kScratchRegister);
- }
- }
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- if (jump_table_.length() == 0) return !is_aborted();
-
- Label needs_frame;
- Comment(";;; -------------------- Jump table --------------------");
- for (int i = 0; i < jump_table_.length(); i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- __ call(&needs_frame);
- } else {
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
-
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- /* stack layout
- 3: return address <-- rsp
- 2: garbage
- 1: garbage
- 0: garbage
- */
- // Reserve space for stub marker.
- __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
- __ Push(MemOperand(
- rsp, TypedFrameConstants::kFrameTypeSize)); // Copy return address.
- __ Push(kScratchRegister);
-
- /* stack layout
- 3: return address
- 2: garbage
- 1: return address
- 0: entry address <-- rsp
- */
-
- // Create a stack frame.
- __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
- __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));
-
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ movp(MemOperand(rsp, 2 * kPointerSize),
- Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
-
- /* stack layout
- 3: old rbp
- 2: stub marker
- 1: return address
- 0: entry address <-- rsp
- */
- __ ret(0);
- }
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that rsi isn't trashed.
- __ pushq(rbp); // Caller's frame pointer.
- __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- __ bind(code->done());
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- frame_is_built_ = false;
- __ movp(rsp, rbp);
- __ popq(rbp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::from_code(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::from_code(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsExternal();
-}
-
-
-bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
-}
-
-
-bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
-}
-
-
-Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return Smi::FromInt(constant->Integer32Value());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasExternalReferenceValue());
- return constant->ExternalReferenceValue();
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize + kPCOnStackSize;
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- // Does not handle registers. In X64 assembler, plain registers are not
- // representable as an Operand.
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return Operand(rbp, FrameSlotToFPOffset(op->index()));
- } else {
- // Without an eager frame, retrieve the parameter relative to the
- // stack pointer.
- return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(
- environment, translation, value, environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc) {
- DCHECK(instr != NULL);
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
- DCHECK(instr->HasPointerMap());
-
- __ CallRuntime(function, num_arguments, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- if (!ToRegister(context).is(rsi)) {
- __ movp(rsi, ToRegister(context));
- }
- } else if (context->IsStackSlot()) {
- __ movp(rsi, ToOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
-
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, environment->zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
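- // Stress mode (--deopt-every-n-times): count down the per-isolate
- // stress_deopt_count and force an unconditional deopt each time it reaches
- // zero, then reset it to FLAG_deopt_every_n_times.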
- if (DeoptEveryNTimes()) {
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
- __ pushfq();
- __ pushq(rax);
- Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
- __ movl(rax, count_operand);
- __ subl(rax, Immediate(1));
- __ j(not_zero, &no_deopt, Label::kNear);
- if (FLAG_trap_on_deopt) __ int3();
- __ movl(rax, Immediate(FLAG_deopt_every_n_times));
- __ movl(count_operand, rax);
- __ popq(rax);
- __ popfq();
- DCHECK(frame_is_built_);
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&no_deopt);
- __ movl(count_operand, rax);
- __ popq(rax);
- __ popfq();
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
- __ int3();
- __ bind(&done);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- // Go through jump table if we need to handle condition, build frame, or
- // restore caller doubles.
- if (cc == no_condition && frame_is_built_ &&
- !info()->saves_caller_doubles()) {
- DeoptComment(deopt_info);
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
- // We often have several deopts to the same entry; reuse the last
- // jump entry in that case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode, int argc) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(kind == expected_safepoint_kind_);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
-
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
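- // Example: for divisor +/-8 the mask is 7 and a dividend of -13 takes the
- // negative path (neg -> 13, and 7 -> 5, neg -> -5), which is the truncated
- // remainder JavaScript's % requires.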
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ testl(dividend, dividend);
- __ j(not_sign, &dividend_is_not_negative, Label::kNear);
- // Note that this is correct even for kMinInt operands.
- __ negl(dividend);
- __ andl(dividend, Immediate(mask));
- __ negl(dividend);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&dividend_is_not_negative);
- __ andl(dividend, Immediate(mask));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(rax));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
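- // TruncatingDiv leaves dividend / |divisor| (rounded towards zero) in rdx
- // via a magic-number multiplication; the remainder is then rebuilt as
- // dividend - quotient * |divisor|.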
- __ TruncatingDiv(dividend, Abs(divisor));
- __ imull(rdx, rdx, Immediate(Abs(divisor)));
- __ movl(rax, dividend);
- __ subl(rax, rdx);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ j(not_zero, &remainder_not_zero, Label::kNear);
- __ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
-
- Register left_reg = ToRegister(instr->left());
- DCHECK(left_reg.is(rax));
- Register right_reg = ToRegister(instr->right());
- DCHECK(!right_reg.is(rax));
- DCHECK(!right_reg.is(rdx));
- Register result_reg = ToRegister(instr->result());
- DCHECK(result_reg.is(rdx));
-
- Label done;
- // Check for x % 0; idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1; idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &no_overflow_possible, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, 0);
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
-
- // Sign extend dividend in eax into edx:eax, since we are using only the low
- // 32 bits of the values.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
- __ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- __ jmp(&done, Label::kNear);
- __ bind(&positive_left);
- }
- __ idivl(right_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- if (divisor == 1) return;
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ sarl(dividend, Immediate(shift));
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- __ negl(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ sarl(dividend, Immediate(shift));
- return;
- }
-
- Label not_kmin_int, done;
- __ j(no_overflow, &not_kmin_int, Label::kNear);
- __ movl(dividend, Immediate(kMinInt / divisor));
- __ jmp(&done, Label::kNear);
- __ bind(&not_kmin_int);
- __ sarl(dividend, Immediate(shift));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(rdx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ negl(rdx);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp3());
- DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
- Label needs_adjustment, done;
- __ cmpl(dividend, Immediate(0));
- __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ negl(rdx);
- __ jmp(&done, Label::kNear);
- __ bind(&needs_adjustment);
- __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
- __ TruncatingDiv(temp, Abs(divisor));
- if (divisor < 0) __ negl(rdx);
- __ decl(rdx);
- __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
- DCHECK(dividend.is(rax));
- DCHECK(remainder.is(rdx));
- DCHECK(result.is(rax));
- DCHECK(!divisor.is(rax));
- DCHECK(!divisor.is(rdx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ testl(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmpl(dividend, Immediate(kMinInt));
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to rdx (= remainder).
- __ cdq();
- __ idivl(divisor);
-
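- // idivl truncates towards zero. To floor the quotient, subtract one when
- // the remainder is non-zero and its sign differs from the divisor's: the
- // xor moves that sign difference into the sign bit and sarl by 31 turns it
- // into -1 or 0.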
- Label done;
- __ testl(remainder, remainder);
- __ j(zero, &done, Label::kNear);
- __ xorl(remainder, divisor);
- __ sarl(remainder, Immediate(31));
- __ addl(result, remainder);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
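- // Truncating division by |divisor| = 2^shift: negative dividends need a
- // bias of (2^shift - 1) before the arithmetic shift. The sar/shr pair
- // materializes exactly that bias (for shift == 1 the dividend's own sign
- // bit already is the bias).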
- __ Move(result, dividend);
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (shift > 0) {
- // The arithmetic shift is always OK; the 'if' is an optimization only.
- if (shift > 1) __ sarl(result, Immediate(31));
- __ shrl(result, Immediate(32 - shift));
- __ addl(result, dividend);
- __ sarl(result, Immediate(shift));
- }
- if (divisor < 0) __ negl(result);
-}
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(rdx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ negl(rdx);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ movl(rax, rdx);
- __ imull(rax, rax, Immediate(divisor));
- __ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- DCHECK(dividend.is(rax));
- DCHECK(remainder.is(rdx));
- DCHECK(ToRegister(instr->result()).is(rax));
- DCHECK(!divisor.is(rax));
- DCHECK(!divisor.is(rdx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ testl(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmpl(dividend, Immediate(kMinInt));
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to rdx (= remainder).
- __ cdq();
- __ idivl(divisor);
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- // Deoptimize if remainder is not 0.
- __ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ movp(kScratchRegister, left);
- } else {
- __ movl(kScratchRegister, left);
- }
- }
-
- bool can_overflow =
- instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right->IsConstantOperand()) {
- int32_t right_value = ToInteger32(LConstantOperand::cast(right));
- if (right_value == -1) {
- __ negl(left);
- } else if (right_value == 0) {
- __ xorl(left, left);
- } else if (right_value == 2) {
- __ addl(left, left);
- } else if (!can_overflow) {
- // If the multiplication is known to not overflow, we
- // can use operations that don't set the overflow flag
- // correctly.
- switch (right_value) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ leal(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shll(left, Immediate(2));
- break;
- case 5:
- __ leal(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shll(left, Immediate(3));
- break;
- case 9:
- __ leal(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shll(left, Immediate(4));
- break;
- default:
- __ imull(left, left, Immediate(right_value));
- break;
- }
- } else {
- __ imull(left, left, Immediate(right_value));
- }
- } else if (right->IsStackSlot()) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger64(left, left);
- __ imulp(left, ToOperand(right));
- } else {
- __ imull(left, ToOperand(right));
- }
- } else {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger64(left, left);
- __ imulp(left, ToRegister(right));
- } else {
- __ imull(left, ToRegister(right));
- }
- }
-
- if (can_overflow) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testp(left, left);
- } else {
- __ testl(left, left);
- }
- __ j(not_zero, &done, Label::kNear);
- if (right->IsConstantOperand()) {
- // Constant can't be represented as 32-bit Smi due to immediate size
- // limit.
- DCHECK(SmiValuesAre32Bits()
- ? !instr->hydrogen_value()->representation().IsSmi()
- : SmiValuesAre31Bits());
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- }
- } else if (right->IsStackSlot()) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ orp(kScratchRegister, ToOperand(right));
- } else {
- __ orl(kScratchRegister, ToOperand(right));
- }
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- } else {
- // Test the non-zero operand for negative sign.
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ orp(kScratchRegister, ToRegister(right));
- } else {
- __ orl(kScratchRegister, ToRegister(right));
- }
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- DCHECK(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int32_t right_operand =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->right()->representation());
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_XOR:
- if (right_operand == int32_t(~0)) {
- __ notl(ToRegister(left));
- } else {
- __ xorl(ToRegister(left), Immediate(right_operand));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (right->IsStackSlot()) {
- switch (instr->op()) {
- case Token::BIT_AND:
- if (instr->IsInteger32()) {
- __ andl(ToRegister(left), ToOperand(right));
- } else {
- __ andp(ToRegister(left), ToOperand(right));
- }
- break;
- case Token::BIT_OR:
- if (instr->IsInteger32()) {
- __ orl(ToRegister(left), ToOperand(right));
- } else {
- __ orp(ToRegister(left), ToOperand(right));
- }
- break;
- case Token::BIT_XOR:
- if (instr->IsInteger32()) {
- __ xorl(ToRegister(left), ToOperand(right));
- } else {
- __ xorp(ToRegister(left), ToOperand(right));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- DCHECK(right->IsRegister());
- switch (instr->op()) {
- case Token::BIT_AND:
- if (instr->IsInteger32()) {
- __ andl(ToRegister(left), ToRegister(right));
- } else {
- __ andp(ToRegister(left), ToRegister(right));
- }
- break;
- case Token::BIT_OR:
- if (instr->IsInteger32()) {
- __ orl(ToRegister(left), ToRegister(right));
- } else {
- __ orp(ToRegister(left), ToRegister(right));
- }
- break;
- case Token::BIT_XOR:
- if (instr->IsInteger32()) {
- __ xorl(ToRegister(left), ToRegister(right));
- } else {
- __ xorp(ToRegister(left), ToRegister(right));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- DCHECK(left->IsRegister());
- if (right->IsRegister()) {
- DCHECK(ToRegister(right).is(rcx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ rorl_cl(ToRegister(left));
- break;
- case Token::SAR:
- __ sarl_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shrl_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case Token::SHL:
- __ shll_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int32_t value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ rorl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sarl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ shrl(ToRegister(left), Immediate(shift_count));
- } else if (instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- if (SmiValuesAre32Bits()) {
- __ shlp(ToRegister(left), Immediate(shift_count));
- } else {
- DCHECK(SmiValuesAre31Bits());
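- // With 31-bit Smis the result must be retagged: when a deopt is possible,
- // shift by count - 1 as an int32 and let Integer32ToSmi perform the final
- // shift so that its overflow flag catches values that no longer fit in a
- // Smi.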
- if (instr->can_deopt()) {
- if (shift_count != 1) {
- __ shll(ToRegister(left), Immediate(shift_count - 1));
- }
- __ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- } else {
- __ shll(ToRegister(left), Immediate(shift_count));
- }
- }
- } else {
- __ shll(ToRegister(left), Immediate(shift_count));
- }
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- int32_t right_operand =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->right()->representation());
- __ subl(ToRegister(left), Immediate(right_operand));
- } else if (right->IsRegister()) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subp(ToRegister(left), ToRegister(right));
- } else {
- __ subl(ToRegister(left), ToRegister(right));
- }
- } else {
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subp(ToRegister(left), ToOperand(right));
- } else {
- __ subl(ToRegister(left), ToOperand(right));
- }
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- Register dst = ToRegister(instr->result());
- if (instr->value() == 0) {
- __ xorl(dst, dst);
- } else {
- __ movl(dst, Immediate(instr->value()));
- }
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Move(ToRegister(instr->result()), instr->value());
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- __ Move(ToDoubleRegister(instr->result()), instr->bits());
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ LoadAddress(ToRegister(instr->result()), instr->value());
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ Move(ToRegister(instr->result()), object);
-}
-
-
-Operand LCodeGen::BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToInteger32(LConstantOperand::cast(index));
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldOperand(string, SeqString::kHeaderSize + offset);
- }
- return FieldOperand(
- string, ToRegister(index),
- encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
- SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register result = ToRegister(instr->result());
- Register string = ToRegister(instr->string());
-
- if (FLAG_debug_code) {
- __ Push(string);
- __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
-
- __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, kUnexpectedStringType);
- __ Pop(string);
- }
-
- Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movzxbl(result, operand);
- } else {
- __ movzxwl(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
-
- if (FLAG_debug_code) {
- Register value = ToRegister(instr->value());
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
- }
-
- Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (instr->value()->IsConstantOperand()) {
- int value = ToInteger32(LConstantOperand::cast(instr->value()));
- DCHECK_LE(0, value);
- if (encoding == String::ONE_BYTE_ENCODING) {
- DCHECK_LE(value, String::kMaxOneByteCharCode);
- __ movb(operand, Immediate(value));
- } else {
- DCHECK_LE(value, String::kMaxUtf16CodeUnit);
- __ movw(operand, Immediate(value));
- }
- } else {
- Register value = ToRegister(instr->value());
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(operand, value);
- } else {
- __ movw(operand, value);
- }
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
-
- Representation target_rep = instr->hydrogen()->representation();
- bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
-
- if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
- if (right->IsConstantOperand()) {
- // No support for smi-immediates for 32-bit SMI.
- DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
- int32_t offset =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->right()->representation());
- if (is_p) {
- __ leap(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
- } else {
- __ leal(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
- }
- } else {
- Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (is_p) {
- __ leap(ToRegister(instr->result()), address);
- } else {
- __ leal(ToRegister(instr->result()), address);
- }
- }
- } else {
- if (right->IsConstantOperand()) {
- // No support for smi-immediates for 32-bit SMI.
- DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
- int32_t right_operand =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->right()->representation());
- if (is_p) {
- __ addp(ToRegister(left), Immediate(right_operand));
- } else {
- __ addl(ToRegister(left), Immediate(right_operand));
- }
- } else if (right->IsRegister()) {
- if (is_p) {
- __ addp(ToRegister(left), ToRegister(right));
- } else {
- __ addl(ToRegister(left), ToRegister(right));
- }
- } else {
- if (is_p) {
- __ addp(ToRegister(left), ToOperand(right));
- } else {
- __ addl(ToRegister(left), ToOperand(right));
- }
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- Register left_reg = ToRegister(left);
- if (right->IsConstantOperand()) {
- Immediate right_imm = Immediate(
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->right()->representation()));
- DCHECK(SmiValuesAre32Bits()
- ? !instr->hydrogen()->representation().IsSmi()
- : SmiValuesAre31Bits());
- __ cmpl(left_reg, right_imm);
- __ j(condition, &return_left, Label::kNear);
- __ movl(left_reg, right_imm);
- } else if (right->IsRegister()) {
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpp(left_reg, right_reg);
- } else {
- __ cmpl(left_reg, right_reg);
- }
- __ j(condition, &return_left, Label::kNear);
- __ movp(left_reg, right_reg);
- } else {
- Operand right_op = ToOperand(right);
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpp(left_reg, right_op);
- } else {
- __ cmpl(left_reg, right_op);
- }
- __ j(condition, &return_left, Label::kNear);
- __ movp(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- Label not_nan, distinct, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- XMMRegister left_reg = ToDoubleRegister(left);
- XMMRegister right_reg = ToDoubleRegister(right);
- __ Ucomisd(left_reg, right_reg);
-    __ j(parity_odd, &not_nan, Label::kNear);  // Neither operand is NaN.
-
- // One of the numbers is NaN. Find which one and return it.
- __ Ucomisd(left_reg, left_reg);
- __ j(parity_even, &return_left, Label::kNear); // left is NaN.
- __ jmp(&return_right, Label::kNear); // right is NaN.
-
- __ bind(&not_nan);
- __ j(not_equal, &distinct, Label::kNear); // left != right.
-
- // left == right
- XMMRegister xmm_scratch = double_scratch0();
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(left_reg, xmm_scratch);
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
-
- // At this point, both left and right are either +0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- __ Orpd(left_reg, right_reg);
- } else {
- __ Andpd(left_reg, right_reg);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&distinct);
- __ j(condition, &return_left, Label::kNear);
-
- __ bind(&return_right);
- __ Movapd(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vaddsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ addsd(left, right);
- }
- break;
- case Token::SUB:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vsubsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ subsd(left, right);
- }
- break;
- case Token::MUL:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vmulsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ mulsd(left, right);
- }
- break;
- case Token::DIV:
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(masm(), AVX);
- __ vdivsd(result, left, right);
- } else {
- DCHECK(result.is(left));
- __ divsd(left, right);
- }
-      // Don't delete this mov. It may improve performance on some CPUs
-      // when there is a (v)mulsd depending on the result.
- __ Movapd(result, result);
- break;
- case Token::MOD: {
- DCHECK(left.is(xmm0));
- DCHECK(right.is(xmm1));
- DCHECK(result.is(xmm0));
- __ PrepareCallCFunction(2);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()), 2);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->left()).is(rdx));
- DCHECK(ToRegister(instr->right()).is(rax));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
- if (right_block == left_block || cc == no_condition) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- if (cc != always) {
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
- int true_block = instr->TrueDestination(chunk_);
- __ j(cc, chunk_->GetAssemblyLabel(true_block));
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
- int false_block = instr->FalseDestination(chunk_);
- __ j(cc, chunk_->GetAssemblyLabel(false_block));
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ int3();
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ testl(reg, reg);
- EmitBranch(instr, not_zero);
- } else if (r.IsSmi()) {
- DCHECK(!info()->IsStub());
- Register reg = ToRegister(instr->value());
- __ testp(reg, reg);
- EmitBranch(instr, not_zero);
- } else if (r.IsDouble()) {
- DCHECK(!info()->IsStub());
- XMMRegister reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(reg, xmm_scratch);
- EmitBranch(instr, not_equal);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(instr, equal);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- __ SmiCompare(reg, Smi::kZero);
- EmitBranch(instr, not_equal);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, no_condition);
- } else if (type.IsHeapNumber()) {
- DCHECK(!info()->IsStub());
- XMMRegister xmm_scratch = double_scratch0();
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- EmitBranch(instr, not_equal);
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- EmitBranch(instr, not_equal);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // true -> true.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, instr->TrueLabel(chunk_));
- // false -> false.
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ j(equal, instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ Cmp(reg, Smi::kZero);
- __ j(equal, instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
- }
-
- const Register map = kScratchRegister;
- if (expected & ToBooleanHint::kNeedsMap) {
- __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, instr->FalseLabel(chunk_));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, instr->TrueLabel(chunk_));
- __ jmp(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CmpInstanceType(map, SYMBOL_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- XMMRegister xmm_scratch = double_scratch0();
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, instr->FalseLabel(chunk_));
- __ jmp(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = not_equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->is_double() ||
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cc = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, instr->FalseLabel(chunk_));
- } else {
- int32_t value;
- if (right->IsConstantOperand()) {
- value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ Cmp(ToRegister(left), Smi::FromInt(value));
- } else {
- __ cmpl(ToRegister(left), Immediate(value));
- }
- } else if (left->IsConstantOperand()) {
- value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- if (right->IsRegister()) {
- __ Cmp(ToRegister(right), Smi::FromInt(value));
- } else {
- __ Cmp(ToOperand(right), Smi::FromInt(value));
- }
- } else if (right->IsRegister()) {
- __ cmpl(ToRegister(right), Immediate(value));
- } else {
- __ cmpl(ToOperand(right), Immediate(value));
- }
- // We commuted the operands, so commute the condition.
- cc = CommuteCondition(cc);
- } else if (instr->hydrogen_value()->representation().IsSmi()) {
- if (right->IsRegister()) {
- __ cmpp(ToRegister(left), ToRegister(right));
- } else {
- __ cmpp(ToRegister(left), ToOperand(right));
- }
- } else {
- if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
- }
- }
- EmitBranch(instr, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- if (instr->right()->IsConstantOperand()) {
- Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
- __ Cmp(left, right);
- } else {
- Register right = ToRegister(instr->right());
- __ cmpp(left, right);
- }
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ Cmp(input_reg, factory()->the_hole_value());
- EmitBranch(instr, equal);
- return;
- }
-
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ Ucomisd(input_reg, input_reg);
- EmitFalseBranch(instr, parity_odd);
-
- __ subp(rsp, Immediate(kDoubleSize));
- __ Movsd(MemOperand(rsp, 0), input_reg);
- __ addp(rsp, Immediate(kDoubleSize));
-
- int offset = sizeof(kHoleNanUpper32);
- __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
- EmitBranch(instr, equal);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
-
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- Condition true_cond = EmitIsString(
- reg, temp, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Condition is_smi;
- if (instr->value()->IsRegister()) {
- Register input = ToRegister(instr->value());
- is_smi = masm()->CheckSmi(input);
- } else {
- Operand input = ToOperand(instr->value());
- is_smi = masm()->CheckSmi(input);
- }
- EmitBranch(instr, is_smi);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(temp, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(instr, not_zero);
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->left()).is(rdx));
- DCHECK(ToRegister(instr->right()).is(rax));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- EmitBranch(instr, equal);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-// Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ j(above_equal, is_true);
- } else {
- __ j(above_equal, is_false);
- }
-
- // Check if the constructor in the map is a function.
- __ GetMapConstructor(temp, temp, kScratchRegister);
-
- // Objects with a non-function constructor have class 'Object'.
- __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
- if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movp(temp,
- FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- DCHECK(class_name->IsInternalizedString());
- __ Cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, equal);
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
-
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = kScratchRegister;
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- Condition is_smi = __ CheckSmi(object);
- EmitFalseBranch(instr, is_smi);
- }
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
- // Deoptimize for proxies.
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
-
- __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- EmitFalseBranch(instr, equal);
- __ cmpp(object_prototype, prototype);
- EmitTrueBranch(instr, equal);
- __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
- __ jmp(&loop);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = TokenToCondition(op, false);
- Label true_value, done;
- __ testp(rax, rax);
- __ j(condition, &true_value, Label::kNear);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
-    // Preserve the return value on the stack and rely on the runtime call
-    // to return the value in the same register. We're leaving the code
-    // managed by the register allocator and tearing down the frame, so it's
-    // safe to write to the context register.
- __ Push(rax);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (info()->saves_caller_doubles()) {
- RestoreCallerDoubles();
- }
- if (NeedsEagerFrame()) {
- __ movp(rsp, rbp);
- __ popq(rbp);
- }
- if (instr->has_constant_parameter_count()) {
- __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
- rcx);
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiToInteger32(reg, reg);
- Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
- __ PopReturnAddressTo(return_addr_reg);
- __ shlp(reg, Immediate(kPointerSizeLog2));
- __ addp(rsp, reg);
- __ jmp(return_addr_reg);
- }
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movp(result, ContextOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Operand target = ContextOperand(context, instr->slot_index());
-
- Label skip_assignment;
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- } else {
- __ j(not_equal, &skip_assignment);
- }
- }
- __ movp(target, value);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- int offset = Context::SlotOffset(instr->slot_index());
- Register scratch = ToRegister(instr->temp());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- scratch,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- if (instr->object()->IsConstantOperand()) {
- DCHECK(result.is(rax));
- __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
- } else {
- Register object = ToRegister(instr->object());
- __ Load(result, MemOperand(object, offset), access.representation());
- }
- return;
- }
-
- Register object = ToRegister(instr->object());
- if (instr->hydrogen()->representation().IsDouble()) {
- DCHECK(access.IsInobject());
- XMMRegister result = ToDoubleRegister(instr->result());
- __ Movsd(result, FieldOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
-
- Representation representation = access.representation();
- if (representation.IsSmi() && SmiValuesAre32Bits() &&
- instr->hydrogen()->representation().IsInteger32()) {
- if (FLAG_debug_code) {
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(object, offset), representation);
- __ AssertSmi(scratch);
- }
-
- // Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
- representation = Representation::Integer32();
- }
- __ Load(result, FieldOperand(object, offset), representation);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ movp(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
-
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- if (const_index >= 0 && const_index < const_length) {
- StackArgumentsAccessor args(arguments, const_length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(result, args.GetArgumentOperand(const_index));
- } else if (FLAG_debug_code) {
- __ int3();
- }
- } else {
- Register length = ToRegister(instr->length());
-    // There are two words between the frame pointer and the last argument.
-    // Subtracting from length accounts for one of them; add one more.
- if (instr->index()->IsRegister()) {
- __ subl(length, ToRegister(instr->index()));
- } else {
- __ subl(length, ToOperand(instr->index()));
- }
- StackArgumentsAccessor args(arguments, length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(result, args.GetArgumentOperand(0));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- Representation key_representation =
- instr->hydrogen()->key()->representation();
- if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- instr->base_offset()));
-
- if (elements_kind == FLOAT32_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ Cvtss2sd(result, operand);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- __ Movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ movsxbl(result, operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ movzxbl(result, operand);
- break;
- case INT16_ELEMENTS:
- __ movsxwl(result, operand);
- break;
- case UINT16_ELEMENTS:
- __ movzxwl(result, operand);
- break;
- case INT32_ELEMENTS:
- __ movl(result, operand);
- break;
- case UINT32_ELEMENTS:
- __ movl(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ testl(result, result);
- DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- LOperand* key = instr->key();
- if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
- instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(ToRegister(key), ToRegister(key));
- }
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset() + sizeof(kHoleNanLower32));
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset());
- __ Movsd(result, double_load_operand);
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- HLoadKeyed* hinstr = instr->hydrogen();
- Register result = ToRegister(instr->result());
- LOperand* key = instr->key();
- bool requires_hole_check = hinstr->RequiresHoleCheck();
- Representation representation = hinstr->representation();
- int offset = instr->base_offset();
-
- if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
- instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(ToRegister(key), ToRegister(key));
- }
- if (representation.IsInteger32() && SmiValuesAre32Bits() &&
- hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
- DCHECK(!requires_hole_check);
- if (FLAG_debug_code) {
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- offset),
- Representation::Smi());
- __ AssertSmi(scratch);
- }
- // Read int value directly from upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
- }
-
- __ Load(result,
- BuildFastArrayOperand(instr->elements(), key,
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS, offset),
- representation);
-
- // Check for the hole value.
- if (requires_hole_check) {
- if (IsFastSmiElementsKind(hinstr->elements_kind())) {
- Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr, DeoptimizeReason::kNotASmi);
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- }
- } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
- // it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ Cmp(FieldOperand(result, PropertyCell::kValueOffset),
- Smi::FromInt(Isolate::kProtectorValid));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
- }
- __ Move(result, isolate()->factory()->undefined_value());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t offset) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- if (key->IsConstantOperand()) {
- int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- return Operand(elements_pointer_reg,
- (constant_value << shift_size) + offset);
- } else {
- // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
- DCHECK(key_representation.IsInteger32());
-
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset);
- }
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ cmpp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ movp(result, rbp);
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
- } else {
- __ movp(result, rbp);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register result = ToRegister(instr->result());
-
- Label done;
-
-  // If there is no arguments adaptor frame, the number of arguments is fixed.
- if (instr->elements()->IsRegister()) {
- __ cmpp(rbp, ToRegister(instr->elements()));
- } else {
- __ cmpp(rbp, ToOperand(instr->elements()));
- }
- __ movl(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiToInteger32(result,
- Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions.
- __ movp(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
-
- // Do not transform the receiver to object for builtins.
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object, dist);
- __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object, dist);
-
- // The receiver should be a JS object.
- Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
- __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ jmp(&receiver_ok, dist);
- __ bind(&global_object);
- __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
- __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
- __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
-
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- DCHECK(receiver.is(rax)); // Used for parameter count.
- DCHECK(function.is(rdi)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(rax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
-
- __ Push(receiver);
- __ movp(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ testl(length, length);
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- StackArgumentsAccessor args(elements, length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ Push(args.GetArgumentOperand(0));
- __ decl(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(rax);
- // It is safe to use rbx, rcx and r8 as scratch registers here given that
-    // 1) we are not going to return to the caller function anyway,
- // 2) rbx (expected number of arguments) will be initialized below.
- PrepareForTailCall(actual, rbx, rcx, r8);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(rax);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in rsi.
- DCHECK(result.is(rsi));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- __ Push(instr->hydrogen()->declarations());
- __ Push(Smi::FromInt(instr->hydrogen()->flags()));
- __ Push(instr->hydrogen()->feedback_vector());
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = rdi;
- LPointerMap* pointers = instr->pointer_map();
-
- if (can_invoke_directly) {
- // Change context.
- __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ Set(rax, arity);
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
- if (is_tail_call) {
- __ Jump(target);
- } else {
- __ Call(target);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
- }
- } else {
- // We need to adapt arguments.
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(rax));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ leave();
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ jmp(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(target);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(target);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
-
- Label slow, allocated, done;
- uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
- available_regs &= ~input_reg.bit();
- if (instr->context()->IsRegister()) {
- // Make sure that the context isn't overwritten in the AllocateHeapNumber
- // macro below.
- available_regs &= ~ToRegister(instr->context()).bit();
- }
-
- Register tmp =
- Register::from_code(base::bits::CountTrailingZeros32(available_regs));
- available_regs &= ~tmp.bit();
- Register tmp2 =
- Register::from_code(base::bits::CountTrailingZeros32(available_regs));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ testl(tmp, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done);
-
- __ AllocateHeapNumber(tmp, tmp2, &slow);
- __ jmp(&allocated, Label::kNear);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- CallRuntimeFromDeferred(
- Runtime::kAllocateHeapNumber, 0, instr, instr->context());
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) __ movp(tmp, rax);
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shlq(tmp2, Immediate(1));
- __ shrq(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ testl(input_reg, input_reg);
- Label is_positive;
- __ j(not_sign, &is_positive, Label::kNear);
- __ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ testp(input_reg, input_reg);
- Label is_positive;
- __ j(not_sign, &is_positive, Label::kNear);
- __ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- DCHECK(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- XMMRegister scratch = double_scratch0();
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ Xorpd(scratch, scratch);
- __ Subsd(scratch, input_reg);
- __ Andpd(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else if (r.IsSmi()) {
- EmitSmiMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- EmitSmiMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
- XMMRegister output_reg = ToDoubleRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- CpuFeatureScope scope(masm(), SSE4_1);
- __ Roundsd(output_reg, input_reg, kRoundDown);
-}
-
-void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
- XMMRegister xmm_scratch = double_scratch0();
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope(masm(), SSE4_1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Deoptimize if minus zero.
- __ Movq(output_reg, input_reg);
- __ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero);
- }
- __ Roundsd(xmm_scratch, input_reg, kRoundDown);
- __ Cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- } else {
- Label negative_sign, done;
- // Deoptimize on unordered.
- __ Xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ Ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
- __ j(below, &negative_sign, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- Label positive_sign;
- __ j(above, &positive_sign, Label::kNear);
- __ Movmskpd(output_reg, input_reg);
- __ testl(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- __ Set(output_reg, 0);
- __ jmp(&done);
- __ bind(&positive_sign);
- }
-
- // Use truncating instruction (OK because input is positive).
- __ Cvttsd2si(output_reg, input_reg);
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- __ jmp(&done, Label::kNear);
-
- // Non-zero negative reaches here.
- __ bind(&negative_sign);
- // Truncate, then compare and compensate.
- __ Cvttsd2si(output_reg, input_reg);
- __ Cvtlsi2sd(xmm_scratch, output_reg);
- __ Ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-
- __ bind(&done);
- }
-}
-
-void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
- XMMRegister xmm_scratch = double_scratch0();
- XMMRegister output_reg = ToDoubleRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- CpuFeatureScope scope(masm(), SSE4_1);
- Label done;
- __ Roundsd(output_reg, input_reg, kRoundUp);
- __ Move(xmm_scratch, -0.5);
- __ Addsd(xmm_scratch, output_reg);
- __ Ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &done, Label::kNear);
- __ Move(xmm_scratch, 1.0);
- __ Subsd(output_reg, xmm_scratch);
- __ bind(&done);
-}
-
-void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
- const XMMRegister xmm_scratch = double_scratch0();
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister input_temp = ToDoubleRegister(instr->temp());
- static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
- static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
-
- Label done, round_to_zero, below_one_half;
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ movq(kScratchRegister, one_half);
- __ Movq(xmm_scratch, kScratchRegister);
- __ Ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half, Label::kNear);
-
- // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
- __ Addsd(xmm_scratch, input_reg);
- __ Cvttsd2si(output_reg, xmm_scratch);
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- __ jmp(&done, dist);
-
- __ bind(&below_one_half);
- __ movq(kScratchRegister, minus_one_half);
- __ Movq(xmm_scratch, kScratchRegister);
- __ Ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero, Label::kNear);
-
- // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
- // compare and compensate.
- __ Movapd(input_temp, input_reg); // Do not alter input_reg.
- __ Subsd(input_temp, xmm_scratch);
- __ Cvttsd2si(output_reg, input_temp);
- // Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-
- __ Cvtlsi2sd(xmm_scratch, output_reg);
- __ Ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done, dist);
- __ subl(output_reg, Immediate(1));
- // No overflow because we already ruled out minint.
- __ jmp(&done, dist);
-
- __ bind(&round_to_zero);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ Movq(output_reg, input_reg);
- __ testq(output_reg, output_reg);
- DeoptimizeIf(negative, instr, DeoptimizeReason::kMinusZero);
- }
- __ Set(output_reg, 0);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister output_reg = ToDoubleRegister(instr->result());
- __ Cvtsd2ss(output_reg, input_reg);
- __ Cvtss2sd(output_reg, output_reg);
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- XMMRegister output = ToDoubleRegister(instr->result());
- if (instr->value()->IsDoubleRegister()) {
- XMMRegister input = ToDoubleRegister(instr->value());
- __ Sqrtsd(output, input);
- } else {
- Operand input = ToOperand(instr->value());
- __ Sqrtsd(output, input);
- }
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = double_scratch0();
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. According to IEEE-754, double-precision
- // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
- __ Movq(xmm_scratch, kScratchRegister);
- __ Ucomisd(xmm_scratch, input_reg);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &sqrt, Label::kNear);
- __ j(carry, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ Xorpd(input_reg, input_reg);
- __ Subsd(input_reg, xmm_scratch);
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ Sqrtsd(input_reg, input_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
-
- Register tagged_exponent = MathPowTaggedDescriptor::exponent();
- DCHECK(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(tagged_exponent));
- DCHECK(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(xmm1));
- DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
- DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
-
- if (exponent_type.IsSmi()) {
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
- __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- __ bind(&no_deopt);
- MathPowStub stub(isolate(), MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(isolate(), MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- DCHECK(exponent_type.IsDouble());
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
- DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1);
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
- DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1);
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
- DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1);
-}
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
- DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1);
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ Lzcntl(result, input);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ movp(rbp, scratch2);
- __ SmiToInteger32(
- caller_args_count_reg,
- Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count.
- __ movp(caller_args_count_reg,
- Immediate(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
- ReturnAddressState::kNotOnStack);
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->function()).is(rdi));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use rbx, rcx and r8 as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) rbx (expected number of arguments) will be initialized below.
- PrepareForTailCall(actual, rbx, rcx, r8);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(rdi, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->constructor()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- __ Set(rax, instr->arity());
- __ Move(rbx, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
- // A single nonzero argument is a length, which requires the holey
- // variant of the elements kind, so look at the first argument.
- __ movp(rcx, Operand(rsp, 0));
- __ testp(rcx, rcx);
- __ j(zero, &packed_case, Label::kNear);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ jmp(&done, Label::kNear);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ leap(result, Operand(base, ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ leap(result, Operand(base, offset, times_1, 0));
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- HStoreNamedField* hinstr = instr->hydrogen();
- Representation representation = instr->representation();
-
- HObjectAccess access = hinstr->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- DCHECK(!hinstr->NeedsWriteBarrier());
- Register value = ToRegister(instr->value());
- if (instr->object()->IsConstantOperand()) {
- DCHECK(value.is(rax));
- LConstantOperand* object = LConstantOperand::cast(instr->object());
- __ store_rax(ToExternalReference(object));
- } else {
- Register object = ToRegister(instr->object());
- __ Store(MemOperand(object, offset), value, representation);
- }
- return;
- }
-
- Register object = ToRegister(instr->object());
- __ AssertNotSmi(object);
-
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsInteger32Constant(LConstantOperand::cast(instr->value())));
- if (!FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!hinstr->has_transition());
- DCHECK(!hinstr->NeedsWriteBarrier());
- XMMRegister value = ToDoubleRegister(instr->value());
- __ Movsd(FieldOperand(object, offset), value);
- return;
- }
-
- if (hinstr->has_transition()) {
- Handle<Map> transition = hinstr->transition_map();
- AddDeprecationDependency(transition);
- if (!hinstr->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
- } else {
- Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, transition);
- __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object,
- kScratchRegister,
- temp,
- kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register write_register = object;
- if (!access.IsInobject()) {
- write_register = ToRegister(instr->temp());
- __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
- }
-
- if (representation.IsSmi() && SmiValuesAre32Bits() &&
- hinstr->value()->representation().IsInteger32()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- if (FLAG_debug_code) {
- Register scratch = kScratchRegister;
- __ Load(scratch, FieldOperand(write_register, offset), representation);
- __ AssertSmi(scratch);
- }
- // Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
- representation = Representation::Integer32();
- }
-
- Operand operand = FieldOperand(write_register, offset);
-
- if (FLAG_unbox_double_fields && representation.IsDouble()) {
- DCHECK(access.IsInobject());
- XMMRegister value = ToDoubleRegister(instr->value());
- __ Movsd(operand, value);
-
- } else if (instr->value()->IsRegister()) {
- Register value = ToRegister(instr->value());
- __ Store(operand, value, representation);
- } else {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
- DCHECK(!hinstr->NeedsWriteBarrier());
- int32_t value = ToInteger32(operand_value);
- if (representation.IsSmi()) {
- __ Move(operand, Smi::FromInt(value));
-
- } else {
- __ movl(operand, Immediate(value));
- }
-
- } else if (IsExternalConstant(operand_value)) {
- DCHECK(!hinstr->NeedsWriteBarrier());
- ExternalReference ptr = ToExternalReference(operand_value);
- __ Move(kScratchRegister, ptr);
- __ movp(operand, kScratchRegister);
- } else {
- Handle<Object> handle_value = ToHandle(operand_value);
- DCHECK(!hinstr->NeedsWriteBarrier());
- __ Move(operand, handle_value);
- }
- }
-
- if (hinstr->NeedsWriteBarrier()) {
- Register value = ToRegister(instr->value());
- Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(write_register,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- hinstr->SmiCheckForWriteBarrier(),
- hinstr->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Representation representation = instr->hydrogen()->length()->representation();
- DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
- DCHECK(representation.IsSmiOrInteger32());
-
- Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
- if (instr->length()->IsConstantOperand()) {
- int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
- Register index = ToRegister(instr->index());
- if (representation.IsSmi()) {
- __ Cmp(index, Smi::FromInt(length));
- } else {
- __ cmpl(index, Immediate(length));
- }
- cc = CommuteCondition(cc);
- } else if (instr->index()->IsConstantOperand()) {
- int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->length()->IsRegister()) {
- Register length = ToRegister(instr->length());
- if (representation.IsSmi()) {
- __ Cmp(length, Smi::FromInt(index));
- } else {
- __ cmpl(length, Immediate(index));
- }
- } else {
- Operand length = ToOperand(instr->length());
- if (representation.IsSmi()) {
- __ Cmp(length, Smi::FromInt(index));
- } else {
- __ cmpl(length, Immediate(index));
- }
- }
- } else {
- Register index = ToRegister(instr->index());
- if (instr->length()->IsRegister()) {
- Register length = ToRegister(instr->length());
- if (representation.IsSmi()) {
- __ cmpp(length, index);
- } else {
- __ cmpl(length, index);
- }
- } else {
- Operand length = ToOperand(instr->length());
- if (representation.IsSmi()) {
- __ cmpp(length, index);
- } else {
- __ cmpl(length, index);
- }
- }
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- Representation key_representation =
- instr->hydrogen()->key()->representation();
- if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign-extend the key because it could be a negative 32-bit value
- // and the dehoisted address computation happens in 64 bits.
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- instr->base_offset()));
-
- if (elements_kind == FLOAT32_ELEMENTS) {
- XMMRegister value(ToDoubleRegister(instr->value()));
- __ Cvtsd2ss(value, value);
- __ Movss(operand, value);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- __ Movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value(ToRegister(instr->value()));
- switch (elements_kind) {
- case INT8_ELEMENTS:
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ movb(operand, value);
- break;
- case INT16_ELEMENTS:
- case UINT16_ELEMENTS:
- __ movw(operand, value);
- break;
- case INT32_ELEMENTS:
- case UINT32_ELEMENTS:
- __ movl(operand, value);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
- LOperand* key = instr->key();
- if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
- instr->hydrogen()->IsDehoisted()) {
- // Sign-extend the key because it could be a negative 32-bit value
- // and the dehoisted address computation happens in 64 bits.
- __ movsxlq(ToRegister(key), ToRegister(key));
- }
- if (instr->NeedsCanonicalization()) {
- XMMRegister xmm_scratch = double_scratch0();
- // Turn potential sNaN value into qNaN.
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Subsd(value, xmm_scratch);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset());
-
- __ Movsd(double_store_operand, value);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- HStoreKeyed* hinstr = instr->hydrogen();
- LOperand* key = instr->key();
- int offset = instr->base_offset();
- Representation representation = hinstr->value()->representation();
-
- if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
- instr->hydrogen()->IsDehoisted()) {
- // Sign-extend the key because it could be a negative 32-bit value
- // and the dehoisted address computation happens in 64 bits.
- __ movsxlq(ToRegister(key), ToRegister(key));
- }
- if (representation.IsInteger32() && SmiValuesAre32Bits()) {
- DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
- if (FLAG_debug_code) {
- Register scratch = kScratchRegister;
- __ Load(scratch,
- BuildFastArrayOperand(instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- offset),
- Representation::Smi());
- __ AssertSmi(scratch);
- }
- // Store int value directly to upper half of the smi.
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
- }
-
- Operand operand =
- BuildFastArrayOperand(instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- offset);
- if (instr->value()->IsRegister()) {
- __ Store(operand, ToRegister(instr->value()), representation);
- } else {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
- int32_t value = ToInteger32(operand_value);
- if (representation.IsSmi()) {
- __ Move(operand, Smi::FromInt(value));
-
- } else {
- __ movl(operand, Immediate(value));
- }
- } else {
- Handle<Object> handle_value = ToHandle(operand_value);
- __ Move(operand, handle_value);
- }
- }
-
- if (hinstr->NeedsWriteBarrier()) {
- Register elements = ToRegister(instr->elements());
- DCHECK(instr->value()->IsRegister());
- Register value = ToRegister(instr->value());
- DCHECK(!key->IsConstantOperand());
- SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- Register key_reg(ToRegister(key));
- __ leap(key_reg, operand);
- __ RecordWrite(elements,
- key_reg,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed,
- hinstr->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = rax;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ jmp(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
- __ j(less_equal, deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ cmpl(ToRegister(key), Immediate(constant_capacity));
- __ j(greater_equal, deferred->entry());
- } else {
- __ cmpl(ToRegister(key), ToRegister(current_capacity));
- __ j(greater_equal, deferred->entry());
- }
-
- if (instr->elements()->IsRegister()) {
- __ movp(result, ToRegister(instr->elements()));
- } else {
- __ movp(result, ToOperand(instr->elements()));
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = rax;
- __ Move(result, Smi::kZero);
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsConstantOperand()) {
- LConstantOperand* constant_object =
- LConstantOperand::cast(instr->object());
- if (IsSmiConstant(constant_object)) {
- Smi* immediate = ToSmi(constant_object);
- __ Move(result, immediate);
- } else {
- Handle<Object> handle_value = ToHandle(constant_object);
- __ Move(result, handle_value);
- }
- } else if (instr->object()->IsRegister()) {
- __ Move(result, ToRegister(instr->object()));
- } else {
- __ movp(result, ToOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
- } else {
- __ Move(rbx, ToRegister(key));
- __ Integer32ToSmi(rbx, rbx);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- Condition is_smi = __ CheckSmi(result);
- DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable);
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
- // Write barrier.
- __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
- kDontSaveFPRegs);
- } else {
- DCHECK(object_reg.is(rax));
- DCHECK(ToRegister(instr->context()).is(rsi));
- PushSafepointRegistersScope scope(this);
- __ Move(rbx, to_map);
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->left()).is(rdx));
- DCHECK(ToRegister(instr->right()).is(rax));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Push(Smi::FromInt(const_index));
- } else {
- Register index = ToRegister(instr->index());
- __ Integer32ToSmi(index, index);
- __ Push(index);
- }
- CallRuntimeFromDeferred(
- Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
- __ AssertSmi(rax);
- __ SmiToInteger32(rax, rax);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- DCHECK(!char_code.is(result));
-
- __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
- __ j(above, deferred->entry());
- __ movsxlq(char_code, char_code);
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movp(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(char_code, char_code);
- __ Push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- DCHECK(output->IsDoubleRegister());
- if (input->IsRegister()) {
- __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
- } else {
- __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2(), SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- if (SmiValuesAre32Bits()) {
- __ Integer32ToSmi(reg, reg);
- } else {
- DCHECK(SmiValuesAre31Bits());
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ Integer32ToSmi(reg, reg);
- __ j(overflow, deferred->entry());
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
- instr_->temp2(), UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmpl(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ Integer32ToSmi(reg, reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness) {
- Label done, slow;
- Register reg = ToRegister(value);
- Register tmp = ToRegister(temp1);
- XMMRegister temp_xmm = ToDoubleRegister(temp2);
-
- // Load value into temp_xmm which will be preserved across potential call to
- // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
- // XMM registers on x64).
- if (signedness == SIGNED_INT32) {
- DCHECK(SmiValuesAre31Bits());
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- __ SmiToInteger32(reg, reg);
- __ xorl(reg, Immediate(0x80000000));
- __ Cvtlsi2sd(temp_xmm, reg);
- } else {
- DCHECK(signedness == UNSIGNED_INT32);
- __ LoadUint32(temp_xmm, reg);
- }
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, &slow);
- __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains
- // an integer value.
- __ Set(reg, 0);
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(rsi)) {
- __ Set(rsi, 0);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, rax);
- }
-
- // Done. Store the value in temp_xmm into the value field of the allocated
- // heap number.
- __ bind(&done);
- __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
-
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Smi::kZero);
-
- {
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(rsi)) {
- __ Move(rsi, 0);
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ movp(kScratchRegister, rax);
- }
- __ movp(reg, kScratchRegister);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- Register output = ToRegister(instr->result());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kOverflow);
- }
- __ Integer32ToSmi(output, input);
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- DCHECK(instr->value()->Equals(instr->result()));
- Register input = ToRegister(instr->value());
- if (instr->needs_check()) {
- Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kNotASmi);
- } else {
- __ AssertSmi(input);
- }
- __ SmiToInteger32(input, input);
-}
-
-
-void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
- XMMRegister result_reg, NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Label convert, load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- // On x64 it is safe to load at heap number offset before evaluating the map
- // check, since all heap objects are at least two words long.
- __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-
- if (can_convert_undefined_to_nan) {
- __ j(not_equal, &convert, Label::kNear);
- } else {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- }
-
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = double_scratch0();
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(xmm_scratch, result_reg);
- __ j(not_equal, &done, Label::kNear);
- __ Movmskpd(kScratchRegister, result_reg);
- __ testl(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
-
- if (can_convert_undefined_to_nan) {
- __ bind(&convert);
-
- // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr,
- DeoptimizeReason::kNotAHeapNumberUndefined);
-
- __ Xorpd(result_reg, result_reg);
- __ Divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
- }
- } else {
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg);
- __ Cvtlsi2sd(result_reg, kScratchRegister);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
- Register input_reg = ToRegister(instr->value());
-
- if (instr->truncating()) {
- Register input_map_reg = kScratchRegister;
- Label truncate;
- Label::Distance truncate_distance =
- DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ movp(input_map_reg, FieldOperand(input_reg, HeapObject::kMapOffset));
- __ JumpIfRoot(input_map_reg, Heap::kHeapNumberMapRootIndex, &truncate,
- truncate_distance);
- __ CmpInstanceType(input_map_reg, ODDBALL_TYPE);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ bind(&truncate);
- __ TruncateHeapNumberToI(input_reg, input_reg);
- } else {
- XMMRegister scratch = ToDoubleRegister(instr->temp());
- DCHECK(!scratch.is(double_scratch0()));
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- __ Movsd(double_scratch0(),
- FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ Cvttsd2si(input_reg, double_scratch0());
- __ Cvtlsi2sd(scratch, input_reg);
- __ Ucomisd(double_scratch0(), scratch);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
- if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, done);
- __ Movmskpd(input_reg, double_scratch0());
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- }
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- DCHECK(input->Equals(instr->result()));
- Register input_reg = ToRegister(input);
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiToInteger32(input_reg, input_reg);
- } else {
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagD(instr, input_reg, result_reg, mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsDoubleRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- __ TruncateDoubleToI(result_reg, input_reg);
- } else {
- Label lost_precision, is_nan, minus_zero, done;
- XMMRegister xmm_scratch = double_scratch0();
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
- &is_nan, &minus_zero, dist);
- __ jmp(&done, dist);
- __ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsDoubleRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- Label lost_precision, is_nan, minus_zero, done;
- XMMRegister xmm_scratch = double_scratch0();
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
- &minus_zero, dist);
- __ jmp(&done, dist);
- __ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- __ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr, DeoptimizeReason::kNotASmi);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr, DeoptimizeReason::kSmi);
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
-
- __ movp(kScratchRegister,
- FieldOperand(view, JSArrayBufferView::kBufferOffset));
- __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
- Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
-
- __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
-
- // If there is only one type in the interval, check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- DeoptimizeReason::kWrongInstanceType);
- } else {
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ andb(kScratchRegister, Immediate(mask));
- __ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Register reg = ToRegister(instr->value());
- __ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Label deopt, done;
- // If the map is not deprecated, the migration attempt does not make sense.
- __ Push(object);
- __ movp(object, FieldOperand(object, HeapObject::kMapOffset));
- __ testl(FieldOperand(object, Map::kBitField3Offset),
- Immediate(Map::Deprecated::kMask));
- __ Pop(object);
- __ j(zero, &deopt);
-
- {
- PushSafepointRegistersScope scope(this);
- __ Push(object);
-
- __ Set(rsi, 0);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-
- __ testp(rax, Immediate(kSmiTagMask));
- }
- __ j(not_zero, &done);
-
- __ bind(&deopt);
- DeoptimizeIf(always, instr, DeoptimizeReason::kInstanceMigrationFailed);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(reg, map);
- __ j(equal, &success, Label::kNear);
- }
-
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(reg, map);
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ j(not_equal, deferred->entry());
- } else {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
- XMMRegister xmm_scratch = double_scratch0();
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- DCHECK(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- DCHECK(instr->unclamped()->Equals(instr->result()));
- Register input_reg = ToRegister(instr->unclamped());
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
- XMMRegister xmm_scratch = double_scratch0();
- Label is_smi, done, heap_number;
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ JumpIfSmi(input_reg, &is_smi, dist);
-
- // Check for heap number
- __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ xorl(input_reg, input_reg);
- __ jmp(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ SmiToInteger32(input_reg, input_reg);
- __ ClampUint8(input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ movl(temp, Immediate((size / kPointerSize) - 1));
- } else {
- temp = ToRegister(instr->size());
- __ sarp(temp, Immediate(kPointerSizeLog2));
- __ decl(temp);
- }
- Label loop;
- __ bind(&loop);
- __ Move(FieldOperand(result, temp, times_pointer_size, 0),
- isolate()->factory()->one_pointer_filler_map());
- __ decl(temp);
- __ j(not_zero, &loop);
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, temp, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, temp, flags);
- }
-}
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Smi::kZero);
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ Integer32ToSmi(size, size);
- __ Push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Push(Smi::FromInt(size));
- }
-
- int flags = 0;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ Push(Smi::FromInt(flags));
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, rax);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- // If the allocation-folding dominator allocation triggered a GC, the
- // allocation happened in the runtime. We have to reset the top pointer to
- // virtually undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- __ subp(rax, Immediate(kHeapObjectTag));
- __ Store(allocation_top, rax);
- __ addp(rax, Immediate(kHeapObjectTag));
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->value()).is(rbx));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ Move(rax, isolate()->factory()->number_string());
- __ jmp(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- DCHECK(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(operand)));
- } else if (operand->IsRegister()) {
- __ Push(ToRegister(operand));
- } else {
- __ Push(ToOperand(operand));
- }
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Condition final_branch_condition = EmitTypeofIs(instr, input);
- if (final_branch_condition != no_condition) {
- EmitBranch(instr, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Handle<String> type_name = instr->type_literal();
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
- int next_block = GetNextEmittedBlock();
-
- Label::Distance true_distance = left_block == next_block ? Label::kNear
- : Label::kFar;
- Label::Distance false_distance = right_block == next_block ? Label::kNear
- : Label::kFar;
- Condition final_branch_condition = no_condition;
- Factory* factory = isolate()->factory();
- if (String::Equals(type_name, factory->number_string())) {
- __ JumpIfSmi(input, true_label, true_distance);
- __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory->string_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- final_branch_condition = below;
-
- } else if (String::Equals(type_name, factory->symbol_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label, true_distance);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, false_label, false_distance);
- __ JumpIfSmi(input, false_label, false_distance);
- // Check for undetectable objects => true.
- __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (String::Equals(type_name, factory->function_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- // Check for callable and not undetectable objects => true.
- __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
- __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
- __ andb(input,
- Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ cmpb(input, Immediate(1 << Map::kIsCallable));
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory->object_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
- __ j(below, false_label, false_distance);
- // Check for callable or undetectable objects => false.
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- final_branch_condition = zero;
-
- } else {
- __ jmp(false_label, false_distance);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
- DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done, Label::kNear);
-
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(rsi));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ Push(rax);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ Cmp(result, Smi::kZero);
- __ j(not_equal, &load_cache, Label::kNear);
- __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ movp(result,
- FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ movp(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr, DeoptimizeReason::kNoCache);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmpp(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ Push(object);
- __ Push(index);
- __ xorp(rsi, rsi);
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(object, rax);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register object,
- Register index)
- : LDeferredCode(codegen),
- instr_(instr),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
-
- Label out_of_object, done;
- __ Move(kScratchRegister, Smi::FromInt(1));
- __ testp(index, kScratchRegister);
- __ j(not_zero, deferred->entry());
-
- __ sarp(index, Immediate(1));
-
- __ SmiToInteger32(index, index);
- __ cmpl(index, Immediate(0));
- __ j(less, &out_of_object, Label::kNear);
- __ movp(object, FieldOperand(object,
- index,
- times_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ negl(index);
- // Index is now equal to the out-of-object property index plus 1.
- __ movp(object, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
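Aside (not part of the deleted file): EnsureSpaceForLazyDeopt above pads the instruction stream with Nops whenever too few bytes were emitted since the previous lazy bailout, so the deoptimizer always has room to patch a call at that point. A minimal standalone sketch of that arithmetic, using hypothetical names and plain ints in place of assembler pc offsets:

#include <algorithm>
#include <cassert>

// Hypothetical mirror of the padding rule: if fewer than `space_needed` bytes
// were emitted since the last lazy bailout, pad with that many Nop bytes.
int LazyDeoptPaddingBytes(int last_lazy_deopt_pc, int current_pc,
                          int space_needed) {
  int emitted_since_bailout = current_pc - last_lazy_deopt_pc;
  return std::max(0, space_needed - emitted_since_bailout);
}

int main() {
  assert(LazyDeoptPaddingBytes(100, 105, 12) == 7);  // 5 bytes emitted, pad 7.
  assert(LazyDeoptPaddingBytes(100, 120, 12) == 0);  // Enough space already.
  return 0;
}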
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
deleted file mode 100644
index 7a8c84d7b2..0000000000
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
-#define V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
-
-
-#include "src/ast/scopes.h"
-#include "src/base/logging.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
-#include "src/crankshaft/x64/lithium-x64.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- bool IsExternalConstant(LConstantOperand* op) const;
- bool IsDehoistedKeyConstant(LConstantOperand* op) const;
- bool IsSmiConstant(LConstantOperand* op) const;
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- Smi* ToSmi(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- ExternalReference ToExternalReference(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register object,
- Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
-
- XMMRegister double_scratch0() const { return kScratchDoubleReg; }
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register scratch);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
-
- void SaveCallerDoubles();
- void RestoreCallerDoubles();
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- void GenerateBodyInstructionPost(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS
- };
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
-
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void LoadContextFromDeferred(LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in rdi.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason);
-
- bool DeoptEveryNTimes() {
- return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
- }
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- Operand BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t base_offset);
-
- Operand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
- void EmitSmiMathAbs(LMathAbs* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition cc);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition cc);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagD(LNumberUntagD* instr, Register input,
- XMMRegister result, NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
-#ifdef _MSC_VER
- // On Windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write an arbitrary value to each page in range
- // rsp + offset - page_size .. rsp in turn.
- void MakeSureStackPagesMapped(int offset);
-#endif
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Resolves a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label done_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
deleted file mode 100644
index 38b7d4525a..0000000000
--- a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
-
-#include "src/crankshaft/x64/lithium-codegen-x64.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack-allocated local. Recursion may allow
- // multiple moves to be pending.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- moves_[index].Eliminate();
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- Register src = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movp(dst, src);
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movp(dst, src);
- }
-
- } else if (source->IsStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movp(dst, src);
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movp(kScratchRegister, src);
- __ movp(dst, kScratchRegister);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmiConstant(constant_source)) {
- __ Move(dst, cgen_->ToSmi(constant_source));
- } else if (cgen_->IsInteger32Constant(constant_source)) {
- int32_t constant = cgen_->ToInteger32(constant_source);
- // Do sign extension only for constants used as de-hoisted array keys.
- // Others only need zero extension, which saves 2 bytes.
- if (cgen_->IsDehoistedKeyConstant(constant_source)) {
- __ Set(dst, constant);
- } else {
- __ Set(dst, static_cast<uint32_t>(constant));
- }
- } else {
- __ Move(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = bit_cast<uint64_t, double>(v);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- if (int_val == 0) {
- __ Xorpd(dst, dst);
- } else {
- __ Set(kScratchRegister, int_val);
- __ Movq(dst, kScratchRegister);
- }
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsSmiConstant(constant_source)) {
- __ Move(dst, cgen_->ToSmi(constant_source));
- } else if (cgen_->IsInteger32Constant(constant_source)) {
- // Do sign extension to 64 bits when stored into stack slot.
- __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movp(dst, kScratchRegister);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ Movapd(cgen_->ToDoubleRegister(destination), src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ Movsd(cgen_->ToOperand(destination), src);
- }
- } else if (source->IsDoubleStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- __ Movsd(cgen_->ToDoubleRegister(destination), src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(cgen_->ToOperand(destination), kScratchDoubleReg);
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Swap two general-purpose registers.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ movp(kScratchRegister, src);
- __ movp(src, dst);
- __ movp(dst, kScratchRegister);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Swap a general-purpose register and a stack slot.
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movp(kScratchRegister, mem);
- __ movp(mem, reg);
- __ movp(reg, kScratchRegister);
-
- } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
- // Swap two stack slots or two double stack slots.
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- __ Movsd(kScratchDoubleReg, src);
- __ movp(kScratchRegister, dst);
- __ Movsd(dst, kScratchDoubleReg);
- __ movp(src, kScratchRegister);
-
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // Swap two double registers.
- XMMRegister source_reg = cgen_->ToDoubleRegister(source);
- XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ Movapd(kScratchDoubleReg, source_reg);
- __ Movapd(source_reg, destination_reg);
- __ Movapd(destination_reg, kScratchDoubleReg);
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // Swap a double register and a double stack slot.
- DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- LOperand* other = source->IsDoubleRegister() ? destination : source;
- DCHECK(other->IsDoubleStackSlot());
- Operand other_operand = cgen_->ToOperand(other);
- __ Movapd(kScratchDoubleReg, reg);
- __ Movsd(reg, other_operand);
- __ Movsd(other_operand, kScratchDoubleReg);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- moves_[index].Eliminate();
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have its source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
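Aside (not part of the deleted file): the resolver above performs parallel moves depth-first, marks a move pending to detect cycles in the move graph, breaks a cycle with a swap, and then rewrites the sources of the remaining moves, as the comments in PerformMove and EmitSwap describe. A minimal standalone sketch of that algorithm over plain integer "registers", with hypothetical names, assuming register-to-register moves only:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical miniature of the resolver: a register file of plain ints and
// moves given as (source, destination) register indices.
struct Move {
  int src;
  int dst;
  bool pending = false;
  bool done = false;
};

void PerformMove(std::vector<Move>& moves, std::vector<int>& regs, size_t index);

void ResolveParallelMoves(std::vector<Move>& moves, std::vector<int>& regs) {
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) PerformMove(moves, regs, i);
  }
}

void PerformMove(std::vector<Move>& moves, std::vector<int>& regs,
                 size_t index) {
  moves[index].pending = true;  // Pending marks let us detect cycles.
  const int dst = moves[index].dst;

  // Depth-first: first perform every move that still reads the register we
  // are about to overwrite.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (i != index && !moves[i].done && !moves[i].pending &&
        moves[i].src == dst) {
      PerformMove(moves, regs, i);
    }
  }

  // A blocker that is still pending means we closed a cycle: break it with a
  // swap, then redirect the sources of all unperformed moves that read one of
  // the two swapped registers.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (i != index && !moves[i].done && moves[i].pending &&
        moves[i].src == dst) {
      const int src = moves[index].src;
      std::swap(regs[src], regs[dst]);
      for (size_t j = 0; j < moves.size(); ++j) {
        if (j == index || moves[j].done) continue;
        if (moves[j].src == src) {
          moves[j].src = dst;
        } else if (moves[j].src == dst) {
          moves[j].src = src;
        }
      }
      moves[index].pending = false;
      moves[index].done = true;
      return;
    }
  }

  regs[dst] = regs[moves[index].src];  // Not blocked: emit a plain move.
  moves[index].pending = false;
  moves[index].done = true;
}

int main() {
  // A two-register cycle (r0 <-> r1) plus an independent move r2 -> r3.
  std::vector<int> regs = {10, 20, 30, 0};
  std::vector<Move> moves = {{0, 1}, {1, 0}, {2, 3}};
  ResolveParallelMoves(moves, regs);
  assert(regs[0] == 20 && regs[1] == 10 && regs[3] == 30);
  return 0;
}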
diff --git a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h
deleted file mode 100644
index 641f0ee69f..0000000000
--- a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#define V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
deleted file mode 100644
index a9aa6ad03b..0000000000
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ /dev/null
@@ -1,2470 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/x64/lithium-x64.h"
-
-#include <sstream>
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/x64/lithium-codegen-x64.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-template<int R>
-bool LTemplateResultInstruction<R>::MustSignExtendResult(
- LPlatformChunk* chunk) const {
- HValue* hvalue = this->hydrogen_value();
- return hvalue != NULL &&
- hvalue->representation().IsInteger32() &&
- chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) {
- // Skip a slot if for a double-width slot for x32 port.
- current_frame_slots_++;
- // The spill slot's address is at rbp - (index + 1) * kPointerSize -
- // StandardFrameConstants::kFixedFrameSizeFromFp. kFixedFrameSizeFromFp is
- // 2 * kPointerSize; if rbp is aligned at an 8-byte boundary, the "|= 1"
- // below makes sure the spilled doubles are aligned at an 8-byte boundary.
- // TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port.
- current_frame_slots_ |= 1;
- }
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- // All stack slots are Double stack slots on x64.
- // Alternatively, at some point, start using half-size
- // stack slots for int32 values.
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseTempRegister(value);
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateResultInstruction<1>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If the instruction does not have side effects, lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach an environment to this call even if
- // the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- if (SmiValuesAre31Bits() && instr->representation().IsSmi() &&
- constant_value > 0) {
- // Left shift can deoptimize if we shift by > 0 and the result
- // cannot be truncated to smi.
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseFixedDouble(instr->BetterLeftOperand(), xmm0);
- LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm0), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
- : DefineSameAsFirst(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* left_operand = UseFixed(left, rdx);
- LOperand* right_operand = UseFixed(right, rax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment, it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#if DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- LOperand* receiver = UseFixed(instr->receiver(), rax);
- LOperand* length = UseFixed(instr->length(), rbx);
- LOperand* elements = UseFixed(instr->elements(), rcx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = UseOrConstant(instr->argument(i));
- AddInstruction(new(zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, rsi);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), rsi);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
- descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* function = UseFixed(instr->function(), rdi);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathCos:
- return DoMathCos(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSin:
- return DoMathSin(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- if (instr->representation().IsInteger32()) {
- LMathFloorI* result = new (zone()) LMathFloorI(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathFloorD* result = new (zone()) LMathFloorD(input);
- return DefineAsRegister(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- if (instr->representation().IsInteger32()) {
- LOperand* temp = FixedTemp(xmm4);
- LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else {
- DCHECK(instr->representation().IsDouble());
- LMathRoundD* result = new (zone()) LMathRoundD(input);
- return DefineAsRegister(result);
- }
-}
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
- LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LMathAbs(context, input));
- Representation r = instr->value()->representation();
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), xmm0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), xmm0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new(zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), xmm0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), xmm0),
- instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), xmm0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), xmm0),
- instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), xmm0);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), xmm0),
- instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMathSqrt(input));
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathPowHalf* result = new(zone()) LMathPowHalf(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LCallRuntime* result = new(zone()) LCallRuntime(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right;
- if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
- // We don't support tagged immediates, so we request the operand in a register.
- right = UseRegisterAtStart(instr->BetterRightOperand());
- } else {
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- }
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(rax);
- LOperand* temp2 = FixedTemp(rdx);
- LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
- dividend, divisor, temp1, temp2), rdx);
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(rdx);
- LInstruction* result = DefineFixed(new(zone()) LDivI(
- dividend, divisor, temp), rax);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanOverflow) ||
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(rax);
- LOperand* temp2 = FixedTemp(rdx);
- LOperand* temp3 =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result =
- DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
- divisor,
- temp1,
- temp2,
- temp3),
- rdx);
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(rdx);
- LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
- dividend, divisor, temp), rax);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(rax);
- LOperand* temp2 = FixedTemp(rdx);
- LInstruction* result = DefineFixed(new(zone()) LModByConstI(
- dividend, divisor, temp1, temp2), rax);
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(rdx);
- LInstruction* result = DefineFixed(new(zone()) LModI(
- dividend, divisor, temp), rdx);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- HValue* h_right = instr->BetterRightOperand();
- LOperand* right = UseOrConstant(h_right);
- LMulI* mul = new(zone()) LMulI(left, right);
- int constant_value =
- h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
- // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
- // |DeoptimizeIf|.
- bool needs_environment =
- instr->CheckFlag(HValue::kCanOverflow) ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (!right->IsConstantOperand() || constant_value <= 0));
- if (needs_environment) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right;
- if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
- // We don't support tagged immediates, so we request the operand in a register.
- right = UseRegisterAtStart(instr->right());
- } else {
- right = UseOrConstantAtStart(instr->right());
- }
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- // Check to see if it would be advantageous to use an lea instruction rather
- // than an add. This is the case when no overflow check is needed and there
- // are multiple uses of the add's inputs, so using a 3-register add will
- // preserve all input values for later uses.
- bool use_lea = LAddI::UseLea(instr);
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- HValue* right_candidate = instr->BetterRightOperand();
- LOperand* right;
- if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
- // We cannot add a tagged immediate to a tagged value,
- // so we request it in a register.
- right = UseRegisterAtStart(right_candidate);
- } else {
- right = use_lea ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
- }
- LAddI* add = new(zone()) LAddI(left, right);
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- LInstruction* result = use_lea ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
- if (can_overflow) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- bool use_lea = LAddI::UseLea(instr);
- LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_candidate = instr->right();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- if (instr->representation().IsSmi()) {
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseAtStart(instr->BetterRightOperand());
- } else if (instr->representation().IsInteger32()) {
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- DCHECK(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right =
- exponent_type.IsDouble()
- ? UseFixedDouble(instr->right(), xmm1)
- : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsUndetectableAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
-
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(context, left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- return new (zone())
- LClassOfTestAndBranch(value, TempRegister(), TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = FLAG_debug_code
- ? UseRegisterAtStart(instr->value())
- : UseRegisterOrConstantAtStart(instr->value());
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), rsi) : NULL;
- LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
- index, value);
- if (FLAG_debug_code) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseOrConstantAtStart(instr->length())
- : UseAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(val);
- LOperand* temp = TempRegister();
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
- if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- LOperand* value = UseRegister(val);
- return DefineAsRegister(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
- return AssignPointerMap(DefineSameAsFirst(result));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp1 = SmiValuesAre32Bits() ? NULL : TempRegister();
- LOperand* temp2 = SmiValuesAre32Bits() ? NULL : FixedTemp(xmm1);
- LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
- return AssignPointerMap(DefineSameAsFirst(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- LOperand* value = Use(val);
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
- FixedTemp(xmm1));
- return AssignEnvironment(DefineSameAsFirst(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(
- UseFixed(instr->value(), rax), context, parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new (zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- LOperand* temp;
- context = UseRegister(instr->context());
- if (instr->NeedsWriteBarrier()) {
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- // Use the special mov rax, moffs64 encoding for external
- // memory accesses with 64-bit word-sized values.
- if (instr->access().IsExternalMemory() &&
- instr->access().offset() == 0 &&
- (instr->access().representation().IsSmi() ||
- instr->access().representation().IsTagged() ||
- instr->access().representation().IsHeapObject() ||
- instr->access().representation().IsExternal())) {
- LOperand* obj = UseRegisterOrConstantAtStart(instr->object());
- return DefineFixed(new(zone()) LLoadNamedField(obj), rax);
- }
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
- // We sign extend the dehoisted key at the definition point when the pointer
- // size is 64-bit. For the x32 port, we sign extend the dehoisted key at the use
- // points and should not invoke this function. We can't use STATIC_ASSERT
- // here as the pointer size is 32-bit for x32.
- DCHECK(kPointerSize == kInt64Size);
- BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
- if (dehoisted_key_ids->Contains(candidate->id())) return;
- dehoisted_key_ids->Add(candidate->id());
- if (!candidate->IsPhi()) return;
- for (int i = 0; i < candidate->OperandCount(); ++i) {
- FindDehoistedKeyDefinitions(candidate->OperandAt(i));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK((kPointerSize == kInt64Size &&
- instr->key()->representation().IsInteger32()) ||
- (kPointerSize == kInt32Size &&
- instr->key()->representation().IsSmiOrInteger32()));
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = NULL;
- LInstruction* result = NULL;
-
- if (kPointerSize == kInt64Size) {
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
-
- if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
- FindDehoistedKeyDefinitions(instr->key());
- }
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK(
- (instr->representation().IsInteger32() &&
- !(IsDoubleOrFloatElementsKind(elements_kind))) ||
- (instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(elements_kind))));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
- if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
- FindDehoistedKeyDefinitions(instr->key());
- }
-
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- Representation value_representation = instr->value()->representation();
- if (value_representation.IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- DCHECK(value_representation.IsSmiOrTagged() ||
- value_representation.IsInteger32());
- if (needs_write_barrier) {
- object = UseTempRegister(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- object = UseRegisterAtStart(instr->elements());
- val = UseRegisterOrConstantAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- }
-
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- }
-
- DCHECK(
- (instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- DCHECK(instr->elements()->representation().IsExternal());
- bool val_is_temp_register = elements_kind == UINT8_CLAMPED_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = NULL;
- if (kPointerSize == kInt64Size) {
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(
- object, NULL, new_map_reg, temp_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), rax);
- LOperand* context = UseFixed(instr->context(), rsi);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, rax);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool is_external_location = instr->access().IsExternalMemory() &&
- instr->access().offset() == 0;
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else if (is_external_location) {
- DCHECK(!is_in_object);
- DCHECK(!needs_write_barrier);
- DCHECK(!needs_write_barrier_for_map);
- obj = UseRegisterOrConstant(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- bool can_be_constant = instr->value()->IsConstant() &&
- HConstant::cast(instr->value())->NotInNewSpace() &&
- !instr->field_representation().IsDouble();
-
- LOperand* val;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (is_external_location) {
- val = UseFixed(instr->value(), rax);
- } else if (can_be_constant) {
- val = UseRegisterOrConstant(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!is_in_object || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- return MarkAsCall(
- DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
- : UseRegister(instr->size());
- if (instr->IsAllocationFolded()) {
- LOperand* temp = TempRegister();
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocate* result = new (zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kTooManySpillSlotsNeededForOSR);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = Use(instr->index());
- }
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* value = UseFixed(instr->value(), rbx);
- LTypeof* result = new(zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), rsi);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object = UseFixed(instr->enumerable(), rax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
deleted file mode 100644
index 591ab47c46..0000000000
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ /dev/null
@@ -1,2496 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_X64_LITHIUM_X64_H_
-#define V8_CRANKSHAFT_X64_LITHIUM_X64_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(DummyUse) \
- V(Dummy) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadRoot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathCos) \
- V(MathExp) \
- V(MathFloorD) \
- V(MathFloorI) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRoundD) \
- V(MathRoundI) \
- V(MathSin) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- }
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall();
- }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
- virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
- return false;
- }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator support.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- bool MustSignExtendResult(LPlatformChunk* chunk) const final;
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const final { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos,
- Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction : public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LModByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LDivByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> {
- public:
- LFlooringDivByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-// Math.floor with a double result.
-class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.floor with an integer result.
-class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.round with a double result.
-class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-// Math.round with an integer result.
-class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LMathRoundI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSqrt(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathPowHalf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- explicit LStringCompareAndBranch(LOperand* context,
- LOperand* left,
- LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
- bool IsInteger32() const {
- return hydrogen()->representation().IsInteger32();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- static bool UseLea(HAdd* add) {
- return !add->CheckFlag(HValue::kCanOverflow) &&
- add->BetterLeftOperand()->UseCount() > 1;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LReturn(LOperand* value,
- LOperand* context,
- LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* context() { return inputs_[1]; }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
- DECLARE_HYDROGEN_ACCESSOR(Return)
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-inline static bool ExternalArrayOpRequiresTemp(
- Representation key_representation,
- ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return SmiValuesAre31Bits() && key_representation.IsSmi() &&
- (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
-}
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-};
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change);
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-
- Representation representation() const {
- return hydrogen()->field_representation();
- }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
- LOperand* backing_store_owner) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* context() { return inputs_[1]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
-
- LOperand* view() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value = NULL) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped,
- LOperand* temp_xmm) {
- inputs_[0] = unclamped;
- temps_[0] = temp_xmm;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp_xmm() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 1> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
- temps_[0] = temp;
- }
-
- LOperand* size() const { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry() {}
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph),
- dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
- BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
- bool IsDehoistedKey(HValue* value) {
- return dehoisted_key_ids_.Contains(value->id());
- }
-
- private:
- BitVector dehoisted_key_ids_;
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
-  // An operand created by UseRegister is guaranteed to be live until the end
-  // of the instruction. This means that the register allocator will not reuse
-  // its register for any other operand inside the instruction.
-  // An operand created by UseRegisterAtStart is guaranteed to be live only at
-  // the instruction start. The register allocator is free to assign the same
-  // register to some other operand used inside the instruction (i.e. a
-  // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register that may be trashed or a constant operand.
- MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
-  // An input operand in a register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
- XMMRegister reg);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
- void FindDehoistedKeyDefinitions(HValue* candidate);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_X64_LITHIUM_X64_H_
diff --git a/deps/v8/src/crankshaft/x87/OWNERS b/deps/v8/src/crankshaft/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/crankshaft/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
deleted file mode 100644
index 2a229aa92e..0000000000
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ /dev/null
@@ -1,5651 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/crankshaft/x87/lithium-codegen-x87.h"
-
-#include "src/base/bits.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/deoptimizer.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator final : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) {}
- virtual ~SafepointGenerator() {}
-
- void BeforeCall(int call_size) const override {}
-
- void AfterCall() const override {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- LPhase phase("Z_Code generation", chunk());
- DCHECK(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- DCHECK(is_done());
- code->set_stack_slots(GetTotalFrameSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- PopulateDeoptimizationData(code);
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
- }
-}
-
-
-#ifdef _MSC_VER
-void LCodeGen::MakeSureStackPagesMapped(int offset) {
- const int kPageSize = 4 * KB;
- for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
- }
-}
-#endif
-
-
-bool LCodeGen::GeneratePrologue() {
- DCHECK(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- DCHECK(!frame_is_built_);
- frame_is_built_ = true;
- if (info()->IsStub()) {
- __ StubPrologue(StackFrame::STUB);
- } else {
- __ Prologue(info()->GeneratePreagedPrologue());
- }
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- DCHECK(slots != 0 || !info()->IsOptimizing());
- if (slots > 0) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- MakeSureStackPagesMapped(slots * kPointerSize);
-#endif
- if (FLAG_debug_code) {
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- }
- }
-
-  // Initialize FPU state.
- __ fninit();
-
- return !is_aborted();
-}
-
-
-void LCodeGen::DoPrologue(LPrologue* instr) {
- Comment(";;; Prologue begin");
-
- // Possibly allocate a local context.
- if (info_->scope()->NeedsContext()) {
- Comment(";;; Allocate local context");
- bool need_write_barrier = true;
- // Argument to NewContext is the function, which is still in edi.
- int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
- if (info()->scope()->is_script_scope()) {
- __ push(edi);
- __ Push(info()->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- deopt_mode = Safepoint::kLazyDeopt;
- } else {
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info()->scope()->scope_type());
- __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
- Immediate(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(edi);
- __ Push(Smi::FromInt(info()->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- }
- RecordSafepoint(deopt_mode);
-
- // Context is returned in eax. It replaces the context passed to us.
- // It's saved in the stack and kept live in esi.
- __ mov(esi, eax);
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
-
- // Copy parameters into context if necessary.
- int num_parameters = info()->scope()->num_parameters();
- int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? info()->scope()->receiver()
- : info()->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
- kDontSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- Comment(";;; End allocate local context");
- }
-
- Comment(";;; Prologue end");
-}
-
-
-void LCodeGen::GenerateOsrPrologue() {
- // Generate the OSR entry prologue at the first unknown OSR value, or if there
- // are none, at the OSR entrypoint instruction.
- if (osr_pc_offset_ >= 0) return;
-
- osr_pc_offset_ = masm()->pc_offset();
-
-  // The interpreter is now the first-tier compiler. It runs code generated by
-  // the TurboFan compiler, which always puts "1" on the x87 FPU stack.
-  // This behavior would break Crankshaft's x87 FPU stack depth check in
-  // debug mode, so the FPU stack needs to be reset here.
- __ fninit();
-
- // Adjust the frame size, subsuming the unoptimized frame into the
- // optimized frame.
- int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
- DCHECK(slots >= 0);
- __ sub(esp, Immediate(slots * kPointerSize));
-}
-
-
-void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
- if (instr->IsCall()) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- }
- if (!instr->IsLazyBailout() && !instr->IsGap()) {
- safepoints_.BumpLastLazySafepointIndex();
- }
- FlushX87StackIfNecessary(instr);
-}
-
-
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
-  // When returning from a function call, the FPU should be initialized again.
- if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
- bool double_result = instr->HasDoubleRegisterResult();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- }
- }
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- if (instr->ClobbersDoubleRegisters(isolate())) {
- if (instr->HasDoubleRegisterResult()) {
- DCHECK_EQ(1, x87_stack_.depth());
- } else {
- DCHECK_EQ(0, x87_stack_.depth());
- }
- }
- __ VerifyX87StackDepth(x87_stack_.depth());
- }
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- if (!jump_table_.length()) return !is_aborted();
-
- Label needs_frame;
- Comment(";;; -------------------- Jump table --------------------");
-
- for (int i = 0; i < jump_table_.length(); i++) {
- Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
- __ bind(&table_entry->label);
- Address entry = table_entry->address;
- DeoptComment(table_entry->deopt_info);
- if (table_entry->needs_frame) {
- DCHECK(!info()->saves_caller_doubles());
- __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- __ call(&needs_frame);
- } else {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
- if (needs_frame.is_linked()) {
- __ bind(&needs_frame);
- /* stack layout
- 3: entry address
- 2: return address <-- esp
- 1: garbage
- 0: garbage
- */
- __ push(MemOperand(esp, 0)); // Copy return address.
- __ push(MemOperand(esp, 2 * kPointerSize)); // Copy entry address.
-
-    /* stack layout
-       3: entry address
-       2: return address
-       1: return address
-       0: entry address <-- esp
-    */
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp); // Save ebp.
- // Fill ebp with the right stack frame address.
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
-
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ mov(MemOperand(esp, 2 * kPointerSize),
- Immediate(Smi::FromInt(StackFrame::STUB)));
-
- /* stack layout
- 3: old ebp
- 2: stub marker
- 1: return address
- 0: entry address <-- esp
- */
- __ ret(0); // Call the continuation without clobbering registers.
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- DCHECK(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- X87Stack copy(code->x87_stack());
- x87_stack_ = copy;
-
- HValue* value =
- instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
-
- Comment(";;; <@%d,#%d> "
- "-------------------- Deferred %s --------------------",
- code->instruction_index(),
- code->instr()->hydrogen_value()->id(),
- code->instr()->Mnemonic());
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Build frame");
- DCHECK(!frame_is_built_);
- DCHECK(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that esi isn't trashed.
- __ push(ebp); // Caller's frame pointer.
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
- Comment(";;; Deferred code");
- }
- code->Generate();
- if (NeedsDeferredFrame()) {
- __ bind(code->done());
- Comment(";;; Destroy frame");
- DCHECK(frame_is_built_);
- frame_is_built_ = false;
- __ mov(esp, ebp);
- __ pop(ebp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- DCHECK(is_done());
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
- while (masm()->pc_offset() < target_offset) {
- masm()->nop();
- }
- }
- safepoints_.Emit(masm(), GetTotalFrameSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int code) const {
- return Register::from_code(code);
-}
-
-
-X87Register LCodeGen::ToX87Register(int code) const {
- return X87Register::from_code(code);
-}
-
-
-void LCodeGen::X87LoadForUsage(X87Register reg) {
- DCHECK(x87_stack_.Contains(reg));
- x87_stack_.Fxch(reg);
- x87_stack_.pop();
-}
-
-
-void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
- DCHECK(x87_stack_.Contains(reg1));
- DCHECK(x87_stack_.Contains(reg2));
- if (reg1.is(reg2) && x87_stack_.depth() == 1) {
- __ fld(x87_stack_.st(reg1));
- x87_stack_.push(reg1);
- x87_stack_.pop();
- x87_stack_.pop();
- } else {
- x87_stack_.Fxch(reg1, 1);
- x87_stack_.Fxch(reg2);
- x87_stack_.pop();
- x87_stack_.pop();
- }
-}
-
-
-int LCodeGen::X87Stack::GetLayout() {
- int layout = stack_depth_;
- for (int i = 0; i < stack_depth_; i++) {
- layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
- }
-
- return layout;
-}
-
-
-void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
- DCHECK(is_mutable_);
- DCHECK(Contains(reg) && stack_depth_ > other_slot);
- int i = ArrayIndex(reg);
- int st = st2idx(i);
- if (st != other_slot) {
- int other_i = st2idx(other_slot);
- X87Register other = stack_[other_i];
- stack_[other_i] = reg;
- stack_[i] = other;
- if (st == 0) {
- __ fxch(other_slot);
- } else if (other_slot == 0) {
- __ fxch(st);
- } else {
- __ fxch(st);
- __ fxch(other_slot);
- __ fxch(st);
- }
- }
-}
-
-
-int LCodeGen::X87Stack::st2idx(int pos) {
- return stack_depth_ - pos - 1;
-}
-
-
-int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
- for (int i = 0; i < stack_depth_; i++) {
- if (stack_[i].is(reg)) return i;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-bool LCodeGen::X87Stack::Contains(X87Register reg) {
- for (int i = 0; i < stack_depth_; i++) {
- if (stack_[i].is(reg)) return true;
- }
- return false;
-}
-
-
-void LCodeGen::X87Stack::Free(X87Register reg) {
- DCHECK(is_mutable_);
- DCHECK(Contains(reg));
- int i = ArrayIndex(reg);
- int st = st2idx(i);
- if (st > 0) {
- // keep track of how fstp(i) changes the order of elements
- int tos_i = st2idx(0);
- stack_[i] = stack_[tos_i];
- }
- pop();
- __ fstp(st);
-}
-
-
-void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
- if (x87_stack_.Contains(dst)) {
- x87_stack_.Fxch(dst);
- __ fstp(0);
- } else {
- x87_stack_.push(dst);
- }
- X87Fld(src, opts);
-}
-
-
-void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
- if (x87_stack_.Contains(dst)) {
- x87_stack_.Fxch(dst);
- __ fstp(0);
- x87_stack_.pop();
- // Push ST(i) onto the FPU register stack
- __ fld(x87_stack_.st(src));
- x87_stack_.push(dst);
- } else {
- // Push ST(i) onto the FPU register stack
- __ fld(x87_stack_.st(src));
- x87_stack_.push(dst);
- }
-}
-
-
-void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
- DCHECK(!src.is_reg_only());
- switch (opts) {
- case kX87DoubleOperand:
- __ fld_d(src);
- break;
- case kX87FloatOperand:
- __ fld_s(src);
- break;
- case kX87IntOperand:
- __ fild_s(src);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
- DCHECK(!dst.is_reg_only());
- x87_stack_.Fxch(src);
- switch (opts) {
- case kX87DoubleOperand:
- __ fst_d(dst);
- break;
- case kX87FloatOperand:
- __ fst_s(dst);
- break;
- case kX87IntOperand:
- __ fist_s(dst);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
- DCHECK(is_mutable_);
- if (Contains(reg)) {
- Free(reg);
- }
- // Mark this register as the next register to write to
- stack_[stack_depth_] = reg;
-}
-
-
-void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
- DCHECK(is_mutable_);
- // Assert the reg is prepared to write, but not on the virtual stack yet
- DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
- stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
- stack_depth_++;
-}
-
-
-void LCodeGen::X87PrepareBinaryOp(
- X87Register left, X87Register right, X87Register result) {
- // You need to use DefineSameAsFirst for x87 instructions
- DCHECK(result.is(left));
- x87_stack_.Fxch(right, 1);
- x87_stack_.Fxch(left);
-}
-
-
-void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
- if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
- bool double_inputs = instr->HasDoubleRegisterInput();
-
- // Flush stack from tos down, since FreeX87() will mess with tos
- for (int i = stack_depth_-1; i >= 0; i--) {
- X87Register reg = stack_[i];
- // Skip registers which contain the inputs for the next instruction
- // when flushing the stack
- if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
- continue;
- }
- Free(reg);
- if (i < stack_depth_-1) i++;
- }
- }
- if (instr->IsReturn()) {
- while (stack_depth_ > 0) {
- __ fstp(0);
- stack_depth_--;
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
- }
-}
-
-
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
- LCodeGen* cgen) {
-  // When going to a joined block, an explicit LClobberDoubles is inserted
-  // before LGoto, because all used x87 registers are spilled to stack slots.
-  // The ResolvePhis phase of the register allocator guarantees that the two
-  // inputs' x87 stacks have the same layout, so don't check stack_depth_ <= 1
-  // here.
- int goto_block_id = goto_instr->block_id();
- if (current_block_id + 1 != goto_block_id) {
- // If we have a value on the x87 stack on leaving a block, it must be a
- // phi input. If the next block we compile is not the join block, we have
- // to discard the stack state.
-    // Before discarding the stack state, we need to save it if the "goto
-    // block" has an unreachable last predecessor and
-    // FLAG_unreachable_code_elimination is enabled.
- if (FLAG_unreachable_code_elimination) {
- int length = goto_instr->block()->predecessors()->length();
- bool has_unreachable_last_predecessor = false;
- for (int i = 0; i < length; i++) {
- HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
- if (block->IsUnreachable() &&
- (block->block_id() + 1) == goto_block_id) {
- has_unreachable_last_predecessor = true;
- }
- }
- if (has_unreachable_last_predecessor) {
- if (cgen->x87_stack_map_.find(goto_block_id) ==
- cgen->x87_stack_map_.end()) {
- X87Stack* stack = new (cgen->zone()) X87Stack(*this);
- cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
- }
- }
- }
-
- // Discard the stack state.
- stack_depth_ = 0;
- }
-}
-
-
-void LCodeGen::EmitFlushX87ForDeopt() {
-  // The deoptimizer does not support x87 registers. But as long as we
-  // deopt from a stub it's not a problem, since we will re-materialize the
-  // original stub inputs, which can't be double registers.
- // DCHECK(info()->IsStub());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ pushfd();
- __ VerifyX87StackDepth(x87_stack_.depth());
- __ popfd();
- }
-
- // Flush X87 stack in the deoptimizer entry.
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- DCHECK(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-X87Register LCodeGen::ToX87Register(LOperand* op) const {
- DCHECK(op->IsDoubleRegister());
- return ToX87Register(op->index());
-}
-
-
-int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
- return ToRepresentation(op, Representation::Integer32());
-}
-
-
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
- const Representation& r) const {
- HConstant* constant = chunk_->LookupConstant(op);
- if (r.IsExternal()) {
- return reinterpret_cast<int32_t>(
- constant->ExternalReferenceValue().address());
- }
- int32_t value = constant->Integer32Value();
- if (r.IsInteger32()) return value;
- DCHECK(r.IsSmiOrTagged());
- return reinterpret_cast<int32_t>(Smi::FromInt(value));
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle(isolate());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- DCHECK(constant->HasExternalReferenceValue());
- return constant->ExternalReferenceValue();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
-}
-
-
-bool LCodeGen::IsSmi(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsSmi();
-}
-
-
-static int ArgumentsOffsetWithoutFrame(int index) {
- DCHECK(index < 0);
- return -(index + 1) * kPointerSize + kPCOnStackSize;
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- if (op->IsRegister()) return Operand(ToRegister(op));
- DCHECK(!op->IsDoubleRegister());
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return Operand(ebp, FrameSlotToFPOffset(op->index()));
- } else {
-    // Without an eager frame, retrieve the parameter relative to the
-    // stack pointer.
- return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
- }
-}
-
-
-Operand LCodeGen::HighOperand(LOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
- if (NeedsEagerFrame()) {
- return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
- } else {
-    // Without an eager frame, retrieve the parameter relative to the
-    // stack pointer.
- return Operand(
- esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
- }
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->translation_size();
-
- WriteTranslation(environment->outer(), translation);
- WriteTranslationFrame(environment, translation);
-
- int object_index = 0;
- int dematerialized_index = 0;
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
- }
-}
-
-
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
- if (op->IsStackSlot()) {
- int index = op->index();
- if (is_tagged) {
- translation->StoreStackSlot(index);
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(index);
- } else {
- translation->StoreInt32StackSlot(index);
- }
- } else if (op->IsDoubleStackSlot()) {
- int index = op->index();
- translation->StoreDoubleStackSlot(index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- X87Register reg = ToX87Register(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- DCHECK(instr != NULL);
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
- LInstruction* instr, SaveFPRegsMode save_doubles) {
- DCHECK(instr != NULL);
- DCHECK(instr->HasPointerMap());
-
- __ CallRuntime(fun, argc, save_doubles);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-
- DCHECK(info()->is_calling());
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- if (!ToRegister(context).is(esi)) {
- __ mov(esi, ToRegister(context));
- }
- } else if (context->IsStackSlot()) {
- __ mov(esi, ToOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
- } else {
- UNREACHABLE();
- }
-}
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
-
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-
- DCHECK(info()->is_calling());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(
- LEnvironment* environment, Safepoint::DeoptMode mode) {
- environment->set_has_been_used();
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type) {
- LEnvironment* environment = instr->environment();
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- DCHECK(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
- return;
- }
-
- if (DeoptEveryNTimes()) {
- ExternalReference count = ExternalReference::stress_deopt_count(isolate());
- Label no_deopt;
- __ pushfd();
- __ push(eax);
- __ mov(eax, Operand::StaticVariable(count));
- __ sub(eax, Immediate(1));
- __ j(not_zero, &no_deopt, Label::kNear);
- if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(FLAG_deopt_every_n_times));
- __ mov(Operand::StaticVariable(count), eax);
- __ pop(eax);
- __ popfd();
- DCHECK(frame_is_built_);
- // Put the x87 stack layout in TOS.
- if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
- __ push(Immediate(x87_stack_.GetLayout()));
- __ fild_s(MemOperand(esp, 0));
- // Don't touch eflags.
- __ lea(esp, Operand(esp, kPointerSize));
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&no_deopt);
- __ mov(Operand::StaticVariable(count), eax);
- __ pop(eax);
- __ popfd();
- }
-
- // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
- // the correct location.
- {
- Label done;
- if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
- if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
-
- int x87_stack_layout = x87_stack_.GetLayout();
- __ push(Immediate(x87_stack_layout));
- __ fild_s(MemOperand(esp, 0));
- // Don't touch eflags.
- __ lea(esp, Operand(esp, kPointerSize));
- __ bind(&done);
- }
-
- if (info()->ShouldTrapOnDeopt()) {
- Label done;
- if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- }
-
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
-
- DCHECK(info()->IsStub() || frame_is_built_);
- if (cc == no_condition && frame_is_built_) {
- DeoptComment(deopt_info);
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
- !frame_is_built_);
-    // We often have several deopts to the same entry; reuse the last
-    // jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->is_profiling() ||
- jump_table_.is_empty() ||
- !table_entry.IsEquivalentTo(jump_table_.last())) {
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
-void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason) {
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- DCHECK(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint =
- safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(zone());
- RecordSafepoint(&empty_pointers, mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
-}
-
-
-static const char* LabelType(LLabel* label) {
- if (label->is_loop_header()) return " (loop header)";
- if (label->is_osr_entry()) return " (OSR entry)";
- return "";
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
- current_instruction_,
- label->hydrogen_value()->id(),
- label->block_id(),
- LabelType(label));
- __ bind(label->label());
- current_block_ = label->block_id();
- if (label->block()->predecessors()->length() > 1) {
- // A join block's x87 stack is that of its last visited predecessor.
- // If the last visited predecessor block is unreachable, the stack state
-    // will be wrong. In that case, use the x87 stack of a reachable
-    // predecessor.
- X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
- // Restore x87 stack.
- if (it != x87_stack_map_.end()) {
- x87_stack_ = *(it->second);
- }
- }
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // Theoretically, a variation of the branch-free code for integer division by
- // a power of 2 (calculating the remainder via an additional multiplication
- // (which gets simplified to an 'and') and subtraction) should be faster, and
- // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
- // indicate that positive dividends are heavily favored, so the branching
- // version performs better.
- HMod* hmod = instr->hydrogen();
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- Label dividend_is_not_negative, done;
- if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
- __ test(dividend, dividend);
- __ j(not_sign, &dividend_is_not_negative, Label::kNear);
- // Note that this is correct even for kMinInt operands.
- __ neg(dividend);
- __ and_(dividend, mask);
- __ neg(dividend);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&dividend_is_not_negative);
- __ and_(dividend, mask);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoModByConstI(LModByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(eax));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- __ TruncatingDiv(dividend, Abs(divisor));
- __ imul(edx, edx, Abs(divisor));
- __ mov(eax, dividend);
- __ sub(eax, edx);
-
- // Check for negative zero.
- HMod* hmod = instr->hydrogen();
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label remainder_not_zero;
- __ j(not_zero, &remainder_not_zero, Label::kNear);
- __ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- __ bind(&remainder_not_zero);
- }
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- HMod* hmod = instr->hydrogen();
-
- Register left_reg = ToRegister(instr->left());
- DCHECK(left_reg.is(eax));
- Register right_reg = ToRegister(instr->right());
- DCHECK(!right_reg.is(eax));
- DCHECK(!right_reg.is(edx));
- Register result_reg = ToRegister(instr->result());
- DCHECK(result_reg.is(edx));
-
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (hmod->CheckFlag(HValue::kCanOverflow)) {
- Label no_overflow_possible;
- __ cmp(left_reg, kMinInt);
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ cmp(right_reg, -1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Move(result_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
-
- // Sign extend dividend in eax into edx:eax.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
- __ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- __ jmp(&done, Label::kNear);
- __ bind(&positive_left);
- }
- __ idiv(right_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- Register result = ToRegister(instr->result());
- DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
- DCHECK(!result.is(dividend));
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- }
- // Deoptimize if remainder will not be 0.
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1) {
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
- __ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
- __ Move(result, dividend);
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (shift > 0) {
- // The arithmetic shift is always OK, the 'if' is an optimization only.
- if (shift > 1) __ sar(result, 31);
- __ shr(result, 32 - shift);
- __ add(result, dividend);
- __ sar(result, shift);
- }
- if (divisor < 0) __ neg(result);
-}
-
-
-void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(edx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
-
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- __ mov(eax, edx);
- __ imul(eax, eax, divisor);
- __ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
-void LCodeGen::DoDivI(LDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- DCHECK(dividend.is(eax));
- DCHECK(remainder.is(edx));
- DCHECK(ToRegister(instr->result()).is(eax));
- DCHECK(!divisor.is(eax));
- DCHECK(!divisor.is(edx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ test(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ test(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmp(dividend, kMinInt);
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to edx (= remainder).
- __ cdq();
- __ idiv(divisor);
-
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- // Deoptimize if remainder is not 0.
- __ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
- }
-}
-
-
-void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(dividend.is(ToRegister(instr->result())));
-
- // If the divisor is positive, things are easy: There can be no deopts and we
- // can simply do an arithmetic right shift.
- if (divisor == 1) return;
- int32_t shift = WhichPowerOf2Abs(divisor);
- if (divisor > 1) {
- __ sar(dividend, shift);
- return;
- }
-
- // If the divisor is negative, we have to negate and handle edge cases.
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Dividing by -1 is basically negation, unless we overflow.
- if (divisor == -1) {
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- return;
- }
-
- // If the negation could not overflow, simply shifting is OK.
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- __ sar(dividend, shift);
- return;
- }
-
- Label not_kmin_int, done;
- __ j(no_overflow, &not_kmin_int, Label::kNear);
- __ mov(dividend, Immediate(kMinInt / divisor));
- __ jmp(&done, Label::kNear);
- __ bind(&not_kmin_int);
- __ sar(dividend, shift);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
- Register dividend = ToRegister(instr->dividend());
- int32_t divisor = instr->divisor();
- DCHECK(ToRegister(instr->result()).is(edx));
-
- if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
- return;
- }
-
- // Check for (0 / -x) that will produce negative zero.
- HMathFloorOfDiv* hdiv = instr->hydrogen();
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
- }
-
- // Easy case: We need no dynamic check for the dividend and the flooring
- // division is the same as the truncating division.
- if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- return;
- }
-
- // In the general case we may need to adjust before and after the truncating
- // division to get a flooring division.
- Register temp = ToRegister(instr->temp3());
- DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
- Label needs_adjustment, done;
- __ cmp(dividend, Immediate(0));
- __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
- __ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- __ jmp(&done, Label::kNear);
- __ bind(&needs_adjustment);
- __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
- __ TruncatingDiv(temp, Abs(divisor));
- if (divisor < 0) __ neg(edx);
- __ dec(edx);
- __ bind(&done);
-}
-
-
-// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
-void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
- HBinaryOperation* hdiv = instr->hydrogen();
- Register dividend = ToRegister(instr->dividend());
- Register divisor = ToRegister(instr->divisor());
- Register remainder = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
- DCHECK(dividend.is(eax));
- DCHECK(remainder.is(edx));
- DCHECK(result.is(eax));
- DCHECK(!divisor.is(eax));
- DCHECK(!divisor.is(edx));
-
- // Check for x / 0.
- if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(divisor, divisor);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label dividend_not_zero;
- __ test(dividend, dividend);
- __ j(not_zero, &dividend_not_zero, Label::kNear);
- __ test(divisor, divisor);
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- __ bind(&dividend_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (hdiv->CheckFlag(HValue::kCanOverflow)) {
- Label dividend_not_min_int;
- __ cmp(dividend, kMinInt);
- __ j(not_zero, &dividend_not_min_int, Label::kNear);
- __ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
- __ bind(&dividend_not_min_int);
- }
-
- // Sign extend to edx (= remainder).
- __ cdq();
- __ idiv(divisor);
-
- Label done;
- __ test(remainder, remainder);
- __ j(zero, &done, Label::kNear);
- __ xor_(remainder, divisor);
- __ sar(remainder, 31);
- __ add(result, remainder);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->temp()), left);
- }
-
- if (right->IsConstantOperand()) {
- // Try strength reductions on the multiplication.
- // All replacement instructions are at most as long as the imul
- // and have better latency.
- int constant = ToInteger32(LConstantOperand::cast(right));
- if (constant == -1) {
- __ neg(left);
- } else if (constant == 0) {
- __ xor_(left, Operand(left));
- } else if (constant == 2) {
- __ add(left, Operand(left));
- } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // If we know that the multiplication can't overflow, it's safe to
- // use instructions that don't set the overflow flag for the
- // multiplication.
- switch (constant) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ lea(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shl(left, 2);
- break;
- case 5:
- __ lea(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shl(left, 3);
- break;
- case 9:
- __ lea(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shl(left, 4);
- break;
- default:
- __ imul(left, left, constant);
- break;
- }
- } else {
- __ imul(left, left, constant);
- }
- } else {
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(left);
- }
- __ imul(left, ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ test(left, Operand(left));
- __ j(not_zero, &done);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
- }
- } else {
- // Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- DCHECK(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int32_t right_operand =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->representation());
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), right_operand);
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), right_operand);
- break;
- case Token::BIT_XOR:
- if (right_operand == int32_t(~0)) {
- __ not_(ToRegister(left));
- } else {
- __ xor_(ToRegister(left), right_operand);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- DCHECK(left->IsRegister());
- if (right->IsRegister()) {
- DCHECK(ToRegister(right).is(ecx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ ror_cl(ToRegister(left));
- break;
- case Token::SAR:
- __ sar_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shr_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case Token::SHL:
- __ shl_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
- } else {
- __ ror(ToRegister(left), shift_count);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sar(ToRegister(left), shift_count);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ shr(ToRegister(left), shift_count);
- } else if (instr->can_deopt()) {
- __ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- if (instr->hydrogen_value()->representation().IsSmi() &&
- instr->can_deopt()) {
- if (shift_count != 1) {
- __ shl(ToRegister(left), shift_count - 1);
- }
- __ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- } else {
- __ shl(ToRegister(left), shift_count);
- }
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ sub(ToOperand(left),
- ToImmediate(right, instr->hydrogen()->representation()));
- } else {
- __ sub(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Move(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Move(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- uint64_t const bits = instr->bits();
- uint32_t const lower = static_cast<uint32_t>(bits);
- uint32_t const upper = static_cast<uint32_t>(bits >> 32);
- DCHECK(instr->result()->IsDoubleRegister());
-
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- X87Register reg = ToX87Register(instr->result());
- X87Mov(reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
-}
-
-
-void LCodeGen::DoConstantE(LConstantE* instr) {
- __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Register reg = ToRegister(instr->result());
- Handle<Object> object = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ LoadObject(reg, object);
-}
-
-
-Operand LCodeGen::BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding) {
- if (index->IsConstantOperand()) {
- int offset = ToRepresentation(LConstantOperand::cast(index),
- Representation::Integer32());
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset *= kUC16Size;
- }
- STATIC_ASSERT(kCharSize == 1);
- return FieldOperand(string, SeqString::kHeaderSize + offset);
- }
- return FieldOperand(
- string, ToRegister(index),
- encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
- SeqString::kHeaderSize);
-}
-
-
-void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register result = ToRegister(instr->result());
- Register string = ToRegister(instr->string());
-
- if (FLAG_debug_code) {
- __ push(string);
- __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
-
- __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, kUnexpectedStringType);
- __ pop(string);
- }
-
- Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movzx_b(result, operand);
- } else {
- __ movzx_w(result, operand);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- String::Encoding encoding = instr->hydrogen()->encoding();
- Register string = ToRegister(instr->string());
-
- if (FLAG_debug_code) {
- Register value = ToRegister(instr->value());
- Register index = ToRegister(instr->index());
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- int encoding_mask =
- instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type;
- __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
- }
-
- Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
- if (instr->value()->IsConstantOperand()) {
- int value = ToRepresentation(LConstantOperand::cast(instr->value()),
- Representation::Integer32());
- DCHECK_LE(0, value);
- if (encoding == String::ONE_BYTE_ENCODING) {
- DCHECK_LE(value, String::kMaxOneByteCharCode);
- __ mov_b(operand, static_cast<int8_t>(value));
- } else {
- DCHECK_LE(value, String::kMaxUtf16CodeUnit);
- __ mov_w(operand, static_cast<int16_t>(value));
- }
- } else {
- Register value = ToRegister(instr->value());
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ mov_b(operand, value);
- } else {
- __ mov_w(operand, value);
- }
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
-
- if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
- if (right->IsConstantOperand()) {
- int32_t offset = ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->representation());
- __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
- } else {
- Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- __ lea(ToRegister(instr->result()), address);
- }
- } else {
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left),
- ToImmediate(right, instr->hydrogen()->representation()));
- } else {
- __ add(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- DCHECK(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- if (right->IsConstantOperand()) {
- Operand left_op = ToOperand(left);
- Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
- instr->hydrogen()->representation());
- __ cmp(left_op, immediate);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_op, immediate);
- } else {
- Register left_reg = ToRegister(left);
- Operand right_op = ToOperand(right);
- __ cmp(left_reg, right_op);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- DCHECK(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- X87Register left_reg = ToX87Register(left);
- X87Register right_reg = ToX87Register(right);
-
- X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
- __ fld(1);
- __ fld(1);
- __ FCmp();
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
-      // Push st0 and st1 to the stack, then pop them into temp registers,
-      // OR them, and load the result into left.
- Register scratch_reg = ToRegister(instr->temp());
- __ fld(1);
- __ fld(1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, kPointerSize));
- __ pop(scratch_reg);
- __ or_(MemOperand(esp, 0), scratch_reg);
- X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
- __ pop(scratch_reg); // restore esp
- } else {
-      // Since we operate on +0 and/or -0, adding the values and ANDing them
-      // have the same effect. The result should end up in st(0).
- __ fadd_i(1);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&check_nan_left);
- __ fld(0);
- __ fld(0);
- __ FCmp(); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
-
- __ bind(&return_right);
- X87Mov(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- X87Register left = ToX87Register(instr->left());
- X87Register right = ToX87Register(instr->right());
- X87Register result = ToX87Register(instr->result());
- if (instr->op() != Token::MOD) {
- X87PrepareBinaryOp(left, right, result);
- }
- // Set the precision control to double-precision.
- __ X87SetFPUCW(0x027F);
- switch (instr->op()) {
- case Token::ADD:
- __ fadd_i(1);
- break;
- case Token::SUB:
- __ fsub_i(1);
- break;
- case Token::MUL:
- __ fmul_i(1);
- break;
- case Token::DIV:
- __ fdiv_i(1);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- X87Mov(Operand(esp, 1 * kDoubleSize), right);
- X87Mov(Operand(esp, 0), left);
- X87Free(right);
- DCHECK(left.is(result));
- X87PrepareToWrite(result);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(edx));
- DCHECK(ToRegister(instr->right()).is(eax));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
-
- int next_block = GetNextEmittedBlock();
-
- if (right_block == left_block || cc == no_condition) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-template <class InstrType>
-void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
- int true_block = instr->TrueDestination(chunk_);
- if (cc == no_condition) {
- __ jmp(chunk_->GetAssemblyLabel(true_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(true_block));
- }
-}
-
-
-template<class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
- int false_block = instr->FalseDestination(chunk_);
- if (cc == no_condition) {
- __ jmp(chunk_->GetAssemblyLabel(false_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(false_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32()) {
- Register reg = ToRegister(instr->value());
- __ test(reg, Operand(reg));
- EmitBranch(instr, not_zero);
- } else if (r.IsDouble()) {
- X87Register reg = ToX87Register(instr->value());
- X87LoadForUsage(reg);
- __ fldz();
- __ FCmp();
- EmitBranch(instr, not_zero);
- } else {
- DCHECK(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- DCHECK(!info()->IsStub());
- __ cmp(reg, factory()->true_value());
- EmitBranch(instr, equal);
- } else if (type.IsSmi()) {
- DCHECK(!info()->IsStub());
- __ test(reg, Operand(reg));
- EmitBranch(instr, not_equal);
- } else if (type.IsJSArray()) {
- DCHECK(!info()->IsStub());
- EmitBranch(instr, no_condition);
- } else if (type.IsHeapNumber()) {
- UNREACHABLE();
- } else if (type.IsString()) {
- DCHECK(!info()->IsStub());
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- EmitBranch(instr, not_equal);
- } else {
- ToBooleanHints expected = instr->hydrogen()->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- if (expected & ToBooleanHint::kUndefined) {
- // undefined -> false.
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kBoolean) {
- // true -> true.
- __ cmp(reg, factory()->true_value());
- __ j(equal, instr->TrueLabel(chunk_));
- // false -> false.
- __ cmp(reg, factory()->false_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
- if (expected & ToBooleanHint::kNull) {
- // 'null' -> false.
- __ cmp(reg, factory()->null_value());
- __ j(equal, instr->FalseLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kSmallInteger) {
- // Smis: 0 -> false, all other -> true.
- __ test(reg, Operand(reg));
- __ j(equal, instr->FalseLabel(chunk_));
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected & ToBooleanHint::kNeedsMap) {
- // If we need a map later and have a Smi -> deopt.
- __ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
- }
-
- Register map = no_reg; // Keep the compiler happy.
- if (expected & ToBooleanHint::kNeedsMap) {
- map = ToRegister(instr->temp());
- DCHECK(!map.is(reg));
- __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected & ToBooleanHint::kCanBeUndetectable) {
- // Undetectable -> false.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, instr->FalseLabel(chunk_));
- }
- }
-
- if (expected & ToBooleanHint::kReceiver) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kString) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, instr->TrueLabel(chunk_));
- __ jmp(instr->FalseLabel(chunk_));
- __ bind(&not_string);
- }
-
- if (expected & ToBooleanHint::kSymbol) {
- // Symbol value -> true.
- __ CmpInstanceType(map, SYMBOL_TYPE);
- __ j(equal, instr->TrueLabel(chunk_));
- }
-
- if (expected & ToBooleanHint::kHeapNumber) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, instr->FalseLabel(chunk_));
- __ jmp(instr->TrueLabel(chunk_));
- __ bind(&not_heap_number);
- }
-
- if (expected != ToBooleanHint::kAny) {
- // We've seen something for the first time -> deopt.
- // This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
- }
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- if (!IsNextEmittedBlock(block)) {
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
- }
-}
-
-
-void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::NE:
- case Token::NE_STRICT:
- cond = not_equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- bool is_unsigned =
- instr->is_double() ||
- instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
- instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
- Condition cc = TokenToCondition(instr->op(), is_unsigned);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
- ? instr->TrueDestination(chunk_)
- : instr->FalseDestination(chunk_);
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- X87LoadForUsage(ToX87Register(right), ToX87Register(left));
- __ FCmp();
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ j(parity_even, instr->FalseLabel(chunk_));
- } else {
- if (right->IsConstantOperand()) {
- __ cmp(ToOperand(left),
- ToImmediate(right, instr->hydrogen()->representation()));
- } else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right),
- ToImmediate(left, instr->hydrogen()->representation()));
- // We commuted the operands, so commute the condition.
- cc = CommuteCondition(cc);
- } else {
- __ cmp(ToRegister(left), ToOperand(right));
- }
- }
- EmitBranch(instr, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- if (instr->right()->IsConstantOperand()) {
- Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
- __ CmpObject(left, right);
- } else {
- Operand right = ToOperand(instr->right());
- __ cmp(left, right);
- }
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ cmp(input_reg, factory()->the_hole_value());
- EmitBranch(instr, equal);
- return;
- }
-
-  // Put the value on top of the stack.
- X87Register src = ToX87Register(instr->object());
- X87LoadForUsage(src);
- __ fld(0);
- __ fld(0);
- __ FCmp();
- Label ok;
- __ j(parity_even, &ok, Label::kNear);
- __ fstp(0);
- EmitFalseBranch(instr, no_condition);
- __ bind(&ok);
-
-
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(MemOperand(esp, 0));
-
- __ add(esp, Immediate(kDoubleSize));
- int offset = sizeof(kHoleNanUpper32);
- __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
- EmitBranch(instr, equal);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed = INLINE_SMI_CHECK) {
- if (check_needed == INLINE_SMI_CHECK) {
- __ JumpIfSmi(input, is_not_string);
- }
-
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
- Condition true_cond = EmitIsString(
- reg, temp, instr->FalseLabel(chunk_), check_needed);
-
- EmitBranch(instr, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->value());
-
- __ test(input, Immediate(kSmiTagMask));
- EmitBranch(instr, zero);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(instr, not_zero);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(edx));
- DCHECK(ToRegister(instr->right()).is(eax));
-
- Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
- CallCode(code, RelocInfo::CODE_TARGET, instr);
- __ CompareRoot(eax, Heap::kTrueValueRootIndex);
- EmitBranch(instr, equal);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- DCHECK(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ JumpIfSmi(input, instr->FalseLabel(chunk_));
- }
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(instr, BranchCondition(instr->hydrogen()));
-}
-
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
- Handle<String> class_name, Register input,
- Register temp, Register temp2) {
- DCHECK(!input.is(temp));
- DCHECK(!input.is(temp2));
- DCHECK(!temp.is(temp2));
- __ JumpIfSmi(input, is_false);
-
- __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- __ j(above_equal, is_true);
- } else {
- __ j(above_equal, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ GetMapConstructor(temp, temp, temp2);
- // Objects with a non-function constructor have class 'Object'.
- __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
- if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ mov(temp,
- FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- class_name, input, temp, temp2);
-
- EmitBranch(instr, equal);
-}
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::DoHasInPrototypeChainAndBranch(
- LHasInPrototypeChainAndBranch* instr) {
- Register const object = ToRegister(instr->object());
- Register const object_map = ToRegister(instr->scratch());
- Register const object_prototype = object_map;
- Register const prototype = ToRegister(instr->prototype());
-
- // The {object} must be a spec object. It's sufficient to know that {object}
- // is not a smi, since all other non-spec objects have {null} prototypes and
- // will be ruled out below.
- if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
- __ test(object, Immediate(kSmiTagMask));
- EmitFalseBranch(instr, zero);
- }
-
-  // Loop through the {object}'s prototype chain looking for the {prototype}.
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- Label loop;
- __ bind(&loop);
-
- // Deoptimize if the object needs to be access checked.
- __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
- // Deoptimize for proxies.
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
-
- __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, factory()->null_value());
- EmitFalseBranch(instr, equal);
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, equal);
- __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
- __ jmp(&loop);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(condition, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-void LCodeGen::EmitReturn(LReturn* instr) {
- int extra_value_count = 1;
-
- if (instr->has_constant_parameter_count()) {
- int parameter_count = ToInteger32(instr->constant_parameter_count());
- __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
- } else {
- DCHECK(info()->IsStub()); // Functions would need to drop one more value.
- Register reg = ToRegister(instr->parameter_count());
- // The argument count parameter is a smi
- __ SmiUntag(reg);
- Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
-
- // emit code to restore stack based on instr->parameter_count()
- __ pop(return_addr_reg); // save return address
- __ shl(reg, kPointerSizeLog2);
- __ add(esp, reg);
- __ jmp(return_addr_reg);
- }
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
-    // Preserve the return value on the stack and rely on the runtime call
-    // to return the value in the same register. We're leaving the code
-    // managed by the register allocator and tearing down the frame, so it's
-    // safe to write to the context register.
- __ push(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit);
- }
- if (NeedsEagerFrame()) {
- __ mov(esp, ebp);
- __ pop(ebp);
- }
-
- EmitReturn(instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, ContextOperand(context, instr->slot_index()));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ mov(result, factory()->undefined_value());
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Label skip_assignment;
-
- Operand target = ContextOperand(context, instr->slot_index());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(target, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- } else {
- __ j(not_equal, &skip_assignment, Label::kNear);
- }
- }
-
- __ mov(target, value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Register temp = ToRegister(instr->temp());
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
- EMIT_REMEMBERED_SET, check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- Register result = ToRegister(instr->result());
- MemOperand operand = instr->object()->IsConstantOperand()
- ? MemOperand::StaticVariable(ToExternalReference(
- LConstantOperand::cast(instr->object())))
- : MemOperand(ToRegister(instr->object()), offset);
- __ Load(result, operand, access.representation());
- return;
- }
-
- Register object = ToRegister(instr->object());
- if (instr->hydrogen()->representation().IsDouble()) {
- X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
- return;
- }
-
- Register result = ToRegister(instr->result());
- if (!access.IsInobject()) {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- object = result;
- }
- __ Load(result, FieldOperand(object, offset), access.representation());
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- DCHECK(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- AllowDeferredHandleDereference smi_check;
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
- } else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
- } else {
- __ push(ToOperand(operand));
- }
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- // Get the prototype or initial map from the function.
- __ mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, temp);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
- Register result = ToRegister(instr->result());
- __ LoadRoot(result, instr->index());
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ mov(result, Operand(arguments, index * kPointerSize));
- } else {
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
- // There are two words between the frame pointer and the last argument.
-    // Subtracting from length accounts for one of them; add one more.
- __ sub(length, index);
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- instr->base_offset()));
- if (elements_kind == FLOAT32_ELEMENTS) {
- X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- X87Mov(ToX87Register(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case INT8_ELEMENTS:
- __ movsx_b(result, operand);
- break;
- case UINT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ movzx_b(result, operand);
- break;
- case INT16_ELEMENTS:
- __ movsx_w(result, operand);
- break;
- case UINT16_ELEMENTS:
- __ movzx_w(result, operand);
- break;
- case INT32_ELEMENTS:
- __ mov(result, operand);
- break;
- case UINT32_ELEMENTS:
- __ mov(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ test(result, Operand(result));
- DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
- }
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset() + sizeof(kHoleNanLower32));
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset());
- X87Mov(ToX87Register(instr->result()), double_load_operand);
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ mov(result,
- BuildFastArrayOperand(instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS, instr->base_offset()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
- } else {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
- }
- } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
- DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
- Label done;
- __ cmp(result, factory()->the_hole_value());
- __ j(not_equal, &done);
- if (info()->IsStub()) {
- // A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kProtectorValid.
- // Otherwise it needs to bail out.
- __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kProtectorValid)));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
- }
- __ mov(result, isolate()->factory()->undefined_value());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_fixed_typed_array()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t base_offset) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int element_shift_size = ElementsKindToShiftSize(elements_kind);
- int shift_size = element_shift_size;
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
- }
- return Operand(elements_pointer_reg,
- ((constant_value) << shift_size)
- + base_offset);
- } else {
- // Take the tag bit into account while computing the shift size.
- if (key_representation.IsSmi() && (shift_size >= 1)) {
- shift_size -= kSmiTagSize;
- }
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- base_offset);
- }
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(esp, -2 * kPointerSize));
- } else if (instr->hydrogen()->arguments_adaptor()) {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result,
- Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  // The result is the frame pointer of this frame if not adapted, and of the
-  // real frame below the adaptor frame if adapted.
- __ bind(&done);
- } else {
- __ mov(result, Operand(ebp));
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
-  // If there is no arguments adaptor frame, the number of arguments is fixed.
- __ cmp(ebp, elem);
- __ mov(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label receiver_ok, global_object;
- Label::Distance dist;
-
-  // For the x87 debug version, the jitted code size exceeds 128 bytes whether
-  // FLAG_deopt_every_n_times is set or not. Always use Label::kFar for the
-  // label distance in debug mode.
- if (FLAG_debug_code)
- dist = Label::kFar;
- else
- dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
-
- Register scratch = ToRegister(instr->temp());
-
- if (!instr->hydrogen()->known_function()) {
- // Do not transform the receiver to object for strict mode
- // functions.
- __ mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
-
- // Do not transform the receiver to object for builtins.
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, dist);
- }
-
- // Normal function. Replace undefined or null with global receiver.
- __ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object, dist);
- __ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object, dist);
-
- // The receiver should be a JS object.
- __ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
- __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
- DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
-
- __ jmp(&receiver_ok, dist);
- __ bind(&global_object);
- __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- DCHECK(receiver.is(eax)); // Used for parameter count.
- DCHECK(function.is(edi)); // Required by InvokeFunction.
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
-
- __ push(receiver);
- __ mov(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ test(length, Operand(length));
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ dec(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
-
- InvokeFlag flag = CALL_FUNCTION;
- if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
- DCHECK(!info()->saves_caller_doubles());
- // TODO(ishell): drop current frame before pushing arguments to the stack.
- flag = JUMP_FUNCTION;
- ParameterCount actual(eax);
- // It is safe to use ebx, ecx and edx as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) ebx (expected arguments count) and edx (new.target) will be
- // initialized below.
- PrepareForTailCall(actual, ebx, ecx, edx);
- }
-
- DCHECK(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(eax);
- __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
-}
-
-
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
- __ int3();
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in esi.
- DCHECK(result.is(esi));
- }
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- __ push(Immediate(instr->hydrogen()->declarations()));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- __ push(Immediate(instr->hydrogen()->feedback_vector()));
- CallRuntime(Runtime::kDeclareGlobals, instr);
-}
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr) {
- bool dont_adapt_arguments =
- formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- bool can_invoke_directly =
- dont_adapt_arguments || formal_parameter_count == arity;
-
- Register function_reg = edi;
-
- if (can_invoke_directly) {
- // Change context.
- __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
-
- // Always initialize new target and number of actual arguments.
- __ mov(edx, factory()->undefined_value());
- __ mov(eax, arity);
-
- bool is_self_call = function.is_identical_to(info()->closure());
-
- // Invoke function directly.
- if (is_self_call) {
- Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
- if (is_tail_call) {
- __ Jump(self, RelocInfo::CODE_TARGET);
- } else {
- __ Call(self, RelocInfo::CODE_TARGET);
- }
- } else {
- Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
- if (is_tail_call) {
- __ jmp(target);
- } else {
- __ call(target);
- }
- }
-
- if (!is_tail_call) {
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- }
- } else {
- // We need to adapt arguments.
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(arity);
- ParameterCount expected(formal_parameter_count);
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(function_reg, expected, actual, flag, generator);
- }
-}
-
-
-void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
- DCHECK(ToRegister(instr->result()).is(eax));
-
- if (instr->hydrogen()->IsTailCall()) {
- if (NeedsEagerFrame()) __ leave();
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- __ jmp(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(target);
- }
- } else {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(Operand(target)));
- __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(target);
- }
- generator.AfterCall();
- }
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
-
- Label slow, allocated, done;
- uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
- available_regs &= ~input_reg.bit();
- if (instr->context()->IsRegister()) {
- // Make sure that the context isn't overwritten in the AllocateHeapNumber
- // macro below.
- available_regs &= ~ToRegister(instr->context()).bit();
- }
-
- Register tmp =
- Register::from_code(base::bits::CountTrailingZeros32(available_regs));
- available_regs &= ~tmp.bit();
- Register tmp2 =
- Register::from_code(base::bits::CountTrailingZeros32(available_regs));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done, Label::kNear);
-
- __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated, Label::kNear);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
- instr, instr->context());
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(eax)) __ mov(tmp, eax);
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(tmp2, ~HeapNumber::kSignMask);
- __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
- Register input_reg = ToRegister(instr->value());
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LMathAbs* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LMathAbs* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMathAbs* instr_;
- };
-
- DCHECK(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- X87Register value = ToX87Register(instr->value());
- X87Fxch(value);
- __ fabs();
- } else if (r.IsSmiOrInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
- Register output_reg = ToRegister(instr->result());
- X87Register input_reg = ToX87Register(instr->value());
- X87Fxch(input_reg);
-
- Label not_minus_zero, done;
- // Deoptimize on unordered.
- __ fldz();
- __ fld(1);
- __ FCmp();
- DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
- __ j(below, &not_minus_zero, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- __ j(not_equal, &not_minus_zero, Label::kNear);
- // +- 0.0.
- __ fld(0);
- __ FXamSign();
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- __ Move(output_reg, Immediate(0));
- __ jmp(&done, Label::kFar);
- }
-
- // Positive input.
- // rc=01B, round down.
- __ bind(&not_minus_zero);
- __ fnclex();
- __ X87SetRC(0x0400);
- __ sub(esp, Immediate(kPointerSize));
- __ fist_s(Operand(esp, 0));
- __ pop(output_reg);
- __ X87SetRC(0x0000);
- __ X87CheckIA();
- DeoptimizeIf(equal, instr, DeoptimizeReason::kOverflow);
- __ fnclex();
- __ X87SetRC(0x0000);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathRound(LMathRound* instr) {
- X87Register input_reg = ToX87Register(instr->value());
- Register result = ToRegister(instr->result());
- X87Fxch(input_reg);
- Label below_one_half, below_minus_one_half, done;
-
- ExternalReference one_half = ExternalReference::address_of_one_half();
- ExternalReference minus_one_half =
- ExternalReference::address_of_minus_one_half();
-
- __ fld_d(Operand::StaticVariable(one_half));
- __ fld(1);
- __ FCmp();
- __ j(carry, &below_one_half);
-
-  // Since 0.5 <= x, we can compute floor(x + 0.5) by truncating x + 0.5
-  // (round-towards-zero).
- __ fld(0);
- __ fadd_d(Operand::StaticVariable(one_half));
- // rc=11B, round toward zero.
- __ X87SetRC(0x0c00);
- __ sub(esp, Immediate(kPointerSize));
- // Clear exception bits.
- __ fnclex();
- __ fistp_s(MemOperand(esp, 0));
- // Restore round mode.
- __ X87SetRC(0x0000);
- // Check overflow.
- __ X87CheckIA();
- __ pop(result);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
- __ fnclex();
- // Restore round mode.
- __ X87SetRC(0x0000);
- __ jmp(&done);
-
- __ bind(&below_one_half);
- __ fld_d(Operand::StaticVariable(minus_one_half));
- __ fld(1);
- __ FCmp();
- __ j(carry, &below_minus_one_half);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // If the sign is positive, we return +0.
- __ fld(0);
- __ FXamSign();
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ Move(result, Immediate(0));
- __ jmp(&done);
-
- __ bind(&below_minus_one_half);
- __ fld(0);
- __ fadd_d(Operand::StaticVariable(one_half));
- // rc=01B, round down.
- __ X87SetRC(0x0400);
- __ sub(esp, Immediate(kPointerSize));
- // Clear exception bits.
- __ fnclex();
- __ fistp_s(MemOperand(esp, 0));
- // Restore round mode.
- __ X87SetRC(0x0000);
- // Check overflow.
- __ X87CheckIA();
- __ pop(result);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
- __ fnclex();
- // Restore round mode.
- __ X87SetRC(0x0000);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMathFround(LMathFround* instr) {
- X87Register input_reg = ToX87Register(instr->value());
- X87Fxch(input_reg);
- __ sub(esp, Immediate(kPointerSize));
- __ fstp_s(MemOperand(esp, 0));
- X87Fld(MemOperand(esp, 0), kX87FloatOperand);
- __ add(esp, Immediate(kPointerSize));
-}
-
-
-void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- X87Register input_reg = ToX87Register(instr->value());
- __ X87SetFPUCW(0x027F);
- X87Fxch(input_reg);
- __ fsqrt();
- __ X87SetFPUCW(0x037F);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- X87Register input_reg = ToX87Register(instr->value());
- DCHECK(ToX87Register(instr->result()).is(input_reg));
- X87Fxch(input_reg);
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
- __ fxam();
- __ push(eax);
- __ fnstsw_ax();
- __ and_(eax, Immediate(0x4700));
- __ cmp(eax, Immediate(0x0700));
- __ j(not_equal, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ fchs();
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ fldz();
- __ faddp(); // Convert -0 to +0.
- __ fsqrt();
- __ bind(&done);
- __ pop(eax);
-}
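A minimal standalone C++ sketch of the ECMA-262 special case handled above, using a hypothetical helper name and assuming only <cmath>/<limits>: pow(-Infinity, 0.5) must be +Infinity even though sqrt(-Infinity) is NaN, and a -0 base is normalized to +0 before taking the square root.

    #include <cmath>
    #include <limits>
    // Illustrative helper (not part of the diff): Math.pow(base, 0.5).
    double PowHalf(double base) {
      // -Infinity maps to +Infinity per ECMA-262 15.8.2.13.
      if (std::isinf(base) && base < 0) {
        return std::numeric_limits<double>::infinity();
      }
      // Adding +0.0 turns a -0 base into +0, mirroring the fldz/faddp above.
      return std::sqrt(base + 0.0);
    }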
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- X87Register result = ToX87Register(instr->result());
- // Having marked this as a call, we can use any registers.
- X87Register base = ToX87Register(instr->left());
- ExternalReference one_half = ExternalReference::address_of_one_half();
-
- if (exponent_type.IsSmi()) {
- Register exponent = ToRegister(instr->right());
- X87LoadForUsage(base);
- __ SmiUntag(exponent);
- __ push(exponent);
- __ fild_s(MemOperand(esp, 0));
- __ pop(exponent);
- } else if (exponent_type.IsTagged()) {
- Register exponent = ToRegister(instr->right());
- Register temp = exponent.is(ecx) ? eax : ecx;
- Label no_deopt, done;
- X87LoadForUsage(base);
- __ JumpIfSmi(exponent, &no_deopt);
- __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
-    // Heap number (double).
- __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
- __ jmp(&done);
- // SMI
- __ bind(&no_deopt);
- __ SmiUntag(exponent);
- __ push(exponent);
- __ fild_s(MemOperand(esp, 0));
- __ pop(exponent);
- __ bind(&done);
- } else if (exponent_type.IsInteger32()) {
- Register exponent = ToRegister(instr->right());
- X87LoadForUsage(base);
- __ push(exponent);
- __ fild_s(MemOperand(esp, 0));
- __ pop(exponent);
- } else {
- DCHECK(exponent_type.IsDouble());
- X87Register exponent_double = ToX87Register(instr->right());
- X87LoadForUsage(base, exponent_double);
- }
-
- // FP data stack {base, exponent(TOS)}.
- // Handle (exponent==+-0.5 && base == -0).
- Label not_plus_0;
- __ fld(0);
- __ fabs();
- X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
- __ FCmp();
- __ j(parity_even, &not_plus_0, Label::kNear); // NaN.
- __ j(not_equal, &not_plus_0, Label::kNear);
- __ fldz();
- // FP data stack {base, exponent(TOS), zero}.
- __ faddp(2);
- __ bind(&not_plus_0);
-
- {
- __ PrepareCallCFunction(4, eax);
- __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value.
- __ fstp_d(MemOperand(esp, 0)); // Base value.
- X87PrepareToWrite(result);
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
- }
-}
-
-
-void LCodeGen::DoMathLog(LMathLog* instr) {
- DCHECK(instr->value()->Equals(instr->result()));
- X87Register result = ToX87Register(instr->result());
- X87Register input_reg = ToX87Register(instr->value());
- X87Fxch(input_reg);
-
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ fstp_d(MemOperand(esp, 0));
- X87PrepareToWrite(result);
- __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
-}
-
-
-void LCodeGen::DoMathClz32(LMathClz32* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ Lzcnt(result, input);
-}
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
- X87Register result = ToX87Register(instr->result());
- X87Register input_reg = ToX87Register(instr->value());
- __ fld(x87_stack_.st(input_reg));
-
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ fstp_d(MemOperand(esp, 0));
- X87PrepareToWrite(result);
- __ X87SetFPUCW(0x027F);
- __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
- __ X87SetFPUCW(0x037F);
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
-}
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
- X87Register result = ToX87Register(instr->result());
- X87Register input_reg = ToX87Register(instr->value());
- __ fld(x87_stack_.st(input_reg));
-
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ fstp_d(MemOperand(esp, 0));
- X87PrepareToWrite(result);
- __ X87SetFPUCW(0x027F);
- __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
- __ X87SetFPUCW(0x037F);
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
-}
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- X87Register result = ToX87Register(instr->result());
- X87Register input_reg = ToX87Register(instr->value());
- __ fld(x87_stack_.st(input_reg));
-
- // Pass one double as argument on the stack.
- __ PrepareCallCFunction(2, eax);
- __ fstp_d(MemOperand(esp, 0));
- X87PrepareToWrite(result);
- __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
-}
-
-void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
- Register scratch1, Register scratch2,
- Register scratch3) {
-#if DEBUG
- if (actual.is_reg()) {
- DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
- } else {
- DCHECK(!AreAliased(scratch1, scratch2, scratch3));
- }
-#endif
- if (FLAG_code_comments) {
- if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {",
- RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
- actual.reg().code()));
- } else {
- Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
- }
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_arguments_adaptor, Label::kNear);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ mov(ebp, scratch2);
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ jmp(&formal_parameter_count_loaded, Label::kNear);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count.
- __ mov(caller_args_count_reg,
- Immediate(info()->literal()->parameter_count()));
-
- __ bind(&formal_parameter_count_loaded);
- __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
- ReturnAddressState::kNotOnStack, 0);
- Comment(";;; }");
-}
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- HInvokeFunction* hinstr = instr->hydrogen();
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->function()).is(edi));
- DCHECK(instr->HasPointerMap());
-
- bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
-
- if (is_tail_call) {
- DCHECK(!info()->saves_caller_doubles());
- ParameterCount actual(instr->arity());
- // It is safe to use ebx, ecx and edx as scratch registers here given that
- // 1) we are not going to return to caller function anyway,
- // 2) ebx (expected arguments count) and edx (new.target) will be
- // initialized below.
- PrepareForTailCall(actual, ebx, ecx, edx);
- }
-
- Handle<JSFunction> known_function = hinstr->known_function();
- if (known_function.is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(instr->arity());
- InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
- __ InvokeFunction(edi, no_reg, actual, flag, generator);
- } else {
- CallKnownFunction(known_function, hinstr->formal_parameter_count(),
- instr->arity(), is_tail_call, instr);
- }
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->constructor()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- __ Move(eax, Immediate(instr->arity()));
- __ mov(ebx, instr->hydrogen()->site());
-
- ElementsKind kind = instr->hydrogen()->elements_kind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (instr->arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else if (instr->arity() == 1) {
- Label done;
- if (IsFastPackedElementsKind(kind)) {
- Label packed_case;
-      // We might need the holey variant of the stub;
-      // look at the first argument.
- __ mov(ecx, Operand(esp, 0));
- __ test(ecx, ecx);
- __ j(zero, &packed_case, Label::kNear);
-
- ElementsKind holey_kind = GetHoleyElementsKind(kind);
- ArraySingleArgumentConstructorStub stub(isolate(),
- holey_kind,
- override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ jmp(&done, Label::kNear);
- __ bind(&packed_case);
- }
-
- ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
- } else {
- ArrayNArgumentsConstructorStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
-}
-
-
-void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
- Register function = ToRegister(instr->function());
- Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
- __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
-}
-
-
-void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
- Register result = ToRegister(instr->result());
- Register base = ToRegister(instr->base_object());
- if (instr->offset()->IsConstantOperand()) {
- LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
- } else {
- Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
- }
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Representation representation = instr->hydrogen()->field_representation();
-
- HObjectAccess access = instr->hydrogen()->access();
- int offset = access.offset();
-
- if (access.IsExternalMemory()) {
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- MemOperand operand = instr->object()->IsConstantOperand()
- ? MemOperand::StaticVariable(
- ToExternalReference(LConstantOperand::cast(instr->object())))
- : MemOperand(ToRegister(instr->object()), offset);
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- __ mov(operand, Immediate(ToInteger32(operand_value)));
- } else {
- Register value = ToRegister(instr->value());
- __ Store(value, operand, representation);
- }
- return;
- }
-
- Register object = ToRegister(instr->object());
- __ AssertNotSmi(object);
- DCHECK(!representation.IsSmi() ||
- !instr->value()->IsConstantOperand() ||
- IsSmi(LConstantOperand::cast(instr->value())));
- if (representation.IsDouble()) {
- DCHECK(access.IsInobject());
- DCHECK(!instr->hydrogen()->has_transition());
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- X87Register value = ToX87Register(instr->value());
- X87Mov(FieldOperand(object, offset), value);
- return;
- }
-
- if (instr->hydrogen()->has_transition()) {
- Handle<Map> transition = instr->hydrogen()->transition_map();
- AddDeprecationDependency(transition);
- __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, transition);
- __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
- // Update the write barrier for the map field.
- __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
- }
- }
-
- // Do the store.
- Register write_register = object;
- if (!access.IsInobject()) {
- write_register = ToRegister(instr->temp());
- __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
- }
-
- MemOperand operand = FieldOperand(write_register, offset);
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (operand_value->IsRegister()) {
- Register value = ToRegister(operand_value);
- __ Store(value, operand, representation);
- } else if (representation.IsInteger32() || representation.IsExternal()) {
- Immediate immediate = ToImmediate(operand_value, representation);
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(operand, immediate);
- } else {
- Handle<Object> handle_value = ToHandle(operand_value);
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(operand, handle_value);
- }
- } else {
- Register value = ToRegister(instr->value());
- __ Store(value, operand, representation);
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register value = ToRegister(instr->value());
- Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- instr->hydrogen()->SmiCheckForWriteBarrier(),
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
- if (instr->index()->IsConstantOperand()) {
- __ cmp(ToOperand(instr->length()),
- ToImmediate(LConstantOperand::cast(instr->index()),
- instr->hydrogen()->length()->representation()));
- cc = CommuteCondition(cc);
- } else if (instr->length()->IsConstantOperand()) {
- __ cmp(ToOperand(instr->index()),
- ToImmediate(LConstantOperand::cast(instr->length()),
- instr->hydrogen()->index()->representation()));
- } else {
- __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- }
- if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
- Label done;
- __ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
- __ bind(&done);
- } else {
- DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- instr->base_offset()));
- if (elements_kind == FLOAT32_ELEMENTS) {
- X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
- } else if (elements_kind == FLOAT64_ELEMENTS) {
- uint64_t int_val = kHoleNanInt64;
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- Operand operand2 = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(), elements_kind,
- instr->base_offset() + kPointerSize);
-
- Label no_special_nan_handling, done;
- X87Register value = ToX87Register(instr->value());
- X87Fxch(value);
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- int offset = sizeof(kHoleNanUpper32);
- __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &no_special_nan_handling, Label::kNear);
- __ mov(operand, Immediate(lower));
- __ mov(operand2, Immediate(upper));
- __ jmp(&done, Label::kNear);
-
- __ bind(&no_special_nan_handling);
- __ fst_d(operand);
- __ bind(&done);
- } else {
- Register value = ToRegister(instr->value());
- switch (elements_kind) {
- case UINT8_ELEMENTS:
- case INT8_ELEMENTS:
- case UINT8_CLAMPED_ELEMENTS:
- __ mov_b(operand, value);
- break;
- case UINT16_ELEMENTS:
- case INT16_ELEMENTS:
- __ mov_w(operand, value);
- break;
- case UINT32_ELEMENTS:
- case INT32_ELEMENTS:
- __ mov(operand, value);
- break;
- case FLOAT32_ELEMENTS:
- case FLOAT64_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- case NO_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- instr->base_offset());
-
- uint64_t int_val = kHoleNanInt64;
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- Operand double_store_operand2 = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
- instr->base_offset() + kPointerSize);
-
- if (instr->hydrogen()->IsConstantHoleStore()) {
- // This means we should store the (double) hole. No floating point
- // registers required.
- __ mov(double_store_operand, Immediate(lower));
- __ mov(double_store_operand2, Immediate(upper));
- } else {
- Label no_special_nan_handling, done;
- X87Register value = ToX87Register(instr->value());
- X87Fxch(value);
-
- if (instr->NeedsCanonicalization()) {
- __ fld(0);
- __ fld(0);
- __ FCmp();
- __ j(parity_odd, &no_special_nan_handling, Label::kNear);
-      // All NaNs are canonicalized to 0x7fffffffffffffff.
- __ mov(double_store_operand, Immediate(0xffffffff));
- __ mov(double_store_operand2, Immediate(0x7fffffff));
- __ jmp(&done, Label::kNear);
- } else {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- int offset = sizeof(kHoleNanUpper32);
- __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &no_special_nan_handling, Label::kNear);
- __ mov(double_store_operand, Immediate(lower));
- __ mov(double_store_operand2, Immediate(upper));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_special_nan_handling);
- __ fst_d(double_store_operand);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
- Operand operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- instr->base_offset());
- if (instr->value()->IsRegister()) {
- __ mov(operand, ToRegister(instr->value()));
- } else {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsSmi(operand_value)) {
- Immediate immediate = ToImmediate(operand_value, Representation::Smi());
- __ mov(operand, immediate);
- } else {
- DCHECK(!IsInteger32(operand_value));
- Handle<Object> handle_value = ToHandle(operand_value);
- __ mov(operand, handle_value);
- }
- }
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- DCHECK(instr->value()->IsRegister());
- Register value = ToRegister(instr->value());
- DCHECK(!instr->key()->IsConstantOperand());
- SmiCheck check_needed =
- instr->hydrogen()->value()->type().IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ lea(key, operand);
- __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
- check_needed,
- instr->hydrogen()->PointersToHereCheckForValue());
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
-  // Dispatch by elements kind: external, fast-double, or fast.
- if (instr->is_fixed_typed_array()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- Label no_memento_found;
- __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
- __ bind(&no_memento_found);
-}
-
-
-void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
- class DeferredMaybeGrowElements final : public LDeferredCode {
- public:
- DeferredMaybeGrowElements(LCodeGen* codegen,
- LMaybeGrowElements* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) {}
- void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LMaybeGrowElements* instr_;
- };
-
- Register result = eax;
- DeferredMaybeGrowElements* deferred =
- new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
- LOperand* key = instr->key();
- LOperand* current_capacity = instr->current_capacity();
-
- DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
- DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
- DCHECK(key->IsConstantOperand() || key->IsRegister());
- DCHECK(current_capacity->IsConstantOperand() ||
- current_capacity->IsRegister());
-
- if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- if (constant_key >= constant_capacity) {
- // Deferred case.
- __ jmp(deferred->entry());
- }
- } else if (key->IsConstantOperand()) {
- int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
- __ cmp(ToOperand(current_capacity), Immediate(constant_key));
- __ j(less_equal, deferred->entry());
- } else if (current_capacity->IsConstantOperand()) {
- int32_t constant_capacity =
- ToInteger32(LConstantOperand::cast(current_capacity));
- __ cmp(ToRegister(key), Immediate(constant_capacity));
- __ j(greater_equal, deferred->entry());
- } else {
- __ cmp(ToRegister(key), ToRegister(current_capacity));
- __ j(greater_equal, deferred->entry());
- }
-
- __ mov(result, ToOperand(instr->elements()));
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register result = eax;
- __ Move(result, Immediate(0));
-
- // We have to call a stub.
- {
- PushSafepointRegistersScope scope(this);
- if (instr->object()->IsRegister()) {
- __ Move(result, ToRegister(instr->object()));
- } else {
- __ mov(result, ToOperand(instr->object()));
- }
-
- LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ mov(ebx, Immediate(Smi::FromInt(int_key)));
- } else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ int3();
- }
- } else {
- __ Move(ebx, ToRegister(key));
- __ SmiTag(ebx);
- }
-
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- __ StoreToSafepointRegisterSlot(result, result);
- }
-
- // Deopt on smi, which means the elements array changed to dictionary mode.
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- bool is_simple_map_transition =
- IsSimpleMapChangeTransition(from_kind, to_kind);
- Label::Distance branch_distance =
- is_simple_map_transition ? Label::kNear : Label::kFar;
- __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable, branch_distance);
- if (is_simple_map_transition) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(to_map));
- // Write barrier.
- DCHECK_NOT_NULL(instr->temp());
- __ RecordWriteForMap(object_reg, to_map, new_map_reg,
- ToRegister(instr->temp()), kDontSaveFPRegs);
- } else {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(object_reg.is(eax));
- PushSafepointRegistersScope scope(this);
- __ mov(ebx, to_map);
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithLazyDeopt(instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt final : public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen,
- LStringCharCodeAt* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
-
- StringCharLoadGenerator::Generate(masm(),
- factory(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
- Representation::Smi());
- __ push(immediate);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
- instr, instr->context());
- __ AssertSmi(eax);
- __ SmiUntag(eax);
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode final : public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen,
- LStringCharFromCode* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredStringCharFromCode(instr_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
-
- DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- DCHECK(!char_code.is(result));
-
- __ cmp(char_code, String::kMaxOneByteCharCode);
- __ j(above, deferred->entry());
- __ Move(result, Immediate(factory()->single_character_string_cache()));
- __ mov(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, factory()->undefined_value());
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
- instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(edx));
- DCHECK(ToRegister(instr->right()).is(eax));
- StringAddStub stub(isolate(),
- instr->hydrogen()->flags(),
- instr->hydrogen()->pretenure_flag());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- DCHECK(input->IsRegister() || input->IsStackSlot());
- DCHECK(output->IsDoubleRegister());
- if (input->IsRegister()) {
- Register input_reg = ToRegister(input);
- __ push(input_reg);
- X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
- __ pop(input_reg);
- } else {
- X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- X87Register res = ToX87Register(output);
- X87PrepareToWrite(res);
- __ LoadUint32NoSSE2(ToRegister(input));
- X87CommitWrite(res);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI final : public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen,
- LNumberTagI* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
- SIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagI* deferred =
- new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
- __ SmiTag(reg);
- __ j(overflow, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU final : public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen,
- LNumberTagU* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
- UNSIGNED_INT32);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred =
- new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
- __ cmp(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ SmiTag(reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp,
- IntegerSignedness signedness) {
- Label done, slow;
- Register reg = ToRegister(value);
- Register tmp = ToRegister(temp);
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- __ push(reg);
- __ fild_s(Operand(esp, 0));
- __ pop(reg);
- } else {
- // There's no fild variant for unsigned values, so zero-extend to a 64-bit
- // int manually.
- __ push(Immediate(0));
- __ push(reg);
- __ fild_d(Operand(esp, 0));
- __ pop(reg);
- __ pop(reg);
- }
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ Move(reg, Immediate(0));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(esi)) {
- __ Move(esi, Immediate(0));
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, eax);
- }
-
- __ bind(&done);
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
-}
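A minimal sketch of the unsigned-to-double trick used in the UNSIGNED_INT32 path above, with a hypothetical helper name: fild only understands signed integers, so the value is first zero-extended to 64 bits, where the signed conversion is exact for any uint32_t.

    #include <cstdint>
    // Illustrative helper (not part of the diff): widen before converting so
    // the signed conversion never sees a negative bit pattern.
    double Uint32ToDouble(uint32_t value) {
      return static_cast<double>(static_cast<int64_t>(value));
    }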
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD final : public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen,
- LNumberTagD* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LNumberTagD* instr_;
- };
-
- Register reg = ToRegister(instr->result());
-
-  // Put the value on top of the x87 stack.
- X87Register src = ToX87Register(instr->value());
-  // Don't use X87LoadForUsage here; it is only meant for instructions that
-  // clobber FP registers.
- x87_stack_.Fxch(src);
-
- DeferredNumberTagD* deferred =
- new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
- if (FLAG_inline_new) {
- Register tmp = ToRegister(instr->temp());
- __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- // Reset the context register.
- if (!reg.is(esi)) {
- __ Move(esi, Immediate(0));
- }
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, eax);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- HChange* hchange = instr->hydrogen();
- Register input = ToRegister(instr->value());
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- hchange->value()->CheckFlag(HValue::kUint32)) {
- __ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
- }
- __ SmiTag(input);
- if (hchange->CheckFlag(HValue::kCanOverflow) &&
- !hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
- }
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->value();
- Register result = ToRegister(input);
- DCHECK(input->IsRegister() && input->Equals(instr->result()));
- if (instr->needs_check()) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
- } else {
- __ AssertSmi(result);
- }
- __ SmiUntag(result);
-}
-
-
-void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
- Register temp_reg, X87Register res_reg,
- NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan = instr->truncating();
- bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
-
- Label load_smi, done;
-
- X87PrepareToWrite(res_reg);
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
- } else {
- Label heap_number, convert;
- __ j(equal, &heap_number);
-
- // Convert undefined (or hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr,
- DeoptimizeReason::kNotAHeapNumberUndefined);
-
- __ bind(&convert);
- __ push(Immediate(0xfff80000));
- __ push(Immediate(0x00000000));
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to x87 conversion.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ fldz();
- __ FCmp();
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ j(not_zero, &done, Label::kNear);
-
- // Use general purpose registers to check if we have -0.0
- __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ test(temp_reg, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done, Label::kNear);
-
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- }
- __ jmp(&done, Label::kNear);
- } else {
- DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- __ bind(&load_smi);
- // Clobbering a temp is faster than re-tagging the
- // input register since we avoid dependencies.
- __ mov(temp_reg, input_reg);
- __ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ push(temp_reg);
- __ fild_s(Operand(esp, 0));
- __ add(esp, Immediate(kPointerSize));
- __ bind(&done);
- X87CommitWrite(res_reg);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
- Register input_reg = ToRegister(instr->value());
-
- // The input was optimistically untagged; revert it.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
-
- if (instr->truncating()) {
- Label truncate;
- Label::Distance truncate_distance =
- DeoptEveryNTimes() ? Label::kFar : Label::kNear;
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &truncate, truncate_distance);
- __ push(input_reg);
- __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
- __ pop(input_reg);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
- __ bind(&truncate);
- __ TruncateHeapNumberToI(input_reg, input_reg);
- } else {
- // TODO(olivf) Converting a number on the fpu is actually quite slow. We
- // should first try a fast conversion and then bailout to this slow case.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
-
- __ sub(esp, Immediate(kPointerSize));
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
-
- if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
- Label no_precision_lost, not_nan, zero_check;
- __ fld(0);
-
- __ fist_s(MemOperand(esp, 0));
- __ fild_s(MemOperand(esp, 0));
- __ FCmp();
- __ pop(input_reg);
-
- __ j(equal, &no_precision_lost, Label::kNear);
- __ fstp(0);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&no_precision_lost);
-
- __ j(parity_odd, &not_nan);
- __ fstp(0);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&not_nan);
-
- __ test(input_reg, Operand(input_reg));
- __ j(zero, &zero_check, Label::kNear);
- __ fstp(0);
- __ jmp(done);
-
- __ bind(&zero_check);
-      // To check for minus zero, store the value as a float and inspect its
-      // bit pattern: -0 has only the sign bit set, while +0 is all zeros.
- __ sub(esp, Immediate(kPointerSize));
- __ fstp_s(Operand(esp, 0));
- __ pop(input_reg);
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
- } else {
- __ fist_s(MemOperand(esp, 0));
- __ fild_s(MemOperand(esp, 0));
- __ FCmp();
- __ pop(input_reg);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
- DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
- }
- }
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI final : public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen,
- LTaggedToI* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register input_reg = ToRegister(input);
- DCHECK(input_reg.is(ToRegister(instr->result())));
-
- if (instr->hydrogen()->value()->representation().IsSmi()) {
- __ SmiUntag(input_reg);
- } else {
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ SmiUntag(input_reg);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ j(carry, deferred->entry());
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- LOperand* temp = instr->temp();
- DCHECK(temp->IsRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- Register temp_reg = ToRegister(temp);
-
- HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
-
- EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
- mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsDoubleRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsRegister());
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ TruncateX87TOSToI(result_reg);
- } else {
- Label lost_precision, is_nan, minus_zero, done;
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &lost_precision, &is_nan, &minus_zero);
- __ jmp(&done);
- __ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
- LOperand* input = instr->value();
- DCHECK(input->IsDoubleRegister());
- LOperand* result = instr->result();
- DCHECK(result->IsRegister());
- Register result_reg = ToRegister(result);
-
- Label lost_precision, is_nan, minus_zero, done;
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &lost_precision, &is_nan, &minus_zero);
- __ jmp(&done);
- __ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
- __ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
- __ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
- __ bind(&done);
- __ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
- }
-}
-
-
-void LCodeGen::DoCheckArrayBufferNotNeutered(
- LCheckArrayBufferNotNeutered* instr) {
- Register view = ToRegister(instr->view());
- Register scratch = ToRegister(instr->scratch());
-
- __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
- __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
- Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
- } else {
- DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
- DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (base::bits::IsPowerOfTwo32(mask)) {
- DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- DeoptimizeReason::kWrongInstanceType);
- } else {
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ and_(temp, mask);
- __ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
- }
- }
-}
-
-
-void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object().handle();
- if (instr->hydrogen()->object_in_new_space()) {
- Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ cmp(reg, Operand::ForCell(cell));
- } else {
- Operand operand = ToOperand(instr->value());
- __ cmp(operand, object);
- }
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
-}
-
-
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- Label deopt, done;
-  // If the map is not deprecated, the migration attempt does not make sense.
- __ push(object);
- __ mov(object, FieldOperand(object, HeapObject::kMapOffset));
- __ test(FieldOperand(object, Map::kBitField3Offset),
- Immediate(Map::Deprecated::kMask));
- __ pop(object);
- __ j(zero, &deopt);
-
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-
- __ test(eax, Immediate(kSmiTagMask));
- }
- __ j(not_zero, &done);
-
- __ bind(&deopt);
- DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps final : public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen,
- LCheckMaps* instr,
- Register object,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- void Generate() override {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- LInstruction* instr() override { return instr_; }
-
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
- if (instr->hydrogen()->IsStabilityCheck()) {
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- for (int i = 0; i < maps->size(); ++i) {
- AddStabilityDependency(maps->at(i).handle());
- }
- return;
- }
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
- __ bind(deferred->check_maps());
- }
-
- const UniqueSet<Map>* maps = instr->hydrogen()->maps();
- Label success;
- for (int i = 0; i < maps->size() - 1; i++) {
- Handle<Map> map = maps->at(i).handle();
- __ CompareMap(reg, map);
- __ j(equal, &success, Label::kNear);
- }
-
- Handle<Map> map = maps->at(maps->size() - 1).handle();
- __ CompareMap(reg, map);
- if (instr->hydrogen()->HasMigrationTarget()) {
- __ j(not_equal, deferred->entry());
- } else {
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
- }
-
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- X87Register value_reg = ToX87Register(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- X87Fxch(value_reg);
- __ ClampTOSToUint8(result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- DCHECK(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- Register scratch = ToRegister(instr->scratch());
- Register scratch2 = ToRegister(instr->scratch2());
- Register scratch3 = ToRegister(instr->scratch3());
- Label is_smi, done, heap_number, valid_exponent,
- largest_value, zero_result, maybe_nan_or_infinity;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
- __ jmp(&zero_result, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
-
-  // Surprisingly, all of the hand-crafted bit manipulations below are much
-  // faster than the x86 FPU's built-in instruction, especially since
-  // "banker's rounding" would be very expensive there as well.
-
- // Get exponent word.
- __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
-
- // Test for negative values --> clamp to zero
- __ test(scratch, scratch);
- __ j(negative, &zero_result, Label::kNear);
-
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ j(zero, &zero_result, Label::kNear);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
- __ j(negative, &zero_result, Label::kNear);
-
- const uint32_t non_int8_exponent = 7;
- __ cmp(scratch2, Immediate(non_int8_exponent + 1));
- // If the exponent is too big, check for special values.
- __ j(greater, &maybe_nan_or_infinity, Label::kNear);
-
- __ bind(&valid_exponent);
-  // Exponent word in scratch, exponent in scratch2. We know that
-  // 0 <= exponent < 7. The shift bias is the number of bits to shift the
-  // mantissa such that, with an exponent of 7, the top-most one ends up in
-  // bit 30, allowing detection of the rounding overflow of 255.5 to 256
-  // (bit 31 goes from 0 to 1).
- int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
- __ lea(result_reg, MemOperand(scratch2, shift_bias));
- // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
- // top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1 of the mantissa
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up to round
- __ shl_cl(scratch);
- // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
- // use the bit in the "ones" place and add it to the "halves" place, which has
- // the effect of rounding to even.
- __ mov(scratch2, scratch);
- const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
- const uint32_t one_bit_shift = one_half_bit_shift + 1;
- __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
- __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
- Label no_round;
- __ j(less, &no_round, Label::kNear);
- Label round_up;
- __ mov(scratch2, Immediate(1 << one_half_bit_shift));
- __ j(greater, &round_up, Label::kNear);
- __ test(scratch3, scratch3);
- __ j(not_zero, &round_up, Label::kNear);
- __ mov(scratch2, scratch);
- __ and_(scratch2, Immediate(1 << one_bit_shift));
- __ shr(scratch2, 1);
- __ bind(&round_up);
- __ add(scratch, scratch2);
- __ j(overflow, &largest_value, Label::kNear);
- __ bind(&no_round);
- __ shr(scratch, 23);
- __ mov(result_reg, scratch);
- __ jmp(&done, Label::kNear);
-
- __ bind(&maybe_nan_or_infinity);
- // Check for NaN/Infinity, all other values map to 255
- __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
- __ j(not_equal, &largest_value, Label::kNear);
-
- // Check for NaN, which differs from Infinity in that at least one mantissa
- // bit is set.
- __ and_(scratch, HeapNumber::kMantissaMask);
- __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
- // Infinity -> Fall through to map to 255.
-
- __ bind(&largest_value);
- __ mov(result_reg, Immediate(255));
- __ jmp(&done, Label::kNear);
-
- __ bind(&zero_result);
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- if (!input_reg.is(result_reg)) {
- __ mov(result_reg, input_reg);
- }
- __ SmiUntag(result_reg);
- __ ClampUint8(result_reg);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate final : public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen,
- LAllocate* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredAllocate(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr, x87_stack_);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
- }
- DCHECK(!instr->hydrogen()->IsAllocationFolded());
-
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-
- if (instr->hydrogen()->MustPrefillWithFiller()) {
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ mov(temp, (size / kPointerSize) - 1);
- } else {
- temp = ToRegister(instr->size());
- __ shr(temp, kPointerSizeLog2);
- __ dec(temp);
- }
- Label loop;
- __ bind(&loop);
- __ mov(FieldOperand(result, temp, times_pointer_size, 0),
- isolate()->factory()->one_pointer_filler_map());
- __ dec(temp);
- __ j(not_zero, &loop);
- }
-}
-
-void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
- DCHECK(instr->hydrogen()->IsAllocationFolded());
- DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- AllocationFlags flags = ALLOCATION_FOLDED;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- CHECK(size <= kMaxRegularHeapObjectSize);
- __ FastAllocate(size, result, temp, flags);
- } else {
- Register size = ToRegister(instr->size());
- __ FastAllocate(size, result, temp, flags);
- }
-}
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Move(result, Immediate(Smi::kZero));
-
- PushSafepointRegistersScope scope(this);
- if (instr->size()->IsRegister()) {
- Register size = ToRegister(instr->size());
- DCHECK(!size.is(result));
- __ SmiTag(ToRegister(instr->size()));
- __ push(size);
- } else {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size >= 0 && size <= Smi::kMaxValue) {
- __ push(Immediate(Smi::FromInt(size)));
- } else {
- // We should never get here at runtime => abort
- __ int3();
- return;
- }
- }
-
- int flags = AllocateDoubleAlignFlag::encode(
- instr->hydrogen()->MustAllocateDoubleAligned());
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- flags = AllocateTargetSpace::update(flags, OLD_SPACE);
- } else {
- flags = AllocateTargetSpace::update(flags, NEW_SPACE);
- }
- __ push(Immediate(Smi::FromInt(flags)));
-
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-
- if (instr->hydrogen()->IsAllocationFoldingDominator()) {
- AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
- if (instr->hydrogen()->IsOldSpaceAllocation()) {
- DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
- allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
- }
- // If the allocation-folding dominator allocation triggered a GC, the
- // allocation happened in the runtime. We have to reset the top pointer to
- // virtually undo the allocation.
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
- __ sub(eax, Immediate(kHeapObjectTag));
- __ mov(Operand::StaticVariable(allocation_top), eax);
- __ add(eax, Immediate(kHeapObjectTag));
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->value()).is(ebx));
- Label end, do_call;
- Register value_register = ToRegister(instr->value());
- __ JumpIfNotSmi(value_register, &do_call);
- __ mov(eax, Immediate(isolate()->factory()->number_string()));
- __ jmp(&end);
- __ bind(&do_call);
- Callable callable = CodeFactory::Typeof(isolate());
- CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
- __ bind(&end);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Condition final_branch_condition = EmitTypeofIs(instr, input);
- if (final_branch_condition != no_condition) {
- EmitBranch(instr, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
- Label* true_label = instr->TrueLabel(chunk_);
- Label* false_label = instr->FalseLabel(chunk_);
- Handle<String> type_name = instr->type_literal();
- int left_block = instr->TrueDestination(chunk_);
- int right_block = instr->FalseDestination(chunk_);
- int next_block = GetNextEmittedBlock();
-
- Label::Distance true_distance = left_block == next_block ? Label::kNear
- : Label::kFar;
- Label::Distance false_distance = right_block == next_block ? Label::kNear
- : Label::kFar;
- Condition final_branch_condition = no_condition;
- if (String::Equals(type_name, factory()->number_string())) {
- __ JumpIfSmi(input, true_label, true_distance);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- factory()->heap_number_map());
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->string_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- final_branch_condition = below;
-
- } else if (String::Equals(type_name, factory()->symbol_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->boolean_string())) {
- __ cmp(input, factory()->true_value());
- __ j(equal, true_label, true_distance);
- __ cmp(input, factory()->false_value());
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->undefined_string())) {
- __ cmp(input, factory()->null_value());
- __ j(equal, false_label, false_distance);
- __ JumpIfSmi(input, false_label, false_distance);
- // Check for undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (String::Equals(type_name, factory()->function_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- // Check for callable and not undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
- __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
- __ cmp(input, 1 << Map::kIsCallable);
- final_branch_condition = equal;
-
- } else if (String::Equals(type_name, factory()->object_string())) {
- __ JumpIfSmi(input, false_label, false_distance);
- __ cmp(input, factory()->null_value());
- __ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
- __ j(below, false_label, false_distance);
- // Check for callable or undetectable objects => false.
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- final_branch_condition = zero;
-
- } else {
- __ jmp(false_label, false_distance);
- }
- return final_branch_condition;
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->ShouldEnsureSpaceForLazyDeopt()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- last_lazy_deopt_pc_ = masm()->pc_offset();
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- Deoptimizer::BailoutType type = instr->hydrogen()->type();
- // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
- // needed return address), even though the implementation of LAZY and EAGER is
- // now identical. When LAZY is eventually completely folded into EAGER, remove
- // the special case below.
- if (info()->IsStub() && type == Deoptimizer::EAGER) {
- type = Deoptimizer::LAZY;
- }
- DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
-}
-
-
-void LCodeGen::DoDummy(LDummy* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck final : public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen,
- LStackCheck* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
- LInstruction* instr() override { return instr_; }
-
- private:
- LStackCheck* instr_;
- };
-
- DCHECK(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done, Label::kNear);
-
- DCHECK(instr->context()->IsRegister());
- DCHECK(ToRegister(instr->context()).is(esi));
- CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- __ bind(&done);
- } else {
- DCHECK(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr, x87_stack_);
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting the call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- DCHECK(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-
- GenerateOsrPrologue();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- CallRuntime(Runtime::kForInEnumerate, instr);
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Immediate(Smi::kZero));
- __ j(not_equal, &load_cache, Label::kNear);
- __ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
- __ mov(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- __ test(result, result);
- DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmp(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
-}
-
-
-void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register object,
- Register index) {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ push(index);
- __ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(object, eax);
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- class DeferredLoadMutableDouble final : public LDeferredCode {
- public:
- DeferredLoadMutableDouble(LCodeGen* codegen,
- LLoadFieldByIndex* instr,
- Register object,
- Register index,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack),
- instr_(instr),
- object_(object),
- index_(index) {
- }
- void Generate() override {
- codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
- }
- LInstruction* instr() override { return instr_; }
-
- private:
- LLoadFieldByIndex* instr_;
- Register object_;
- Register index_;
- };
-
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- DeferredLoadMutableDouble* deferred;
- deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, object, index, x87_stack_);
-
- Label out_of_object, done;
- __ test(index, Immediate(Smi::FromInt(1)));
- __ j(not_zero, deferred->entry());
-
- __ sar(index, 1);
-
- __ cmp(index, Immediate(0));
- __ j(less, &out_of_object, Label::kNear);
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ neg(index);
- // Index is now equal to the out-of-object property index plus 1.
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
deleted file mode 100644
index e183fab963..0000000000
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
-#define V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
-
-#include <map>
-
-#include "src/ast/scopes.h"
-#include "src/base/logging.h"
-#include "src/crankshaft/lithium-codegen.h"
-#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
-#include "src/crankshaft/x87/lithium-x87.h"
-#include "src/deoptimizer.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class LGapNode;
-class SafepointGenerator;
-
-class LCodeGen: public LCodeGenBase {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : LCodeGenBase(chunk, assembler, info),
- jump_table_(4, info->zone()),
- scope_(info->scope()),
- deferred_(8, info->zone()),
- frame_is_built_(false),
- x87_stack_(assembler),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- int LookupDestination(int block_id) const {
- return chunk()->LookupDestination(block_id);
- }
-
- bool IsNextEmittedBlock(int block_id) const {
- return LookupDestination(block_id) == GetNextEmittedBlock();
- }
-
- bool NeedsEagerFrame() const {
- return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
- !info()->IsStub() || info()->requires_frame();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Operand ToOperand(LOperand* op) const;
- Register ToRegister(LOperand* op) const;
- X87Register ToX87Register(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Immediate ToImmediate(LOperand* op, const Representation& r) const {
- return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
- }
- double ToDouble(LConstantOperand* op) const;
-
- // Support for non-sse2 (x87) floating point stack handling.
- // These functions maintain the mapping of physical stack registers to our
- // virtual registers between instructions.
- enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
-
- void X87Mov(X87Register reg, Operand src,
- X87OperandType operand = kX87DoubleOperand);
- void X87Mov(Operand src, X87Register reg,
- X87OperandType operand = kX87DoubleOperand);
- void X87Mov(X87Register reg, X87Register src,
- X87OperandType operand = kX87DoubleOperand);
-
- void X87PrepareBinaryOp(
- X87Register left, X87Register right, X87Register result);
-
- void X87LoadForUsage(X87Register reg);
- void X87LoadForUsage(X87Register reg1, X87Register reg2);
- void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
- void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
-
- void X87Fxch(X87Register reg, int other_slot = 0) {
- x87_stack_.Fxch(reg, other_slot);
- }
- void X87Free(X87Register reg) {
- x87_stack_.Free(reg);
- }
-
-
- bool X87StackEmpty() {
- return x87_stack_.depth() == 0;
- }
-
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // The operand denoting the second word (the one with a higher address) of
- // a double stack slot.
- Operand HighOperand(LOperand* op);
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagIU(LInstruction* instr,
- LOperand* value,
- LOperand* temp,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
- Register object,
- Register index);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
-
- void EnsureRelocSpaceForDeoptimization();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- Scope* scope() const { return scope_; }
-
- void EmitClassOfTest(Label* if_true, Label* if_false,
- Handle<String> class_name, Register input,
- Register temporary, Register temporary2);
-
- bool HasAllocatedStackSlots() const {
- return chunk()->HasAllocatedStackSlots();
- }
- int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
- int GetTotalFrameSlotCount() const {
- return chunk()->GetTotalFrameSlotCount();
- }
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- void GenerateBodyInstructionPre(LInstruction* instr) override;
- void GenerateBodyInstructionPost(LInstruction* instr) override;
- bool GeneratePrologue();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- // Generates the custom OSR entrypoint and sets the osr_pc_offset.
- void GenerateOsrPrologue();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
-
- void CallRuntime(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr);
- }
-
- void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, function->nargs, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void LoadContextFromDeferred(LOperand* context);
-
- void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
- Register scratch2, Register scratch3);
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int formal_parameter_count, int arity,
- bool is_tail_call, LInstruction* instr);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LInstruction* instr,
- DeoptimizeReason deopt_reason);
-
- bool DeoptEveryNTimes() {
- return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
- }
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
-
- Register ToRegister(int index) const;
- X87Register ToX87Register(int index) const;
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
- int32_t ToInteger32(LConstantOperand* op) const;
- ExternalReference ToExternalReference(LConstantOperand* op) const;
-
- Operand BuildFastArrayOperand(LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t base_offset);
-
- Operand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
-
- // Support for recording safepoint information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition cc);
- template <class InstrType>
- void EmitTrueBranch(InstrType instr, Condition cc);
- template <class InstrType>
- void EmitFalseBranch(InstrType instr, Condition cc);
- void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input,
- Register temp, X87Register res_reg,
- NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt(int space_needed) override;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- template <class T>
- void EmitVectorLoadICRegisters(T* instr);
-
- void EmitReturn(LReturn* instr);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- void X87Fld(Operand src, X87OperandType opts);
-
- void EmitFlushX87ForDeopt();
- void FlushX87StackIfNecessary(LInstruction* instr) {
- x87_stack_.FlushIfNecessary(instr, this);
- }
- friend class LGapResolver;
-
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write an arbitrary value to each page in range
- // esp + offset - page_size .. esp in turn.
- void MakeSureStackPagesMapped(int offset);
-#endif
-
- ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- Scope* const scope_;
- ZoneList<LDeferredCode*> deferred_;
- bool frame_is_built_;
-
- class X87Stack : public ZoneObject {
- public:
- explicit X87Stack(MacroAssembler* masm)
- : stack_depth_(0), is_mutable_(true), masm_(masm) { }
- explicit X87Stack(const X87Stack& other)
- : stack_depth_(other.stack_depth_), is_mutable_(false), masm_(masm()) {
- for (int i = 0; i < stack_depth_; i++) {
- stack_[i] = other.stack_[i];
- }
- }
- bool operator==(const X87Stack& other) const {
- if (stack_depth_ != other.stack_depth_) return false;
- for (int i = 0; i < stack_depth_; i++) {
- if (!stack_[i].is(other.stack_[i])) return false;
- }
- return true;
- }
- X87Stack& operator=(const X87Stack& other) {
- stack_depth_ = other.stack_depth_;
- for (int i = 0; i < stack_depth_; i++) {
- stack_[i] = other.stack_[i];
- }
- return *this;
- }
- bool Contains(X87Register reg);
- void Fxch(X87Register reg, int other_slot = 0);
- void Free(X87Register reg);
- void PrepareToWrite(X87Register reg);
- void CommitWrite(X87Register reg);
- void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
- void LeavingBlock(int current_block_id, LGoto* goto_instr, LCodeGen* cgen);
- int depth() const { return stack_depth_; }
- int GetLayout();
- int st(X87Register reg) { return st2idx(ArrayIndex(reg)); }
- void pop() {
- DCHECK(is_mutable_);
- USE(is_mutable_);
- stack_depth_--;
- }
- void push(X87Register reg) {
- DCHECK(is_mutable_);
- DCHECK(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
- stack_[stack_depth_] = reg;
- stack_depth_++;
- }
-
- MacroAssembler* masm() const { return masm_; }
- Isolate* isolate() const { return masm_->isolate(); }
-
- private:
- int ArrayIndex(X87Register reg);
- int st2idx(int pos);
-
- X87Register stack_[X87Register::kMaxNumAllocatableRegisters];
- int stack_depth_;
- bool is_mutable_;
- MacroAssembler* masm_;
- };
- X87Stack x87_stack_;
- // block_id -> X87Stack*;
- typedef std::map<int, X87Stack*> X87StackMap;
- X87StackMap x87_stack_map_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope final BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- DCHECK(codegen_->info()->is_calling());
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- friend class X87Stack;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode : public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_),
- x87_stack_(x87_stack) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() {}
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
- int instruction_index() const { return instruction_index_; }
- const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- Label done_;
- int instruction_index_;
- LCodeGen::X87Stack x87_stack_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
diff --git a/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
deleted file mode 100644
index 6bfc2e2a07..0000000000
--- a/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
-#include "src/register-configuration.h"
-
-#include "src/crankshaft/x87/lithium-codegen-x87.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- source_uses_(),
- destination_uses_(),
- spilled_register_(-1) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- DCHECK(HasBeenReset());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- DCHECK(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- Finish();
- DCHECK(HasBeenReset());
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) AddMove(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- DCHECK(!moves_[index].IsPending());
- DCHECK(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved on the side.
- DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- RemoveMove(index);
- return;
- }
-
- // The move may be blocked on a (at most one) pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- DCHECK(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::AddMove(LMoveOperands move) {
- LOperand* source = move.source();
- if (source->IsRegister()) ++source_uses_[source->index()];
-
- LOperand* destination = move.destination();
- if (destination->IsRegister()) ++destination_uses_[destination->index()];
-
- moves_.Add(move, cgen_->zone());
-}
-
-
-void LGapResolver::RemoveMove(int index) {
- LOperand* source = moves_[index].source();
- if (source->IsRegister()) {
- --source_uses_[source->index()];
- DCHECK(source_uses_[source->index()] >= 0);
- }
-
- LOperand* destination = moves_[index].destination();
- if (destination->IsRegister()) {
- --destination_uses_[destination->index()];
- DCHECK(destination_uses_[destination->index()] >= 0);
- }
-
- moves_[index].Eliminate();
-}
-
-
-int LGapResolver::CountSourceUses(LOperand* operand) {
- int count = 0;
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
- ++count;
- }
- }
- return count;
-}
-
-
-Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : reg.code();
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
- int code = config->GetAllocatableGeneralCode(i);
- if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
- code != skip_index) {
- return Register::from_code(code);
- }
- }
- return no_reg;
-}
-
-
-bool LGapResolver::HasBeenReset() {
- if (!moves_.is_empty()) return false;
- if (spilled_register_ >= 0) return false;
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
- int code = config->GetAllocatableGeneralCode(i);
- if (source_uses_[code] != 0) return false;
- if (destination_uses_[code] != 0) return false;
- }
- return true;
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_DCHECKS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Finish() {
- if (spilled_register_ >= 0) {
- __ pop(Register::from_code(spilled_register_));
- spilled_register_ = -1;
- }
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::EnsureRestored(LOperand* operand) {
- if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::from_code(spilled_register_));
- spilled_register_ = -1;
- }
-}
-
-
-Register LGapResolver::EnsureTempRegister() {
- // 1. We may have already spilled to create a temp register.
- if (spilled_register_ >= 0) {
- return Register::from_code(spilled_register_);
- }
-
- // 2. We may have a free register that we can use without spilling.
- Register free = GetFreeRegisterNot(no_reg);
- if (!free.is(no_reg)) return free;
-
- // 3. Prefer to spill a register that is not used in any remaining move
- // because it will not need to be restored until the end.
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
- int code = config->GetAllocatableGeneralCode(i);
- if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
- Register scratch = Register::from_code(code);
- __ push(scratch);
- spilled_register_ = code;
- return scratch;
- }
- }
-
- // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- spilled_register_ = config->GetAllocatableGeneralCode(0);
- Register scratch = Register::from_code(spilled_register_);
- __ push(scratch);
- return scratch;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = cgen_->ToRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
-
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ mov(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = EnsureTempRegister();
- Operand dst = cgen_->ToOperand(destination);
- __ mov(tmp, src);
- __ mov(dst, tmp);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ Move(dst, cgen_->ToImmediate(constant_source, r));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else if (destination->IsDoubleRegister()) {
- double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = bit_cast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- X87Register dst = cgen_->ToX87Register(destination);
- cgen_->X87Mov(dst, MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- Representation r = cgen_->IsSmi(constant_source)
- ? Representation::Smi() : Representation::Integer32();
- if (cgen_->IsInteger32(constant_source)) {
- __ Move(dst, cgen_->ToImmediate(constant_source, r));
- } else {
- Register tmp = EnsureTempRegister();
- __ LoadObject(tmp, cgen_->ToHandle(constant_source));
- __ mov(dst, tmp);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- // Load from the register onto the stack and store in the destination, which
- // must be a double stack slot in the non-SSE2 case.
- if (destination->IsDoubleStackSlot()) {
- Operand dst = cgen_->ToOperand(destination);
- X87Register src = cgen_->ToX87Register(source);
- cgen_->X87Mov(dst, src);
- } else {
- X87Register dst = cgen_->ToX87Register(destination);
- X87Register src = cgen_->ToX87Register(source);
- cgen_->X87Mov(dst, src);
- }
- } else if (source->IsDoubleStackSlot()) {
- // Load the stack slot onto the top of the floating-point stack, and then
- // store it in the destination. If the destination is a double register, it
- // represents the top of the stack and nothing needs to be done.
- if (destination->IsDoubleStackSlot()) {
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- } else {
- Operand src = cgen_->ToOperand(source);
- X87Register dst = cgen_->ToX87Register(destination);
- cgen_->X87Mov(dst, src);
- }
- } else {
- UNREACHABLE();
- }
-
- RemoveMove(index);
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Register-memory. Use a free register as a temp if possible. Do not
- // spill on demand because the simple spill implementation cannot avoid
- // spilling src at this point.
- Register tmp = GetFreeRegisterNot(no_reg);
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- if (tmp.is(no_reg)) {
- __ xor_(reg, mem);
- __ xor_(mem, reg);
- __ xor_(reg, mem);
- } else {
- __ mov(tmp, mem);
- __ mov(mem, reg);
- __ mov(reg, tmp);
- }
-
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory. Spill on demand to use a temporary. If there is a
- // free register after that, use it as a second temporary.
- Register tmp0 = EnsureTempRegister();
- Register tmp1 = GetFreeRegisterNot(tmp0);
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- if (tmp1.is(no_reg)) {
- // Only one temp register available to us.
- __ mov(tmp0, dst);
- __ xor_(tmp0, src);
- __ xor_(src, tmp0);
- __ xor_(tmp0, src);
- __ mov(dst, tmp0);
- } else {
- __ mov(tmp0, dst);
- __ mov(tmp1, src);
- __ mov(dst, tmp1);
- __ mov(src, tmp0);
- }
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- RemoveMove(index);
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have its source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-
- // In addition to swapping the actual uses as sources, we need to update
- // the use counts.
- if (source->IsRegister() && destination->IsRegister()) {
- int temp = source_uses_[source->index()];
- source_uses_[source->index()] = source_uses_[destination->index()];
- source_uses_[destination->index()] = temp;
- } else if (source->IsRegister()) {
- // We don't have use counts for non-register operands like destination.
- // Compute those counts now.
- source_uses_[source->index()] = CountSourceUses(source);
- } else if (destination->IsRegister()) {
- source_uses_[destination->index()] = CountSourceUses(destination);
- }
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h
deleted file mode 100644
index 6b6e2e64b6..0000000000
--- a/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
-#define V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
-
-#include "src/crankshaft/lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver final BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit any code necessary at the end of a gap move.
- void Finish();
-
- // Add or delete a move from the move graph without emitting any code.
- // Used to build up the graph and remove trivial moves.
- void AddMove(LMoveOperands move);
- void RemoveMove(int index);
-
- // Report the count of uses of operand as a source in a not-yet-performed
- // move. Used to rebuild use counts.
- int CountSourceUses(LOperand* operand);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Ensure that the given operand is not spilled.
- void EnsureRestored(LOperand* operand);
-
- // Return a register that can be used as a temp register, spilling
- // something if necessary.
- Register EnsureTempRegister();
-
- // Return a known free register different from the given one (which could
- // be no_reg---returning any free register), or no_reg if there is no such
- // register.
- Register GetFreeRegisterNot(Register reg);
-
- // Verify that the state is the initial one, ready to resolve a single
- // parallel move.
- bool HasBeenReset();
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- // Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumRegisters];
- int destination_uses_[DoubleRegister::kMaxNumRegisters];
-
- // If we had to spill on demand, the currently spilled register's
- // allocation index.
- int spilled_register_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
deleted file mode 100644
index 2e5165a00c..0000000000
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ /dev/null
@@ -1,2469 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/x87/lithium-x87.h"
-
-#include <sstream>
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/crankshaft/hydrogen-osr.h"
-#include "src/crankshaft/lithium-inl.h"
-#include "src/crankshaft/x87/lithium-codegen-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Input operands must use a fixed register or use-at-start policy or
- // a non-register policy.
- DCHECK(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-bool LInstruction::HasDoubleRegisterResult() {
- return HasResult() && result()->IsDoubleRegister();
-}
-
-
-bool LInstruction::HasDoubleRegisterInput() {
- for (int i = 0; i < InputCount(); i++) {
- LOperand* op = InputAt(i);
- if (op != NULL && op->IsDoubleRegister()) {
- return true;
- }
- }
- return false;
-}
-
-
-bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
- for (int i = 0; i < InputCount(); i++) {
- LOperand* op = InputAt(i);
- if (op != NULL && op->IsDoubleRegister()) {
- if (cgen->ToX87Register(op).is(reg)) return true;
- }
- }
- return false;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-bool LGoto::HasInterestingComment(LCodeGen* gen) const {
- return !gen->IsNextEmittedBlock(block_id());
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
- true_block_id(), false_block_id());
-}
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- hydrogen()->type_literal()->ToCString().get(),
- true_block_id(), false_block_id());
-}
-
-
-void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- function()->PrintTo(stream);
- stream->Add(".code_entry = ");
- code_object()->PrintTo(stream);
-}
-
-
-void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
- stream->Add(" = ");
- base_object()->PrintTo(stream);
- stream->Add(" + ");
- offset()->PrintTo(stream);
-}
-
-
-void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < InputCount(); i++) {
- InputAt(i)->PrintTo(stream);
- stream->Add(" ");
- }
- stream->Add("#%d / ", arity());
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ElementsKind kind = hydrogen()->elements_kind();
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip a slot when allocating a double-width spill slot so it stays aligned.
- if (kind == DOUBLE_REGISTERS) {
- current_frame_slots_++;
- current_frame_slots_ |= 1;
- num_double_slots_++;
- }
- return current_frame_slots_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
- int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- std::ostringstream os;
- os << hydrogen()->access() << " <- ";
- stream->Add(os.str().c_str());
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", base_offset());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", base_offset());
- } else {
- stream->Add("] <- ");
- }
-
- if (value() == NULL) {
- DCHECK(hydrogen()->IsConstantHoleStore() &&
- hydrogen()->value()->representation().IsDouble());
- stream->Add("<the hole(nan)>");
- } else {
- value()->PrintTo(stream);
- }
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- DCHECK(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // If compiling for OSR, reserve space for the unoptimized frame,
- // which will be subsumed into this frame.
- if (graph()->has_osr()) {
- for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- }
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
- return new (zone())
- LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
-}
-
-
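-// Operand factories for the chunk builder: each wraps |value| in an
-// LUnallocated operand carrying a register-allocation policy. UseFixed pins
-// the value to a specific register, UseRegister requires some register, the
-// *AtStart variants mark the operand as used only at the start of the
-// instruction, UseTempRegister asks for a writable register, and Use/UseAny
-// impose no register requirement at all.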
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-static inline bool CanBeImmediateConstant(HValue* value) {
- return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
- Register fixed_register) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseFixed(value, fixed_register);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return CanBeImmediateConstant(value)
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
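-// Result factories: Define attaches an LUnallocated result to |instr| and
-// gives it the current instruction's virtual register. DefineAsRegister
-// requires the result in some register, DefineAsSpilled in a fixed stack
-// slot, DefineSameAsFirst in the same register as the first input, and
-// DefineFixed in a specific register.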
-LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateResultInstruction<1>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateResultInstruction<1>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
- X87Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- return LChunkBuilderBase::AssignEnvironment(instr, hydrogen_env);
-}
-
-
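-// Marks |instr| as a call: every call gets a pointer map for GC, and an
-// environment is attached when the call can deoptimize eagerly or has no
-// observable side effects (see the comment below on lazy deoptimization).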
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- // If the instruction does not have observable side effects, lazy
- // deoptimization after the call will try to deoptimize to the point
- // before the call. Thus we still need to attach an environment to this
- // call even if the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- // We can't really figure out if the environment is needed or not.
- instr->environment()->set_has_been_used();
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- DCHECK(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- int vreg = allocator_->GetVirtualRegister();
- if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
- vreg = 0;
- }
- operand->set_virtual_register(vreg);
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- DCHECK(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
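-// Lowers a shift. For Smi/Integer32 inputs the left operand is used at start
-// and a constant shift amount is encoded directly (masked to 0x1f); a
-// non-constant amount must live in ecx, as required by the x86 shift
-// instructions. Tagged inputs fall back to the generic DoArithmeticT path.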
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseFixed(right_value, ecx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
- } else {
- return DoArithmeticT(op, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return MarkAsCall(DefineSameAsFirst(result), instr);
- } else {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HBinaryOperation* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
- DCHECK(left->representation().IsTagged());
- DCHECK(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left_operand = UseFixed(left, edx);
- LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
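-// Translates one Hydrogen basic block into Lithium. The block inherits its
-// environment from its predecessor (copying it only when the predecessor has
-// another successor that may still need it), join blocks patch phi values
-// into that environment, and then every instruction that is not emitted at
-// its uses is visited in order.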
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- DCHECK(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- DCHECK(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- DCHECK(last_environment != NULL);
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- DCHECK(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- DCHECK(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- if (phi->HasMergedIndex()) {
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- if (block->deleted_phis()->at(i) < last_environment->length()) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
-
- LInstruction* instr = NULL;
- if (current->CanReplaceWithDummyUses()) {
- if (current->OperandCount() == 0) {
- instr = DefineAsRegister(new(zone()) LDummy());
- } else {
- DCHECK(!current->OperandAt(0)->IsControlInstruction());
- instr = DefineAsRegister(new(zone())
- LDummyUse(UseAny(current->OperandAt(0))));
- }
- for (int i = 1; i < current->OperandCount(); ++i) {
- if (current->OperandAt(i)->IsControlInstruction()) continue;
- LInstruction* dummy =
- new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
- dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
- }
- } else {
- HBasicBlock* successor;
- if (current->IsControlInstruction() &&
- HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
- successor != NULL) {
- // Always insert an FPU register barrier here when the branch is optimized
- // into a direct goto.
- // TODO(weiliang): require a better solution.
- if (!current->IsGoto()) {
- LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate());
- clobber->set_hydrogen_value(current);
- chunk_->AddInstruction(clobber, current_block_);
- }
- instr = new(zone()) LGoto(successor);
- } else {
- instr = current->CompileToLithium(this);
- }
- }
-
- argument_count_ += current->argument_delta();
- DCHECK(argument_count_ >= 0);
-
- if (instr != NULL) {
- AddInstruction(instr, current);
- }
-
- current_instruction_ = old_current;
-}
-
-
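-// Adds |instr| to the chunk: it is associated with its Hydrogen value, the
-// fixed-register/use-at-start invariant is checked in debug builds, stress
-// flags may force extra pointer maps or environments, and an FPU register
-// barrier is inserted before gotos that join phis or enter an OSR block.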
-void LChunkBuilder::AddInstruction(LInstruction* instr,
- HInstruction* hydrogen_val) {
- // Associate the hydrogen instruction first, since we may need it for
- // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
- instr->set_hydrogen_value(hydrogen_val);
-
-#ifdef DEBUG
- // Make sure that the lithium instruction has either no fixed register
- // constraints in temps or the result OR no uses that are only used at
- // start. If this invariant doesn't hold, the register allocator can decide
- // to insert a split of a range immediately before the instruction due to an
- // already allocated register needing to be used for the instruction's fixed
- // register constraint. In this case, the register allocator won't see an
- // interference between the split child and the use-at-start (it would if
- // it was just a plain use), so it is free to move the split child into
- // the same register that is used for the use-at-start.
- // See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() &&
- instr->ClobbersDoubleRegisters(isolate()))) {
- int fixed = 0;
- int used_at_start = 0;
- for (UseIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->IsUsedAtStart()) ++used_at_start;
- }
- if (instr->Output() != NULL) {
- if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
- }
- for (TempIterator it(instr); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- if (operand->HasFixedPolicy()) ++fixed;
- }
- DCHECK(fixed == 0 || used_at_start == 0);
- }
-#endif
-
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- if (instr->IsGoto() &&
- (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) {
- // TODO(olivf) Since phis of spilled values are joined as registers
- // (not in the stack slot), we need to allow the goto gaps to keep one
- // x87 register alive. To ensure all other values are still spilled, we
- // insert an FPU register barrier right before.
- LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate());
- clobber->set_hydrogen_value(hydrogen_val);
- chunk_->AddInstruction(clobber, current_block_);
- }
- chunk_->AddInstruction(instr, current_block_);
-
- CreateLazyBailoutForCall(current_block_, instr, hydrogen_val);
-}
-
-
-LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
- LInstruction* result = new (zone()) LPrologue();
- if (info_->scope()->NeedsContext()) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor());
-}
-
-
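-// Lowers a branch on an arbitrary value. Easy cases (untagged values, or
-// values statically known to be boolean, smi, array, number or string) need
-// no extra support; otherwise a temp register may be needed to inspect the
-// value's map, and an environment is attached unless the ToBoolean hints
-// already allow any input.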
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- Representation r = value->representation();
- HType type = value->type();
- ToBooleanHints expected = instr->expected_input_types();
- if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
-
- bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
- type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap)
- ? TempRegister()
- : NULL;
- LInstruction* branch =
- temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
- : new (zone()) LBranch(UseRegisterAtStart(value), temp);
- if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
- (expected & ToBooleanHint::kNeedsMap)) ||
- expected != ToBooleanHint::kAny)) {
- branch = AssignEnvironment(branch);
- }
- return branch;
-}
-
-
-LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
- return new(zone()) LDebugBreak();
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- info()->MarkAsRequiresFrame();
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
- HHasInPrototypeChainAndBranch* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* prototype = UseRegister(instr->prototype());
- LOperand* temp = TempRegister();
- LHasInPrototypeChainAndBranch* result =
- new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegister(instr->function());
- LOperand* temp = TempRegister();
- LWrapReceiver* result =
- new(zone()) LWrapReceiver(receiver, function, temp);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* receiver = UseFixed(instr->receiver(), eax);
- LOperand* length = UseFixed(instr->length(), ebx);
- LOperand* elements = UseFixed(instr->elements(), ecx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
- int argc = instr->OperandCount();
- for (int i = 0; i < argc; ++i) {
- LOperand* argument = UseAny(instr->argument(i));
- AddInstruction(new(zone()) LPushArgument(argument), instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreCodeEntry(
- HStoreCodeEntry* store_code_entry) {
- LOperand* function = UseRegister(store_code_entry->function());
- LOperand* code_object = UseTempRegister(store_code_entry->code_object());
- return new(zone()) LStoreCodeEntry(function, code_object);
-}
-
-
-LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* instr) {
- LOperand* base_object = UseRegisterAtStart(instr->base_object());
- LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
- return DefineAsRegister(
- new(zone()) LInnerAllocatedObject(base_object, offset));
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, esi);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
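-// Lowers a call described by a CallInterfaceDescriptor: the target and the
-// context (fixed in esi) come first, the descriptor's register parameters
-// are fixed to their designated registers, and any remaining parameters are
-// pushed onto the stack via LPushArgument instructions.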
-LInstruction* LChunkBuilder::DoCallWithDescriptor(
- HCallWithDescriptor* instr) {
- CallInterfaceDescriptor descriptor = instr->descriptor();
- DCHECK_EQ(descriptor.GetParameterCount() +
- LCallWithDescriptor::kImplicitRegisterParameterCount,
- instr->OperandCount());
-
- LOperand* target = UseRegisterOrConstantAtStart(instr->target());
- ZoneList<LOperand*> ops(instr->OperandCount(), zone());
- // Target
- ops.Add(target, zone());
- // Context
- LOperand* op = UseFixed(instr->OperandAt(1), esi);
- ops.Add(op, zone());
- // Load register parameters.
- int i = 0;
- for (; i < descriptor.GetRegisterParameterCount(); i++) {
- op = UseFixed(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount),
- descriptor.GetRegisterParameter(i));
- ops.Add(op, zone());
- }
- // Push stack parameters.
- for (; i < descriptor.GetParameterCount(); i++) {
- op = UseAny(instr->OperandAt(
- i + LCallWithDescriptor::kImplicitRegisterParameterCount));
- AddInstruction(new (zone()) LPushArgument(op), instr);
- }
-
- LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
- descriptor, ops, zone());
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- if (instr->syntactic_tail_call_mode() == TailCallMode::kAllow) {
- result->MarkAsSyntacticTailCall();
- }
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathCos:
- return DoMathCos(instr);
- case kMathFloor:
- return DoMathFloor(instr);
- case kMathRound:
- return DoMathRound(instr);
- case kMathFround:
- return DoMathFround(instr);
- case kMathAbs:
- return DoMathAbs(instr);
- case kMathLog:
- return DoMathLog(instr);
- case kMathExp:
- return DoMathExp(instr);
- case kMathSqrt:
- return DoMathSqrt(instr);
- case kMathPowHalf:
- return DoMathPowHalf(instr);
- case kMathClz32:
- return DoMathClz32(instr);
- case kMathSin:
- return DoMathSin(instr);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathFloor* result = new(zone()) LMathFloor(input);
- return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input));
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
- LMathFround* result = new (zone()) LMathFround(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context()); // Deferred use.
- LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LMathAbs(context, input));
- Representation r = instr->value()->representation();
- if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
- if (!r.IsDouble()) result = AssignEnvironment(result);
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathClz32* result = new(zone()) LMathClz32(input);
- return DefineAsRegister(result);
-}
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr);
-}
-
-LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp1 = FixedTemp(ecx);
- LOperand* temp2 = FixedTemp(edx);
- LMathSqrt* result = new(zone()) LMathSqrt(input, temp1, temp2);
- return MarkAsCall(DefineSameAsFirst(result), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- LMathPowHalf* result = new (zone()) LMathPowHalf(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
-
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- return DoArithmeticT(instr->op(), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
- (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
- divisor != 1 && divisor != -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(eax);
- LOperand* temp2 = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
- dividend, divisor, temp1, temp2), edx);
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LDivI(
- dividend, divisor, temp), eax);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanOverflow) ||
- !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoDivByConstI(instr);
- } else {
- return DoDivI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
- dividend, divisor));
- if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
- (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(eax);
- LOperand* temp2 = FixedTemp(edx);
- LOperand* temp3 =
- ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
- (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
- NULL : TempRegister();
- LInstruction* result =
- DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
- divisor,
- temp1,
- temp2,
- temp3),
- edx);
- if (divisor == 0 ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
- dividend, divisor, temp), eax);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- if (instr->RightIsPowerOf2()) {
- return DoFlooringDivByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoFlooringDivByConstI(instr);
- } else {
- return DoFlooringDivI(instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegisterAtStart(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
- dividend, divisor));
- if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- int32_t divisor = instr->right()->GetInteger32Constant();
- LOperand* temp1 = FixedTemp(eax);
- LOperand* temp2 = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LModByConstI(
- dividend, divisor, temp1, temp2), eax);
- if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoModI(HMod* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = FixedTemp(edx);
- LInstruction* result = DefineFixed(new(zone()) LModI(
- dividend, divisor, temp), edx);
- if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- if (instr->RightIsPowerOf2()) {
- return DoModByPowerOf2I(instr);
- } else if (instr->right()->IsConstant()) {
- return DoModByConstI(instr);
- } else {
- return DoModI(instr);
- }
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MOD, instr);
- } else {
- return DoArithmeticT(Token::MOD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- HValue* h_right = instr->BetterRightOperand();
- LOperand* right = UseOrConstant(h_right);
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- temp = TempRegister();
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- int constant_value =
- h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
- // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
- // |DeoptimizeIf|.
- bool needs_environment =
- instr->CheckFlag(HValue::kCanOverflow) ||
- (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (!right->IsConstantOperand() || constant_value <= 0));
- if (needs_environment) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- // Check to see if it would be advantageous to use an lea instruction rather
- // than an add. This is the case when no overflow check is needed and there
- // are multiple uses of the add's inputs, so using a 3-register add will
- // preserve all input values for later uses.
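- // For example, "lea eax, [ebx + ecx]" writes the sum into a third register
- // and leaves both inputs intact, whereas "add" overwrites its destination.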
- bool use_lea = LAddI::UseLea(instr);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- HValue* right_candidate = instr->BetterRightOperand();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
- LAddI* add = new(zone()) LAddI(left, right);
- bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
- if (can_overflow) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else if (instr->representation().IsExternal()) {
- DCHECK(instr->IsConsistentExternalRepresentation());
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
- bool use_lea = LAddI::UseLea(instr);
- LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_candidate = instr->right();
- LOperand* right = use_lea
- ? UseRegisterOrConstantAtStart(right_candidate)
- : UseOrConstantAtStart(right_candidate);
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = use_lea
- ? DefineAsRegister(add)
- : DefineSameAsFirst(add);
- return result;
- } else {
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- LOperand* scratch = TempRegister();
-
- if (instr->representation().IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- left = UseRegisterAtStart(instr->BetterLeftOperand());
- right = UseOrConstantAtStart(instr->BetterRightOperand());
- } else {
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- // Unlike ia32, we don't have a MathPowStub and call the C function directly.
- DCHECK(instr->representation().IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LPower* result = new (zone()) LPower(left, right);
- return MarkAsCall(DefineSameAsFirst(result), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- DCHECK(instr->left()->representation().IsSmiOrTagged());
- DCHECK(instr->right()->representation().IsSmiOrTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
- HCompareNumericAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsSmiOrInteger32()) {
- DCHECK(instr->left()->representation().Equals(r));
- DCHECK(instr->right()->representation().Equals(r));
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCompareNumericAndBranch(left, right);
- } else {
- DCHECK(r.IsDouble());
- DCHECK(instr->left()->representation().IsDouble());
- DCHECK(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (CanBeImmediateConstant(instr->left()) &&
- CanBeImmediateConstant(instr->right())) {
- // The code generator requires either both inputs to be constant
- // operands, or neither.
- left = UseConstant(instr->left());
- right = UseConstant(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCompareNumericAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LCmpHoleAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- DCHECK(instr->left()->representation().IsTagged());
- DCHECK(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
-
- LStringCompareAndBranch* result = new(zone())
- LStringCompareAndBranch(context, left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasInstanceTypeAndBranch(
- UseRegisterAtStart(instr->value()),
- TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
- TempRegister(), TempRegister());
-}
-
-LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
-}
-
-
-LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
- if (instr->encoding() == String::ONE_BYTE_ENCODING) {
- if (FLAG_debug_code) {
- return UseFixed(instr->value(), eax);
- } else {
- return UseFixedOrConstant(instr->value(), eax);
- }
- } else {
- if (FLAG_debug_code) {
- return UseRegisterAtStart(instr->value());
- } else {
- return UseRegisterOrConstantAtStart(instr->value());
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegisterAtStart(instr->string());
- LOperand* index = FLAG_debug_code
- ? UseRegisterAtStart(instr->index())
- : UseRegisterOrConstantAtStart(instr->index());
- LOperand* value = GetSeqStringSetCharOperand(instr);
- LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
- LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
- index, value);
- if (FLAG_debug_code) {
- result = MarkAsCall(result, instr);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- if (!FLAG_debug_code && instr->skip_check()) return NULL;
- LOperand* index = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = !index->IsConstantOperand()
- ? UseOrConstantAtStart(instr->length())
- : UseAtStart(instr->length());
- LInstruction* result = new(zone()) LBoundsCheck(index, length);
- if (!FLAG_debug_code || !instr->skip_check()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
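-// Lowers a representation change between Smi, Tagged, Double and Integer32.
-// Conversions that cannot be proven safe get an environment so that they can
-// deoptimize, and tagging conversions that may allocate a heap number also
-// get a pointer map.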
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- HValue* val = instr->value();
- if (from.IsSmi()) {
- if (to.IsTagged()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- from = Representation::Tagged();
- }
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- LOperand* value = UseRegister(val);
- LOperand* temp = TempRegister();
- LInstruction* result =
- DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- if (val->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LDummyUse(value));
- }
- return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- if (val->type().IsSmi() || val->representation().IsSmi()) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value));
- if (!val->representation().IsSmi()) result = AssignEnvironment(result);
- return result;
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegisterAtStart(val);
- LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToSmi(value)));
- } else {
- DCHECK(to.IsInteger32());
- bool truncating = instr->CanTruncateToInt32();
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
- if (!truncating) result = AssignEnvironment(result);
- return result;
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- if (!instr->CheckFlag(HValue::kCanOverflow)) {
- LOperand* value = UseRegister(val);
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* value = UseRegister(val);
- LOperand* temp = TempRegister();
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignPointerMap(DefineSameAsFirst(result));
- } else {
- LOperand* value = UseRegister(val);
- LOperand* temp = TempRegister();
- LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
- return AssignPointerMap(DefineSameAsFirst(result));
- }
- } else if (to.IsSmi()) {
- LOperand* value = UseRegister(val);
- LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else {
- DCHECK(to.IsDouble());
- if (val->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
- } else {
- return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckNonSmi(value);
- if (!instr->value()->type().IsHeapObject()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
- HCheckArrayBufferNotNeutered* instr) {
- LOperand* view = UseRegisterAtStart(instr->value());
- LOperand* scratch = TempRegister();
- LCheckArrayBufferNotNeutered* result =
- new (zone()) LCheckArrayBufferNotNeutered(view, scratch);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
- // If the object is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the object gets promoted before we
- // emit code, we will still get the register but will do an immediate
- // compare instead of the cell compare. This is safe.
- LOperand* value = instr->object_in_new_space()
- ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckValue(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
- if (instr->HasMigrationTarget()) {
- info()->MarkAsDeferredCalling();
- result = AssignPointerMap(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- if (input_rep.IsDouble()) {
- LOperand* reg = UseRegister(value);
- return DefineFixed(new (zone()) LClampDToUint8(reg), eax);
- } else if (input_rep.IsInteger32()) {
- LOperand* reg = UseFixed(value, eax);
- return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
- } else {
- DCHECK(input_rep.IsSmiOrTagged());
- LOperand* value = UseRegister(instr->value());
- LClampTToUint8NoSSE2* res =
- new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
- TempRegister(), TempRegister());
- return AssignEnvironment(DefineFixed(res, ecx));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
- LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(
- UseFixed(instr->value(), eax), context, parameter_count);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsSmi()) {
- return DefineAsRegister(new(zone()) LConstantS);
- } else if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new (zone()) LConstantD);
- } else if (r.IsExternal()) {
- return DefineAsRegister(new(zone()) LConstantE);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* value;
- LOperand* temp;
- LOperand* context = UseRegister(instr->context());
- if (instr->NeedsWriteBarrier()) {
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- LOperand* obj = (instr->access().IsExternalMemory() &&
- instr->access().offset() == 0)
- ? UseRegisterOrConstantAtStart(instr->object())
- : UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
- return DefineAsRegister(new(zone()) LLoadRoot);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- DCHECK(instr->key()->representation().IsSmiOrInteger32());
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LInstruction* result = NULL;
-
- if (!instr->is_fixed_typed_array()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
- } else {
- DCHECK(
- (instr->representation().IsInteger32() &&
- !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
- (instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- result = DefineAsRegister(
- new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
- }
-
- bool needs_environment;
- if (instr->is_fixed_typed_array()) {
- // see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = elements_kind == UINT32_ELEMENTS &&
- !instr->CheckFlag(HInstruction::kUint32);
- } else {
- // see LCodeGen::DoLoadKeyedFixedDoubleArray and
- // LCodeGen::DoLoadKeyedFixedArray
- needs_environment =
- instr->RequiresHoleCheck() ||
- (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
- }
-
- if (needs_environment) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
- // Determine if we need a byte register in this case for the value.
- bool val_is_fixed_register =
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS;
- if (val_is_fixed_register) {
- return UseFixed(instr->value(), eax);
- }
-
- if (IsDoubleOrFloatElementsKind(elements_kind)) {
- return UseRegisterAtStart(instr->value());
- }
-
- return UseRegister(instr->value());
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_fixed_typed_array()) {
- DCHECK(instr->elements()->representation().IsTagged());
- DCHECK(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
-
- if (instr->value()->representation().IsDouble()) {
- LOperand* object = UseRegisterAtStart(instr->elements());
- // For storing double hole, no fp register required.
- LOperand* val = instr->IsConstantHoleStore()
- ? NULL
- : UseRegisterAtStart(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new (zone()) LStoreKeyed(object, key, val, nullptr);
- } else {
- DCHECK(instr->value()->representation().IsSmiOrTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = UseRegister(instr->elements());
- LOperand* val;
- LOperand* key;
- if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- } else {
- val = UseRegisterOrConstantAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- }
- return new (zone()) LStoreKeyed(obj, key, val, nullptr);
- }
- }
-
- ElementsKind elements_kind = instr->elements_kind();
- DCHECK(
- (instr->value()->representation().IsInteger32() &&
- !IsDoubleOrFloatElementsKind(elements_kind)) ||
- (instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
- DCHECK(instr->elements()->representation().IsExternal());
-
- LOperand* backing_store = UseRegister(instr->elements());
- LOperand* val = GetStoreKeyedValueOperand(instr);
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
- return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL,
- new_map_reg, temp_reg);
- return result;
- } else {
- LOperand* object = UseFixed(instr->object(), eax);
- LOperand* context = UseFixed(instr->context(), esi);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = Use(instr->object());
- LOperand* elements = Use(instr->elements());
- LOperand* key = UseRegisterOrConstant(instr->key());
- LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
-
- LMaybeGrowElements* result = new (zone())
- LMaybeGrowElements(context, object, elements, key, current_capacity);
- DefineFixed(result, eax);
- return AssignPointerMap(AssignEnvironment(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool is_in_object = instr->access().IsInobject();
- bool is_external_location = instr->access().IsExternalMemory() &&
- instr->access().offset() == 0;
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = is_in_object
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else if (is_external_location) {
- DCHECK(!is_in_object);
- DCHECK(!needs_write_barrier);
- DCHECK(!needs_write_barrier_for_map);
- obj = UseRegisterOrConstant(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- bool can_be_constant = instr->value()->IsConstant() &&
- HConstant::cast(instr->value())->NotInNewSpace() &&
- !instr->field_representation().IsDouble();
-
- LOperand* val;
- if (instr->field_representation().IsInteger8() ||
- instr->field_representation().IsUInteger8()) {
- // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
- // Just force the value to be in eax and we're safe here.
- val = UseFixed(instr->value(), eax);
- } else if (needs_write_barrier) {
- val = UseTempRegister(instr->value());
- } else if (can_be_constant) {
- val = UseRegisterOrConstant(instr->value());
- } else if (instr->field_representation().IsDouble()) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseRegister(instr->value());
- }
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not in-object-property).
- LOperand* temp = (!is_in_object || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
- return MarkAsCall(DefineFixed(string_add, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
- : UseRegister(instr->size());
- if (instr->IsAllocationFolded()) {
- LOperand* temp = TempRegister();
- LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
- return DefineAsRegister(result);
- } else {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocate* result = new (zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- DCHECK(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor = graph()->descriptor();
- int index = static_cast<int>(instr->index());
- Register reg = descriptor.GetRegisterParameter(index);
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- // Use an index that corresponds to the location in the unoptimized frame,
- // which the optimized frame will subsume.
- int env_index = instr->index();
- int spill_index = 0;
- if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk()->GetParameterStackSlot(env_index);
- } else {
- spill_index = env_index - instr->environment()->first_local_index();
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Retry(kNotEnoughSpillSlotsForOsr);
- spill_index = 0;
- }
- spill_index += StandardFrameConstants::kFixedSlotCount;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
-
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- info()->MarkAsRequiresFrame();
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = Use(instr->index());
- }
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(), ebx);
- LTypeof* result = new(zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- instr->ReplayEnvironment(current_block_->last_environment());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- DCHECK(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- outer->set_ast_id(instr->ReturnId());
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(
- instr->closure(), instr->arguments_count(), instr->function(), undefined,
- instr->inlining_kind(), instr->syntactic_tail_call_mode());
- // Only replay binding of arguments object if it wasn't removed from graph.
- if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
- inner->Bind(instr->arguments_var(), instr->arguments_object());
- }
- inner->BindContext(instr->closure_context());
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- DCHECK(instr->argument_delta() == -argument_count);
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->enumerable(), eax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
- LInstruction* result = DefineSameAsFirst(load);
- return AssignPointerMap(result);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
deleted file mode 100644
index 220f0db3bb..0000000000
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ /dev/null
@@ -1,2508 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_X87_LITHIUM_X87_H_
-#define V8_CRANKSHAFT_X87_LITHIUM_X87_H_
-
-#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/lithium-allocator.h"
-#include "src/safepoint-table.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-namespace compiler {
-class RCodeVisualizer;
-}
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(BitI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallWithDescriptor) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CheckArrayBufferNotNeutered) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckMapValue) \
- V(CheckNonSmi) \
- V(CheckSmi) \
- V(CheckValue) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8NoSSE2) \
- V(ClassOfTestAndBranch) \
- V(ClobberDoubles) \
- V(CompareNumericAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantE) \
- V(ConstantI) \
- V(ConstantS) \
- V(ConstantT) \
- V(Context) \
- V(DebugBreak) \
- V(DeclareGlobals) \
- V(Deoptimize) \
- V(DivByConstI) \
- V(DivByPowerOf2I) \
- V(DivI) \
- V(DoubleToI) \
- V(DoubleToSmi) \
- V(Drop) \
- V(Dummy) \
- V(DummyUse) \
- V(FastAllocate) \
- V(FlooringDivByConstI) \
- V(FlooringDivByPowerOf2I) \
- V(FlooringDivI) \
- V(ForInCacheArray) \
- V(ForInPrepareMap) \
- V(Goto) \
- V(HasInPrototypeChainAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(InnerAllocatedObject) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(InvokeFunction) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadFieldByIndex) \
- V(LoadFunctionPrototype) \
- V(LoadKeyed) \
- V(LoadNamedField) \
- V(LoadRoot) \
- V(MathAbs) \
- V(MathClz32) \
- V(MathCos) \
- V(MathExp) \
- V(MathFloor) \
- V(MathFround) \
- V(MathLog) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(MathSqrt) \
- V(MaybeGrowElements) \
- V(MathSin) \
- V(ModByConstI) \
- V(ModByPowerOf2I) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(OsrEntry) \
- V(Parameter) \
- V(Power) \
- V(Prologue) \
- V(PushArgument) \
- V(Return) \
- V(SeqStringGetChar) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreCodeEntry) \
- V(StoreContextSlot) \
- V(StoreKeyed) \
- V(StoreNamedField) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(Uint32ToDouble) \
- V(UnknownOSRValue) \
- V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- Opcode opcode() const final { return LInstruction::k##type; } \
- void CompileToNative(LCodeGen* generator) final; \
- const char* Mnemonic() const final { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- DCHECK(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction : public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- }
-
- virtual ~LInstruction() {}
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- // Try deleting this instruction if possible.
- virtual bool TryDelete() { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
-
- void MarkAsSyntacticTailCall() {
- bit_field_ = IsSyntacticTailCallBits::update(bit_field_, true);
- }
- bool IsSyntacticTailCall() const {
- return IsSyntacticTailCallBits::decode(bit_field_);
- }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
- return IsCall() ||
- // We only have rudimentary X87Stack tracking, thus in general
- // cannot handle phi-nodes.
- (IsControl());
- }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() const = 0;
-
- bool HasDoubleRegisterResult();
- bool HasDoubleRegisterInput();
- bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- private:
- // Iterator support.
- friend class InputIterator;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- class IsCallBits: public BitField<bool, 0, 1> {};
- class IsSyntacticTailCallBits : public BitField<bool, IsCallBits::kNext, 1> {
- };
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- int bit_field_;
-};
-
-
-// R = number of result operands (0 or 1).
-template<int R>
-class LTemplateResultInstruction : public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- bool HasResult() const final { return R != 0 && result() != NULL; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() const override { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction : public LTemplateResultInstruction<R> {
- protected:
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- int InputCount() final { return I; }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return T; }
- LOperand* TempAt(int i) final { return temps_[i]; }
-};
-
-
-class LGap : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block) : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- bool IsGap() const final { return true; }
- void PrintDataTo(StringStream* stream) override;
- static LGap* cast(LInstruction* instr) {
- DCHECK(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap final : public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override {
- return !IsRedundant();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LClobberDoubles final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LClobberDoubles(Isolate* isolate) { }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
-};
-
-
-class LGoto final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(HBasicBlock* block) : block_(block) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override;
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- void PrintDataTo(StringStream* stream) override;
- bool IsControl() const override { return true; }
-
- int block_id() const { return block_->block_id(); }
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return false;
- }
-
- bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
- HBasicBlock* block() const { return block_; }
-
- private:
- HBasicBlock* block_;
-};
-
-
-class LPrologue final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
-};
-
-
-class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-};
-
-
-class LDummy final : public LTemplateInstruction<1, 0, 0> {
- public:
- LDummy() {}
- DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
-};
-
-
-class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool IsControl() const override { return true; }
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
- DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
-};
-
-
-class LLabel final : public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- void PrintDataTo(StringStream* stream) override;
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- bool is_osr_entry() const { return block()->is_osr_entry(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
-
- bool IsControl() const final { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-
- int TrueDestination(LChunk* chunk) {
- return chunk->LookupDestination(true_block_id());
- }
- int FalseDestination(LChunk* chunk) {
- return chunk->LookupDestination(false_block_id());
- }
-
- Label* TrueLabel(LChunk* chunk) {
- if (true_label_ == NULL) {
- true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
- }
- return true_label_;
- }
- Label* FalseLabel(LChunk* chunk) {
- if (false_label_ == NULL) {
- false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
- }
- return false_label_;
- }
-
- protected:
- int true_block_id() { return SuccessorAt(0)->block_id(); }
- int false_block_id() { return SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-
- Label* false_label_;
- Label* true_label_;
-};
-
-
-class LWrapReceiver final : public LTemplateInstruction<1, 2, 1> {
- public:
- LWrapReceiver(LOperand* receiver,
- LOperand* function,
- LOperand* temp) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- temps_[0] = temp;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
- DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
-};
-
-
-class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
-};
-
-
-class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
-};
-
-
-class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LModByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-
- private:
- int32_t divisor_;
-};
-
-
-class LModI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivByConstI final : public LTemplateInstruction<1, 1, 2> {
- public:
- LDivByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-
- private:
- int32_t divisor_;
-};
-
-
-class LDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-};
-
-
-class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
- public:
- LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
- "flooring-div-by-power-of-2-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> {
- public:
- LFlooringDivByConstI(LOperand* dividend,
- int32_t divisor,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = dividend;
- divisor_ = divisor;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- int32_t divisor() const { return divisor_; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-
- private:
- int32_t divisor_;
-};
-
-
-class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
- inputs_[0] = dividend;
- inputs_[1] = divisor;
- temps_[0] = temp;
- }
-
- LOperand* dividend() { return inputs_[0]; }
- LOperand* divisor() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI final : public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCompareNumericAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
- "compare-numeric-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFloor(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathRound final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathRound(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathFround final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathFround(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
-};
-
-
-class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
- public:
- LMathAbs(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-};
-
-
-class LMathLog final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathLog(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
-};
-
-
-class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathClz32(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
-};
-
-class LMathCos final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathCos(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-class LMathSin final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathSin(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-class LMathExp final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathExp(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-};
-
-
-class LMathSqrt final : public LTemplateInstruction<1, 1, 2> {
- public:
- explicit LMathSqrt(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
-};
-
-
-class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-};
-
-
-class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
-};
-
-
-class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
-};
-
-
-class LIsStringAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-class LCmpT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- LOperand* context() { return inputs_[0]; }
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
- public:
- LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
- LOperand* scratch) {
- inputs_[0] = object;
- inputs_[1] = prototype;
- temps_[0] = scratch;
- }
-
- LOperand* object() const { return inputs_[0]; }
- LOperand* prototype() const { return inputs_[1]; }
- LOperand* scratch() const { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
- "has-in-prototype-chain-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
-};
-
-
-class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-
- Token::Value op() const { return hydrogen()->op(); }
-};
-
-
-class LShiftI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- Token::Value op() const { return op_; }
- bool can_deopt() const { return can_deopt_; }
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantS final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
-};
-
-
-class LConstantD final : public LTemplateInstruction<1, 0, 1> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
-};
-
-
-class LConstantE final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- ExternalReference value() const {
- return hydrogen()->ExternalReferenceValue();
- }
-};
-
-
-class LConstantT final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value(Isolate* isolate) const {
- return hydrogen()->handle(isolate);
- }
-};
-
-
-class LBranch final : public LControlInstruction<1, 1> {
- public:
- LBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- Handle<Map> map() const { return hydrogen()->map().handle(); }
-};
-
-
-class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
- public:
- LSeqStringGetChar(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() const { return inputs_[0]; }
- LOperand* index() const { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
-};
-
-
-class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
- public:
- LSeqStringSetChar(LOperand* context,
- LOperand* string,
- LOperand* index,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- inputs_[3] = value;
- }
-
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-};
-
-
-class LAddI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- static bool UseLea(HAdd* add) {
- return !add->CheckFlag(HValue::kCanOverflow) &&
- add->BetterLeftOperand()->UseCount() > 1;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax final : public LTemplateInstruction<1, 2, 1> {
- public:
- LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower final : public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticD; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
- Token::Value op() const { return op_; }
-
- Opcode opcode() const override { return LInstruction::kArithmeticT; }
- void CompileToNative(LCodeGen* generator) override;
- const char* Mnemonic() const override;
-
- DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn final : public LTemplateInstruction<0, 3, 0> {
- public:
- explicit LReturn(LOperand* value,
- LOperand* context,
- LOperand* parameter_count) {
- inputs_[0] = value;
- inputs_[1] = context;
- inputs_[2] = parameter_count;
- }
-
- bool has_constant_parameter_count() {
- return parameter_count()->IsConstantOperand();
- }
- LConstantOperand* constant_parameter_count() {
- DCHECK(has_constant_parameter_count());
- return LConstantOperand::cast(parameter_count());
- }
- LOperand* parameter_count() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
- DECLARE_HYDROGEN_ACCESSOR(Return)
-};
-
-
-class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
- DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
-
- Heap::RootListIndex index() const { return hydrogen()->index(); }
-};
-
-
-class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = backing_store_owner;
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* backing_store_owner() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
- bool key_is_smi() {
- return hydrogen()->key()->representation().IsTagged();
- }
-};
-
-
-inline static bool ExternalArrayOpRequiresTemp(
- Representation key_representation,
- ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return key_representation.IsSmi() &&
- (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
-}
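ExternalArrayOpRequiresTemp above captures an addressing quirk: on the 32-bit ports a smi is the integer value shifted left by one, so for byte-sized element kinds the key has to be shifted back down before it can serve as a byte offset, and that downward shift does not fit an x86 addressing-mode scale. A standalone sketch of the arithmetic (the SmiTag helper here is a simplified stand-in, not V8's implementation):

```cpp
#include <cstdint>
#include <cstdio>

// Standalone illustration (not V8 code): a 31-bit smi stores its integer
// value shifted left by one bit, so a tagged key already equals value * 2.
int32_t SmiTag(int32_t value) { return value << 1; }

int main() {
  const int32_t key = 7;
  const int32_t tagged = SmiTag(key);  // 14

  // 4-byte elements: the byte offset is key * 4, which equals tagged * 2.
  // The remaining factor of two fits an addressing-mode scale (1/2/4/8),
  // so no untagging is needed.
  std::printf("int32 element offset: %d\n", tagged * 2);   // 28

  // 1-byte elements (UINT8/INT8/UINT8_CLAMPED): the byte offset is the key
  // itself, i.e. tagged >> 1. A right shift cannot be expressed as an
  // addressing-mode scale, so the untagged key has to be materialized in a
  // temporary register first.
  std::printf("uint8 element offset: %d\n", tagged >> 1);  // 7
  return 0;
}
```

For 2-, 4- and 8-byte element kinds the leftover factor of two folds into the scale, which is why only the three byte-sized element kinds make the predicate return true.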
-
-
-class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop final : public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreCodeEntry(LOperand* function, LOperand* code_object) {
- inputs_[0] = function;
- inputs_[1] = code_object;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* code_object() { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
- DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
-};
-
-
-class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
- inputs_[0] = base_object;
- inputs_[1] = offset;
- }
-
- LOperand* base_object() const { return inputs_[0]; }
- LOperand* offset() const { return inputs_[1]; }
-
- void PrintDataTo(StringStream* stream) override;
-
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
-};
-
-
-class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext final : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
- public:
- LCallWithDescriptor(CallInterfaceDescriptor descriptor,
- const ZoneList<LOperand*>& operands, Zone* zone)
- : inputs_(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount,
- zone) {
- DCHECK(descriptor.GetRegisterParameterCount() +
- kImplicitRegisterParameterCount ==
- operands.length());
- inputs_.AddAll(operands, zone);
- }
-
- LOperand* target() const { return inputs_[0]; }
-
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
-
- // The target and context are passed as implicit parameters that are not
- // explicitly listed in the descriptor.
- static const int kImplicitRegisterParameterCount = 2;
-
- private:
- DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- ZoneList<LOperand*> inputs_;
-
- // Iterator support.
- int InputCount() final { return inputs_.length(); }
- LOperand* InputAt(int i) final { return inputs_[i]; }
-
- int TempCount() final { return 0; }
- LOperand* TempAt(int i) final { return NULL; }
-};
-
-
-class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override {
- return save_doubles() == kDontSaveFPRegs;
- }
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
- SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
-};
-
-
-class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagU(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD final : public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTaggedToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberUntagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change);
-
- bool truncating() { return hydrogen()->CanTruncateToNumber(); }
-};
-
-
-class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
- public:
- LStoreNamedField(LOperand* obj,
- LOperand* val,
- LOperand* temp,
- LOperand* temp_map) {
- inputs_[0] = obj;
- inputs_[1] = val;
- temps_[0] = temp;
- temps_[1] = temp_map;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp_map() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
- LOperand* backing_store_owner) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = val;
- inputs_[3] = backing_store_owner;
- }
-
- bool is_fixed_typed_array() const {
- return hydrogen()->is_fixed_typed_array();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- LOperand* backing_store_owner() { return inputs_[3]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- void PrintDataTo(StringStream* stream) override;
- uint32_t base_offset() const { return hydrogen()->base_offset(); }
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- void PrintDataTo(StringStream* stream) override;
-
- Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
- Handle<Map> transitioned_map() {
- return hydrogen()->transitioned_map().handle();
- }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
- public:
- LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
- LOperand* key, LOperand* current_capacity) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = elements;
- inputs_[3] = key;
- inputs_[4] = current_capacity;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* elements() { return inputs_[2]; }
- LOperand* key() { return inputs_[3]; }
- LOperand* current_capacity() { return inputs_[4]; }
-
- bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
-
- DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
- DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
-};
-
-
-class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckValue(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
- DECLARE_HYDROGEN_ACCESSOR(CheckValue)
-};
-
-
-class LCheckArrayBufferNotNeutered final
- : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LCheckArrayBufferNotNeutered(LOperand* view, LOperand* scratch) {
- inputs_[0] = view;
- temps_[0] = scratch;
- }
-
- LOperand* view() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
- "check-array-buffer-not-neutered")
- DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
-};
-
-
-class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> {
- public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value = NULL) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LClampTToUint8NoSSE2 final : public LTemplateInstruction<1, 1, 3> {
- public:
- LClampTToUint8NoSSE2(LOperand* unclamped,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = unclamped;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
- LOperand* scratch2() { return temps_[1]; }
- LOperand* scratch3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
- "clamp-t-to-uint8-nosse2")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-};
-
-
-class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
- DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
-};
-
-
-class LAllocate final : public LTemplateInstruction<1, 2, 1> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
- public:
- LFastAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
- temps_[0] = temp;
- }
-
- LOperand* size() const { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-class LTypeof final : public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
-class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
- public:
- bool HasInterestingComment(LCodeGen* gen) const override { return false; }
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-};
-
-
-class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk final : public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph),
- num_double_slots_(0) { }
-
- int GetNextSpillIndex(RegisterKind kind);
- LOperand* GetNextSpillSlot(RegisterKind kind);
-
- int num_double_slots() const { return num_double_slots_; }
-
- private:
- int num_double_slots_;
-};
-
-
-class LChunkBuilder final : public LChunkBuilderBase {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : LChunkBuilderBase(info, graph),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- allocator_(allocator) {}
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMathFloor(HUnaryMathOperation* instr);
- LInstruction* DoMathRound(HUnaryMathOperation* instr);
- LInstruction* DoMathFround(HUnaryMathOperation* instr);
- LInstruction* DoMathAbs(HUnaryMathOperation* instr);
- LInstruction* DoMathLog(HUnaryMathOperation* instr);
- LInstruction* DoMathCos(HUnaryMathOperation* instr);
- LInstruction* DoMathSin(HUnaryMathOperation* instr);
- LInstruction* DoMathExp(HUnaryMathOperation* instr);
- LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
- LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
- LInstruction* DoMathClz32(HUnaryMathOperation* instr);
- LInstruction* DoDivByPowerOf2I(HDiv* instr);
- LInstruction* DoDivByConstI(HDiv* instr);
- LInstruction* DoDivI(HDiv* instr);
- LInstruction* DoModByPowerOf2I(HMod* instr);
- LInstruction* DoModByConstI(HMod* instr);
- LInstruction* DoModI(HMod* instr);
- LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
- LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
-
- private:
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(X87Register reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
-
-  // A value that is guaranteed to be allocated to a register.
-  // An operand created by UseRegister is guaranteed to be live until the end
-  // of the instruction. This means that the register allocator will not reuse
-  // its register for any other operand inside the instruction.
-  // An operand created by UseRegisterAtStart is guaranteed to be live only at
-  // the start of the instruction. The register allocator is free to assign the
-  // same register to some other operand used inside the instruction (i.e. a
-  // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a fixed register or a constant operand.
- MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
- Register fixed_register);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a constant operand.
- MUST_USE_RESULT LOperand* UseConstant(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- LInstruction* Define(LTemplateResultInstruction<1>* instr,
- LUnallocated* result);
- LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
- int index);
- LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- Register reg);
- LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
- X87Register reg);
- LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- void VisitInstruction(HInstruction* current);
- void AddInstruction(LInstruction* instr, HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HBinaryOperation* instr);
-
- LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
-
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- LAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_X87_LITHIUM_X87_H_
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8-console.cc
index e4f81b3c2d..8de2401a78 100644
--- a/deps/v8/src/d8-console.cc
+++ b/deps/v8/src/d8-console.cc
@@ -40,27 +40,33 @@ D8Console::D8Console(Isolate* isolate) : isolate_(isolate) {
default_timer_ = base::TimeTicks::HighResolutionNow();
}
-void D8Console::Log(const debug::ConsoleCallArguments& args) {
+void D8Console::Log(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
WriteToFile(stdout, isolate_, args);
}
-void D8Console::Error(const debug::ConsoleCallArguments& args) {
+void D8Console::Error(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
WriteToFile(stderr, isolate_, args);
}
-void D8Console::Warn(const debug::ConsoleCallArguments& args) {
+void D8Console::Warn(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
WriteToFile(stdout, isolate_, args);
}
-void D8Console::Info(const debug::ConsoleCallArguments& args) {
+void D8Console::Info(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
WriteToFile(stdout, isolate_, args);
}
-void D8Console::Debug(const debug::ConsoleCallArguments& args) {
+void D8Console::Debug(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
WriteToFile(stdout, isolate_, args);
}
-void D8Console::Time(const debug::ConsoleCallArguments& args) {
+void D8Console::Time(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
if (args.Length() == 0) {
default_timer_ = base::TimeTicks::HighResolutionNow();
} else {
@@ -83,7 +89,8 @@ void D8Console::Time(const debug::ConsoleCallArguments& args) {
}
}
-void D8Console::TimeEnd(const debug::ConsoleCallArguments& args) {
+void D8Console::TimeEnd(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) {
base::TimeDelta delta;
base::TimeTicks now = base::TimeTicks::HighResolutionNow();
if (args.Length() == 0) {
diff --git a/deps/v8/src/d8-console.h b/deps/v8/src/d8-console.h
index 293cb21180..a98525b9f4 100644
--- a/deps/v8/src/d8-console.h
+++ b/deps/v8/src/d8-console.h
@@ -16,13 +16,20 @@ class D8Console : public debug::ConsoleDelegate {
explicit D8Console(Isolate* isolate);
private:
- void Log(const debug::ConsoleCallArguments& args) override;
- void Error(const debug::ConsoleCallArguments& args) override;
- void Warn(const debug::ConsoleCallArguments& args) override;
- void Info(const debug::ConsoleCallArguments& args) override;
- void Debug(const debug::ConsoleCallArguments& args) override;
- void Time(const debug::ConsoleCallArguments& args) override;
- void TimeEnd(const debug::ConsoleCallArguments& args) override;
+ void Log(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void Error(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void Warn(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void Info(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void Debug(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void Time(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
+ void TimeEnd(const debug::ConsoleCallArguments& args,
+ const v8::debug::ConsoleContext&) override;
Isolate* isolate_;
std::map<std::string, base::TimeTicks> timers_;
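Every console hook now receives the calling console context in addition to the call arguments. As a rough sketch, another delegate written against the updated signatures could look like the following; the include path is an assumption of this sketch, while the registration call mirrors what d8 itself does further down in this patch:

```cpp
#include <cstdio>

#include "src/debug/debug-interface.h"  // assumed location of debug::ConsoleDelegate

class FileConsole : public v8::debug::ConsoleDelegate {
 public:
  explicit FileConsole(std::FILE* sink) : sink_(sink) {}

 private:
  void Log(const v8::debug::ConsoleCallArguments& args,
           const v8::debug::ConsoleContext&) override {
    Write("log", args);
  }
  void Error(const v8::debug::ConsoleCallArguments& args,
             const v8::debug::ConsoleContext&) override {
    Write("error", args);
  }

  void Write(const char* level, const v8::debug::ConsoleCallArguments& args) {
    std::fprintf(sink_, "console.%s called with %d argument(s)\n", level,
                 args.Length());
  }

  std::FILE* sink_;
};

// Installed per isolate the same way d8 installs D8Console:
//   FileConsole console(stderr);
//   v8::debug::SetConsoleDelegate(isolate, &console);
```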
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 1bb5300ce9..2324ba9992 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -135,6 +135,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
return malloc(length);
#endif
}
+ using ArrayBufferAllocatorBase::Free;
void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
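The added using-declaration matters because an override of one Free overload hides every other Free overload declared by the base class; re-exporting the base names keeps those overloads callable on the derived allocator. Reduced to a standalone example (the two overloads here are illustrative, not the actual allocator interface):

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct AllocatorBase {
  virtual ~AllocatorBase() = default;
  virtual void Free(void* data, std::size_t) { std::free(data); }
  // Stand-in for whatever additional Free overloads the real base declares.
  virtual void Free(void* data, std::size_t, int /*mode*/) { std::free(data); }
};

struct ShellAllocator : AllocatorBase {
  using AllocatorBase::Free;  // without this, the 3-argument Free is hidden
  void Free(void* data, std::size_t length) override {
    std::printf("freeing %zu bytes\n", length);
    std::free(data);
  }
};

int main() {
  ShellAllocator allocator;
  void* block = std::malloc(16);
  // Resolves to the base's 3-argument overload; without the using-declaration
  // this call would not compile.
  allocator.Free(block, 16, 0);
  return 0;
}
```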
@@ -156,7 +157,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
#if USE_VM
void* VirtualMemoryAllocate(size_t length) {
- void* data = base::VirtualMemory::ReserveRegion(length);
+ void* data = base::VirtualMemory::ReserveRegion(length, nullptr);
if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
base::VirtualMemory::ReleaseRegion(data, length);
return nullptr;
@@ -195,50 +196,50 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
// disallowed, and the time reported by {MonotonicallyIncreasingTime} is
// deterministic.
class PredictablePlatform : public Platform {
-public:
- explicit PredictablePlatform(std::unique_ptr<Platform> platform)
- : platform_(std::move(platform)) {
- DCHECK_NOT_NULL(platform_);
- }
-
- void CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) override {
- // It's not defined when background tasks are being executed, so we can just
- // execute them right away.
- task->Run();
- delete task;
- }
-
- void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
- platform_->CallOnForegroundThread(isolate, task);
- }
-
- void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
- double delay_in_seconds) override {
- platform_->CallDelayedOnForegroundThread(isolate, task, delay_in_seconds);
- }
-
- void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
- UNREACHABLE();
- }
-
- bool IdleTasksEnabled(Isolate* isolate) override { return false; }
-
- double MonotonicallyIncreasingTime() override {
- return synthetic_time_in_sec_ += 0.00001;
- }
-
- v8::TracingController* GetTracingController() override {
- return platform_->GetTracingController();
- }
-
- Platform* platform() const { return platform_.get(); }
-
-private:
- double synthetic_time_in_sec_ = 0.0;
- std::unique_ptr<Platform> platform_;
-
- DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
+ public:
+ explicit PredictablePlatform(std::unique_ptr<Platform> platform)
+ : platform_(std::move(platform)) {
+ DCHECK_NOT_NULL(platform_);
+ }
+
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {
+ // It's not defined when background tasks are being executed, so we can just
+ // execute them right away.
+ task->Run();
+ delete task;
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ platform_->CallOnForegroundThread(isolate, task);
+ }
+
+ void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ platform_->CallDelayedOnForegroundThread(isolate, task, delay_in_seconds);
+ }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled(Isolate* isolate) override { return false; }
+
+ double MonotonicallyIncreasingTime() override {
+ return synthetic_time_in_sec_ += 0.00001;
+ }
+
+ v8::TracingController* GetTracingController() override {
+ return platform_->GetTracingController();
+ }
+
+ Platform* platform() const { return platform_.get(); }
+
+ private:
+ double synthetic_time_in_sec_ = 0.0;
+ std::unique_ptr<Platform> platform_;
+
+ DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
};
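Under --verify-predictable the platform's clock is synthetic: every query advances time by a fixed step, so the reported time depends only on how often the clock is read, not on real elapsed time. A trivial standalone stand-in for that behaviour:

```cpp
#include <cstdio>

// Deterministic stand-in for a monotonic clock, modeled on
// PredictablePlatform::MonotonicallyIncreasingTime above.
class SyntheticClock {
 public:
  double Now() { return time_in_sec_ += 0.00001; }

 private:
  double time_in_sec_ = 0.0;
};

int main() {
  SyntheticClock clock;
  for (int i = 0; i < 3; i++) std::printf("%.5f\n", clock.Now());
  // Always prints 0.00001, 0.00002, 0.00003, run after run.
  return 0;
}
```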
@@ -484,9 +485,9 @@ ScriptCompiler::CachedData* CompileForCachedData(
}
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
- create_params.host_import_module_dynamically_callback_ =
- Shell::HostImportModuleDynamically;
Isolate* temp_isolate = Isolate::New(create_params);
+ temp_isolate->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
ScriptCompiler::CachedData* result = NULL;
{
Isolate::Scope isolate_scope(temp_isolate);
@@ -776,27 +777,36 @@ namespace {
struct DynamicImportData {
DynamicImportData(Isolate* isolate_, Local<String> referrer_,
Local<String> specifier_,
- Local<DynamicImportResult> result_)
+ Local<Promise::Resolver> resolver_)
: isolate(isolate_) {
referrer.Reset(isolate, referrer_);
specifier.Reset(isolate, specifier_);
- result.Reset(isolate, result_);
+ resolver.Reset(isolate, resolver_);
}
Isolate* isolate;
Global<String> referrer;
Global<String> specifier;
- Global<DynamicImportResult> result;
+ Global<Promise::Resolver> resolver;
};
} // namespace
-void Shell::HostImportModuleDynamically(Isolate* isolate,
- Local<String> referrer,
- Local<String> specifier,
- Local<DynamicImportResult> result) {
- DynamicImportData* data =
- new DynamicImportData(isolate, referrer, specifier, result);
- isolate->EnqueueMicrotask(Shell::DoHostImportModuleDynamically, data);
+
+MaybeLocal<Promise> Shell::HostImportModuleDynamically(
+ Local<Context> context, Local<String> referrer, Local<String> specifier) {
+ Isolate* isolate = context->GetIsolate();
+
+ MaybeLocal<Promise::Resolver> maybe_resolver =
+ Promise::Resolver::New(context);
+ Local<Promise::Resolver> resolver;
+ if (maybe_resolver.ToLocal(&resolver)) {
+ DynamicImportData* data =
+ new DynamicImportData(isolate, referrer, specifier, resolver);
+ isolate->EnqueueMicrotask(Shell::DoHostImportModuleDynamically, data);
+ return resolver->GetPromise();
+ }
+
+ return MaybeLocal<Promise>();
}
void Shell::DoHostImportModuleDynamically(void* import_data) {
@@ -807,7 +817,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
Local<String> referrer(import_data_->referrer.Get(isolate));
Local<String> specifier(import_data_->specifier.Get(isolate));
- Local<DynamicImportResult> result(import_data_->result.Get(isolate));
+ Local<Promise::Resolver> resolver(import_data_->resolver.Get(isolate));
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
@@ -815,9 +825,11 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
std::string source_url = ToSTLString(referrer);
std::string dir_name =
- IsAbsolutePath(source_url) ? DirName(source_url) : GetWorkingDirectory();
+ DirName(IsAbsolutePath(source_url)
+ ? source_url
+ : NormalizePath(source_url, GetWorkingDirectory()));
std::string file_name = ToSTLString(specifier);
- std::string absolute_path = NormalizePath(file_name.c_str(), dir_name);
+ std::string absolute_path = NormalizePath(file_name, dir_name);
TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
@@ -829,12 +841,13 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
root_module = module_it->second.Get(isolate);
} else if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
- CHECK(result->FinishDynamicImportFailure(realm, try_catch.Exception()));
+ resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
}
MaybeLocal<Value> maybe_result;
- if (root_module->Instantiate(realm, ResolveModuleCallback)) {
+ if (root_module->InstantiateModule(realm, ResolveModuleCallback)
+ .FromMaybe(false)) {
maybe_result = root_module->Evaluate(realm);
EmptyMessageQueues(isolate);
}
@@ -842,12 +855,13 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
Local<Value> module;
if (!maybe_result.ToLocal(&module)) {
DCHECK(try_catch.HasCaught());
- CHECK(result->FinishDynamicImportFailure(realm, try_catch.Exception()));
+ resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
}
DCHECK(!try_catch.HasCaught());
- CHECK(result->FinishDynamicImportSuccess(realm, root_module));
+ Local<Value> module_namespace = root_module->GetModuleNamespace();
+ resolver->Resolve(realm, module_namespace).ToChecked();
}
bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
@@ -872,7 +886,8 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
}
MaybeLocal<Value> maybe_result;
- if (root_module->Instantiate(realm, ResolveModuleCallback)) {
+ if (root_module->InstantiateModule(realm, ResolveModuleCallback)
+ .FromMaybe(false)) {
maybe_result = root_module->Evaluate(realm);
EmptyMessageQueues(isolate);
}
@@ -1195,6 +1210,13 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
+ if (args.Length() == 2) {
+ String::Utf8Value format(args[1]);
+ if (*format && std::strcmp(*format, "binary") == 0) {
+ ReadBuffer(args);
+ return;
+ }
+ }
Local<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
Throw(args.GetIsolate(), "Error loading file");
@@ -1414,10 +1436,11 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
// print the exception.
printf("%s\n", exception_string);
} else if (message->GetScriptOrigin().Options().IsWasm()) {
- // Print <WASM>[(function index)]((function name))+(offset): (message).
+ // Print wasm-function[(function index)]:(offset): (message).
int function_index = message->GetLineNumber(context).FromJust() - 1;
int offset = message->GetStartColumn(context).FromJust();
- printf("<WASM>[%d]+%d: %s\n", function_index, offset, exception_string);
+ printf("wasm-function[%d]:%d: %s\n", function_index, offset,
+ exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
@@ -1848,6 +1871,33 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
JSON::Stringify(context, dispatch_counters).ToLocalChecked());
}
+namespace {
+int LineFromOffset(Local<debug::Script> script, int offset) {
+ debug::Location location = script->GetSourceLocation(offset);
+ return location.GetLineNumber();
+}
+
+void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
+ int end_line, uint32_t count) {
+ // Ensure space in the array.
+ lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()), 0);
+ // Boundary lines could be shared between two functions with different
+ // invocation counts. Take the maximum.
+ lines[start_line] = std::max(lines[start_line], count);
+ lines[end_line] = std::max(lines[end_line], count);
+ // Invocation counts for non-boundary lines are overwritten.
+ for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+}
+
+void WriteLcovDataForNamedRange(std::ostream& sink,
+ std::vector<uint32_t>& lines, std::string name,
+ int start_line, int end_line, uint32_t count) {
+ WriteLcovDataForRange(lines, start_line, end_line, count);
+ sink << "FN:" << start_line + 1 << "," << name << std::endl;
+ sink << "FNDA:" << count << "," << name << std::endl;
+}
+} // namespace
+
// Write coverage data in LCOV format. See man page for geninfo(1).
void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
if (!file) return;
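The helpers above encode a simple merge rule: boundary lines, which may be shared between functions with different invocation counts, keep the maximum count seen so far, while interior lines are plainly overwritten. The same rule in a standalone sketch, together with a note on the FN:/FNDA: records emitted by the named variant:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors WriteLcovDataForRange: counts are stored per 0-based line.
void WriteRange(std::vector<uint32_t>& lines, int start_line, int end_line,
                uint32_t count) {
  lines.resize(std::max(static_cast<std::size_t>(end_line + 1), lines.size()),
               0);
  lines[start_line] = std::max(lines[start_line], count);  // boundary: max
  lines[end_line] = std::max(lines[end_line], count);      // boundary: max
  for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
}

int main() {
  std::vector<uint32_t> lines;
  WriteRange(lines, 0, 4, 1);   // an enclosing function, executed once
  WriteRange(lines, 2, 2, 10);  // a nested range sharing line 2, executed 10x
  for (std::size_t i = 0; i < lines.size(); i++)
    std::printf("line %zu -> %u\n", i, static_cast<unsigned>(lines[i]));
  // Line 2 reports 10, the shared boundary keeps the larger count.
  // The named variant additionally emits, per function (1-based lines):
  //   FN:<start_line + 1>,<name>
  //   FNDA:<count>,<name>
  return 0;
}
```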
@@ -1869,33 +1919,38 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
for (size_t j = 0; j < script_data.FunctionCount(); j++) {
debug::Coverage::FunctionData function_data =
script_data.GetFunctionData(j);
- debug::Location start =
- script->GetSourceLocation(function_data.StartOffset());
- debug::Location end =
- script->GetSourceLocation(function_data.EndOffset());
- int start_line = start.GetLineNumber();
- int end_line = end.GetLineNumber();
- uint32_t count = function_data.Count();
- // Ensure space in the array.
- lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()),
- 0);
- // Boundary lines could be shared between two functions with different
- // invocation counts. Take the maximum.
- lines[start_line] = std::max(lines[start_line], count);
- lines[end_line] = std::max(lines[end_line], count);
- // Invocation counts for non-boundary lines are overwritten.
- for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+
// Write function stats.
- Local<String> name;
- std::stringstream name_stream;
- if (function_data.Name().ToLocal(&name)) {
- name_stream << ToSTLString(name);
- } else {
- name_stream << "<" << start_line + 1 << "-";
- name_stream << start.GetColumnNumber() << ">";
+ {
+ debug::Location start =
+ script->GetSourceLocation(function_data.StartOffset());
+ debug::Location end =
+ script->GetSourceLocation(function_data.EndOffset());
+ int start_line = start.GetLineNumber();
+ int end_line = end.GetLineNumber();
+ uint32_t count = function_data.Count();
+
+ Local<String> name;
+ std::stringstream name_stream;
+ if (function_data.Name().ToLocal(&name)) {
+ name_stream << ToSTLString(name);
+ } else {
+ name_stream << "<" << start_line + 1 << "-";
+ name_stream << start.GetColumnNumber() << ">";
+ }
+
+ WriteLcovDataForNamedRange(sink, lines, name_stream.str(), start_line,
+ end_line, count);
+ }
+
+ // Process inner blocks.
+ for (size_t k = 0; k < function_data.BlockCount(); k++) {
+ debug::Coverage::BlockData block_data = function_data.GetBlockData(k);
+ int start_line = LineFromOffset(script, block_data.StartOffset());
+ int end_line = LineFromOffset(script, block_data.EndOffset() - 1);
+ uint32_t count = block_data.Count();
+ WriteLcovDataForRange(lines, start_line, end_line, count);
}
- sink << "FN:" << start_line + 1 << "," << name_stream.str() << std::endl;
- sink << "FNDA:" << count << "," << name_stream.str() << std::endl;
}
// Write per-line coverage. LCOV uses 1-based line numbers.
for (size_t i = 0; i < lines.size(); i++) {
@@ -2127,6 +2182,7 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
void flushProtocolNotifications() override {}
void Send(const v8_inspector::StringView& string) {
+ v8::Isolate::AllowJavascriptExecutionScope allow_script(isolate_);
int length = static_cast<int>(string.length());
DCHECK(length < v8::String::kMaxLength);
Local<String> message =
@@ -2316,9 +2372,9 @@ base::Thread::Options SourceGroup::GetThreadOptions() {
void SourceGroup::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
- create_params.host_import_module_dynamically_callback_ =
- Shell::HostImportModuleDynamically;
Isolate* isolate = Isolate::New(create_params);
+ isolate->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
Shell::EnsureEventLoopInitialized(isolate);
D8Console console(isolate);
@@ -2437,7 +2493,7 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
while (!out_queue_.Dequeue(&result)) {
// If the worker is no longer running, and there are no messages in the
// queue, don't expect any more messages from it.
- if (!base::NoBarrier_Load(&running_)) break;
+ if (!base::Relaxed_Load(&running_)) break;
out_semaphore_.Wait();
}
return result;
@@ -2445,7 +2501,7 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
void Worker::Terminate() {
- base::NoBarrier_Store(&running_, false);
+ base::Relaxed_Store(&running_, false);
// Post NULL to wake the Worker thread message loop, and tell it to stop
// running.
PostMessage(NULL);
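The NoBarrier_ to Relaxed_ rename is a naming change in V8's base atomics; both denote atomic accesses with relaxed memory ordering. The equivalent shutdown handshake in standard C++ looks roughly like this (a simplified stand-in, not the d8 Worker class):

```cpp
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic<bool> running{true};

void WorkerLoop() {
  // Counterpart of base::Relaxed_Load: atomic, but with no ordering
  // guarantees beyond the atomicity of the flag itself.
  while (running.load(std::memory_order_relaxed)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  std::puts("worker: stopping");
}

int main() {
  std::thread worker(WorkerLoop);
  // Counterpart of base::Relaxed_Store in Worker::Terminate().
  running.store(false, std::memory_order_relaxed);
  worker.join();
  return 0;
}
```

d8 additionally posts a null message so that a GetMessage call blocked on the semaphore wakes up and observes the cleared flag.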
@@ -2461,9 +2517,9 @@ void Worker::WaitForThread() {
void Worker::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
- create_params.host_import_module_dynamically_callback_ =
- Shell::HostImportModuleDynamically;
Isolate* isolate = Isolate::New(create_params);
+ isolate->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
D8Console console(isolate);
debug::SetConsoleDelegate(isolate, &console);
{
@@ -2704,7 +2760,10 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
{
EnsureEventLoopInitialized(isolate);
if (options.lcov_file) {
- debug::Coverage::SelectMode(isolate, debug::Coverage::kPreciseCount);
+ debug::Coverage::Mode mode = i::FLAG_block_coverage
+ ? debug::Coverage::kBlockCount
+ : debug::Coverage::kPreciseCount;
+ debug::Coverage::SelectMode(isolate, mode);
}
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
@@ -3057,12 +3116,6 @@ int Shell::Main(int argc, char* argv[]) {
? v8::platform::InProcessStackDumping::kDisabled
: v8::platform::InProcessStackDumping::kEnabled;
- g_platform = v8::platform::CreateDefaultPlatform(
- 0, v8::platform::IdleTaskSupport::kEnabled, in_process_stack_dumping);
- if (i::FLAG_verify_predictable) {
- g_platform = new PredictablePlatform(std::unique_ptr<Platform>(g_platform));
- }
-
platform::tracing::TracingController* tracing_controller = nullptr;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
trace_file.open("v8_trace.json");
@@ -3072,7 +3125,13 @@ int Shell::Main(int argc, char* argv[]) {
platform::tracing::TraceBuffer::kRingBufferChunks,
platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
tracing_controller->Initialize(trace_buffer);
- platform::SetTracingController(g_platform, tracing_controller);
+ }
+
+ g_platform = v8::platform::CreateDefaultPlatform(
+ 0, v8::platform::IdleTaskSupport::kEnabled, in_process_stack_dumping,
+ tracing_controller);
+ if (i::FLAG_verify_predictable) {
+ g_platform = new PredictablePlatform(std::unique_ptr<Platform>(g_platform));
}
v8::V8::InitializePlatform(g_platform);
@@ -3110,9 +3169,6 @@ int Shell::Main(int argc, char* argv[]) {
create_params.add_histogram_sample_callback = AddHistogramSample;
}
- create_params.host_import_module_dynamically_callback_ =
- Shell::HostImportModuleDynamically;
-
if (i::trap_handler::UseTrapHandler()) {
if (!v8::V8::RegisterDefaultSignalHandler()) {
fprintf(stderr, "Could not register signal handler");
@@ -3121,6 +3177,8 @@ int Shell::Main(int argc, char* argv[]) {
}
Isolate* isolate = Isolate::New(create_params);
+ isolate->SetHostImportModuleDynamicallyCallback(
+ Shell::HostImportModuleDynamically);
D8Console console(isolate);
{
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index baa3a0bf6f..6922a1602e 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -444,10 +444,8 @@ class Shell : public i::AllStatic {
static void SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args);
static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void HostImportModuleDynamically(Isolate* isolate,
- Local<String> referrer,
- Local<String> specifier,
- Local<DynamicImportResult> result);
+ static MaybeLocal<Promise> HostImportModuleDynamically(
+ Local<Context> context, Local<String> referrer, Local<String> specifier);
// Data is of type DynamicImportData*. We use void* here to be able
// to conform with MicrotaskCallback interface and enqueue this
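With this change the embedder returns the promise for the dynamic import directly instead of completing a DynamicImportResult later. A minimal sketch of such a callback, following the same resolver pattern d8 uses; LoadAndResolve is a hypothetical placeholder for the embedder's own module loading, not a V8 API:

```cpp
#include <v8.h>  // public V8 API header

// Hypothetical embedder helper (not a V8 API): loads the module graph and
// settles the resolver when done. d8 does this by stashing the handles in
// Global<> wrappers and finishing the work in a microtask.
void LoadAndResolve(v8::Isolate* isolate, v8::Local<v8::String> referrer,
                    v8::Local<v8::String> specifier,
                    v8::Local<v8::Promise::Resolver> resolver);

v8::MaybeLocal<v8::Promise> ImportModuleDynamically(
    v8::Local<v8::Context> context, v8::Local<v8::String> referrer,
    v8::Local<v8::String> specifier) {
  v8::Isolate* isolate = context->GetIsolate();

  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
    // Resolver creation failed; report that no promise was produced.
    return v8::MaybeLocal<v8::Promise>();
  }

  // Hand off to the embedder's loader, which later calls Resolve or Reject
  // on the resolver for this context.
  LoadAndResolve(isolate, referrer, specifier, resolver);

  return resolver->GetPromise();
}

// Registered once per isolate, mirroring the d8 change:
//   isolate->SetHostImportModuleDynamicallyCallback(ImportModuleDynamically);
```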
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index ff420f5e4f..06f5f4ffb1 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -333,7 +333,6 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
}
}
UNREACHABLE();
- return 0;
}
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 4e493cdc6e..81f1e952bc 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -6,3 +6,5 @@ mvstanton@chromium.org
ulan@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 76f7b3fd5a..5a47063834 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -4,12 +4,13 @@
#include "src/debug/debug-coverage.h"
+#include "src/ast/ast.h"
#include "src/base/hashmap.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/objects/debug-objects-inl.h"
namespace v8 {
namespace internal {
@@ -57,12 +58,320 @@ bool CompareSharedFunctionInfo(SharedFunctionInfo* a, SharedFunctionInfo* b) {
if (a_start == b_start) return a->end_position() > b->end_position();
return a_start < b_start;
}
+
+bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
+ DCHECK(a.start != kNoSourcePosition);
+ DCHECK(b.start != kNoSourcePosition);
+ if (a.start == b.start) return a.end > b.end;
+ return a.start < b.start;
+}
+
+std::vector<CoverageBlock> GetSortedBlockData(Isolate* isolate,
+ SharedFunctionInfo* shared) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK(shared->HasCoverageInfo());
+
+ CoverageInfo* coverage_info =
+ CoverageInfo::cast(shared->GetDebugInfo()->coverage_info());
+
+ std::vector<CoverageBlock> result;
+ if (coverage_info->SlotCount() == 0) return result;
+
+ for (int i = 0; i < coverage_info->SlotCount(); i++) {
+ const int start_pos = coverage_info->StartSourcePosition(i);
+ const int until_pos = coverage_info->EndSourcePosition(i);
+ const int count = coverage_info->BlockCount(i);
+
+ DCHECK(start_pos != kNoSourcePosition);
+ result.emplace_back(start_pos, until_pos, count);
+ }
+
+ // Sort according to the block nesting structure.
+ std::sort(result.begin(), result.end(), CompareCoverageBlock);
+
+ return result;
+}
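CompareCoverageBlock sorts by ascending start position and breaks ties by descending end position, so after sorting every enclosing range precedes the ranges nested inside it, which is exactly the order the iterator below consumes through its nesting stack. A small standalone check of that property:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

struct Block { int start; int end; };

// Same ordering as CompareCoverageBlock: earlier start first, and for equal
// starts the wider (outer) range first.
bool CompareBlock(const Block& a, const Block& b) {
  if (a.start == b.start) return a.end > b.end;
  return a.start < b.start;
}

int main() {
  std::vector<Block> blocks = {{10, 20}, {0, 30}, {0, 8}, {25, 28}};
  std::sort(blocks.begin(), blocks.end(), CompareBlock);
  for (const Block& b : blocks) std::printf("[%d,%d)\n", b.start, b.end);
  // Prints [0,30) [0,8) [10,20) [25,28): parents always precede children.
  return 0;
}
```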
+
+// A utility class to simplify logic for performing passes over block coverage
+// ranges. Provides access to the implicit tree structure of ranges (i.e. access
+// to parent and sibling blocks), and supports efficient in-place editing and
+// deletion. The underlying backing store is the array of CoverageBlocks stored
+// on the CoverageFunction.
+class CoverageBlockIterator final {
+ public:
+ explicit CoverageBlockIterator(CoverageFunction* function)
+ : function_(function),
+ ended_(false),
+ delete_current_(false),
+ read_index_(-1),
+ write_index_(-1) {
+ DCHECK(std::is_sorted(function_->blocks.begin(), function_->blocks.end(),
+ CompareCoverageBlock));
+ }
+
+ ~CoverageBlockIterator() {
+ Finalize();
+ DCHECK(std::is_sorted(function_->blocks.begin(), function_->blocks.end(),
+ CompareCoverageBlock));
+ }
+
+ bool HasNext() const {
+ return read_index_ + 1 < static_cast<int>(function_->blocks.size());
+ }
+
+ bool Next() {
+ if (!HasNext()) {
+ if (!ended_) MaybeWriteCurrent();
+ ended_ = true;
+ return false;
+ }
+
+ // If a block has been deleted, subsequent iteration moves trailing blocks
+ // to their updated position within the array.
+ MaybeWriteCurrent();
+
+ if (read_index_ == -1) {
+ // Initialize the nesting stack with the function range.
+ nesting_stack_.emplace_back(function_->start, function_->end,
+ function_->count);
+ } else if (!delete_current_) {
+ nesting_stack_.emplace_back(GetBlock());
+ }
+
+ delete_current_ = false;
+ read_index_++;
+
+ DCHECK(IsActive());
+
+ CoverageBlock& block = GetBlock();
+ while (nesting_stack_.size() > 1 &&
+ nesting_stack_.back().end <= block.start) {
+ nesting_stack_.pop_back();
+ }
+
+ DCHECK_IMPLIES(block.start >= function_->end,
+ block.end == kNoSourcePosition);
+ DCHECK_NE(block.start, kNoSourcePosition);
+ DCHECK_LE(block.end, GetParent().end);
+
+ return true;
+ }
+
+ CoverageBlock& GetBlock() {
+ DCHECK(IsActive());
+ return function_->blocks[read_index_];
+ }
+
+ CoverageBlock& GetNextBlock() {
+ DCHECK(IsActive());
+ DCHECK(HasNext());
+ return function_->blocks[read_index_ + 1];
+ }
+
+ CoverageBlock& GetParent() {
+ DCHECK(IsActive());
+ return nesting_stack_.back();
+ }
+
+ bool HasSiblingOrChild() {
+ DCHECK(IsActive());
+ return HasNext() && GetNextBlock().start < GetParent().end;
+ }
+
+ CoverageBlock& GetSiblingOrChild() {
+ DCHECK(HasSiblingOrChild());
+ DCHECK(IsActive());
+ return GetNextBlock();
+ }
+
+ void DeleteBlock() {
+ DCHECK(!delete_current_);
+ DCHECK(IsActive());
+ delete_current_ = true;
+ }
+
+ private:
+ void MaybeWriteCurrent() {
+ if (delete_current_) return;
+ if (read_index_ >= 0 && write_index_ != read_index_) {
+ function_->blocks[write_index_] = function_->blocks[read_index_];
+ }
+ write_index_++;
+ }
+
+ void Finalize() {
+ while (Next()) {
+ // Just iterate to the end.
+ }
+ function_->blocks.resize(write_index_);
+ }
+
+ bool IsActive() const { return read_index_ >= 0 && !ended_; }
+
+ CoverageFunction* function_;
+ std::vector<CoverageBlock> nesting_stack_;
+ bool ended_;
+ bool delete_current_;
+ int read_index_;
+ int write_index_;
+};
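The iterator above deletes blocks during a pass without shifting the whole vector each time: MaybeWriteCurrent() copies each surviving block forward to write_index_, and Finalize() resizes once at the end. A simplified stand-in (plain std::vector, not V8's types) showing the same read-index/write-index compaction idiom:

    #include <vector>

    // Simplified stand-in for the compaction performed by the iterator above:
    // surviving elements are copied toward the front as they are visited and
    // the container is resized once, so deleting during iteration stays O(n).
    template <typename T, typename Pred>
    void DeleteWhere(std::vector<T>* items, Pred should_delete) {
      size_t write_index = 0;
      for (size_t read_index = 0; read_index < items->size(); ++read_index) {
        if (should_delete((*items)[read_index])) continue;
        if (write_index != read_index) {
          (*items)[write_index] = (*items)[read_index];
        }
        write_index++;
      }
      items->resize(write_index);
    }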
+
+bool HaveSameSourceRange(const CoverageBlock& lhs, const CoverageBlock& rhs) {
+ return lhs.start == rhs.start && lhs.end == rhs.end;
+}
+
+void MergeDuplicateSingletons(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next() && iter.HasNext()) {
+ CoverageBlock& block = iter.GetBlock();
+ CoverageBlock& next_block = iter.GetNextBlock();
+
+ // Identical ranges should only occur through singleton ranges. Consider the
+ // ranges for `for (.) break;`: continuation ranges for both the `break` and
+ // `for` statements begin after the trailing semicolon.
+ // Such ranges are merged and keep the maximal execution count.
+ if (!HaveSameSourceRange(block, next_block)) continue;
+
+ DCHECK_EQ(kNoSourcePosition, block.end); // Singleton range.
+ next_block.count = std::max(block.count, next_block.count);
+ iter.DeleteBlock();
+ }
+}
+
+// Rewrite position singletons (produced by unconditional control flow
+// like return statements, and by continuation counters) into source
+// ranges that end at the next sibling range or the end of the parent
+// range, whichever comes first.
+void RewritePositionSingletonsToRanges(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next()) {
+ CoverageBlock& block = iter.GetBlock();
+ CoverageBlock& parent = iter.GetParent();
+
+ if (block.start >= function->end) {
+ DCHECK_EQ(block.end, kNoSourcePosition);
+ iter.DeleteBlock();
+ } else if (block.end == kNoSourcePosition) {
+ // The current block ends at the next sibling block (if it exists) or the
+ // end of the parent block otherwise.
+ block.end = iter.HasSiblingOrChild() ? iter.GetSiblingOrChild().start
+ : parent.end;
+ }
+ }
+}
+
+void MergeNestedAndConsecutiveRanges(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next()) {
+ CoverageBlock& block = iter.GetBlock();
+ CoverageBlock& parent = iter.GetParent();
+
+ if (parent.count == block.count) {
+ iter.DeleteBlock();
+ } else if (iter.HasSiblingOrChild()) {
+ CoverageBlock& sibling = iter.GetSiblingOrChild();
+ if (sibling.start == block.end && sibling.count == block.count) {
+ // Best-effort: this pass may miss mergeable siblings in the presence of
+ // child blocks.
+ sibling.start = block.start;
+ iter.DeleteBlock();
+ }
+ }
+ }
+}
+
+void FilterUncoveredRanges(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next()) {
+ CoverageBlock& block = iter.GetBlock();
+ CoverageBlock& parent = iter.GetParent();
+ if (block.count == 0 && parent.count == 0) iter.DeleteBlock();
+ }
+}
+
+void FilterEmptyRanges(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next()) {
+ CoverageBlock& block = iter.GetBlock();
+ if (block.start == block.end) iter.DeleteBlock();
+ }
+}
+
+void ClampToBinary(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next()) {
+ CoverageBlock& block = iter.GetBlock();
+ if (block.count > 0) block.count = 1;
+ }
+}
+
+void ResetAllBlockCounts(SharedFunctionInfo* shared) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK(shared->HasCoverageInfo());
+
+ CoverageInfo* coverage_info =
+ CoverageInfo::cast(shared->GetDebugInfo()->coverage_info());
+
+ for (int i = 0; i < coverage_info->SlotCount(); i++) {
+ coverage_info->ResetBlockCount(i);
+ }
+}
+
+bool IsBlockMode(debug::Coverage::Mode mode) {
+ switch (mode) {
+ case debug::Coverage::kBlockBinary:
+ case debug::Coverage::kBlockCount:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
+ SharedFunctionInfo* info,
+ debug::Coverage::Mode mode) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK(IsBlockMode(mode));
+
+ function->has_block_coverage = true;
+ function->blocks = GetSortedBlockData(isolate, info);
+
+ // Remove duplicate singleton ranges, keeping the max count.
+ MergeDuplicateSingletons(function);
+
+ // Rewrite all singletons (created e.g. by continuations and unconditional
+ // control flow) to ranges.
+ RewritePositionSingletonsToRanges(function);
+
+ // Merge nested and consecutive ranges with identical counts.
+ MergeNestedAndConsecutiveRanges(function);
+
+ // Filter out ranges with count == 0 unless the immediate parent range has
+ // a count != 0.
+ FilterUncoveredRanges(function);
+
+ // Filter out ranges of zero length.
+ FilterEmptyRanges(function);
+
+ // If in binary mode, only report counts of 0/1.
+ if (mode == debug::Coverage::kBlockBinary) ClampToBinary(function);
+
+ // Reset all counters on the DebugInfo to zero.
+ ResetAllBlockCounts(info);
+}
} // anonymous namespace
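CollectBlockCoverage above applies the passes in a fixed order: merge duplicate singletons, rewrite the remaining singletons into ranges, merge nested and consecutive ranges, drop uncovered and empty ranges, then optionally clamp counts for binary mode. As a rough standalone illustration of the singleton-to-range rewrite only, here is a simplified stand-in that handles just the flat case where every block is a direct child of the function range (V8's pass handles arbitrary nesting through the iterator's nesting stack); the struct, offsets, and counts are invented for illustration.

    #include <cstdio>
    #include <vector>

    struct SimpleBlock {
      int start;
      int end;  // -1 stands in for kNoSourcePosition (a position singleton).
      unsigned count;
    };

    // Flat-case sketch of RewritePositionSingletonsToRanges: a singleton is
    // extended to the next sibling's start, or to the end of the enclosing
    // function range when it is the last block.
    void RewriteSingletonsFlat(std::vector<SimpleBlock>* blocks,
                               int function_end) {
      for (size_t i = 0; i < blocks->size(); ++i) {
        SimpleBlock& block = (*blocks)[i];
        if (block.end != -1) continue;
        block.end =
            (i + 1 < blocks->size()) ? (*blocks)[i + 1].start : function_end;
      }
    }

    int main() {
      // E.g. a `return` continuation at offset 40 in a function spanning [0, 100).
      std::vector<SimpleBlock> blocks = {{10, 30, 2}, {40, -1, 1}};
      RewriteSingletonsFlat(&blocks, 100);
      for (const SimpleBlock& b : blocks) {
        std::printf("[%d, %d) count=%u\n", b.start, b.end, b.count);
      }
      // Prints: [10, 30) count=2
      //         [40, 100) count=1
      return 0;
    }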
Coverage* Coverage::CollectPrecise(Isolate* isolate) {
DCHECK(!isolate->is_best_effort_code_coverage());
Coverage* result = Collect(isolate, isolate->code_coverage_mode());
- if (isolate->is_precise_binary_code_coverage()) {
+ if (isolate->is_precise_binary_code_coverage() ||
+ isolate->is_block_binary_code_coverage()) {
// We do not have to hold onto feedback vectors for invocations we already
// reported. So we can reset the list.
isolate->SetCodeCoverageList(*ArrayList::New(isolate, 0));
@@ -78,10 +387,13 @@ Coverage* Coverage::Collect(Isolate* isolate,
v8::debug::Coverage::Mode collectionMode) {
SharedToCounterMap counter_map;
+ const bool reset_count = collectionMode != v8::debug::Coverage::kBestEffort;
+
switch (isolate->code_coverage_mode()) {
+ case v8::debug::Coverage::kBlockBinary:
+ case v8::debug::Coverage::kBlockCount:
case v8::debug::Coverage::kPreciseBinary:
case v8::debug::Coverage::kPreciseCount: {
- bool reset_count = collectionMode != v8::debug::Coverage::kBestEffort;
// Feedback vectors are already listed to prevent losing them to GC.
DCHECK(isolate->factory()->code_coverage_list()->IsArrayList());
Handle<ArrayList> list =
@@ -149,8 +461,10 @@ Coverage* Coverage::Collect(Isolate* isolate,
}
if (count != 0) {
switch (collectionMode) {
+ case v8::debug::Coverage::kBlockCount:
case v8::debug::Coverage::kPreciseCount:
break;
+ case v8::debug::Coverage::kBlockBinary:
case v8::debug::Coverage::kPreciseBinary:
count = info->has_reported_binary_coverage() ? 0 : 1;
info->set_has_reported_binary_coverage(true);
@@ -167,6 +481,12 @@ Coverage* Coverage::Collect(Isolate* isolate,
Handle<String> name(info->DebugName(), isolate);
nesting.push_back(functions->size());
functions->emplace_back(start, end, count, name);
+
+ if (FLAG_block_coverage && IsBlockMode(collectionMode) &&
+ info->HasCoverageInfo()) {
+ CoverageFunction* function = &functions->back();
+ CollectBlockCoverage(isolate, function, info, collectionMode);
+ }
}
}
@@ -179,8 +499,15 @@ Coverage* Coverage::Collect(Isolate* isolate,
void Coverage::SelectMode(Isolate* isolate, debug::Coverage::Mode mode) {
switch (mode) {
case debug::Coverage::kBestEffort:
+ // Note that DevTools switches back to best-effort coverage once the
+ // recording is stopped. Since we delete coverage infos at that point, any
+ // following coverage recording (without reloads) will be at function
+ // granularity.
+ if (FLAG_block_coverage) isolate->debug()->RemoveAllCoverageInfos();
isolate->SetCodeCoverageList(isolate->heap()->undefined_value());
break;
+ case debug::Coverage::kBlockBinary:
+ case debug::Coverage::kBlockCount:
case debug::Coverage::kPreciseBinary:
case debug::Coverage::kPreciseCount: {
HandleScope scope(isolate);
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index b21622cb2e..49e3d60f21 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -16,13 +16,24 @@ namespace internal {
// Forward declaration.
class Isolate;
+struct CoverageBlock {
+ CoverageBlock(int s, int e, uint32_t c) : start(s), end(e), count(c) {}
+ CoverageBlock() : CoverageBlock(kNoSourcePosition, kNoSourcePosition, 0) {}
+ int start;
+ int end;
+ uint32_t count;
+};
+
struct CoverageFunction {
CoverageFunction(int s, int e, uint32_t c, Handle<String> n)
- : start(s), end(e), count(c), name(n) {}
+ : start(s), end(e), count(c), name(n), has_block_coverage(false) {}
int start;
int end;
uint32_t count;
Handle<String> name;
+ // Blocks are sorted by start position, from outer to inner blocks.
+ std::vector<CoverageBlock> blocks;
+ bool has_block_coverage;
};
struct CoverageScript {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 15a007ac89..10b0f602c0 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -172,7 +172,8 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
evaluation_context_ = outer_context;
break;
} else if (scope_type == ScopeIterator::ScopeTypeCatch ||
- scope_type == ScopeIterator::ScopeTypeWith) {
+ scope_type == ScopeIterator::ScopeTypeWith ||
+ scope_type == ScopeIterator::ScopeTypeModule) {
ContextChainElement context_chain_element;
Handle<Context> current_context = it.CurrentContext();
if (!current_context->IsDebugEvaluateContext()) {
@@ -276,8 +277,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(IsJSProxy) \
V(IsJSMap) \
V(IsJSSet) \
- V(IsJSMapIterator) \
- V(IsJSSetIterator) \
V(IsJSWeakMap) \
V(IsJSWeakSet) \
V(IsRegExp) \
@@ -312,8 +311,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(FixedArrayGet) \
V(StringGetRawHashField) \
V(GenericHash) \
- V(MapIteratorInitialize) \
V(MapInitialize) \
+ V(SetInitialize) \
/* Called from builtins */ \
V(StringParseFloat) \
V(StringParseInt) \
@@ -341,6 +340,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ForInPrepare) \
V(Call) \
V(MaxSmi) \
+ V(NewObject) \
+ V(FinalizeInstanceSize) \
V(HasInPrototypeChain)
#define CASE(Name) \
@@ -436,13 +437,18 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kToObject:
case Bytecode::kToNumber:
case Bytecode::kToName:
+ case Bytecode::kToPrimitiveToString:
// Misc.
+ case Bytecode::kStringConcat:
case Bytecode::kForInPrepare:
case Bytecode::kForInContinue:
case Bytecode::kForInNext:
case Bytecode::kForInStep:
case Bytecode::kThrow:
case Bytecode::kReThrow:
+ case Bytecode::kThrowReferenceErrorIfHole:
+ case Bytecode::kThrowSuperNotCalledIfHole:
+ case Bytecode::kThrowSuperAlreadyCalledIfNotHole:
case Bytecode::kIllegal:
case Bytecode::kCallJSRuntime:
case Bytecode::kStackCheck:
@@ -476,6 +482,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kObjectPrototypeValueOf:
case Builtins::kObjectValues:
case Builtins::kObjectHasOwnProperty:
+ case Builtins::kObjectPrototypeIsPrototypeOf:
case Builtins::kObjectPrototypePropertyIsEnumerable:
case Builtins::kObjectProtoToString:
// Array builtins.
@@ -525,6 +532,13 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kDatePrototypeToJson:
case Builtins::kDatePrototypeToPrimitive:
case Builtins::kDatePrototypeValueOf:
+ // Map builtins.
+ case Builtins::kMapConstructor:
+ case Builtins::kMapGet:
+ case Builtins::kMapPrototypeEntries:
+ case Builtins::kMapPrototypeGetSize:
+ case Builtins::kMapPrototypeKeys:
+ case Builtins::kMapPrototypeValues:
// Math builtins.
case Builtins::kMathAbs:
case Builtins::kMathAcos:
@@ -574,6 +588,11 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kNumberPrototypeToPrecision:
case Builtins::kNumberPrototypeToString:
case Builtins::kNumberPrototypeValueOf:
+ // Set builtins.
+ case Builtins::kSetConstructor:
+ case Builtins::kSetPrototypeEntries:
+ case Builtins::kSetPrototypeGetSize:
+ case Builtins::kSetPrototypeValues:
// String builtins. Strings are immutable.
case Builtins::kStringFromCharCode:
case Builtins::kStringFromCodePoint:
@@ -590,8 +609,10 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeSubstr:
case Builtins::kStringPrototypeSubstring:
case Builtins::kStringPrototypeToString:
+#ifndef V8_INTL_SUPPORT
case Builtins::kStringPrototypeToLowerCase:
case Builtins::kStringPrototypeToUpperCase:
+#endif
case Builtins::kStringPrototypeTrim:
case Builtins::kStringPrototypeTrimLeft:
case Builtins::kStringPrototypeTrimRight:
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index d4899114c9..39c5b3b329 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -27,9 +27,9 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
// Calculate the deoptimized frame.
if (is_optimized_) {
DCHECK(js_frame != nullptr);
- // TODO(turbofan): Revisit once we support deoptimization.
+ // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
if (js_frame->LookupCode()->is_turbofanned() &&
- js_frame->function()->shared()->asm_function()) {
+ !js_frame->function()->shared()->HasBytecodeArray()) {
is_optimized_ = false;
return;
}
@@ -73,10 +73,10 @@ Handle<Object> FrameInspector::GetParameter(int index) {
}
Handle<Object> FrameInspector::GetExpression(int index) {
- // TODO(turbofan): Revisit once we support deoptimization.
+ // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
if (frame_->is_java_script() &&
javascript_frame()->LookupCode()->is_turbofanned() &&
- javascript_frame()->function()->shared()->asm_function()) {
+ !javascript_frame()->function()->shared()->HasBytecodeArray()) {
return isolate_->factory()->undefined_value();
}
return is_optimized_ ? deoptimized_frame_->GetExpression(index)
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index ffe34c112e..984d8ae58d 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -17,6 +17,7 @@
namespace v8 {
namespace internal {
+struct CoverageBlock;
struct CoverageFunction;
struct CoverageScript;
class Coverage;
@@ -215,6 +216,9 @@ V8_EXPORT_PRIVATE void SetConsoleDelegate(Isolate* isolate,
int GetStackFrameId(v8::Local<v8::StackFrame> frame);
+v8::Local<v8::StackTrace> GetDetailedStackTrace(Isolate* isolate,
+ v8::Local<v8::Object> error);
+
/**
* Native wrapper around v8::internal::JSGeneratorObject object.
*/
@@ -245,10 +249,29 @@ class V8_EXPORT_PRIVATE Coverage {
// We are only interested in a yes/no result for the function. Optimization
// and GC can be allowed once a function has been invoked. Collecting
// precise binary coverage resets counters for incremental updates.
- kPreciseBinary
+ kPreciseBinary,
+ // Similar to the precise coverage modes but provides coverage at a
+ // lower granularity. Design doc: goo.gl/lA2swZ.
+ kBlockCount,
+ kBlockBinary,
};
- class ScriptData; // Forward declaration.
+ // Forward declarations.
+ class ScriptData;
+ class FunctionData;
+
+ class V8_EXPORT_PRIVATE BlockData {
+ public:
+ int StartOffset() const;
+ int EndOffset() const;
+ uint32_t Count() const;
+
+ private:
+ explicit BlockData(i::CoverageBlock* block) : block_(block) {}
+ i::CoverageBlock* block_;
+
+ friend class v8::debug::Coverage::FunctionData;
+ };
class V8_EXPORT_PRIVATE FunctionData {
public:
@@ -256,6 +279,9 @@ class V8_EXPORT_PRIVATE Coverage {
int EndOffset() const;
uint32_t Count() const;
MaybeLocal<String> Name() const;
+ size_t BlockCount() const;
+ bool HasBlockCoverage() const;
+ BlockData GetBlockData(size_t i) const;
private:
explicit FunctionData(i::CoverageFunction* function)
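For consumers, the new BlockData accessors hang off FunctionData exactly as declared above. A hedged sketch of walking one function's block coverage; how the FunctionData itself is obtained (script and function enumeration on the collected Coverage object) is left out here, since those entry points are not part of this hunk.

    #include <cstdio>
    #include "src/debug/debug-interface.h"

    // Sketch only: dump the block ranges and counts for one covered function,
    // using just the accessors introduced in this header.
    void DumpBlockCoverage(const v8::debug::Coverage::FunctionData& function) {
      if (!function.HasBlockCoverage()) return;
      for (size_t i = 0; i < function.BlockCount(); ++i) {
        v8::debug::Coverage::BlockData block = function.GetBlockData(i);
        std::printf("  block [%d, %d) count=%u\n", block.StartOffset(),
                    block.EndOffset(), block.Count());
      }
    }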
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 2d4c01ebe3..a91397b04b 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -30,16 +30,20 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
return;
}
- context_ = Handle<Context>::cast(frame_inspector->GetContext());
-
// We should not instantiate a ScopeIterator for wasm frames.
DCHECK(frame_inspector->GetScript()->type() != Script::TYPE_WASM);
+ TryParseAndRetrieveScopes(option);
+}
+
+void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
+ context_ = GetContext();
+
// Catch the case when the debugger stops in an internal function.
Handle<JSFunction> function = GetFunction();
Handle<SharedFunctionInfo> shared_info(function->shared());
Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (shared_info->script()->IsUndefined(isolate)) {
+ if (shared_info->script()->IsUndefined(isolate_)) {
while (context_->closure() == *function) {
context_ = Handle<Context>(context_->previous(), isolate_);
}
@@ -54,7 +58,8 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// and include nested scopes into the "fast" iteration case as well.
bool ignore_nested_scopes = (option == IGNORE_NESTED_SCOPES);
bool collect_non_locals = (option == COLLECT_NON_LOCALS);
- if (!ignore_nested_scopes && shared_info->HasDebugInfo()) {
+ if (!ignore_nested_scopes && shared_info->HasBreakInfo() &&
+ frame_inspector_ != nullptr) {
// The source position at return is always the end of the function,
// which is not consistent with the current scope chain. Therefore all
// nested with, catch and block contexts are skipped, and we can only
@@ -109,8 +114,8 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Inner function.
info.reset(new ParseInfo(shared_info));
}
- if (parsing::ParseAny(info.get(), isolate) &&
- Rewriter::Rewrite(info.get(), isolate)) {
+ if (parsing::ParseAny(info.get(), isolate_) &&
+ Rewriter::Rewrite(info.get(), isolate_)) {
DeclarationScope* scope = info->literal()->scope();
if (!ignore_nested_scopes || collect_non_locals) {
CollectNonLocals(info.get(), scope);
@@ -136,7 +141,6 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate),
- frame_inspector_(NULL),
context_(function->context()),
seen_script_scope_(false) {
if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
@@ -146,13 +150,14 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
ScopeIterator::ScopeIterator(Isolate* isolate,
Handle<JSGeneratorObject> generator)
: isolate_(isolate),
- frame_inspector_(NULL),
+ generator_(generator),
context_(generator->context()),
seen_script_scope_(false) {
if (!generator->function()->shared()->IsSubjectToDebugging()) {
context_ = Handle<Context>();
+ return;
}
- UnwrapEvaluationContext();
+ TryParseAndRetrieveScopes(DEFAULT);
}
void ScopeIterator::UnwrapEvaluationContext() {
@@ -327,7 +332,6 @@ MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
return MaterializeModuleScope();
}
UNREACHABLE();
- return Handle<JSObject>();
}
@@ -363,7 +367,7 @@ bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
case ScopeIterator::ScopeTypeEval:
return SetInnerScopeVariableValue(variable_name, new_value);
case ScopeIterator::ScopeTypeModule:
- // TODO(neis): Implement.
+ return SetModuleVariableValue(variable_name, new_value);
break;
}
return false;
@@ -460,10 +464,36 @@ void ScopeIterator::DebugPrint() {
}
#endif
+inline Handle<Context> ScopeIterator::GetContext() {
+ if (frame_inspector_) {
+ return Handle<Context>::cast(frame_inspector_->GetContext());
+ } else {
+ DCHECK(!generator_.is_null());
+ return handle(generator_->context());
+ }
+}
+
+Handle<JSFunction> ScopeIterator::GetFunction() {
+ if (frame_inspector_) {
+ return frame_inspector_->GetFunction();
+ } else {
+ DCHECK(!generator_.is_null());
+ return handle(generator_->function());
+ }
+}
+
+int ScopeIterator::GetSourcePosition() {
+ if (frame_inspector_) {
+ return frame_inspector_->GetSourcePosition();
+ } else {
+ DCHECK(!generator_.is_null());
+ return generator_->source_position();
+ }
+}
+
void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
DCHECK_NOT_NULL(scope);
- int source_position = frame_inspector_->GetSourcePosition();
- GetNestedScopeChain(isolate_, scope, source_position);
+ GetNestedScopeChain(isolate_, scope, GetSourcePosition());
}
void ScopeIterator::CollectNonLocals(ParseInfo* info, DeclarationScope* scope) {
@@ -491,20 +521,41 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
return script_scope;
}
+void ScopeIterator::MaterializeStackLocals(Handle<JSObject> local_scope,
+ Handle<ScopeInfo> scope_info) {
+ if (frame_inspector_) {
+ return frame_inspector_->MaterializeStackLocals(local_scope, scope_info);
+ }
+
+ DCHECK(!generator_.is_null());
+ // Fill all stack locals.
+ Handle<FixedArray> register_file(generator_->register_file());
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ Handle<String> name = handle(scope_info->StackLocalName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
+ Handle<Object> value(register_file->get(scope_info->StackLocalIndex(i)),
+ isolate_);
+ // TODO(yangguo): We convert optimized out values to {undefined} when they
+ // are passed to the debugger. Eventually we should handle them somehow.
+ if (value->IsTheHole(isolate_) || value->IsOptimizedOut(isolate_)) {
+ DCHECK(!value.is_identical_to(isolate_->factory()->stale_register()));
+ value = isolate_->factory()->undefined_value();
+ }
+ JSObject::SetOwnPropertyIgnoreAttributes(local_scope, name, value, NONE)
+ .Check();
+ }
+}
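With the generator-aware helpers above, the local scope of a suspended generator is materialized from its register file instead of a stack frame. A sketch only (v8::internal code; the dumper's name is hypothetical) of walking such a scope chain through the new constructor path:

    // Assumes this lives inside namespace v8::internal, next to the iterator.
    void DumpSuspendedGeneratorScopes(Isolate* isolate,
                                      Handle<JSGeneratorObject> generator) {
      for (ScopeIterator it(isolate, generator); !it.Done(); it.Next()) {
        Handle<JSObject> scope_object;
        // ScopeObject() materializes the current scope's variables; for the
        // local scope of a suspended generator this now reads the register
        // file rather than a stack frame.
        if (!it.ScopeObject().ToHandle(&scope_object)) continue;
        PrintF("scope type %d\n", static_cast<int>(it.Type()));
      }
    }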
MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
- Handle<JSFunction> function = GetFunction();
+ Handle<JSFunction> function(GetFunction());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
Handle<JSObject> local_scope =
isolate_->factory()->NewJSObjectWithNullProto();
- frame_inspector_->MaterializeStackLocals(local_scope, function);
-
- Handle<Context> frame_context =
- Handle<Context>::cast(frame_inspector_->GetContext());
+ MaterializeStackLocals(local_scope, scope_info);
- HandleScope scope(isolate_);
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
+ Handle<Context> frame_context = GetContext();
if (!scope_info->HasContext()) return local_scope;
@@ -586,7 +637,7 @@ Handle<JSObject> ScopeIterator::MaterializeInnerScope() {
Handle<Context> context = Handle<Context>::null();
if (!nested_scope_chain_.is_empty()) {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
- frame_inspector_->MaterializeStackLocals(inner_scope, scope_info);
+ MaterializeStackLocals(inner_scope, scope_info);
if (scope_info->HasContext()) context = CurrentContext();
} else {
context = CurrentContext();
@@ -616,14 +667,19 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
}
bool ScopeIterator::SetParameterValue(Handle<ScopeInfo> scope_info,
- JavaScriptFrame* frame,
Handle<String> parameter_name,
Handle<Object> new_value) {
// Setting stack locals of optimized frames is not supported.
- if (frame->is_optimized()) return false;
HandleScope scope(isolate_);
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
if (String::Equals(handle(scope_info->ParameterName(i)), parameter_name)) {
+ // Suspended generators should not get here because all parameters should
+ // be context-allocated.
+ DCHECK_NOT_NULL(frame_inspector_);
+ JavaScriptFrame* frame = GetFrame();
+ if (frame->is_optimized()) {
+ return false;
+ }
frame->SetParameterValue(i, *new_value);
return true;
}
@@ -634,14 +690,24 @@ bool ScopeIterator::SetParameterValue(Handle<ScopeInfo> scope_info,
bool ScopeIterator::SetStackVariableValue(Handle<ScopeInfo> scope_info,
Handle<String> variable_name,
Handle<Object> new_value) {
- if (frame_inspector_ == nullptr) return false;
- JavaScriptFrame* frame = GetFrame();
- // Setting stack locals of optimized frames is not supported.
- if (frame->is_optimized()) return false;
+ // Setting stack locals of optimized frames is not supported. Suspended
+ // generators are supported.
HandleScope scope(isolate_);
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
- frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
+ int stack_local_index = scope_info->StackLocalIndex(i);
+ if (frame_inspector_ != nullptr) {
+ // Set the variable on the stack.
+ JavaScriptFrame* frame = GetFrame();
+ if (frame->is_optimized()) return false;
+ frame->SetExpression(stack_local_index, *new_value);
+ } else {
+ // Set the variable in the suspended generator.
+ DCHECK(!generator_.is_null());
+ Handle<FixedArray> register_file(generator_->register_file());
+ DCHECK_LT(stack_local_index, register_file->length());
+ register_file->set(stack_local_index, *new_value);
+ }
return true;
}
}
@@ -666,7 +732,8 @@ bool ScopeIterator::SetContextVariableValue(Handle<ScopeInfo> scope_info,
}
}
- if (context->has_extension()) {
+ // TODO(neis): Clean up context "extension" mess.
+ if (!context->IsModuleContext() && context->has_extension()) {
Handle<JSObject> ext(context->extension_object());
Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
DCHECK(maybe.IsJust());
@@ -684,11 +751,10 @@ bool ScopeIterator::SetContextVariableValue(Handle<ScopeInfo> scope_info,
bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- JavaScriptFrame* frame = GetFrame();
- Handle<ScopeInfo> scope_info(frame->function()->shared()->scope_info());
+ Handle<ScopeInfo> scope_info(GetFunction()->shared()->scope_info());
// Parameter might be shadowed in context. Don't stop here.
- bool result = SetParameterValue(scope_info, frame, variable_name, new_value);
+ bool result = SetParameterValue(scope_info, variable_name, new_value);
// Stack locals.
if (SetStackVariableValue(scope_info, variable_name, new_value)) {
@@ -704,6 +770,41 @@ bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
return result;
}
+bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ DCHECK_NOT_NULL(frame_inspector_);
+
+ // Get module context and its scope info.
+ Handle<Context> context = CurrentContext();
+ while (!context->IsModuleContext()) {
+ context = handle(context->previous(), isolate_);
+ }
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
+ DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
+
+ if (SetContextVariableValue(scope_info, context, variable_name, new_value)) {
+ return true;
+ }
+
+ int cell_index;
+ {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ cell_index = scope_info->ModuleIndex(variable_name, &mode, &init_flag,
+ &maybe_assigned_flag);
+ }
+
+ // Setting imports is currently not supported.
+ bool found = ModuleDescriptor::GetCellIndexKind(cell_index) ==
+ ModuleDescriptor::kExport;
+ if (found) {
+ Module::StoreVariable(handle(context->module(), isolate_), cell_index,
+ new_value);
+ }
+ return found;
+}
+
bool ScopeIterator::SetInnerScopeVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
Handle<ScopeInfo> scope_info = CurrentScopeInfo();
@@ -840,7 +941,7 @@ void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
if (scope->is_function_scope()) {
// Do not collect scopes of nested inner functions inside the current one.
// Nested arrow functions could have the same end positions.
- Handle<JSFunction> function = frame_inspector_->GetFunction();
+ Handle<JSFunction> function = GetFunction();
if (scope->start_position() > function->shared()->start_position() &&
scope->end_position() <= function->shared()->end_position()) {
return;
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index d187f3e7bd..c93e5f89cb 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -93,7 +93,8 @@ class ScopeIterator {
};
Isolate* isolate_;
- FrameInspector* const frame_inspector_;
+ FrameInspector* const frame_inspector_ = nullptr;
+ Handle<JSGeneratorObject> generator_;
Handle<Context> context_;
List<ExtendedScopeInfo> nested_scope_chain_;
Handle<StringSet> non_locals_;
@@ -103,9 +104,14 @@ class ScopeIterator {
return frame_inspector_->GetArgumentsFrame();
}
- inline Handle<JSFunction> GetFunction() {
- return frame_inspector_->GetFunction();
- }
+ Handle<Context> GetContext();
+ Handle<JSFunction> GetFunction();
+ int GetSourcePosition();
+
+ void MaterializeStackLocals(Handle<JSObject> local_scope,
+ Handle<ScopeInfo> scope_info);
+
+ void TryParseAndRetrieveScopes(ScopeIterator::Option option);
void RetrieveScopeChain(DeclarationScope* scope);
@@ -131,9 +137,11 @@ class ScopeIterator {
Handle<Object> new_value);
bool SetCatchVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
+ bool SetModuleVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
// Helper functions.
- bool SetParameterValue(Handle<ScopeInfo> scope_info, JavaScriptFrame* frame,
+ bool SetParameterValue(Handle<ScopeInfo> scope_info,
Handle<String> parameter_name,
Handle<Object> new_value);
bool SetStackVariableValue(Handle<ScopeInfo> scope_info,
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 41c22c9ea5..243eed637e 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -28,6 +28,7 @@
#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/snapshot/natives.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -115,12 +116,12 @@ bool BreakLocation::HasBreakPoint(Handle<DebugInfo> debug_info) const {
if (abstract_code_->IsCode()) {
DCHECK_EQ(debug_info->DebugCode(), abstract_code_->GetCode());
CodeBreakIterator it(debug_info);
- it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
+ it.SkipToPosition(position_);
return it.code_offset() == code_offset_;
} else {
DCHECK(abstract_code_->IsBytecodeArray());
BytecodeArrayBreakIterator it(debug_info);
- it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
+ it.SkipToPosition(position_);
return it.code_offset() == code_offset_;
}
}
@@ -158,18 +159,11 @@ BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
statement_position_ = position_;
}
-int BreakIterator::BreakIndexFromPosition(int source_position,
- BreakPositionAlignment alignment) {
+int BreakIterator::BreakIndexFromPosition(int source_position) {
int distance = kMaxInt;
int closest_break = break_index();
while (!Done()) {
- int next_position;
- if (alignment == STATEMENT_ALIGNED) {
- next_position = statement_position();
- } else {
- DCHECK(alignment == BREAK_POSITION_ALIGNED);
- next_position = position();
- }
+ int next_position = position();
if (source_position <= next_position &&
next_position - source_position < distance) {
closest_break = break_index();
@@ -196,7 +190,6 @@ int CodeBreakIterator::GetModeMask() {
int mask = 0;
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
- mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
return mask;
}
@@ -231,10 +224,6 @@ DebugBreakType CodeBreakIterator::GetDebugBreakType() {
return DEBUG_BREAK_SLOT_AT_RETURN;
} else if (RelocInfo::IsDebugBreakSlotAtCall(rmode())) {
return DEBUG_BREAK_SLOT_AT_CALL;
- } else if (RelocInfo::IsDebugBreakSlotAtTailCall(rmode())) {
- return isolate()->is_tail_call_elimination_enabled()
- ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
- : DEBUG_BREAK_SLOT_AT_CALL;
} else if (RelocInfo::IsDebugBreakSlot(rmode())) {
return DEBUG_BREAK_SLOT;
} else {
@@ -242,10 +231,14 @@ DebugBreakType CodeBreakIterator::GetDebugBreakType() {
}
}
-void CodeBreakIterator::SkipToPosition(int position,
- BreakPositionAlignment alignment) {
+void CodeBreakIterator::SkipToPosition(int position) {
CodeBreakIterator it(debug_info_);
- SkipTo(it.BreakIndexFromPosition(position, alignment));
+ SkipTo(it.BreakIndexFromPosition(position));
+}
+
+int CodeBreakIterator::code_offset() {
+ return static_cast<int>(rinfo()->pc() -
+ debug_info_->DebugCode()->instruction_start());
}
void CodeBreakIterator::SetDebugBreak() {
@@ -313,10 +306,6 @@ DebugBreakType BytecodeArrayBreakIterator::GetDebugBreakType() {
return DEBUGGER_STATEMENT;
} else if (bytecode == interpreter::Bytecode::kReturn) {
return DEBUG_BREAK_SLOT_AT_RETURN;
- } else if (bytecode == interpreter::Bytecode::kTailCall) {
- return isolate()->is_tail_call_elimination_enabled()
- ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
- : DEBUG_BREAK_SLOT_AT_CALL;
} else if (interpreter::Bytecodes::IsCallOrConstruct(bytecode)) {
return DEBUG_BREAK_SLOT_AT_CALL;
} else if (source_position_iterator_.is_statement()) {
@@ -326,10 +315,9 @@ DebugBreakType BytecodeArrayBreakIterator::GetDebugBreakType() {
}
}
-void BytecodeArrayBreakIterator::SkipToPosition(
- int position, BreakPositionAlignment alignment) {
+void BytecodeArrayBreakIterator::SkipToPosition(int position) {
BytecodeArrayBreakIterator it(debug_info_);
- SkipTo(it.BreakIndexFromPosition(position, alignment));
+ SkipTo(it.BreakIndexFromPosition(position));
}
void BytecodeArrayBreakIterator::SetDebugBreak() {
@@ -396,8 +384,8 @@ void Debug::ThreadInit() {
thread_local_.async_task_count_ = 0;
clear_suspended_generator();
thread_local_.restart_fp_ = nullptr;
- base::NoBarrier_Store(&thread_local_.current_debug_scope_,
- static_cast<base::AtomicWord>(0));
+ base::Relaxed_Store(&thread_local_.current_debug_scope_,
+ static_cast<base::AtomicWord>(0));
UpdateHookOnFunctionCall();
}
@@ -482,6 +470,8 @@ void Debug::Unload() {
// Return debugger is not loaded.
if (!is_loaded()) return;
+ if (FLAG_block_coverage) RemoveAllCoverageInfos();
+
// Clear debugger context global handle.
GlobalHandles::Destroy(Handle<Object>::cast(debug_context_).location());
debug_context_ = Handle<Context>();
@@ -505,7 +495,7 @@ void Debug::Break(JavaScriptFrame* frame) {
// Return if we fail to retrieve debug info.
Handle<JSFunction> function(frame->function());
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared)) return;
+ if (!EnsureBreakInfo(shared)) return;
Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
// Find the break location where execution has stopped.
@@ -553,8 +543,6 @@ void Debug::Break(JavaScriptFrame* frame) {
case StepNext:
// Step next should not break in a deeper frame than target frame.
if (current_frame_count > target_frame_count) return;
- // For step-next, a tail call is like a return and should break.
- step_break = location.IsTailCall();
// Fall through.
case StepIn: {
FrameSummary summary = FrameSummary::GetTop(frame);
@@ -606,7 +594,7 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
FrameSummary summary = FrameSummary::GetTop(frame);
DCHECK(!summary.IsWasm());
Handle<JSFunction> function = summary.AsJavaScript().function();
- if (!function->shared()->HasDebugInfo()) return false;
+ if (!function->shared()->HasBreakInfo()) return false;
Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo());
// Enter the debugger.
DebugScope debug_scope(this);
@@ -627,6 +615,7 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
Handle<Object> args[]) {
+ AllowJavascriptExecutionDebugOnly allow_script(isolate_);
PostponeInterruptsScope no_interrupts(isolate_);
AssertDebugContext();
Handle<JSReceiver> holder =
@@ -672,14 +661,13 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared)) return true;
+ if (!EnsureBreakInfo(shared)) return true;
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Source positions start with zero.
DCHECK(*source_position >= 0);
// Find the break point and change it.
- *source_position =
- FindBreakablePosition(debug_info, *source_position, STATEMENT_ALIGNED);
+ *source_position = FindBreakablePosition(debug_info, *source_position);
DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
// At least one active break point now.
DCHECK(debug_info->GetBreakPointCount() > 0);
@@ -691,11 +679,9 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
return true;
}
-
bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
- int* source_position,
- BreakPositionAlignment alignment) {
+ int* source_position) {
if (script->type() == Script::TYPE_WASM) {
Handle<WasmCompiledModule> compiled_module(
WasmCompiledModule::cast(script->wasm_compiled_module()), isolate_);
@@ -712,7 +698,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
- if (!EnsureDebugInfo(shared)) return false;
+ if (!EnsureBreakInfo(shared)) return false;
// Find position within function. The script position might be before the
// source position of the first function.
@@ -723,8 +709,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Find the break point and change it.
- *source_position =
- FindBreakablePosition(debug_info, *source_position, alignment);
+ *source_position = FindBreakablePosition(debug_info, *source_position);
DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
// At least one active break point now.
DCHECK(debug_info->GetBreakPointCount() > 0);
@@ -737,23 +722,19 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
}
int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
- int source_position,
- BreakPositionAlignment alignment) {
- int statement_position;
+ int source_position) {
int position;
if (debug_info->HasDebugCode()) {
CodeBreakIterator it(debug_info);
- it.SkipToPosition(source_position, alignment);
- statement_position = it.statement_position();
+ it.SkipToPosition(source_position);
position = it.position();
} else {
DCHECK(debug_info->HasDebugBytecodeArray());
BytecodeArrayBreakIterator it(debug_info);
- it.SkipToPosition(source_position, alignment);
- statement_position = it.statement_position();
+ it.SkipToPosition(source_position);
position = it.position();
}
- return alignment == STATEMENT_ALIGNED ? statement_position : position;
+ return position;
}
void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
@@ -766,12 +747,12 @@ void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
if (info->GetBreakPointCount() == 0) continue;
if (debug_info->HasDebugCode()) {
CodeBreakIterator it(debug_info);
- it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
+ it.SkipToPosition(info->source_position());
it.SetDebugBreak();
}
if (debug_info->HasDebugBytecodeArray()) {
BytecodeArrayBreakIterator it(debug_info);
- it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
+ it.SkipToPosition(info->source_position());
it.SetDebugBreak();
}
}
@@ -803,7 +784,7 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
if (DebugInfo::ClearBreakPoint(debug_info, break_point_object)) {
ClearBreakPoints(debug_info);
if (debug_info->GetBreakPointCount() == 0) {
- RemoveDebugInfoAndClearFromShared(debug_info);
+ RemoveBreakInfoAndMaybeFree(debug_info);
} else {
ApplyBreakPoints(debug_info);
}
@@ -812,25 +793,19 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
}
}
-// Clear out all the debug break code. This is ONLY supposed to be used when
-// shutting down the debugger as it will leave the break point information in
-// DebugInfo even though the code is patched back to the non break point state.
+// Clear out all the debug break code.
void Debug::ClearAllBreakPoints() {
- for (DebugInfoListNode* node = debug_info_list_; node != NULL;
- node = node->next()) {
- ClearBreakPoints(node->debug_info());
- }
- // Remove all debug info.
- while (debug_info_list_ != NULL) {
- RemoveDebugInfoAndClearFromShared(debug_info_list_->debug_info());
- }
+ ClearAllDebugInfos([=](Handle<DebugInfo> info) {
+ ClearBreakPoints(info);
+ return info->ClearBreakInfo();
+ });
}
void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
bool returns_only) {
if (IsBlackboxed(shared)) return;
// Make sure the function is compiled and has set up the debug info.
- if (!EnsureDebugInfo(shared)) return;
+ if (!EnsureBreakInfo(shared)) return;
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Flood the function with break points.
if (debug_info->HasDebugCode()) {
@@ -1025,7 +1000,7 @@ void Debug::PrepareStep(StepAction step_action) {
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared)) return;
+ if (!EnsureBreakInfo(shared)) return;
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
@@ -1043,8 +1018,6 @@ void Debug::PrepareStep(StepAction step_action) {
}
UpdateHookOnFunctionCall();
- // A step-next at a tail call is a step-out.
- if (location.IsTailCall() && step_action == StepNext) step_action = StepOut;
// A step-next in blackboxed function is a step-out.
if (step_action == StepNext && IsBlackboxed(shared)) step_action = StepOut;
@@ -1075,7 +1048,7 @@ void Debug::PrepareStep(StepAction step_action) {
// and deoptimize every frame along the way.
bool in_current_frame = true;
for (; !frames_it.done(); frames_it.Advance()) {
- // TODO(clemensh): Implement stepping out from JS to WASM.
+ // TODO(clemensh): Implement stepping out from JS to wasm.
if (frames_it.frame()->is_wasm()) continue;
JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame());
if (last_step_action() == StepIn) {
@@ -1104,7 +1077,7 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.target_frame_count_ = current_frame_count;
// Fall through.
case StepIn:
- // TODO(clemensh): Implement stepping from JS into WASM.
+ // TODO(clemensh): Implement stepping from JS into wasm.
FloodWithOneShot(shared);
break;
}
@@ -1112,10 +1085,9 @@ void Debug::PrepareStep(StepAction step_action) {
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared,
- BreakPositionAlignment position_alignment) {
+ Handle<SharedFunctionInfo> shared) {
Isolate* isolate = shared->GetIsolate();
- if (!shared->HasDebugInfo()) {
+ if (!shared->HasBreakInfo()) {
return isolate->factory()->undefined_value();
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
@@ -1131,25 +1103,10 @@ Handle<Object> Debug::GetSourceBreakLocations(
BreakPointInfo::cast(debug_info->break_points()->get(i));
int break_points = break_point_info->GetBreakPointCount();
if (break_points == 0) continue;
- Smi* position = NULL;
- if (position_alignment == STATEMENT_ALIGNED) {
- if (debug_info->HasDebugCode()) {
- CodeBreakIterator it(debug_info);
- it.SkipToPosition(break_point_info->source_position(),
- BREAK_POSITION_ALIGNED);
- position = Smi::FromInt(it.statement_position());
- } else {
- DCHECK(debug_info->HasDebugBytecodeArray());
- BytecodeArrayBreakIterator it(debug_info);
- it.SkipToPosition(break_point_info->source_position(),
- BREAK_POSITION_ALIGNED);
- position = Smi::FromInt(it.statement_position());
- }
- } else {
- DCHECK_EQ(BREAK_POSITION_ALIGNED, position_alignment);
- position = Smi::FromInt(break_point_info->source_position());
+ for (int j = 0; j < break_points; ++j) {
+ locations->set(count++,
+ Smi::FromInt(break_point_info->source_position()));
}
- for (int j = 0; j < break_points; ++j) locations->set(count++, position);
}
}
return locations;
@@ -1234,7 +1191,6 @@ static Address ComputeNewPcForRedirect(Code* new_code, Code* old_code,
}
UNREACHABLE();
- return NULL;
}
@@ -1301,9 +1257,6 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
OptimizingCompileDispatcher::BlockingBehavior::kBlock);
}
- // The native context has a list of OSR'd optimized code. Clear it.
- isolate_->ClearOSROptimizedCode();
-
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
GarbageCollectionReason::kDebugger);
@@ -1361,10 +1314,10 @@ namespace {
template <typename Iterator>
void GetBreakablePositions(Iterator* it, int start_position, int end_position,
std::vector<BreakLocation>* locations) {
- it->SkipToPosition(start_position, BREAK_POSITION_ALIGNED);
- while (!it->Done() && it->position() < end_position &&
- it->position() >= start_position) {
- locations->push_back(it->GetBreakLocation());
+ while (!it->Done()) {
+ if (it->position() >= start_position && it->position() < end_position) {
+ locations->push_back(it->GetBreakLocation());
+ }
it->Next();
}
}
@@ -1394,7 +1347,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(result);
- if (!EnsureDebugInfo(shared)) return false;
+ if (!EnsureBreakInfo(shared)) return false;
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
FindBreakablePositions(debug_info, start_position, end_position, locations);
@@ -1427,12 +1380,12 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
was_compiled = true;
}
}
- if (!EnsureDebugInfo(candidates[i])) return false;
+ if (!EnsureBreakInfo(candidates[i])) return false;
}
if (was_compiled) continue;
for (int i = 0; i < candidates.length(); ++i) {
- CHECK(candidates[i]->HasDebugInfo());
+ CHECK(candidates[i]->HasBreakInfo());
Handle<DebugInfo> debug_info(candidates[i]->GetDebugInfo());
FindBreakablePositions(debug_info, start_position, end_position,
locations);
@@ -1440,7 +1393,6 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
return true;
}
UNREACHABLE();
- return false;
}
void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
@@ -1540,7 +1492,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// info while bypassing PrepareFunctionForBreakpoints.
if (iteration > 1) {
AllowHeapAllocation allow_before_return;
- CreateDebugInfo(shared_handle);
+ CreateBreakInfo(shared_handle);
}
return shared_handle;
}
@@ -1556,9 +1508,9 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
- // Return if we already have the debug info for shared.
- if (shared->HasDebugInfo()) return true;
+bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
+ // Return if we already have the break info for shared.
+ if (shared->HasBreakInfo()) return true;
if (!shared->IsSubjectToDebugging()) return false;
if (!shared->is_compiled() && !Compiler::CompileDebugCode(shared)) {
return false;
@@ -1567,49 +1519,124 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
// To prepare bytecode for debugging, we already need to have the debug
// info (containing the debug copy) upfront, but since we do not recompile,
// preparing for break points cannot fail.
- CreateDebugInfo(shared);
+ CreateBreakInfo(shared);
CHECK(PrepareFunctionForBreakPoints(shared));
return true;
}
+void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
+ HandleScope scope(isolate_);
+ Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
-void Debug::CreateDebugInfo(Handle<SharedFunctionInfo> shared) {
- // Create the debug info object.
- Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
+ // Initialize with break information.
+
+ DCHECK(!debug_info->HasBreakInfo());
+
+ Factory* factory = isolate_->factory();
+ Handle<FixedArray> break_points(
+ factory->NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction));
+
+ // Make a copy of the bytecode array if available.
+ Handle<Object> maybe_debug_bytecode_array = factory->undefined_value();
+ if (shared->HasBytecodeArray()) {
+ Handle<BytecodeArray> original(shared->bytecode_array());
+ maybe_debug_bytecode_array = factory->CopyBytecodeArray(original);
+ }
+
+ debug_info->set_flags(debug_info->flags() | DebugInfo::kHasBreakInfo);
+ debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
+ debug_info->set_break_points(*break_points);
+}
+
+Handle<DebugInfo> Debug::GetOrCreateDebugInfo(
+ Handle<SharedFunctionInfo> shared) {
+ if (shared->HasDebugInfo()) return handle(shared->GetDebugInfo());
- // Add debug info to the list.
+ // Create debug info and add it to the list.
+ Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
node->set_next(debug_info_list_);
debug_info_list_ = node;
+
+ return debug_info;
}
+void Debug::InstallCoverageInfo(Handle<SharedFunctionInfo> shared,
+ Handle<CoverageInfo> coverage_info) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK(!coverage_info.is_null());
+
+ Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
+
+ DCHECK(!debug_info->HasCoverageInfo());
-void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
+ debug_info->set_flags(debug_info->flags() | DebugInfo::kHasCoverageInfo);
+ debug_info->set_coverage_info(*coverage_info);
+}
+
+void Debug::RemoveAllCoverageInfos() {
+ DCHECK(FLAG_block_coverage);
+ ClearAllDebugInfos(
+ [=](Handle<DebugInfo> info) { return info->ClearCoverageInfo(); });
+}
+
+void Debug::FindDebugInfo(Handle<DebugInfo> debug_info,
+ DebugInfoListNode** prev, DebugInfoListNode** curr) {
HandleScope scope(isolate_);
- Handle<SharedFunctionInfo> shared(debug_info->shared());
+ *prev = nullptr;
+ *curr = debug_info_list_;
+ while (*curr != nullptr) {
+ if ((*curr)->debug_info().is_identical_to(debug_info)) return;
+ *prev = *curr;
+ *curr = (*curr)->next();
+ }
+
+ UNREACHABLE();
+}
- DCHECK_NOT_NULL(debug_info_list_);
- // Run through the debug info objects to find this one and remove it.
- DebugInfoListNode* prev = NULL;
+void Debug::ClearAllDebugInfos(DebugInfoClearFunction clear_function) {
+ DebugInfoListNode* prev = nullptr;
DebugInfoListNode* current = debug_info_list_;
- while (current != NULL) {
- if (current->debug_info().is_identical_to(debug_info)) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- debug_info_list_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- shared->set_debug_info(Smi::FromInt(debug_info->debugger_hints()));
- delete current;
- return;
+ while (current != nullptr) {
+ DebugInfoListNode* next = current->next();
+ Handle<DebugInfo> debug_info = current->debug_info();
+ if (clear_function(debug_info)) {
+ FreeDebugInfoListNode(prev, current);
+ current = next;
+ } else {
+ prev = current;
+ current = next;
}
- // Move to next in list.
- prev = current;
- current = current->next();
}
+}
- UNREACHABLE();
+void Debug::RemoveBreakInfoAndMaybeFree(Handle<DebugInfo> debug_info) {
+ bool should_unlink = debug_info->ClearBreakInfo();
+ if (should_unlink) {
+ DebugInfoListNode* prev;
+ DebugInfoListNode* node;
+ FindDebugInfo(debug_info, &prev, &node);
+ FreeDebugInfoListNode(prev, node);
+ }
+}
+
+void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
+ DebugInfoListNode* node) {
+ DCHECK(node->debug_info()->IsEmpty());
+
+ // Unlink from list. If prev is NULL we are looking at the first element.
+ if (prev == nullptr) {
+ debug_info_list_ = node->next();
+ } else {
+ prev->set_next(node->next());
+ }
+
+ // Pack debugger hints back into the SFI::debug_info field.
+ Handle<DebugInfo> debug_info(node->debug_info());
+ debug_info->shared()->set_debug_info(
+ Smi::FromInt(debug_info->debugger_hints()));
+
+ delete node;
}
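The list-management changes above hinge on DebugInfo now holding independently clearable pieces (break info, coverage info) behind a flags word: each Clear* call reports whether the object became empty, and only then is the list node unlinked and the hints packed back into the SharedFunctionInfo. A simplified stand-in (not V8's DebugInfo) of that lifetime model:

    #include <cstdint>

    // Simplified model of the flag-based lifetime used above: break info and
    // coverage info are cleared independently, and the holder is freed only
    // once no flag remains set.
    class DebugInfoModel {
     public:
      enum Flag : uint32_t { kHasBreakInfo = 1 << 0, kHasCoverageInfo = 1 << 1 };

      void SetBreakInfo() { flags_ |= kHasBreakInfo; }
      void SetCoverageInfo() { flags_ |= kHasCoverageInfo; }

      // Each Clear* returns true when the object became empty, mirroring how
      // ClearAllDebugInfos decides whether to free the corresponding node.
      bool ClearBreakInfo() {
        flags_ &= ~static_cast<uint32_t>(kHasBreakInfo);
        return flags_ == 0;
      }
      bool ClearCoverageInfo() {
        flags_ &= ~static_cast<uint32_t>(kHasCoverageInfo);
        return flags_ == 0;
      }

     private:
      uint32_t flags_ = 0;
    };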
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
@@ -1619,12 +1646,12 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
Handle<SharedFunctionInfo> shared(frame->function()->shared());
// With no debug info there are no break points, so we can't be at a return.
- if (!shared->HasDebugInfo()) return false;
+ if (!shared->HasBreakInfo()) return false;
DCHECK(!frame->is_optimized());
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
- return location.IsReturn() || location.IsTailCall();
+ return location.IsReturn();
}
void Debug::ScheduleFrameRestart(StackFrame* frame) {
@@ -2105,6 +2132,7 @@ void Debug::UpdateHookOnFunctionCall() {
}
MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
+ AllowJavascriptExecutionDebugOnly allow_script(isolate_);
DebugScope debug_scope(this);
if (debug_scope.failed()) return isolate_->factory()->undefined_value();
@@ -2202,9 +2230,8 @@ void Debug::PrintBreakLocation() {
int column = Script::GetColumnNumber(script, source_position) -
(line == 0 ? script->column_offset() : 0);
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int line_start =
- line == 0 ? 0 : Smi::cast(line_ends->get(line - 1))->value() + 1;
- int line_end = Smi::cast(line_ends->get(line))->value();
+ int line_start = line == 0 ? 0 : Smi::ToInt(line_ends->get(line - 1)) + 1;
+ int line_end = Smi::ToInt(line_ends->get(line));
DisallowHeapAllocation no_gc;
String::FlatContent content = source->GetFlatContent();
if (content.IsOneByte()) {
@@ -2227,8 +2254,8 @@ DebugScope::DebugScope(Debug* debug)
no_termination_exceptons_(debug_->isolate_,
StackGuard::TERMINATE_EXECUTION) {
// Link recursive debugger entry.
- base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
- reinterpret_cast<base::AtomicWord>(this));
+ base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
+ reinterpret_cast<base::AtomicWord>(this));
// Store the previous break id, frame id and return value.
break_id_ = debug_->break_id();
@@ -2252,8 +2279,8 @@ DebugScope::DebugScope(Debug* debug)
DebugScope::~DebugScope() {
// Leaving this debugger entry.
- base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
- reinterpret_cast<base::AtomicWord>(prev_));
+ base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
+ reinterpret_cast<base::AtomicWord>(prev_));
// Restore to the previous break state.
debug_->thread_local_.break_frame_id_ = break_frame_id_;
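
Both DebugScope hunks swap base::NoBarrier_Store/Load for base::Relaxed_Store/Load; the operation is the same relaxed atomic access, only the name now matches C++ memory-order terminology. A rough standard-library analogue (std::atomic stands in here for V8's internal base::atomicops layer):

#include <atomic>
#include <cstdint>

// Rough std::atomic analogue of base::Relaxed_Store / base::Relaxed_Load as
// used for thread_local_.current_debug_scope_: atomic, but with no ordering
// guarantees beyond atomicity of the access itself.
std::atomic<std::intptr_t> current_debug_scope{0};

void LinkScope(void* scope) {
  current_debug_scope.store(reinterpret_cast<std::intptr_t>(scope),
                            std::memory_order_relaxed);
}

void* CurrentScope() {
  return reinterpret_cast<void*>(
      current_debug_scope.load(std::memory_order_relaxed));
}

int main() {
  int dummy_scope = 0;
  LinkScope(&dummy_scope);
  return CurrentScope() == &dummy_scope ? 0 : 1;
}
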
@@ -2375,6 +2402,7 @@ JavaScriptDebugDelegate::~JavaScriptDebugDelegate() {
void JavaScriptDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
Handle<JSObject> exec_state) {
+ AllowJavascriptExecutionDebugOnly allow_script(isolate_);
Handle<Object> argv[] = {Handle<Object>(Smi::FromInt(event), isolate_),
exec_state, event_data, data_};
Handle<JSReceiver> global = isolate_->global_proxy();
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 5dad8a8ceb..f4c9528881 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -17,6 +17,7 @@
#include "src/flags.h"
#include "src/frames.h"
#include "src/globals.h"
+#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/source-position-table.h"
#include "src/string-stream.h"
@@ -49,20 +50,12 @@ enum ExceptionBreakType {
};
-// The different types of breakpoint position alignments.
-// Must match Debug.BreakPositionAlignment in debug.js
-enum BreakPositionAlignment {
- STATEMENT_ALIGNED = 0,
- BREAK_POSITION_ALIGNED = 1
-};
-
enum DebugBreakType {
NOT_DEBUG_BREAK,
DEBUGGER_STATEMENT,
DEBUG_BREAK_SLOT,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_RETURN,
- DEBUG_BREAK_SLOT_AT_TAIL_CALL,
};
enum IgnoreBreakMode {
@@ -81,9 +74,6 @@ class BreakLocation {
inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
- inline bool IsTailCall() const {
- return type_ == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
- }
inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
inline bool IsDebuggerStatement() const {
return type_ == DEBUGGER_STATEMENT;
@@ -148,7 +138,7 @@ class BreakIterator {
protected:
explicit BreakIterator(Handle<DebugInfo> debug_info);
- int BreakIndexFromPosition(int position, BreakPositionAlignment alignment);
+ int BreakIndexFromPosition(int position);
Isolate* isolate() { return debug_info_->GetIsolate(); }
@@ -175,12 +165,9 @@ class CodeBreakIterator : public BreakIterator {
void ClearDebugBreak() override;
void SetDebugBreak() override;
- void SkipToPosition(int position, BreakPositionAlignment alignment);
+ void SkipToPosition(int position);
- int code_offset() override {
- return static_cast<int>(rinfo()->pc() -
- debug_info_->DebugCode()->instruction_start());
- }
+ int code_offset() override;
private:
int GetModeMask();
@@ -207,7 +194,7 @@ class BytecodeArrayBreakIterator : public BreakIterator {
void ClearDebugBreak() override;
void SetDebugBreak() override;
- void SkipToPosition(int position, BreakPositionAlignment alignment);
+ void SkipToPosition(int position);
int code_offset() override { return source_position_iterator_.code_offset(); }
@@ -296,8 +283,7 @@ class Debug {
int* source_position);
bool SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
- int* source_position,
- BreakPositionAlignment alignment);
+ int* source_position);
void ClearBreakPoint(Handle<Object> break_point_object);
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
@@ -334,9 +320,13 @@ class Debug {
void SetDebugDelegate(debug::DebugDelegate* delegate, bool pass_ownership);
// Returns whether the operation succeeded.
- bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
- void CreateDebugInfo(Handle<SharedFunctionInfo> shared);
- static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
+ bool EnsureBreakInfo(Handle<SharedFunctionInfo> shared);
+ void CreateBreakInfo(Handle<SharedFunctionInfo> shared);
+ Handle<DebugInfo> GetOrCreateDebugInfo(Handle<SharedFunctionInfo> shared);
+
+ void InstallCoverageInfo(Handle<SharedFunctionInfo> shared,
+ Handle<CoverageInfo> coverage_info);
+ void RemoveAllCoverageInfos();
template <typename C>
bool CompileToRevealInnerFunctions(C* compilable);
@@ -346,8 +336,7 @@ class Debug {
int position);
static Handle<Object> GetSourceBreakLocations(
- Handle<SharedFunctionInfo> shared,
- BreakPositionAlignment position_aligment);
+ Handle<SharedFunctionInfo> shared);
// Check whether a global object is the debug global object.
bool IsDebugGlobal(JSGlobalObject* global);
@@ -381,7 +370,7 @@ class Debug {
// Flags and states.
DebugScope* debugger_entry() {
return reinterpret_cast<DebugScope*>(
- base::NoBarrier_Load(&thread_local_.current_debug_scope_));
+ base::Relaxed_Load(&thread_local_.current_debug_scope_));
}
inline Handle<Context> debug_context() { return debug_context_; }
@@ -393,7 +382,7 @@ class Debug {
inline bool is_active() const { return is_active_; }
inline bool is_loaded() const { return !debug_context_.is_null(); }
inline bool in_debug_scope() const {
- return !!base::NoBarrier_Load(&thread_local_.current_debug_scope_);
+ return !!base::Relaxed_Load(&thread_local_.current_debug_scope_);
}
void set_break_points_active(bool v) { break_points_active_ = v; }
bool break_points_active() const { return break_points_active_; }
@@ -481,8 +470,7 @@ class Debug {
void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
// Find the closest source position for a break point for a given position.
- int FindBreakablePosition(Handle<DebugInfo> debug_info, int source_position,
- BreakPositionAlignment alignment);
+ int FindBreakablePosition(Handle<DebugInfo> debug_info, int source_position);
// Instrument code to break at break points.
void ApplyBreakPoints(Handle<DebugInfo> debug_info);
// Clear code from instrumentation.
@@ -498,7 +486,6 @@ class Debug {
bool IsFrameBlackboxed(JavaScriptFrame* frame);
void ActivateStepOut(StackFrame* frame);
- void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
MaybeHandle<FixedArray> CheckBreakPoints(Handle<DebugInfo> debug_info,
BreakLocation* location,
bool* has_break_points = nullptr);
@@ -516,6 +503,15 @@ class Debug {
void PrintBreakLocation();
+ // Wraps logic for clearing and maybe freeing all debug infos.
+ typedef std::function<bool(Handle<DebugInfo>)> DebugInfoClearFunction;
+ void ClearAllDebugInfos(DebugInfoClearFunction clear_function);
+
+ void RemoveBreakInfoAndMaybeFree(Handle<DebugInfo> debug_info);
+ void FindDebugInfo(Handle<DebugInfo> debug_info, DebugInfoListNode** prev,
+ DebugInfoListNode** curr);
+ void FreeDebugInfoListNode(DebugInfoListNode* prev, DebugInfoListNode* node);
+
// Global handles.
Handle<Context> debug_context_;
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 6993274f09..0568efebfe 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -63,13 +63,6 @@ Debug.ScriptBreakPointType = { ScriptId: 0,
ScriptName: 1,
ScriptRegExp: 2 };
-// The different types of breakpoint position alignments.
-// Must match BreakPositionAlignment in debug.h.
-Debug.BreakPositionAlignment = {
- Statement: 0,
- BreakPosition: 1
-};
-
function ScriptTypeFlag(type) {
return (1 << type);
}
@@ -221,7 +214,7 @@ function IsBreakPointTriggered(break_id, break_point) {
// script name or script id and the break point is represented as line and
// column.
function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId, opt_position_alignment) {
+ opt_groupId) {
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
@@ -235,8 +228,6 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
this.line_ = opt_line || 0;
this.column_ = opt_column;
this.groupId_ = opt_groupId;
- this.position_alignment_ = IS_UNDEFINED(opt_position_alignment)
- ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
this.active_ = true;
this.condition_ = null;
this.break_points_ = [];
@@ -378,7 +369,6 @@ ScriptBreakPoint.prototype.set = function (script) {
// Create a break point object and set the break point.
var break_point = MakeBreakPoint(position, this);
var actual_position = %SetScriptBreakPoint(script, position,
- this.position_alignment_,
break_point);
if (IS_UNDEFINED(actual_position)) {
actual_position = position;
@@ -559,8 +549,7 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled,
- opt_position_alignment)
+ condition, enabled)
{
var break_point = MakeBreakPoint(position);
break_point.setCondition(condition);
@@ -569,10 +558,7 @@ Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
}
var script = scriptById(script_id);
if (script) {
- var position_alignment = IS_UNDEFINED(opt_position_alignment)
- ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
- break_point.actual_position = %SetScriptBreakPoint(script, position,
- position_alignment, break_point);
+ break_point.actual_position = %SetScriptBreakPoint(script, position, break_point);
}
return break_point;
};
@@ -654,11 +640,11 @@ Debug.findScriptBreakPoint = function(break_point_number, remove) {
// specified source line and column within that line.
Debug.setScriptBreakPoint = function(type, script_id_or_name,
opt_line, opt_column, opt_condition,
- opt_groupId, opt_position_alignment) {
+ opt_groupId) {
// Create script break point object.
var script_break_point =
new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId, opt_position_alignment);
+ opt_groupId);
// Assign number to the new script break point and add it.
script_break_point.number_ = next_break_point_number++;
@@ -680,12 +666,10 @@ Debug.setScriptBreakPoint = function(type, script_id_or_name,
Debug.setScriptBreakPointById = function(script_id,
opt_line, opt_column,
- opt_condition, opt_groupId,
- opt_position_alignment) {
+ opt_condition, opt_groupId) {
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
script_id, opt_line, opt_column,
- opt_condition, opt_groupId,
- opt_position_alignment);
+ opt_condition, opt_groupId);
};
@@ -759,13 +743,11 @@ Debug.isBreakOnUncaughtException = function() {
return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
};
-Debug.showBreakPoints = function(f, full, opt_position_alignment) {
+Debug.showBreakPoints = function(f, full) {
if (!IS_FUNCTION(f)) throw %make_error(kDebuggerType);
var source = full ? this.scriptSource(f) : this.source(f);
var offset = full ? 0 : this.sourcePosition(f);
- var position_alignment = IS_UNDEFINED(opt_position_alignment)
- ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
- var locations = %GetBreakLocations(f, position_alignment);
+ var locations = %GetBreakLocations(f);
if (!locations) return source;
locations.sort(function(x, y) { return x - y; });
var result = "";
@@ -1018,7 +1000,7 @@ utils.InstallConstants(global, [
]);
// Functions needed by the debugger runtime.
-utils.InstallFunctions(utils, DONT_ENUM, [
+utils.InstallConstants(utils, [
"MakeExecutionState", MakeExecutionState,
"MakeExceptionEvent", MakeExceptionEvent,
"MakeBreakEvent", MakeBreakEvent,
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 265cd7559c..8cd2d63d04 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -106,33 +106,67 @@ class ConsoleCallArguments : private v8::FunctionCallbackInfo<v8::Value> {
explicit ConsoleCallArguments(internal::BuiltinArguments&);
};
-// v8::FunctionCallbackInfo could be used for getting arguments only. Calling
-// of any other getter will produce a crash.
+class ConsoleContext {
+ public:
+ ConsoleContext(int id, v8::Local<v8::String> name) : id_(id), name_(name) {}
+ ConsoleContext() : id_(0) {}
+
+ int id() const { return id_; }
+ v8::Local<v8::String> name() const { return name_; }
+
+ private:
+ int id_;
+ v8::Local<v8::String> name_;
+};
+
class ConsoleDelegate {
public:
- virtual void Debug(const ConsoleCallArguments& args) {}
- virtual void Error(const ConsoleCallArguments& args) {}
- virtual void Info(const ConsoleCallArguments& args) {}
- virtual void Log(const ConsoleCallArguments& args) {}
- virtual void Warn(const ConsoleCallArguments& args) {}
- virtual void Dir(const ConsoleCallArguments& args) {}
- virtual void DirXml(const ConsoleCallArguments& args) {}
- virtual void Table(const ConsoleCallArguments& args) {}
- virtual void Trace(const ConsoleCallArguments& args) {}
- virtual void Group(const ConsoleCallArguments& args) {}
- virtual void GroupCollapsed(const ConsoleCallArguments& args) {}
- virtual void GroupEnd(const ConsoleCallArguments& args) {}
- virtual void Clear(const ConsoleCallArguments& args) {}
- virtual void Count(const ConsoleCallArguments& args) {}
- virtual void Assert(const ConsoleCallArguments& args) {}
- virtual void MarkTimeline(const ConsoleCallArguments& args) {}
- virtual void Profile(const ConsoleCallArguments& args) {}
- virtual void ProfileEnd(const ConsoleCallArguments& args) {}
- virtual void Timeline(const ConsoleCallArguments& args) {}
- virtual void TimelineEnd(const ConsoleCallArguments& args) {}
- virtual void Time(const ConsoleCallArguments& args) {}
- virtual void TimeEnd(const ConsoleCallArguments& args) {}
- virtual void TimeStamp(const ConsoleCallArguments& args) {}
+ virtual void Debug(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Error(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Info(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Log(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Warn(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Dir(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void DirXml(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Table(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Trace(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Group(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void GroupCollapsed(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void GroupEnd(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Clear(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Count(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Assert(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void MarkTimeline(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Profile(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void ProfileEnd(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Timeline(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void TimelineEnd(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void Time(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void TimeEnd(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
+ virtual void TimeStamp(const ConsoleCallArguments& args,
+ const ConsoleContext& context) {}
virtual ~ConsoleDelegate() = default;
};
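
Every ConsoleDelegate hook now takes a second ConsoleContext argument, so an embedder can tell which console context (id plus name) issued the call. A hedged sketch of a delegate override against the new signature; the types below are local stand-ins that mirror the declarations above, where a real embedder would include V8's debug-interface header instead:

#include <iostream>
#include <string>
#include <utility>

// Local stand-ins mirroring the shapes declared above (assumption: real code
// would use the classes from V8's debug interface directly).
struct ConsoleCallArguments {};

class ConsoleContext {
 public:
  ConsoleContext(int id, std::string name) : id_(id), name_(std::move(name)) {}
  int id() const { return id_; }
  const std::string& name() const { return name_; }

 private:
  int id_;
  std::string name_;
};

class ConsoleDelegate {
 public:
  virtual void Log(const ConsoleCallArguments& args,
                   const ConsoleContext& context) {}
  virtual ~ConsoleDelegate() = default;
};

// The extra parameter lets the delegate attribute console output to a
// specific context (e.g. a worker or extension context in the inspector).
class StdoutConsole : public ConsoleDelegate {
 public:
  void Log(const ConsoleCallArguments& args,
           const ConsoleContext& context) override {
    (void)args;
    std::cout << "console.log from context #" << context.id() << " ("
              << context.name() << ")\n";
  }
};

int main() {
  StdoutConsole console;
  console.Log(ConsoleCallArguments{}, ConsoleContext(1, "worker"));
  return 0;
}
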
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 6dab0028c8..31d76d7bd0 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -437,7 +437,7 @@ class LineEndsWrapper {
int string_len_;
int GetPosAfterNewLine(int index) {
- return Smi::cast(ends_array_->get(index))->value() + 1;
+ return Smi::ToInt(ends_array_->get(index)) + 1;
}
};
@@ -603,7 +603,7 @@ static Handle<SharedFunctionInfo> UnwrapSharedFunctionInfoFromJSValue(
static int GetArrayLength(Handle<JSArray> array) {
Object* length = array->length();
CHECK(length->IsSmi());
- return Smi::cast(length)->value();
+ return Smi::ToInt(length);
}
void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
@@ -910,9 +910,9 @@ void LiveEdit::ReplaceFunctionCode(
}
}
- if (shared_info->HasDebugInfo()) {
+ if (shared_info->HasBreakInfo()) {
// Existing break points will be re-applied. Reset the debug info here.
- isolate->debug()->RemoveDebugInfoAndClearFromShared(
+ isolate->debug()->RemoveBreakInfoAndMaybeFree(
handle(shared_info->GetDebugInfo()));
}
shared_info->set_scope_info(new_shared_info->scope_info());
@@ -1073,9 +1073,9 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<AbstractCode>(AbstractCode::cast(info->code())),
position_change_array);
}
- if (info->HasDebugInfo()) {
+ if (info->HasBreakInfo()) {
// Existing break points will be re-applied. Reset the debug info here.
- info->GetIsolate()->debug()->RemoveDebugInfoAndClearFromShared(
+ info->GetIsolate()->debug()->RemoveBreakInfoAndMaybeFree(
handle(info->GetDebugInfo()));
}
}
@@ -1243,8 +1243,7 @@ class MultipleFunctionTarget {
Handle<Object> old_element =
JSReceiver::GetElement(isolate, result_, i).ToHandleChecked();
if (!old_element->IsSmi() ||
- Smi::cast(*old_element)->value() ==
- LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {
+ Smi::ToInt(*old_element) == LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {
SetElementSloppy(result_, i,
Handle<Smi>(Smi::FromInt(status), isolate));
}
@@ -1596,7 +1595,7 @@ void LiveEditFunctionTracker::VisitFunctionLiteral(FunctionLiteral* node) {
void LiveEditFunctionTracker::FunctionStarted(FunctionLiteral* fun) {
HandleScope handle_scope(isolate_);
FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate_);
- info.SetInitialProperties(fun->name(), fun->start_position(),
+ info.SetInitialProperties(fun->name(isolate_), fun->start_position(),
fun->end_position(), fun->parameter_count(),
current_parent_index_, fun->function_literal_id());
current_parent_index_ = len_;
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index fded965462..55ff7e26d2 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -130,7 +130,7 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a0,
+ __ Lw(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a2, a0);
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 8096f2c54e..534e354988 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -2364,11 +2364,8 @@ ContextMirror.prototype.data = function() {
// ----------------------------------------------------------------------------
// Exports
-utils.InstallFunctions(global, DONT_ENUM, [
- "MakeMirror", MakeMirror,
-]);
-
utils.InstallConstants(global, [
+ "MakeMirror", MakeMirror,
"ScopeType", ScopeType,
"PropertyType", PropertyType,
"PropertyAttribute", PropertyAttribute,
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 63689dedab..3167b245bf 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -117,8 +117,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ leave();
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movsxlq(
+ rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount dummy(rbx);
__ InvokeFunction(rdi, no_reg, dummy, dummy, JUMP_FUNCTION,
diff --git a/deps/v8/src/debug/x87/OWNERS b/deps/v8/src/debug/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/debug/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
deleted file mode 100644
index 8810f01f42..0000000000
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/debug/debug.h"
-
-#include "src/codegen.h"
-#include "src/debug/liveedit.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void EmitDebugBreakSlot(MacroAssembler* masm) {
- Label check_codesize;
- __ bind(&check_codesize);
- __ Nop(Assembler::kDebugBreakSlotLength);
- DCHECK_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
- // Generate enough nop's to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode);
- EmitDebugBreakSlot(masm);
-}
-
-
-void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
- CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
- EmitDebugBreakSlot(patcher.masm());
-}
-
-
-void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
- Handle<Code> code) {
- DCHECK(code->is_debug_stub());
- static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(isolate, pc, kSize);
-
- // Add a label for checking the size of the code used for returning.
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
- patcher.masm()->call(code->entry(), RelocInfo::NONE32);
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
- return !Assembler::IsNop(pc);
-}
-
-void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
- DebugBreakCallHelperMode mode) {
- __ RecordComment("Debug break");
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-
- // Push arguments for DebugBreak call.
- if (mode == SAVE_RESULT_REGISTER) {
- // Break on return.
- __ push(eax);
- } else {
- // Non-return breaks.
- __ Push(masm->isolate()->factory()->the_hole_value());
- }
- __ Move(eax, Immediate(1));
- __ mov(ebx,
- Immediate(ExternalReference(
- Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- if (FLAG_debug_code) {
- for (int i = 0; i < kNumJSCallerSaved; ++i) {
- Register reg = {JSCallerSavedCode(i)};
- // Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
- // contain return value of the function.
- if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
- __ Move(reg, Immediate(kDebugZapValue));
- }
- }
- }
-
- __ pop(ebx);
- // We divide stored value by 2 (untagging) and multiply it by word's size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // This call did not replace a call , so there will be an unwanted
- // return address left on the stack. Here we get rid of that.
- __ add(esp, Immediate(kPointerSize));
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
- __ pop(edi); // Function.
- __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL
- // frame
- // marker
- // and code
- __ pop(ebp);
-
- ParameterCount dummy(0);
- __ CheckDebugHook(edi, no_reg, dummy, dummy);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Clear new.target register as a safety measure.
- __ mov(edx, masm->isolate()->factory()->undefined_value());
-
- // Get function code.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(ebx);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/deoptimize-reason.cc b/deps/v8/src/deoptimize-reason.cc
index b0ee780070..733fdfd883 100644
--- a/deps/v8/src/deoptimize-reason.cc
+++ b/deps/v8/src/deoptimize-reason.cc
@@ -16,7 +16,6 @@ std::ostream& operator<<(std::ostream& os, DeoptimizeReason reason) {
#undef DEOPTIMIZE_REASON
}
UNREACHABLE();
- return os;
}
size_t hash_value(DeoptimizeReason reason) {
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index 8b93839e10..ca986aafd3 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -24,6 +24,8 @@ namespace internal {
V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
V(InsufficientTypeFeedbackForCallWithArguments, \
"Insufficient type feedback for call with arguments") \
+ V(InsufficientTypeFeedbackForConstruct, \
+ "Insufficient type feedback for construct") \
V(FastPathFailed, "Falling off the fast path") \
V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
"Insufficient type feedback for combined type of binary operation") \
@@ -49,6 +51,7 @@ namespace internal {
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotASmi, "not a Smi") \
+ V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
V(OutsideOfRange, "Outside of range") \
V(Overflow, "overflow") \
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index d7c5006686..63597e7106 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -16,6 +16,7 @@
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
#include "src/macro-assembler.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -116,8 +117,8 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
int counter = jsframe_index;
for (auto it = translated_values.begin(); it != translated_values.end();
it++) {
- if (it->kind() == TranslatedFrame::kFunction ||
- it->kind() == TranslatedFrame::kInterpretedFunction) {
+ if (it->kind() == TranslatedFrame::kInterpretedFunction ||
+ it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
if (counter == 0) {
frame_it = it;
break;
@@ -126,6 +127,9 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
}
}
CHECK(frame_it != translated_values.end());
+ // We only include kJavaScriptBuiltinContinuation frames above to get the
+ // counting right.
+ CHECK_EQ(frame_it->kind(), TranslatedFrame::kInterpretedFunction);
DeoptimizedFrameInfo* info =
new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);
@@ -244,6 +248,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
shared->increment_deopt_count();
code->set_deopt_already_counted(true);
}
+
function->set_code(shared->code());
if (FLAG_trace_deopt) {
@@ -283,17 +288,19 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
}
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
+
// Turbofan deopt is checked when we are patching addresses on stack.
- bool turbofanned =
- code->is_turbofanned() && function->shared()->asm_function();
- bool safe_to_deopt =
- deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
- bool builtin = code->kind() == Code::BUILTIN;
- CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned ||
- builtin);
+ bool is_non_deoptimizing_asm_code =
+ code->is_turbofanned() && !function->shared()->HasBytecodeArray();
+ bool safe_if_deopt_triggered =
+ deopt_index != Safepoint::kNoDeoptimizationIndex ||
+ is_non_deoptimizing_asm_code;
+ bool is_builtin_code = code->kind() == Code::BUILTIN;
+ CHECK(topmost_optimized_code == NULL || safe_if_deopt_triggered ||
+ is_non_deoptimizing_asm_code || is_builtin_code);
if (topmost_optimized_code == NULL) {
topmost_optimized_code = code;
- safe_to_deopt_topmost_optimized_code = safe_to_deopt;
+ safe_to_deopt_topmost_optimized_code = safe_if_deopt_triggered;
}
}
}
@@ -347,13 +354,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
#endif
// It is finally time to die, code object.
- // Remove the code from the osr optimized code cache.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(codes[i]->deoptimization_data());
- if (deopt_data->OsrAstId()->value() != BailoutId::None().ToInt()) {
- isolate->EvictOSROptimizedCode(codes[i], "deoptimized code");
- }
-
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
@@ -424,6 +424,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (code == nullptr) code = function->code();
+
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
// refer to that code. The code cannot be shared across native contexts,
@@ -467,7 +468,6 @@ CodeEventListener::DeoptKind DeoptKindOfBailoutType(
return CodeEventListener::kLazy;
}
UNREACHABLE();
- return CodeEventListener::kEager;
}
} // namespace
@@ -524,9 +524,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
- if (function != nullptr && function->IsOptimized() &&
- (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
- !compiled_code_->deopt_already_counted())) {
+ if (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
+ !compiled_code_->deopt_already_counted()) {
// If the function is optimized, and we haven't counted that deopt yet, then
// increment the function's deopt count so that we can avoid optimising
// functions that deopt too often.
@@ -535,7 +534,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
- } else {
+ } else if (function != nullptr) {
function->shared()->increment_deopt_count();
}
}
@@ -630,30 +629,6 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
}
-int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
- BailoutId id,
- SharedFunctionInfo* shared) {
- // TODO(kasperl): For now, we do a simple linear search for the PC
- // offset associated with the given node id. This should probably be
- // changed to a binary search.
- int length = data->DeoptPoints();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == id) {
- return data->PcAndState(i)->value();
- }
- }
- OFStream os(stderr);
- os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
- << "[method: " << shared->DebugName()->ToCString().get() << "]\n"
- << "[source:\n" << SourceCodeOf(shared) << "\n]" << std::endl;
-
- shared->GetHeap()->isolate()->PushStackTraceAndDie(0xfefefefe, data, shared,
- 0xfefefeff);
- FATAL("unable to find pc offset during deoptimization");
- return -1;
-}
-
-
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
// Count all entries in the deoptimizing code list of every context.
@@ -676,17 +651,6 @@ namespace {
int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
- case TranslatedFrame::kFunction: {
-#ifdef DEBUG
- JSFunction* function =
- JSFunction::cast(translated_frame->begin()->GetRawValue());
- Code* non_optimized_code = function->shared()->code();
- HandlerTable* table =
- HandlerTable::cast(non_optimized_code->handler_table());
- DCHECK_EQ(0, table->NumberOfRangeEntries());
-#endif
- break;
- }
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
JSFunction* function =
@@ -751,7 +715,7 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- BailoutId node_id = input_data->AstId(bailout_id_);
+ BailoutId node_id = input_data->BytecodeOffset(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
@@ -796,11 +760,6 @@ void Deoptimizer::DoComputeOutputFrames() {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
switch (translated_frame->kind()) {
- case TranslatedFrame::kFunction:
- DoComputeJSFrame(translated_frame, frame_index,
- deoptimizing_throw_ && i == count - 1);
- jsframe_count_++;
- break;
case TranslatedFrame::kInterpretedFunction:
DoComputeInterpretedFrame(translated_frame, frame_index,
deoptimizing_throw_ && i == count - 1);
@@ -809,12 +768,6 @@ void Deoptimizer::DoComputeOutputFrames() {
case TranslatedFrame::kArgumentsAdaptor:
DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
break;
- case TranslatedFrame::kTailCallerFunction:
- DoComputeTailCallerFrame(translated_frame, frame_index);
- // Tail caller frame translations do not produce output frames.
- frame_index--;
- output_count_--;
- break;
case TranslatedFrame::kConstructStub:
DoComputeConstructStubFrame(translated_frame, frame_index);
break;
@@ -824,8 +777,11 @@ void Deoptimizer::DoComputeOutputFrames() {
case TranslatedFrame::kSetter:
DoComputeAccessorStubFrame(translated_frame, frame_index, true);
break;
- case TranslatedFrame::kCompiledStub:
- DoComputeCompiledStubFrame(translated_frame, frame_index);
+ case TranslatedFrame::kBuiltinContinuation:
+ DoComputeBuiltinContinuation(translated_frame, frame_index, false);
+ break;
+ case TranslatedFrame::kJavaScriptBuiltinContinuation:
+ DoComputeBuiltinContinuation(translated_frame, frame_index, true);
break;
case TranslatedFrame::kInvalid:
FATAL("invalid frame");
@@ -850,248 +806,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
-void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
- int frame_index, bool goto_catch_handler) {
- SharedFunctionInfo* shared = translated_frame->raw_shared_info();
-
- TranslatedFrame::iterator value_iterator = translated_frame->begin();
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- int input_index = 0;
-
- BailoutId node_id = translated_frame->node_id();
- unsigned height =
- translated_frame->height() - 1; // Do not count the context.
- unsigned height_in_bytes = height * kPointerSize;
- if (goto_catch_handler) {
- // Take the stack height from the handler table.
- height = catch_handler_data_;
- // We also make space for the exception itself.
- height_in_bytes = (height + 1) * kPointerSize;
- CHECK(is_topmost);
- }
-
- JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
- value_iterator++;
- input_index++;
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(), " translating frame ");
- std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
- PrintF(trace_scope_->file(), "%s", name.get());
- PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
- height_in_bytes, goto_catch_handler ? " (throw)" : "");
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- int parameter_count = shared->internal_formal_parameter_count() + 1;
- FrameDescription* output_frame = new (output_frame_size)
- FrameDescription(output_frame_size, parameter_count);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- CHECK(frame_index >= 0 && frame_index < output_count_);
- CHECK_NULL(output_[frame_index]);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address;
- if (is_bottommost) {
- top_address = caller_frame_top_ - output_frame_size;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
- output_offset);
- }
-
- if (trace_scope_ != nullptr) {
- PrintF(trace_scope_->file(), " -------------------------\n");
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPCOnStackSize;
- intptr_t value;
- if (is_bottommost) {
- value = caller_pc_;
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetCallerPc(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kFPOnStackSize;
- if (is_bottommost) {
- value = caller_fp_;
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetCallerFp(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- Register fp_reg = JavaScriptFrame::fp_register();
- output_frame->SetRegister(fp_reg.code(), fp_value);
- }
- DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
-
- if (FLAG_enable_embedded_constant_pool) {
- // For the bottommost output frame the constant pool pointer can be gotten
- // from the input frame. For subsequent output frames, it can be read from
- // the previous frame.
- output_offset -= kPointerSize;
- if (is_bottommost) {
- value = caller_constant_pool_;
- } else {
- value = output_[frame_index - 1]->GetConstantPool();
- }
- output_frame->SetCallerConstantPool(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset,
- "caller's constant_pool\n");
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
-
- // When deoptimizing into a catch block, we need to take the context
- // from just above the top of the operand stack (we push the context
- // at the entry of the try block).
- TranslatedFrame::iterator context_pos = value_iterator;
- int context_input_index = input_index;
- if (goto_catch_handler) {
- for (unsigned i = 0; i < height + 1; ++i) {
- context_pos++;
- context_input_index++;
- }
- }
- // Read the context from the translations.
- Object* context = context_pos->GetRawValue();
- if (context->IsUndefined(isolate_)) {
- // If the context was optimized away, just use the context from
- // the activation. This should only apply to Crankshaft code.
- CHECK(!compiled_code_->is_turbofanned());
- context = is_bottommost ? reinterpret_cast<Object*>(input_frame_context_)
- : function->context();
- }
- value = reinterpret_cast<intptr_t>(context);
- output_frame->SetContext(value);
- WriteValueToOutput(context, context_input_index, frame_index, output_offset,
- "context ");
- if (context == isolate_->heap()->arguments_marker()) {
- Address output_address =
- reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
- output_offset;
- values_to_materialize_.push_back({output_address, context_pos});
- }
- value_iterator++;
- input_index++;
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
-
- if (trace_scope_ != nullptr) {
- PrintF(trace_scope_->file(), " -------------------------\n");
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
- output_offset);
- }
- if (goto_catch_handler) {
- // Write out the exception for the catch handler.
- output_offset -= kPointerSize;
- Object* exception_obj = reinterpret_cast<Object*>(
- input_->GetRegister(FullCodeGenerator::result_register().code()));
- WriteValueToOutput(exception_obj, input_index, frame_index, output_offset,
- "exception ");
- input_index++;
- }
- CHECK_EQ(0u, output_offset);
-
- // Update constant pool.
- Code* non_optimized_code = shared->code();
- if (FLAG_enable_embedded_constant_pool) {
- intptr_t constant_pool_value =
- reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
- output_frame->SetConstantPool(constant_pool_value);
- if (is_topmost) {
- Register constant_pool_reg =
- JavaScriptFrame::constant_pool_pointer_register();
- output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
- }
- }
-
- // Compute this frame's PC and state.
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = goto_catch_handler
- ? catch_handler_pc_offset_
- : FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- // If we are going to the catch handler, then the exception lives in
- // the accumulator.
- BailoutState state =
- goto_catch_handler
- ? BailoutState::TOS_REGISTER
- : FullCodeGenerator::BailoutStateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(static_cast<int>(state)));
-
- // Clear the context register. The context might be a de-materialized object
- // and will be materialized by {Runtime_NotifyDeoptimized}. For additional
- // safety we use Smi(0) instead of the potential {arguments_marker} here.
- if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
- Register context_reg = JavaScriptFrame::context_register();
- output_frame->SetRegister(context_reg.code(), context_value);
- }
-
- // Set the continuation for the topmost frame.
- if (is_topmost) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
- if (bailout_type_ == LAZY) {
- continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- } else if (bailout_type_ == SOFT) {
- continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
- } else {
- CHECK_EQ(bailout_type_, EAGER);
- }
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
-}
-
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index,
bool goto_catch_handler) {
@@ -1263,7 +977,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Set the bytecode array pointer.
output_offset -= kPointerSize;
- Object* bytecode_array = shared->HasDebugInfo()
+ Object* bytecode_array = shared->HasBreakInfo()
? shared->GetDebugInfo()->DebugBytecodeArray()
: shared->bytecode_array();
WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
@@ -1271,13 +985,17 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
+
int raw_bytecode_offset =
BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
- WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
- "bytecode offset ");
+ output_[frame_index]->SetFrameSlot(
+ output_offset, reinterpret_cast<intptr_t>(smi_bytecode_offset));
if (trace_scope_ != nullptr) {
+ DebugPrintOutputSlot(reinterpret_cast<intptr_t>(smi_bytecode_offset),
+ frame_index, output_offset, "bytecode offset @ ");
+ PrintF(trace_scope_->file(), "%d\n", bytecode_offset);
PrintF(trace_scope_->file(), " -------------------------\n");
}
@@ -1348,10 +1066,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
- }
-
- // Set the continuation for the topmost frame.
- if (is_topmost) {
+ // Set the continuation for the topmost frame.
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
if (bailout_type_ == LAZY) {
continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
@@ -1486,70 +1201,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
}
-void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
- int frame_index) {
- SharedFunctionInfo* shared = translated_frame->raw_shared_info();
-
- bool is_bottommost = (0 == frame_index);
- // Tail caller frame can't be topmost.
- CHECK_NE(output_count_ - 1, frame_index);
-
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(), " translating tail caller frame ");
- std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
- PrintF(trace_scope_->file(), "%s\n", name.get());
- }
-
- if (!is_bottommost) return;
-
- // Drop arguments adaptor frame below current frame if it exsits.
- Address fp_address = input_->GetFramePointerAddress();
- Address adaptor_fp_address =
- Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);
-
- if (StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR) !=
- Memory::intptr_at(adaptor_fp_address +
- CommonFrameConstants::kContextOrFrameTypeOffset)) {
- return;
- }
-
- int caller_params_count =
- Smi::cast(
- Memory::Object_at(adaptor_fp_address +
- ArgumentsAdaptorFrameConstants::kLengthOffset))
- ->value();
-
- int callee_params_count =
- function_->shared()->internal_formal_parameter_count();
-
- // Both caller and callee parameters count do not include receiver.
- int offset = (caller_params_count - callee_params_count) * kPointerSize;
- intptr_t new_stack_fp =
- reinterpret_cast<intptr_t>(adaptor_fp_address) + offset;
-
- intptr_t new_caller_frame_top = new_stack_fp +
- (callee_params_count + 1) * kPointerSize +
- CommonFrameConstants::kFixedFrameSizeAboveFp;
-
- intptr_t adaptor_caller_pc = Memory::intptr_at(
- adaptor_fp_address + CommonFrameConstants::kCallerPCOffset);
- intptr_t adaptor_caller_fp = Memory::intptr_at(
- adaptor_fp_address + CommonFrameConstants::kCallerFPOffset);
-
- if (trace_scope_ != NULL) {
- PrintF(trace_scope_->file(),
- " dropping caller arguments adaptor frame: offset=%d, "
- "fp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR
- ", "
- "caller sp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR "\n",
- offset, stack_fp_, new_stack_fp, caller_frame_top_,
- new_caller_frame_top);
- }
- caller_frame_top_ = new_caller_frame_top;
- caller_fp_ = adaptor_caller_fp;
- caller_pc_ = adaptor_caller_pc;
-}
-
void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1931,237 +1582,296 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
}
}
-void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (FP) | | saved frame (FP) |
- // | +=========================+<-fpreg +=========================+<-fpreg
- // | |constant pool (if ool_cp)| |constant pool (if ool_cp)|
- // | +-------------------------+ +-------------------------|
- // | | JSFunction context | | JSFunction context |
- // v +-------------------------+ +-------------------------|
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-spreg +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-spreg
- // reg = number of parameters
- // reg = failure handler address
- // reg = saved frame
- // reg = JSFunction context
- //
- // Caller stack params contain the register parameters to the stub first,
- // and then, if the descriptor specifies a constant number of stack
- // parameters, the stack parameters as well.
-
+// BuiltinContinuationFrames capture the machine state that is expected as input
+// to a builtin, including both input register values and stack parameters. When
+// the frame is reactivated (i.e. the frame below it returns), a
+// ContinueToBuiltin stub restores the register state from the frame and tail
+// calls to the actual target builtin, making it appear that the stub had been
+// directly called by the frame above it. The input values to populate the frame
+// are taken from the deopt's FrameState.
+//
+// Frame translation happens in two modes, EAGER and LAZY. In EAGER mode, all of
+// the parameters to the Builtin are explicitly specified in the TurboFan
+// FrameState node. In LAZY mode, there is always one fewer parameter specified
+// in the FrameState than expected by the Builtin. In that case, construction of
+// the BuiltinContinuationFrame adds the final missing parameter during
+// deoptimization, and that parameter is always on the stack and contains the
+// value returned from the callee of the call site triggering the LAZY deopt
+// (e.g. rax on x64). This requires that continuation Builtins for LAZY deopts
+// must have at least one stack parameter.
+//
+// TO
+// | .... |
+// +-------------------------+
+// | builtin param 0 |<- FrameState input value n becomes
+// +-------------------------+
+// | ... |
+// +-------------------------+
+// | builtin param m |<- FrameState input value n+m-1, or in
+// +-------------------------+ the LAZY case, return LAZY result value
+// | ContinueToBuiltin entry |
+// +-------------------------+
+// | | saved frame (FP) |
+// | +=========================+<- fpreg
+// | |constant pool (if ool_cp)|
+// v +-------------------------+
+// |BUILTIN_CONTINUATION mark|
+// +-------------------------+
+// | JS Builtin code object |
+// +-------------------------+
+// | builtin input GPR reg0 |<- populated from deopt FrameState using
+// +-------------------------+ the builtin's CallInterfaceDescriptor
+// | ... | to map a FrameState's 0..n-1 inputs to
+// +-------------------------+ the builtin's n input register params.
+// | builtin input GPR regn |
+// |-------------------------|<- spreg
+//
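
One consequence of the LAZY rule described above shows up in the stack-parameter arithmetic of DoComputeBuiltinContinuation: the frame reserves height minus register-parameter-count minus one stack slots (the minus one being the context, which travels in a register), plus one extra slot for the callee's return value when the deopt is LAZY. A self-contained restatement of that arithmetic with illustrative numbers (the values are made up, not taken from a specific builtin):

#include <cassert>

// Mirrors the stack_param_count computation in the function below; the inputs
// here are illustrative only.
int StackParamCount(int frame_state_height, int register_param_count,
                    bool lazy_deopt_result_on_stack) {
  // One FrameState input is the context, which is passed in a register.
  int stack_params = frame_state_height - register_param_count - 1;
  // A LAZY-deopt continuation also receives the callee's return value as one
  // additional stack parameter that was never part of the FrameState.
  if (lazy_deopt_result_on_stack) stack_params++;
  return stack_params;
}

int main() {
  // E.g. a FrameState with 7 inputs feeding a builtin with 3 register params:
  assert(StackParamCount(7, 3, /*lazy_deopt_result_on_stack=*/false) == 3);
  assert(StackParamCount(7, 3, /*lazy_deopt_result_on_stack=*/true) == 4);
  return 0;
}
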
+void Deoptimizer::DoComputeBuiltinContinuation(
+ TranslatedFrame* translated_frame, int frame_index,
+ bool java_script_builtin) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
int input_index = 0;
- CHECK(compiled_code_->is_hydrogen_stub());
- int major_key = CodeStub::GetMajorKey(compiled_code_);
- CodeStubDescriptor descriptor(isolate_, compiled_code_->stub_key());
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int param_count = descriptor.GetRegisterParameterCount();
- int stack_param_count = descriptor.GetStackParameterCount();
- // The translated frame contains all of the register parameters
- // plus the context.
- CHECK_EQ(translated_frame->height(), param_count + 1);
- CHECK_GE(param_count, 0);
-
- int height_in_bytes = kPointerSize * (param_count + stack_param_count);
- int fixed_frame_size = StubFailureTrampolineFrameConstants::kFixedFrameSize;
- int output_frame_size = height_in_bytes + fixed_frame_size;
+ // The output frame must have room for all of the parameters that need to be
+ // passed to the builtin continuation.
+ int height_in_words = translated_frame->height();
+
+ BailoutId bailout_id = translated_frame->node_id();
+ Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
+ Code* builtin = isolate()->builtins()->builtin(builtin_name);
+ Callable continuation_callable =
+ Builtins::CallableFor(isolate(), builtin_name);
+ CallInterfaceDescriptor continuation_descriptor =
+ continuation_callable.descriptor();
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ bool must_handle_result = !is_topmost || bailout_type_ == LAZY;
+
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ int register_parameter_count =
+ continuation_descriptor.GetRegisterParameterCount();
+ // Make sure to account for the context by removing it from the register
+ // parameter count.
+ int stack_param_count = height_in_words - register_parameter_count - 1;
+ if (must_handle_result) stack_param_count++;
+ int output_frame_size =
+ kPointerSize * (stack_param_count + allocatable_register_count) +
+ TYPED_FRAME_SIZE(2); // For destination builtin code and registers
+
+ // Validate types of parameters. They must all be tagged except for argc for
+ // JS builtins.
+ bool has_argc = false;
+ for (int i = 0; i < register_parameter_count; ++i) {
+ MachineType type = continuation_descriptor.GetParameterType(i);
+ int code = continuation_descriptor.GetRegisterParameter(i).code();
+ // Only tagged and int32 arguments are supported, and int32 only for the
+ // arguments count on JavaScript builtins.
+ if (type == MachineType::Int32()) {
+ CHECK_EQ(code, kJavaScriptCallArgCountRegister.code());
+ has_argc = true;
+ } else {
+ // Any other argument must be a tagged value.
+ CHECK(IsAnyTagged(type.representation()));
+ }
+ }
+ CHECK_EQ(java_script_builtin, has_argc);
+
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
- " translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key)),
- height_in_bytes);
+ " translating BuiltinContinuation to %s,"
+ " register param count %d,"
+ " stack param count %d\n",
+ Builtins::name(builtin_name), register_parameter_count,
+ stack_param_count);
}
- // The stub failure trampoline is a single frame.
+ unsigned output_frame_offset = output_frame_size;
FrameDescription* output_frame =
new (output_frame_size) FrameDescription(output_frame_size);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- CHECK_EQ(frame_index, 0);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
- intptr_t top_address = caller_frame_top_ - output_frame_size;
+ intptr_t top_address;
+ if (is_bottommost) {
+ top_address = caller_frame_top_ - output_frame_size;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
output_frame->SetTop(top_address);
+ output_frame->SetState(
+ Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
+
+  // Get the possible JSFunction for the case that this is a JavaScript
+  // builtin continuation; for stub continuations this slot goes unused.
+ intptr_t maybe_function =
+ reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
+ ++value_iterator;
+
+ std::vector<intptr_t> register_values;
+ int total_registers = config->num_general_registers();
+ register_values.resize(total_registers, 0);
+ for (int i = 0; i < total_registers; ++i) {
+ register_values[i] = 0;
+ }
+
+ intptr_t value;
+
+ Register result_reg = FullCodeGenerator::result_register();
+ if (must_handle_result) {
+ value = input_->GetRegister(result_reg.code());
+ } else {
+ value = reinterpret_cast<intptr_t>(isolate()->heap()->undefined_value());
+ }
+ output_frame->SetRegister(result_reg.code(), value);
+
+ int translated_stack_parameters =
+ must_handle_result ? stack_param_count - 1 : stack_param_count;
+
+ for (int i = 0; i < translated_stack_parameters; ++i) {
+ output_frame_offset -= kPointerSize;
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_frame_offset);
+ }
+
+ if (must_handle_result) {
+ output_frame_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), input_index,
+ frame_index, output_frame_offset,
+ "placeholder for return result on lazy deopt ");
+ }
+
+ for (int i = 0; i < register_parameter_count; ++i) {
+ value = reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
+ int code = continuation_descriptor.GetRegisterParameter(i).code();
+ register_values[code] = value;
+ ++input_index;
+ ++value_iterator;
+ }
+
+  // The context is always implicit in the CallInterfaceDescriptor, but its
+  // register must be set explicitly when continuing to the builtin. Make sure
+  // it is harvested from the translation and copied into the register set
+  // (it was automatically added at the end of the FrameState by the
+  // instruction selector).
+ value = reinterpret_cast<intptr_t>(value_iterator->GetRawValue());
+ register_values[kContextRegister.code()] = value;
+ output_frame->SetContext(value);
+ output_frame->SetRegister(kContextRegister.code(), value);
+ ++input_index;
+ ++value_iterator;
+
// Set caller's PC (JSFunction continuation).
- unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
- intptr_t value = caller_pc_;
+ output_frame_offset -= kPCOnStackSize;
+ if (is_bottommost) {
+ value = caller_pc_;
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
output_frame->SetCallerPc(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's pc\n");
- // Read caller's FP from the input frame, and set this frame's FP.
- value = caller_fp_;
+ // Read caller's FP from the previous frame, and set this frame's FP.
output_frame_offset -= kFPOnStackSize;
+ if (is_bottommost) {
+ value = caller_fp_;
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
output_frame->SetCallerFp(output_frame_offset, value);
- intptr_t frame_ptr = top_address + output_frame_offset;
- Register fp_reg = StubFailureTrampolineFrame::fp_register();
- output_frame->SetRegister(fp_reg.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
+ intptr_t fp_value = top_address + output_frame_offset;
+ output_frame->SetFp(fp_value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
- // Read the caller's constant pool from the input frame.
- value = caller_constant_pool_;
+ // Read the caller's constant pool from the previous frame.
output_frame_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = caller_constant_pool_;
+ } else {
+ value = output_[frame_index - 1]->GetConstantPool();
+ }
output_frame->SetCallerConstantPool(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's constant_pool\n");
}
- // The marker for the typed stack frame
- output_frame_offset -= kPointerSize;
- value = StackFrame::TypeToMarker(StackFrame::STUB_FAILURE_TRAMPOLINE);
- output_frame->SetFrameSlot(output_frame_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_frame_offset,
- "function (stub failure sentinel)\n");
-
- intptr_t caller_arg_count = stack_param_count;
- bool arg_count_known = !descriptor.stack_parameter_count().is_valid();
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
+ // A marker value is used in place of the context.
output_frame_offset -= kPointerSize;
- int args_arguments_offset = output_frame_offset;
- intptr_t the_hole = reinterpret_cast<intptr_t>(
- isolate_->heap()->the_hole_value());
- if (arg_count_known) {
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- } else {
- value = the_hole;
- }
-
- output_frame->SetFrameSlot(args_arguments_offset, value);
- DebugPrintOutputSlot(
- value, frame_index, args_arguments_offset,
- arg_count_known ? "args.arguments\n" : "args.arguments (the hole)\n");
+ intptr_t marker =
+ java_script_builtin
+ ? StackFrame::TypeToMarker(
+ StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION)
+ : StackFrame::TypeToMarker(StackFrame::BUILTIN_CONTINUATION);
+ output_frame->SetFrameSlot(output_frame_offset, marker);
+ DebugPrintOutputSlot(marker, frame_index, output_frame_offset,
+ "context (builtin continuation sentinel)\n");
output_frame_offset -= kPointerSize;
- int length_frame_offset = output_frame_offset;
- value = arg_count_known ? caller_arg_count : the_hole;
- output_frame->SetFrameSlot(length_frame_offset, value);
- DebugPrintOutputSlot(
- value, frame_index, length_frame_offset,
- arg_count_known ? "args.length\n" : "args.length (the hole)\n");
+ value = java_script_builtin ? maybe_function : 0;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ java_script_builtin ? "JSFunction\n" : "unused\n");
+  // The Code object of the builtin to continue to.
output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
- (output_frame_size - output_frame_offset) + kPointerSize;
+ value = reinterpret_cast<intptr_t>(builtin);
output_frame->SetFrameSlot(output_frame_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_frame_offset, "args*\n");
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "builtin address\n");
- // Copy the register parameters to the failure frame.
- int arguments_length_offset = -1;
- for (int i = 0; i < param_count; ++i) {
+ for (int i = 0; i < allocatable_register_count; ++i) {
output_frame_offset -= kPointerSize;
- WriteTranslatedValueToOutput(&value_iterator, &input_index, 0,
- output_frame_offset);
-
- if (!arg_count_known &&
- descriptor.GetRegisterParameter(i)
- .is(descriptor.stack_parameter_count())) {
- arguments_length_offset = output_frame_offset;
+ int code = config->GetAllocatableGeneralCode(i);
+ value = register_values[code];
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_scope_ != nullptr) {
+ ScopedVector<char> str(128);
+ if (java_script_builtin &&
+ code == kJavaScriptCallArgCountRegister.code()) {
+ SNPrintF(
+ str,
+ "tagged argument count %s (will be untagged by continuation)\n",
+ config->GetGeneralRegisterName(code));
+ } else {
+ SNPrintF(str, "builtin register argument %s\n",
+ config->GetGeneralRegisterName(code));
+ }
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ str.start());
}
}
- Object* maybe_context = value_iterator->GetRawValue();
- CHECK(maybe_context->IsContext());
- Register context_reg = StubFailureTrampolineFrame::context_register();
- value = reinterpret_cast<intptr_t>(maybe_context);
- output_frame->SetRegister(context_reg.code(), value);
- ++value_iterator;
-
- // Copy constant stack parameters to the failure frame. If the number of stack
- // parameters is not known in the descriptor, the arguments object is the way
- // to access them.
- for (int i = 0; i < stack_param_count; i++) {
- output_frame_offset -= kPointerSize;
- Object** stack_parameter = reinterpret_cast<Object**>(
- frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (stack_param_count - i - 1) * kPointerSize);
- value = reinterpret_cast<intptr_t>(*stack_parameter);
- output_frame->SetFrameSlot(output_frame_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_frame_offset,
- "stack parameter\n");
- }
-
- CHECK_EQ(0u, output_frame_offset);
-
- if (!arg_count_known) {
- CHECK_GE(arguments_length_offset, 0);
- // We know it's a smi because 1) the code stub guarantees the stack
- // parameter count is in smi range, and 2) the DoTranslateCommand in the
- // parameter loop above translated that to a tagged value.
- Smi* smi_caller_arg_count = reinterpret_cast<Smi*>(
- output_frame->GetFrameSlot(arguments_length_offset));
- caller_arg_count = smi_caller_arg_count->value();
- output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
- DebugPrintOutputSlot(caller_arg_count, frame_index, length_frame_offset,
- "args.length\n");
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(args_arguments_offset, value);
- DebugPrintOutputSlot(value, frame_index, args_arguments_offset,
- "args.arguments");
- }
-
- // Copy the double registers from the input into the output frame.
- CopyDoubleRegisters(output_frame);
-
- // Fill registers containing handler and number of parameters.
- SetPlatformCompiledStubRegisters(output_frame, &descriptor);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- StubFunctionMode function_mode = descriptor.function_mode();
- StubFailureTrampolineStub(isolate_, function_mode)
- .FindCodeInCache(&trampoline);
- DCHECK(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- if (FLAG_enable_embedded_constant_pool) {
- Register constant_pool_reg =
- StubFailureTrampolineFrame::constant_pool_pointer_register();
- intptr_t constant_pool_value =
- reinterpret_cast<intptr_t>(trampoline->constant_pool());
- output_frame->SetConstantPool(constant_pool_value);
- output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
- }
- output_frame->SetState(
- Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+ // Ensure the frame pointer register points to the callee's frame. The builtin
+ // will build its own frame once we continue to it.
+ Register fp_reg = JavaScriptFrame::fp_register();
+ output_frame->SetRegister(fp_reg.code(), output_[frame_index - 1]->GetFp());
+
+ Code* continue_to_builtin =
+ java_script_builtin
+ ? (must_handle_result
+ ? isolate()->builtins()->builtin(
+ Builtins::kContinueToJavaScriptBuiltinWithResult)
+ : isolate()->builtins()->builtin(
+ Builtins::kContinueToJavaScriptBuiltin))
+ : (must_handle_result
+ ? isolate()->builtins()->builtin(
+ Builtins::kContinueToCodeStubBuiltinWithResult)
+ : isolate()->builtins()->builtin(
+ Builtins::kContinueToCodeStubBuiltin));
+ output_frame->SetPc(
+ reinterpret_cast<intptr_t>(continue_to_builtin->instruction_start()));
+
+ Code* continuation =
+ isolate()->builtins()->builtin(Builtins::kNotifyBuiltinContinuation);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
+ reinterpret_cast<intptr_t>(continuation->entry()));
}
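The continuation frame assembled above is laid out, from the top of the output frame downward: the translated stack parameters (plus a hole slot reserved for the return value on lazy deopts), the caller's PC, FP and optional constant pool, the frame-type marker in the context slot, the JSFunction (or an unused slot for stub continuations), the Code object of the builtin to continue to, and finally one slot per allocatable general register. A minimal standalone sketch of the size arithmetic; every constant is a placeholder standing in for V8's frame-constants headers, not a real value:

// Illustrative only: all constants below are assumptions, not V8's values.
#include <cstdio>

int main() {
  const int kPointerSize = 8;           // assuming a 64-bit target
  const int kFixedTypedFrameSlots = 6;  // caller PC/FP, marker, function slot,
                                        // builtin Code, constant pool (example)
  int translated_height = 7;       // register params + stack params + context
  int register_param_count = 3;    // from the builtin's call descriptor
  int stack_param_count = translated_height - register_param_count - 1;
  int allocatable_registers = 11;  // example size of the register file
  int frame_size = kPointerSize * (stack_param_count + allocatable_registers +
                                   kFixedTypedFrameSlots);
  std::printf("stack params: %d, frame size: %d bytes\n", stack_param_count,
              frame_size);
  return 0;
}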
-
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// Walk to the last JavaScript output frame to find out if it has
// adapted arguments.
@@ -2260,8 +1970,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
- unsigned outgoing_size =
- ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
+ unsigned outgoing_size = 0;
+    // was: ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
CHECK_EQ(fixed_size_above_fp + (stack_slots * kPointerSize) -
CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
result);
@@ -2291,16 +2001,6 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
}
-
-// static
-unsigned Deoptimizer::ComputeOutgoingArgumentSize(Code* code,
- unsigned bailout_id) {
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- unsigned height = data->ArgumentsStackHeight(bailout_id)->value();
- return height * kPointerSize;
-}
-
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
@@ -2320,7 +2020,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
MemoryChunk* chunk = data->deopt_entry_code_[type];
@@ -2405,6 +2105,24 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
return result;
}
+void Translation::BeginBuiltinContinuationFrame(BailoutId bailout_id,
+ int literal_id,
+ unsigned height) {
+ buffer_->Add(BUILTIN_CONTINUATION_FRAME);
+ buffer_->Add(bailout_id.ToInt());
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
+}
+
+void Translation::BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
+ int literal_id,
+ unsigned height) {
+ buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME);
+ buffer_->Add(bailout_id.ToInt());
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
+}
+
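Each of the new writers appends a fixed-shape record to the translation byte stream: the opcode followed by three operands (bailout id, literal id, height), which is why NumberOfOperandsFor() reports 3 for both continuation opcodes further down. A rough sketch of that record shape, using a plain std::vector and invented names in place of the zone-allocated, variable-length-encoded TranslationBuffer:

// Sketch with assumed names; not the real TranslationBuffer implementation.
#include <cstdio>
#include <vector>

enum DemoOpcode { BUILTIN_CONTINUATION_FRAME = 2 };  // example numbering

struct TranslationSketch {
  std::vector<int> buffer;
  void BeginBuiltinContinuationFrame(int bailout_id, int literal_id,
                                     int height) {
    buffer.push_back(BUILTIN_CONTINUATION_FRAME);
    buffer.push_back(bailout_id);
    buffer.push_back(literal_id);
    buffer.push_back(height);
  }
};

int main() {
  TranslationSketch t;
  t.BeginBuiltinContinuationFrame(/*bailout_id=*/42, /*literal_id=*/1,
                                  /*height=*/5);
  // The reader pops the opcode and then exactly three operands, matching
  // NumberOfOperandsFor().
  for (int word : t.buffer) std::printf("%d ", word);
  std::printf("\n");  // prints: 2 42 1 5
  return 0;
}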
void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
unsigned height) {
buffer_->Add(CONSTRUCT_STUB_FRAME);
@@ -2432,20 +2150,6 @@ void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(height);
}
-void Translation::BeginTailCallerFrame(int literal_id) {
- buffer_->Add(TAIL_CALLER_FRAME);
- buffer_->Add(literal_id);
-}
-
-void Translation::BeginJSFrame(BailoutId node_id, int literal_id,
- unsigned height) {
- buffer_->Add(JS_FRAME);
- buffer_->Add(node_id.ToInt());
- buffer_->Add(literal_id);
- buffer_->Add(height);
-}
-
-
void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
int literal_id, unsigned height) {
buffer_->Add(INTERPRETED_FRAME);
@@ -2455,17 +2159,6 @@ void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
}
-void Translation::BeginCompiledStubFrame(int height) {
- buffer_->Add(COMPILED_STUB_FRAME);
- buffer_->Add(height);
-}
-
-
-void Translation::BeginArgumentsObject(int args_length) {
- buffer_->Add(ARGUMENTS_OBJECT);
- buffer_->Add(args_length);
-}
-
void Translation::ArgumentsElements(bool is_rest) {
buffer_->Add(ARGUMENTS_ELEMENTS);
buffer_->Add(is_rest);
@@ -2562,16 +2255,6 @@ void Translation::StoreLiteral(int literal_id) {
}
-void Translation::StoreArgumentsObject(bool args_known,
- int args_index,
- int args_length) {
- buffer_->Add(ARGUMENTS_OBJECT);
- buffer_->Add(args_known);
- buffer_->Add(args_index);
- buffer_->Add(args_length);
-}
-
-
void Translation::StoreJSFrameFunction() {
StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kFunctionOffset) /
@@ -2583,7 +2266,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
- case ARGUMENTS_OBJECT:
case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
@@ -2598,15 +2280,14 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case FLOAT_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
- case COMPILED_STUB_FRAME:
- case TAIL_CALLER_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
return 2;
- case JS_FRAME:
case INTERPRETED_FRAME:
case CONSTRUCT_STUB_FRAME:
+ case BUILTIN_CONTINUATION_FRAME:
+ case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
return 3;
case ARGUMENTS_ELEMENTS:
case ARGUMENTS_LENGTH:
@@ -2626,7 +2307,6 @@ const char* Translation::StringFor(Opcode opcode) {
}
#undef TRANSLATION_OPCODE_CASE
UNREACHABLE();
- return "";
}
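The dead return "" removed here, and the matching return -1, return Handle<Object>::null() and return nullptr lines dropped further down, become unnecessary once UNREACHABLE() expands to a call the compiler treats as non-returning, so no dummy value is needed to silence missing-return warnings. A small sketch of that pattern, assuming a [[noreturn]]-annotated helper rather than V8's actual macro:

// Exhaustive switch with a noreturn fallback; no dummy return required.
#include <cstdio>
#include <cstdlib>

[[noreturn]] void Unreachable() {
  std::fprintf(stderr, "unreachable code reached\n");
  std::abort();
}

enum class Kind { kA, kB };

const char* Name(Kind k) {
  switch (k) {
    case Kind::kA:
      return "A";
    case Kind::kB:
      return "B";
  }
  Unreachable();  // replaces the old dummy return statements
}

int main() { std::printf("%s\n", Name(Kind::kA)); }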
#endif
@@ -2753,14 +2433,9 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
parameter_frame != state->begin() &&
(parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;
- if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
- source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray(
- *frame_it->shared_info(), frame_it->node_id());
- } else {
- DCHECK_EQ(TranslatedFrame::kFunction, frame_it->kind());
- source_position_ = Deoptimizer::ComputeSourcePositionFromBaselineCode(
- *frame_it->shared_info(), frame_it->node_id());
- }
+ DCHECK_EQ(TranslatedFrame::kInterpretedFunction, frame_it->kind());
+ source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray(
+ *frame_it->shared_info(), frame_it->node_id());
TranslatedFrame::iterator value_it = frame_it->begin();
// Get the function. Note that this might materialize the function.
@@ -2789,9 +2464,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
// Get the expression stack.
int stack_height = frame_it->height();
- if (frame_it->kind() == TranslatedFrame::kFunction ||
- frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
- // For full-code frames, we should not count the context.
+ if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
// For interpreter frames, we should not count the accumulator.
// TODO(jarin): Clean up the indexing in translated frames.
stack_height--;
@@ -2840,19 +2513,6 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
// static
-int Deoptimizer::ComputeSourcePositionFromBaselineCode(
- SharedFunctionInfo* shared, BailoutId node_id) {
- DCHECK(shared->HasBaselineCode());
- Code* code = shared->code();
- FixedArray* raw_data = code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, shared);
- int code_offset =
- static_cast<int>(FullCodeGenerator::PcField::decode(pc_and_state));
- return AbstractCode::cast(code)->SourcePosition(code_offset);
-}
-
-// static
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
SharedFunctionInfo* shared, BailoutId node_id) {
DCHECK(shared->HasBytecodeArray());
@@ -2861,15 +2521,6 @@ int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
}
// static
-TranslatedValue TranslatedValue::NewArgumentsObject(TranslatedState* container,
- int length,
- int object_index) {
- TranslatedValue slot(container, kArgumentsObject);
- slot.materialization_info_ = {object_index, length};
- return slot;
-}
-
-// static
TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
int length,
int object_index) {
@@ -2979,14 +2630,13 @@ Float64 TranslatedValue::double_value() const {
int TranslatedValue::object_length() const {
- DCHECK(kind() == kArgumentsObject || kind() == kCapturedObject);
+ DCHECK(kind() == kCapturedObject);
return materialization_info_.length_;
}
int TranslatedValue::object_index() const {
- DCHECK(kind() == kArgumentsObject || kind() == kCapturedObject ||
- kind() == kDuplicatedObject);
+ DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject);
return materialization_info_.id_;
}
@@ -3055,7 +2705,6 @@ Handle<Object> TranslatedValue::GetValue() {
return value_.ToHandleChecked();
}
- case TranslatedValue::kArgumentsObject:
case TranslatedValue::kCapturedObject:
case TranslatedValue::kDuplicatedObject:
return container_->MaterializeObjectAt(object_index());
@@ -3104,7 +2753,6 @@ void TranslatedValue::MaterializeSimple() {
case kCapturedObject:
case kDuplicatedObject:
- case kArgumentsObject:
case kInvalid:
case kTagged:
case kBoolBit:
@@ -3118,7 +2766,6 @@ bool TranslatedValue::IsMaterializedObject() const {
switch (kind()) {
case kCapturedObject:
case kDuplicatedObject:
- case kArgumentsObject:
return true;
default:
return false;
@@ -3131,7 +2778,7 @@ bool TranslatedValue::IsMaterializableByDebugger() const {
}
int TranslatedValue::GetChildrenCount() const {
- if (kind() == kCapturedObject || kind() == kArgumentsObject) {
+ if (kind() == kCapturedObject) {
return object_length();
} else {
return 0;
@@ -3168,16 +2815,6 @@ void TranslatedValue::Handlify() {
}
-TranslatedFrame TranslatedFrame::JSFrame(BailoutId node_id,
- SharedFunctionInfo* shared_info,
- int height) {
- TranslatedFrame frame(kFunction, shared_info->GetIsolate(), shared_info,
- height);
- frame.node_id_ = node_id;
- return frame;
-}
-
-
TranslatedFrame TranslatedFrame::InterpretedFrame(
BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
TranslatedFrame frame(kInterpretedFunction, shared_info->GetIsolate(),
@@ -3200,12 +2837,6 @@ TranslatedFrame TranslatedFrame::ArgumentsAdaptorFrame(
shared_info, height);
}
-TranslatedFrame TranslatedFrame::TailCallerFrame(
- SharedFunctionInfo* shared_info) {
- return TranslatedFrame(kTailCallerFunction, shared_info->GetIsolate(),
- shared_info, 0);
-}
-
TranslatedFrame TranslatedFrame::ConstructStubFrame(
BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
TranslatedFrame frame(kConstructStub, shared_info->GetIsolate(), shared_info,
@@ -3214,16 +2845,24 @@ TranslatedFrame TranslatedFrame::ConstructStubFrame(
return frame;
}
+TranslatedFrame TranslatedFrame::BuiltinContinuationFrame(
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ TranslatedFrame frame(kBuiltinContinuation, shared_info->GetIsolate(),
+ shared_info, height);
+ frame.node_id_ = bailout_id;
+ return frame;
+}
+
+TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ TranslatedFrame frame(kJavaScriptBuiltinContinuation,
+ shared_info->GetIsolate(), shared_info, height);
+ frame.node_id_ = bailout_id;
+ return frame;
+}
int TranslatedFrame::GetValueCount() {
switch (kind()) {
- case kFunction: {
- int parameter_count =
- raw_shared_info_->internal_formal_parameter_count() + 1;
- // + 1 for function.
- return height_ + parameter_count + 1;
- }
-
case kInterpretedFunction: {
int parameter_count =
raw_shared_info_->internal_formal_parameter_count() + 1;
@@ -3239,20 +2878,15 @@ int TranslatedFrame::GetValueCount() {
case kArgumentsAdaptor:
case kConstructStub:
+ case kBuiltinContinuation:
+ case kJavaScriptBuiltinContinuation:
return 1 + height_;
- case kTailCallerFunction:
- return 1; // Function.
-
- case kCompiledStub:
- return height_;
-
case kInvalid:
UNREACHABLE();
break;
}
UNREACHABLE();
- return -1;
}
@@ -3273,21 +2907,6 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
- case Translation::JS_FRAME: {
- BailoutId node_id = BailoutId(iterator->Next());
- SharedFunctionInfo* shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
- int height = iterator->Next();
- if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
- PrintF(trace_file, " reading input frame %s", name.get());
- int arg_count = shared_info->internal_formal_parameter_count() + 1;
- PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
- node_id.ToInt(), arg_count, height);
- }
- return TranslatedFrame::JSFrame(node_id, shared_info, height);
- }
-
case Translation::INTERPRETED_FRAME: {
BailoutId bytecode_offset = BailoutId(iterator->Next());
SharedFunctionInfo* shared_info =
@@ -3317,30 +2936,57 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::ArgumentsAdaptorFrame(shared_info, height);
}
- case Translation::TAIL_CALLER_FRAME: {
+ case Translation::CONSTRUCT_STUB_FRAME: {
+ BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
- PrintF(trace_file, " reading tail caller frame marker %s\n",
+ PrintF(trace_file, " reading construct stub frame %s", name.get());
+ PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
+ bailout_id.ToInt(), height);
+ }
+ return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info,
+ height);
+ }
+
+ case Translation::BUILTIN_CONTINUATION_FRAME: {
+ BailoutId bailout_id = BailoutId(iterator->Next());
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading builtin continuation frame %s",
name.get());
+ PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
+ bailout_id.ToInt(), height);
}
- return TranslatedFrame::TailCallerFrame(shared_info);
+ // Add one to the height to account for the context which was implicitly
+ // added to the translation during code generation.
+ int height_with_context = height + 1;
+ return TranslatedFrame::BuiltinContinuationFrame(bailout_id, shared_info,
+ height_with_context);
}
- case Translation::CONSTRUCT_STUB_FRAME: {
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
- PrintF(trace_file, " reading construct stub frame %s", name.get());
+ PrintF(trace_file, " reading JavaScript builtin continuation frame %s",
+ name.get());
PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
bailout_id.ToInt(), height);
}
- return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info,
- height);
+ // Add one to the height to account for the context which was implicitly
+ // added to the translation during code generation.
+ int height_with_context = height + 1;
+ return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
+ bailout_id, shared_info, height_with_context);
}
case Translation::GETTER_STUB_FRAME: {
@@ -3365,19 +3011,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
shared_info);
}
- case Translation::COMPILED_STUB_FRAME: {
- int height = iterator->Next();
- if (trace_file != nullptr) {
- PrintF(trace_file,
- " reading compiler stub frame => height=%d; inputs:\n", height);
- }
- return TranslatedFrame::CompiledStubFrame(height,
- literal_array->GetIsolate());
- }
-
case Translation::BEGIN:
case Translation::DUPLICATED_OBJECT:
- case Translation::ARGUMENTS_OBJECT:
case Translation::ARGUMENTS_ELEMENTS:
case Translation::ARGUMENTS_LENGTH:
case Translation::CAPTURED_OBJECT:
@@ -3503,14 +3138,13 @@ int TranslatedState::CreateNextTranslatedValue(
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
case Translation::BEGIN:
- case Translation::JS_FRAME:
case Translation::INTERPRETED_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::TAIL_CALLER_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case Translation::BUILTIN_CONTINUATION_FRAME:
// Peeled off before getting here.
break;
@@ -3526,20 +3160,6 @@ int TranslatedState::CreateNextTranslatedValue(
return translated_value.GetChildrenCount();
}
- case Translation::ARGUMENTS_OBJECT: {
- int arg_count = iterator->Next();
- int object_index = static_cast<int>(object_positions_.size());
- if (trace_file != nullptr) {
- PrintF(trace_file, "arguments object #%d (length = %d)", object_index,
- arg_count);
- }
- object_positions_.push_back({frame_index, value_index});
- TranslatedValue translated_value =
- TranslatedValue::NewArgumentsObject(this, arg_count, object_index);
- frame.Add(translated_value);
- return translated_value.GetChildrenCount();
- }
-
case Translation::ARGUMENTS_ELEMENTS: {
bool is_rest = iterator->Next();
CreateArgumentsElementsTranslatedValues(frame_index, fp, is_rest,
@@ -3621,7 +3241,6 @@ int TranslatedState::CreateNextTranslatedValue(
if (trace_file != nullptr) {
PrintF(trace_file, "%" V8PRIuPTR " ; %s (uint)", value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
}
TranslatedValue translated_value =
TranslatedValue::NewUInt32(this, static_cast<uint32_t>(value));
@@ -3790,11 +3409,8 @@ int TranslatedState::CreateNextTranslatedValue(
return translated_value.GetChildrenCount();
}
-
TranslatedState::TranslatedState(JavaScriptFrame* frame)
- : isolate_(nullptr),
- stack_frame_pointer_(nullptr),
- has_adapted_arguments_(false) {
+ : isolate_(nullptr), stack_frame_pointer_(nullptr) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
@@ -3806,11 +3422,8 @@ TranslatedState::TranslatedState(JavaScriptFrame* frame)
frame->function()->shared()->internal_formal_parameter_count());
}
-
TranslatedState::TranslatedState()
- : isolate_(nullptr),
- stack_frame_pointer_(nullptr),
- has_adapted_arguments_(false) {}
+ : isolate_(nullptr), stack_frame_pointer_(nullptr) {}
void TranslatedState::Init(Address input_frame_pointer,
TranslationIterator* iterator,
@@ -3890,7 +3503,6 @@ void TranslatedState::Prepare(bool has_adapted_arguments,
for (auto& frame : frames_) frame.Handlify();
stack_frame_pointer_ = stack_frame_pointer;
- has_adapted_arguments_ = has_adapted_arguments;
UpdateFromPreviouslyMaterializedObjects();
}
@@ -3958,7 +3570,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
slot->value_ = object;
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
- object->set_properties(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
int in_object_properties = map->GetInObjectProperties();
for (int i = 0; i < in_object_properties; ++i) {
@@ -3968,6 +3580,35 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
}
return object;
}
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE: {
+ Handle<JSSetIterator> object = Handle<JSSetIterator>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> table = materializer.FieldAt(value_index);
+ Handle<Object> index = materializer.FieldAt(value_index);
+ object->set_raw_properties_or_hash(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_table(*table);
+ object->set_index(*index);
+ return object;
+ }
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE: {
+ Handle<JSMapIterator> object = Handle<JSMapIterator>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> table = materializer.FieldAt(value_index);
+ Handle<Object> index = materializer.FieldAt(value_index);
+ object->set_raw_properties_or_hash(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_table(*table);
+ object->set_index(*index);
+ return object;
+ }
case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
@@ -4013,7 +3654,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> iterated_object = materializer.FieldAt(value_index);
Handle<Object> next_index = materializer.FieldAt(value_index);
Handle<Object> iterated_object_map = materializer.FieldAt(value_index);
- object->set_properties(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
object->set_object(*iterated_object);
object->set_index(*next_index);
@@ -4030,12 +3671,12 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> iterated_string = materializer.FieldAt(value_index);
Handle<Object> next_index = materializer.FieldAt(value_index);
- object->set_properties(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
CHECK(iterated_string->IsString());
object->set_string(String::cast(*iterated_string));
CHECK(next_index->IsSmi());
- object->set_index(Smi::cast(*next_index)->value());
+ object->set_index(Smi::ToInt(*next_index));
return object;
}
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: {
@@ -4046,7 +3687,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> sync_iterator = materializer.FieldAt(value_index);
- object->set_properties(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
object->set_sync_iterator(JSReceiver::cast(*sync_iterator));
return object;
@@ -4058,11 +3699,58 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
Handle<Object> array_length = materializer.FieldAt(value_index);
- object->set_properties(FixedArray::cast(*properties));
+ object->set_raw_properties_or_hash(*properties);
object->set_elements(FixedArrayBase::cast(*elements));
object->set_length(*array_length);
return object;
}
+ case JS_BOUND_FUNCTION_TYPE: {
+ Handle<JSBoundFunction> object = Handle<JSBoundFunction>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> bound_target_function = materializer.FieldAt(value_index);
+ Handle<Object> bound_this = materializer.FieldAt(value_index);
+ Handle<Object> bound_arguments = materializer.FieldAt(value_index);
+ object->set_raw_properties_or_hash(*properties);
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_bound_target_function(
+ JSReceiver::cast(*bound_target_function));
+ object->set_bound_this(*bound_this);
+ object->set_bound_arguments(FixedArray::cast(*bound_arguments));
+ return object;
+ }
+ case JS_GENERATOR_OBJECT_TYPE: {
+ Handle<JSGeneratorObject> object = Handle<JSGeneratorObject>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> function = materializer.FieldAt(value_index);
+ Handle<Object> context = materializer.FieldAt(value_index);
+ Handle<Object> receiver = materializer.FieldAt(value_index);
+ Handle<Object> input_or_debug_pos = materializer.FieldAt(value_index);
+ Handle<Object> resume_mode = materializer.FieldAt(value_index);
+ Handle<Object> continuation_offset = materializer.FieldAt(value_index);
+ Handle<Object> register_file = materializer.FieldAt(value_index);
+ object->set_raw_properties_or_hash(*properties);
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_function(JSFunction::cast(*function));
+ object->set_context(Context::cast(*context));
+ object->set_receiver(*receiver);
+ object->set_input_or_debug_pos(*input_or_debug_pos);
+ object->set_resume_mode(Smi::ToInt(*resume_mode));
+ object->set_continuation(Smi::ToInt(*continuation_offset));
+ object->set_register_file(FixedArray::cast(*register_file));
+ int in_object_properties = map->GetInObjectProperties();
+ for (int i = 0; i < in_object_properties; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+ object->FastPropertyAtPut(index, *value);
+ }
+ return object;
+ }
case CONS_STRING_TYPE: {
Handle<ConsString> object = Handle<ConsString>::cast(
isolate_->factory()
@@ -4075,7 +3763,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> first = materializer.FieldAt(value_index);
Handle<Object> second = materializer.FieldAt(value_index);
object->set_map(*map);
- object->set_length(Smi::cast(*string_length)->value());
+ object->set_length(Smi::ToInt(*string_length));
object->set_first(String::cast(*first));
object->set_second(String::cast(*second));
CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
@@ -4110,6 +3798,20 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
}
return object;
}
+ case PROPERTY_ARRAY_TYPE: {
+ DCHECK_EQ(*map, isolate_->heap()->property_array_map());
+ Handle<Object> lengthObject = materializer.FieldAt(value_index);
+ int32_t array_length = 0;
+ CHECK(lengthObject->ToInt32(&array_length));
+ Handle<PropertyArray> object =
+ isolate_->factory()->NewPropertyArray(array_length);
+ slot->value_ = object;
+ for (int i = 0; i < array_length; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ object->set(i, *value);
+ }
+ return object;
+ }
case FIXED_DOUBLE_ARRAY_TYPE: {
DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
Handle<Object> lengthObject = materializer.FieldAt(value_index);
@@ -4133,6 +3835,24 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
}
return object;
}
+ case JS_REGEXP_TYPE: {
+ Handle<JSRegExp> object = Handle<JSRegExp>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> data = materializer.FieldAt(value_index);
+ Handle<Object> source = materializer.FieldAt(value_index);
+ Handle<Object> flags = materializer.FieldAt(value_index);
+ Handle<Object> last_index = materializer.FieldAt(value_index);
+ object->set_raw_properties_or_hash(*properties);
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_data(*data);
+ object->set_source(*source);
+ object->set_flags(*flags);
+ object->set_last_index(*last_index);
+ return object;
+ }
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
@@ -4165,22 +3885,17 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
- case JS_REGEXP_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
case JS_PROXY_TYPE:
case MAP_TYPE:
case ALLOCATION_SITE_TYPE:
@@ -4214,14 +3929,17 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case STACK_FRAME_INFO_TYPE:
case CELL_TYPE:
case WEAK_CELL_TYPE:
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
- case PADDING_TYPE_1:
- case PADDING_TYPE_2:
- case PADDING_TYPE_3:
- case PADDING_TYPE_4:
+ case PREPARSED_SCOPE_DATA_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_TABLE_TYPE:
OFStream os(stderr);
os << "[couldn't handle instance type " << map->instance_type() << "]"
<< std::endl;
@@ -4229,7 +3947,6 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
break;
}
UNREACHABLE();
- return Handle<Object>::null();
}
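The new materialization cases above (set and map iterators, bound functions, generator objects, property arrays and regexps) all follow the same protocol: fields are pulled from the materializer strictly in the order the compiler recorded them for the captured object, and each setter installs the field on the freshly allocated object, so the sequence of FieldAt() calls has to mirror the object's layout exactly. A contrived sketch of that consume-in-order pattern; FieldStream and RegExpLike are made up for illustration and are not V8's API:

// Hypothetical illustration of consuming captured-object fields in order.
#include <cstdio>
#include <string>
#include <vector>

class FieldStream {  // stand-in for the captured-object materializer
 public:
  explicit FieldStream(std::vector<std::string> fields) : fields_(fields) {}
  std::string Next() { return fields_.at(index_++); }

 private:
  std::vector<std::string> fields_;
  size_t index_ = 0;
};

struct RegExpLike {
  std::string properties, elements, data, source, flags, last_index;
};

int main() {
  FieldStream stream({"props", "elems", "data", "source", "gi", "0"});
  RegExpLike object;
  // Order must match the recorded layout: properties, elements, data,
  // source, flags, last index.
  object.properties = stream.Next();
  object.elements = stream.Next();
  object.data = stream.Next();
  object.source = stream.Next();
  object.flags = stream.Next();
  object.last_index = stream.Next();
  std::printf("/%s/%s starting at index %s\n", object.source.c_str(),
              object.flags.c_str(), object.last_index.c_str());
  return 0;
}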
Handle<Object> TranslatedState::MaterializeAt(int frame_index,
@@ -4256,29 +3973,6 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
return value;
}
- case TranslatedValue::kArgumentsObject: {
- int length = slot->GetChildrenCount();
- Handle<JSObject> arguments;
- if (GetAdaptedArguments(&arguments, frame_index)) {
- // Store the materialized object and consume the nested values.
- for (int i = 0; i < length; ++i) {
- MaterializeAt(frame_index, value_index);
- }
- } else {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(frame->front().GetValue());
- arguments = isolate_->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
- DCHECK_EQ(array->length(), length);
- arguments->set_elements(*array);
- for (int i = 0; i < length; ++i) {
- Handle<Object> value = MaterializeAt(frame_index, value_index);
- array->set(i, *value);
- }
- }
- slot->value_ = arguments;
- return arguments;
- }
case TranslatedValue::kCapturedObject: {
// The map must be a tagged object.
CHECK(frame->values_[*value_index].kind() == TranslatedValue::kTagged);
@@ -4320,50 +4014,10 @@ Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
return MaterializeAt(pos.frame_index_, &(pos.value_index_));
}
-bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
- int frame_index) {
- if (frame_index == 0) {
- // Top level frame -> we need to go to the parent frame on the stack.
- if (!has_adapted_arguments_) return false;
-
- // This is top level frame, so we need to go to the stack to get
- // this function's argument. (Note that this relies on not inlining
- // recursive functions!)
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(frames_[frame_index].front().GetValue());
- *result = Accessors::FunctionGetArguments(function);
- return true;
- } else {
- TranslatedFrame* previous_frame = &(frames_[frame_index]);
- if (previous_frame->kind() != TranslatedFrame::kArgumentsAdaptor) {
- return false;
- }
- // We get the adapted arguments from the parent translation.
- int length = previous_frame->height();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(previous_frame->front().GetValue());
- Handle<JSObject> arguments =
- isolate_->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
- arguments->set_elements(*array);
- TranslatedFrame::iterator arg_iterator = previous_frame->begin();
- arg_iterator++; // Skip function.
- for (int i = 0; i < length; ++i) {
- Handle<Object> value = arg_iterator->GetValue();
- array->set(i, *value);
- arg_iterator++;
- }
- CHECK(arg_iterator == previous_frame->end());
- *result = arguments;
- return true;
- }
-}
-
TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
int jsframe_index, int* args_count) {
for (size_t i = 0; i < frames_.size(); i++) {
- if (frames_[i].kind() == TranslatedFrame::kFunction ||
- frames_[i].kind() == TranslatedFrame::kInterpretedFunction) {
+ if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction) {
if (jsframe_index > 0) {
jsframe_index--;
} else {
@@ -4426,9 +4080,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
if (new_store && value_changed) {
materialized_store->Set(stack_frame_pointer_,
previously_materialized_objects);
- CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
- frames_[0].kind() == TranslatedFrame::kInterpretedFunction ||
- frames_[0].kind() == TranslatedFrame::kTailCallerFunction);
+ CHECK(frames_[0].kind() == TranslatedFrame::kInterpretedFunction);
CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
}
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 16c5abeb86..857716b188 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/deoptimize-reason.h"
+#include "src/float.h"
#include "src/macro-assembler.h"
#include "src/source-position.h"
#include "src/zone/zone-chunk-list.h"
@@ -20,37 +21,6 @@ class DeoptimizedFrameInfo;
class TranslatedState;
class RegisterValues;
-// Safety wrapper for a 32-bit floating-point value to make sure we don't loose
-// the exact bit pattern during deoptimization when passing this value. Note
-// that there is intentionally no way to construct it from a {float} value.
-class Float32 {
- public:
- Float32() : bit_pattern_(0) {}
- uint32_t get_bits() const { return bit_pattern_; }
- float get_scalar() const { return bit_cast<float>(bit_pattern_); }
- static Float32 FromBits(uint32_t bits) { return Float32(bits); }
-
- private:
- explicit Float32(uint32_t bit_pattern) : bit_pattern_(bit_pattern) {}
- uint32_t bit_pattern_;
-};
-
-// Safety wrapper for a 64-bit floating-point value to make sure we don't loose
-// the exact bit pattern during deoptimization when passing this value. Note
-// that there is intentionally no way to construct it from a {double} value.
-class Float64 {
- public:
- Float64() : bit_pattern_(0) {}
- uint64_t get_bits() const { return bit_pattern_; }
- double get_scalar() const { return bit_cast<double>(bit_pattern_); }
- bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
- static Float64 FromBits(uint64_t bits) { return Float64(bits); }
-
- private:
- explicit Float64(uint64_t bit_pattern) : bit_pattern_(bit_pattern) {}
- uint64_t bit_pattern_;
-};
-
class TranslatedValue {
public:
// Allocation-less getter of the value.
@@ -74,14 +44,12 @@ class TranslatedValue {
kBoolBit,
kFloat,
kDouble,
- kCapturedObject, // Object captured by the escape analysis.
- // The number of nested objects can be obtained
- // with the DeferredObjectLength() method
- // (the values of the nested objects follow
- // this value in the depth-first order.)
- kDuplicatedObject, // Duplicated object of a deferred object.
- kArgumentsObject // Arguments object - only used to keep indexing
- // in sync, it should not be materialized.
+ kCapturedObject, // Object captured by the escape analysis.
+ // The number of nested objects can be obtained
+ // with the DeferredObjectLength() method
+ // (the values of the nested objects follow
+ // this value in the depth-first order.)
+ kDuplicatedObject // Duplicated object of a deferred object.
};
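For kCapturedObject the nested values follow the header slot in depth-first order, so an object whose field is itself a captured object contributes a single flat run of slots to the frame's value list. A toy flattening that shows the resulting order; Node and Flatten are invented for illustration and are not TranslatedValue records:

// Depth-first layout of captured-object values (assumed shapes).
#include <cstdio>
#include <string>
#include <vector>

struct Node {
  std::string name;
  std::vector<Node> children;  // non-empty => "captured object" with a length
};

void Flatten(const Node& n, std::vector<std::string>* out) {
  if (n.children.empty()) {
    out->push_back(n.name);  // plain tagged value
  } else {
    out->push_back(n.name + "(len=" + std::to_string(n.children.size()) + ")");
    for (const Node& c : n.children) Flatten(c, out);  // depth-first
  }
}

int main() {
  Node outer{"outer",
             {{"field0", {}}, {"nested", {{"a", {}}, {"b", {}}}}, {"field2", {}}}};
  std::vector<std::string> stream;
  Flatten(outer, &stream);
  for (const std::string& s : stream) std::printf("%s ", s.c_str());
  std::printf("\n");
  // Prints: outer(len=3) field0 nested(len=2) a b field2
  return 0;
}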
TranslatedValue(TranslatedState* container, Kind kind)
@@ -90,8 +58,6 @@ class TranslatedValue {
void Handlify();
int GetChildrenCount() const;
- static TranslatedValue NewArgumentsObject(TranslatedState* container,
- int length, int object_index);
static TranslatedValue NewDeferredObject(TranslatedState* container,
int length, int object_index);
static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
@@ -118,7 +84,7 @@ class TranslatedValue {
struct MaterializedObjectInfo {
int id_;
- int length_; // Applies only to kArgumentsObject or kCapturedObject kinds.
+    int length_;  // Applies only to the kCapturedObject kind.
};
union {
@@ -132,8 +98,7 @@ class TranslatedValue {
Float32 float_value_;
// kind is kDouble
Float64 double_value_;
- // kind is kDuplicatedObject or kArgumentsObject or
- // kCapturedObject.
+ // kind is kDuplicatedObject or kCapturedObject.
MaterializedObjectInfo materialization_info_;
};
@@ -151,14 +116,13 @@ class TranslatedValue {
class TranslatedFrame {
public:
enum Kind {
- kFunction,
kInterpretedFunction,
kGetter,
kSetter,
- kTailCallerFunction,
kArgumentsAdaptor,
kConstructStub,
- kCompiledStub,
+ kBuiltinContinuation,
+ kJavaScriptBuiltinContinuation,
kInvalid
};
@@ -217,8 +181,6 @@ class TranslatedFrame {
friend class TranslatedState;
// Constructor static methods.
- static TranslatedFrame JSFrame(BailoutId node_id,
- SharedFunctionInfo* shared_info, int height);
static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
SharedFunctionInfo* shared_info,
int height);
@@ -226,13 +188,13 @@ class TranslatedFrame {
SharedFunctionInfo* shared_info);
static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
int height);
- static TranslatedFrame TailCallerFrame(SharedFunctionInfo* shared_info);
static TranslatedFrame ConstructStubFrame(BailoutId bailout_id,
SharedFunctionInfo* shared_info,
int height);
- static TranslatedFrame CompiledStubFrame(int height, Isolate* isolate) {
- return TranslatedFrame(kCompiledStub, isolate, nullptr, height);
- }
+ static TranslatedFrame BuiltinContinuationFrame(
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
+ static TranslatedFrame JavaScriptBuiltinContinuationFrame(
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
static TranslatedFrame InvalidFrame() {
return TranslatedFrame(kInvalid, nullptr);
}
@@ -330,7 +292,6 @@ class TranslatedState {
class CapturedObjectMaterializer;
Handle<Object> MaterializeCapturedObjectAt(TranslatedValue* slot,
int frame_index, int* value_index);
- bool GetAdaptedArguments(Handle<JSObject>* result, int frame_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
static Float32 GetFloatSlot(Address fp, int slot_index);
@@ -339,7 +300,6 @@ class TranslatedState {
std::vector<TranslatedFrame> frames_;
Isolate* isolate_;
Address stack_frame_pointer_;
- bool has_adapted_arguments_;
int formal_parameter_count_;
struct ObjectPosition {
@@ -373,7 +333,6 @@ class Deoptimizer : public Malloced {
return "TOS_REGISTER";
}
UNREACHABLE();
- return nullptr;
}
struct DeoptInfo {
@@ -390,8 +349,6 @@ class Deoptimizer : public Malloced {
static DeoptInfo GetDeoptInfo(Code* code, byte* from);
- static int ComputeSourcePositionFromBaselineCode(SharedFunctionInfo* shared,
- BailoutId node_id);
static int ComputeSourcePositionFromBytecodeArray(SharedFunctionInfo* shared,
BailoutId node_id);
@@ -493,9 +450,6 @@ class Deoptimizer : public Malloced {
static int GetDeoptimizationId(Isolate* isolate,
Address addr,
BailoutType type);
- static int GetOutputInfo(DeoptimizationOutputData* data,
- BailoutId node_id,
- SharedFunctionInfo* shared);
// Code generation support.
static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
@@ -555,20 +509,16 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeJSFrame(TranslatedFrame* translated_frame, int frame_index,
- bool goto_catch_handler);
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler);
void DoComputeArgumentsAdaptorFrame(TranslatedFrame* translated_frame,
int frame_index);
- void DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
- int frame_index);
void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index);
void DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
int frame_index, bool is_setter_stub_frame);
- void DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
- int frame_index);
+ void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
+ int frame_index, bool java_script_frame);
void WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
@@ -610,15 +560,6 @@ class Deoptimizer : public Malloced {
// searching all code objects).
Code* FindDeoptimizingCode(Address addr);
- // Fill the given output frame's registers to contain the failure handler
- // address and the number of parameters for a stub failure trampoline.
- void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
- CodeStubDescriptor* desc);
-
- // Fill the given output frame's double registers with the original values
- // from the input frame's double registers.
- void CopyDoubleRegisters(FrameDescription* output_frame);
-
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
@@ -922,33 +863,31 @@ class TranslationIterator BASE_EMBEDDED {
int index_;
};
-#define TRANSLATION_OPCODE_LIST(V) \
- V(BEGIN) \
- V(JS_FRAME) \
- V(INTERPRETED_FRAME) \
- V(CONSTRUCT_STUB_FRAME) \
- V(GETTER_STUB_FRAME) \
- V(SETTER_STUB_FRAME) \
- V(ARGUMENTS_ADAPTOR_FRAME) \
- V(TAIL_CALLER_FRAME) \
- V(COMPILED_STUB_FRAME) \
- V(DUPLICATED_OBJECT) \
- V(ARGUMENTS_OBJECT) \
- V(ARGUMENTS_ELEMENTS) \
- V(ARGUMENTS_LENGTH) \
- V(CAPTURED_OBJECT) \
- V(REGISTER) \
- V(INT32_REGISTER) \
- V(UINT32_REGISTER) \
- V(BOOL_REGISTER) \
- V(FLOAT_REGISTER) \
- V(DOUBLE_REGISTER) \
- V(STACK_SLOT) \
- V(INT32_STACK_SLOT) \
- V(UINT32_STACK_SLOT) \
- V(BOOL_STACK_SLOT) \
- V(FLOAT_STACK_SLOT) \
- V(DOUBLE_STACK_SLOT) \
+#define TRANSLATION_OPCODE_LIST(V) \
+ V(BEGIN) \
+ V(INTERPRETED_FRAME) \
+ V(BUILTIN_CONTINUATION_FRAME) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) \
+ V(CONSTRUCT_STUB_FRAME) \
+ V(GETTER_STUB_FRAME) \
+ V(SETTER_STUB_FRAME) \
+ V(ARGUMENTS_ADAPTOR_FRAME) \
+ V(DUPLICATED_OBJECT) \
+ V(ARGUMENTS_ELEMENTS) \
+ V(ARGUMENTS_LENGTH) \
+ V(CAPTURED_OBJECT) \
+ V(REGISTER) \
+ V(INT32_REGISTER) \
+ V(UINT32_REGISTER) \
+ V(BOOL_REGISTER) \
+ V(FLOAT_REGISTER) \
+ V(DOUBLE_REGISTER) \
+ V(STACK_SLOT) \
+ V(INT32_STACK_SLOT) \
+ V(UINT32_STACK_SLOT) \
+ V(BOOL_STACK_SLOT) \
+ V(FLOAT_STACK_SLOT) \
+ V(DOUBLE_STACK_SLOT) \
V(LITERAL)
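TRANSLATION_OPCODE_LIST is the usual X-macro: each consumer supplies its own V to stamp out an enum, a switch case or a name table from the single list, which keeps the opcode numbering and its debug strings in sync as entries such as JS_FRAME and COMPILED_STUB_FRAME are dropped. A minimal sketch of that expansion pattern, with a made-up three-entry list rather than the real opcodes:

// Sketch of the X-macro pattern; the list below is invented for illustration.
#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(BEGIN)                  \
  V(INTERPRETED_FRAME)      \
  V(LITERAL)

enum DemoOpcode {
#define DECLARE_OPCODE(name) name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
      kLastDemoOpcode
};

const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define OPCODE_NAME_CASE(name) \
  case name:                   \
    return #name;
    DEMO_OPCODE_LIST(OPCODE_NAME_CASE)
#undef OPCODE_NAME_CASE
    default:
      return "UNKNOWN";
  }
}

int main() { std::printf("%s\n", DemoOpcodeName(INTERPRETED_FRAME)); }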
class Translation BASE_EMBEDDED {
@@ -973,17 +912,17 @@ class Translation BASE_EMBEDDED {
int index() const { return index_; }
// Commands.
- void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
unsigned height);
- void BeginCompiledStubFrame(int height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
- void BeginTailCallerFrame(int literal_id);
void BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
unsigned height);
+ void BeginBuiltinContinuationFrame(BailoutId bailout_id, int literal_id,
+ unsigned height);
+ void BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
+ int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
- void BeginArgumentsObject(int args_length);
void ArgumentsElements(bool is_rest);
void ArgumentsLength(bool is_rest);
void BeginCapturedObject(int length);
@@ -1001,7 +940,6 @@ class Translation BASE_EMBEDDED {
void StoreFloatStackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
- void StoreArgumentsObject(bool args_known, int args_index, int args_length);
void StoreJSFrameFunction();
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 6c0542ec90..b9d8c2f20c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -74,6 +74,92 @@ static void DumpBuffer(std::ostream* os, StringBuilder* out) {
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
+static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
+ const ExternalReferenceEncoder& ref_encoder,
+ std::ostream* os, RelocInfo* relocinfo,
+ bool first_reloc_info = true) {
+ // Indent the printing of the reloc info.
+ if (first_reloc_info) {
+ // The first reloc info is printed after the disassembled instruction.
+ out->AddPadding(' ', kRelocInfoPosition - out->position());
+ } else {
+ // Additional reloc infos are printed on separate lines.
+ DumpBuffer(os, out);
+ out->AddPadding(' ', kRelocInfoPosition);
+ }
+
+ RelocInfo::Mode rmode = relocinfo->rmode();
+ if (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) {
+ out->AddFormatted(" ;; debug: deopt position, script offset '%d'",
+ static_cast<int>(relocinfo->data()));
+ } else if (rmode == RelocInfo::DEOPT_INLINING_ID) {
+ out->AddFormatted(" ;; debug: deopt position, inlining id '%d'",
+ static_cast<int>(relocinfo->data()));
+ } else if (rmode == RelocInfo::DEOPT_REASON) {
+ DeoptimizeReason reason = static_cast<DeoptimizeReason>(relocinfo->data());
+ out->AddFormatted(" ;; debug: deopt reason '%s'",
+ DeoptimizeReasonToString(reason));
+ } else if (rmode == RelocInfo::DEOPT_ID) {
+ out->AddFormatted(" ;; debug: deopt index %d",
+ static_cast<int>(relocinfo->data()));
+ } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ relocinfo->target_object()->ShortPrint(&accumulator);
+ std::unique_ptr<char[]> obj_name = accumulator.ToCString();
+ out->AddFormatted(" ;; object: %s", obj_name.get());
+ } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ const char* reference_name = ref_encoder.NameOfAddress(
+ isolate, relocinfo->target_external_reference());
+ out->AddFormatted(" ;; external reference (%s)", reference_name);
+ } else if (RelocInfo::IsCodeTarget(rmode)) {
+ out->AddFormatted(" ;; code:");
+ Code* code = Code::GetCodeFromTargetAddress(relocinfo->target_address());
+ Code::Kind kind = code->kind();
+ if (code->is_inline_cache_stub()) {
+ out->AddFormatted(" %s", Code::Kind2String(kind));
+ if (kind == Code::COMPARE_IC) {
+ InlineCacheState ic_state = IC::StateFromCode(code);
+ out->AddFormatted(" %s", Code::ICState2String(ic_state));
+ }
+ } else if (kind == Code::STUB || kind == Code::HANDLER) {
+ // Get the STUB key and extract major and minor key.
+ uint32_t key = code->stub_key();
+ uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+ DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
+ out->AddFormatted(" %s, %s, ", Code::Kind2String(kind),
+ CodeStub::MajorName(major_key));
+ out->AddFormatted("minor: %d", minor_key);
+ } else {
+ out->AddFormatted(" %s", Code::Kind2String(kind));
+ }
+ } else if (RelocInfo::IsRuntimeEntry(rmode) &&
+ isolate->deoptimizer_data() != nullptr) {
+ // A runtime entry reloinfo might be a deoptimization bailout.
+ Address addr = relocinfo->target_address();
+ int id =
+ Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::EAGER);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ id = Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::LAZY);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ id = Deoptimizer::GetDeoptimizationId(isolate, addr, Deoptimizer::SOFT);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ } else {
+ out->AddFormatted(" ;; soft deoptimization bailout %d", id);
+ }
+ } else {
+ out->AddFormatted(" ;; lazy deoptimization bailout %d", id);
+ }
+ } else {
+ out->AddFormatted(" ;; deoptimization bailout %d", id);
+ }
+ } else {
+ out->AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ }
+}
+
static int DecodeIt(Isolate* isolate, std::ostream* os,
const V8NameConverter& converter, byte* begin, byte* end) {
SealHandleScope shs(isolate);
@@ -162,97 +248,32 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// Put together the reloc info
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
- // Indent the printing of the reloc info.
- if (i == 0) {
- // The first reloc info is printed after the disassembled instruction.
- out.AddPadding(' ', kRelocInfoPosition - out.position());
- } else {
- // Additional reloc infos are printed on separate lines.
- DumpBuffer(os, &out);
- out.AddPadding(' ', kRelocInfoPosition);
- }
+ bool first_reloc_info = (i == 0);
+ PrintRelocInfo(&out, isolate, ref_encoder, os, &relocinfo,
+ first_reloc_info);
+ }
- RelocInfo::Mode rmode = relocinfo.rmode();
- if (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) {
- out.AddFormatted(" ;; debug: deopt position, script offset '%d'",
- static_cast<int>(relocinfo.data()));
- } else if (rmode == RelocInfo::DEOPT_INLINING_ID) {
- out.AddFormatted(" ;; debug: deopt position, inlining id '%d'",
- static_cast<int>(relocinfo.data()));
- } else if (rmode == RelocInfo::DEOPT_REASON) {
- DeoptimizeReason reason =
- static_cast<DeoptimizeReason>(relocinfo.data());
- out.AddFormatted(" ;; debug: deopt reason '%s'",
- DeoptimizeReasonToString(reason));
- } else if (rmode == RelocInfo::DEOPT_ID) {
- out.AddFormatted(" ;; debug: deopt index %d",
- static_cast<int>(relocinfo.data()));
- } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- relocinfo.target_object()->ShortPrint(&accumulator);
- std::unique_ptr<char[]> obj_name = accumulator.ToCString();
- out.AddFormatted(" ;; object: %s", obj_name.get());
- } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- const char* reference_name = ref_encoder.NameOfAddress(
- isolate, relocinfo.target_external_reference());
- out.AddFormatted(" ;; external reference (%s)", reference_name);
- } else if (RelocInfo::IsCodeTarget(rmode)) {
- out.AddFormatted(" ;; code:");
- Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
- Code::Kind kind = code->kind();
- if (code->is_inline_cache_stub()) {
- out.AddFormatted(" %s", Code::Kind2String(kind));
- if (kind == Code::BINARY_OP_IC || kind == Code::TO_BOOLEAN_IC ||
- kind == Code::COMPARE_IC) {
- InlineCacheState ic_state = IC::StateFromCode(code);
- out.AddFormatted(" %s", Code::ICState2String(ic_state));
- }
- } else if (kind == Code::STUB || kind == Code::HANDLER) {
- // Get the STUB key and extract major and minor key.
- uint32_t key = code->stub_key();
- uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
- out.AddFormatted(" %s, %s, ", Code::Kind2String(kind),
- CodeStub::MajorName(major_key));
- out.AddFormatted("minor: %d", minor_key);
- } else {
- out.AddFormatted(" %s", Code::Kind2String(kind));
- }
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
- }
- } else if (RelocInfo::IsRuntimeEntry(rmode) &&
- isolate->deoptimizer_data() != NULL) {
- // A runtime entry reloinfo might be a deoptimization bailout.
- Address addr = relocinfo.target_address();
- int id = Deoptimizer::GetDeoptimizationId(isolate,
- addr,
- Deoptimizer::EAGER);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- id = Deoptimizer::GetDeoptimizationId(isolate,
- addr,
- Deoptimizer::LAZY);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- id = Deoptimizer::GetDeoptimizationId(isolate,
- addr,
- Deoptimizer::SOFT);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
- } else {
- out.AddFormatted(" ;; soft deoptimization bailout %d", id);
- }
- } else {
- out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
+ // If this is a constant pool load and we haven't found any RelocInfo
+ // already, check if we can find some RelocInfo for the target address in
+ // the constant pool.
+ if (pcs.is_empty() && converter.code() != nullptr) {
+ RelocInfo dummy_rinfo(prev_pc, RelocInfo::NONE32, 0, nullptr);
+ if (dummy_rinfo.IsInConstantPool()) {
+ byte* constant_pool_entry_address =
+ dummy_rinfo.constant_pool_entry_address();
+ RelocIterator reloc_it(converter.code());
+ while (!reloc_it.done()) {
+ if (reloc_it.rinfo()->IsInConstantPool() &&
+ (reloc_it.rinfo()->constant_pool_entry_address() ==
+ constant_pool_entry_address)) {
+ PrintRelocInfo(&out, isolate, ref_encoder, os, reloc_it.rinfo());
+ break;
}
- } else {
- out.AddFormatted(" ;; deoptimization bailout %d", id);
+ reloc_it.next();
}
- } else {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
}
}
+
DumpBuffer(os, &out);
}
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
index f21bd748f9..8a59a72484 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/double.h
@@ -174,7 +174,9 @@ class Double {
static const int kMaxExponent = 0x7FF - kExponentBias;
static const uint64_t kInfinity = V8_2PART_UINT64_C(0x7FF00000, 00000000);
- const uint64_t d64_;
+ // The field d64_ is not marked as const to permit the usage of the copy
+ // constructor.
+ uint64_t d64_;
static uint64_t DiyFpToUint64(DiyFp diy_fp) {
uint64_t significand = diy_fp.f();
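Note on the double.h change above (an illustrative aside, not part of the diff): in standard C++ a const non-static data member leaves the implicit copy constructor usable but deletes the implicit copy assignment operator, so dropping the const qualifier on d64_ makes Double values assignable as well as copyable. A minimal sketch:

    // sketch.cc - hypothetical standalone example, not V8 code
    #include <cstdint>

    struct ConstMember {
      const uint64_t v;  // const member: copy construction OK, operator= deleted
    };

    struct PlainMember {
      uint64_t v;  // plain member: copy construction and assignment both OK
    };

    int main() {
      ConstMember a{1};
      ConstMember b = a;   // fine: the implicit copy constructor still works
      // a = b;            // error: implicit copy assignment operator is deleted
      PlainMember c{1}, d{2};
      c = d;               // fine once the member is no longer const
      return 0;
    }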
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc
index 76993cf650..7d5f4258ef 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/dtoa.cc
@@ -24,7 +24,6 @@ static BignumDtoaMode DtoaToBignumDtoaMode(DtoaMode dtoa_mode) {
case DTOA_PRECISION: return BIGNUM_DTOA_PRECISION;
default:
UNREACHABLE();
- return BIGNUM_DTOA_SHORTEST; // To silence compiler.
}
}
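The dtoa.cc hunk above is one instance of a pattern repeated throughout this update (see also elements-kind.cc and elements.cc below): dummy return statements after UNREACHABLE() are dropped. A minimal sketch of why that is safe, assuming UNREACHABLE() expands to a call the compiler treats as noreturn:

    // sketch.cc - hypothetical standalone example, not V8 code
    #include <cstdlib>

    [[noreturn]] inline void Unreachable() { std::abort(); }

    enum class Mode { kShortest, kPrecision };

    int DigitsFor(Mode mode) {
      switch (mode) {
        case Mode::kShortest:
          return 17;
        case Mode::kPrecision:
          return 20;
      }
      // The call below is known not to return, so no dummy
      // "return 0;  // To silence compiler." is needed after it.
      Unreachable();
    }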
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
deleted file mode 100644
index f8b1bd9b2f..0000000000
--- a/deps/v8/src/effects.h
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_EFFECTS_H_
-#define V8_EFFECTS_H_
-
-#include "src/ast/ast-types.h"
-
-namespace v8 {
-namespace internal {
-
-
-// A simple struct to represent (write) effects. A write is represented as a
-// modification of type bounds (e.g. of a variable).
-//
-// An effect can either be definite, if the write is known to have taken place,
-// or 'possible', if it was optional. The difference is relevant when composing
-// effects.
-//
-// There are two ways to compose effects: sequentially (they happen one after
-// the other) or alternatively (either one or the other happens). A definite
-// effect cancels out any previous effect upon sequencing. A possible effect
-// merges into a previous effect, i.e., type bounds are merged. Alternative
-// composition always merges bounds. It yields a possible effect if at least
-// one was only possible.
-struct Effect {
- enum Modality { POSSIBLE, DEFINITE };
-
- Modality modality;
- AstBounds bounds;
-
- Effect() : modality(DEFINITE) {}
- explicit Effect(AstBounds b, Modality m = DEFINITE)
- : modality(m), bounds(b) {}
-
- // The unknown effect.
- static Effect Unknown(Zone* zone) {
- return Effect(AstBounds::Unbounded(), POSSIBLE);
- }
-
- static Effect Forget(Zone* zone) {
- return Effect(AstBounds::Unbounded(), DEFINITE);
- }
-
- // Sequential composition, as in 'e1; e2'.
- static Effect Seq(Effect e1, Effect e2, Zone* zone) {
- if (e2.modality == DEFINITE) return e2;
- return Effect(AstBounds::Either(e1.bounds, e2.bounds, zone), e1.modality);
- }
-
- // Alternative composition, as in 'cond ? e1 : e2'.
- static Effect Alt(Effect e1, Effect e2, Zone* zone) {
- return Effect(AstBounds::Either(e1.bounds, e2.bounds, zone),
- e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
- }
-};
-
-
-// Classes encapsulating sets of effects on variables.
-//
-// Effects maps variables to effects and supports sequential and alternative
-// composition.
-//
-// NestedEffects is an incremental representation that supports persistence
-// through functional extension. It represents the map as an adjoin of a list
-// of maps, whose tail can be shared.
-//
-// Both classes provide similar interfaces, implemented in parts through the
-// EffectsMixin below (using sandwich style, to work around the style guide's
-// MI restriction).
-//
-// We also (ab)use Effects/NestedEffects as a representation for abstract
-// store typings. In that case, only definite effects are of interest.
-
-template<class Var, class Base, class Effects>
-class EffectsMixin: public Base {
- public:
- explicit EffectsMixin(Zone* zone) : Base(zone) {}
-
- Effect Lookup(Var var) {
- Locator locator;
- return this->Find(var, &locator)
- ? locator.value() : Effect::Unknown(Base::zone());
- }
-
- AstBounds LookupBounds(Var var) {
- Effect effect = Lookup(var);
- return effect.modality == Effect::DEFINITE ? effect.bounds
- : AstBounds::Unbounded();
- }
-
- // Sequential composition.
- void Seq(Var var, Effect effect) {
- Locator locator;
- if (!this->Insert(var, &locator)) {
- effect = Effect::Seq(locator.value(), effect, Base::zone());
- }
- locator.set_value(effect);
- }
-
- void Seq(Effects that) {
- SeqMerger<EffectsMixin> merge = { *this };
- that.ForEach(&merge);
- }
-
- // Alternative composition.
- void Alt(Var var, Effect effect) {
- Locator locator;
- if (!this->Insert(var, &locator)) {
- effect = Effect::Alt(locator.value(), effect, Base::zone());
- }
- locator.set_value(effect);
- }
-
- void Alt(Effects that) {
- AltWeakener<EffectsMixin> weaken = { *this, that };
- this->ForEach(&weaken);
- AltMerger<EffectsMixin> merge = { *this };
- that.ForEach(&merge);
- }
-
- // Invalidation.
- void Forget() {
- Overrider override = {
- Effect::Forget(Base::zone()), Effects(Base::zone()) };
- this->ForEach(&override);
- Seq(override.effects);
- }
-
- protected:
- typedef typename Base::Locator Locator;
-
- template<class Self>
- struct SeqMerger {
- void Call(Var var, Effect effect) { self.Seq(var, effect); }
- Self self;
- };
-
- template<class Self>
- struct AltMerger {
- void Call(Var var, Effect effect) { self.Alt(var, effect); }
- Self self;
- };
-
- template<class Self>
- struct AltWeakener {
- void Call(Var var, Effect effect) {
- if (effect.modality == Effect::DEFINITE && !other.Contains(var)) {
- effect.modality = Effect::POSSIBLE;
- Locator locator;
- self.Insert(var, &locator);
- locator.set_value(effect);
- }
- }
- Self self;
- Effects other;
- };
-
- struct Overrider {
- void Call(Var var, Effect effect) { effects.Seq(var, new_effect); }
- Effect new_effect;
- Effects effects;
- };
-};
-
-
-template<class Var, Var kNoVar> class Effects;
-template<class Var, Var kNoVar> class NestedEffectsBase;
-
-template<class Var, Var kNoVar>
-class EffectsBase {
- public:
- explicit EffectsBase(Zone* zone) : map_(new(zone) Mapping(zone)) {}
-
- bool IsEmpty() { return map_->is_empty(); }
-
- protected:
- friend class NestedEffectsBase<Var, kNoVar>;
- friend class
- EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >;
-
- Zone* zone() { return map_->allocator().zone(); }
-
- struct SplayTreeConfig {
- typedef Var Key;
- typedef Effect Value;
- static const Var kNoKey = kNoVar;
- static Effect NoValue() { return Effect(); }
- static int Compare(int x, int y) { return y - x; }
- };
- typedef ZoneSplayTree<SplayTreeConfig> Mapping;
- typedef typename Mapping::Locator Locator;
-
- bool Contains(Var var) {
- DCHECK(var != kNoVar);
- return map_->Contains(var);
- }
- bool Find(Var var, Locator* locator) {
- DCHECK(var != kNoVar);
- return map_->Find(var, locator);
- }
- bool Insert(Var var, Locator* locator) {
- DCHECK(var != kNoVar);
- return map_->Insert(var, locator);
- }
-
- template<class Callback>
- void ForEach(Callback* callback) {
- return map_->ForEach(callback);
- }
-
- private:
- Mapping* map_;
-};
-
-template<class Var, Var kNoVar>
-const Var EffectsBase<Var, kNoVar>::SplayTreeConfig::kNoKey;
-
-template<class Var, Var kNoVar>
-class Effects: public
- EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
- public:
- explicit Effects(Zone* zone)
- : EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(zone)
- {}
-};
-
-
-template<class Var, Var kNoVar>
-class NestedEffectsBase {
- public:
- explicit NestedEffectsBase(Zone* zone) : node_(new(zone) Node(zone)) {}
-
- template<class Callback>
- void ForEach(Callback* callback) {
- if (node_->previous) NestedEffectsBase(node_->previous).ForEach(callback);
- node_->effects.ForEach(callback);
- }
-
- Effects<Var, kNoVar> Top() { return node_->effects; }
-
- bool IsEmpty() {
- for (Node* node = node_; node != NULL; node = node->previous) {
- if (!node->effects.IsEmpty()) return false;
- }
- return true;
- }
-
- protected:
- typedef typename EffectsBase<Var, kNoVar>::Locator Locator;
-
- Zone* zone() { return node_->zone; }
-
- void push() { node_ = new(node_->zone) Node(node_->zone, node_); }
- void pop() { node_ = node_->previous; }
- bool is_empty() { return node_ == NULL; }
-
- bool Contains(Var var) {
- DCHECK(var != kNoVar);
- for (Node* node = node_; node != NULL; node = node->previous) {
- if (node->effects.Contains(var)) return true;
- }
- return false;
- }
-
- bool Find(Var var, Locator* locator) {
- DCHECK(var != kNoVar);
- for (Node* node = node_; node != NULL; node = node->previous) {
- if (node->effects.Find(var, locator)) return true;
- }
- return false;
- }
-
- bool Insert(Var var, Locator* locator);
-
- private:
- struct Node: ZoneObject {
- Zone* zone;
- Effects<Var, kNoVar> effects;
- Node* previous;
- explicit Node(Zone* zone, Node* previous = NULL)
- : zone(zone), effects(zone), previous(previous) {}
- };
-
- explicit NestedEffectsBase(Node* node) : node_(node) {}
-
- Node* node_;
-};
-
-
-template<class Var, Var kNoVar>
-bool NestedEffectsBase<Var, kNoVar>::Insert(Var var, Locator* locator) {
- DCHECK(var != kNoVar);
- if (!node_->effects.Insert(var, locator)) return false;
- Locator shadowed;
- for (Node* node = node_->previous; node != NULL; node = node->previous) {
- if (node->effects.Find(var, &shadowed)) {
- // Initialize with shadowed entry.
- locator->set_value(shadowed.value());
- return false;
- }
- }
- return true;
-}
-
-
-template<class Var, Var kNoVar>
-class NestedEffects: public
- EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
- public:
- explicit NestedEffects(Zone* zone) :
- EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(
- zone) {}
-
- // Create an extension of the current effect set. The current set should not
- // be modified while the extension is in use.
- NestedEffects Push() {
- NestedEffects result = *this;
- result.push();
- return result;
- }
-
- NestedEffects Pop() {
- NestedEffects result = *this;
- result.pop();
- DCHECK(!this->is_empty());
- return result;
- }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_EFFECTS_H_
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 8651b7681d..9878a09d9a 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -27,14 +27,14 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case INT32_ELEMENTS:
case FLOAT32_ELEMENTS:
return 2;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
return 3;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -43,10 +43,8 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
return kPointerSizeLog2;
case NO_ELEMENTS:
UNREACHABLE();
- return 0;
}
UNREACHABLE();
- return 0;
}
@@ -73,21 +71,21 @@ struct InitializeFastElementsKindSequence {
ElementsKind* fast_elements_kind_sequence =
new ElementsKind[kFastElementsKindCount];
*fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
- STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
- fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
- fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
- fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
- fast_elements_kind_sequence[4] = FAST_ELEMENTS;
- fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
+ fast_elements_kind_sequence[0] = PACKED_SMI_ELEMENTS;
+ fast_elements_kind_sequence[1] = HOLEY_SMI_ELEMENTS;
+ fast_elements_kind_sequence[2] = PACKED_DOUBLE_ELEMENTS;
+ fast_elements_kind_sequence[3] = HOLEY_DOUBLE_ELEMENTS;
+ fast_elements_kind_sequence[4] = PACKED_ELEMENTS;
+ fast_elements_kind_sequence[5] = HOLEY_ELEMENTS;
// Verify that kFastElementsKindPackedToHoley is correct.
- STATIC_ASSERT(FAST_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
- FAST_HOLEY_SMI_ELEMENTS);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
- FAST_HOLEY_DOUBLE_ELEMENTS);
- STATIC_ASSERT(FAST_ELEMENTS + kFastElementsKindPackedToHoley ==
- FAST_HOLEY_ELEMENTS);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_SMI_ELEMENTS);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_DOUBLE_ELEMENTS);
+ STATIC_ASSERT(PACKED_ELEMENTS + kFastElementsKindPackedToHoley ==
+ HOLEY_ELEMENTS);
}
};
@@ -111,7 +109,6 @@ int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
}
}
UNREACHABLE();
- return 0;
}
@@ -134,21 +131,19 @@ bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
}
if (IsFastElementsKind(from_kind) && IsFastTransitionTarget(to_kind)) {
switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS;
- case FAST_HOLEY_SMI_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS;
- case FAST_DOUBLE_ELEMENTS:
- return to_kind != FAST_SMI_ELEMENTS &&
- to_kind != FAST_HOLEY_SMI_ELEMENTS &&
- to_kind != FAST_DOUBLE_ELEMENTS;
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return to_kind == FAST_ELEMENTS ||
- to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_ELEMENTS:
- return to_kind == FAST_HOLEY_ELEMENTS;
- case FAST_HOLEY_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS;
+ case HOLEY_SMI_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS;
+ case PACKED_DOUBLE_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS &&
+ to_kind != HOLEY_SMI_ELEMENTS &&
+ to_kind != PACKED_DOUBLE_ELEMENTS;
+ case HOLEY_DOUBLE_ELEMENTS:
+ return to_kind == PACKED_ELEMENTS || to_kind == HOLEY_ELEMENTS;
+ case PACKED_ELEMENTS:
+ return to_kind == HOLEY_ELEMENTS;
+ case HOLEY_ELEMENTS:
return false;
default:
return false;
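For reference, the switch above encodes the following one-way generalization order over the renamed fast kinds (an illustrative summary derived from the hunk, not part of the diff):

    PACKED_SMI_ELEMENTS    -> HOLEY_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS,
                              HOLEY_DOUBLE_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS
    HOLEY_SMI_ELEMENTS     -> PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
                              PACKED_ELEMENTS, HOLEY_ELEMENTS
    PACKED_DOUBLE_ELEMENTS -> HOLEY_DOUBLE_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS
    HOLEY_DOUBLE_ELEMENTS  -> PACKED_ELEMENTS, HOLEY_ELEMENTS
    PACKED_ELEMENTS        -> HOLEY_ELEMENTS
    HOLEY_ELEMENTS         -> (terminal; no further fast transitions)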
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index e3485bed46..838fa47769 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -14,18 +14,18 @@ namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
- FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
+ PACKED_SMI_ELEMENTS,
+ HOLEY_SMI_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
- // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+ // efficiently check maps for this and the PACKED_SMI_ELEMENTS kind
// together at once.
- FAST_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
+ PACKED_ELEMENTS,
+ HOLEY_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
- FAST_DOUBLE_ELEMENTS,
- FAST_HOLEY_DOUBLE_ELEMENTS,
+ PACKED_DOUBLE_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
@@ -54,28 +54,28 @@ enum ElementsKind {
NO_ELEMENTS,
// Derived constants from ElementsKind.
- FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
+ FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
- FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
- LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
+ FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
+ LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
- TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
+ TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
- FIRST_FAST_ELEMENTS_KIND + 1;
+const int kFastElementsKindCount =
+ LAST_FAST_ELEMENTS_KIND - FIRST_FAST_ELEMENTS_KIND + 1;
// The number to add to a packed elements kind to reach a holey elements kind
const int kFastElementsKindPackedToHoley =
- FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS;
+ HOLEY_SMI_ELEMENTS - PACKED_SMI_ELEMENTS;
int ElementsKindToShiftSize(ElementsKind elements_kind);
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
-inline ElementsKind GetInitialFastElementsKind() { return FAST_SMI_ELEMENTS; }
+inline ElementsKind GetInitialFastElementsKind() { return PACKED_SMI_ELEMENTS; }
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
@@ -101,29 +101,24 @@ inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
}
-
inline bool IsTerminalElementsKind(ElementsKind kind) {
return kind == TERMINAL_FAST_ELEMENTS_KIND ||
IsFixedTypedArrayElementsKind(kind);
}
-
inline bool IsFastElementsKind(ElementsKind kind) {
STATIC_ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
- return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
+ return kind <= HOLEY_DOUBLE_ELEMENTS;
}
-
inline bool IsTransitionElementsKind(ElementsKind kind) {
return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind) ||
kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS ||
kind == FAST_STRING_WRAPPER_ELEMENTS;
}
-
-inline bool IsFastDoubleElementsKind(ElementsKind kind) {
- return kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_HOLEY_DOUBLE_ELEMENTS;
+inline bool IsDoubleElementsKind(ElementsKind kind) {
+ return kind == PACKED_DOUBLE_ELEMENTS || kind == HOLEY_DOUBLE_ELEMENTS;
}
@@ -133,94 +128,80 @@ inline bool IsFixedFloatElementsKind(ElementsKind kind) {
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
- return IsFastDoubleElementsKind(kind) || IsFixedFloatElementsKind(kind);
+ return IsDoubleElementsKind(kind) || IsFixedFloatElementsKind(kind);
}
-
-inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_HOLEY_SMI_ELEMENTS ||
- kind == FAST_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
+inline bool IsSmiOrObjectElementsKind(ElementsKind kind) {
+ return kind == PACKED_SMI_ELEMENTS || kind == HOLEY_SMI_ELEMENTS ||
+ kind == PACKED_ELEMENTS || kind == HOLEY_ELEMENTS;
}
-
-inline bool IsFastSmiElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_HOLEY_SMI_ELEMENTS;
+inline bool IsSmiElementsKind(ElementsKind kind) {
+ return kind == PACKED_SMI_ELEMENTS || kind == HOLEY_SMI_ELEMENTS;
}
inline bool IsFastNumberElementsKind(ElementsKind kind) {
- return IsFastSmiElementsKind(kind) || IsFastDoubleElementsKind(kind);
+ return IsSmiElementsKind(kind) || IsDoubleElementsKind(kind);
}
-
-inline bool IsFastObjectElementsKind(ElementsKind kind) {
- return kind == FAST_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
+inline bool IsObjectElementsKind(ElementsKind kind) {
+ return kind == PACKED_ELEMENTS || kind == HOLEY_ELEMENTS;
}
-
-inline bool IsFastHoleyElementsKind(ElementsKind kind) {
- return kind == FAST_HOLEY_SMI_ELEMENTS ||
- kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
- kind == FAST_HOLEY_ELEMENTS;
+inline bool IsHoleyElementsKind(ElementsKind kind) {
+ return kind == HOLEY_SMI_ELEMENTS || kind == HOLEY_DOUBLE_ELEMENTS ||
+ kind == HOLEY_ELEMENTS;
}
-
-inline bool IsHoleyElementsKind(ElementsKind kind) {
- return IsFastHoleyElementsKind(kind) ||
- kind == DICTIONARY_ELEMENTS;
+inline bool IsHoleyOrDictionaryElementsKind(ElementsKind kind) {
+ return IsHoleyElementsKind(kind) || kind == DICTIONARY_ELEMENTS;
}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_ELEMENTS;
+ return kind == PACKED_SMI_ELEMENTS || kind == PACKED_DOUBLE_ELEMENTS ||
+ kind == PACKED_ELEMENTS;
}
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
- if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
- return FAST_SMI_ELEMENTS;
+ if (holey_kind == HOLEY_SMI_ELEMENTS) {
+ return PACKED_SMI_ELEMENTS;
}
- if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
- return FAST_DOUBLE_ELEMENTS;
+ if (holey_kind == HOLEY_DOUBLE_ELEMENTS) {
+ return PACKED_DOUBLE_ELEMENTS;
}
- if (holey_kind == FAST_HOLEY_ELEMENTS) {
- return FAST_ELEMENTS;
+ if (holey_kind == HOLEY_ELEMENTS) {
+ return PACKED_ELEMENTS;
}
return holey_kind;
}
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
- if (packed_kind == FAST_SMI_ELEMENTS) {
- return FAST_HOLEY_SMI_ELEMENTS;
+ if (packed_kind == PACKED_SMI_ELEMENTS) {
+ return HOLEY_SMI_ELEMENTS;
}
- if (packed_kind == FAST_DOUBLE_ELEMENTS) {
- return FAST_HOLEY_DOUBLE_ELEMENTS;
+ if (packed_kind == PACKED_DOUBLE_ELEMENTS) {
+ return HOLEY_DOUBLE_ELEMENTS;
}
- if (packed_kind == FAST_ELEMENTS) {
- return FAST_HOLEY_ELEMENTS;
+ if (packed_kind == PACKED_ELEMENTS) {
+ return HOLEY_ELEMENTS;
}
return packed_kind;
}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
- DCHECK(IsFastSmiElementsKind(from_kind));
- return (from_kind == FAST_SMI_ELEMENTS)
- ? FAST_ELEMENTS
- : FAST_HOLEY_ELEMENTS;
+ DCHECK(IsSmiElementsKind(from_kind));
+ return (from_kind == PACKED_SMI_ELEMENTS) ? PACKED_ELEMENTS : HOLEY_ELEMENTS;
}
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
- (IsFastSmiElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind));
+ (IsSmiElementsKind(from_kind) && IsObjectElementsKind(to_kind));
}
@@ -239,7 +220,7 @@ inline ElementsKind GetMoreGeneralElementsKind(ElementsKind from_kind,
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
- from_kind != TERMINAL_FAST_ELEMENTS_KIND;
+ from_kind != TERMINAL_FAST_ELEMENTS_KIND;
}
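As an illustrative aside (not part of the diff): the renaming above is mechanical, FAST_* becomes PACKED_* and FAST_HOLEY_* becomes HOLEY_*, and the kinds keep their meaning. Typical JavaScript arrays map onto the renamed kinds roughly as in the comment table below, and the renamed predicates compose the same way as before; the snippet assumes this patch's src/elements-kind.h is on the include path.

    // sketch.cc - illustrative only, not V8 code
    #include "src/elements-kind.h"

    // Rough mapping of array literals to the renamed kinds:
    //   [1, 2, 3]     -> PACKED_SMI_ELEMENTS     (was FAST_SMI_ELEMENTS)
    //   [1, , 3]      -> HOLEY_SMI_ELEMENTS      (was FAST_HOLEY_SMI_ELEMENTS)
    //   [1.5, 2.5]    -> PACKED_DOUBLE_ELEMENTS  (was FAST_DOUBLE_ELEMENTS)
    //   [1.5, , 2.5]  -> HOLEY_DOUBLE_ELEMENTS   (was FAST_HOLEY_DOUBLE_ELEMENTS)
    //   [1, "x", {}]  -> PACKED_ELEMENTS         (was FAST_ELEMENTS)
    //   [1, , "x"]    -> HOLEY_ELEMENTS          (was FAST_HOLEY_ELEMENTS)
    const char* DescribeFastKind(v8::internal::ElementsKind kind) {
      using namespace v8::internal;
      if (!IsFastElementsKind(kind)) return "not a fast kind";
      if (IsSmiElementsKind(kind)) return "Smi elements";
      if (IsDoubleElementsKind(kind)) return "unboxed double elements";
      return "tagged object elements";
    }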
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 2f6cf4e749..716cc00b9a 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -63,13 +63,13 @@ enum Where { AT_START, AT_END };
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
- V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
- V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, FixedArray) \
- V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
- V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
- V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
- V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
+ V(FastPackedSmiElementsAccessor, PACKED_SMI_ELEMENTS, FixedArray) \
+ V(FastHoleySmiElementsAccessor, HOLEY_SMI_ELEMENTS, FixedArray) \
+ V(FastPackedObjectElementsAccessor, PACKED_ELEMENTS, FixedArray) \
+ V(FastHoleyObjectElementsAccessor, HOLEY_ELEMENTS, FixedArray) \
+ V(FastPackedDoubleElementsAccessor, PACKED_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
+ V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, SeededNumberDictionary) \
V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
@@ -95,12 +95,14 @@ template<ElementsKind Kind> class ElementsKindTraits {
typedef FixedArrayBase BackingStore;
};
-#define ELEMENTS_TRAITS(Class, KindParam, Store) \
-template<> class ElementsKindTraits<KindParam> { \
- public: /* NOLINT */ \
- static const ElementsKind Kind = KindParam; \
- typedef Store BackingStore; \
-};
+#define ELEMENTS_TRAITS(Class, KindParam, Store) \
+ template <> \
+ class ElementsKindTraits<KindParam> { \
+ public: /* NOLINT */ \
+ static constexpr ElementsKind Kind = KindParam; \
+ typedef Store BackingStore; \
+ }; \
+ constexpr ElementsKind ElementsKindTraits<KindParam>::Kind;
ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
@@ -140,11 +142,11 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedArray* to = FixedArray::cast(to_base);
- DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
- DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
+ DCHECK(IsSmiOrObjectElementsKind(from_kind));
+ DCHECK(IsSmiOrObjectElementsKind(to_kind));
WriteBarrierMode write_barrier_mode =
- (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
+ (IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind))
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
for (int i = 0; i < copy_size; i++) {
@@ -175,16 +177,15 @@ static void CopyDictionaryToObjectElements(
}
}
DCHECK(to_base != from_base);
- DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
+ DCHECK(IsSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
FixedArray* to = FixedArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
- WriteBarrierMode write_barrier_mode = IsFastObjectElementsKind(to_kind)
- ? UPDATE_WRITE_BARRIER
- : SKIP_WRITE_BARRIER;
+ WriteBarrierMode write_barrier_mode =
+ IsObjectElementsKind(to_kind) ? UPDATE_WRITE_BARRIER : SKIP_WRITE_BARRIER;
Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(isolate, i + from_start);
@@ -314,7 +315,7 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
if (hole_or_smi == the_hole) {
to->set_the_hole(to_start);
} else {
- to->set(to_start, Smi::cast(hole_or_smi)->value());
+ to->set(to_start, Smi::ToInt(hole_or_smi));
}
}
}
@@ -354,7 +355,7 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
DCHECK(!smi->IsTheHole(from->GetIsolate()));
- to->set(to_start, Smi::cast(smi)->value());
+ to->set(to_start, Smi::ToInt(smi));
}
}
@@ -451,7 +452,10 @@ static void SortIndices(
Handle<FixedArray> indices, uint32_t sort_size,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
struct {
- bool operator()(Object* a, Object* b) {
+ bool operator()(const base::AtomicElement<Object*>& elementA,
+ const base::AtomicElement<Object*>& elementB) {
+ const Object* a = elementA.value();
+ const Object* b = elementB.value();
if (a->IsSmi() || !a->IsUndefined(HeapObject::cast(a)->GetIsolate())) {
if (!b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate())) {
return true;
@@ -461,8 +465,11 @@ static void SortIndices(
return !b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate());
}
} cmp;
- Object** start =
- reinterpret_cast<Object**>(indices->GetFirstElementAddress());
+ // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ base::AtomicElement<Object*>* start =
+ reinterpret_cast<base::AtomicElement<Object*>*>(
+ indices->GetFirstElementAddress());
std::sort(start, start + sort_size, cmp);
if (write_barrier_mode != SKIP_WRITE_BARRIER) {
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(indices->GetIsolate()->heap(), *indices,
@@ -539,19 +546,18 @@ class ElementsAccessorBase : public ElementsAccessor {
static ElementsKind kind() { return ElementsTraits::Kind; }
- static void ValidateContents(Handle<JSObject> holder, int length) {
- }
+ static void ValidateContents(JSObject* holder, int length) {}
- static void ValidateImpl(Handle<JSObject> holder) {
- Handle<FixedArrayBase> fixed_array_base(holder->elements());
+ static void ValidateImpl(JSObject* holder) {
+ FixedArrayBase* fixed_array_base = holder->elements();
if (!fixed_array_base->IsHeapObject()) return;
// Arrays that have been shifted in place can't be verified.
if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
- Object* length_obj = Handle<JSArray>::cast(holder)->length();
+ Object* length_obj = JSArray::cast(holder)->length();
if (length_obj->IsSmi()) {
- length = Smi::cast(length_obj)->value();
+ length = Smi::ToInt(length_obj);
}
} else {
length = fixed_array_base->length();
@@ -559,7 +565,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Subclass::ValidateContents(holder, length);
}
- void Validate(Handle<JSObject> holder) final {
+ void Validate(JSObject* holder) final {
DisallowHeapAllocation no_gc;
Subclass::ValidateImpl(holder);
}
@@ -579,9 +585,9 @@ class ElementsAccessorBase : public ElementsAccessor {
}
static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
- if (!IsHoleyElementsKind(kind())) return;
+ if (!IsHoleyOrDictionaryElementsKind(kind())) return;
Handle<FixedArrayBase> backing_store(array->elements());
- int length = Smi::cast(array->length())->value();
+ int length = Smi::ToInt(array->length());
if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) {
return;
}
@@ -668,7 +674,6 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
uint32_t push_sized) {
UNREACHABLE();
- return 0;
}
uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
@@ -679,7 +684,6 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
uint32_t unshift_size) {
UNREACHABLE();
- return 0;
}
Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
@@ -695,14 +699,12 @@ class ElementsAccessorBase : public ElementsAccessor {
static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
uint32_t end) {
UNREACHABLE();
- return Handle<JSObject>();
}
static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
uint32_t start, uint32_t end,
Handle<JSObject> result) {
UNREACHABLE();
- return Handle<JSObject>();
}
Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
@@ -715,7 +717,6 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t start, uint32_t delete_count,
Arguments* args, uint32_t add_count) {
UNREACHABLE();
- return Handle<JSArray>();
}
Handle<Object> Pop(Handle<JSArray> receiver) final {
@@ -724,7 +725,6 @@ class ElementsAccessorBase : public ElementsAccessor {
static Handle<Object> PopImpl(Handle<JSArray> receiver) {
UNREACHABLE();
- return Handle<Object>();
}
Handle<Object> Shift(Handle<JSArray> receiver) final {
@@ -733,7 +733,6 @@ class ElementsAccessorBase : public ElementsAccessor {
static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
UNREACHABLE();
- return Handle<Object>();
}
void SetLength(Handle<JSArray> array, uint32_t length) final {
@@ -751,7 +750,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (old_length < length) {
ElementsKind kind = array->GetElementsKind();
- if (!IsFastHoleyElementsKind(kind)) {
+ if (!IsHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
JSObject::TransitionElementsKind(array, kind);
}
@@ -763,7 +762,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (length == 0) {
array->initialize_elements();
} else if (length <= capacity) {
- if (IsFastSmiOrObjectElementsKind(kind())) {
+ if (IsSmiOrObjectElementsKind(kind())) {
JSObject::EnsureWritableFastElements(array);
if (array->elements() != *backing_store) {
backing_store = handle(array->elements(), isolate);
@@ -778,6 +777,10 @@ class ElementsAccessorBase : public ElementsAccessor {
? (capacity - length) / 2
: capacity - length;
isolate->heap()->RightTrimFixedArray(*backing_store, elements_to_trim);
+ // Fill the non-trimmed elements with holes.
+ BackingStore::cast(*backing_store)
+ ->FillWithHoles(length,
+ std::min(old_length, capacity - elements_to_trim));
} else {
// Otherwise, fill the unused tail with holes.
BackingStore::cast(*backing_store)->FillWithHoles(length, old_length);
@@ -789,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
array->set_length(Smi::FromInt(length));
- JSObject::ValidateElements(array);
+ JSObject::ValidateElements(*array);
}
uint32_t NumberOfElements(JSObject* receiver) final {
@@ -805,7 +808,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (receiver->IsJSArray()) {
DCHECK(JSArray::cast(receiver)->length()->IsSmi());
return static_cast<uint32_t>(
- Smi::cast(JSArray::cast(receiver)->length())->value());
+ Smi::ToInt(JSArray::cast(receiver)->length()));
}
return Subclass::GetCapacityImpl(receiver, elements);
}
@@ -836,7 +839,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t dst_index, int copy_size) {
Isolate* isolate = object->GetIsolate();
Handle<FixedArrayBase> new_elements;
- if (IsFastDoubleElementsKind(kind())) {
+ if (IsDoubleElementsKind(kind())) {
new_elements = isolate->factory()->NewFixedDoubleArray(capacity);
} else {
new_elements = isolate->factory()->NewUninitializedFixedArray(capacity);
@@ -844,7 +847,7 @@ class ElementsAccessorBase : public ElementsAccessor {
int packed_size = kPackedSizeNotKnown;
if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) {
- packed_size = Smi::cast(JSArray::cast(*object)->length())->value();
+ packed_size = Smi::ToInt(JSArray::cast(*object)->length());
}
Subclass::CopyElementsImpl(*old_elements, src_index, *new_elements,
@@ -858,7 +861,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<Map> from_map = handle(object->map());
ElementsKind from_kind = from_map->elements_kind();
ElementsKind to_kind = to_map->elements_kind();
- if (IsFastHoleyElementsKind(from_kind)) {
+ if (IsHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
if (from_kind != to_kind) {
@@ -869,16 +872,14 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArrayBase> from_elements(object->elements());
if (object->elements() == object->GetHeap()->empty_fixed_array() ||
- IsFastDoubleElementsKind(from_kind) ==
- IsFastDoubleElementsKind(to_kind)) {
+ IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
JSObject::MigrateToMap(object, to_map);
} else {
- DCHECK((IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) ||
- (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)));
+ DCHECK(
+ (IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
+ (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
object, from_elements, from_kind, capacity);
@@ -895,7 +896,7 @@ class ElementsAccessorBase : public ElementsAccessor {
static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
uint32_t capacity) {
ElementsKind from_kind = object->GetElementsKind();
- if (IsFastSmiOrObjectElementsKind(from_kind)) {
+ if (IsSmiOrObjectElementsKind(from_kind)) {
// Array optimizations rely on the prototype lookups of Array objects
// always returning undefined. If there is a store to the initial
// prototype object, make sure all of these optimizations are invalidated.
@@ -904,8 +905,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArrayBase> old_elements(object->elements());
// This method should only be called if there's a reason to update the
// elements.
- DCHECK(IsFastDoubleElementsKind(from_kind) !=
- IsFastDoubleElementsKind(kind()) ||
+ DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) ||
IsDictionaryElementsKind(from_kind) ||
static_cast<uint32_t>(old_elements->length()) < capacity);
Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
@@ -918,7 +918,8 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArrayBase> elements =
ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
- if (IsHoleyElementsKind(from_kind)) to_kind = GetHoleyElementsKind(to_kind);
+ if (IsHoleyOrDictionaryElementsKind(from_kind))
+ to_kind = GetHoleyElementsKind(to_kind);
Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
JSObject::SetMapAndElements(object, new_map, elements);
@@ -982,8 +983,7 @@ class ElementsAccessorBase : public ElementsAccessor {
bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
if (is_packed) {
- packed_size =
- Smi::cast(JSArray::cast(from_holder)->length())->value();
+ packed_size = Smi::ToInt(JSArray::cast(from_holder)->length());
if (copy_size >= 0 && packed_size > copy_size) {
packed_size = copy_size;
}
@@ -1017,7 +1017,6 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<JSObject> destination,
size_t length) {
UNREACHABLE();
- return *source;
}
Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
@@ -1027,7 +1026,6 @@ class ElementsAccessorBase : public ElementsAccessor {
static Handle<SeededNumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
UNREACHABLE();
- return Handle<SeededNumberDictionary>();
}
Maybe<bool> CollectValuesOrEntries(Isolate* isolate, Handle<JSObject> object,
@@ -1157,7 +1155,7 @@ class ElementsAccessorBase : public ElementsAccessor {
// store size as a last emergency measure if we cannot allocate the big
// array.
if (!raw_array.ToHandle(&combined_keys)) {
- if (IsHoleyElementsKind(kind())) {
+ if (IsHoleyOrDictionaryElementsKind(kind())) {
// If we overestimate the result list size we might end up in the
// large-object space which doesn't free memory on shrinking the list.
// Hence we try to estimate the final size for holey backing stores more
@@ -1191,12 +1189,13 @@ class ElementsAccessorBase : public ElementsAccessor {
}
// Copy over the passed-in property keys.
- CopyObjectToObjectElements(*keys, FAST_ELEMENTS, 0, *combined_keys,
- FAST_ELEMENTS, nof_indices, nof_property_keys);
+ CopyObjectToObjectElements(*keys, PACKED_ELEMENTS, 0, *combined_keys,
+ PACKED_ELEMENTS, nof_indices, nof_property_keys);
// For holey elements and arguments we might have to shrink the collected
// keys since the estimates might be off.
- if (IsHoleyElementsKind(kind()) || IsSloppyArgumentsElementsKind(kind())) {
+ if (IsHoleyOrDictionaryElementsKind(kind()) ||
+ IsSloppyArgumentsElementsKind(kind())) {
// Shrink combined_keys to the final size.
int final_size = nof_indices + nof_property_keys;
DCHECK_LE(final_size, combined_keys->length());
@@ -1225,7 +1224,6 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<Object> obj_value, uint32_t start,
uint32_t end) {
UNREACHABLE();
- return *receiver;
}
Object* Fill(Isolate* isolate, Handle<JSObject> receiver,
@@ -1266,7 +1264,6 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<Object> value,
uint32_t start_from) {
UNREACHABLE();
- return Just<int64_t>(-1);
}
Maybe<int64_t> LastIndexOfValue(Isolate* isolate, Handle<JSObject> receiver,
@@ -1288,7 +1285,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
- if (IsHoleyElementsKind(kind())) {
+ if (IsHoleyOrDictionaryElementsKind(kind())) {
return index < length &&
!BackingStore::cast(backing_store)
->is_the_hole(isolate, index)
@@ -1308,11 +1305,11 @@ class ElementsAccessorBase : public ElementsAccessor {
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
- return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
- return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
@@ -1327,7 +1324,6 @@ class ElementsAccessorBase : public ElementsAccessor {
static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
Handle<JSArray> array) {
UNREACHABLE();
- return Handle<FixedArray>();
}
private:
@@ -1367,44 +1363,44 @@ class DictionaryElementsAccessor
int capacity = dict->Capacity();
uint32_t old_length = 0;
CHECK(array->length()->ToArrayLength(&old_length));
- if (length < old_length) {
- if (dict->requires_slow_elements()) {
- // Find last non-deletable element in range of elements to be
- // deleted and adjust range accordingly.
- for (int entry = 0; entry < capacity; entry++) {
- DisallowHeapAllocation no_gc;
- Object* index = dict->KeyAt(entry);
- if (index->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(index->Number());
- if (length <= number && number < old_length) {
- PropertyDetails details = dict->DetailsAt(entry);
- if (!details.IsConfigurable()) length = number + 1;
+ {
+ DisallowHeapAllocation no_gc;
+ if (length < old_length) {
+ if (dict->requires_slow_elements()) {
+ // Find last non-deletable element in range of elements to be
+ // deleted and adjust range accordingly.
+ for (int entry = 0; entry < capacity; entry++) {
+ Object* index = dict->KeyAt(entry);
+ if (dict->IsKey(isolate, index)) {
+ uint32_t number = static_cast<uint32_t>(index->Number());
+ if (length <= number && number < old_length) {
+ PropertyDetails details = dict->DetailsAt(entry);
+ if (!details.IsConfigurable()) length = number + 1;
+ }
}
}
}
- }
- if (length == 0) {
- // Flush the backing store.
- JSObject::ResetElements(array);
- } else {
- DisallowHeapAllocation no_gc;
- // Remove elements that should be deleted.
- int removed_entries = 0;
- Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
- for (int entry = 0; entry < capacity; entry++) {
- Object* index = dict->KeyAt(entry);
- if (index->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(index->Number());
- if (length <= number && number < old_length) {
- dict->SetEntry(entry, the_hole_value, the_hole_value);
- removed_entries++;
+ if (length == 0) {
+ // Flush the backing store.
+ array->initialize_elements();
+ } else {
+ // Remove elements that should be deleted.
+ int removed_entries = 0;
+ for (int entry = 0; entry < capacity; entry++) {
+ Object* index = dict->KeyAt(entry);
+ if (dict->IsKey(isolate, index)) {
+ uint32_t number = static_cast<uint32_t>(index->Number());
+ if (length <= number && number < old_length) {
+ dict->ClearEntry(entry);
+ removed_entries++;
+ }
}
}
- }
- // Update the number of elements.
- dict->ElementsRemoved(removed_entries);
+ // Update the number of elements.
+ dict->ElementsRemoved(removed_entries);
+ }
}
}
@@ -1421,16 +1417,10 @@ class DictionaryElementsAccessor
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
- // TODO(verwaest): Remove reliance on index in Shrink.
Handle<SeededNumberDictionary> dict(
SeededNumberDictionary::cast(obj->elements()));
- uint32_t index = GetIndexForEntryImpl(*dict, entry);
- Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
- USE(result);
- DCHECK(result->IsTrue(dict->GetIsolate()));
- Handle<FixedArray> new_elements =
- SeededNumberDictionary::Shrink(dict, index);
- obj->set_elements(*new_elements);
+ dict = SeededNumberDictionary::DeleteEntry(dict, entry);
+ obj->set_elements(*dict);
}
static bool HasAccessorsImpl(JSObject* holder,
@@ -1443,7 +1433,6 @@ class DictionaryElementsAccessor
for (int i = 0; i < capacity; i++) {
Object* key = dict->KeyAt(i);
if (!dict->IsKey(isolate, key)) continue;
- DCHECK(!dict->IsDeleted(i));
PropertyDetails details = dict->DetailsAt(i);
if (details.kind() == kAccessor) return true;
}
@@ -1478,22 +1467,23 @@ class DictionaryElementsAccessor
if (attributes != NONE) object->RequireSlowElements(dictionary);
dictionary->ValueAtPut(entry, *value);
PropertyDetails details = dictionary->DetailsAt(entry);
- details = PropertyDetails(kData, attributes, details.dictionary_index(),
- PropertyCellType::kNoCell);
+ details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
+ details.dictionary_index());
+
dictionary->DetailsAtPut(entry, details);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
- PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
? JSObject::NormalizeElements(object)
: handle(SeededNumberDictionary::cast(object->elements()));
Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
- details, object);
+ SeededNumberDictionary::Add(dictionary, index, value, details);
+ new_dictionary->UpdateMaxNumberKey(index, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (dictionary.is_identical_to(new_dictionary)) return;
object->set_elements(*new_dictionary);
@@ -1541,7 +1531,6 @@ class DictionaryElementsAccessor
static uint32_t FilterKey(Handle<SeededNumberDictionary> dictionary,
int entry, Object* raw_key, PropertyFilter filter) {
- DCHECK(!dictionary->IsDeleted(entry));
DCHECK(raw_key->IsNumber());
DCHECK_LE(raw_key->Number(), kMaxUInt32);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1614,16 +1603,12 @@ class DictionaryElementsAccessor
KeyAccumulator* accumulator,
AddKeyConversion convert) {
Isolate* isolate = accumulator->isolate();
- Handle<Object> undefined = isolate->factory()->undefined_value();
- Handle<Object> the_hole = isolate->factory()->the_hole_value();
Handle<SeededNumberDictionary> dictionary(
SeededNumberDictionary::cast(receiver->elements()), isolate);
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
- if (k == *undefined) continue;
- if (k == *the_hole) continue;
- if (dictionary->IsDeleted(i)) continue;
+ if (!dictionary->IsKey(isolate, k)) continue;
Object* value = dictionary->ValueAt(i);
DCHECK(!value->IsTheHole(isolate));
DCHECK(!value->IsAccessorPair());
@@ -1725,15 +1710,18 @@ class DictionaryElementsAccessor
if (*dictionary == receiver->elements()) continue;
// Otherwise, bailout or update elements
+
+ // If switched to initial elements, return true if searching for
+ // undefined, and false otherwise.
+ if (receiver->map()->GetInitialElements() == receiver->elements()) {
+ return Just(search_for_hole);
+ }
+
+ // If switched to fast elements, continue with the correct accessor.
if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
- if (receiver->map()->GetInitialElements() == receiver->elements()) {
- // If switched to initial elements, return true if searching for
- // undefined, and false otherwise.
- return Just(search_for_hole);
- }
- // Otherwise, switch to slow path.
- return IncludesValueSlowPath(isolate, receiver, value, k + 1,
- length);
+ ElementsAccessor* accessor = receiver->GetElementsAccessor();
+ return accessor->IncludesValue(isolate, receiver, value, k + 1,
+ length);
}
dictionary = handle(
SeededNumberDictionary::cast(receiver->elements()), isolate);
@@ -1805,6 +1793,36 @@ class DictionaryElementsAccessor
}
return Just<int64_t>(-1);
}
+
+ static void ValidateContents(JSObject* holder, int length) {
+ DisallowHeapAllocation no_gc;
+#if DEBUG
+ DCHECK_EQ(holder->map()->elements_kind(), DICTIONARY_ELEMENTS);
+ if (!FLAG_enable_slow_asserts) return;
+ Isolate* isolate = holder->GetIsolate();
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(holder->elements());
+ // Validate the requires_slow_elements and max_number_key values.
+ int capacity = dictionary->Capacity();
+ bool requires_slow_elements = false;
+ int max_key = 0;
+ for (int i = 0; i < capacity; ++i) {
+ Object* k;
+ if (!dictionary->ToKey(isolate, i, &k)) continue;
+ DCHECK_LE(0.0, k->Number());
+ if (k->Number() > SeededNumberDictionary::kRequiresSlowElementsLimit) {
+ requires_slow_elements = true;
+ } else {
+ max_key = Max(max_key, Smi::ToInt(k));
+ }
+ }
+ if (requires_slow_elements) {
+ DCHECK(dictionary->requires_slow_elements());
+ } else if (!dictionary->requires_slow_elements()) {
+ DCHECK_LE(max_key, dictionary->max_number_key());
+ }
+#endif
+ }
};
@@ -1824,7 +1842,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Ensure that notifications fire if the array or object prototypes are
// normalizing.
- if (IsFastSmiOrObjectElementsKind(kind)) {
+ if (IsSmiOrObjectElementsKind(kind)) {
isolate->UpdateArrayProtectorOnNormalizeElements(object);
}
@@ -1834,15 +1852,21 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
PropertyDetails details = PropertyDetails::Empty();
int j = 0;
+ int max_number_key = -1;
for (int i = 0; j < capacity; i++) {
- if (IsHoleyElementsKind(kind)) {
+ if (IsHoleyOrDictionaryElementsKind(kind)) {
if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
}
+ max_number_key = i;
Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
- dictionary = SeededNumberDictionary::AddNumberEntry(dictionary, i, value,
- details, object);
+ dictionary = SeededNumberDictionary::Add(dictionary, i, value, details);
j++;
}
+
+ if (max_number_key > 0) {
+ dictionary->UpdateMaxNumberKey(static_cast<uint32_t>(max_number_key),
+ object);
+ }
return dictionary;
}
@@ -1870,7 +1894,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
Handle<FixedArrayBase> store) {
- DCHECK(obj->HasFastSmiOrObjectElements() || obj->HasFastDoubleElements() ||
+ DCHECK(obj->HasSmiOrObjectElements() || obj->HasDoubleElements() ||
obj->HasFastArgumentsElements() ||
obj->HasFastStringWrapperElements());
Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
@@ -1957,8 +1981,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
ElementsKind from_kind = object->GetElementsKind();
ElementsKind to_kind = Subclass::kind();
if (IsDictionaryElementsKind(from_kind) ||
- IsFastDoubleElementsKind(from_kind) !=
- IsFastDoubleElementsKind(to_kind) ||
+ IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind) ||
Subclass::GetCapacityImpl(*object, object->elements()) !=
new_capacity) {
Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
@@ -1966,8 +1989,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
JSObject::TransitionElementsKind(object, to_kind);
}
- if (IsFastSmiOrObjectElementsKind(from_kind)) {
- DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
+ if (IsSmiOrObjectElementsKind(from_kind)) {
+ DCHECK(IsSmiOrObjectElementsKind(to_kind));
JSObject::EnsureWritableFastElements(object);
}
}
@@ -1979,7 +2002,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (IsFastPackedElementsKind(kind)) {
JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
}
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
JSObject::EnsureWritableFastElements(obj);
}
DeleteCommon(obj, entry, handle(obj->elements()));
@@ -2016,16 +2039,15 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
- static void ValidateContents(Handle<JSObject> holder, int length) {
+ static void ValidateContents(JSObject* holder, int length) {
#if DEBUG
Isolate* isolate = holder->GetIsolate();
Heap* heap = isolate->heap();
- HandleScope scope(isolate);
- Handle<FixedArrayBase> elements(holder->elements(), isolate);
+ FixedArrayBase* elements = holder->elements();
Map* map = elements->map();
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
DCHECK_NE(map, heap->fixed_double_array_map());
- } else if (IsFastDoubleElementsKind(KindTraits::Kind)) {
+ } else if (IsDoubleElementsKind(KindTraits::Kind)) {
DCHECK_NE(map, heap->fixed_cow_array_map());
if (map == heap->fixed_array_map()) DCHECK_EQ(0, length);
} else {
@@ -2034,20 +2056,21 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (length == 0) return; // nothing to do!
#if ENABLE_SLOW_DCHECKS
DisallowHeapAllocation no_gc;
- Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
- if (IsFastSmiElementsKind(KindTraits::Kind)) {
+ BackingStore* backing_store = BackingStore::cast(elements);
+ if (IsSmiElementsKind(KindTraits::Kind)) {
+ HandleScope scope(isolate);
for (int i = 0; i < length; i++) {
- DCHECK(BackingStore::get(*backing_store, i, isolate)->IsSmi() ||
- (IsFastHoleyElementsKind(KindTraits::Kind) &&
+ DCHECK(BackingStore::get(backing_store, i, isolate)->IsSmi() ||
+ (IsHoleyElementsKind(KindTraits::Kind) &&
backing_store->is_the_hole(isolate, i)));
}
- } else if (KindTraits::Kind == FAST_ELEMENTS ||
- KindTraits::Kind == FAST_DOUBLE_ELEMENTS) {
+ } else if (KindTraits::Kind == PACKED_ELEMENTS ||
+ KindTraits::Kind == PACKED_DOUBLE_ELEMENTS) {
for (int i = 0; i < length; i++) {
DCHECK(!backing_store->is_the_hole(isolate, i));
}
} else {
- DCHECK(IsFastHoleyElementsKind(KindTraits::Kind));
+ DCHECK(IsHoleyElementsKind(KindTraits::Kind));
}
#endif
#endif
@@ -2095,12 +2118,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Arguments* args, uint32_t add_count) {
Isolate* isolate = receiver->GetIsolate();
Heap* heap = isolate->heap();
- uint32_t length = Smi::cast(receiver->length())->value();
+ uint32_t length = Smi::ToInt(receiver->length());
uint32_t new_length = length - delete_count + add_count;
ElementsKind kind = KindTraits::Kind;
if (new_length <= static_cast<uint32_t>(receiver->elements()->length()) &&
- IsFastSmiOrObjectElementsKind(kind)) {
+ IsSmiOrObjectElementsKind(kind)) {
HandleScope scope(isolate);
JSObject::EnsureWritableFastElements(receiver);
}
@@ -2179,7 +2202,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DCHECK_LE(hole_start, backing_store->length());
DCHECK_LE(hole_end, backing_store->length());
} else if (len != 0) {
- if (IsFastDoubleElementsKind(KindTraits::Kind)) {
+ if (IsDoubleElementsKind(KindTraits::Kind)) {
MemMove(dst_elms->data_start() + dst_index,
dst_elms->data_start() + src_index, len * kDoubleSize);
} else {
@@ -2216,60 +2239,59 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (!value->IsNumber()) {
if (value == undefined) {
- // Only FAST_ELEMENTS, FAST_HOLEY_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, and
- // FAST_HOLEY_DOUBLE_ELEMENTS can have `undefined` as a value.
- if (!IsFastObjectElementsKind(Subclass::kind()) &&
- !IsFastHoleyElementsKind(Subclass::kind())) {
+ // Only PACKED_ELEMENTS, HOLEY_ELEMENTS, HOLEY_SMI_ELEMENTS, and
+ // HOLEY_DOUBLE_ELEMENTS can have `undefined` as a value.
+ if (!IsObjectElementsKind(Subclass::kind()) &&
+ !IsHoleyElementsKind(Subclass::kind())) {
return Just(false);
}
- // Search for `undefined` or The Hole in FAST_ELEMENTS,
- // FAST_HOLEY_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS
- if (IsFastSmiOrObjectElementsKind(Subclass::kind())) {
+ // Search for `undefined` or The Hole in PACKED_ELEMENTS,
+ // HOLEY_ELEMENTS or HOLEY_SMI_ELEMENTS
+ if (IsSmiOrObjectElementsKind(Subclass::kind())) {
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
Object* element_k = elements->get(k);
- if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ if (IsHoleyElementsKind(Subclass::kind()) &&
element_k == the_hole) {
return Just(true);
}
- if (IsFastObjectElementsKind(Subclass::kind()) &&
+ if (IsObjectElementsKind(Subclass::kind()) &&
element_k == undefined) {
return Just(true);
}
}
return Just(false);
} else {
- // Seach for The Hole in FAST_HOLEY_DOUBLE_ELEMENTS
- DCHECK_EQ(Subclass::kind(), FAST_HOLEY_DOUBLE_ELEMENTS);
+ // Search for The Hole in HOLEY_DOUBLE_ELEMENTS
+ DCHECK_EQ(Subclass::kind(), HOLEY_DOUBLE_ELEMENTS);
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ if (IsHoleyElementsKind(Subclass::kind()) &&
elements->is_the_hole(k)) {
return Just(true);
}
}
return Just(false);
}
- } else if (!IsFastObjectElementsKind(Subclass::kind())) {
+ } else if (!IsObjectElementsKind(Subclass::kind())) {
// Search for non-number, non-Undefined value, with either
- // FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS or
- // FAST_HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
+ // PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, HOLEY_SMI_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
// elements kinds can only contain Number values or undefined.
return Just(false);
} else {
// Search for non-number, non-Undefined value with either
- // FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
- DCHECK(IsFastObjectElementsKind(Subclass::kind()));
+ // PACKED_ELEMENTS or HOLEY_ELEMENTS.
+ DCHECK(IsObjectElementsKind(Subclass::kind()));
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
Object* element_k = elements->get(k);
- if (IsFastHoleyElementsKind(Subclass::kind()) &&
- element_k == the_hole) {
+ if (IsHoleyElementsKind(Subclass::kind()) && element_k == the_hole) {
continue;
}
@@ -2280,14 +2302,14 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
} else {
if (!value->IsNaN()) {
double search_value = value->Number();
- if (IsFastDoubleElementsKind(Subclass::kind())) {
- // Search for non-NaN Number in FAST_DOUBLE_ELEMENTS or
- // FAST_HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or
+ if (IsDoubleElementsKind(Subclass::kind())) {
+ // Search for non-NaN Number in PACKED_DOUBLE_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or
// similar operation for result.
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ if (IsHoleyElementsKind(Subclass::kind()) &&
elements->is_the_hole(k)) {
continue;
}
@@ -2295,8 +2317,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
return Just(false);
} else {
- // Search for non-NaN Number in FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
- // FAST_SMI_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS --- Skip non-Numbers,
+ // Search for non-NaN Number in PACKED_ELEMENTS, HOLEY_ELEMENTS,
+ // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS --- Skip non-Numbers,
// and trust UCOMISD or similar operation for result
auto elements = FixedArray::cast(receiver->elements());
@@ -2310,17 +2332,17 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
} else {
// Search for NaN --- NaN cannot be represented with Smi elements, so
- // abort if ElementsKind is FAST_SMI_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS
- if (IsFastSmiElementsKind(Subclass::kind())) return Just(false);
+ // abort if ElementsKind is PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS
+ if (IsSmiElementsKind(Subclass::kind())) return Just(false);
- if (IsFastDoubleElementsKind(Subclass::kind())) {
- // Search for NaN in FAST_DOUBLE_ELEMENTS or
- // FAST_HOLEY_DOUBLE_ELEMENTS --- Skip The Hole and trust
+ if (IsDoubleElementsKind(Subclass::kind())) {
+ // Search for NaN in PACKED_DOUBLE_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS --- Skip The Hole and trust
// std::isnan(elementK) for result
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ if (IsHoleyElementsKind(Subclass::kind()) &&
elements->is_the_hole(k)) {
continue;
}
@@ -2328,10 +2350,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
return Just(false);
} else {
- // Search for NaN in FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
- // FAST_SMI_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS. Return true if
+ // Search for NaN in PACKED_ELEMENTS, HOLEY_ELEMENTS,
+ // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS. Return true if
// elementK->IsHeapNumber() && std::isnan(elementK->Number())
- DCHECK(IsFastSmiOrObjectElementsKind(Subclass::kind()));
+ DCHECK(IsSmiOrObjectElementsKind(Subclass::kind()));
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
@@ -2409,13 +2431,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Where remove_position) {
Isolate* isolate = receiver->GetIsolate();
ElementsKind kind = KindTraits::Kind;
- if (IsFastSmiOrObjectElementsKind(kind)) {
+ if (IsSmiOrObjectElementsKind(kind)) {
HandleScope scope(isolate);
JSObject::EnsureWritableFastElements(receiver);
}
Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
- uint32_t length =
- static_cast<uint32_t>(Smi::cast(receiver->length())->value());
+ uint32_t length = static_cast<uint32_t>(Smi::ToInt(receiver->length()));
DCHECK(length > 0);
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
@@ -2427,7 +2448,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
- if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
+ if (IsHoleyOrDictionaryElementsKind(kind) && result->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
return result;
@@ -2437,7 +2458,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArrayBase> backing_store,
Arguments* args, uint32_t add_size,
Where add_position) {
- uint32_t length = Smi::cast(receiver->length())->value();
+ uint32_t length = Smi::ToInt(receiver->length());
DCHECK(0 < add_size);
uint32_t elms_len = backing_store->length();
// Check we do not overflow the new_length.
@@ -2524,17 +2545,17 @@ class FastSmiOrObjectElementsAccessor
DisallowHeapAllocation no_gc;
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
to_start, copy_size);
break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS: {
AllowHeapAllocation allow_allocation;
- DCHECK(IsFastObjectElementsKind(to_kind));
+ DCHECK(IsObjectElementsKind(to_kind));
CopyDoubleToObjectElements(from, from_start, to, to_start, copy_size);
break;
}
@@ -2572,7 +2593,7 @@ class FastSmiOrObjectElementsAccessor
length = std::min(static_cast<uint32_t>(elements_base->length()), length);
// Only FAST_{,HOLEY_}ELEMENTS can store non-numbers.
- if (!value->IsNumber() && !IsFastObjectElementsKind(Subclass::kind())) {
+ if (!value->IsNumber() && !IsObjectElementsKind(Subclass::kind())) {
return Just<int64_t>(-1);
}
// NaN can never be found by strict equality.
@@ -2586,52 +2607,47 @@ class FastSmiOrObjectElementsAccessor
}
};
-
class FastPackedSmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<FAST_SMI_ELEMENTS> > {
+ FastPackedSmiElementsAccessor,
+ ElementsKindTraits<PACKED_SMI_ELEMENTS>> {
public:
explicit FastPackedSmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
+ FastPackedSmiElementsAccessor,
+ ElementsKindTraits<PACKED_SMI_ELEMENTS>>(name) {}
};
-
class FastHoleySmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
- FastHoleySmiElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
+ FastHoleySmiElementsAccessor,
+ ElementsKindTraits<HOLEY_SMI_ELEMENTS>> {
public:
explicit FastHoleySmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastHoleySmiElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
+ : FastSmiOrObjectElementsAccessor<FastHoleySmiElementsAccessor,
+ ElementsKindTraits<HOLEY_SMI_ELEMENTS>>(
+ name) {}
};
-
class FastPackedObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
- FastPackedObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS> > {
+ FastPackedObjectElementsAccessor,
+ ElementsKindTraits<PACKED_ELEMENTS>> {
public:
explicit FastPackedObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedObjectElementsAccessor,
- ElementsKindTraits<FAST_ELEMENTS> >(name) {}
+ : FastSmiOrObjectElementsAccessor<FastPackedObjectElementsAccessor,
+ ElementsKindTraits<PACKED_ELEMENTS>>(
+ name) {}
};
-
class FastHoleyObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
+ FastHoleyObjectElementsAccessor, ElementsKindTraits<HOLEY_ELEMENTS>> {
public:
explicit FastHoleyObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
+ : FastSmiOrObjectElementsAccessor<FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<HOLEY_ELEMENTS>>(
+ name) {}
};
template <typename Subclass, typename KindTraits>
@@ -2668,19 +2684,19 @@ class FastDoubleElementsAccessor
int copy_size) {
DisallowHeapAllocation no_allocation;
switch (from_kind) {
- case FAST_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
CopyPackedSmiToDoubleElements(from, from_start, to, to_start,
packed_size, copy_size);
break;
- case FAST_HOLEY_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
break;
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
break;
case DICTIONARY_ELEMENTS:
@@ -2736,28 +2752,26 @@ class FastDoubleElementsAccessor
}
};
-
class FastPackedDoubleElementsAccessor
: public FastDoubleElementsAccessor<
- FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
+ FastPackedDoubleElementsAccessor,
+ ElementsKindTraits<PACKED_DOUBLE_ELEMENTS>> {
public:
explicit FastPackedDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<
- FastPackedDoubleElementsAccessor,
- ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
+ : FastDoubleElementsAccessor<FastPackedDoubleElementsAccessor,
+ ElementsKindTraits<PACKED_DOUBLE_ELEMENTS>>(
+ name) {}
};
-
class FastHoleyDoubleElementsAccessor
: public FastDoubleElementsAccessor<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
+ FastHoleyDoubleElementsAccessor,
+ ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {
public:
explicit FastHoleyDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<
- FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
+ : FastDoubleElementsAccessor<FastHoleyDoubleElementsAccessor,
+ ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>>(
+ name) {}
};
@@ -2795,12 +2809,12 @@ class TypedElementsAccessor
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
- return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
- return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, DONT_DELETE, PropertyCellType::kNoCell);
}
static bool HasElementImpl(Isolate* isolate, JSObject* holder, uint32_t index,
@@ -2896,7 +2910,7 @@ class TypedElementsAccessor
ctype value;
if (obj_value->IsSmi()) {
- value = BackingStore::from(Smi::cast(*obj_value)->value());
+ value = BackingStore::from(Smi::ToInt(*obj_value));
} else {
DCHECK(obj_value->IsHeapNumber());
value = BackingStore::from(HeapNumber::cast(*obj_value)->value());
@@ -3232,17 +3246,17 @@ class TypedElementsAccessor
Object* undefined = isolate->heap()->undefined_value();
// Fastpath for packed Smi kind.
- if (kind == FAST_SMI_ELEMENTS) {
+ if (kind == PACKED_SMI_ELEMENTS) {
FixedArray* source_store = FixedArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
Object* elem = source_store->get(i);
DCHECK(elem->IsSmi());
- int int_value = Smi::cast(elem)->value();
+ int int_value = Smi::ToInt(elem);
dest->set(i, dest->from(int_value));
}
return true;
- } else if (kind == FAST_HOLEY_SMI_ELEMENTS) {
+ } else if (kind == HOLEY_SMI_ELEMENTS) {
FixedArray* source_store = FixedArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
if (source_store->is_the_hole(isolate, i)) {
@@ -3250,12 +3264,12 @@ class TypedElementsAccessor
} else {
Object* elem = source_store->get(i);
DCHECK(elem->IsSmi());
- int int_value = Smi::cast(elem)->value();
+ int int_value = Smi::ToInt(elem);
dest->set(i, dest->from(int_value));
}
}
return true;
- } else if (kind == FAST_DOUBLE_ELEMENTS) {
+ } else if (kind == PACKED_DOUBLE_ELEMENTS) {
// Fastpath for packed double kind. We avoid boxing and then immediately
// unboxing the double here by using get_scalar.
FixedDoubleArray* source_store =
@@ -3268,7 +3282,7 @@ class TypedElementsAccessor
dest->set(i, dest->from(elem));
}
return true;
- } else if (kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ } else if (kind == HOLEY_DOUBLE_ELEMENTS) {
FixedDoubleArray* source_store =
FixedDoubleArray::cast(source->elements());
for (uint32_t i = 0; i < length; i++) {
@@ -3368,7 +3382,7 @@ class SloppyArgumentsElementsAccessor
Object* probe = elements->get_mapped_entry(entry);
DCHECK(!probe->IsTheHole(isolate));
Context* context = elements->context();
- int context_entry = Smi::cast(probe)->value();
+ int context_entry = Smi::ToInt(probe);
DCHECK(!context->get(context_entry)->IsTheHole(isolate));
return handle(context->get(context_entry), isolate);
} else {
@@ -3404,7 +3418,7 @@ class SloppyArgumentsElementsAccessor
Object* probe = elements->get_mapped_entry(entry);
DCHECK(!probe->IsTheHole(store->GetIsolate()));
Context* context = elements->context();
- int context_entry = Smi::cast(probe)->value();
+ int context_entry = Smi::ToInt(probe);
DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
context->set(context_entry, value);
} else {
@@ -3524,7 +3538,7 @@ class SloppyArgumentsElementsAccessor
SloppyArgumentsElements::cast(holder->elements());
uint32_t length = elements->parameter_map_length();
if (entry < length) {
- return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
FixedArray* arguments = elements->arguments();
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
@@ -3719,18 +3733,10 @@ class SlowSloppyArgumentsElementsAccessor
Isolate* isolate = obj->GetIsolate();
Handle<SeededNumberDictionary> dict(
SeededNumberDictionary::cast(elements->arguments()), isolate);
- // TODO(verwaest): Remove reliance on index in Shrink.
- uint32_t index = GetIndexForEntryImpl(*dict, entry);
int length = elements->parameter_map_length();
- Handle<Object> result =
- SeededNumberDictionary::DeleteProperty(dict, entry - length);
- USE(result);
- DCHECK(result->IsTrue(isolate));
- Handle<FixedArray> new_elements =
- SeededNumberDictionary::Shrink(dict, index);
- elements->set_arguments(*new_elements);
+ dict = SeededNumberDictionary::DeleteEntry(dict, entry - length);
+ elements->set_arguments(*dict);
}
-
static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
@@ -3743,10 +3749,9 @@ class SlowSloppyArgumentsElementsAccessor
old_arguments->IsSeededNumberDictionary()
? Handle<SeededNumberDictionary>::cast(old_arguments)
: JSObject::NormalizeElements(object);
- PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
- details, object);
+ SeededNumberDictionary::Add(dictionary, index, value, details);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (*dictionary != *new_dictionary) {
elements->set_arguments(*new_dictionary);
@@ -3765,7 +3770,7 @@ class SlowSloppyArgumentsElementsAccessor
Object* probe = elements->get_mapped_entry(entry);
DCHECK(!probe->IsTheHole(isolate));
Context* context = elements->context();
- int context_entry = Smi::cast(probe)->value();
+ int context_entry = Smi::ToInt(probe);
DCHECK(!context->get(context_entry)->IsTheHole(isolate));
context->set(context_entry, *value);
@@ -3776,11 +3781,10 @@ class SlowSloppyArgumentsElementsAccessor
value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
}
- PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> arguments(
SeededNumberDictionary::cast(elements->arguments()), isolate);
- arguments = SeededNumberDictionary::AddNumberEntry(
- arguments, entry, value, details, object);
+ arguments = SeededNumberDictionary::Add(arguments, entry, value, details);
// If the attributes were NONE, we would have called set rather than
// reconfigure.
DCHECK_NE(NONE, attributes);
@@ -3823,8 +3827,8 @@ class FastSloppyArgumentsElementsAccessor
uint32_t end) {
Isolate* isolate = receiver->GetIsolate();
uint32_t result_len = end < start ? 0u : end - start;
- Handle<JSArray> result_array = isolate->factory()->NewJSArray(
- FAST_HOLEY_ELEMENTS, result_len, result_len);
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(HOLEY_ELEMENTS, result_len, result_len);
DisallowHeapAllocation no_gc;
FixedArray* elements = FixedArray::cast(result_array->elements());
FixedArray* parameters = FixedArray::cast(receiver->elements());
@@ -3912,12 +3916,12 @@ class FastSloppyArgumentsElementsAccessor
int copy_size) {
DCHECK(!to->IsDictionary());
if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
- CopyDictionaryToObjectElements(from, from_start, to, FAST_HOLEY_ELEMENTS,
+ CopyDictionaryToObjectElements(from, from_start, to, HOLEY_ELEMENTS,
to_start, copy_size);
} else {
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, from_kind);
- CopyObjectToObjectElements(from, FAST_HOLEY_ELEMENTS, from_start, to,
- FAST_HOLEY_ELEMENTS, to_start, copy_size);
+ CopyObjectToObjectElements(from, HOLEY_ELEMENTS, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
}
}
@@ -3939,7 +3943,7 @@ class FastSloppyArgumentsElementsAccessor
object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
JSObject::MigrateToMap(object, new_map);
elements->set_arguments(FixedArray::cast(*arguments));
- JSObject::ValidateElements(object);
+ JSObject::ValidateElements(*object);
}
};
@@ -3972,7 +3976,6 @@ class StringWrapperElementsAccessor
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* elements,
uint32_t entry) {
UNREACHABLE();
- return Handle<Object>();
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
@@ -3980,7 +3983,7 @@ class StringWrapperElementsAccessor
if (entry < length) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- return PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
}
return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
}
@@ -4089,12 +4092,12 @@ class StringWrapperElementsAccessor
int copy_size) {
DCHECK(!to->IsDictionary());
if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) {
- CopyDictionaryToObjectElements(from, from_start, to, FAST_HOLEY_ELEMENTS,
+ CopyDictionaryToObjectElements(from, from_start, to, HOLEY_ELEMENTS,
to_start, copy_size);
} else {
DCHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, from_kind);
- CopyObjectToObjectElements(from, FAST_HOLEY_ELEMENTS, from_start, to,
- FAST_HOLEY_ELEMENTS, to_start, copy_size);
+ CopyObjectToObjectElements(from, HOLEY_ELEMENTS, from_start, to,
+ HOLEY_ELEMENTS, to_start, copy_size);
}
}
@@ -4207,7 +4210,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
ElementsKind elements_kind = array->GetElementsKind();
JSArray::Initialize(array, length, length);
- if (!IsFastHoleyElementsKind(elements_kind)) {
+ if (!IsHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
JSObject::TransitionElementsKind(array, elements_kind);
}
@@ -4231,7 +4234,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
// Allocate an appropriately typed elements array.
ElementsKind elements_kind = array->GetElementsKind();
Handle<FixedArrayBase> elms;
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
elms = Handle<FixedArrayBase>::cast(
factory->NewFixedDoubleArray(number_of_elements));
} else {
@@ -4241,16 +4244,16 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
// Fill in the content
switch (elements_kind) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS: {
Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
for (int entry = 0; entry < number_of_elements; entry++) {
smi_elms->set(entry, (*args)[entry], SKIP_WRITE_BARRIER);
}
break;
}
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS: {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
Handle<FixedArray> object_elms = Handle<FixedArray>::cast(elms);
@@ -4259,8 +4262,8 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
}
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
Handle<FixedDoubleArray> double_elms =
Handle<FixedDoubleArray>::cast(elms);
for (int entry = 0; entry < number_of_elements; entry++) {
@@ -4312,8 +4315,8 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
for (uint32_t i = 0; i < concat_size; i++) {
Object* arg = (*args)[i];
ElementsKind arg_kind = JSArray::cast(arg)->GetElementsKind();
- has_raw_doubles = has_raw_doubles || IsFastDoubleElementsKind(arg_kind);
- is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
+ has_raw_doubles = has_raw_doubles || IsDoubleElementsKind(arg_kind);
+ is_holey = is_holey || IsHoleyElementsKind(arg_kind);
result_elements_kind =
GetMoreGeneralElementsKind(result_elements_kind, arg_kind);
}
@@ -4326,7 +4329,7 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
// elements array needs to be initialized to contain proper holes, since
// boxing doubles may cause incremental marking.
bool requires_double_boxing =
- has_raw_doubles && !IsFastDoubleElementsKind(result_elements_kind);
+ has_raw_doubles && !IsDoubleElementsKind(result_elements_kind);
ArrayStorageAllocationMode mode = requires_double_boxing
? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
: DONT_INITIALIZE_ARRAY_ELEMENTS;
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 5184b29765..9e64764bb0 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -6,7 +6,6 @@
#define V8_ELEMENTS_H_
#include "src/elements-kind.h"
-#include "src/isolate.h"
#include "src/keys.h"
#include "src/objects.h"
@@ -30,7 +29,7 @@ class ElementsAccessor {
// Checks the elements of an object for consistency, asserting when a problem
// is found.
- virtual void Validate(Handle<JSObject> obj) = 0;
+ virtual void Validate(JSObject* obj) = 0;
// Returns true if a holder contains an element with the specified index
// without iterating up the prototype chain. The caller can optionally pass
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index c6ea3847b8..eeebfadde2 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -96,9 +96,10 @@ class V8_EXPORT_PRIVATE StackGuard final {
V(API_INTERRUPT, ApiInterrupt, 4) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5)
-#define V(NAME, Name, id) \
- inline bool Check##Name() { return CheckInterrupt(NAME); } \
- inline void Request##Name() { RequestInterrupt(NAME); } \
+#define V(NAME, Name, id) \
+ inline bool Check##Name() { return CheckInterrupt(NAME); } \
+ inline bool CheckAndClear##Name() { return CheckAndClearInterrupt(NAME); } \
+ inline void Request##Name() { RequestInterrupt(NAME); } \
inline void Clear##Name() { ClearInterrupt(NAME); }
INTERRUPT_LIST(V)
#undef V
@@ -199,18 +200,18 @@ class V8_EXPORT_PRIVATE StackGuard final {
base::AtomicWord climit_;
uintptr_t jslimit() {
- return bit_cast<uintptr_t>(base::NoBarrier_Load(&jslimit_));
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
}
void set_jslimit(uintptr_t limit) {
- return base::NoBarrier_Store(&jslimit_,
- static_cast<base::AtomicWord>(limit));
+ return base::Relaxed_Store(&jslimit_,
+ static_cast<base::AtomicWord>(limit));
}
uintptr_t climit() {
- return bit_cast<uintptr_t>(base::NoBarrier_Load(&climit_));
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
}
void set_climit(uintptr_t limit) {
- return base::NoBarrier_Store(&climit_,
- static_cast<base::AtomicWord>(limit));
+ return base::Relaxed_Store(&climit_,
+ static_cast<base::AtomicWord>(limit));
}
PostponeInterruptsScope* postpone_interrupts_;
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.cc b/deps/v8/src/extensions/ignition-statistics-extension.cc
index bab738f0f3..29ade53221 100644
--- a/deps/v8/src/extensions/ignition-statistics-extension.cc
+++ b/deps/v8/src/extensions/ignition-statistics-extension.cc
@@ -26,7 +26,6 @@ const char* const IgnitionStatisticsExtension::kSource =
void IgnitionStatisticsExtension::GetIgnitionDispatchCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- DCHECK_EQ(args.Length(), 0);
DCHECK(FLAG_trace_ignition_dispatches);
args.GetReturnValue().Set(reinterpret_cast<Isolate*>(args.GetIsolate())
->interpreter()
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 95c3a976e6..68150783e6 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -271,6 +271,14 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::search_string_raw<const uc16, const uc16>(isolate)
.address(),
"search_string_raw<1-byte, 2-byte>");
+ Add(ExternalReference::orderedhashmap_gethash_raw(isolate).address(),
+ "orderedhashmap_gethash_raw");
+ Add(ExternalReference::orderedhashtable_has_raw<OrderedHashMap, 2>(isolate)
+ .address(),
+ "orderedhashtable_has_raw<OrderedHashMap, 2>");
+ Add(ExternalReference::orderedhashtable_has_raw<OrderedHashSet, 1>(isolate)
+ .address(),
+ "orderedhashtable_has_raw<OrderedHashSet, 1>");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -281,9 +289,6 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"Isolate::stress_deopt_count_address()");
Add(ExternalReference::runtime_function_table_address(isolate).address(),
"Runtime::runtime_function_table_address()");
- Add(ExternalReference::is_tail_call_elimination_enabled_address(isolate)
- .address(),
- "Isolate::is_tail_call_elimination_enabled_address()");
Add(ExternalReference::address_of_float_abs_constant().address(),
"float_absolute_constant");
Add(ExternalReference::address_of_float_neg_constant().address(),
@@ -365,9 +370,14 @@ void ExternalReferenceTable::AddBuiltins(Isolate* isolate) {
const char* name;
};
static const BuiltinEntry builtins[] = {
+#define BUILTIN_LIST_EXTERNAL_REFS(DEF) \
+ BUILTIN_LIST_C(DEF) \
+ BUILTIN_LIST_A(DEF) \
+ DEF(CallProxy)
#define DEF_ENTRY(Name, ...) {Builtins::k##Name, "Builtin_" #Name},
- BUILTIN_LIST_C(DEF_ENTRY) BUILTIN_LIST_A(DEF_ENTRY)
+ BUILTIN_LIST_EXTERNAL_REFS(DEF_ENTRY)
#undef DEF_ENTRY
+#undef BUILTIN_LIST_EXTERNAL_REFS
};
for (unsigned i = 0; i < arraysize(builtins); ++i) {
Add(isolate->builtins()->builtin_address(builtins[i].id), builtins[i].name);
@@ -400,8 +410,8 @@ void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
#undef BUILD_NAME_LITERAL
};
- for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
- Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
+ for (int i = 0; i < IsolateAddressId::kIsolateAddressCount; ++i) {
+ Add(isolate->get_address_from_id(static_cast<IsolateAddressId>(i)),
address_names[i]);
}
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 7f2eae35dd..45a08c9232 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
@@ -13,6 +14,7 @@
#include "src/conversions.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/module-info.h"
#include "src/objects/scope-info.h"
@@ -152,6 +154,14 @@ Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
FixedArray);
}
+Handle<PropertyArray> Factory::NewPropertyArray(int size,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, size);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocatePropertyArray(size, pretenure),
+ PropertyArray);
+}
+
MaybeHandle<FixedArray> Factory::TryNewFixedArray(int size,
PretenureFlag pretenure) {
DCHECK(0 <= size);
@@ -174,10 +184,12 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
}
Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateUninitializedFixedArray(size),
- FixedArray);
+ // TODO(ulan): As an experiment this temporarily returns an initialized fixed
+ // array. After getting canary/performance coverage, either remove the
+ // function or revert to returning an uninitialized array.
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFixedArray(size, NOT_TENURED),
+ FixedArray);
}
Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
@@ -239,6 +251,24 @@ Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
return Handle<FrameArray>::cast(result);
}
+Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
+ int size, PretenureFlag pretenure) {
+ DCHECK_LE(0, size);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateSmallOrderedHashSet(size, pretenure),
+ SmallOrderedHashSet);
+}
+
+Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
+ int size, PretenureFlag pretenure) {
+ DCHECK_LE(0, size);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateSmallOrderedHashMap(size, pretenure),
+ SmallOrderedHashMap);
+}
+
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
}
@@ -297,7 +327,6 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
return StringTable::LookupKey(isolate(), key);
}
-
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
@@ -605,8 +634,8 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
// when building the new string.
if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
// We can do this.
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU +
- 1)); // because of this.
+ DCHECK(base::bits::IsPowerOfTwo(String::kMaxOneByteCharCodeU +
+ 1)); // because of this.
Handle<SeqOneByteString> str =
isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
uint8_t* dest = str->GetChars();
@@ -818,7 +847,6 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
return slice;
}
-
MaybeHandle<String> Factory::NewExternalStringFromOneByte(
const ExternalOneByteString::Resource* resource) {
size_t length = resource->length();
@@ -916,25 +944,6 @@ Handle<Symbol> Factory::NewPrivateSymbol() {
return symbol;
}
-Handle<JSPromise> Factory::NewJSPromise() {
- Handle<JSFunction> constructor(
- isolate()->native_context()->promise_function(), isolate());
- DCHECK(constructor->has_initial_map());
- Handle<Map> map(constructor->initial_map(), isolate());
-
- DCHECK(!map->is_prototype_map());
- Handle<JSObject> promise_obj = NewJSObjectFromMap(map);
- Handle<JSPromise> promise = Handle<JSPromise>::cast(promise_obj);
- promise->set_status(v8::Promise::kPending);
- promise->set_flags(0);
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- promise->SetEmbedderField(i, Smi::kZero);
- }
-
- isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
- return promise;
-}
-
Handle<Context> Factory::NewNativeContext() {
Handle<FixedArray> array =
NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
@@ -1130,8 +1139,6 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_eval_from_position(0);
script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
script->set_flags(0);
- script->set_preparsed_scope_data(
- PodArray<uint32_t>::cast(heap->empty_byte_array()));
heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
return script;
@@ -1216,11 +1223,9 @@ Handle<Cell> Factory::NewManyClosuresCell(Handle<Object> value) {
return cell;
}
-Handle<PropertyCell> Factory::NewPropertyCell() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocatePropertyCell(),
- PropertyCell);
+Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name) {
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocatePropertyCell(*name),
+ PropertyCell);
}
@@ -1279,7 +1284,6 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
JSObject);
}
-
Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
Handle<Map> map) {
CALL_HEAP_FUNCTION(isolate(),
@@ -1287,13 +1291,21 @@ Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
FixedArray);
}
-
Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
int grow_by,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyFixedArrayAndGrow(
- *array, grow_by, pretenure),
- FixedArray);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->CopyArrayAndGrow(*array, grow_by, pretenure),
+ FixedArray);
+}
+
+Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
+ Handle<PropertyArray> array, int grow_by, PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->CopyArrayAndGrow(*array, grow_by, pretenure),
+ PropertyArray);
}
Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
@@ -1460,66 +1472,92 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
return function;
}
-
-Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
- Handle<String> name,
- MaybeHandle<Code> code) {
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map, Handle<String> name,
+ MaybeHandle<Code> maybe_code) {
+ DCHECK(!name.is_null());
Handle<Context> context(isolate()->native_context());
Handle<SharedFunctionInfo> info =
- NewSharedFunctionInfo(name, code, map->is_constructor());
+ NewSharedFunctionInfo(name, maybe_code, map->is_constructor());
+ // Proper language mode in shared function info will be set outside.
DCHECK(is_sloppy(info->language_mode()));
DCHECK(!map->IsUndefined(isolate()));
- DCHECK(
- map.is_identical_to(isolate()->sloppy_function_map()) ||
- map.is_identical_to(isolate()->sloppy_function_without_prototype_map()) ||
- map.is_identical_to(
- isolate()->sloppy_function_with_readonly_prototype_map()) ||
- map.is_identical_to(isolate()->strict_function_map()) ||
- map.is_identical_to(isolate()->strict_function_without_prototype_map()) ||
- // TODO(titzer): wasm_function_map() could be undefined here. ugly.
- (*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
- (*map == context->get(Context::NATIVE_FUNCTION_MAP_INDEX)) ||
- map.is_identical_to(isolate()->proxy_function_map()));
+#ifdef DEBUG
+ if (isolate()->bootstrapper()->IsActive()) {
+ Handle<Code> code;
+ bool has_code = maybe_code.ToHandle(&code);
+ DCHECK(
+ // During bootstrapping some of these maps could be not created yet.
+ (*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
+ (*map ==
+ context->get(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)) ||
+ (*map ==
+ context->get(
+ Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
+ // Check if it's a creation of an empty or Proxy function during
+ // bootstrapping.
+ (has_code && (code->builtin_index() == Builtins::kEmptyFunction ||
+ code->builtin_index() == Builtins::kProxyConstructor)));
+ } else {
+ DCHECK(
+ (*map == *isolate()->sloppy_function_map()) ||
+ (*map == *isolate()->sloppy_function_without_prototype_map()) ||
+ (*map == *isolate()->sloppy_function_with_readonly_prototype_map()) ||
+ (*map == *isolate()->strict_function_map()) ||
+ (*map == *isolate()->strict_function_without_prototype_map()) ||
+ (*map == *isolate()->native_function_map()));
+ }
+#endif
return NewFunction(map, info, context);
}
Handle<JSFunction> Factory::NewFunction(Handle<String> name) {
- return NewFunction(
- isolate()->sloppy_function_map(), name, MaybeHandle<Code>());
+ Handle<JSFunction> result =
+ NewFunction(isolate()->sloppy_function_map(), name, MaybeHandle<Code>());
+ DCHECK(is_sloppy(result->shared()->language_mode()));
+ return result;
}
-
-Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code,
- bool is_strict) {
- Handle<Map> map = is_strict
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
+ Handle<String> name, Handle<Code> code, LanguageMode language_mode) {
+ Handle<Map> map = is_strict(language_mode)
? isolate()->strict_function_without_prototype_map()
: isolate()->sloppy_function_without_prototype_map();
- return NewFunction(map, name, code);
+ Handle<JSFunction> result = NewFunction(map, name, code);
+ result->shared()->set_language_mode(language_mode);
+ return result;
}
-
Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
- bool is_strict) {
- Handle<Map> map = is_strict ? isolate()->strict_function_map()
- : isolate()->sloppy_function_map();
+ LanguageMode language_mode,
+ MutableMode prototype_mutability) {
+ Handle<Map> map;
+ if (prototype_mutability == MUTABLE) {
+ map = is_strict(language_mode) ? isolate()->strict_function_map()
+ : isolate()->sloppy_function_map();
+ } else {
+ map = is_strict(language_mode)
+ ? isolate()->strict_function_with_readonly_prototype_map()
+ : isolate()->sloppy_function_with_readonly_prototype_map();
+ }
Handle<JSFunction> result = NewFunction(map, name, code);
result->set_prototype_or_initial_map(*prototype);
+ result->shared()->set_language_mode(language_mode);
return result;
}
-
Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
InstanceType type, int instance_size,
- bool is_strict) {
+ LanguageMode language_mode,
+ MutableMode prototype_mutability) {
// Allocate the function
- Handle<JSFunction> function = NewFunction(name, code, prototype, is_strict);
+ Handle<JSFunction> function =
+ NewFunction(name, code, prototype, language_mode, prototype_mutability);
ElementsKind elements_kind =
- type == JS_ARRAY_TYPE ? FAST_SMI_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
+ type == JS_ARRAY_TYPE ? PACKED_SMI_ELEMENTS : HOLEY_SMI_ELEMENTS;
Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
// TODO(littledan): Why do we have this is_generator test when
// NewFunctionPrototype already handles finding an appropriately
@@ -1529,10 +1567,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
prototype = NewFunctionPrototype(function);
}
}
-
- JSFunction::SetInitialMap(function, initial_map,
- Handle<JSReceiver>::cast(prototype));
-
+ JSFunction::SetInitialMap(function, initial_map, prototype);
return function;
}
@@ -1541,7 +1576,8 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<Code> code,
InstanceType type,
int instance_size) {
- return NewFunction(name, code, the_hole_value(), type, instance_size);
+ DCHECK(isolate()->bootstrapper()->IsActive());
+ return NewFunction(name, code, the_hole_value(), type, instance_size, STRICT);
}
@@ -1579,10 +1615,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info,
Handle<Context> context,
PretenureFlag pretenure) {
- int map_index =
- Context::FunctionMapIndex(info->language_mode(), info->kind());
- Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
-
+ Handle<Map> initial_map(
+ Map::cast(context->native_context()->get(info->function_map_index())));
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
pretenure);
}
@@ -1590,10 +1624,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
Handle<Cell> vector, PretenureFlag pretenure) {
- int map_index =
- Context::FunctionMapIndex(info->language_mode(), info->kind());
- Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
-
+ Handle<Map> initial_map(
+ Map::cast(context->native_context()->get(info->function_map_index())));
return NewFunctionFromSharedFunctionInfo(initial_map, info, context, vector,
pretenure);
}
@@ -1667,6 +1699,14 @@ Handle<ModuleInfo> Factory::NewModuleInfo() {
return Handle<ModuleInfo>::cast(array);
}
+Handle<PreParsedScopeData> Factory::NewPreParsedScopeData() {
+ Handle<PreParsedScopeData> result =
+ Handle<PreParsedScopeData>::cast(NewStruct(PREPARSED_SCOPE_DATA_TYPE));
+ result->set_scope_data(PodArray<uint32_t>::cast(*empty_byte_array()));
+ result->set_child_data(*empty_fixed_array());
+ return result;
+}
+
Handle<JSObject> Factory::NewExternal(void* value) {
Handle<Foreign> foreign = NewForeign(static_cast<Address>(value));
Handle<JSObject> external = NewJSObjectFromMap(external_map());
@@ -1713,8 +1753,6 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowHeapAllocation no_gc;
- code->set_gc_metadata(Smi::kZero);
- code->set_ic_age(isolate()->heap()->global_ic_age());
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
code->set_flags(flags);
@@ -1833,10 +1871,10 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
DCHECK_EQ(kAccessor, details.kind());
- PropertyDetails d(kAccessor, details.attributes(), i + 1,
+ PropertyDetails d(kAccessor, details.attributes(),
PropertyCellType::kMutable);
Handle<Name> name(descs->GetKey(i));
- Handle<PropertyCell> cell = NewPropertyCell();
+ Handle<PropertyCell> cell = NewPropertyCell(name);
cell->set_value(descs->GetValue(i));
// |dictionary| already contains enough space for all properties.
USE(GlobalDictionary::Add(dictionary, name, cell, d));
@@ -1851,8 +1889,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
new_map->set_dictionary_map(true);
// Set up the global object as a normalized object.
- global->set_map(*new_map);
- global->set_properties(*dictionary);
+ global->set_global_dictionary(*dictionary);
+ global->synchronized_set_map(*new_map);
// Make sure result is a global object with properties in dictionary.
DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
@@ -1876,10 +1914,10 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
PretenureFlag pretenure) {
DCHECK(map->is_dictionary_map());
- Handle<FixedArray> object_properties =
+ Handle<NameDictionary> object_properties =
NameDictionary::New(isolate(), capacity);
Handle<JSObject> js_object = NewJSObjectFromMap(map, pretenure);
- js_object->set_properties(*object_properties);
+ js_object->set_raw_properties_or_hash(*object_properties);
return js_object;
}
@@ -1912,7 +1950,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
- JSObject::ValidateElements(array);
+ JSObject::ValidateElements(*array);
return array;
}
@@ -1932,7 +1970,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
HandleScope inner_scope(isolate());
Handle<FixedArrayBase> elms;
ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
elms = NewFixedDoubleArray(capacity);
} else {
@@ -1940,7 +1978,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
elms = NewFixedDoubleArrayWithHoles(capacity);
}
} else {
- DCHECK(IsFastSmiOrObjectElementsKind(elements_kind));
+ DCHECK(IsSmiOrObjectElementsKind(elements_kind));
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
elms = NewUninitializedFixedArray(capacity);
} else {
@@ -2001,9 +2039,11 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
module->set_module_namespace(isolate()->heap()->undefined_value());
module->set_requested_modules(*requested_modules);
- module->set_status(Module::kUnprepared);
- DCHECK(!module->instantiated());
- DCHECK(!module->evaluated());
+ module->set_script(Script::cast(code->script()));
+ module->set_status(Module::kUninstantiated);
+ module->set_exception(isolate()->heap()->the_hole_value());
+ module->set_dfs_index(-1);
+ module->set_dfs_ancestor_index(-1);
return module;
}
@@ -2063,20 +2103,24 @@ Handle<JSSet> Factory::NewJSSet() {
return js_set;
}
-
-Handle<JSMapIterator> Factory::NewJSMapIterator() {
- Handle<Map> map(isolate()->native_context()->map_iterator_map());
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map),
- JSMapIterator);
+Handle<JSMapIterator> Factory::NewJSMapIterator(Handle<Map> map,
+ Handle<OrderedHashMap> table,
+ int index) {
+ Handle<JSMapIterator> result =
+ Handle<JSMapIterator>::cast(NewJSObjectFromMap(map));
+ result->set_table(*table);
+ result->set_index(Smi::FromInt(index));
+ return result;
}
-
-Handle<JSSetIterator> Factory::NewJSSetIterator() {
- Handle<Map> map(isolate()->native_context()->set_iterator_map());
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map),
- JSSetIterator);
+Handle<JSSetIterator> Factory::NewJSSetIterator(Handle<Map> map,
+ Handle<OrderedHashSet> table,
+ int index) {
+ Handle<JSSetIterator> result =
+ Handle<JSSetIterator>::cast(NewJSObjectFromMap(map));
+ result->set_table(*table);
+ result->set_index(Smi::FromInt(index));
+ return result;
}
ExternalArrayType Factory::GetArrayTypeFromElementsKind(ElementsKind kind) {
@@ -2087,7 +2131,6 @@ ExternalArrayType Factory::GetArrayTypeFromElementsKind(ElementsKind kind) {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
- return kExternalInt8Array;
}
#undef TYPED_ARRAY_CASE
}
@@ -2100,7 +2143,6 @@ size_t Factory::GetExternalArrayElementSize(ExternalArrayType type) {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
- return 0;
}
#undef TYPED_ARRAY_CASE
}
@@ -2115,7 +2157,6 @@ ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
}
UNREACHABLE();
- return FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
#undef TYPED_ARRAY_CASE
}
@@ -2127,7 +2168,6 @@ size_t GetFixedTypedArraysElementSize(ElementsKind kind) {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
- return 0;
}
#undef TYPED_ARRAY_CASE
}
@@ -2145,7 +2185,6 @@ JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
default:
UNREACHABLE();
- return NULL;
}
}
@@ -2162,7 +2201,6 @@ JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
default:
UNREACHABLE();
- return NULL;
}
}
@@ -2326,7 +2364,7 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
? isolate()->bound_function_with_constructor_map()
: isolate()->bound_function_without_constructor_map();
if (map->prototype() != *prototype) {
- map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+ map = Map::TransitionToPrototype(map, prototype);
}
DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
@@ -2415,14 +2453,13 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name, FunctionKind kind, Handle<Code> code,
+ MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
Handle<ScopeInfo> scope_info) {
DCHECK(IsValidFunctionKind(kind));
Handle<SharedFunctionInfo> shared =
- NewSharedFunctionInfo(name, code, IsConstructable(kind));
+ NewSharedFunctionInfo(name, code, IsConstructable(kind), kind);
shared->set_scope_info(*scope_info);
shared->set_outer_scope_info(*the_hole_value());
- shared->set_kind(kind);
if (IsGeneratorFunction(kind)) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
}
@@ -2436,7 +2473,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
Handle<SharedFunctionInfo> result =
NewSharedFunctionInfo(literal->name(), literal->kind(), code, scope_info);
SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
- SharedFunctionInfo::SetScript(result, script);
+ SharedFunctionInfo::SetScript(result, script, false);
return result;
}
@@ -2446,7 +2483,8 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
Handle<JSMessageObject> message_obj = New<JSMessageObject>(map, NEW_SPACE);
- message_obj->set_properties(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ message_obj->set_raw_properties_or_hash(*empty_fixed_array(),
+ SKIP_WRITE_BARRIER);
message_obj->initialize_elements();
message_obj->set_elements(*empty_fixed_array(), SKIP_WRITE_BARRIER);
message_obj->set_type(message);
@@ -2459,18 +2497,24 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
return message_obj;
}
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name, MaybeHandle<Code> maybe_code, bool is_constructor) {
+ MaybeHandle<String> maybe_name, MaybeHandle<Code> maybe_code,
+ bool is_constructor, FunctionKind kind) {
// Function names are assumed to be flat elsewhere. Must flatten before
// allocating SharedFunctionInfo to avoid GC seeing the uninitialized SFI.
- name = String::Flatten(name, TENURED);
+ Handle<String> shared_name;
+ bool has_shared_name = maybe_name.ToHandle(&shared_name);
+ if (has_shared_name) {
+ shared_name = String::Flatten(shared_name, TENURED);
+ }
Handle<Map> map = shared_function_info_map();
Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, OLD_SPACE);
// Set pointer fields.
- share->set_name(*name);
+ share->set_raw_name(has_shared_name
+ ? *shared_name
+ : SharedFunctionInfo::kNoSharedNameSentinel);
share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
@@ -2509,12 +2553,18 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
// All compiler hints default to false or 0.
share->set_compiler_hints(0);
share->set_opt_count_and_bailout_reason(0);
+ share->set_kind(kind);
+
+ share->set_preparsed_scope_data(*null_value());
// Link into the list.
Handle<Object> new_noscript_list =
WeakFixedArray::Add(noscript_shared_function_infos(), share);
isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+#ifdef VERIFY_HEAP
+ share->SharedFunctionInfoVerify();
+#endif
return share;
}
@@ -2586,31 +2636,17 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
return js_string;
}
-
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
DCHECK(!shared->HasDebugInfo());
- // Allocate initial fixed array for active break points before allocating the
- // debug info object to avoid allocation while setting up the debug info
- // object.
- Handle<FixedArray> break_points(
- NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction));
-
- // Make a copy of the bytecode array if available.
- Handle<Object> maybe_debug_bytecode_array = undefined_value();
- if (shared->HasBytecodeArray()) {
- Handle<BytecodeArray> original(shared->bytecode_array());
- maybe_debug_bytecode_array = CopyBytecodeArray(original);
- }
-
- // Create and set up the debug info object. Debug info contains function, a
- // copy of the original code, the executing code and initial fixed array for
- // active break points.
+ Heap* heap = isolate()->heap();
+
Handle<DebugInfo> debug_info =
Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
+ debug_info->set_flags(DebugInfo::kNone);
debug_info->set_shared(*shared);
debug_info->set_debugger_hints(shared->debugger_hints());
- debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
- debug_info->set_break_points(*break_points);
+ debug_info->set_debug_bytecode_array(heap->undefined_value());
+ debug_info->set_break_points(heap->empty_fixed_array());
// Link debug info to function.
shared->set_debug_info(*debug_info);
@@ -2618,6 +2654,22 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
return debug_info;
}
+Handle<CoverageInfo> Factory::NewCoverageInfo(
+ const ZoneVector<SourceRange>& slots) {
+ const int slot_count = static_cast<int>(slots.size());
+
+ const int length = CoverageInfo::FixedArrayLengthForSlotCount(slot_count);
+ Handle<CoverageInfo> info =
+ Handle<CoverageInfo>::cast(NewUninitializedFixedArray(length));
+
+ for (int i = 0; i < slot_count; i++) {
+ SourceRange range = slots[i];
+ info->InitializeSlot(i, range.start, range.end);
+ }
+
+ return info;
+}
+
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info =
Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE));
@@ -2756,8 +2808,6 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
- store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
- store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
store->set(JSRegExp::kIrregexpCaptureCountIndex,
Smi::FromInt(capture_count));
@@ -2785,8 +2835,8 @@ Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
if (Name::Equals(name, undefined_string())) return undefined_value();
- if (Name::Equals(name, nan_string())) return nan_value();
- if (Name::Equals(name, infinity_string())) return infinity_value();
+ if (Name::Equals(name, NaN_string())) return nan_value();
+ if (Name::Equals(name, Infinity_string())) return infinity_value();
return Handle<Object>::null();
}
@@ -2805,68 +2855,91 @@ Handle<String> Factory::ToPrimitiveHintString(ToPrimitiveHint hint) {
return string_string();
}
UNREACHABLE();
- return Handle<String>::null();
}
-Handle<Map> Factory::CreateSloppyFunctionMap(FunctionMode function_mode) {
+Handle<Map> Factory::CreateSloppyFunctionMap(
+ FunctionMode function_mode, MaybeHandle<JSFunction> maybe_empty_function) {
Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetFunctionInstanceDescriptor(map, function_mode);
+ SetSloppyFunctionInstanceDescriptor(map, function_mode);
map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
map->set_is_callable();
+ Handle<JSFunction> empty_function;
+ if (maybe_empty_function.ToHandle(&empty_function)) {
+ Map::SetPrototype(map, empty_function);
+ }
return map;
}
-void Factory::SetFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode) {
+void Factory::SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
+ FunctionMode function_mode) {
int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
+ int inobject_properties_count = 0;
+ if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
+ map->SetInObjectProperties(inobject_properties_count);
+ map->set_instance_size(JSFunction::kSize +
+ inobject_properties_count * kPointerSize);
+
Map::EnsureDescriptorSlack(map, size);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ int field_index = 0;
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- { // Add length.
+ { // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(length->name())), length, roc_attribs);
map->AppendDescriptor(&d);
}
STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
- { // Add name.
+ if (IsFunctionModeWithName(function_mode)) {
+ // Add name field.
+ Handle<Name> name = isolate()->factory()->name_string();
+ Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+
+ } else {
+ // Add name accessor.
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(name->name())), name, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> args =
Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
- { // Add arguments.
+ { // Add arguments accessor.
Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(args->name())), args, ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> caller =
Accessors::FunctionCallerInfo(isolate(), ro_attribs);
- { // Add caller.
+ { // Add caller accessor.
Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(caller->name())), caller, ro_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
- if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) {
- ro_attribs = static_cast<PropertyAttributes>(ro_attribs & ~READ_ONLY);
- }
+ // Add prototype accessor.
+ PropertyAttributes attribs =
+ IsFunctionModeWithWritablePrototype(function_mode) ? rw_attribs
+ : ro_attribs;
Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
+ Accessors::FunctionPrototypeInfo(isolate(), attribs);
Descriptor d = Descriptor::AccessorConstant(
- Handle<Name>(Name::cast(prototype->name())), prototype, ro_attribs);
+ Handle<Name>(Name::cast(prototype->name())), prototype, attribs);
map->AppendDescriptor(&d);
}
+ DCHECK_EQ(inobject_properties_count, field_index);
}
Handle<Map> Factory::CreateStrictFunctionMap(
@@ -2881,7 +2954,17 @@ Handle<Map> Factory::CreateStrictFunctionMap(
void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode) {
- int size = IsFunctionModeWithPrototype(function_mode) ? 3 : 2;
+ DCHECK_EQ(JS_FUNCTION_TYPE, map->instance_type());
+ int inobject_properties_count = 0;
+ if (IsFunctionModeWithName(function_mode)) ++inobject_properties_count;
+ if (IsFunctionModeWithHomeObject(function_mode)) ++inobject_properties_count;
+ map->SetInObjectProperties(inobject_properties_count);
+ map->set_instance_size(JSFunction::kSize +
+ inobject_properties_count * kPointerSize);
+
+ int size = (IsFunctionModeWithPrototype(function_mode) ? 3 : 2) +
+ inobject_properties_count;
+
Map::EnsureDescriptorSlack(map, size);
PropertyAttributes rw_attribs =
@@ -2891,11 +2974,9 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
- function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
- function_mode == FUNCTION_WITHOUT_PROTOTYPE);
+ int field_index = 0;
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
- { // Add length.
+ { // Add length accessor.
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
@@ -2904,17 +2985,26 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
}
STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
- { // Add name.
+ if (IsFunctionModeWithName(function_mode)) {
+ // Add name field.
+ Handle<Name> name = isolate()->factory()->name_string();
+ Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+
+ } else {
+ // Add name accessor.
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
handle(Name::cast(name->name())), name, roc_attribs);
map->AppendDescriptor(&d);
}
+
if (IsFunctionModeWithPrototype(function_mode)) {
- // Add prototype.
+ // Add prototype accessor.
PropertyAttributes attribs =
- function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ? rw_attribs
+ IsFunctionModeWithWritablePrototype(function_mode) ? rw_attribs
: ro_attribs;
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), attribs);
@@ -2922,6 +3012,15 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
Handle<Name>(Name::cast(prototype->name())), prototype, attribs);
map->AppendDescriptor(&d);
}
+
+ if (IsFunctionModeWithHomeObject(function_mode)) {
+ // Add home object field.
+ Handle<Name> name = isolate()->factory()->home_object_symbol();
+ Descriptor d = Descriptor::DataField(name, field_index++, DONT_ENUM,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ DCHECK_EQ(inobject_properties_count, field_index);
}
Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
@@ -2942,7 +3041,7 @@ void Factory::SetClassFunctionInstanceDescriptor(Handle<Map> map) {
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
- { // Add length.
+ { // Add length accessor.
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
Descriptor d = Descriptor::AccessorConstant(
@@ -2951,7 +3050,7 @@ void Factory::SetClassFunctionInstanceDescriptor(Handle<Map> map) {
}
{
- // Add prototype.
+ // Add prototype accessor.
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), rw_attribs);
Descriptor d = Descriptor::AccessorConstant(
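
[Editor's note] The most visible factory.cc change above is that NewSharedFunctionInfo now takes a MaybeHandle<String> and stores SharedFunctionInfo::kNoSharedNameSentinel when no name is supplied, instead of requiring a flattened name handle up front. A minimal plain-C++ analogue of that optional-name pattern, with std::optional standing in for MaybeHandle and an illustrative sentinel value (not V8's actual sentinel):

    #include <iostream>
    #include <optional>
    #include <string>

    // Stand-in for SharedFunctionInfo::kNoSharedNameSentinel (illustrative value).
    static const std::string kNoSharedNameSentinel = "<no shared name>";

    std::string ResolveSharedName(const std::optional<std::string>& maybe_name) {
      if (maybe_name.has_value()) {
        // The real factory additionally flattens the string handle here.
        return *maybe_name;
      }
      return kNoSharedNameSentinel;  // mirrors share->set_raw_name(sentinel)
    }

    int main() {
      std::cout << ResolveSharedName(std::string("f")) << "\n";  // prints: f
      std::cout << ResolveSharedName(std::nullopt) << "\n";      // prints the sentinel
    }
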
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 8146205559..d81d51534c 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -12,20 +12,47 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
#include "src/objects/scope-info.h"
+#include "src/objects/string.h"
#include "src/string-hasher.h"
namespace v8 {
namespace internal {
+class AliasedArgumentsEntry;
+class BreakPointInfo;
class BoilerplateDescription;
class ConstantElementsPair;
+class CoverageInfo;
+class DebugInfo;
+struct SourceRange;
+class PreParsedScopeData;
enum FunctionMode {
- // With prototype.
- FUNCTION_WITH_WRITEABLE_PROTOTYPE,
- FUNCTION_WITH_READONLY_PROTOTYPE,
+ kWithNameBit = 1 << 0,
+ kWithHomeObjectBit = 1 << 1,
+ kWithWritablePrototypeBit = 1 << 2,
+ kWithReadonlyPrototypeBit = 1 << 3,
+ kWithPrototypeBits = kWithWritablePrototypeBit | kWithReadonlyPrototypeBit,
+
// Without prototype.
- FUNCTION_WITHOUT_PROTOTYPE
+ FUNCTION_WITHOUT_PROTOTYPE = 0,
+ METHOD_WITH_NAME = kWithNameBit,
+ METHOD_WITH_HOME_OBJECT = kWithHomeObjectBit,
+ METHOD_WITH_NAME_AND_HOME_OBJECT = kWithNameBit | kWithHomeObjectBit,
+
+ // With writable prototype.
+ FUNCTION_WITH_WRITEABLE_PROTOTYPE = kWithWritablePrototypeBit,
+ FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE =
+ kWithWritablePrototypeBit | kWithNameBit,
+ FUNCTION_WITH_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE =
+ kWithWritablePrototypeBit | kWithHomeObjectBit,
+ FUNCTION_WITH_NAME_AND_HOME_OBJECT_AND_WRITEABLE_PROTOTYPE =
+ kWithWritablePrototypeBit | kWithNameBit | kWithHomeObjectBit,
+
+ // With readonly prototype.
+ FUNCTION_WITH_READONLY_PROTOTYPE = kWithReadonlyPrototypeBit,
+ FUNCTION_WITH_NAME_AND_READONLY_PROTOTYPE =
+ kWithReadonlyPrototypeBit | kWithNameBit,
};
// Interface for handle based allocation.
@@ -38,6 +65,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(int size,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<PropertyArray> NewPropertyArray(int size,
+ PretenureFlag pretenure = NOT_TENURED);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
// The caller has to manually signal an
@@ -80,6 +109,13 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
+ Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
+ int size = SmallOrderedHashSet::kMinCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
+ Handle<SmallOrderedHashMap> NewSmallOrderedHashMap(
+ int size = SmallOrderedHashMap::kMinCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
@@ -269,9 +305,6 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Symbol> NewSymbol();
Handle<Symbol> NewPrivateSymbol();
- // Create a promise.
- Handle<JSPromise> NewJSPromise();
-
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -358,7 +391,7 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Cell> NewCell(Handle<Object> value);
- Handle<PropertyCell> NewPropertyCell();
+ Handle<PropertyCell> NewPropertyCell(Handle<Name> name);
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
@@ -371,10 +404,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocate a tenured AllocationSite. It's payload is null.
Handle<AllocationSite> NewAllocationSite();
- Handle<Map> NewMap(
- InstanceType type,
- int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ Handle<Map> NewMap(InstanceType type, int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
Handle<HeapObject> NewFillerObject(int size,
bool double_align,
@@ -394,6 +425,10 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<FixedArray> array, int grow_by,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<PropertyArray> CopyPropertyArrayAndGrow(
+ Handle<PropertyArray> array, int grow_by,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<FixedArray> CopyFixedArrayUpTo(Handle<FixedArray> array, int new_len,
PretenureFlag pretenure = NOT_TENURED);
@@ -563,9 +598,12 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSMap> NewJSMap();
Handle<JSSet> NewJSSet();
- // TODO(aandrey): Maybe these should take table, index and kind arguments.
- Handle<JSMapIterator> NewJSMapIterator();
- Handle<JSSetIterator> NewJSSetIterator();
+ Handle<JSMapIterator> NewJSMapIterator(Handle<Map> map,
+ Handle<OrderedHashMap> table,
+ int index);
+ Handle<JSSetIterator> NewJSSetIterator(Handle<Map> map,
+ Handle<OrderedHashSet> table,
+ int index);
// Allocates a bound function.
MaybeHandle<JSBoundFunction> NewJSBoundFunction(
@@ -591,11 +629,12 @@ class V8_EXPORT_PRIVATE Factory final {
PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
- bool is_strict = false);
+ LanguageMode language_mode = SLOPPY,
+ MutableMode prototype_mutability = MUTABLE);
Handle<JSFunction> NewFunction(Handle<String> name);
- Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
- Handle<Code> code,
- bool is_strict = false);
+ Handle<JSFunction> NewFunctionWithoutPrototype(
+ Handle<String> name, Handle<Code> code,
+ LanguageMode language_mode = SLOPPY);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
@@ -617,7 +656,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype, InstanceType type,
int instance_size,
- bool is_strict = false);
+ LanguageMode language_mode = SLOPPY,
+ MutableMode prototype_mutability = MUTABLE);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Code> code,
InstanceType type,
@@ -631,6 +671,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<ModuleInfoEntry> NewModuleInfoEntry();
Handle<ModuleInfo> NewModuleInfo();
+ Handle<PreParsedScopeData> NewPreParsedScopeData();
+
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -741,21 +783,33 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- Handle<String> name, FunctionKind kind, Handle<Code> code,
+ MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
Handle<ScopeInfo> scope_info);
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
- MaybeHandle<Code> code,
- bool is_constructor);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+ MaybeHandle<String> name, MaybeHandle<Code> code, bool is_constructor,
+ FunctionKind kind = kNormalFunction);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script);
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
- return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
- function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
+ return (function_mode & kWithPrototypeBits) != 0;
}
- Handle<Map> CreateSloppyFunctionMap(FunctionMode function_mode);
+ static bool IsFunctionModeWithWritablePrototype(FunctionMode function_mode) {
+ return (function_mode & kWithWritablePrototypeBit) != 0;
+ }
+
+ static bool IsFunctionModeWithName(FunctionMode function_mode) {
+ return (function_mode & kWithNameBit) != 0;
+ }
+
+ static bool IsFunctionModeWithHomeObject(FunctionMode function_mode) {
+ return (function_mode & kWithHomeObjectBit) != 0;
+ }
+
+ Handle<Map> CreateSloppyFunctionMap(
+ FunctionMode function_mode, MaybeHandle<JSFunction> maybe_empty_function);
Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
Handle<JSFunction> empty_function);
@@ -772,6 +826,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+ Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
+
// Return a map for given number of properties using the map cache in the
// native context.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
@@ -837,8 +893,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
- void SetFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode);
+ void SetSloppyFunctionInstanceDescriptor(Handle<Map> map,
+ FunctionMode function_mode);
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
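
[Editor's note] With FunctionMode now a bit set rather than a flat enum, the IsFunctionModeWith* helpers added above reduce to single bit tests, and the descriptor setup code counts in-object properties by testing those bits. A self-contained sketch reusing the bit assignments from the new header; the main() driver is illustrative only:

    #include <cstdio>

    enum FunctionMode {
      kWithNameBit = 1 << 0,
      kWithHomeObjectBit = 1 << 1,
      kWithWritablePrototypeBit = 1 << 2,
      kWithReadonlyPrototypeBit = 1 << 3,
      kWithPrototypeBits = kWithWritablePrototypeBit | kWithReadonlyPrototypeBit,
      FUNCTION_WITHOUT_PROTOTYPE = 0,
      METHOD_WITH_NAME = kWithNameBit,
      FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE =
          kWithWritablePrototypeBit | kWithNameBit,
    };

    static bool IsFunctionModeWithPrototype(FunctionMode mode) {
      return (mode & kWithPrototypeBits) != 0;
    }
    static bool IsFunctionModeWithName(FunctionMode mode) {
      return (mode & kWithNameBit) != 0;
    }

    int main() {
      FunctionMode mode = FUNCTION_WITH_NAME_AND_WRITEABLE_PROTOTYPE;
      // SetSloppy/StrictFunctionInstanceDescriptor count fields this way.
      int inobject_properties = IsFunctionModeWithName(mode) ? 1 : 0;
      std::printf("prototype: %d, in-object properties: %d\n",
                  IsFunctionModeWithPrototype(mode), inobject_properties);
    }
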
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index e368385166..a53d854e50 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -8,6 +8,7 @@
#include "src/factory.h"
#include "src/feedback-vector.h"
#include "src/globals.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
@@ -46,7 +47,7 @@ bool FeedbackMetadata::is_empty() const {
int FeedbackMetadata::slot_count() const {
if (length() == 0) return 0;
DCHECK(length() > kReservedIndexCount);
- return Smi::cast(get(kSlotsCountIndex))->value();
+ return Smi::ToInt(get(kSlotsCountIndex));
}
// static
@@ -60,7 +61,6 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
case FeedbackSlotKind::kGeneral:
case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kBinaryOp:
- case FeedbackSlotKind::kToBoolean:
case FeedbackSlotKind::kLiteral:
case FeedbackSlotKind::kCreateClosure:
case FeedbackSlotKind::kTypeProfile:
@@ -106,20 +106,37 @@ SharedFunctionInfo* FeedbackVector::shared_function_info() const {
}
int FeedbackVector::invocation_count() const {
- return Smi::cast(get(kInvocationCountIndex))->value();
+ return Smi::ToInt(get(kInvocationCountIndex));
}
void FeedbackVector::clear_invocation_count() {
set(kInvocationCountIndex, Smi::kZero);
}
+Object* FeedbackVector::optimized_code_cell() const {
+ return get(kOptimizedCodeIndex);
+}
+
Code* FeedbackVector::optimized_code() const {
- WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
+ Object* slot = optimized_code_cell();
+ if (slot->IsSmi()) return nullptr;
+ WeakCell* cell = WeakCell::cast(slot);
return cell->cleared() ? nullptr : Code::cast(cell->value());
}
+OptimizationMarker FeedbackVector::optimization_marker() const {
+ Object* slot = optimized_code_cell();
+ if (!slot->IsSmi()) return OptimizationMarker::kNone;
+ Smi* value = Smi::cast(slot);
+ return static_cast<OptimizationMarker>(value->value());
+}
+
bool FeedbackVector::has_optimized_code() const {
- return !WeakCell::cast(get(kOptimizedCodeIndex))->cleared();
+ return optimized_code() != nullptr;
+}
+
+bool FeedbackVector::has_optimization_marker() const {
+ return optimization_marker() != OptimizationMarker::kNone;
}
// Conversion from an integer index to either a slot or an ic slot.
@@ -146,8 +163,11 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
case BinaryOperationFeedback::kSignedSmall:
return BinaryOperationHint::kSignedSmall;
case BinaryOperationFeedback::kNumber:
+ return BinaryOperationHint::kNumber;
case BinaryOperationFeedback::kNumberOrOddball:
return BinaryOperationHint::kNumberOrOddball;
+ case BinaryOperationFeedback::kNonEmptyString:
+ return BinaryOperationHint::kNonEmptyString;
case BinaryOperationFeedback::kString:
return BinaryOperationHint::kString;
case BinaryOperationFeedback::kAny:
@@ -155,7 +175,6 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
return BinaryOperationHint::kAny;
}
UNREACHABLE();
- return BinaryOperationHint::kNone;
}
// Helper function to transform the feedback to CompareOperationHint.
@@ -173,13 +192,14 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kInternalizedString;
case CompareOperationFeedback::kString:
return CompareOperationHint::kString;
+ case CompareOperationFeedback::kSymbol:
+ return CompareOperationHint::kSymbol;
case CompareOperationFeedback::kReceiver:
return CompareOperationHint::kReceiver;
default:
return CompareOperationHint::kAny;
}
UNREACHABLE();
- return CompareOperationHint::kNone;
}
void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
@@ -226,7 +246,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
// TODO(mvstanton): Remove code_is_interpreted when full code is retired
// from service.
if (code_is_interpreted) {
- int const feedback = Smi::cast(obj)->value();
+ int const feedback = Smi::ToInt(obj);
BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
if (hint == BinaryOperationHint::kAny) {
gen++;
@@ -243,7 +263,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
// TODO(mvstanton): Remove code_is_interpreted when full code is retired
// from service.
if (code_is_interpreted) {
- int const feedback = Smi::cast(obj)->value();
+ int const feedback = Smi::ToInt(obj);
CompareOperationHint hint =
CompareOperationHintFromFeedback(feedback);
if (hint == CompareOperationHint::kAny) {
@@ -256,7 +276,6 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
}
break;
}
- case FeedbackSlotKind::kToBoolean:
case FeedbackSlotKind::kCreateClosure:
case FeedbackSlotKind::kGeneral:
case FeedbackSlotKind::kLiteral:
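
[Editor's note] The feedback-vector-inl.h hunks above make the kOptimizedCodeIndex slot hold either a Smi-encoded OptimizationMarker or a WeakCell pointing at optimized Code, with the accessors branching on which one is present. A rough standard-C++ analogue of that branching, using std::variant and std::weak_ptr as stand-ins for the Smi/WeakCell duality (only kNone is taken from the diff; the other enum value and all types here are illustrative, not V8's representation):

    #include <cstdio>
    #include <memory>
    #include <variant>

    enum class OptimizationMarker { kNone, kMarkedForOptimization /* illustrative */ };
    struct Code { int id; };

    // Stand-in for the kOptimizedCodeIndex slot: marker or weak reference to code.
    using OptimizedCodeCell = std::variant<OptimizationMarker, std::weak_ptr<Code>>;

    OptimizationMarker optimization_marker(const OptimizedCodeCell& cell) {
      // Mirrors optimization_marker(): a non-marker slot means kNone.
      const auto* marker = std::get_if<OptimizationMarker>(&cell);
      return marker ? *marker : OptimizationMarker::kNone;
    }

    bool has_optimized_code(const OptimizedCodeCell& cell) {
      // Mirrors optimized_code() != nullptr: a marker or a cleared cell yields false.
      const auto* weak = std::get_if<std::weak_ptr<Code>>(&cell);
      return weak != nullptr && !weak->expired();
    }

    int main() {
      OptimizedCodeCell cell = OptimizationMarker::kMarkedForOptimization;
      std::printf("has code: %d\n", has_optimized_code(cell));  // 0
      auto code = std::make_shared<Code>(Code{1});
      cell = std::weak_ptr<Code>(code);
      std::printf("has code: %d\n", has_optimized_code(cell));  // 1
    }
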
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index afbcf2a923..f1c5faf62d 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -37,13 +37,13 @@ std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) {
FeedbackSlotKind FeedbackMetadata::GetKind(FeedbackSlot slot) const {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
- int data = Smi::cast(get(index))->value();
+ int data = Smi::ToInt(get(index));
return VectorICComputer::decode(data, slot.ToInt());
}
void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
- int data = Smi::cast(get(index))->value();
+ int data = Smi::ToInt(get(index));
int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
set(index, Smi::FromInt(new_data));
}
@@ -154,8 +154,6 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "BinaryOp";
case FeedbackSlotKind::kCompareOp:
return "CompareOp";
- case FeedbackSlotKind::kToBoolean:
- return "ToBoolean";
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
return "StoreDataPropertyInLiteral";
case FeedbackSlotKind::kCreateClosure:
@@ -170,7 +168,6 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
break;
}
UNREACHABLE();
- return "?";
}
bool FeedbackMetadata::HasTypeProfileSlot() const {
@@ -203,7 +200,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set_map_no_write_barrier(isolate->heap()->feedback_vector_map());
array->set(kSharedFunctionInfoIndex, *shared);
- array->set(kOptimizedCodeIndex, *factory->empty_weak_cell());
+ array->set(kOptimizedCodeIndex, Smi::FromEnum(OptimizationMarker::kNone));
array->set(kInvocationCountIndex, Smi::kZero);
// Ensure we can skip the write barrier
@@ -225,7 +222,6 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
break;
case FeedbackSlotKind::kCompareOp:
case FeedbackSlotKind::kBinaryOp:
- case FeedbackSlotKind::kToBoolean:
array->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
break;
case FeedbackSlotKind::kCreateClosure: {
@@ -234,7 +230,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
break;
}
case FeedbackSlotKind::kLiteral:
- array->set(index, *undefined_value, SKIP_WRITE_BARRIER);
+ array->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
break;
case FeedbackSlotKind::kCall:
array->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
@@ -306,28 +302,38 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
vector->set(kOptimizedCodeIndex, *cell);
}
+void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
+ set(kOptimizedCodeIndex, Smi::FromEnum(marker));
+}
+
void FeedbackVector::ClearOptimizedCode() {
- set(kOptimizedCodeIndex, GetIsolate()->heap()->empty_weak_cell());
+ set(kOptimizedCodeIndex, Smi::FromEnum(OptimizationMarker::kNone));
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo* shared, const char* reason) {
- WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
- if (!cell->cleared()) {
- Code* code = Code::cast(cell->value());
- if (code->marked_for_deoptimization()) {
- if (FLAG_trace_deopt) {
- PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
- reason);
- shared->ShortPrint();
- PrintF("]\n");
- }
- if (!code->deopt_already_counted()) {
- shared->increment_deopt_count();
- code->set_deopt_already_counted(true);
- }
- ClearOptimizedCode();
+ Object* slot = get(kOptimizedCodeIndex);
+ if (slot->IsSmi()) return;
+
+ WeakCell* cell = WeakCell::cast(slot);
+ if (cell->cleared()) {
+ ClearOptimizedCode();
+ return;
+ }
+
+ Code* code = Code::cast(cell->value());
+ if (code->marked_for_deoptimization()) {
+ if (FLAG_trace_deopt) {
+ PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
+ reason);
+ shared->ShortPrint();
+ PrintF("]\n");
+ }
+ if (!code->deopt_already_counted()) {
+ shared->increment_deopt_count();
+ code->set_deopt_already_counted(true);
}
+ ClearOptimizedCode();
}
}
@@ -336,7 +342,6 @@ void FeedbackVector::ClearSlots(JSFunction* host_function) {
Object* uninitialized_sentinel =
FeedbackVector::RawUninitializedSentinel(isolate);
- Oddball* undefined_value = isolate->heap()->undefined_value();
bool feedback_updated = false;
FeedbackMetadataIterator iter(metadata());
@@ -427,7 +432,7 @@ void FeedbackVector::ClearSlots(JSFunction* host_function) {
break;
}
case FeedbackSlotKind::kLiteral: {
- Set(slot, undefined_value, SKIP_WRITE_BARRIER);
+ Set(slot, Smi::kZero, SKIP_WRITE_BARRIER);
feedback_updated = true;
break;
}
@@ -439,7 +444,6 @@ void FeedbackVector::ClearSlots(JSFunction* host_function) {
}
break;
}
- case FeedbackSlotKind::kToBoolean:
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
@@ -626,7 +630,7 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
int CallICNexus::ExtractCallCount() {
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
- int value = Smi::cast(call_count)->value();
+ int value = Smi::ToInt(call_count);
return value;
}
@@ -869,7 +873,7 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
IcCheckType KeyedLoadICNexus::GetKeyType() const {
Object* feedback = GetFeedback();
if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
- return static_cast<IcCheckType>(Smi::cast(GetFeedbackExtra())->value());
+ return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
}
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
}
@@ -877,7 +881,7 @@ IcCheckType KeyedLoadICNexus::GetKeyType() const {
IcCheckType KeyedStoreICNexus::GetKeyType() const {
Object* feedback = GetFeedback();
if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
- return static_cast<IcCheckType>(Smi::cast(GetFeedbackExtra())->value());
+ return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
}
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
}
@@ -905,12 +909,12 @@ InlineCacheState CompareICNexus::StateFromFeedback() const {
}
BinaryOperationHint BinaryOpICNexus::GetBinaryOperationFeedback() const {
- int feedback = Smi::cast(GetFeedback())->value();
+ int feedback = Smi::ToInt(GetFeedback());
return BinaryOperationHintFromFeedback(feedback);
}
CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
- int feedback = Smi::cast(GetFeedback())->value();
+ int feedback = Smi::ToInt(GetFeedback());
return CompareOperationHintFromFeedback(feedback);
}
@@ -956,19 +960,19 @@ void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
Handle<UnseededNumberDictionary> types;
if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- types = UnseededNumberDictionary::NewEmpty(isolate);
+ types = UnseededNumberDictionary::New(isolate, 1);
} else {
types = handle(UnseededNumberDictionary::cast(feedback));
}
Handle<ArrayList> position_specific_types;
- if (types->Has(position)) {
- int entry = types->FindEntry(position);
+ int entry = types->FindEntry(position);
+ if (entry == UnseededNumberDictionary::kNotFound) {
+ position_specific_types = ArrayList::New(isolate, 1);
+ } else {
DCHECK(types->ValueAt(entry)->IsArrayList());
position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
- } else {
- position_specific_types = ArrayList::New(isolate, 1);
}
types = UnseededNumberDictionary::Set(
@@ -994,11 +998,12 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
Handle<ArrayList> position_specific_types(
ArrayList::cast(feedback->get(value_index)));
- int position = Smi::cast(key)->value();
- JSObject::AddDataElement(type_profile, position,
- isolate->factory()->NewJSArrayWithElements(
- position_specific_types->Elements()),
- PropertyAttributes::NONE)
+ int position = Smi::ToInt(key);
+ JSObject::AddDataElement(
+ type_profile, position,
+ isolate->factory()->NewJSArrayWithElements(
+ ArrayList::Elements(position_specific_types)),
+ PropertyAttributes::NONE)
.ToHandleChecked();
}
}
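
[Editor's note] In the CollectTypeProfileNexus::Collect hunk above, the Has(position) check followed by FindEntry(position) is collapsed into a single FindEntry compared against kNotFound, saving one dictionary lookup per collected type. The same single-lookup pattern with a standard container, purely as an illustration:

    #include <string>
    #include <unordered_map>
    #include <vector>

    void Collect(std::unordered_map<int, std::vector<std::string>>& types,
                 int position, const std::string& type) {
      auto it = types.find(position);  // one lookup instead of Has() + FindEntry()
      if (it == types.end()) {
        it = types.emplace(position, std::vector<std::string>()).first;
      }
      it->second.push_back(type);
    }

    int main() {
      std::unordered_map<int, std::vector<std::string>> types;
      Collect(types, 42, "number");
      Collect(types, 42, "string");
      return 0;
    }
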
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 84ec460de1..b130bf2b95 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -10,6 +10,7 @@
#include "src/base/logging.h"
#include "src/elements-kind.h"
#include "src/objects/map.h"
+#include "src/objects/name.h"
#include "src/type-hints.h"
#include "src/zone/zone-containers.h"
@@ -36,7 +37,6 @@ enum class FeedbackSlotKind {
kStoreKeyedStrict,
kBinaryOp,
kCompareOp,
- kToBoolean,
kStoreDataPropertyInLiteral,
kTypeProfile,
kCreateClosure,
@@ -177,7 +177,7 @@ class FeedbackVectorSpecBase {
void Print();
#endif // OBJECT_PRINT
- DECLARE_PRINTER(FeedbackVectorSpec)
+ DECL_PRINTER(FeedbackVectorSpec)
private:
inline FeedbackSlot AddSlot(FeedbackSlotKind kind);
@@ -275,7 +275,7 @@ class FeedbackMetadata : public FixedArray {
void Print();
#endif // OBJECT_PRINT
- DECLARE_PRINTER(FeedbackMetadata)
+ DECL_PRINTER(FeedbackMetadata)
static const char* Kind2String(FeedbackSlotKind kind);
bool HasTypeProfileSlot() const;
@@ -324,13 +324,17 @@ class FeedbackVector : public FixedArray {
inline int invocation_count() const;
inline void clear_invocation_count();
+ inline Object* optimized_code_cell() const;
inline Code* optimized_code() const;
+ inline OptimizationMarker optimization_marker() const;
inline bool has_optimized_code() const;
+ inline bool has_optimization_marker() const;
void ClearOptimizedCode();
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo* shared,
const char* reason);
static void SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code);
+ void SetOptimizationMarker(OptimizationMarker marker);
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) {
@@ -383,7 +387,7 @@ class FeedbackVector : public FixedArray {
void Print();
#endif // OBJECT_PRINT
- DECLARE_PRINTER(FeedbackVector)
+ DECL_PRINTER(FeedbackVector)
// Clears the vector slots.
void ClearSlots(JSFunction* host_function);
@@ -412,7 +416,8 @@ class FeedbackVector : public FixedArray {
// code that looks into the contents of a slot assuming to find a String,
// a Symbol, an AllocationSite, a WeakCell, or a FixedArray.
STATIC_ASSERT(WeakCell::kSize >= 2 * kPointerSize);
-STATIC_ASSERT(WeakCell::kValueOffset == AllocationSite::kTransitionInfoOffset);
+STATIC_ASSERT(WeakCell::kValueOffset ==
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
STATIC_ASSERT(WeakCell::kValueOffset == Name::kHashFieldSlot);
// Verify that an empty hash field looks like a tagged object, but can't
@@ -721,7 +726,7 @@ class CompareICNexus final : public FeedbackNexus {
CompareOperationHint GetCompareOperationFeedback() const;
int ExtractMaps(MapHandles* maps) const final {
- // BinaryOpICs don't record map feedback.
+ // CompareICs don't record map feedback.
return 0;
}
MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
diff --git a/deps/v8/src/ffi/OWNERS b/deps/v8/src/ffi/OWNERS
index dc9a9780a6..f78789f5b5 100644
--- a/deps/v8/src/ffi/OWNERS
+++ b/deps/v8/src/ffi/OWNERS
@@ -1,2 +1,4 @@
mattloring@google.com
ofrobots@google.com
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/ffi/ffi-compiler.cc b/deps/v8/src/ffi/ffi-compiler.cc
index a526ce3d4b..e442b66cbe 100644
--- a/deps/v8/src/ffi/ffi-compiler.cc
+++ b/deps/v8/src/ffi/ffi-compiler.cc
@@ -42,7 +42,6 @@ class FFIAssembler : public CodeStubAssembler {
return ChangeInt32ToTagged(node);
}
UNREACHABLE();
- return nullptr;
}
Node* FromJS(Node* node, Node* context, FFIType type) {
@@ -51,7 +50,6 @@ class FFIAssembler : public CodeStubAssembler {
return TruncateTaggedToWord32(context, node);
}
UNREACHABLE();
- return nullptr;
}
MachineType FFIToMachineType(FFIType type) {
@@ -60,7 +58,6 @@ class FFIAssembler : public CodeStubAssembler {
return MachineType::Int32();
}
UNREACHABLE();
- return MachineType::None();
}
Signature<MachineType>* FFIToMachineSignature(FFISignature* sig) {
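
[Editor's note] A pattern repeated throughout this commit, including the three ffi-compiler.cc hunks above, is dropping the dummy return value after UNREACHABLE(). That relies on the macro expanding to a call the compiler treats as noreturn, so the missing-return warning no longer fires on that path; presumably V8's macro in src/base/logging.h is annotated that way in this version. A minimal stand-in showing the idea (an assumed simplified definition, not V8's actual macro):

    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] void FatalUnreachable(const char* file, int line) {
      std::fprintf(stderr, "unreachable code hit at %s:%d\n", file, line);
      std::abort();
    }
    #define UNREACHABLE() FatalUnreachable(__FILE__, __LINE__)

    int MachineTypeSize(int ffi_type) {
      switch (ffi_type) {
        case 0:  // e.g. an Int32 FFI type
          return 4;
        default:
          UNREACHABLE();  // no trailing "return 0;" needed: this path cannot return
      }
    }

    int main() { return MachineTypeSize(0) == 4 ? 0 : 1; }
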
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index db6ba3ed3d..c4d3342f3f 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -11,8 +11,7 @@
namespace v8 {
namespace internal {
-
-inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) {
+inline FieldIndex FieldIndex::ForInObjectOffset(int offset, const Map* map) {
DCHECK((offset % kPointerSize) == 0);
int index = offset / kPointerSize;
DCHECK(map == NULL ||
@@ -21,8 +20,7 @@ inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) {
return FieldIndex(true, index, false, 0, 0, true);
}
-
-inline FieldIndex FieldIndex::ForPropertyIndex(Map* map,
+inline FieldIndex FieldIndex::ForPropertyIndex(const Map* map,
int property_index,
bool is_double) {
DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
@@ -42,7 +40,8 @@ inline FieldIndex FieldIndex::ForPropertyIndex(Map* map,
// Takes an index as computed by GetLoadByFieldIndex and reconstructs a
// FieldIndex object from it.
-inline FieldIndex FieldIndex::ForLoadByFieldIndex(Map* map, int orig_index) {
+inline FieldIndex FieldIndex::ForLoadByFieldIndex(const Map* map,
+ int orig_index) {
int field_index = orig_index;
bool is_inobject = true;
bool is_double = field_index & 1;
@@ -85,7 +84,8 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
return is_double() ? (result | 1) : result;
}
-inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
+inline FieldIndex FieldIndex::ForDescriptor(const Map* map,
+ int descriptor_index) {
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
int field_index = details.field_index();
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 37b2f3c59d..78c1d75110 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -21,12 +21,11 @@ class FieldIndex final {
public:
FieldIndex() : bit_field_(0) {}
- static FieldIndex ForPropertyIndex(Map* map,
- int index,
+ static FieldIndex ForPropertyIndex(const Map* map, int index,
bool is_double = false);
- static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
- static FieldIndex ForDescriptor(Map* map, int descriptor_index);
- static FieldIndex ForLoadByFieldIndex(Map* map, int index);
+ static FieldIndex ForInObjectOffset(int offset, const Map* map = NULL);
+ static FieldIndex ForDescriptor(const Map* map, int descriptor_index);
+ static FieldIndex ForLoadByFieldIndex(const Map* map, int index);
static FieldIndex FromFieldAccessStubKey(int key);
int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index 0097a35bc0..3b51095323 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -4,7 +4,6 @@
#include "src/field-type.h"
-#include "src/ast/ast-types.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
@@ -72,13 +71,6 @@ bool FieldType::NowIs(FieldType* other) {
bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
-AstType* FieldType::Convert(Zone* zone) {
- if (IsAny()) return AstType::NonInternal();
- if (IsNone()) return AstType::None();
- DCHECK(IsClass());
- return AstType::Class(AsClass(), zone);
-}
-
void FieldType::PrintTo(std::ostream& os) {
if (IsAny()) {
os << "Any";
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index 5eb19dfe0c..40114f76d3 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -5,7 +5,6 @@
#ifndef V8_FIELD_TYPE_H_
#define V8_FIELD_TYPE_H_
-#include "src/ast/ast-types.h"
#include "src/objects.h"
#include "src/objects/map.h"
#include "src/ostreams.h"
@@ -42,7 +41,6 @@ class FieldType : public Object {
bool NowStable();
bool NowIs(FieldType* other);
bool NowIs(Handle<FieldType> other);
- AstType* Convert(Zone* zone);
void PrintTo(std::ostream& os);
};
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index f719555c5f..3aa0dad423 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -19,10 +19,6 @@
#define DEFINE_NEG_NEG_IMPLICATION(whenflag, thenflag) \
DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, false)
-#define DEFINE_DUAL_IMPLICATION(whenflag, thenflag) \
- DEFINE_IMPLICATION(whenflag, thenflag) \
- DEFINE_NEG_NEG_IMPLICATION(whenflag, thenflag)
-
// We want to declare the names of the variables for the header file. Normally
// this will just be an extern declaration, but for a readonly flag we let the
// compiler make better optimizations by giving it the value.
@@ -193,17 +189,25 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
+#define HARMONY_INPROGRESS_BASE(V) \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
V(harmony_function_sent, "harmony function.sent") \
- V(harmony_tailcalls, "harmony tail calls") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony public fields in class literals") \
V(harmony_async_iteration, "harmony async iteration") \
- V(harmony_dynamic_import, "harmony dynamic import") \
V(harmony_promise_finally, "harmony Promise.prototype.finally")
+#ifdef V8_INTL_SUPPORT
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_number_format_to_parts, \
+ "Intl.NumberFormat.prototype." \
+ "formatToParts")
+#else
+#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
+#endif
+
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
V(harmony_function_tostring, "harmony Function.prototype.toString") \
@@ -217,24 +221,15 @@ DEFINE_IMPLICATION(es_staging, harmony)
"harmony invalid escapes in tagged template literals") \
V(harmony_restrict_constructor_return, \
"harmony disallow non undefined primitive return value from class " \
- "constructor")
+ "constructor") \
+ V(harmony_dynamic_import, "harmony dynamic import")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_restrictive_generators, \
- "harmony restrictions on generator declarations") \
- V(harmony_trailing_commas, \
- "harmony trailing commas in function parameter lists") \
+#define HARMONY_SHIPPING(V) \
+ V(harmony_restrictive_generators, \
+ "harmony restrictions on generator declarations") \
V(harmony_object_rest_spread, "harmony object rest spread properties")
-#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
-#else
-#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
-#endif
-
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
// and associated tests are moved from the harmony directory to the appropriate
@@ -271,18 +266,10 @@ DEFINE_BOOL(icu_timezone_data, false,
DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_IMPLICATION(future, turbo)
-
-DEFINE_DUAL_IMPLICATION(turbo, ignition)
-DEFINE_DUAL_IMPLICATION(turbo, thin_strings)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
-DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
- "mark shared functions for tier up")
-DEFINE_BOOL(mark_optimizing_shared_functions, true,
- "mark shared functions if they are concurrently optimizing")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
@@ -302,6 +289,7 @@ DEFINE_BOOL(track_field_types, true, "track field types")
DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(type_profile, false, "collect type information")
+DEFINE_BOOL(block_coverage, false, "collect block coverage information")
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
// TODO(jkummerow): This currently adds too much load on the stub cache.
@@ -319,14 +307,19 @@ DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL(string_slices, true, "use string slices")
+// Flags for Full-codegen.
+DEFINE_BOOL(stress_fullcodegen, false,
+ "use fullcodegen compiler for all functions it can support")
+
// Flags for Ignition.
-DEFINE_BOOL(ignition, false, "use ignition interpreter")
DEFINE_BOOL(ignition_osr, true, "enable support for OSR from ignition code")
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
"elide bytecodes which won't have any external effect")
DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
DEFINE_BOOL(ignition_filter_expression_positions, true,
"filter expression positions before the bytecode pipeline")
+DEFINE_BOOL(ignition_string_concat, false,
+ "translate string add chains into string concatenations")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
DEFINE_STRING(print_bytecode_filter, "*",
@@ -346,11 +339,6 @@ DEFINE_STRING(trace_ignition_dispatches_output_file, nullptr,
// Flags for Crankshaft.
DEFINE_STRING(hydrogen_filter, "*", "optimization filter")
-DEFINE_BOOL(use_gvn, true, "use hydrogen global value numbering")
-DEFINE_INT(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
-DEFINE_BOOL(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
-DEFINE_BOOL(use_inlining, true, "use function inlining")
-DEFINE_BOOL(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_BOOL(use_allocation_folding, true, "use allocation folding")
DEFINE_BOOL(use_local_allocation_folding, false, "only fold in basic blocks")
DEFINE_BOOL(use_write_barrier_elimination, true,
@@ -358,7 +346,7 @@ DEFINE_BOOL(use_write_barrier_elimination, true,
DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_INT(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
-DEFINE_INT(max_inlined_nodes, 200,
+DEFINE_INT(max_inlined_nodes, 230,
"maximum number of AST nodes considered for a single inlining")
DEFINE_INT(max_inlined_nodes_absolute, 1600,
"maximum absolute number of AST nodes considered for inlining "
@@ -368,10 +356,7 @@ DEFINE_INT(max_inlined_nodes_cumulative, 400,
DEFINE_INT(max_inlined_nodes_small, 10,
"maximum number of AST nodes considered for small function inlining")
DEFINE_FLOAT(min_inlining_frequency, 0.15, "minimum frequency for inlining")
-DEFINE_BOOL(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen")
-DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase")
DEFINE_BOOL(trace_environment_liveness, false,
"trace liveness of local variable slots")
DEFINE_BOOL(trace_hydrogen, false, "trace generated hydrogen to file")
@@ -379,62 +364,34 @@ DEFINE_STRING(trace_hydrogen_filter, "*", "hydrogen tracing filter")
DEFINE_BOOL(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
DEFINE_STRING(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_STRING(trace_phase, "HLZ", "trace generated IR for specified phases")
-DEFINE_BOOL(trace_inlining, false, "trace inlining decisions")
-DEFINE_BOOL(trace_load_elimination, false, "trace load elimination")
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
DEFINE_BOOL(turbo_verify_store_elimination, false,
"verify store elimination more rigorously")
DEFINE_BOOL(trace_alloc, false, "trace register allocator")
DEFINE_BOOL(trace_all_uses, false, "trace all use positions")
-DEFINE_BOOL(trace_range, false, "trace range analysis")
-DEFINE_BOOL(trace_gvn, false, "trace global value numbering")
DEFINE_BOOL(trace_representation, false, "trace representation types")
-DEFINE_BOOL(trace_removable_simulates, false, "trace removable simulates")
-DEFINE_BOOL(trace_escape_analysis, false, "trace hydrogen escape analysis")
-DEFINE_BOOL(trace_allocation_folding, false, "trace allocation folding")
DEFINE_BOOL(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
DEFINE_BOOL(trace_migration, false, "trace object migration")
DEFINE_BOOL(trace_generalization, false, "trace map generalization")
DEFINE_BOOL(stress_pointer_maps, false, "pointer map for every instruction")
-DEFINE_BOOL(stress_environments, false, "environment for every instruction")
DEFINE_INT(deopt_every_n_times, 0,
"deoptimize every n times a deopt point is passed")
-DEFINE_INT(deopt_every_n_garbage_collections, 0,
- "deoptimize every n garbage collections")
DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_BOOL(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_BOOL(trap_on_stub_deopt, false,
"put a break point before deoptimizing a stub")
-DEFINE_BOOL(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_BOOL(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_BOOL(use_osr, true, "use on-stack replacement")
-DEFINE_BOOL(array_bounds_checks_elimination, true,
- "perform array bounds checks elimination")
-DEFINE_BOOL(trace_bce, false, "trace array bounds check elimination")
-DEFINE_BOOL(array_index_dehoisting, true, "perform array index dehoisting")
DEFINE_BOOL(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
DEFINE_BOOL(load_elimination, true, "use load elimination")
DEFINE_BOOL(check_elimination, true, "use check elimination")
DEFINE_BOOL(store_elimination, false, "use store elimination")
-DEFINE_BOOL(dead_code_elimination, true, "use dead code elimination")
-DEFINE_BOOL(fold_constants, true, "use constant folding")
-DEFINE_BOOL(trace_dead_code_elimination, false, "trace dead code elimination")
-DEFINE_BOOL(unreachable_code_elimination, true, "eliminate unreachable code")
DEFINE_BOOL(trace_osr, false, "trace on-stack replacement")
DEFINE_INT(stress_runs, 0, "number of stress runs")
-DEFINE_BOOL(lookup_sample_by_shared, true,
- "when picking a function to optimize, watch for shared function "
- "info, not JSFunction itself")
-DEFINE_BOOL(flush_optimized_code_cache, false,
- "flushes the cache of optimized code for closures on every GC")
-DEFINE_BOOL(inline_construct, true, "inline constructor calls")
-DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
-DEFINE_INT(escape_analysis_iterations, 1,
- "maximum number of escape analysis fix-point iterations")
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
@@ -452,18 +409,11 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
"deoptimize the optimized code if the layout of the maps changes.")
// Flags for TurboFan.
-#ifdef V8_DISABLE_TURBO
-// Allow to disable turbofan with a build flag after it's turned on by default.
-#define TURBO_BOOL false
-#else
-#define TURBO_BOOL true
-#endif
-DEFINE_BOOL(turbo, TURBO_BOOL, "enable TurboFan compiler")
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
-DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
+DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
DEFINE_IMPLICATION(trace_turbo_graph, trace_turbo)
@@ -476,7 +426,6 @@ DEFINE_BOOL(trace_turbo_trimming, false, "trace TurboFan's graph trimmer")
DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
DEFINE_BOOL(trace_turbo_loop, false, "trace TurboFan's loop optimizations")
-DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_STRING(turbo_verify_machine_graph, nullptr,
"verify TurboFan machine graph before instruction selection")
@@ -501,6 +450,8 @@ DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
+DEFINE_BOOL(turbo_inline_array_builtins, true,
+ "inline array builtins in TurboFan code")
DEFINE_BOOL(turbo_load_elimination, true, "enable load elimination in TurboFan")
DEFINE_BOOL(trace_turbo_load_elimination, false,
"trace TurboFan load elimination")
@@ -527,20 +478,20 @@ DEFINE_BOOL(turbo_experimental, false,
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
"simplifies execution model to make porting "
- "easier (e.g. always use Ignition, never use Crankshaft")
-DEFINE_IMPLICATION(minimal, ignition)
+ "easier (e.g. always use Ignition, never optimize)")
+DEFINE_NEG_IMPLICATION(minimal, stress_fullcodegen)
DEFINE_NEG_IMPLICATION(minimal, opt)
DEFINE_NEG_IMPLICATION(minimal, use_ic)
// Flags for native WebAssembly.
-DEFINE_BOOL(expose_wasm, true, "expose WASM interface to JavaScript")
+DEFINE_BOOL(expose_wasm, true, "expose wasm interface to JavaScript")
DEFINE_BOOL(assume_asmjs_origin, false,
"force wasm decoder to assume input is internal asm-wasm format")
DEFINE_BOOL(wasm_disable_structured_cloning, false,
- "disable WASM structured cloning")
+ "disable wasm structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
-DEFINE_BOOL(wasm_async_compilation, false,
+DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
// Parallel compilation confuses turbo_stats, force single threaded.
DEFINE_VALUE_IMPLICATION(turbo_stats, wasm_num_compilation_tasks, 0)
@@ -553,8 +504,8 @@ DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
DEFINE_INT(trace_wasm_ast_start, 0,
- "start function for WASM AST trace (inclusive)")
-DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
+ "start function for wasm AST trace (inclusive)")
+DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
@@ -568,22 +519,22 @@ DEFINE_BOOL(trace_asm_scanner, false,
DEFINE_BOOL(trace_asm_parser, false, "verbose logging of asm.js parse failures")
DEFINE_BOOL(stress_validate_asm, false, "try to validate everything as asm.js")
-DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
+DEFINE_BOOL(dump_wasm_module, false, "dump wasm module bytes")
DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
-DEFINE_BOOL(wasm_simd_prototype, false,
+DEFINE_BOOL(experimental_wasm_simd, false,
"enable prototype simd opcodes for wasm")
-DEFINE_BOOL(wasm_eh_prototype, false,
+DEFINE_BOOL(experimental_wasm_eh, false,
"enable prototype exception handling opcodes for wasm")
-DEFINE_BOOL(wasm_mv_prototype, false,
+DEFINE_BOOL(experimental_wasm_mv, false,
"enable prototype multi-value support for wasm")
-DEFINE_BOOL(wasm_atomics_prototype, false,
- "enable prototype atomic opcodes for wasm")
+DEFINE_BOOL(experimental_wasm_threads, false,
+ "enable prototype threads for wasm")
-DEFINE_BOOL(wasm_opt, true, "enable wasm optimization")
+DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
"disable bounds checks (performance testing only)")
DEFINE_BOOL(wasm_no_stack_checks, false,
@@ -662,11 +613,7 @@ DEFINE_BOOL(trace_fragmentation_verbose, false,
DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
DEFINE_BOOL(trace_mutator_utilization, false,
"print mutator utilization, allocation speed, gc speed")
-DEFINE_BOOL(flush_code, false, "flush code that we expect not to use again")
-DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
-DEFINE_BOOL(age_code, true,
- "track un-executed functions to age code and flush only "
- "old code (required for code flushing)")
+DEFINE_BOOL(age_code, true, "track un-executed functions to age code")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
@@ -688,7 +635,6 @@ DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
-DEFINE_NEG_IMPLICATION(minor_mc, flush_code)
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
"use concurrent store buffer processing")
@@ -725,6 +671,9 @@ DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
DEFINE_BOOL(never_compact, false,
"Never perform compaction on full GC - testing only")
+// TODO(ulan): enable compaction for concurrent marking when it correctly
+// records slots to evacuation candidates.
+DEFINE_IMPLICATION(concurrent_marking, never_compact)
DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
DEFINE_BOOL(cleanup_code_caches_at_gc, true,
"Flush code caches in maps during mark compact cycle.")
@@ -737,6 +686,8 @@ DEFINE_BOOL(force_marking_deque_overflows, false,
DEFINE_BOOL(stress_compaction, false,
"stress the GC compactor to flush out bugs (implies "
"--force_marking_deque_overflows)")
+DEFINE_BOOL(stress_incremental_marking, false,
+ "force incremental marking for small heaps and run it more often")
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -746,7 +697,9 @@ DEFINE_BOOL(fast_promotion_new_space, false,
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
-DEFINE_BOOL(code_comments, false, "emit comments in code disassembly")
+DEFINE_BOOL(code_comments, false,
+ "emit comments in code disassembly; for more readable source "
+ "positions you should add --no-concurrent_recompilation")
DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
DEFINE_BOOL(enable_sse4_1, true,
@@ -937,7 +890,7 @@ DEFINE_BOOL(native_code_counters, false,
"generate extra code for manipulating stats counters")
// objects.cc
-DEFINE_BOOL(thin_strings, false, "Enable ThinString support")
+DEFINE_BOOL(thin_strings, true, "Enable ThinString support")
DEFINE_BOOL(trace_weak_arrays, false, "Trace WeakFixedArray usage")
DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
@@ -950,6 +903,7 @@ DEFINE_BOOL(trace_maps, false, "trace map creation")
// preparser.cc
DEFINE_BOOL(use_parse_tasks, false, "use parse tasks")
DEFINE_BOOL(trace_parse_tasks, false, "trace parse task creation")
+DEFINE_NEG_IMPLICATION(use_parse_tasks, experimental_preparser_scope_analysis)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
@@ -961,7 +915,8 @@ DEFINE_BOOL(aggressive_lazy_inner_functions, false,
DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
DEFINE_BOOL(experimental_preparser_scope_analysis, false,
"perform scope analysis for preparsed inner functions")
-DEFINE_IMPLICATION(experimental_preparser_scope_analysis, lazy_inner_functions)
+DEFINE_IMPLICATION(experimental_preparser_scope_analysis,
+ aggressive_lazy_inner_functions)
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -1043,10 +998,6 @@ DEFINE_STRING(startup_src, NULL,
DEFINE_STRING(startup_blob, NULL,
"Write V8 startup blob file. (mksnapshot only)")
-// code-stubs-hydrogen.cc
-DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
- "Print the time it takes to lazily compile hydrogen code stubs.")
-
//
// Dev shell flags
//
@@ -1266,6 +1217,7 @@ DEFINE_BOOL(print_unopt_code, false,
"printing optimized code based on it")
DEFINE_BOOL(print_code_verbose, false, "print more information for code")
DEFINE_BOOL(print_builtin_code, false, "print generated code for builtins")
+DEFINE_BOOL(print_builtin_size, false, "print code size for builtins")
#ifdef ENABLE_DISASSEMBLER
DEFINE_BOOL(sodium, false,
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index ed901cf6e8..9fdc5d04be 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -154,7 +154,6 @@ struct Flag {
return args_variable()->argc == 0;
}
UNREACHABLE();
- return true;
}
// Set a flag back to its default value.
@@ -207,7 +206,6 @@ static const char* Type2String(Flag::FlagType type) {
case Flag::TYPE_ARGS: return "arguments";
}
UNREACHABLE();
- return NULL;
}
diff --git a/deps/v8/src/float.h b/deps/v8/src/float.h
new file mode 100644
index 0000000000..5094b3393f
--- /dev/null
+++ b/deps/v8/src/float.h
@@ -0,0 +1,57 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FLOAT32_H_
+#define V8_FLOAT32_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(ahaas): Merge these classes with the one in double.h
+
+// Safety wrapper for a 32-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value.
+class Float32 {
+ public:
+ Float32() : bit_pattern_(0) {}
+
+ explicit Float32(uint32_t bit_pattern) : bit_pattern_(bit_pattern) {}
+
+ // This constructor does not guarantee that the bit pattern of the input value
+ // is preserved if the input is a NaN.
+ explicit Float32(float value) : bit_pattern_(bit_cast<uint32_t>(value)) {}
+
+ uint32_t get_bits() const { return bit_pattern_; }
+
+ float get_scalar() const { return bit_cast<float>(bit_pattern_); }
+
+ static Float32 FromBits(uint32_t bits) { return Float32(bits); }
+
+ private:
+ uint32_t bit_pattern_;
+};
+
+// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value. Note
+// that there is intentionally no way to construct it from a {double} value.
+// TODO(ahaas): Unify this class with Double in double.h
+class Float64 {
+ public:
+ Float64() : bit_pattern_(0) {}
+ uint64_t get_bits() const { return bit_pattern_; }
+ double get_scalar() const { return bit_cast<double>(bit_pattern_); }
+ bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
+ static Float64 FromBits(uint64_t bits) { return Float64(bits); }
+
+ private:
+ explicit Float64(uint64_t bit_pattern) : bit_pattern_(bit_pattern) {}
+ uint64_t bit_pattern_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FLOAT32_H_
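The reason Float64 above intentionally has no constructor taking a double is the same one called out in the Float32 comment: moving the value through a floating-point register or conversion can quietly canonicalize a NaN and lose its payload, so the wrapper carries the raw bits instead. A standalone sketch of the same idea (an illustration, not the V8 header) is:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    class Float64Sketch {
     public:
      static Float64Sketch FromBits(uint64_t bits) { return Float64Sketch(bits); }
      uint64_t get_bits() const { return bit_pattern_; }
      double get_scalar() const {
        double d;
        std::memcpy(&d, &bit_pattern_, sizeof(d));  // same effect as bit_cast<double>
        return d;
      }

     private:
      explicit Float64Sketch(uint64_t bits) : bit_pattern_(bits) {}
      uint64_t bit_pattern_;
    };

    int main() {
      const uint64_t signalling_nan_bits = 0x7FF0000000000001ULL;  // NaN with a payload
      Float64Sketch f = Float64Sketch::FromBits(signalling_nan_bits);
      assert(f.get_bits() == signalling_nan_bits);  // the exact payload survives
      return 0;
    }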
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index bf1db05295..5a383144be 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -26,8 +26,6 @@
#include "src/mips64/frames-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/frames-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/frames-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -111,7 +109,7 @@ inline Object* BuiltinExitFrame::receiver_slot_object() const {
// fp[2 + argc - 1]: receiver.
Object* argc_slot = argc_slot_object();
DCHECK(argc_slot->IsSmi());
- int argc = Smi::cast(argc_slot)->value();
+ int argc = Smi::ToInt(argc_slot);
const int receiverOffset =
BuiltinExitFrameConstants::kNewTargetOffset + (argc - 1) * kPointerSize;
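Smi::ToInt, used here in place of Smi::cast(...)->value(), simply undoes the small-integer tagging. A simplified self-contained model (assuming a 32-bit style scheme where the value sits above a zero tag bit) is:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTagSize = 1;  // low bit 0 marks a Smi in this model

    intptr_t TagSmi(int value) { return static_cast<intptr_t>(value) << kSmiTagSize; }
    int SmiToInt(intptr_t tagged) { return static_cast<int>(tagged >> kSmiTagSize); }

    int main() {
      intptr_t argc_slot = TagSmi(5);    // what a tagged argc slot would hold
      assert(SmiToInt(argc_slot) == 5);  // the role Smi::ToInt(argc_slot) plays above
      return 0;
    }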
@@ -183,7 +181,9 @@ inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
Address JavaScriptFrame::GetParameterSlot(int index) const {
int param_count = ComputeParametersCount();
- DCHECK(-1 <= index && index < param_count);
+ DCHECK(-1 <= index &&
+ (index < param_count ||
+ param_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel));
int parameter_offset = (param_count - index - 1) * kPointerSize;
return caller_sp() + parameter_offset;
}
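The relaxed DCHECK still allows index == -1, which addresses the receiver; the offset formula places it farthest above caller_sp(). Plugging in hypothetical numbers makes the layout concrete:

    #include <cassert>

    int main() {
      const int kPointerSize = 8;  // hypothetical 64-bit target
      const int param_count = 2;
      auto offset = [&](int index) { return (param_count - index - 1) * kPointerSize; };
      assert(offset(-1) == 16);    // receiver slot, farthest above caller_sp()
      assert(offset(0) == 8);      // first parameter
      assert(offset(1) == 0);      // last parameter sits right at caller_sp()
      return 0;
    }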
@@ -269,16 +269,18 @@ inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
-
-inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
- StackFrameIteratorBase* iterator) : StandardFrame(iterator) {
-}
-
-
inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
: InternalFrame(iterator) {
}
+inline BuiltinContinuationFrame::BuiltinContinuationFrame(
+ StackFrameIteratorBase* iterator)
+ : InternalFrame(iterator) {}
+
+inline JavaScriptBuiltinContinuationFrame::JavaScriptBuiltinContinuationFrame(
+ StackFrameIteratorBase* iterator)
+ : JavaScriptFrame(iterator) {}
+
inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate)
: iterator_(isolate) {
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 90610fafb6..2b94b2b81f 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -138,13 +138,24 @@ void JavaScriptFrameIterator::Advance() {
} while (!iterator_.done() && !iterator_.frame()->is_java_script());
}
-
void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
if (!frame()->has_adapted_arguments()) return;
iterator_.Advance();
DCHECK(iterator_.frame()->is_arguments_adaptor());
}
+void JavaScriptFrameIterator::AdvanceWhileDebugContext(Debug* debug) {
+ if (!debug->in_debug_scope()) return;
+
+ while (!done()) {
+ Context* context = Context::cast(frame()->context());
+ if (context->native_context() == *debug->debug_context()) {
+ Advance();
+ } else {
+ break;
+ }
+ }
+}
// -------------------------------------------------------------------------
@@ -504,9 +515,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case ENTRY:
case ENTRY_CONSTRUCT:
case EXIT:
+ case BUILTIN_CONTINUATION:
+ case JAVA_SCRIPT_BUILTIN_CONTINUATION:
case BUILTIN_EXIT:
case STUB:
- case STUB_FAILURE_TRAMPOLINE:
case INTERNAL:
case CONSTRUCT:
case ARGUMENTS_ADAPTOR:
@@ -682,7 +694,7 @@ int BuiltinExitFrame::ComputeParametersCount() const {
DCHECK(argc_slot->IsSmi());
// Argc also counts the receiver, target, new target, and argc itself as args,
// therefore the real argument count is argc - 4.
- int argc = Smi::cast(argc_slot)->value() - 4;
+ int argc = Smi::ToInt(argc_slot) - 4;
DCHECK(argc >= 0);
return argc;
}
@@ -752,7 +764,6 @@ int StandardFrame::ComputeExpressionsCount() const {
Object* StandardFrame::GetParameter(int index) const {
// StandardFrame does not define any parameters.
UNREACHABLE();
- return nullptr;
}
int StandardFrame::ComputeParametersCount() const { return 0; }
@@ -803,8 +814,9 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case ENTRY:
case ENTRY_CONSTRUCT:
case EXIT:
+ case BUILTIN_CONTINUATION:
+ case JAVA_SCRIPT_BUILTIN_CONTINUATION:
case BUILTIN_EXIT:
- case STUB_FAILURE_TRAMPOLINE:
case ARGUMENTS_ADAPTOR:
case STUB:
case INTERNAL:
@@ -1144,10 +1156,16 @@ int JavaScriptFrame::ComputeParametersCount() const {
return GetNumberOfIncomingArguments();
}
+int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
+ Object* argc_object =
+ Memory::Object_at(fp() + BuiltinContinuationFrameConstants::kArgCOffset);
+ return Smi::ToInt(argc_object);
+}
+
namespace {
-bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
- return code->is_turbofanned() && function->shared()->asm_function();
+bool IsNonDeoptimizingAsmCode(Code* code, JSFunction* function) {
+ return code->is_turbofanned() && !function->shared()->HasBytecodeArray();
}
} // namespace
@@ -1164,7 +1182,7 @@ FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
is_constructor_(is_constructor) {
DCHECK(abstract_code->IsBytecodeArray() ||
Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
- CannotDeoptFromAsmCode(Code::cast(abstract_code), function) ||
+ IsNonDeoptimizingAsmCode(Code::cast(abstract_code), function) ||
mode == kApproximateSummary);
}
@@ -1257,7 +1275,7 @@ uint32_t FrameSummary::WasmCompiledFrameSummary::function_index() const {
FixedArray* deopt_data = code()->deoptimization_data();
DCHECK_EQ(2, deopt_data->length());
DCHECK(deopt_data->get(1)->IsSmi());
- int val = Smi::cast(deopt_data->get(1))->value();
+ int val = Smi::ToInt(deopt_data->get(1));
DCHECK_LE(0, val);
return static_cast<uint32_t>(val);
}
@@ -1348,7 +1366,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
// TODO(turbofan): Revisit once we support deoptimization across the board.
Code* code = LookupCode();
if (code->kind() == Code::BUILTIN ||
- CannotDeoptFromAsmCode(code, function())) {
+ IsNonDeoptimizingAsmCode(code, function())) {
return JavaScriptFrame::Summarize(frames);
}
@@ -1377,8 +1395,8 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
bool is_constructor = IsConstructor();
while (jsframe_count != 0) {
frame_opcode = static_cast<Translation::Opcode>(it.Next());
- if (frame_opcode == Translation::JS_FRAME ||
- frame_opcode == Translation::INTERPRETED_FRAME) {
+ if (frame_opcode == Translation::INTERPRETED_FRAME ||
+ frame_opcode == Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) {
jsframe_count--;
BailoutId const bailout_id = BailoutId(it.Next());
SharedFunctionInfo* const shared_info =
@@ -1422,14 +1440,10 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
AbstractCode* abstract_code;
unsigned code_offset;
- if (frame_opcode == Translation::JS_FRAME) {
- Code* code = shared_info->code();
- DeoptimizationOutputData* const output_data =
- DeoptimizationOutputData::cast(code->deoptimization_data());
- unsigned const entry =
- Deoptimizer::GetOutputInfo(output_data, bailout_id, shared_info);
- code_offset = FullCodeGenerator::PcField::decode(entry);
- abstract_code = AbstractCode::cast(code);
+ if (frame_opcode == Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) {
+ code_offset = 0;
+ abstract_code = AbstractCode::cast(isolate()->builtins()->builtin(
+ Builtins::GetBuiltinFromBailoutId(bailout_id)));
} else {
DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
code_offset = bailout_id.ToInt(); // Points to current bytecode.
@@ -1515,7 +1529,7 @@ void OptimizedFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
// TODO(turbofan): Revisit once we support deoptimization across the board.
Code* code = LookupCode();
if (code->kind() == Code::BUILTIN ||
- CannotDeoptFromAsmCode(code, function())) {
+ IsNonDeoptimizingAsmCode(code, function())) {
return JavaScriptFrame::GetFunctions(functions);
}
@@ -1537,8 +1551,8 @@ void OptimizedFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
// in the deoptimization translation are ordered bottom-to-top.
while (jsframe_count != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::JS_FRAME ||
- opcode == Translation::INTERPRETED_FRAME) {
+ if (opcode == Translation::INTERPRETED_FRAME ||
+ opcode == Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) {
it.Next(); // Skip bailout id.
jsframe_count--;
@@ -1584,7 +1598,7 @@ int InterpretedFrame::GetBytecodeOffset() const {
DCHECK_EQ(
InterpreterFrameConstants::kBytecodeOffsetFromFp,
InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
- int raw_offset = Smi::cast(GetExpression(index))->value();
+ int raw_offset = Smi::ToInt(GetExpression(index));
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
@@ -1595,7 +1609,7 @@ int InterpretedFrame::GetBytecodeOffset(Address fp) {
InterpreterFrameConstants::kBytecodeOffsetFromFp,
InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
Address expression_offset = fp + offset - index * kPointerSize;
- int raw_offset = Smi::cast(Memory::Object_at(expression_offset))->value();
+ int raw_offset = Smi::ToInt(Memory::Object_at(expression_offset));
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
@@ -1653,12 +1667,12 @@ void InterpretedFrame::Summarize(List<FrameSummary>* functions,
}
int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
- return Smi::cast(GetExpression(0))->value();
+ return Smi::ToInt(GetExpression(0));
}
int ArgumentsAdaptorFrame::GetLength(Address fp) {
const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
- return Smi::cast(Memory::Object_at(fp + offset))->value();
+ return Smi::ToInt(Memory::Object_at(fp + offset));
}
Code* ArgumentsAdaptorFrame::unchecked_code() const {
@@ -1667,7 +1681,7 @@ Code* ArgumentsAdaptorFrame::unchecked_code() const {
}
int BuiltinFrame::GetNumberOfIncomingArguments() const {
- return Smi::cast(GetExpression(0))->value();
+ return Smi::ToInt(GetExpression(0));
}
void BuiltinFrame::PrintFrameKind(StringStream* accumulator) const {
@@ -2053,42 +2067,6 @@ void InternalFrame::Iterate(RootVisitor* v) const {
if (code->has_tagged_params()) IterateExpressions(v);
}
-void StubFailureTrampolineFrame::Iterate(RootVisitor* v) const {
- Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(
- fp() + StubFailureTrampolineFrameConstants::kFixedHeaderBottomOffset);
- v->VisitRootPointers(Root::kTop, base, limit);
- base = &Memory::Object_at(fp() + StandardFrameConstants::kFunctionOffset);
- const int offset = StandardFrameConstants::kLastObjectOffset;
- limit = &Memory::Object_at(fp() + offset) + 1;
- v->VisitRootPointers(Root::kTop, base, limit);
- IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
-}
-
-
-Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
-Code* StubFailureTrampolineFrame::unchecked_code() const {
- Code* trampoline;
- StubFailureTrampolineStub(isolate(), NOT_JS_FUNCTION_STUB_MODE).
- FindCodeInCache(&trampoline);
- if (trampoline->contains(pc())) {
- return trampoline;
- }
-
- StubFailureTrampolineStub(isolate(), JS_FUNCTION_STUB_MODE).
- FindCodeInCache(&trampoline);
- if (trampoline->contains(pc())) {
- return trampoline;
- }
-
- UNREACHABLE();
- return NULL;
-}
-
// -------------------------------------------------------------------------
@@ -2101,7 +2079,6 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
iterator_.Advance();
}
UNREACHABLE();
- return NULL;
}
@@ -2184,9 +2161,8 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
- DCHECK(base::bits::IsPowerOfTwo32(kInnerPointerToCodeCacheSize));
- uint32_t hash = ComputeIntegerHash(ObjectAddressForHashing(inner_pointer),
- v8::internal::kZeroHashSeed);
+ DCHECK(base::bits::IsPowerOfTwo(kInnerPointerToCodeCacheSize));
+ uint32_t hash = ComputeIntegerHash(ObjectAddressForHashing(inner_pointer));
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index a5355a4e8c..92cce268f1 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -15,6 +15,7 @@ namespace v8 {
namespace internal {
class AbstractCode;
+class Debug;
class ObjectVisitor;
class StringStream;
@@ -98,23 +99,24 @@ class StackHandler BASE_EMBEDDED {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
-#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(WASM_COMPILED, WasmCompiledFrame) \
- V(WASM_TO_JS, WasmToJsFrame) \
- V(JS_TO_WASM, JsToWasmFrame) \
- V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
- V(INTERPRETED, InterpretedFrame) \
- V(STUB, StubFrame) \
- V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
- V(BUILTIN, BuiltinFrame) \
+#define STACK_FRAME_TYPE_LIST(V) \
+ V(ENTRY, EntryFrame) \
+ V(ENTRY_CONSTRUCT, EntryConstructFrame) \
+ V(EXIT, ExitFrame) \
+ V(JAVA_SCRIPT, JavaScriptFrame) \
+ V(OPTIMIZED, OptimizedFrame) \
+ V(WASM_COMPILED, WasmCompiledFrame) \
+ V(WASM_TO_JS, WasmToJsFrame) \
+ V(JS_TO_WASM, JsToWasmFrame) \
+ V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
+ V(INTERPRETED, InterpretedFrame) \
+ V(STUB, StubFrame) \
+ V(BUILTIN_CONTINUATION, BuiltinContinuationFrame) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION, JavaScriptBuiltinContinuationFrame) \
+ V(INTERNAL, InternalFrame) \
+ V(CONSTRUCT, ConstructFrame) \
+ V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
+ V(BUILTIN, BuiltinFrame) \
V(BUILTIN_EXIT, BuiltinExitFrame)
// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
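STACK_FRAME_TYPE_LIST is an X-macro: the list is expanded more than once with different definitions of V (the frame Type enumerators among other things), so adding the two continuation entries here is what threads the new frame types through every expansion. A toy version of the pattern, with made-up names, could look like:

    #include <cstdio>

    #define FRAME_LIST(V)                               \
      V(ENTRY, EntryFrame)                              \
      V(EXIT, ExitFrame)                                \
      V(BUILTIN_CONTINUATION, BuiltinContinuationFrame)

    enum FrameType {
    #define DECLARE_TYPE(type, klass) type,
      FRAME_LIST(DECLARE_TYPE)
    #undef DECLARE_TYPE
      NUMBER_OF_TYPES
    };

    const char* FrameClassName(FrameType t) {
      switch (t) {
    #define NAME_CASE(type, klass) \
      case type:                   \
        return #klass;
        FRAME_LIST(NAME_CASE)
    #undef NAME_CASE
        default:
          return "unknown";
      }
    }

    int main() {
      std::printf("%s\n", FrameClassName(BUILTIN_CONTINUATION));
      return 0;
    }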
@@ -359,14 +361,13 @@ class ConstructFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(4);
};
-class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
+class BuiltinContinuationFrameConstants : public TypedFrameConstants {
public:
- static const int kArgumentsArgumentsOffset =
- TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kArgumentsLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static const int kArgumentsPointerOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- static const int kFixedHeaderBottomOffset = kArgumentsPointerOffset;
- DEFINE_TYPED_FRAME_SIZES(3);
+ // FP-relative.
+ static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static const int kArgCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ DEFINE_TYPED_FRAME_SIZES(2);
};
// Behaves like an exit frame but with target and new target args.
@@ -522,8 +523,11 @@ class StackFrame BASE_EMBEDDED {
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_builtin() const { return type() == BUILTIN; }
bool is_internal() const { return type() == INTERNAL; }
- bool is_stub_failure_trampoline() const {
- return type() == STUB_FAILURE_TRAMPOLINE;
+ bool is_builtin_continuation() const {
+ return type() == BUILTIN_CONTINUATION;
+ }
+ bool is_java_script_builtin_continuation() const {
+ return type() == JAVA_SCRIPT_BUILTIN_CONTINUATION;
}
bool is_construct() const { return type() == CONSTRUCT; }
bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
@@ -532,7 +536,8 @@ class StackFrame BASE_EMBEDDED {
bool is_java_script() const {
Type type = this->type();
return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
- (type == INTERPRETED) || (type == BUILTIN);
+ (type == INTERPRETED) || (type == BUILTIN) ||
+ (type == JAVA_SCRIPT_BUILTIN_CONTINUATION);
}
bool is_wasm() const {
Type type = this->type();
@@ -1416,51 +1421,59 @@ class InternalFrame: public StandardFrame {
};
-class StubFailureTrampolineFrame: public StandardFrame {
+// Construct frames are special trampoline frames introduced to handle
+// function invocations through 'new'.
+class ConstructFrame: public InternalFrame {
public:
- Type type() const override { return STUB_FAILURE_TRAMPOLINE; }
+ Type type() const override { return CONSTRUCT; }
- // Get the code associated with this frame.
- // This method could be called during marking phase of GC.
- Code* unchecked_code() const override;
+ static ConstructFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_construct());
+ return static_cast<ConstructFrame*>(frame);
+ }
- void Iterate(RootVisitor* v) const override;
+ protected:
+ inline explicit ConstructFrame(StackFrameIteratorBase* iterator);
- // Architecture-specific register description.
- static Register fp_register();
- static Register context_register();
- static Register constant_pool_pointer_register();
+ private:
+ friend class StackFrameIteratorBase;
+};
- protected:
- inline explicit StubFailureTrampolineFrame(
- StackFrameIteratorBase* iterator);
+class BuiltinContinuationFrame : public InternalFrame {
+ public:
+ Type type() const override { return BUILTIN_CONTINUATION; }
- Address GetCallerStackPointer() const override;
+ static BuiltinContinuationFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_builtin_continuation());
+ return static_cast<BuiltinContinuationFrame*>(frame);
+ }
+
+ protected:
+ inline explicit BuiltinContinuationFrame(StackFrameIteratorBase* iterator);
private:
friend class StackFrameIteratorBase;
};
-
-// Construct frames are special trampoline frames introduced to handle
-// function invocations through 'new'.
-class ConstructFrame: public InternalFrame {
+class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
public:
- Type type() const override { return CONSTRUCT; }
+ Type type() const override { return JAVA_SCRIPT_BUILTIN_CONTINUATION; }
- static ConstructFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_construct());
- return static_cast<ConstructFrame*>(frame);
+ static JavaScriptBuiltinContinuationFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_java_script_builtin_continuation());
+ return static_cast<JavaScriptBuiltinContinuationFrame*>(frame);
}
+ int ComputeParametersCount() const override;
+
protected:
- inline explicit ConstructFrame(StackFrameIteratorBase* iterator);
+ inline explicit JavaScriptBuiltinContinuationFrame(
+ StackFrameIteratorBase* iterator);
private:
friend class StackFrameIteratorBase;
};
-
class StackFrameIteratorBase BASE_EMBEDDED {
public:
Isolate* isolate() const { return isolate_; }
@@ -1531,12 +1544,16 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
// arguments.
void AdvanceToArgumentsFrame();
+ // Skips the frames that point to the debug context.
+ void AdvanceWhileDebugContext(Debug* debug);
+
private:
StackFrameIterator iterator_;
};
// NOTE: The stack trace frame iterator is an iterator that only traverses proper
-// JavaScript frames that have proper JavaScript functions and WASM frames.
+// JavaScript frames that have proper JavaScript functions and WebAssembly
+// frames.
class StackTraceFrameIterator BASE_EMBEDDED {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
diff --git a/deps/v8/src/full-codegen/OWNERS b/deps/v8/src/full-codegen/OWNERS
index 19e4ed6b6e..68f0495205 100644
--- a/deps/v8/src/full-codegen/OWNERS
+++ b/deps/v8/src/full-codegen/OWNERS
@@ -8,3 +8,5 @@ mstarzinger@chromium.org
mvstanton@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 30913d50c7..3c1cb7d91a 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -109,6 +109,10 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-arm.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
+ // Block sharing of code target entries. The interrupt checks must be
+ // possible to patch individually, and replacing code with a debug version
+ // relies on RelocInfo not being shared.
+ Assembler::BlockCodeTargetSharingScope block_code_target_sharing(masm_);
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(literal());
@@ -194,8 +198,6 @@ void FullCodeGenerator::Generate() {
__ push(r1);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -253,12 +255,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -273,14 +269,16 @@ void FullCodeGenerator::Generate() {
__ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewStrictArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(r1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewSloppyArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -293,8 +291,6 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -307,19 +303,16 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
+ __ CompareRoot(sp, Heap::kStackLimitRootIndex);
__ b(hs, &ok);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
masm_->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm_);
predictable.ExpectSize(
masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
- CAN_INLINE_TARGET_ADDRESS, false);
+ __ Call(stack_check, RelocInfo::CODE_TARGET, al, CAN_INLINE_TARGET_ADDRESS,
+ false);
__ bind(&ok);
}
@@ -404,11 +397,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -496,10 +484,6 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -519,22 +503,26 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ mov(result_register(), Operand(Smi::cast(*lit)));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ mov(result_register(), Operand(Smi::cast(*lit)));
+ }
codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
@@ -547,14 +535,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ b(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ b(false_label_);
} else {
if (true_label_ != fall_through_) __ b(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
codegen()->DoTest(this);
}
}
@@ -591,14 +579,16 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
Label done;
__ bind(materialize_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ LoadRoot(scratch, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(materialize_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ LoadRoot(scratch, Heap::kFalseValueRootIndex);
__ bind(&done);
- codegen()->PushOperand(ip);
+ codegen()->PushOperand(scratch);
}
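Several hunks in this file replace hard-coded uses of ip with a register obtained from UseScratchRegisterScope, so the assembler hands out and reclaims temporaries instead of every caller assuming ip is free. A rough standalone model of that RAII idea (not the actual ARM assembler API) is:

    #include <cassert>
    #include <vector>

    using Register = int;

    class ScratchPool {
     public:
      Register Acquire() {
        assert(!free_.empty() && "no scratch register available");
        Register r = free_.back();
        free_.pop_back();
        return r;
      }
      void Release(Register r) { free_.push_back(r); }

     private:
      std::vector<Register> free_{12};  // say only r12 (ip) is handed out
    };

    class ScratchScope {  // plays the role of UseScratchRegisterScope
     public:
      explicit ScratchScope(ScratchPool* pool) : pool_(pool) {}
      ~ScratchScope() {
        for (Register r : acquired_) pool_->Release(r);
      }
      Register Acquire() {
        Register r = pool_->Acquire();
        acquired_.push_back(r);
        return r;
      }

     private:
      ScratchPool* pool_;
      std::vector<Register> acquired_;
    };

    int main() {
      ScratchPool pool;
      {
        ScratchScope temps(&pool);
        Register scratch = temps.Acquire();  // like temps.Acquire() in the hunks above
        (void)scratch;
      }                                      // scratch handed back automatically here
      ScratchScope temps2(&pool);
      assert(temps2.Acquire() == 12);        // the register is available again
      return 0;
    }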
@@ -619,16 +609,14 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(ip, value_root_index);
- codegen()->PushOperand(ip);
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ LoadRoot(scratch, value_root_index);
+ codegen()->PushOperand(scratch);
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ b(true_label_);
} else {
@@ -641,8 +629,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
}
@@ -719,27 +708,6 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
@@ -786,7 +754,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ str(r0, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -844,7 +811,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -873,7 +839,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -917,14 +882,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ b(&skip);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(ne, &next_test);
__ Drop(1);
__ jmp(clause->body_target());
@@ -951,12 +914,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -989,7 +950,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(r0);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -1009,15 +969,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r2, ip);
+ __ CompareRoot(r2, Heap::kMetaMapRootIndex);
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
@@ -1050,7 +1008,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ Push(r1); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ mov(r0, Operand(Smi::kZero));
__ Push(r0); // Initial index.
@@ -1091,7 +1048,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// have the key or returns the name-converted key.
__ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
__ b(eq, loop_statement.continue_label());
@@ -1102,18 +1058,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
// Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(r0);
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
@@ -1126,7 +1078,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1154,7 +1105,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1228,11 +1178,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r0.
@@ -1267,7 +1217,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r0));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1300,21 +1249,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1334,7 +1279,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r0, Operand(Smi::FromInt(NONE)));
PushOperand(r0);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1364,7 +1308,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1389,9 +1332,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1449,17 +1389,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1473,17 +1408,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1496,7 +1421,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
break;
}
@@ -1513,11 +1437,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1538,129 +1457,11 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &allocate,
- NO_ALLOCATION_FLAGS);
- __ b(&done_allocate);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
- PopOperand(r2);
- __ LoadRoot(r3,
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSIteratorResult::kValueOffset));
- __ str(r3, FieldMemOperand(r0, JSIteratorResult::kDoneOffset));
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r2;
- Register scratch2 = r3;
-
- // Get the arguments.
- Register left = r1;
- Register right = r0;
- PopOperand(left);
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- switch (op) {
- case Token::SAR:
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- __ bic(right, right, Operand(kSmiTagMask));
- break;
- case Token::SHL: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ TrySmiTag(right, scratch1, &stub_call);
- break;
- }
- case Token::SHR: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD:
- __ add(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::SUB:
- __ sub(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::MUL: {
- __ SmiUntag(ip, right);
- __ smull(scratch1, scratch2, left, ip);
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &stub_call);
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ b(ne, &done);
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::kZero), LeaveCC, pl);
- __ b(mi, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(r1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(r0);
}
@@ -1780,7 +1581,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
@@ -1793,7 +1593,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
@@ -1806,12 +1605,13 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- PushOperand(ip);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ PushOperand(scratch);
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -1819,11 +1619,13 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- PushOperand(ip);
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ // Push the target function under the receiver.
+ __ ldr(scratch, MemOperand(sp, 0));
+ PushOperand(scratch);
+ }
__ str(r0, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -1845,12 +1647,16 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), r0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- PushOperand(ip);
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+
+ // Push the target function under the receiver.
+ __ ldr(scratch, MemOperand(sp, 0));
+ PushOperand(scratch);
+ }
+
__ str(r0, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -1865,26 +1671,14 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ mov(r3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, r0);
}
@@ -1923,7 +1717,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(r0);
}
@@ -1942,7 +1735,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ SmiTst(r0);
Split(eq, if_true, if_false, fall_through);
@@ -1965,7 +1757,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1987,7 +1778,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2009,7 +1799,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_TYPED_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2031,7 +1820,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2135,7 +1923,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to r1.
int const argc = args->length() - 2;
__ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -2162,43 +1949,15 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
- __ mov(ip, Operand(debug_is_active));
- __ ldrb(r0, MemOperand(ip));
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(debug_is_active));
+ __ ldrb(r0, MemOperand(scratch));
__ SmiTag(r0);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime,
- NO_ALLOCATION_FLAGS);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
- __ pop(r3);
- __ pop(r2);
- __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSIteratorResult::kValueOffset));
- __ str(r3, FieldMemOperand(r0, JSIteratorResult::kDoneOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ b(&done);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), r0);
@@ -2300,14 +2059,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(r0);
__ jmp(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(r0, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(r0);
__ bind(&done);
@@ -2349,8 +2104,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::kZero));
- PushOperand(ip);
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(Smi::kZero));
+ PushOperand(scratch);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -2378,59 +2135,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(r0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vc, &done);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- __ jmp(&stub_call);
- __ bind(&slow);
- }
-
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2456,17 +2163,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
-
- __ bind(&stub_call);
+ int count_value = expr->op() == Token::INC ? 1 : -1;
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(count_value)));
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ Handle<Code> code =
+ CodeFactory::BinaryOperation(isolate(), Token::ADD).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in r0.
switch (assign_type) {
@@ -2476,8 +2182,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(r0);
}
// For all contexts except EffectConstant We have the result on
@@ -2488,8 +2192,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
break;
@@ -2497,7 +2199,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2511,7 +2212,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2542,14 +2242,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kHeapNumberMapRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(r0, if_false);
@@ -2623,7 +2321,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2634,7 +2331,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(r1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2658,9 +2354,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
@@ -2683,7 +2378,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
@@ -2729,22 +2423,24 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
DeclarationScope* closure_scope = scope()->GetClosureScope();
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code.
- __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, scratch);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ldr(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ __ ldr(scratch, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
- __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(scratch, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- PushOperand(ip);
+ PushOperand(scratch);
}
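
Editor's note (not part of the patch): throughout the ARM hunks above, hard-coded uses of the ip register are replaced by a temporary obtained from UseScratchRegisterScope / temps.Acquire(). The standalone C++ sketch below only illustrates the RAII idea behind that pattern; ScratchPool and ScratchScope are invented names for this illustration and are not V8's real classes.

    // Simplified, standalone illustration of the scratch-register-scope pattern
    // adopted by the patch (UseScratchRegisterScope + Acquire()). Only the idea
    // -- an RAII scope that hands out temporary registers and returns them when
    // the scope ends -- mirrors the diff; the types here are made up.
    #include <bitset>
    #include <cassert>
    #include <cstdio>

    struct ScratchPool {
      std::bitset<16> in_use;  // one bit per register r0..r15
    };

    class ScratchScope {
     public:
      explicit ScratchScope(ScratchPool* pool) : pool_(pool) {}
      ~ScratchScope() { pool_->in_use &= ~acquired_; }  // release on scope exit

      int Acquire() {
        for (int reg = 0; reg < 16; ++reg) {
          if (!pool_->in_use.test(reg)) {
            pool_->in_use.set(reg);
            acquired_.set(reg);
            return reg;
          }
        }
        assert(false && "no scratch register available");
        return -1;
      }

     private:
      ScratchPool* pool_;
      std::bitset<16> acquired_;
    };

    int main() {
      ScratchPool pool;
      {
        ScratchScope temps(&pool);
        int scratch = temps.Acquire();  // analogous to temps.Acquire() in the diff
        std::printf("using r%d as scratch\n", scratch);
      }  // released here, as when a UseScratchRegisterScope goes out of scope
      return 0;
    }

The point of the change in the hunks above is the same as in this sketch: the code no longer assumes a particular register (ip) is free, it asks the assembler's scope for whatever scratch register is available.
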
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index bbb7450fe4..40fec48958 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -108,6 +108,10 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-arm.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
+ // Block sharing of code target entries. The interrupt checks must be
+ // possible to patch individually, and replacing code with a debug version
+ // relies on RelocInfo not being shared.
+ Assembler::BlockCodeTargetSharingScope block_code_target_sharing(masm_);
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(literal());
@@ -196,8 +200,6 @@ void FullCodeGenerator::Generate() {
__ Mov(x10, Operand(info->scope()->scope_info()));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -254,12 +256,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -274,14 +270,16 @@ void FullCodeGenerator::Generate() {
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewStrictArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(x1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewSloppyArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -294,8 +292,6 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -308,8 +304,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
@@ -388,11 +382,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ Bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -485,8 +474,6 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -506,22 +493,26 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ Mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ Mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ Mov(result_register(), Operand(Smi::cast(*lit)));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
- __ Mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ Mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ Mov(result_register(), Operand(Smi::cast(*lit)));
+ }
codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ B(false_label_);
@@ -534,14 +525,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ B(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ B(false_label_);
} else {
if (true_label_ != fall_through_) __ B(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ Mov(result_register(), Operand(lit));
+ __ Mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
codegen()->DoTest(this);
}
}
@@ -612,10 +603,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) {
__ B(true_label_);
@@ -632,8 +619,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
}
@@ -709,29 +697,6 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- // TODO(all): Investigate to see if there is something to work on here.
- Label skip;
- if (should_normalize) {
- __ B(&skip);
- }
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ CompareRoot(x0, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, NULL);
- __ Bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
@@ -778,7 +743,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
__ Str(x10, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -836,7 +800,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -870,7 +833,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -897,27 +859,29 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform the comparison as if via '==='.
__ Peek(x1, 0); // Switch value.
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
- Label slow_case;
- patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
- __ Cmp(x1, x0);
- __ B(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ B(clause->body_target());
- __ Bind(&slow_case);
- }
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ __ B(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ __ Bind(&slow_case);
+ }
- // Record position before stub call for type feedback.
- SetExpressionPosition(clause);
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
- patch_site.EmitPatchInfo();
+ // Record position before stub call for type feedback.
+ SetExpressionPosition(clause);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ CallIC(ic);
+ patch_site.EmitPatchInfo();
+ }
Label skip;
__ B(&skip);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
__ Drop(1);
__ B(clause->body_target());
@@ -943,12 +907,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ Bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ Bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -981,7 +943,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ Bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ Push(x0);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -1001,7 +962,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1037,7 +997,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
__ Push(x1, x0, x2); // Smi and array, fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ Push(xzr); // Initial index.
// Generate code for doing the condition check.
@@ -1077,7 +1036,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// have the key or returns the name-converted key.
__ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
__ B(eq, loop_statement.continue_label());
@@ -1087,18 +1045,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
// Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ Bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
// TODO(all): We could use a callee saved register to avoid popping.
__ Pop(x0);
__ Add(x0, x0, Smi::FromInt(1));
@@ -1112,7 +1066,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ Bind(&exit);
decrement_loop_depth();
}
@@ -1138,7 +1091,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1212,11 +1164,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in x0.
@@ -1251,7 +1203,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1283,20 +1234,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(x0);
VisitForStackValue(value);
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1316,7 +1263,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Mov(x10, Smi::FromInt(NONE));
PushOperand(x10);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1346,7 +1292,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1371,9 +1316,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1430,17 +1372,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1454,17 +1391,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1477,7 +1404,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
break;
}
@@ -1495,114 +1421,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, both_smis, stub_call;
-
- // Get the arguments.
- Register left = x1;
- Register right = x0;
- Register result = x0;
- PopOperand(left);
-
- // Perform combined smi check on both operands.
- __ Orr(x10, left, right);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(x10, &both_smis);
-
- __ Bind(&stub_call);
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- {
- Assembler::BlockPoolsScope scope(masm_);
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- }
- __ B(&done);
-
- __ Bind(&both_smis);
- // Smi case. This code works in the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
- // TODO(all): That doesn't exist any more. Where are the comments?
- //
- // The set of operations that needs to be supported here is controlled by
- // FullCodeGenerator::ShouldInlineSmiCase().
- switch (op) {
- case Token::SAR:
- __ Ubfx(right, right, kSmiShift, 5);
- __ Asr(result, left, right);
- __ Bic(result, result, kSmiShiftMask);
- break;
- case Token::SHL:
- __ Ubfx(right, right, kSmiShift, 5);
- __ Lsl(result, left, right);
- break;
- case Token::SHR:
- // If `left >>> right` >= 0x80000000, the result is not representable in a
- // signed 32-bit smi.
- __ Ubfx(right, right, kSmiShift, 5);
- __ Lsr(x10, left, right);
- __ Tbnz(x10, kXSignBit, &stub_call);
- __ Bic(result, x10, kSmiShiftMask);
- break;
- case Token::ADD:
- __ Adds(x10, left, right);
- __ B(vs, &stub_call);
- __ Mov(result, x10);
- break;
- case Token::SUB:
- __ Subs(x10, left, right);
- __ B(vs, &stub_call);
- __ Mov(result, x10);
- break;
- case Token::MUL: {
- Label not_minus_zero, done;
- STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
- STATIC_ASSERT(kSmiTag == 0);
- __ Smulh(x10, left, right);
- __ Cbnz(x10, &not_minus_zero);
- __ Eor(x11, left, right);
- __ Tbnz(x11, kXSignBit, &stub_call);
- __ Mov(result, x10);
- __ B(&done);
- __ Bind(&not_minus_zero);
- __ Cls(x11, x10);
- __ Cmp(x11, kXRegSizeInBits - kSmiShift);
- __ B(lt, &stub_call);
- __ SmiTag(result, x10);
- __ Bind(&done);
- break;
- }
- case Token::BIT_OR:
- __ Orr(result, left, right);
- break;
- case Token::BIT_AND:
- __ And(result, left, right);
- break;
- case Token::BIT_XOR:
- __ Eor(result, left, right);
- break;
- default:
- UNREACHABLE();
- }
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(x1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
- {
- Assembler::BlockPoolsScope scope(masm_);
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- }
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(x0);
}
@@ -1724,7 +1547,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
@@ -1740,7 +1562,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
@@ -1754,7 +1575,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -1771,8 +1591,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PopOperand(x10);
PushOperands(x0, x10);
@@ -1797,8 +1615,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
__ Move(LoadDescriptor::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PopOperand(x10);
@@ -1817,26 +1633,14 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Mov(x3, IntFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ Mov(x0, arg_count);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, x0);
}
@@ -1875,7 +1679,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(x0);
}
@@ -1894,7 +1697,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1916,7 +1718,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(x0, if_false);
__ CompareObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1938,7 +1739,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(x0, if_false);
__ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1960,7 +1760,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(x0, if_false);
__ CompareObjectType(x0, x10, x11, JS_TYPED_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1982,7 +1781,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(x0, if_false);
__ CompareObjectType(x0, x10, x11, JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2090,7 +1888,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to x1.
int const argc = args->length() - 2;
__ Peek(x1, (argc + 1) * kXRegSize);
@@ -2124,47 +1921,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- Register result = x0;
- __ Allocate(JSIteratorResult::kSize, result, x10, x11, &runtime,
- NO_ALLOCATION_FLAGS);
- Register map_reg = x1;
- Register result_value = x2;
- Register boolean_done = x3;
- Register empty_fixed_array = x4;
- Register untagged_result = x5;
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
- __ Pop(boolean_done);
- __ Pop(result_value);
- __ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
- STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
- JSObject::kElementsOffset);
- STATIC_ASSERT(JSIteratorResult::kValueOffset + kPointerSize ==
- JSIteratorResult::kDoneOffset);
- __ ObjectUntag(untagged_result, result);
- __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
- __ Stp(empty_fixed_array, empty_fixed_array,
- MemOperand(untagged_result, JSObject::kPropertiesOffset));
- __ Stp(result_value, boolean_done,
- MemOperand(untagged_result, JSIteratorResult::kValueOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ B(&done);
-
- __ Bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), x0);
@@ -2264,14 +2020,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ Bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
__ B(&done);
__ Bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ B(&done);
@@ -2342,59 +2094,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(x0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property we
- // store the result under the receiver that is currently on top of the
- // stack.
- switch (assign_type) {
- case VARIABLE:
- __ Push(x0);
- break;
- case NAMED_PROPERTY:
- __ Poke(x0, kPointerSize);
- break;
- case KEYED_PROPERTY:
- __ Poke(x0, kPointerSize * 2);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- __ Adds(x0, x0, Smi::FromInt(count_value));
- __ B(vc, &done);
- // Call stub. Undo operation first.
- __ Sub(x0, x0, Smi::FromInt(count_value));
- __ B(&stub_call);
- __ Bind(&slow);
- }
-
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2420,19 +2122,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- __ Bind(&stub_call);
+ int count_value = expr->op() == Token::INC ? 1 : -1;
__ Mov(x1, x0);
__ Mov(x0, Smi::FromInt(count_value));
SetExpressionPosition(expr);
- {
- Assembler::BlockPoolsScope scope(masm_);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- }
- __ Bind(&done);
+ Handle<Code> code =
+ CodeFactory::BinaryOperation(isolate(), Token::ADD).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in x0.
switch (assign_type) {
@@ -2442,8 +2141,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(x0);
}
// For all contexts except EffectConstant We have the result on
@@ -2454,8 +2151,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
break;
@@ -2463,7 +2158,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2477,7 +2171,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2510,7 +2203,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2596,7 +2288,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2607,7 +2298,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(x1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2621,20 +2311,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Pop the stack value.
PopOperand(x1);
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(op)) {
- Label slow_case;
- patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
- __ Cmp(x1, x0);
- Split(cond, if_true, if_false, NULL);
- __ Bind(&slow_case);
- }
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ Split(cond, if_true, if_false, NULL);
+ __ Bind(&slow_case);
+ }
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallIC(ic);
+ patch_site.EmitPatchInfo();
+ __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+ }
}
}
@@ -2656,7 +2348,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
@@ -2675,11 +2366,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -2706,45 +2392,6 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- // Allocate and populate an object with this form: { value: VAL, done: DONE }
-
- Register result = x0;
- __ Allocate(JSIteratorResult::kSize, result, x10, x11, &allocate,
- NO_ALLOCATION_FLAGS);
- __ B(&done_allocate);
-
- __ Bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ Bind(&done_allocate);
- Register map_reg = x1;
- Register result_value = x2;
- Register boolean_done = x3;
- Register empty_fixed_array = x4;
- Register untagged_result = x5;
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
- PopOperand(result_value);
- __ LoadRoot(boolean_done,
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
- STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
- JSObject::kElementsOffset);
- STATIC_ASSERT(JSIteratorResult::kValueOffset + kPointerSize ==
- JSIteratorResult::kDoneOffset);
- __ ObjectUntag(untagged_result, result);
- __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
- __ Stp(empty_fixed_array, empty_fixed_array,
- MemOperand(untagged_result, JSObject::kPropertiesOffset));
- __ Stp(result_value, boolean_done,
- MemOperand(untagged_result, JSIteratorResult::kValueOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
// TODO(all): I don't like this method.
// It seems to me that in too many places x0 is used in place of this.
// Also, this function is not suitable for all places where x0 should be
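
Editor's note (not part of the patch): the VisitCountOperation hunks above delete the patchable inline smi fast path (EmitJumpIfNotSmi, add with overflow check, JumpPatchSite) and always call the BinaryOperation builtin instead. As a rough standalone sketch of what the removed fast path computed, assuming 32-bit smis tagged by a one-bit left shift (an assumption made for this illustration only, not taken from the patch):

    // Standalone sketch of the inline smi increment fast path removed by the
    // patch. Returns nullopt in exactly the cases where the old code fell
    // through to the stub call and the new code always calls the
    // BinaryOperation builtin: non-smi input or signed overflow.
    #include <cstdint>
    #include <cstdio>
    #include <optional>

    constexpr int kSmiTagSize = 1;  // assumption: low tag bit, value << 1

    inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }
    inline int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    inline int32_t SmiToInt(int32_t tagged) { return tagged >> kSmiTagSize; }

    // Mirrors: add r0, r0, Operand(Smi::FromInt(count_value)), SetCC;
    //          b(vc, &done);   // on overflow, undo and call the stub
    std::optional<int32_t> SmiIncrementFastPath(int32_t tagged, int count_value) {
      if (!IsSmi(tagged)) return std::nullopt;  // EmitJumpIfNotSmi
      int32_t result;
      if (__builtin_add_overflow(tagged, SmiFromInt(count_value), &result)) {
        return std::nullopt;  // overflow: take the slow (stub/builtin) path
      }
      return result;
    }

    int main() {
      if (auto r = SmiIncrementFastPath(SmiFromInt(41), +1)) {
        std::printf("fast path: %d\n", SmiToInt(*r));  // prints 42
      }
      if (!SmiIncrementFastPath(SmiFromInt(0x3FFFFFFF), +1)) {
        std::printf("slow path taken on overflow\n");
      }
      return 0;
    }

After the patch there is no such fast path in full-codegen: every count operation goes through the BinaryOperation builtin, which is why the JumpPatchSite bookkeeping and the undo-and-retry sequence disappear from both the ARM and ARM64 files.
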
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 4d0dd3d589..3b092a7414 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -61,10 +61,6 @@ FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
operand_stack_depth_(0),
globals_(NULL),
context_(NULL),
- bailout_entries_(info->HasDeoptimizationSupport()
- ? info->literal()->ast_node_count()
- : 0,
- info->zone()),
back_edges_(2, info->zone()),
source_position_table_builder_(info->zone(),
info->SourcePositionRecordingMode()),
@@ -87,7 +83,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
Isolate* isolate = info->isolate();
- DCHECK(!info->shared_info()->must_use_ignition_turbo());
+ DCHECK(!info->literal()->must_use_ignition());
DCHECK(!FLAG_minimal);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileFullCode);
@@ -116,12 +112,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&masm, nullptr, info, masm.CodeObject());
- cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
- code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_has_reloc_info_for_serialization(info->will_serialize());
code->set_allow_osr_at_loop_nesting_level(0);
- code->set_profiler_ticks(0);
code->set_back_edge_table_offset(table_offset);
Handle<ByteArray> source_positions =
cgen.source_position_table_builder_.ToSourcePositionTable(
@@ -155,21 +148,6 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
-void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
- // Fill in the deoptimization information.
- DCHECK(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
- if (!info_->HasDeoptimizationSupport()) return;
- int length = bailout_entries_.length();
- Handle<DeoptimizationOutputData> data =
- DeoptimizationOutputData::New(isolate(), length, TENURED);
- for (int i = 0; i < length; i++) {
- data->SetAstId(i, bailout_entries_[i].id);
- data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
- }
- code->set_deoptimization_data(*data);
-}
-
-
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
@@ -195,23 +173,18 @@ void FullCodeGenerator::Initialize(uintptr_t stack_limit) {
masm_->set_predictable_code_size(true);
}
-void FullCodeGenerator::PrepareForBailout(Expression* node,
- BailoutState state) {
- PrepareForBailoutForId(node->id(), state);
-}
-
-void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+void FullCodeGenerator::CallIC(Handle<Code> code) {
ic_total_count_++;
- __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+ __ Call(code, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::CallLoadIC(FeedbackSlot slot, Handle<Object> name) {
DCHECK(name->IsName());
- __ Move(LoadDescriptor::NameRegister(), name);
+ __ Move(LoadDescriptor::NameRegister(), Handle<Name>::cast(name));
EmitLoadSlot(LoadDescriptor::SlotRegister(), slot);
- Handle<Code> code = CodeFactory::LoadIC(isolate()).code();
+ Handle<Code> code = isolate()->builtins()->LoadICTrampoline();
__ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -219,7 +192,7 @@ void FullCodeGenerator::CallLoadIC(FeedbackSlot slot, Handle<Object> name) {
void FullCodeGenerator::CallStoreIC(FeedbackSlot slot, Handle<Object> name,
StoreICKind store_ic_kind) {
DCHECK(name->IsName());
- __ Move(StoreDescriptor::NameRegister(), name);
+ __ Move(StoreDescriptor::NameRegister(), Handle<Name>::cast(name));
STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
StoreDescriptor::kStackArgumentsCount == 2);
@@ -275,39 +248,6 @@ void FullCodeGenerator::CallKeyedStoreIC(FeedbackSlot slot) {
RestoreContext();
}
-void FullCodeGenerator::RecordJSReturnSite(Call* call) {
- // We record the offset of the function return so we can rebuild the frame
- // if the function was inlined, i.e., this is the return address in the
- // inlined function's frame.
- //
- // The bailout state is ignored. We defensively set it to TOS_REGISTER, which
- // is the real state of the unoptimized code at the return site.
- PrepareForBailoutForId(call->ReturnId(), BailoutState::TOS_REGISTER);
-#ifdef DEBUG
- // In debug builds, mark the return so we can verify that this function
- // was called.
- DCHECK(!call->return_is_recorded_);
- call->return_is_recorded_ = true;
-#endif
-}
-
-void FullCodeGenerator::PrepareForBailoutForId(BailoutId id,
- BailoutState state) {
- // There's no need to prepare this code for bailouts from already optimized
- // code or code that can't be optimized.
- if (!info_->HasDeoptimizationSupport()) return;
- unsigned pc_and_state =
- BailoutStateField::encode(state) | PcField::encode(masm_->pc_offset());
- DCHECK(Smi::IsValid(pc_and_state));
-#ifdef DEBUG
- for (int i = 0; i < bailout_entries_.length(); ++i) {
- DCHECK(bailout_entries_[i].id != id);
- }
-#endif
- BailoutEntry entry = { id, pc_and_state };
- bailout_entries_.Add(entry, zone());
-}
-
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
@@ -344,7 +284,6 @@ void FullCodeGenerator::TestContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -366,7 +305,6 @@ void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
void FullCodeGenerator::TestContext::Plug(Register reg) const {
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -392,7 +330,6 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
// For simplicity we always test the accumulator register.
codegen()->DropOperands(count);
__ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -413,7 +350,6 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
codegen()->PopOperand(result_register());
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -526,7 +462,7 @@ int FullCodeGenerator::DeclareGlobalsFlags() {
void FullCodeGenerator::PushOperand(Handle<Object> handle) {
OperandStackDepthIncrement(1);
- __ Push(handle);
+ __ PushObject(handle);
}
void FullCodeGenerator::PushOperand(Smi* smi) {
@@ -614,30 +550,35 @@ void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::ToString(isolate()));
+ EmitIntrinsicAsStubCall(
+ expr, Builtins::CallableFor(isolate(), Builtins::kToString));
}
void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
+ EmitIntrinsicAsStubCall(
+ expr, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::ToInteger(isolate()));
+ EmitIntrinsicAsStubCall(
+ expr, Builtins::CallableFor(isolate(), Builtins::kToInteger));
}
void FullCodeGenerator::EmitToNumber(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::ToNumber(isolate()));
+ EmitIntrinsicAsStubCall(
+ expr, Builtins::CallableFor(isolate(), Builtins::kToNumber));
}
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::ToObject(isolate()));
+ EmitIntrinsicAsStubCall(
+ expr, Builtins::CallableFor(isolate(), Builtins::kToObject));
}
void FullCodeGenerator::EmitHasProperty() {
- Callable callable = CodeFactory::HasProperty(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kHasProperty);
PopOperand(callable.descriptor().GetRegisterParameter(1));
PopOperand(callable.descriptor().GetRegisterParameter(0));
__ Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -697,16 +638,12 @@ void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
}
}
-void FullCodeGenerator::SetCallPosition(Expression* expr,
- TailCallMode tail_call_mode) {
+void FullCodeGenerator::SetCallPosition(Expression* expr) {
if (expr->position() == kNoSourcePosition) return;
RecordPosition(expr->position());
if (info_->is_debug()) {
- RelocInfo::Mode mode = (tail_call_mode == TailCallMode::kAllow)
- ? RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL
- : RelocInfo::DEBUG_BREAK_SLOT_AT_CALL;
// Always emit a debug break slot before a call.
- DebugCodegen::GenerateSlot(masm_, mode);
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
}
}
@@ -771,7 +708,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
Expression* left = expr->left();
Expression* right = expr->right();
- BailoutId right_id = expr->RightId();
Label done;
if (context()->IsTest()) {
@@ -782,7 +718,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
} else {
VisitForControl(left, test->true_label(), &eval_right, &eval_right);
}
- PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
__ bind(&eval_right);
} else if (context()->IsAccumulatorValue()) {
@@ -801,7 +736,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
__ jmp(&done);
__ bind(&discard);
__ Drop(1);
- PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
} else if (context()->IsStackValue()) {
VisitForAccumulatorValue(left);
@@ -816,7 +750,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
}
__ bind(&discard);
__ Drop(1);
- PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
} else {
DCHECK(context()->IsEffect());
@@ -826,7 +759,6 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
} else {
VisitForControl(left, &done, &eval_right, &eval_right);
}
- PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
__ bind(&eval_right);
}
@@ -845,11 +777,7 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
VisitForAccumulatorValue(right);
SetExpressionPosition(expr);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, left, right);
- } else {
- EmitBinaryOp(expr, op);
- }
+ EmitBinaryOp(expr, op);
}
void FullCodeGenerator::VisitProperty(Property* expr) {
@@ -871,7 +799,6 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
}
- PrepareForBailoutForId(expr->LoadId(), BailoutState::TOS_REGISTER);
context()->Plug(result_register());
}
@@ -882,7 +809,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
EmitVariableLoad(proxy, INSIDE_TYPEOF);
- PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
} else {
// This expression cannot throw a reference error at the top level.
VisitInDuplicateContext(expr);
@@ -895,8 +821,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
NestedBlock nested_block(this, stmt);
{
- EnterBlockScopeIfNeeded block_scope_state(
- this, stmt->scope(), stmt->EntryId(), stmt->DeclsId(), stmt->ExitId());
+ EnterBlockScopeIfNeeded block_scope_state(this, stmt->scope());
VisitStatements(stmt->statements());
__ bind(nested_block.break_label());
}
@@ -930,24 +855,17 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
if (stmt->HasElseStatement()) {
VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), BailoutState::NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
__ jmp(&done);
-
- PrepareForBailoutForId(stmt->ElseId(), BailoutState::NO_REGISTERS);
__ bind(&else_part);
Visit(stmt->else_statement());
} else {
VisitForControl(stmt->condition(), &then_part, &done, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), BailoutState::NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
-
- PrepareForBailoutForId(stmt->ElseId(), BailoutState::NO_REGISTERS);
}
__ bind(&done);
- PrepareForBailoutForId(stmt->IfId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitContinue(Statement* target) {
@@ -1035,7 +953,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
scope()->is_function_scope()) {
- Callable callable = CodeFactory::FastNewClosure(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
__ Move(callable.descriptor().GetRegisterParameter(0), info);
__ EmitLoadFeedbackVector(callable.descriptor().GetRegisterParameter(1));
__ Move(callable.descriptor().GetRegisterParameter(2), SmiFromSlot(slot));
@@ -1065,7 +984,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
EmitLoadSlot(LoadDescriptor::SlotRegister(), prop->PropertyFeedbackSlot());
- Handle<Code> code = CodeFactory::KeyedLoadIC(isolate()).code();
+ Handle<Code> code = isolate()->builtins()->KeyedLoadICTrampoline();
__ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1110,7 +1029,6 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Record the position of the do while condition and make sure it is
// possible to break on the condition.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->ContinueId(), BailoutState::NO_REGISTERS);
// Here is the actual 'while' keyword.
SetExpressionAsStatementPosition(stmt->cond());
@@ -1120,12 +1038,10 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
&book_keeping);
// Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
__ bind(&book_keeping);
EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1146,7 +1062,6 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_statement.break_label(),
&body);
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
@@ -1156,7 +1071,6 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1179,11 +1093,9 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
- PrepareForBailoutForId(stmt->ContinueId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.continue_label());
if (stmt->next() != NULL) {
SetStatementPosition(stmt->next());
@@ -1204,7 +1116,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
__ jmp(&body);
}
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1250,7 +1161,6 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
int original_stack_depth = operand_stack_depth_;
- PrepareForBailoutForId(expr->ThenId(), BailoutState::NO_REGISTERS);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression());
if (context()->IsTest()) {
@@ -1265,7 +1175,6 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
}
operand_stack_depth_ = original_stack_depth;
- PrepareForBailoutForId(expr->ElseId(), BailoutState::NO_REGISTERS);
__ bind(&false_case);
SetExpressionPosition(expr->else_expression());
VisitInDuplicateContext(expr->else_expression());
@@ -1303,7 +1212,8 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneRegExp);
CallInterfaceDescriptor descriptor = callable.descriptor();
LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
descriptor.GetRegisterParameter(0));
@@ -1343,15 +1253,7 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, (expr->tail_call_mode() == TailCallMode::kAllow)
- ? "[ TailCall"
- : "[ Call");
+ Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType();
@@ -1388,11 +1290,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
case Call::WITH_CALL:
UNREACHABLE();
}
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
@@ -1408,7 +1305,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
context()->DropAndPlug(1, result_register());
@@ -1430,7 +1326,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
OperandStackDepthDecrement(arg_count);
context()->Plug(result_register());
@@ -1456,6 +1351,20 @@ void FullCodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ // Resumable functions are not supported.
+ UNREACHABLE();
+}
+
+void FullCodeGenerator::VisitYieldStar(YieldStar* expr) {
+ // Resumable functions are not supported.
+ UNREACHABLE();
+}
+
+void FullCodeGenerator::VisitAwait(Await* expr) {
+ // Resumable functions are not supported.
+ UNREACHABLE();
+}
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression* sub_expr;
@@ -1550,15 +1459,12 @@ bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
}
#endif // DEBUG
-
FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
- FullCodeGenerator* codegen, Scope* scope, BailoutId entry_id,
- BailoutId declarations_id, BailoutId exit_id)
- : codegen_(codegen), exit_id_(exit_id) {
+ FullCodeGenerator* codegen, Scope* scope)
+ : codegen_(codegen) {
saved_scope_ = codegen_->scope();
if (scope == NULL) {
- codegen_->PrepareForBailoutForId(entry_id, BailoutState::NO_REGISTERS);
needs_block_context_ = false;
} else {
needs_block_context_ = scope->NeedsContext();
@@ -1575,13 +1481,10 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
codegen_->context_register());
}
CHECK_EQ(0, scope->num_stack_slots());
- codegen_->PrepareForBailoutForId(entry_id, BailoutState::NO_REGISTERS);
}
{
Comment cmnt(masm(), "[ Declarations");
codegen_->VisitDeclarations(scope->declarations());
- codegen_->PrepareForBailoutForId(declarations_id,
- BailoutState::NO_REGISTERS);
}
}
}
@@ -1595,7 +1498,6 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::~EnterBlockScopeIfNeeded() {
codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
codegen_->context_register());
}
- codegen_->PrepareForBailoutForId(exit_id_, BailoutState::NO_REGISTERS);
codegen_->scope_ = saved_scope_;
}
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 05cd2f335b..b576c6eda8 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -42,15 +42,10 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
static bool MakeCode(CompilationInfo* info, uintptr_t stack_limit);
static bool MakeCode(CompilationInfo* info);
- // Encode bailout state and pc-offset as a BitField<type, start, size>.
- // Only use 30 bits because we encode the result as a smi.
- class BailoutStateField : public BitField<Deoptimizer::BailoutState, 0, 1> {};
- class PcField : public BitField<unsigned, 1, 30 - 1> {};
-
static const int kMaxBackEdgeWeight = 127;
// Platform-specific code size multiplier.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
static const int kCodeSizeMultiplier = 105;
#elif V8_TARGET_ARCH_X64
static const int kCodeSizeMultiplier = 165;
@@ -254,21 +249,18 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
EffectContext context(this);
Visit(expr);
- PrepareForBailout(expr, BailoutState::NO_REGISTERS);
}
void VisitForAccumulatorValue(Expression* expr) {
if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
AccumulatorValueContext context(this);
Visit(expr);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
}
void VisitForStackValue(Expression* expr) {
if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
StackValueContext context(this);
Visit(expr);
- PrepareForBailout(expr, BailoutState::NO_REGISTERS);
}
void VisitForControl(Expression* expr,
@@ -317,11 +309,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// stack depth is in sync with the actual operand stack during runtime.
void EmitOperandStackDepthCheck();
- // Generate code to create an iterator result object. The "value" property is
- // set to a value popped from the stack, and "done" is set according to the
- // argument. The result object is left in the result register.
- void EmitCreateIteratorResult(bool done);
-
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
@@ -338,10 +325,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
Expression* sub_expr,
NilValue nil);
- // Bailout support.
- void PrepareForBailout(Expression* node, Deoptimizer::BailoutState state);
- void PrepareForBailoutForId(BailoutId id, Deoptimizer::BailoutState state);
-
// Returns an int32 for the index into the FixedArray that backs the feedback
// vector
int32_t IntFromSlot(FeedbackSlot slot) const {
@@ -354,20 +337,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
return Smi::FromInt(IntFromSlot(slot));
}
- // Record a call's return site offset, used to rebuild the frame if the
- // called function was inlined at the site.
- void RecordJSReturnSite(Call* call);
-
- // Prepare for bailout before a test (or compare) and branch. If
- // should_normalize, then the following comparison will not handle the
- // canonical JS true value so we will insert a (dead) test against true at
- // the actual bailout target from the optimized code. If not
- // should_normalize, the true and false labels are ignored.
- void PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false);
-
// If enabled, emit debug code for checking that the current context is
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
@@ -418,8 +387,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
F(ToLength) \
F(ToNumber) \
F(ToObject) \
- F(DebugIsActive) \
- F(CreateIterResultObject)
+ F(DebugIsActive)
#define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
FOR_EACH_FULL_CODE_INTRINSIC(GENERATOR_DECLARATION)
@@ -461,13 +429,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// of the stack and the right one in the accumulator.
void EmitBinaryOp(BinaryOperation* expr, Token::Value op);
- // Helper functions for generating inlined smi code for certain
- // binary operations.
- void EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left,
- Expression* right);
-
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator. slot is only used if FLAG_vector_stores
// is true.
@@ -509,8 +470,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// Platform-specific code for pushing a slot to the stack.
void EmitPushSlot(FeedbackSlot slot);
- void CallIC(Handle<Code> code,
- TypeFeedbackId id = TypeFeedbackId::None());
+ void CallIC(Handle<Code> code);
void CallLoadIC(FeedbackSlot slot, Handle<Object> name);
enum StoreICKind { kStoreNamed, kStoreOwn, kStoreGlobal };
@@ -535,8 +495,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// This is used in loop headers where we want to break for each iteration.
void SetExpressionAsStatementPosition(Expression* expr);
- void SetCallPosition(Expression* expr,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ void SetCallPosition(Expression* expr);
void SetConstructCallPosition(Expression* expr) {
// Currently call and construct calls are treated the same wrt debugging.
@@ -602,17 +561,11 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void VisitForTypeofValue(Expression* expr);
void Generate();
- void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
- struct BailoutEntry {
- BailoutId id;
- unsigned pc_and_state;
- };
-
struct BackEdgeEntry {
BailoutId id;
unsigned pc;
@@ -789,9 +742,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
class EnterBlockScopeIfNeeded {
public:
- EnterBlockScopeIfNeeded(FullCodeGenerator* codegen, Scope* scope,
- BailoutId entry_id, BailoutId declarations_id,
- BailoutId exit_id);
+ EnterBlockScopeIfNeeded(FullCodeGenerator* codegen, Scope* scope);
~EnterBlockScopeIfNeeded();
private:
@@ -799,7 +750,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
FullCodeGenerator* codegen_;
Scope* saved_scope_;
- BailoutId exit_id_;
bool needs_block_context_;
};
@@ -814,7 +764,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
int operand_stack_depth_;
ZoneList<Handle<Object> >* globals_;
const ExpressionContext* context_;
- ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BackEdgeEntry> back_edges_;
SourcePositionTableBuilder source_position_table_builder_;
int ic_total_count_;
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 0af067c81d..5d9d1c5606 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -180,8 +180,6 @@ void FullCodeGenerator::Generate() {
__ push(edi);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -242,12 +240,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -282,8 +274,6 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(info->scope()->declarations());
@@ -296,8 +286,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@@ -364,11 +352,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -459,9 +442,9 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
if (lit->IsSmi()) {
- __ SafeMove(result_register(), Immediate(lit));
+ __ SafeMove(result_register(), Immediate(Smi::cast(*lit)));
} else {
- __ Move(result_register(), Immediate(lit));
+ __ Move(result_register(), Immediate(Handle<HeapObject>::cast(lit)));
}
}
@@ -469,18 +452,14 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
codegen()->OperandStackDepthIncrement(1);
if (lit->IsSmi()) {
- __ SafePush(Immediate(lit));
+ __ SafePush(Immediate(Smi::cast(*lit)));
} else {
- __ push(Immediate(lit));
+ __ push(Immediate(Handle<HeapObject>::cast(lit)));
}
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
@@ -493,14 +472,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ jmp(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else {
if (true_label_ != fall_through_) __ jmp(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ mov(result_register(), lit);
+ __ mov(result_register(), Handle<HeapObject>::cast(lit));
codegen()->DoTest(this);
}
}
@@ -556,27 +535,21 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
+ Handle<HeapObject> value = flag ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
__ mov(result_register(), value);
}
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
codegen()->OperandStackDepthIncrement(1);
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
+ Handle<HeapObject> value = flag ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value();
__ push(Immediate(value));
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else {
@@ -589,8 +562,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
}
@@ -664,26 +638,6 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
@@ -729,7 +683,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -785,7 +738,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -814,7 +766,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -858,12 +809,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ jmp(&skip, Label::kNear);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ cmp(eax, isolate()->factory()->true_value());
__ j(not_equal, &next_test);
__ Drop(1);
@@ -891,12 +841,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -929,7 +877,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(eax);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -946,7 +893,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(eax);
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
__ j(not_equal, &fixed_array);
@@ -982,7 +928,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ push(Immediate(Smi::kZero)); // Initial index.
// Generate code for doing the condition check.
@@ -1019,7 +964,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// have the key or returns the name-converted key.
__ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
loop_statement.continue_label());
@@ -1029,18 +973,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
// Generate code for going to the next element by incrementing the
// index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1051,7 +991,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1076,7 +1015,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1154,11 +1092,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(ebx, Immediate(SmiFromSlot(expr->literal_slot())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in eax.
@@ -1193,7 +1131,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
@@ -1220,20 +1157,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1253,7 +1186,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1285,7 +1217,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1311,8 +1242,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Immediate(Smi::FromInt(array_index)));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1371,17 +1300,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1394,17 +1318,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PushOperand(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1417,7 +1331,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
break;
}
@@ -1434,11 +1347,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperand(MemOperand operand) {
OperandStackDepthIncrement(1);
__ Push(operand);
@@ -1455,132 +1363,11 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate,
- NO_ALLOCATION_FLAGS);
- __ jmp(&done_allocate, Label::kNear);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ mov(ebx, NativeContextOperand());
- __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
- __ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
- isolate()->factory()->ToBoolean(done));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- OperandStackDepthDecrement(1);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack. Right operand is in eax.
- Label smi_case, done, stub_call;
- PopOperand(edx);
- __ mov(ecx, eax);
- __ or_(eax, edx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ mov(eax, ecx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(eax, edx); // Copy left operand in case of a stub call.
-
- switch (op) {
- case Token::SAR:
- __ SmiUntag(ecx);
- __ sar_cl(eax); // No checks of result necessary
- __ and_(eax, Immediate(~kSmiTagMask));
- break;
- case Token::SHL: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(positive, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::SHR: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shr_cl(eax);
- __ test(eax, Immediate(0xc0000000));
- __ j(zero, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::ADD:
- __ add(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::SUB:
- __ sub(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::MUL: {
- __ SmiUntag(eax);
- __ imul(eax, ecx);
- __ j(overflow, &stub_call);
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ mov(ebx, edx);
- __ or_(ebx, ecx);
- __ j(negative, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ or_(eax, ecx);
- break;
- case Token::BIT_AND:
- __ and_(eax, ecx);
- break;
- case Token::BIT_XOR:
- __ xor_(eax, ecx);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(edx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(eax);
}
@@ -1700,7 +1487,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -1715,7 +1501,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -1728,7 +1513,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -1740,8 +1524,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
@@ -1765,8 +1547,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(LoadDescriptor::NameRegister(), eax);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(esp, 0));
@@ -1784,26 +1564,14 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Move(edx, Immediate(IntFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Move(eax, Immediate(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, eax);
}
@@ -1842,7 +1610,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(eax);
}
@@ -1861,7 +1628,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, Immediate(kSmiTagMask));
Split(zero, if_true, if_false, fall_through);
@@ -1884,7 +1650,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1906,7 +1671,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1928,7 +1692,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_TYPED_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1950,7 +1713,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_PROXY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2053,7 +1815,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -2086,36 +1847,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime,
- NO_ALLOCATION_FLAGS);
- __ mov(ebx, NativeContextOperand());
- __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ pop(FieldOperand(eax, JSIteratorResult::kDoneOffset));
- __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ jmp(&done, Label::kNear);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadGlobalFunction(expr->context_index(), eax);
@@ -2217,8 +1948,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
@@ -2226,8 +1955,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
@@ -2300,65 +2027,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(no_overflow, &done, Label::kNear);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- __ jmp(&stub_call, Label::kNear);
- __ bind(&slow);
- }
-
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2387,14 +2058,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetExpressionPosition(expr);
// Call stub for +1/-1.
- __ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ CodeFactory::BinaryOperation(isolate(), expr->binary_op()).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in eax.
switch (assign_type) {
@@ -2405,8 +2074,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(eax);
}
// For all contexts except EffectContext We have the result on
@@ -2418,8 +2085,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
break;
@@ -2427,7 +2092,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2441,7 +2105,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
// Result is on the stack
if (!context()->IsEffect()) {
@@ -2473,7 +2136,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2552,7 +2214,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
@@ -2563,7 +2224,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(edx);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
@@ -2588,10 +2248,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ test(eax, eax);
Split(cc, if_true, if_false, fall_through);
}
@@ -2614,11 +2273,10 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Handle<Object> nil_value = nil == kNullValue
- ? isolate()->factory()->null_value()
- : isolate()->factory()->undefined_value();
+ Handle<HeapObject> nil_value = nil == kNullValue
+ ? isolate()->factory()->null_value()
+ : isolate()->factory()->undefined_value();
if (expr->op() == Token::EQ_STRICT) {
__ cmp(eax, nil_value);
Split(equal, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index e051b0b158..431d2cdc75 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -201,8 +201,6 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -260,12 +258,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -280,14 +272,16 @@ void FullCodeGenerator::Generate() {
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewStrictArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(a1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewSloppyArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -301,8 +295,6 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -315,8 +307,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
@@ -395,11 +385,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -484,10 +469,6 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -507,22 +488,26 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ li(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ li(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ li(result_register(), Operand(Smi::cast(*lit)));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
- __ li(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ li(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ li(result_register(), Operand(Smi::cast(*lit)));
+ }
codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ Branch(false_label_);
@@ -535,14 +520,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ Branch(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else {
if (true_label_ != fall_through_) __ Branch(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ li(result_register(), Operand(lit));
+ __ li(result_register(), Operand(Handle<HeapObject>::cast(lit)));
codegen()->DoTest(this);
}
}
@@ -616,10 +601,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ Branch(true_label_);
} else {
@@ -633,8 +614,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
__ mov(a0, result_register());
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
}
@@ -712,26 +694,6 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ Branch(&skip);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- Split(eq, v0, Operand(t0), if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
@@ -780,7 +742,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ sw(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -838,7 +799,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -867,7 +827,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -912,12 +871,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ Branch(&skip);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(&next_test, ne, v0, Operand(at));
__ Drop(1);
@@ -944,12 +902,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -985,7 +941,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
RestoreContext();
__ mov(a0, v0);
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(a0);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -1005,7 +960,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1043,7 +997,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ li(a0, Operand(Smi::kZero));
__ Push(a0); // Initial index.
@@ -1085,7 +1038,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// have the key or returns the name-converted key.
__ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(loop_statement.continue_label(), eq, result_register(),
Operand(at));
@@ -1096,18 +1048,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
  // Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(a0);
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
@@ -1120,7 +1068,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1148,7 +1095,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1222,11 +1168,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in v0.
@@ -1262,7 +1208,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1295,20 +1240,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1328,7 +1269,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(NONE)));
PushOperand(a0);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1359,7 +1299,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1386,9 +1325,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ mov(StoreDescriptor::ValueRegister(), result_register());
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1447,17 +1383,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1471,17 +1402,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1494,7 +1415,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
break;
}
@@ -1511,11 +1431,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1547,128 +1462,12 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate,
- NO_ALLOCATION_FLAGS);
- __ jmp(&done_allocate);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
- PopOperand(a2);
- __ LoadRoot(a3,
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
- __ sw(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = a2;
- Register scratch2 = a3;
-
- // Get the arguments.
- Register left = a1;
- Register right = a0;
- PopOperand(left);
- __ mov(a0, result_register());
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- switch (op) {
- case Token::SAR:
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(right, left, scratch1);
- __ And(v0, right, Operand(~kSmiTagMask));
- break;
- case Token::SHL: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- break;
- }
- case Token::SHR: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(scratch1, scratch1, scratch2);
- __ And(scratch2, scratch1, 0xc0000000);
- __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- break;
- }
- case Token::ADD:
- __ AddBranchOvf(v0, left, Operand(right), &stub_call);
- break;
- case Token::SUB:
- __ SubBranchOvf(v0, left, Operand(right), &stub_call);
- break;
- case Token::MUL: {
- __ SmiUntag(scratch1, right);
- __ Mul(scratch2, v0, left, scratch1);
- __ sra(scratch1, v0, 31);
- __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
- __ Branch(&done, ne, v0, Operand(zero_reg));
- __ Addu(scratch2, right, left);
- __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- DCHECK(Smi::kZero == 0);
- __ mov(v0, zero_reg);
- break;
- }
- case Token::BIT_OR:
- __ Or(v0, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ And(v0, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ Xor(v0, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
PopOperand(a1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(v0);
}
@@ -1790,7 +1589,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -1809,7 +1607,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -1822,7 +1619,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -1835,8 +1631,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
PushOperand(at);
@@ -1861,8 +1655,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
@@ -1881,27 +1673,15 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, v0);
}
@@ -1940,7 +1720,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(v0);
}
@@ -1959,7 +1738,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ SmiTst(v0, t0);
Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -1982,7 +1760,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, a1, Operand(FIRST_JS_RECEIVER_TYPE),
if_true, if_false, fall_through);
@@ -2005,7 +1782,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_ARRAY_TYPE),
if_true, if_false, fall_through);
@@ -2028,7 +1804,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_TYPED_ARRAY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2050,7 +1825,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_PROXY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2154,7 +1928,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ lw(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -2188,35 +1961,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime,
- NO_ALLOCATION_FLAGS);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
- __ Pop(a2, a3);
- __ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
- __ sw(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ jmp(&done);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), v0);
@@ -2318,14 +2062,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ jmp(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ bind(&done);
@@ -2396,61 +2136,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
__ mov(a0, v0);
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(v0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(v0);
- break;
- case NAMED_PROPERTY:
- __ sw(v0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ sw(v0, MemOperand(sp, 2 * kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- Register scratch1 = a1;
- __ li(scratch1, Operand(Smi::FromInt(count_value)));
- __ AddBranchNoOvf(v0, v0, Operand(scratch1), &done);
- // Call stub. Undo operation first.
- __ Move(v0, a0);
- __ jmp(&stub_call);
- __ bind(&slow);
- }
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2476,16 +2166,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- __ bind(&stub_call);
+ int count_value = expr->op() == Token::INC ? 1 : -1;
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(count_value)));
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ Handle<Code> code =
+ CodeFactory::BinaryOperation(isolate(), Token::ADD).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in v0.
switch (assign_type) {
@@ -2495,8 +2185,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(v0);
}
        // For all contexts except EffectContext we have the result on
@@ -2507,8 +2195,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
break;
@@ -2517,7 +2203,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2532,7 +2217,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2563,7 +2247,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2644,7 +2327,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
break;
@@ -2656,7 +2338,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(a1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
break;
@@ -2680,9 +2361,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
}
@@ -2704,7 +2384,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 718e174b26..79c53981b7 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -200,8 +200,6 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -259,12 +257,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -279,14 +271,16 @@ void FullCodeGenerator::Generate() {
__ Ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewStrictArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(a1);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewSloppyArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -299,8 +293,6 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -313,8 +305,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
@@ -395,11 +385,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -484,10 +469,6 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -507,22 +488,26 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ li(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ li(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ li(result_register(), Operand(Smi::cast(*lit)));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
- __ li(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ li(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ li(result_register(), Operand(Smi::cast(*lit)));
+ }
codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ Branch(false_label_);
@@ -535,14 +520,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ Branch(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else {
if (true_label_ != fall_through_) __ Branch(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ li(result_register(), Operand(lit));
+ __ li(result_register(), Operand(Handle<HeapObject>::cast(lit)));
codegen()->DoTest(this);
}
}
@@ -616,10 +601,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ Branch(true_label_);
} else {
@@ -633,8 +614,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
__ mov(a0, result_register());
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
}
@@ -712,26 +694,6 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ Branch(&skip);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ LoadRoot(a4, Heap::kTrueValueRootIndex);
- Split(eq, v0, Operand(a4), if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
@@ -780,7 +742,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Sd(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -838,7 +799,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -867,7 +827,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -912,12 +871,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ Branch(&skip);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(&next_test, ne, v0, Operand(at));
__ Drop(1);
@@ -944,12 +902,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -986,7 +942,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
RestoreContext();
__ mov(a0, v0);
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(a0);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -1006,7 +961,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1044,7 +998,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, v0); // Smi and array
__ Ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ li(a0, Operand(Smi::kZero));
__ Push(a0); // Initial index.
@@ -1087,7 +1040,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// have the key or returns the name-converted key.
__ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(loop_statement.continue_label(), eq, result_register(),
Operand(at));
@@ -1098,18 +1050,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
  // Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(a0);
__ Daddu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
@@ -1122,7 +1070,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1150,7 +1097,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1224,11 +1170,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in v0.
@@ -1264,7 +1210,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ Ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1297,20 +1242,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1330,7 +1271,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(NONE)));
PushOperand(a0);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1361,7 +1301,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1388,9 +1327,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ mov(StoreDescriptor::ValueRegister(), result_register());
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1449,17 +1385,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1473,17 +1404,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1496,7 +1417,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
break;
}
@@ -1513,11 +1433,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1549,127 +1464,12 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate,
- NO_ALLOCATION_FLAGS);
- __ jmp(&done_allocate);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
- PopOperand(a2);
- __ LoadRoot(a3,
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
- __ Sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ Sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ Sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ Sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
- __ Sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = a2;
- Register scratch2 = a3;
-
- // Get the arguments.
- Register left = a1;
- Register right = a0;
- PopOperand(left);
- __ mov(a0, result_register());
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- switch (op) {
- case Token::SAR:
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ dsrav(right, left, scratch1);
- __ And(v0, right, Operand(0xffffffff00000000L));
- break;
- case Token::SHL: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ dsllv(scratch1, scratch1, scratch2);
- __ SmiTag(v0, scratch1);
- break;
- }
- case Token::SHR: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ dsrlv(scratch1, scratch1, scratch2);
- __ And(scratch2, scratch1, 0x80000000);
- __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
- __ SmiTag(v0, scratch1);
- break;
- }
- case Token::ADD:
- __ DaddBranchOvf(v0, left, Operand(right), &stub_call);
- break;
- case Token::SUB:
- __ DsubBranchOvf(v0, left, Operand(right), &stub_call);
- break;
- case Token::MUL: {
- __ Dmulh(v0, left, right);
- __ dsra32(scratch2, v0, 0);
- __ sra(scratch1, v0, 31);
- __ Branch(USE_DELAY_SLOT, &stub_call, ne, scratch2, Operand(scratch1));
- __ SmiTag(v0);
- __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg));
- __ Daddu(scratch2, right, left);
- __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- DCHECK(Smi::kZero == 0);
- __ mov(v0, zero_reg);
- break;
- }
- case Token::BIT_OR:
- __ Or(v0, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ And(v0, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ Xor(v0, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
PopOperand(a1);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(v0);
}
@@ -1791,7 +1591,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -1810,7 +1609,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -1823,7 +1621,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -1836,8 +1633,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ Ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ Ld(at, MemOperand(sp, 0));
PushOperand(at);
@@ -1862,8 +1657,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ Ld(at, MemOperand(sp, 0));
@@ -1882,27 +1675,15 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ Ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, v0);
}
@@ -1941,7 +1722,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(v0);
}
@@ -1960,7 +1740,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ SmiTst(v0, a4);
Split(eq, a4, Operand(zero_reg), if_true, if_false, fall_through);
@@ -1983,7 +1762,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, a1, Operand(FIRST_JS_RECEIVER_TYPE),
if_true, if_false, fall_through);
@@ -2006,7 +1784,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_ARRAY_TYPE),
if_true, if_false, fall_through);
@@ -2029,7 +1806,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_TYPED_ARRAY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2051,7 +1827,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, a1, Operand(JS_PROXY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2155,7 +1930,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ Ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -2189,35 +1963,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime,
- NO_ALLOCATION_FLAGS);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
- __ Pop(a2, a3);
- __ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
- __ Sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ Sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ Sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ Sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
- __ Sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ jmp(&done);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), v0);
@@ -2319,14 +2064,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ jmp(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ bind(&done);
@@ -2397,61 +2138,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
__ mov(a0, v0);
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(v0, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(v0);
- break;
- case NAMED_PROPERTY:
- __ Sd(v0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ Sd(v0, MemOperand(sp, 2 * kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- Register scratch1 = a1;
- __ li(scratch1, Operand(Smi::FromInt(count_value)));
- __ DaddBranchNoOvf(v0, v0, Operand(scratch1), &done);
- // Call stub. Undo operation first.
- __ Move(v0, a0);
- __ jmp(&stub_call);
- __ bind(&slow);
- }
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2477,16 +2168,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- __ bind(&stub_call);
+ int count_value = expr->op() == Token::INC ? 1 : -1;
__ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(count_value)));
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ Handle<Code> code =
+ CodeFactory::BinaryOperation(isolate(), Token::ADD).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in v0.
switch (assign_type) {
@@ -2496,8 +2187,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(v0);
}
        // For all contexts except EffectContext we have the result on
@@ -2508,8 +2197,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
break;
@@ -2518,7 +2205,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2533,7 +2219,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2564,7 +2249,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2645,7 +2329,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
break;
@@ -2657,7 +2340,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(a1);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
break;
@@ -2681,9 +2363,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
}
@@ -2705,7 +2386,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 2ab08235cf..30d8f72f4f 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -198,8 +198,6 @@ void FullCodeGenerator::Generate() {
__ push(r4);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -257,12 +255,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -277,14 +269,16 @@ void FullCodeGenerator::Generate() {
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewStrictArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(r4);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewSloppyArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -297,8 +291,6 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -311,8 +303,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmpl(sp, ip);
@@ -389,11 +379,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -477,8 +462,6 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -497,20 +480,26 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ mov(result_register(), Operand(Smi::cast(*lit)));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ mov(result_register(), Operand(Smi::cast(*lit)));
+ }
codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
@@ -523,14 +512,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ b(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ b(false_label_);
} else {
if (true_label_ != fall_through_) __ b(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
codegen()->DoTest(this);
}
}
@@ -599,8 +588,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ b(true_label_);
} else {
@@ -611,8 +598,9 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
}
@@ -681,27 +669,6 @@ void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r3, ip);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
@@ -748,7 +715,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -802,7 +768,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ RecordWriteContextSlot(cp, offset, result_register(), r5,
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -831,7 +796,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -875,12 +839,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ b(&skip);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r3, ip);
__ bne(&next_test);
@@ -909,12 +872,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -947,7 +908,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(r3);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -967,7 +927,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r3); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1009,7 +968,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ Push(r4); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ LoadSmiLiteral(r3, Smi::kZero);
__ Push(r3); // Initial index.
@@ -1053,7 +1011,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ Push(r4, r6); // Enumerable and current entry.
__ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ mr(r6, r3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ cmp(r3, r0);
@@ -1067,18 +1024,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
{
EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
   // Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(r3);
__ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
__ push(r3);
@@ -1091,7 +1044,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1119,7 +1071,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1193,11 +1144,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r3.
@@ -1232,7 +1183,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r3));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1265,20 +1215,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1297,7 +1243,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
PushOperand(r3);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1327,7 +1272,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1352,9 +1296,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Smi::FromInt(array_index));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1413,17 +1354,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1437,15 +1373,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1458,7 +1386,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
break;
}
@@ -1475,11 +1402,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1513,161 +1435,11 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate,
- NO_ALLOCATION_FLAGS);
- __ b(&done_allocate);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
- PopOperand(r5);
- __ LoadRoot(r6,
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSIteratorResult::kValueOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSIteratorResult::kDoneOffset), r0);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r5;
- Register scratch2 = r6;
-
- // Get the arguments.
- Register left = r4;
- Register right = r3;
- PopOperand(left);
-
- // Perform combined smi check on both operands.
- __ orx(scratch1, left, right);
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ b(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub.
- switch (op) {
- case Token::SAR:
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ ShiftRightArith(right, left, scratch1);
- __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
- break;
- case Token::SHL: {
- __ GetLeastBitsFromSmi(scratch2, right, 5);
-#if V8_TARGET_ARCH_PPC64
- __ ShiftLeft_(right, left, scratch2);
-#else
- __ SmiUntag(scratch1, left);
- __ ShiftLeft_(scratch1, scratch1, scratch2);
- // Check that the *signed* result fits in a smi
- __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
- __ SmiTag(right, scratch1);
-#endif
- break;
- }
- case Token::SHR: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srw(scratch1, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number.
- __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD: {
- __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ BranchOnOverflow(&stub_call);
- __ mr(right, scratch1);
- break;
- }
- case Token::SUB: {
- __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ BranchOnOverflow(&stub_call);
- __ mr(right, scratch1);
- break;
- }
- case Token::MUL: {
- Label mul_zero;
-#if V8_TARGET_ARCH_PPC64
- // Remove tag from both operands.
- __ SmiUntag(ip, right);
- __ SmiUntag(r0, left);
- __ Mul(scratch1, r0, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ TestIfInt32(scratch1, r0);
- __ bne(&stub_call);
-#else
- __ SmiUntag(ip, right);
- __ mullw(scratch1, left, ip);
- __ mulhw(scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ TestIfInt32(scratch2, scratch1, ip);
- __ bne(&stub_call);
-#endif
- // Go slow on zero result to handle -0.
- __ cmpi(scratch1, Operand::Zero());
- __ beq(&mul_zero);
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(right, scratch1);
-#else
- __ mr(right, scratch1);
-#endif
- __ b(&done);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ bind(&mul_zero);
- __ add(scratch2, right, left);
- __ cmpi(scratch2, Operand::Zero());
- __ blt(&stub_call);
- __ LoadSmiLiteral(right, Smi::kZero);
- break;
- }
- case Token::BIT_OR:
- __ orx(right, left, right);
- break;
- case Token::BIT_AND:
- __ and_(right, left, right);
- break;
- case Token::BIT_XOR:
- __ xor_(right, left, right);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r3);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(r4);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(r3);
}
@@ -1787,7 +1559,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
@@ -1800,7 +1571,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
@@ -1814,7 +1584,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
{
StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -1827,8 +1596,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(r0, MemOperand(sp, 0));
PushOperand(r0);
@@ -1852,8 +1619,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), r3);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
@@ -1872,26 +1637,14 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ mov(r6, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, r3);
}
@@ -1930,7 +1683,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(r3);
}
@@ -1949,7 +1701,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ TestIfSmi(r3, r0);
Split(eq, if_true, if_false, fall_through, cr0);
@@ -1972,7 +1723,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(r3, if_false);
__ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1994,7 +1744,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(r3, if_false);
__ CompareObjectType(r3, r4, r4, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2016,7 +1765,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(r3, if_false);
__ CompareObjectType(r3, r4, r4, JS_TYPED_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2038,7 +1786,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(r3, if_false);
__ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2143,7 +1890,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to r4.
int const argc = args->length() - 2;
__ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -2177,35 +1923,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime,
- NO_ALLOCATION_FLAGS);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
- __ Pop(r5, r6);
- __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSIteratorResult::kValueOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSIteratorResult::kDoneOffset), r0);
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ b(&done);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), r3);
@@ -2303,14 +2020,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true, &materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(r3);
__ b(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(r3, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(r3);
__ bind(&done);
@@ -2381,62 +2094,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load my have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(r3, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r3);
- break;
- case NAMED_PROPERTY:
- __ StoreP(r3, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- Register scratch1 = r4;
- Register scratch2 = r5;
- __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
- __ AddAndCheckForOverflow(r3, r3, scratch1, scratch2, r0);
- __ BranchOnNoOverflow(&done);
- // Call stub. Undo operation first.
- __ sub(r3, r3, scratch1);
- __ b(&stub_call);
- __ bind(&slow);
- }
-
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2462,16 +2122,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- __ bind(&stub_call);
+ int count_value = expr->op() == Token::INC ? 1 : -1;
__ mr(r4, r3);
__ LoadSmiLiteral(r3, Smi::FromInt(count_value));
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ Handle<Code> code =
+ CodeFactory::BinaryOperation(isolate(), Token::ADD).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in r3.
switch (assign_type) {
@@ -2482,8 +2142,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(r3);
}
      // For all contexts except EffectContext we have the result on
@@ -2494,8 +2152,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
break;
@@ -2503,7 +2159,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2517,7 +2172,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2549,7 +2203,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2631,7 +2284,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2642,7 +2294,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(r4);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2666,9 +2317,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmpi(r3, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
@@ -2691,7 +2341,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue
? Heap::kNullValueRootIndex
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index 250406ad53..938165688f 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -202,8 +202,6 @@ void FullCodeGenerator::Generate() {
__ push(r3);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -261,12 +259,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -281,14 +273,16 @@ void FullCodeGenerator::Generate() {
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewStrictArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
} else if (literal()->has_duplicate_parameters()) {
__ Push(r3);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewSloppyArguments);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -301,8 +295,6 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -315,8 +307,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ CmpLogicalP(sp, ip);
@@ -388,11 +378,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -472,8 +457,6 @@ void FullCodeGenerator::StackValueContext::Plug(
}
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -490,18 +473,24 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ mov(result_register(), Operand(Smi::cast(*lit)));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
+ if (lit->IsHeapObject()) {
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
+ } else {
+ __ mov(result_register(), Operand(Smi::cast(*lit)));
+ }
codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
@@ -514,14 +503,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ b(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ b(false_label_);
} else {
if (true_label_ != fall_through_) __ b(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
+ __ mov(result_register(), Operand(Handle<HeapObject>::cast(lit)));
codegen()->DoTest(this);
}
}
@@ -582,8 +571,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ b(true_label_);
} else {
@@ -593,8 +580,9 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
}
@@ -657,25 +645,6 @@ void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
}
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ CompareRoot(r2, Heap::kTrueValueRootIndex);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
@@ -721,7 +690,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ StoreP(ip, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -773,7 +741,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ RecordWriteContextSlot(cp, offset, result_register(), r4,
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -800,7 +767,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -845,12 +811,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ b(&skip);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
__ bne(&next_test);
__ Drop(1);
@@ -878,12 +843,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
@@ -915,7 +878,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(r2);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -935,7 +897,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r2); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -976,7 +937,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r3, r2); // Smi and array
__ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ Push(r3); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ LoadSmiLiteral(r2, Smi::kZero);
__ Push(r2); // Initial index.
@@ -1020,7 +980,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ Push(r3, r5); // Enumerable and current entry.
__ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRR(r5, r2);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ CmpP(r2, r0);
@@ -1034,18 +993,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
{
EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
   // Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ pop(r2);
__ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
__ push(r2);
@@ -1058,7 +1013,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1086,7 +1040,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variables and all other types of variables.
@@ -1158,11 +1111,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r5, r4, r3, r2);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r2.
@@ -1197,7 +1150,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r2));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1230,20 +1182,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1262,7 +1210,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadSmiLiteral(r2, Smi::FromInt(NONE));
PushOperand(r2);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1291,7 +1238,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1316,9 +1262,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Smi::FromInt(array_index));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1376,17 +1319,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1400,15 +1338,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1421,7 +1351,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
break;
}
@@ -1438,11 +1367,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1475,173 +1399,11 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate,
- NO_ALLOCATION_FLAGS);
- __ b(&done_allocate);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
- PopOperand(r4);
- __ LoadRoot(r5,
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
- __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
- __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
- __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
-}
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r4;
- Register scratch2 = r5;
-
- // Get the arguments.
- Register left = r3;
- Register right = r2;
- PopOperand(left);
-
- // Perform combined smi check on both operands.
- __ LoadRR(scratch1, right);
- __ OrP(scratch1, left);
- STATIC_ASSERT(kSmiTag == 0);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ b(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub.
- switch (op) {
- case Token::SAR:
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ ShiftRightArithP(right, left, scratch1);
- __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
- break;
- case Token::SHL: {
- __ GetLeastBitsFromSmi(scratch2, right, 5);
-#if V8_TARGET_ARCH_S390X
- __ ShiftLeftP(right, left, scratch2);
-#else
- __ SmiUntag(scratch1, left);
- __ ShiftLeftP(scratch1, scratch1, scratch2);
- // Check that the *signed* result fits in a smi
- __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
- __ SmiTag(right, scratch1);
-#endif
- break;
- }
- case Token::SHR: {
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srl(scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number.
- __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD: {
- __ AddP(scratch1, left, right);
- __ b(overflow, &stub_call);
- __ LoadRR(right, scratch1);
- break;
- }
- case Token::SUB: {
- __ SubP(scratch1, left, right);
- __ b(overflow, &stub_call);
- __ LoadRR(right, scratch1);
- break;
- }
- case Token::MUL: {
- Label mul_zero;
- if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
- __ SmiUntag(ip, right);
- __ MulPWithCondition(scratch2, ip, left);
- __ b(overflow, &stub_call);
- __ beq(&mul_zero, Label::kNear);
- __ LoadRR(right, scratch2);
- } else {
-#if V8_TARGET_ARCH_S390X
- // Remove tag from both operands.
- __ SmiUntag(ip, right);
- __ SmiUntag(scratch2, left);
- __ mr_z(scratch1, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits
- // of the result are identical.
- __ lr(ip, scratch2); // 32 bit load
- __ sra(ip, Operand(31));
- __ cr_z(ip, scratch1); // 32 bit compare
- __ bne(&stub_call);
-#else
- __ SmiUntag(ip, right);
- __ LoadRR(scratch2, left); // load into low order of reg pair
- __ mr_z(scratch1, ip); // R4:R5 = R5 * ip
- // Check for overflowing the smi range - no overflow if higher 33 bits
- // of the result are identical.
- __ lr(ip, scratch2); // 32 bit load
- __ sra(ip, Operand(31));
- __ cr_z(ip, scratch1); // 32 bit compare
- __ bne(&stub_call);
-#endif
- // Go slow on zero result to handle -0.
- __ chi(scratch2, Operand::Zero());
- __ beq(&mul_zero, Label::kNear);
-#if V8_TARGET_ARCH_S390X
- __ SmiTag(right, scratch2);
-#else
- __ LoadRR(right, scratch2);
-#endif
- }
- __ b(&done);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ bind(&mul_zero);
- __ AddP(scratch2, right, left);
- __ CmpP(scratch2, Operand::Zero());
- __ blt(&stub_call);
- __ LoadSmiLiteral(right, Smi::kZero);
- break;
- }
- case Token::BIT_OR:
- __ OrP(right, left);
- break;
- case Token::BIT_AND:
- __ AndP(right, left);
- break;
- case Token::BIT_XOR:
- __ XorP(right, left);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r2);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(r3);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(r2);
}
@@ -1759,7 +1521,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
@@ -1771,7 +1532,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
@@ -1785,7 +1545,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
{
StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -1798,8 +1557,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(r1, MemOperand(sp, 0));
PushOperand(r1);
@@ -1822,8 +1579,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), r2);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
@@ -1840,27 +1595,14 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
-
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Load(r5, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r2, Operand(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
context()->DropAndPlug(1, r2);
}
@@ -1899,7 +1641,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(r2);
}
@@ -1917,7 +1658,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ TestIfSmi(r2);
Split(eq, if_true, if_false, fall_through);
@@ -1939,7 +1679,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(r2, if_false);
__ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1960,7 +1699,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(r2, if_false);
__ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1981,7 +1719,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(r2, if_false);
__ CompareObjectType(r2, r3, r3, JS_TYPED_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2002,7 +1739,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(r2, if_false);
__ CompareObjectType(r2, r3, r3, JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2106,7 +1842,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to r3.
int const argc = args->length() - 2;
__ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -2139,34 +1874,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime,
- NO_ALLOCATION_FLAGS);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
- __ Pop(r4, r5);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
- __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
- __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
- __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ b(&done);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(r2);
-}
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), r2);
@@ -2262,14 +1969,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true, &materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(r2, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(r2);
__ b(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
__ LoadRoot(r2, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(r2);
__ bind(&done);
@@ -2339,62 +2042,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(r2, &slow);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r2);
- break;
- case NAMED_PROPERTY:
- __ StoreP(r2, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- Register scratch1 = r3;
- Register scratch2 = r4;
- __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
- __ AddP(scratch2, r2, scratch1);
- __ LoadOnConditionP(nooverflow, r2, scratch2);
- __ b(nooverflow, &done);
- // Call stub. Undo operation first.
- __ b(&stub_call);
- __ bind(&slow);
- }
-
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2420,16 +2070,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- __ bind(&stub_call);
+ int count_value = expr->op() == Token::INC ? 1 : -1;
__ LoadRR(r3, r2);
__ LoadSmiLiteral(r2, Smi::FromInt(count_value));
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ Handle<Code> code =
+ CodeFactory::BinaryOperation(isolate(), Token::ADD).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in r2.
switch (assign_type) {
@@ -2440,8 +2090,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(r2);
}
// For all contexts except EffectContext we have the result on
@@ -2452,8 +2100,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
break;
@@ -2461,7 +2107,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2475,7 +2120,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2506,7 +2150,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2584,7 +2227,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2595,7 +2237,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(r3);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
@@ -2620,9 +2261,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ CmpP(r2, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
@@ -2644,7 +2284,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue
? Heap::kNullValueRootIndex
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index b15874c84b..0b4d127d24 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -182,8 +182,6 @@ void FullCodeGenerator::Generate() {
__ Push(rdi);
__ Push(info->scope()->scope_info());
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
@@ -240,12 +238,6 @@ void FullCodeGenerator::Generate() {
}
}
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
// We don't support new.target and rest parameters here.
DCHECK_NULL(info->scope()->new_target_var());
DCHECK_NULL(info->scope()->rest_parameter());
@@ -283,8 +275,6 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(info->scope()->declarations());
@@ -297,8 +287,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
@@ -370,12 +358,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
}
__ bind(&ok);
-
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -453,10 +435,6 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
@@ -479,7 +457,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
if (lit->IsSmi()) {
__ SafeMove(result_register(), Smi::cast(*lit));
} else {
- __ Move(result_register(), lit);
+ __ Move(result_register(), Handle<HeapObject>::cast(lit));
}
}
@@ -489,16 +467,12 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
if (lit->IsSmi()) {
__ SafePush(Smi::cast(*lit));
} else {
- __ Push(lit);
+ __ Push(Handle<HeapObject>::cast(lit));
}
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
@@ -511,14 +485,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
if (true_label_ != fall_through_) __ jmp(true_label_);
}
} else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
+ if (Smi::ToInt(*lit) == 0) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else {
if (true_label_ != fall_through_) __ jmp(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
- __ Move(result_register(), lit);
+ __ Move(result_register(), Handle<HeapObject>::cast(lit));
codegen()->DoTest(this);
}
}
@@ -589,10 +563,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
if (flag) {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else {
@@ -605,8 +575,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
}
@@ -680,26 +651,6 @@ void FullCodeGenerator::SetVar(Variable* var,
}
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
@@ -745,7 +696,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
@@ -802,7 +752,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -831,7 +780,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -876,12 +824,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetExpressionPosition(clause);
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
Label skip;
__ jmp(&skip, Label::kNear);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(not_equal, &next_test);
__ Drop(1);
@@ -909,12 +856,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -947,7 +892,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ Push(rax);
// Check cache validity in generated code. If we cannot guarantee cache
@@ -967,7 +911,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1009,7 +952,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ Push(Smi::kZero); // Initial index.
// Generate code for doing the condition check.
@@ -1048,7 +990,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// have the key or returns the name-converted key.
__ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
loop_statement.continue_label());
@@ -1058,18 +999,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
// Generate code for going to the next element by incrementing the
// index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
EmitBackEdgeBookkeeping(stmt, &loop);
@@ -1080,7 +1017,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1108,7 +1044,6 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Two cases: global variable, and all other types of variables.
@@ -1185,11 +1120,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(rbx, SmiFromSlot(expr->literal_slot()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in rax.
@@ -1224,7 +1159,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1252,20 +1186,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1283,7 +1213,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
PushOperand(Smi::FromInt(NONE));
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1315,7 +1244,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1340,9 +1268,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
-
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1400,17 +1325,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -1424,16 +1344,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
+ EmitBinaryOp(expr->binary_operation(), op);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1446,7 +1357,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
break;
}
@@ -1463,11 +1373,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
void FullCodeGenerator::PushOperand(MemOperand operand) {
OperandStackDepthIncrement(1);
__ Push(operand);
@@ -1484,97 +1389,12 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
}
}
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &allocate,
- NO_ALLOCATION_FLAGS);
- __ jmp(&done_allocate, Label::kNear);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
- __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ Pop(FieldOperand(rax, JSIteratorResult::kValueOffset));
- __ LoadRoot(FieldOperand(rax, JSIteratorResult::kDoneOffset),
- done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- OperandStackDepthDecrement(1);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack (popped into rdx). Right operand is in rax but moved into
- // rcx to make the shifts easier.
- Label done, stub_call, smi_case;
- PopOperand(rdx);
- __ movp(rcx, rax);
- __ orp(rax, rdx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ movp(rax, rcx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- __ bind(&smi_case);
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rdx, rcx);
- break;
- case Token::SHL:
- __ SmiShiftLeft(rax, rdx, rcx, &stub_call);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
- break;
- case Token::ADD:
- __ SmiAdd(rax, rdx, rcx, &stub_call);
- break;
- case Token::SUB:
- __ SmiSub(rax, rdx, rcx, &stub_call);
- break;
- case Token::MUL:
- __ SmiMul(rax, rdx, rcx, &stub_call);
- break;
- case Token::BIT_OR:
- __ SmiOr(rax, rdx, rcx);
- break;
- case Token::BIT_AND:
- __ SmiAnd(rax, rdx, rcx);
- break;
- case Token::BIT_XOR:
- __ SmiXor(rax, rdx, rcx);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(rdx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
+ Handle<Code> code = CodeFactory::BinaryOperation(isolate(), op).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
context()->Plug(rax);
}
@@ -1691,7 +1511,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
@@ -1703,7 +1522,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
@@ -1716,7 +1534,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the Call builtin if it
// is a sloppy mode method.
@@ -1728,8 +1545,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
@@ -1753,8 +1568,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ Move(LoadDescriptor::NameRegister(), rax);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(rsp, 0));
@@ -1772,26 +1585,14 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
+ SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallICTrampoline(isolate(), mode).code();
__ Set(rdx, IntFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
@@ -1831,7 +1632,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
context()->Plug(rax);
}
@@ -1849,7 +1649,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
@@ -1872,7 +1671,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1894,7 +1692,6 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1916,7 +1713,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_TYPED_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -1939,7 +1735,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_PROXY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2042,7 +1837,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to rdi.
int const argc = args->length() - 2;
__ movp(rdi, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2076,34 +1870,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime,
- NO_ALLOCATION_FLAGS);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
- __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ Pop(FieldOperand(rax, JSIteratorResult::kDoneOffset));
- __ Pop(FieldOperand(rax, JSIteratorResult::kValueOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ jmp(&done, Label::kNear);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push function.
__ LoadNativeContextSlot(expr->context_index(), rax);
@@ -2206,8 +1972,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
} else {
@@ -2215,8 +1979,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
} else {
@@ -2289,63 +2051,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(rax, &slow, Label::kNear);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ Push(rax);
- break;
- case NAMED_PROPERTY:
- __ movp(Operand(rsp, kPointerSize), rax);
- break;
- case KEYED_PROPERTY:
- __ movp(Operand(rsp, 2 * kPointerSize), rax);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- SmiOperationConstraints constraints =
- SmiOperationConstraint::kPreserveSourceRegister |
- SmiOperationConstraint::kBailoutOnNoOverflow;
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1), constraints, &done,
- Label::kNear);
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1), constraints, &done,
- Label::kNear);
- }
- __ jmp(&stub_call, Label::kNear);
- __ bind(&slow);
- }
-
// Convert old value into a number.
__ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -2374,14 +2082,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetExpressionPosition(expr);
// Call stub for +1/-1.
- __ bind(&stub_call);
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
+ CodeFactory::BinaryOperation(isolate(), expr->binary_op()).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
// Store the value returned in rax.
switch (assign_type) {
@@ -2392,8 +2098,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context.Plug(rax);
}
// For all contexts except kEffect: We have the result on
@@ -2405,8 +2109,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
break;
@@ -2414,7 +2116,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2428,7 +2129,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -2459,7 +2159,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
{ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
@@ -2539,7 +2238,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
SetExpressionPosition(expr);
EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
@@ -2550,7 +2248,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
PopOperand(rdx);
__ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
@@ -2575,10 +2272,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
+ CallIC(ic);
patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
@@ -2601,7 +2297,6 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
diff --git a/deps/v8/src/full-codegen/x87/OWNERS b/deps/v8/src/full-codegen/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/full-codegen/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
deleted file mode 100644
index 0499100746..0000000000
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ /dev/null
@@ -1,2749 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ast/compile-time-value.h"
-#include "src/ast/scopes.h"
-#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/compilation-info.h"
-#include "src/compiler.h"
-#include "src/debug/debug.h"
-#include "src/full-codegen/full-codegen.h"
-#include "src/ic/ic.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- DCHECK(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg,
- Label* target,
- Label::Distance distance = Label::kFar) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target, distance); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg,
- Label* target,
- Label::Distance distance = Label::kFar) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target, distance); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- DCHECK(is_uint8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- // jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, Label* target, Label::Distance distance) {
- DCHECK(!patch_site_.is_bound() && !info_emitted_);
- DCHECK(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target, distance);
- }
-
- MacroAssembler* masm() { return masm_; }
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called (i.e. ourselves)
-// o edx: the new target value
-// o esi: our context
-// o ebp: our caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-x87.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- profiling_counter_ = isolate()->factory()->NewCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(literal());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
- if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
- __ AssertNotSmi(ecx);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx);
- __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->GeneratePreagedPrologue());
-
- // Increment invocation count for the function.
- {
- Comment cmnt(masm_, "[ Increment invocation count");
- __ mov(ecx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
- __ mov(ecx, FieldOperand(ecx, Cell::kValueOffset));
- __ add(
- FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize),
- Immediate(Smi::FromInt(1)));
- }
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- OperandStackDepthIncrement(locals_count);
- if (locals_count == 1) {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- } else if (locals_count > 1) {
- if (locals_count >= 128) {
- Label ok;
- __ mov(ecx, esp);
- __ sub(ecx, Immediate(locals_count * kPointerSize));
- ExternalReference stack_limit =
- ExternalReference::address_of_real_stack_limit(isolate());
- __ cmp(ecx, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&ok);
- }
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- const int kMaxPushes = 32;
- if (locals_count >= kMaxPushes) {
- int loop_iterations = locals_count / kMaxPushes;
- __ mov(ecx, loop_iterations);
- Label loop_header;
- __ bind(&loop_header);
- // Do pushes.
- for (int i = 0; i < kMaxPushes; i++) {
- __ push(eax);
- }
- __ dec(ecx);
- __ j(not_zero, &loop_header, Label::kNear);
- }
- int remaining = locals_count % kMaxPushes;
- // Emit the remaining pushes.
- for (int i = 0; i < remaining; i++) {
- __ push(eax);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (info->scope()->NeedsContext()) {
- Comment cmnt(masm_, "[ Allocate context");
- bool need_write_barrier = true;
- int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- // Argument to NewContext is the function, which is still in edi.
- if (info->scope()->is_script_scope()) {
- __ push(edi);
- __ Push(info->scope()->scope_info());
- __ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(),
- BailoutState::TOS_REGISTER);
- // The new target value is not used, clobbering is safe.
- DCHECK_NULL(info->scope()->new_target_var());
- } else {
- if (info->scope()->new_target_var() != nullptr) {
- __ push(edx); // Preserve new target.
- }
- if (slots <= ConstructorBuiltins::MaximumFunctionContextSlots()) {
- Callable callable = CodeFactory::FastNewFunctionContext(
- isolate(), info->scope()->scope_type());
- __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
- Immediate(slots));
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- // Result of the FastNewFunctionContext builtin is always in new space.
- need_write_barrier = false;
- } else {
- __ push(edi);
- __ Push(Smi::FromInt(info->scope()->scope_type()));
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
- if (info->scope()->new_target_var() != nullptr) {
- __ pop(edx); // Restore new target.
- }
- }
- function_in_register = false;
- // Context is returned in eax. It replaces the context passed to us.
- // It's saved in the stack and kept live in esi.
- __ mov(esi, eax);
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
-
- // Copy parameters into context if necessary.
- int num_parameters = info->scope()->num_parameters();
- int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
- for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var =
- (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- if (need_write_barrier) {
- __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
- kDontSaveFPRegs);
- } else if (FLAG_debug_code) {
- Label done;
- __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
- __ Abort(kExpectedNewSpaceObject);
- __ bind(&done);
- }
- }
- }
- }
-
- // Register holding this function and new target are both trashed in case we
- // bailout here. But since that can happen only when new target is not used
- // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(),
- BailoutState::NO_REGISTERS);
-
- // We don't support new.target and rest parameters here.
- DCHECK_NULL(info->scope()->new_target_var());
- DCHECK_NULL(info->scope()->rest_parameter());
- DCHECK_NULL(info->scope()->this_function_var());
-
- Variable* arguments = info->scope()->arguments();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- FastNewStrictArgumentsStub stub(isolate());
- __ CallStub(&stub);
- } else if (literal()->has_duplicate_parameters()) {
- __ Push(edi);
- __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
- } else {
- FastNewSloppyArgumentsStub stub(isolate());
- __ CallStub(&stub);
- }
-
- SetVar(arguments, eax, ebx, edx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter);
- }
-
- // Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(),
- BailoutState::NO_REGISTERS);
- {
- Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
- }
-
- // Assert that the declarations do not use ICs. Otherwise the debugger
- // won't be able to redirect a PC at an IC to the correct IC in newly
- // recompiled code.
- DCHECK_EQ(0, ic_total_count_);
-
- {
- Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(),
- BailoutState::NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
- }
-
- {
- Comment cmnt(masm_, "[ Body");
- DCHECK(loop_depth() == 0);
- VisitStatements(literal()->body());
- DCHECK(loop_depth() == 0);
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ mov(eax, isolate()->factory()->undefined_value());
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Move(eax, Immediate(Smi::kZero));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(ebx, Immediate(profiling_counter_));
- __ sub(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(Smi::FromInt(delta)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- __ mov(ebx, Immediate(profiling_counter_));
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(Smi::FromInt(reset_value)));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- Label ok;
-
- DCHECK(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- int weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
-}
-
-void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
- bool is_tail_call) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- // Don't need to save result register if we are going to do a tail call.
- if (!is_tail_call) {
- __ push(eax);
- }
- __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- if (!is_tail_call) {
- __ pop(eax);
- }
- EmitProfilingCounterReset();
- __ bind(&ok);
-}
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- // Common return label
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit);
- }
- EmitProfilingCounterHandlingForReturnSequence(false);
-
- SetReturnPosition(literal());
- __ leave();
-
- int arg_count = info_->scope()->num_parameters() + 1;
- int arguments_bytes = arg_count * kPointerSize;
- __ Ret(arguments_bytes, ecx);
- }
-}
-
-void FullCodeGenerator::RestoreContext() {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-}
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand operand = codegen()->VarOperand(var, result_register());
- // Memory operands can be pushed directly.
- codegen()->PushOperand(operand);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on X87.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on X87.
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on X87.
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on X87.
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafeMove(result_register(), Immediate(lit));
- } else {
- __ Move(result_register(), Immediate(lit));
- }
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- codegen()->OperandStackDepthIncrement(1);
- if (lit->IsSmi()) {
- __ SafePush(Immediate(lit));
- } else {
- __ push(Immediate(lit));
- }
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
- if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), lit);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- if (count > 1) codegen()->DropOperands(count - 1);
- __ mov(Operand(esp, 0), reg);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- DCHECK(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ mov(result_register(), isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ mov(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- codegen()->OperandStackDepthIncrement(1);
- Label done;
- __ bind(materialize_true);
- __ push(Immediate(isolate()->factory()->true_value()));
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ push(Immediate(isolate()->factory()->false_value()));
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- DCHECK(materialize_true == true_label_);
- DCHECK(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ mov(result_register(), value);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- codegen()->OperandStackDepthIncrement(1);
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ push(Immediate(value));
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
- CallIC(ic, condition->test_id());
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- DCHECK(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return Operand(ebp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- MemOperand location = VarOperand(var, dest);
- __ mov(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- DCHECK(var->IsContextSlot() || var->IsStackAllocated());
- DCHECK(!scratch0.is(src));
- DCHECK(!scratch0.is(scratch1));
- DCHECK(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ mov(location, src);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- int offset = Context::SlotOffset(var->index());
- DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, BailoutState::TOS_REGISTER);
- if (should_normalize) {
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
- DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
- // Check that we're not inside a with or catch context.
- __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
- __ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, kDeclarationInWithContext);
- __ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, kDeclarationInCatchContext);
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::UNALLOCATED: {
- DCHECK(!variable->binding_needs_init());
- globals_->Add(variable->name(), zone());
- FeedbackSlot slot = proxy->VariableFeedbackSlot();
- DCHECK(!slot.IsInvalid());
- globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
- globals_->Add(isolate()->factory()->undefined_value(), zone());
- globals_->Add(isolate()->factory()->undefined_value(), zone());
- break;
- }
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- if (variable->binding_needs_init()) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(StackOperand(variable),
- Immediate(isolate()->factory()->the_hole_value()));
- }
- break;
-
- case VariableLocation::CONTEXT:
- if (variable->binding_needs_init()) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(ContextOperand(esi, variable->index()),
- Immediate(isolate()->factory()->the_hole_value()));
- // No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- }
- break;
-
- case VariableLocation::LOOKUP:
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-}
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- FeedbackSlot slot = proxy->VariableFeedbackSlot();
- DCHECK(!slot.IsInvalid());
- globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
-
- // We need the slot where the literals array lives, too.
- slot = declaration->fun()->LiteralFeedbackSlot();
- DCHECK(!slot.IsInvalid());
- globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
-
- Handle<SharedFunctionInfo> function =
- Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ mov(StackOperand(variable), result_register());
- break;
- }
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ mov(ContextOperand(esi, variable->index()), result_register());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()),
- result_register(), ecx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
- case VariableLocation::LOOKUP:
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ Push(pairs);
- __ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ EmitLoadFeedbackVector(eax);
- __ Push(eax);
- __ CallRuntime(Runtime::kDeclareGlobals);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as the final fall-through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ mov(edx, Operand(esp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-
- __ cmp(edx, eax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- __ bind(&slow_case);
- }
-
- SetExpressionPosition(clause);
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
- CallIC(ic, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- Label skip;
- __ jmp(&skip, Label::kNear);
- PrepareForBailout(clause, BailoutState::TOS_REGISTER);
- __ cmp(eax, isolate()->factory()->true_value());
- __ j(not_equal, &next_test);
- __ Drop(1);
- __ jmp(clause->body_target());
- __ bind(&skip);
-
- __ test(eax, eax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- DropOperands(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_label());
- } else {
- __ jmp(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt, SKIP_BREAK);
-
- FeedbackSlot slot = stmt->ForInFeedbackSlot();
-
- // Get the object to enumerate over.
- SetExpressionAsStatementPosition(stmt->enumerable());
- VisitForAccumulatorValue(stmt->enumerable());
- OperandStackDepthIncrement(5);
-
- Label loop, exit;
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // If the object is null or undefined, skip over the loop; otherwise convert
- // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &done_convert, Label::kNear);
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, &exit);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, &exit);
- __ bind(&convert);
- __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
- RestoreContext();
- __ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
- __ push(eax);
-
- // Check cache validity in generated code. If we cannot guarantee cache
- // validity, call the runtime system to check cache validity or get the
- // property names in a fixed array. Note: Proxies never have an enum cache,
- // so will always take the slow path.
- Label call_runtime, use_cache, fixed_array;
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- __ j(not_equal, &fixed_array);
-
-
- // We got a map in register eax. Get the enumeration cache from it.
- Label no_descriptors;
- __ bind(&use_cache);
-
- __ EnumLength(edx, eax);
- __ cmp(edx, Immediate(Smi::kZero));
- __ j(equal, &no_descriptors);
-
- __ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeOffset));
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(eax); // Map.
- __ push(ecx); // Enumeration cache.
- __ push(edx); // Number of valid entries for the map in the enum cache.
- __ push(Immediate(Smi::kZero)); // Initial index.
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ add(esp, Immediate(kPointerSize));
- __ jmp(&exit);
-
- // We got a fixed array in register eax. Iterate through that.
- __ bind(&fixed_array);
-
- __ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
- __ push(eax); // Array
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ push(eax); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ push(Immediate(Smi::kZero)); // Initial index.
-
- // Generate code for doing the condition check.
- __ bind(&loop);
- SetExpressionAsStatementPosition(stmt->each());
-
- __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
- __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_label());
-
- // Get the current entry of the array into register eax.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(eax, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack (or, in the permanent slow case,
- // a smi) into register edx.
- __ mov(edx, Operand(esp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ mov(ebx, Operand(esp, 4 * kPointerSize));
- __ cmp(edx, FieldOperand(ebx, HeapObject::kMapOffset));
- __ j(equal, &update_each, Label::kNear);
-
- // We need to filter the key; record the slow path here.
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadFeedbackVector(edx);
- __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
- Immediate(FeedbackVector::MegamorphicSentinel(isolate())));
-
- // eax contains the key. The receiver in ebx is the second argument to
- // ForInFilter. ForInFilter returns undefined if the receiver doesn't have
- // the key; otherwise it returns the name-converted key.
- __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
- RestoreContext();
- PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
- __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
- loop_statement.continue_label());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register eax.
- __ bind(&update_each);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
- }
-
- // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->IncrementId(), BailoutState::NO_REGISTERS);
- __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- DropOperands(5);
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackSlot slot) {
- DCHECK(NeedsHomeObject(initializer));
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- CallStoreIC(slot, isolate()->factory()->home_object_symbol());
-}
-
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
- int offset,
- FeedbackSlot slot) {
- DCHECK(NeedsHomeObject(initializer));
- __ mov(StoreDescriptor::ReceiverRegister(), eax);
- __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- CallStoreIC(slot, isolate()->factory()->home_object_symbol());
-}
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofMode typeof_mode) {
- SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
- Variable* var = proxy->var();
-
- // Two cases: global variables and all other types of variables.
- switch (var->location()) {
- case VariableLocation::UNALLOCATED: {
- Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_mode);
- context()->Plug(eax);
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
- Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
- : "[ Stack variable");
-
- if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
- context()->Plug(var);
- break;
- }
-
- case VariableLocation::LOOKUP:
- case VariableLocation::MODULE:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
- Expression* expression = (property == NULL) ? NULL : property->value();
- if (expression == NULL) {
- PushOperand(isolate()->factory()->null_value());
- } else {
- VisitForStackValue(expression);
- if (NeedsHomeObject(expression)) {
- DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
- property->kind() == ObjectLiteral::Property::SETTER);
- int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
- EmitSetHomeObject(expression, offset, property->GetSlot());
- }
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- Handle<BoilerplateDescription> constant_properties =
- expr->GetOrBuildConstantProperties(isolate());
- int flags = expr->ComputeFlags();
- // If any of the keys would store to the elements array, then we shouldn't
- // allow it.
- if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral);
- } else {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
- __ mov(ecx, Immediate(constant_properties));
- __ mov(edx, Immediate(Smi::FromInt(flags)));
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- RestoreContext();
- }
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in eax.
- bool result_saved = false;
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- DCHECK(!property->is_computed_name());
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key()->AsLiteral();
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(eax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::SPREAD:
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- // It is safe to use [[Put]] here because the boilerplate already
- // contains computed properties with an uninitialized value.
- if (key->IsStringLiteral()) {
- DCHECK(key->IsPropertyName());
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- DCHECK(StoreDescriptor::ValueRegister().is(eax));
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
- PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
- }
- } else {
- VisitForEffect(value);
- }
- break;
- }
- PushOperand(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
- PushOperand(Smi::FromInt(SLOPPY)); // Language mode
- CallRuntimeWithOperands(Runtime::kSetProperty);
- } else {
- DropOperands(3);
- }
- break;
- case ObjectLiteral::Property::PROTOTYPE:
- PushOperand(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(i),
- BailoutState::NO_REGISTERS);
- break;
- case ObjectLiteral::Property::GETTER:
- if (property->emit_store()) {
- AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
- it->second->getter = property;
- }
- break;
- case ObjectLiteral::Property::SETTER:
- if (property->emit_store()) {
- AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
- it->second->setter = property;
- }
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- PushOperand(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(it->first);
-
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
-
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
- PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- Handle<ConstantElementsPair> constant_elements =
- expr->GetOrBuildConstantElements(isolate());
-
- if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
- __ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
- __ CallRuntime(Runtime::kCreateArrayLiteral);
- } else {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
- __ mov(ecx, Immediate(constant_elements));
- Callable callable =
- CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- RestoreContext();
- }
- PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
-
- bool result_saved = false; // Is the result saved to the stack?
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int array_index = 0; array_index < length; array_index++) {
- Expression* subexpr = subexprs->at(array_index);
- DCHECK(!subexpr->IsSpread());
-
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-
- if (!result_saved) {
- PushOperand(eax); // array literal.
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(Smi::FromInt(array_index)));
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- CallKeyedStoreIC(expr->LiteralFeedbackSlot());
- PrepareForBailoutForId(expr->GetIdForElement(array_index),
- BailoutState::NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
-
- Comment cmnt(masm_, "[ Assignment");
-
- Property* property = expr->target()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the register.
- VisitForStackValue(property->obj());
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
- __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- AccumulatorValueContext result_context(this);
- { AccumulatorValueContext left_operand_context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- PushOperand(eax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- SetExpressionPosition(expr);
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
- proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- context()->Plug(eax);
- break;
- }
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
-}
-
-void FullCodeGenerator::VisitSuspend(Suspend* expr) {
- // Resumable functions are not supported.
- UNREACHABLE();
-}
-
-void FullCodeGenerator::PushOperand(MemOperand operand) {
- OperandStackDepthIncrement(1);
- __ Push(operand);
-}
-
-void FullCodeGenerator::EmitOperandStackDepthCheck() {
- if (FLAG_debug_code) {
- int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
- operand_stack_depth_ * kPointerSize;
- __ mov(eax, ebp);
- __ sub(eax, esp);
- __ cmp(eax, Immediate(expected_diff));
- __ Assert(equal, kUnexpectedStackDepth);
- }
-}
-
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label allocate, done_allocate;
-
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate,
- NO_ALLOCATION_FLAGS);
- __ jmp(&done_allocate, Label::kNear);
-
- __ bind(&allocate);
- __ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
-
- __ bind(&done_allocate);
- __ mov(ebx, NativeContextOperand());
- __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
- __ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
- isolate()->factory()->ToBoolean(done));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- OperandStackDepthDecrement(1);
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack. Right operand is in eax.
- Label smi_case, done, stub_call;
- PopOperand(edx);
- __ mov(ecx, eax);
- __ or_(eax, edx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ mov(eax, ecx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(eax, edx); // Copy left operand in case of a stub call.
-
- switch (op) {
- case Token::SAR:
- __ SmiUntag(ecx);
- __ sar_cl(eax); // No checks of result necessary
- __ and_(eax, Immediate(~kSmiTagMask));
- break;
- case Token::SHL: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(positive, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::SHR: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shr_cl(eax);
- __ test(eax, Immediate(0xc0000000));
- __ j(zero, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::ADD:
- __ add(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::SUB:
- __ sub(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::MUL: {
- __ SmiUntag(eax);
- __ imul(eax, ecx);
- __ j(overflow, &stub_call);
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ mov(ebx, edx);
- __ or_(ebx, ecx);
- __ j(negative, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ or_(eax, ecx);
- break;
- case Token::BIT_AND:
- __ and_(eax, ecx);
- break;
- case Token::BIT_XOR:
- __ xor_(eax, ecx);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- PopOperand(edx);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(code, expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(eax);
-}
-
-void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
- DCHECK(expr->IsValidReferenceExpressionOrThis());
-
- Property* prop = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(prop);
-
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->AsVariableProxy();
- EffectContext context(this);
- EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
- proxy->hole_check_mode());
- break;
- }
- case NAMED_PROPERTY: {
- PushOperand(eax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ Move(StoreDescriptor::ReceiverRegister(), eax);
- PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- CallStoreIC(slot, prop->key()->AsLiteral()->value());
- break;
- }
- case KEYED_PROPERTY: {
- PushOperand(eax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ Move(StoreDescriptor::NameRegister(), eax);
- PopOperand(StoreDescriptor::ReceiverRegister()); // Receiver.
- PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
- CallKeyedStoreIC(slot);
- break;
- }
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
- Variable* var, MemOperand location) {
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
-}
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackSlot slot,
- HoleCheckMode hole_check_mode) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
- __ mov(StoreDescriptor::ReceiverRegister(),
- ContextOperand(StoreDescriptor::ReceiverRegister(),
- Context::EXTENSION_INDEX));
- CallStoreIC(slot, var->name(), kStoreGlobal);
-
- } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, ecx);
- // Perform an initialization check for lexically declared variables.
- if (hole_check_mode == HoleCheckMode::kRequired) {
- Label assign;
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &assign, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&assign);
- }
- if (var->mode() != CONST) {
- EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->throw_on_const_assignment(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError);
- }
- } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
- // Initializing assignment to const {this} needs a write barrier.
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label uninitialized_this;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(equal, &uninitialized_this);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&uninitialized_this);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else {
- DCHECK(var->mode() != CONST || op == Token::INIT);
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- DCHECK(!var->IsLookupSlot());
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- MemOperand location = VarOperand(var, ecx);
- EmitStoreToStackLocalOrContextSlot(var, location);
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- // eax : value
- // esp[0] : receiver
- Property* prop = expr->target()->AsProperty();
- DCHECK(prop != NULL);
- DCHECK(prop->key()->IsLiteral());
-
- PopOperand(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->AssignmentSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
- // eax : value
- // esp[0] : key
- // esp[kPointerSize] : receiver
-
- PopOperand(StoreDescriptor::NameRegister()); // Key.
- PopOperand(StoreDescriptor::ReceiverRegister());
- DCHECK(StoreDescriptor::ValueRegister().is(eax));
- CallKeyedStoreIC(expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- context()->Plug(eax);
-}
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
-
- // Get the target function.
- ConvertReceiverMode convert_mode;
- if (callee->IsVariableProxy()) {
- { StackValueContext context(this);
- EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, BailoutState::NO_REGISTERS);
- }
- // Push undefined as receiver. This is patched in the method prologue if it
- // is a sloppy mode method.
- PushOperand(isolate()->factory()->undefined_value());
- convert_mode = ConvertReceiverMode::kNullOrUndefined;
- } else {
- // Load the function from the receiver.
- DCHECK(callee->IsProperty());
- DCHECK(!callee->AsProperty()->IsSuperAccess());
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
- EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
- // Push the target function under the receiver.
- PushOperand(Operand(esp, 0));
- __ mov(Operand(esp, kPointerSize), eax);
- convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
- }
-
- EmitCall(expr, convert_mode);
-}
-
-
-// Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- Expression* callee = expr->expression();
-
- // Load the function from the receiver.
- DCHECK(callee->IsProperty());
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadDescriptor::NameRegister(), eax);
- EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(),
- BailoutState::TOS_REGISTER);
-
- // Push the target function under the receiver.
- PushOperand(Operand(esp, 0));
- __ mov(Operand(esp, kPointerSize), eax);
-
- EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
-}
-
-
-void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- SetCallPosition(expr, expr->tail_call_mode());
- if (expr->tail_call_mode() == TailCallMode::kAllow) {
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceTailCall);
- }
- // Update profiling counters before the tail call since we will
- // not return to this function.
- EmitProfilingCounterHandlingForReturnSequence(true);
- }
- Handle<Code> code =
- CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
- .code();
- __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ Move(eax, Immediate(arg_count));
- CallIC(code);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, eax);
-}
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push the constructor on the stack. If it's not a function, it's used as
- // the receiver for CALL_NON_FUNCTION; otherwise the value on the stack is
- // ignored.
- DCHECK(!expr->expression()->IsSuperPropertyReference());
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load function and argument count into edi and eax.
- __ Move(eax, Immediate(arg_count));
- __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- __ EmitLoadFeedbackVector(ebx);
- __ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
-
- CallConstructStub stub(isolate());
- CallIC(stub.GetCode());
- OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
- RestoreContext();
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_TYPED_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_PROXY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is not a JSReceiver, we return null.
- __ JumpIfSmi(eax, &null, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
- __ j(below, &null, Label::kNear);
-
- // Return 'Function' for JSFunction and JSBoundFunction objects.
- __ CmpInstanceType(eax, FIRST_FUNCTION_TYPE);
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- __ j(above_equal, &function, Label::kNear);
-
- // Check if the constructor in the map is a JS function.
- __ GetMapConstructor(eax, eax, ebx);
- __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor, Label::kNear);
-
- // eax now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done, Label::kNear);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
- __ jmp(&done, Label::kNear);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ mov(eax, isolate()->factory()->Function_string());
- __ jmp(&done, Label::kNear);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ mov(eax, isolate()->factory()->Object_string());
-
- // All done.
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register result = edx;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
- &need_conversion, &index_out_of_range);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Move(result, Immediate(isolate()->factory()->nan_value()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Move(result, Immediate(isolate()->factory()->undefined_value()));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitCall(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_LE(2, args->length());
- // Push target, receiver and arguments onto the stack.
- for (Expression* const arg : *args) {
- VisitForStackValue(arg);
- }
- PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
- // Move target to edi.
- int const argc = args->length() - 2;
- __ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
- // Call the target.
- __ mov(eax, Immediate(argc));
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(argc + 1);
- RestoreContext();
- // Discard the function left on TOS.
- context()->DropAndPlug(1, eax);
-}
-
-void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
- VisitForAccumulatorValue(args->at(0));
- __ AssertFunction(eax);
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
- context()->Plug(eax);
-}
-
-void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- __ movzx_b(eax, Operand::StaticVariable(debug_is_active));
- __ SmiTag(eax);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- Label runtime, done;
-
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime,
- NO_ALLOCATION_FLAGS);
- __ mov(ebx, NativeContextOperand());
- __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ pop(FieldOperand(eax, JSIteratorResult::kDoneOffset));
- __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- __ jmp(&done, Label::kNear);
-
- __ bind(&runtime);
- CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push function.
- __ LoadGlobalFunction(expr->context_index(), eax);
- PushOperand(eax);
-
- // Push undefined as receiver.
- PushOperand(isolate()->factory()->undefined_value());
-}
-
-
-void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- SetCallPosition(expr);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ Set(eax, arg_count);
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
- RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RestoreContext();
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- PushOperand(Smi::FromInt(language_mode()));
- CallRuntimeWithOperands(Runtime::kDeleteProperty);
- context()->Plug(eax);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode but
- // "delete this" is allowed.
- bool is_this = var->is_this();
- DCHECK(is_sloppy(language_mode()) || is_this);
- if (var->IsUnallocated()) {
- __ mov(eax, NativeContextOperand());
- __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
- __ push(Immediate(var->name()));
- __ Push(Smi::FromInt(SLOPPY));
- __ CallRuntime(Runtime::kDeleteProperty);
- context()->Plug(eax);
- } else {
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- // Result of deleting non-global variables is false. 'this' is
- // not really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- context()->Plug(is_this);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(isolate()->factory()->undefined_value());
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(),
- BailoutState::NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ mov(eax, isolate()->factory()->true_value());
- } else {
- __ Push(isolate()->factory()->true_value());
- }
- __ jmp(&done, Label::kNear);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(),
- BailoutState::NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ mov(eax, isolate()->factory()->false_value());
- } else {
- __ Push(isolate()->factory()->false_value());
- }
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- {
- AccumulatorValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ mov(ebx, eax);
- __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
- context()->Plug(eax);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
-
- Comment cmnt(masm_, "[ CountOperation");
-
- Property* prop = expr->expression()->AsProperty();
- LhsKind assign_type = Property::GetAssignType(prop);
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- PushOperand(Smi::kZero);
- }
- switch (assign_type) {
- case NAMED_PROPERTY: {
- // Put the object both on the stack and in the register.
- VisitForStackValue(prop->obj());
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
- EmitNamedPropertyLoad(prop);
- break;
- }
-
- case KEYED_PROPERTY: {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ mov(LoadDescriptor::ReceiverRegister(),
- Operand(esp, kPointerSize)); // Object.
- __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0)); // Key.
- EmitKeyedPropertyLoad(prop);
- break;
- }
-
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- case VARIABLE:
- UNREACHABLE();
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
- } else {
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
- if (ShouldInlineSmiCase(expr->op())) {
- Label slow;
- patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(no_overflow, &done, Label::kNear);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- __ jmp(&stub_call, Label::kNear);
- __ bind(&slow);
- }
-
- // Convert old value into a number.
- __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
- RestoreContext();
- PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- PushOperand(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
- }
- }
-
- SetExpressionPosition(expr);
-
- // Call stub for +1/-1.
- __ bind(&stub_call);
- __ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
- CallIC(code, expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in eax.
- switch (assign_type) {
- case VARIABLE: {
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
- proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
- context.Plug(eax);
- }
- // For all contexts except EffectContext we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
- proxy->hole_check_mode());
- PrepareForBailoutForId(expr->AssignmentId(),
- BailoutState::TOS_REGISTER);
- context()->Plug(eax);
- }
- break;
- }
- case NAMED_PROPERTY: {
- PopOperand(StoreDescriptor::ReceiverRegister());
- CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- PopOperand(StoreDescriptor::NameRegister());
- PopOperand(StoreDescriptor::ReceiverRegister());
- CallKeyedStoreIC(expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- // Result is on the stack
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNREACHABLE();
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- Factory* factory = isolate()->factory();
- if (String::Equals(check, factory->number_string())) {
- __ JumpIfSmi(eax, if_true);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Split(equal, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->string_string())) {
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- Split(below, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->symbol_string())) {
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, SYMBOL_TYPE, edx);
- Split(equal, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->boolean_string())) {
- __ cmp(eax, isolate()->factory()->true_value());
- __ j(equal, if_true);
- __ cmp(eax, isolate()->factory()->false_value());
- Split(equal, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->undefined_string())) {
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_false);
- __ JumpIfSmi(eax, if_false);
- // Check for undetectable objects => true.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->function_string())) {
- __ JumpIfSmi(eax, if_false);
- // Check for callable and not undetectable objects => true.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ecx, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
- __ cmp(ecx, 1 << Map::kIsCallable);
- Split(equal, if_true, if_false, fall_through);
- } else if (String::Equals(check, factory->object_string())) {
- __ JumpIfSmi(eax, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, edx);
- __ j(below, if_false);
- // Check for callable or undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- SetExpressionPosition(expr);
- EmitHasProperty();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForAccumulatorValue(expr->right());
- SetExpressionPosition(expr);
- PopOperand(edx);
- __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
- RestoreContext();
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- SetExpressionPosition(expr);
- Condition cc = CompareIC::ComputeCondition(op);
- PopOperand(edx);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, eax);
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
- CallIC(ic, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- Handle<Object> nil_value = nil == kNullValue
- ? isolate()->factory()->null_value()
- : isolate()->factory()->undefined_value();
- if (expr->op() == Token::EQ_STRICT) {
- __ cmp(eax, nil_value);
- Split(equal, if_true, if_false, fall_through);
- } else {
- __ JumpIfSmi(eax, if_false);
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(eax, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return eax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return esi;
-}
-
-void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
- DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(value, Operand(ebp, frame_offset));
-}
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(Operand(ebp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ mov(dst, ContextOperand(esi, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- DeclarationScope* closure_scope = scope()->GetClosureScope();
- if (closure_scope->is_script_scope() ||
- closure_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code.
- __ mov(eax, NativeContextOperand());
- PushOperand(ContextOperand(eax, Context::CLOSURE_INDEX));
- } else if (closure_scope->is_eval_scope()) {
- // Contexts nested inside eval code have the same closure as the context
- // calling eval, not the anonymous closure containing the eval code.
- // Fetch it from the context.
- PushOperand(ContextOperand(esi, Context::CLOSURE_INDEX));
- } else {
- DCHECK(closure_scope->is_function_scope());
- PushOperand(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
-}
-
-
-#undef __
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-#ifdef DEBUG
-static const byte kCallInstruction = 0xe8;
-#endif
-
-
-void BackEdgeTable::PatchAt(Code* unoptimized_code,
- Address pc,
- BackEdgeState target_state,
- Code* replacement_code) {
- Address call_target_address = pc - kIntSize;
- Address jns_instr_address = call_target_address - 3;
- Address jns_offset_address = call_target_address - 2;
-
- switch (target_state) {
- case INTERRUPT:
- // sub <profiling_counter>, <delta> ;; Not changed
- // jns ok
- // call <interrupt stub>
- // ok:
- *jns_instr_address = kJnsInstruction;
- *jns_offset_address = kJnsOffset;
- break;
- case ON_STACK_REPLACEMENT:
- // sub <profiling_counter>, <delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacement>
- // ok:
- *jns_instr_address = kNopByteOne;
- *jns_offset_address = kNopByteTwo;
- break;
- }
-
- Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
- call_target_address, unoptimized_code,
- replacement_code->entry());
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc) {
- Address call_target_address = pc - kIntSize;
- Address jns_instr_address = call_target_address - 3;
- DCHECK_EQ(kCallInstruction, *(call_target_address - 1));
-
- if (*jns_instr_address == kJnsInstruction) {
- DCHECK_EQ(kJnsOffset, *(call_target_address - 2));
- DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address,
- unoptimized_code));
- return INTERRUPT;
- }
-
- DCHECK_EQ(kNopByteOne, *jns_instr_address);
- DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
-
- DCHECK_EQ(
- isolate->builtins()->OnStackReplacement()->entry(),
- Assembler::target_address_at(call_target_address, unoptimized_code));
- return ON_STACK_REPLACEMENT;
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
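The deleted BackEdgeTable code above switches a back edge between an interrupt check and on-stack replacement by rewriting two bytes in front of the patched call target. A minimal stand-alone sketch of that byte-patching idea, using the byte values from the deleted code but not V8's actual interface:

    #include <cstdint>

    enum class BackEdgeState { kInterrupt, kOnStackReplacement };

    // Rewrites the two bytes preceding the patched call: either "jns <offset>",
    // which skips the interrupt call, or a two-byte nop (0x66 0x90) so execution
    // falls through into the on-stack-replacement call.
    void PatchBackEdge(uint8_t* jns_instr_address, BackEdgeState target) {
      const uint8_t kJnsInstruction = 0x79, kJnsOffset = 0x11;
      const uint8_t kNopByteOne = 0x66, kNopByteTwo = 0x90;
      if (target == BackEdgeState::kInterrupt) {
        jns_instr_address[0] = kJnsInstruction;
        jns_instr_address[1] = kJnsOffset;
      } else {
        jns_instr_address[0] = kNopByteOne;
        jns_instr_address[1] = kNopByteTwo;
      }
    }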
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index cc5451f1b3..d0395330c2 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -199,7 +199,7 @@ class DebugSectionBase : public ZoneObject {
struct MachOSectionHeader {
char sectname[16];
char segname[16];
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
uint32_t addr;
uint32_t size;
#else
@@ -230,7 +230,7 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
uint32_t flags)
: name_(name), segment_(segment), align_(align), flags_(flags) {
if (align_ != 0) {
- DCHECK(base::bits::IsPowerOfTwo32(align));
+ DCHECK(base::bits::IsPowerOfTwo(align));
align_ = WhichPowerOf2(align_);
}
}
@@ -507,7 +507,7 @@ class MachO BASE_EMBEDDED {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
@@ -533,7 +533,7 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
DCHECK(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
@@ -558,7 +558,7 @@ class MachO BASE_EMBEDDED {
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
@@ -646,7 +646,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
DCHECK(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -668,7 +668,7 @@ class ELF BASE_EMBEDDED {
#endif
memcpy(header->ident, ident, 16);
header->type = 1;
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
header->machine = 3;
#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
@@ -783,8 +783,8 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
-#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
- (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
+ (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
(V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
SerializedLayout(uint32_t name,
@@ -1146,7 +1146,7 @@ class DebugInfoSection : public DebugSection {
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index f603af8018..3d5348a537 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -503,7 +503,7 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
// the same state it would be after a call to Clear().
PendingPhantomCallbacksSecondPassTask(
List<PendingPhantomCallback>* pending_phantom_callbacks, Isolate* isolate)
- : CancelableTask(isolate) {
+ : CancelableTask(isolate), isolate_(isolate) {
pending_phantom_callbacks_.Swap(pending_phantom_callbacks);
}
@@ -516,7 +516,10 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
}
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
List<PendingPhantomCallback> pending_phantom_callbacks_;
DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
@@ -647,6 +650,21 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
}
}
+void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
+ RootVisitor* v, size_t start, size_t end) {
+ for (size_t i = start; i < end; ++i) {
+ Node* node = new_space_nodes_[static_cast<int>(i)];
+ if (node->IsWeak() && !JSObject::IsUnmodifiedApiObject(node->location())) {
+ node->set_active(true);
+ }
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ node->is_active())) {
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ }
+ }
+}
+
void GlobalHandles::IdentifyWeakUnmodifiedObjects(
WeakSlotCallback is_unmodified) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
@@ -904,6 +922,17 @@ void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
}
DISABLE_CFI_PERF
+void GlobalHandles::IterateNewSpaceRoots(RootVisitor* v, size_t start,
+ size_t end) {
+ for (size_t i = start; i < end; ++i) {
+ Node* node = new_space_nodes_[static_cast<int>(i)];
+ if (node->IsRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ }
+ }
+}
+
+DISABLE_CFI_PERF
void GlobalHandles::ApplyPersistentHandleVisitor(
v8::PersistentHandleVisitor* visitor, GlobalHandles::Node* node) {
v8::Value* value = ToApi<v8::Value>(Handle<Object>(node->location()));
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index c56568de9f..6a25134698 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -92,6 +92,8 @@ class GlobalHandles {
number_of_phantom_handle_resets_ = 0;
}
+ size_t NumberOfNewSpaceNodes() { return new_space_nodes_.length(); }
+
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
@@ -118,6 +120,7 @@ class GlobalHandles {
void IterateAllRoots(RootVisitor* v);
void IterateAllNewSpaceRoots(RootVisitor* v);
+ void IterateNewSpaceRoots(RootVisitor* v, size_t start, size_t end);
// Iterates over all handles that have embedder-assigned class ID.
void IterateAllRootsWithClassIds(v8::PersistentHandleVisitor* v);
@@ -142,9 +145,14 @@ class GlobalHandles {
// guaranteed to contain all handles holding new space objects (but
// may also include old space objects).
- // Iterates over strong and dependent handles. See the node above.
+ // Iterates over strong and dependent handles. See the note above.
void IterateNewSpaceStrongAndDependentRoots(RootVisitor* v);
+ // Iterates over strong and dependent handles. See the note above.
+ // Also marks unmodified nodes in the same iteration.
+ void IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
+ RootVisitor* v, size_t start, size_t end);
+
// Finds weak independent or unmodified handles satisfying
// the callback predicate and marks them as pending. See the note above.
void MarkNewSpaceWeakUnmodifiedObjectsPending(
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 1194472d29..6a07802cd6 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -110,6 +110,8 @@ namespace internal {
#define V8_DEFAULT_STACK_SIZE_KB 984
#endif
+// Minimum stack size in KB required by compilers.
+const int kStackSpaceRequiredForCompilation = 40;
// Determine whether double field unboxing feature is enabled.
#if V8_TARGET_ARCH_64_BIT
@@ -151,6 +153,7 @@ const int kShortSize = sizeof(short); // NOLINT
const int kIntSize = sizeof(int);
const int kInt32Size = sizeof(int32_t);
const int kInt64Size = sizeof(int64_t);
+const int kUInt32Size = sizeof(uint32_t);
const int kSizetSize = sizeof(size_t);
const int kFloatSize = sizeof(float);
const int kDoubleSize = sizeof(double);
@@ -164,7 +167,7 @@ const int kRegisterSize = kPointerSize;
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
#else
const int kElidedFrameSlots = 0;
@@ -314,9 +317,10 @@ inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
switch (mode) {
case SLOPPY: return os << "sloppy";
case STRICT: return os << "strict";
- default: UNREACHABLE();
+ case LANGUAGE_END:
+ UNREACHABLE();
}
- return os;
+ UNREACHABLE();
}
inline bool is_sloppy(LanguageMode language_mode) {
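This hunk, and several later ones in globals.h, drop the dead return after UNREACHABLE() and, where possible, replace the default case with explicit enum cases so the switch stays exhaustive. A small sketch of that pattern, using __builtin_unreachable() only as a stand-in for V8's UNREACHABLE() macro:

    #include <ostream>

    enum class Mode { kSloppy, kStrict };

    inline std::ostream& operator<<(std::ostream& os, Mode mode) {
      switch (mode) {  // no default: -Wswitch flags any newly added enum value
        case Mode::kSloppy: return os << "sloppy";
        case Mode::kStrict: return os << "strict";
      }
      __builtin_unreachable();  // every case above already returned
    }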
@@ -356,7 +360,21 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
return os << "Soft";
}
UNREACHABLE();
- return os;
+}
+
+// Indicates whether the lookup is related to sloppy-mode block-scoped
+// function hoisting, and is a synthetic assignment for that.
+enum class LookupHoistingMode { kNormal, kLegacySloppy };
+
+inline std::ostream& operator<<(std::ostream& os,
+ const LookupHoistingMode& mode) {
+ switch (mode) {
+ case LookupHoistingMode::kNormal:
+ return os << "normal hoisting";
+ case LookupHoistingMode::kLegacySloppy:
+ return os << "legacy sloppy hoisting";
+ }
+ UNREACHABLE();
}
// Mask for the sign bit in a smi.
@@ -518,6 +536,8 @@ const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
+enum class AccessMode { ATOMIC, NON_ATOMIC };
+
// Possible outcomes for decisions.
enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
@@ -535,7 +555,6 @@ inline std::ostream& operator<<(std::ostream& os, Decision decision) {
return os << "False";
}
UNREACHABLE();
- return os;
}
// Supported write barrier modes.
@@ -562,7 +581,6 @@ inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
return os << "FullWriteBarrier";
}
UNREACHABLE();
- return os;
}
// A flag that indicates whether objects should be pretenured when
@@ -579,7 +597,6 @@ inline std::ostream& operator<<(std::ostream& os, const PretenureFlag& flag) {
return os << "Tenured";
}
UNREACHABLE();
- return os;
}
enum MinimumCapacity {
@@ -593,6 +610,7 @@ enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode {
VISIT_ALL,
+ VISIT_ALL_IN_MINOR_MC_MARK,
VISIT_ALL_IN_MINOR_MC_UPDATE,
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
@@ -741,7 +759,11 @@ struct AccessorDescriptor {
// Testers for test.
#define HAS_SMI_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
+ ((reinterpret_cast<intptr_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
+
+#define HAS_HEAP_OBJECT_TAG(value) \
+ (((reinterpret_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
+ ::i::kHeapObjectTag))
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
@@ -831,23 +853,6 @@ inline std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
return os << "ANY";
}
UNREACHABLE();
- return os;
-}
-
-// Defines whether tail call optimization is allowed.
-enum class TailCallMode : unsigned { kAllow, kDisallow };
-
-inline size_t hash_value(TailCallMode mode) { return bit_cast<unsigned>(mode); }
-
-inline std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
- switch (mode) {
- case TailCallMode::kAllow:
- return os << "ALLOW_TAIL_CALLS";
- case TailCallMode::kDisallow:
- return os << "DISALLOW_TAIL_CALLS";
- }
- UNREACHABLE();
- return os;
}
// Valid hints for the abstract operation OrdinaryToPrimitive,
@@ -879,7 +884,6 @@ inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
return os << "REST_PARAMETER";
}
UNREACHABLE();
- return os;
}
// Used to specify if a macro instruction must perform a smi check on tagged
@@ -908,16 +912,10 @@ enum AllocationSiteMode {
};
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
-// The x87 FPU converts sNaN to qNaN automatically when loading sNaN from
-// memory.
-// Use the mips sNaN, which is an unused qNaN in the x87 port, as sNaN to
-// work around this issue for some test cases.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
- (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
- (V8_TARGET_ARCH_X87)
+ (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
@@ -936,12 +934,12 @@ const double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode : uint8_t {
// User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
LET, // declared via 'let' declarations (first lexical)
CONST, // declared via 'const' declarations (last lexical)
+ VAR, // declared via 'var', and 'function' declarations
+
// Variables introduced by the compiler:
TEMPORARY, // temporary variables (not user-visible), stack-allocated
// unless the scope as a whole has forced context allocation
@@ -953,12 +951,10 @@ enum VariableMode : uint8_t {
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
-
- kLastVariableMode = DYNAMIC_LOCAL
+ DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
};
// Printing support
@@ -981,7 +977,6 @@ inline const char* VariableMode2String(VariableMode mode) {
return "TEMPORARY";
}
UNREACHABLE();
- return NULL;
}
#endif
@@ -989,8 +984,7 @@ enum VariableKind : uint8_t {
NORMAL_VARIABLE,
FUNCTION_VARIABLE,
THIS_VARIABLE,
- SLOPPY_FUNCTION_NAME_VARIABLE,
- kLastKind = SLOPPY_FUNCTION_NAME_VARIABLE
+ SLOPPY_FUNCTION_NAME_VARIABLE
};
inline bool IsDynamicVariableMode(VariableMode mode) {
@@ -999,13 +993,14 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- STATIC_ASSERT(VAR == 0); // Implies that mode >= VAR.
- return mode <= CONST;
+ STATIC_ASSERT(LET == 0); // Implies that mode >= LET.
+ return mode <= VAR;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= CONST;
+ STATIC_ASSERT(LET == 0); // Implies that mode >= LET.
+ return mode <= CONST;
}
enum VariableLocation : uint8_t {
@@ -1232,7 +1227,6 @@ inline std::ostream& operator<<(std::ostream& os,
return os << "Other";
}
UNREACHABLE();
- return os;
}
inline uint32_t ObjectHash(Address address) {
@@ -1246,7 +1240,7 @@ inline uint32_t ObjectHash(Address address) {
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
-// kString -> kAny
+// kNonEmptyString -> kString -> kAny
// TODO(mythria): Remove kNumber type when crankshaft can handle Oddballs
// similar to Numbers. We don't need kNumber feedback for Turbofan. Extra
// information about Number might reduce few instructions but causes more
@@ -1259,8 +1253,9 @@ class BinaryOperationFeedback {
kSignedSmall = 0x1,
kNumber = 0x3,
kNumberOrOddball = 0x7,
- kString = 0x8,
- kAny = 0x1F
+ kNonEmptyString = 0x8,
+ kString = 0x18,
+ kAny = 0x3F
};
};
@@ -1269,6 +1264,7 @@ class BinaryOperationFeedback {
// to a more generic type when we combine feedback.
// kSignedSmall -> kNumber -> kAny
// kInternalizedString -> kString -> kAny
+// kSymbol -> kAny
// kReceiver -> kAny
// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
class CompareOperationFeedback {
@@ -1280,8 +1276,9 @@ class CompareOperationFeedback {
kNumberOrOddball = 0x7,
kInternalizedString = 0x8,
kString = 0x18,
- kReceiver = 0x20,
- kAny = 0x7F
+ kSymbol = 0x20,
+ kReceiver = 0x40,
+ kAny = 0xff
};
};
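As the comments above this hunk note, feedback kinds are combined with a bitwise OR, which can only move feedback toward a more generic state. A small sketch using the bit values from this hunk and a hypothetical CombineFeedback helper (not V8's actual code):

    #include <cstdint>

    enum CompareFeedback : uint32_t {
      kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x03,
      kInternalizedString = 0x08, kString = 0x18,
      kSymbol = 0x20, kReceiver = 0x40, kAny = 0xff,
    };

    inline uint32_t CombineFeedback(uint32_t a, uint32_t b) { return a | b; }

    // CombineFeedback(kSignedSmall, kNumber) == kNumber (0x01 | 0x03 == 0x03),
    // while CombineFeedback(kString, kReceiver) == 0x58, which only kAny covers.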
@@ -1303,7 +1300,6 @@ inline std::ostream& operator<<(std::ostream& os, UnicodeEncoding encoding) {
return os << "UTF32";
}
UNREACHABLE();
- return os;
}
enum class IterationKind { kKeys, kValues, kEntries };
@@ -1318,7 +1314,6 @@ inline std::ostream& operator<<(std::ostream& os, IterationKind kind) {
return os << "IterationKind::kEntries";
}
UNREACHABLE();
- return os;
}
// Flags for the runtime function kDefineDataPropertyInLiteral. A property can
@@ -1344,63 +1339,6 @@ enum ExternalArrayType {
kExternalUint8ClampedArray,
};
-// Static information used by SuspendGenerator bytecode & GeneratorStore, in
-// order to determine where to store bytecode offset in generator.
-enum class SuspendFlags {
- kYield = 0,
- kYieldStar = 1,
- kAwait = 2,
- kSuspendTypeMask = 3,
-
- kGenerator = 0 << 2,
- kAsyncGenerator = 1 << 2,
- kGeneratorTypeMask = 1 << 2,
-
- kBitWidth = 3,
-
- // Aliases
- kGeneratorYield = kGenerator | kYield,
- kGeneratorYieldStar = kGenerator | kYieldStar,
- kGeneratorAwait = kGenerator | kAwait,
- kAsyncGeneratorYield = kAsyncGenerator | kYield,
- kAsyncGeneratorYieldStar = kAsyncGenerator | kYieldStar,
- kAsyncGeneratorAwait = kAsyncGenerator | kAwait
-};
-
-inline constexpr SuspendFlags operator&(SuspendFlags lhs, SuspendFlags rhs) {
- return static_cast<SuspendFlags>(static_cast<uint8_t>(lhs) &
- static_cast<uint8_t>(rhs));
-}
-
-inline constexpr SuspendFlags operator|(SuspendFlags lhs, SuspendFlags rhs) {
- return static_cast<SuspendFlags>(static_cast<uint8_t>(lhs) |
- static_cast<uint8_t>(rhs));
-}
-
-inline SuspendFlags& operator|=(SuspendFlags& lhs, SuspendFlags rhs) {
- lhs = lhs | rhs;
- return lhs;
-}
-
-inline SuspendFlags operator~(SuspendFlags lhs) {
- return static_cast<SuspendFlags>(~static_cast<uint8_t>(lhs));
-}
-
-inline const char* SuspendTypeFor(SuspendFlags flags) {
- switch (flags & SuspendFlags::kSuspendTypeMask) {
- case SuspendFlags::kYield:
- return "yield";
- case SuspendFlags::kYieldStar:
- return "yield*";
- case SuspendFlags::kAwait:
- return "await";
- default:
- break;
- }
- UNREACHABLE();
- return "";
-}
-
struct AssemblerDebugInfo {
AssemblerDebugInfo(const char* name, const char* file, int line)
: name(name), file(file), line(line) {}
@@ -1415,6 +1353,52 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
+enum class OptimizationMarker {
+ kNone,
+ kCompileOptimized,
+ kCompileOptimizedConcurrent,
+ kInOptimizationQueue
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+ const OptimizationMarker& marker) {
+ switch (marker) {
+ case OptimizationMarker::kNone:
+ return os << "OptimizationMarker::kNone";
+ case OptimizationMarker::kCompileOptimized:
+ return os << "OptimizationMarker::kCompileOptimized";
+ case OptimizationMarker::kCompileOptimizedConcurrent:
+ return os << "OptimizationMarker::kCompileOptimizedConcurrent";
+ case OptimizationMarker::kInOptimizationQueue:
+ return os << "OptimizationMarker::kInOptimizationQueue";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
+
+#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
+ C(Handler, handler) \
+ C(CEntryFP, c_entry_fp) \
+ C(CFunction, c_function) \
+ C(Context, context) \
+ C(PendingException, pending_exception) \
+ C(PendingHandlerContext, pending_handler_context) \
+ C(PendingHandlerCode, pending_handler_code) \
+ C(PendingHandlerOffset, pending_handler_offset) \
+ C(PendingHandlerFP, pending_handler_fp) \
+ C(PendingHandlerSP, pending_handler_sp) \
+ C(ExternalCaughtException, external_caught_exception) \
+ C(JSEntrySP, js_entry_sp)
+
+enum IsolateAddressId {
+#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
+ FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
+#undef DECLARE_ENUM
+ kIsolateAddressCount
+};
+
} // namespace internal
} // namespace v8
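FOR_EACH_ISOLATE_ADDRESS_NAME added above is an X-macro: one list is expanded with different per-entry macros to generate the IsolateAddressId enum here and matching tables elsewhere. A reduced sketch with made-up names showing how the expansion works:

    #define FOR_EACH_EXAMPLE_NAME(C) \
      C(Handler, handler)            \
      C(Context, context)

    enum ExampleAddressId {
    #define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
      FOR_EACH_EXAMPLE_NAME(DECLARE_ENUM)
    #undef DECLARE_ENUM
      kExampleAddressCount  // preceded by kHandlerAddress, kContextAddress
    };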
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 4afbdb5bf4..57f8c8bc58 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -12,6 +12,16 @@
namespace v8 {
namespace internal {
+// Handles should be trivially copyable so that they can be efficiently passed
+// by value. If they are not trivially copyable, they cannot be passed in
+// registers.
+static_assert(IS_TRIVIALLY_COPYABLE(HandleBase),
+ "HandleBase should be trivially copyable");
+static_assert(IS_TRIVIALLY_COPYABLE(Handle<Object>),
+ "Handle<Object> should be trivially copyable");
+static_assert(IS_TRIVIALLY_COPYABLE(MaybeHandle<Object>),
+ "MaybeHandle<Object> should be trivially copyable");
+
#ifdef DEBUG
bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
DCHECK_NOT_NULL(location_);
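The static_asserts added above pin down that handles remain trivially copyable so they can be passed by value in registers. A reduced sketch of the property being asserted, assuming IS_TRIVIALLY_COPYABLE wraps std::is_trivially_copyable:

    #include <type_traits>

    class Object;

    class ExampleHandle {
     public:
      explicit ExampleHandle(Object** location) : location_(location) {}
      // No user-provided copy/move operations or destructor, so the default
      // (trivial) ones are used and the type stays trivially copyable.
     private:
      Object** location_;
    };

    static_assert(std::is_trivially_copyable<ExampleHandle>::value,
                  "ExampleHandle should be trivially copyable");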
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 3afda94208..21c2f1987b 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -125,7 +125,7 @@ class Handle final : public HandleBase {
template <typename S>
static const Handle<T> cast(Handle<S> that) {
- T::cast(*reinterpret_cast<T**>(that.location_));
+ T::cast(*reinterpret_cast<T**>(that.location()));
return Handle<T>(reinterpret_cast<T**>(that.location_));
}
@@ -185,7 +185,6 @@ template <typename T>
class MaybeHandle final {
public:
V8_INLINE MaybeHandle() {}
- V8_INLINE ~MaybeHandle() {}
// Constructor for handling automatic up casting from Handle.
// Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 525da14cd3..2a5ff1dfb6 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -8,6 +8,7 @@
#define INTERNALIZED_STRING_LIST(V) \
V(anonymous_function_string, "(anonymous function)") \
V(anonymous_string, "anonymous") \
+ V(add_string, "add") \
V(apply_string, "apply") \
V(arguments_string, "arguments") \
V(Arguments_string, "Arguments") \
@@ -45,9 +46,11 @@
V(constructor_string, "constructor") \
V(construct_string, "construct") \
V(create_string, "create") \
+ V(currency_string, "currency") \
V(Date_string, "Date") \
V(dayperiod_string, "dayperiod") \
V(day_string, "day") \
+ V(decimal_string, "decimal") \
V(default_string, "default") \
V(defineProperty_string, "defineProperty") \
V(deleteProperty_string, "deleteProperty") \
@@ -72,6 +75,7 @@
V(EvalError_string, "EvalError") \
V(false_string, "false") \
V(flags_string, "flags") \
+ V(fraction_string, "fraction") \
V(function_string, "function") \
V(Function_string, "Function") \
V(Generator_string, "Generator") \
@@ -81,6 +85,7 @@
V(get_string, "get") \
V(get_space_string, "get ") \
V(global_string, "global") \
+ V(group_string, "group") \
V(groups_string, "groups") \
V(has_string, "has") \
V(hour_string, "hour") \
@@ -88,7 +93,9 @@
V(illegal_access_string, "illegal access") \
V(illegal_argument_string, "illegal argument") \
V(index_string, "index") \
- V(infinity_string, "Infinity") \
+ V(infinity_string, "infinity") \
+ V(Infinity_string, "Infinity") \
+ V(integer_string, "integer") \
V(input_string, "input") \
V(isExtensible_string, "isExtensible") \
V(isView_string, "isView") \
@@ -102,15 +109,17 @@
V(literal_string, "literal") \
V(Map_string, "Map") \
V(message_string, "message") \
- V(minus_infinity_string, "-Infinity") \
+ V(minus_Infinity_string, "-Infinity") \
V(minus_zero_string, "-0") \
+ V(minusSign_string, "minusSign") \
V(minute_string, "minute") \
V(Module_string, "Module") \
V(month_string, "month") \
V(multiline_string, "multiline") \
V(name_string, "name") \
V(native_string, "native") \
- V(nan_string, "NaN") \
+ V(nan_string, "nan") \
+ V(NaN_string, "NaN") \
V(new_target_string, ".new.target") \
V(next_string, "next") \
V(NFC_string, "NFC") \
@@ -125,7 +134,10 @@
V(object_string, "object") \
V(Object_string, "Object") \
V(ok, "ok") \
+ V(one_string, "1") \
V(ownKeys_string, "ownKeys") \
+ V(percentSign_string, "percentSign") \
+ V(plusSign_string, "plusSign") \
V(position_string, "position") \
V(preventExtensions_string, "preventExtensions") \
V(Promise_string, "Promise") \
@@ -190,7 +202,8 @@
V(weekday_string, "weekday") \
V(will_handle_string, "willHandle") \
V(writable_string, "writable") \
- V(year_string, "year")
+ V(year_string, "year") \
+ V(zero_string, "0")
#define PRIVATE_SYMBOL_LIST(V) \
V(array_iteration_kind_symbol) \
@@ -198,6 +211,8 @@
V(array_iterator_object_symbol) \
V(call_site_frame_array_symbol) \
V(call_site_frame_index_symbol) \
+ V(console_context_id_symbol) \
+ V(console_context_name_symbol) \
V(class_end_position_symbol) \
V(class_start_position_symbol) \
V(detailed_stack_trace_symbol) \
@@ -226,6 +241,8 @@
V(sealed_symbol) \
V(stack_trace_symbol) \
V(strict_function_transition_symbol) \
+ V(wasm_function_index_symbol) \
+ V(wasm_instance_symbol) \
V(uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 32da1ecead..79eea3aaab 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1,7 +1,8 @@
set noparent
hpayer@chromium.org
-jochen@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
ulan@chromium.org
+
+# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index d20f128002..0688a29f3a 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -14,7 +14,7 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
void* data = buffer->backing_store();
if (!data) return;
- size_t length = NumberToSize(buffer->byte_length());
+ size_t length = buffer->allocation_length();
Page* page = Page::FromAddress(buffer->address());
{
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
@@ -37,31 +37,33 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
if (!data) return;
Page* page = Page::FromAddress(buffer->address());
- size_t length = 0;
+ size_t length = buffer->allocation_length();
{
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
- length = tracker->Remove(buffer);
+ tracker->Remove(buffer, length);
}
heap->update_external_memory(-static_cast<intptr_t>(length));
}
-void LocalArrayBufferTracker::Add(Key key, const Value& value) {
- auto ret = array_buffers_.insert(std::make_pair(key, value));
+void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
+ DCHECK_GE(retained_size_ + length, retained_size_);
+ retained_size_ += length;
+ auto ret = array_buffers_.insert(buffer);
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
DCHECK(ret.second);
}
-LocalArrayBufferTracker::Value LocalArrayBufferTracker::Remove(Key key) {
- TrackingData::iterator it = array_buffers_.find(key);
+void LocalArrayBufferTracker::Remove(JSArrayBuffer* buffer, size_t length) {
+ DCHECK_GE(retained_size_, retained_size_ - length);
+ retained_size_ -= length;
+ TrackingData::iterator it = array_buffers_.find(buffer);
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
- Value value = it->second;
array_buffers_.erase(it);
- return value;
}
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index b4b4757808..08b5750752 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -17,19 +17,21 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
+ size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
+ const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
- const size_t len = it->second;
+ freed_memory += length;
buffer->FreeBackingStore();
-
- freed_memory += len;
it = array_buffers_.erase(it);
} else {
+ retained_size += length;
++it;
}
}
+ retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
@@ -39,36 +41,41 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
JSArrayBuffer* new_buffer = nullptr;
+ JSArrayBuffer* old_buffer = nullptr;
size_t freed_memory = 0;
+ size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- const CallbackResult result = callback(it->first, &new_buffer);
+ old_buffer = reinterpret_cast<JSArrayBuffer*>(*it);
+ const size_t length = old_buffer->allocation_length();
+ const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
+ retained_size += length;
++it;
} else if (result == kUpdateEntry) {
DCHECK_NOT_NULL(new_buffer);
Page* target_page = Page::FromAddress(new_buffer->address());
- // We need to lock the target page because we cannot guarantee
- // exclusive access to new space pages.
- if (target_page->InNewSpace()) target_page->mutex()->Lock();
- LocalArrayBufferTracker* tracker = target_page->local_tracker();
- if (tracker == nullptr) {
- target_page->AllocateLocalTracker();
- tracker = target_page->local_tracker();
+ {
+ base::LockGuard<base::RecursiveMutex> guard(target_page->mutex());
+ LocalArrayBufferTracker* tracker = target_page->local_tracker();
+ if (tracker == nullptr) {
+ target_page->AllocateLocalTracker();
+ tracker = target_page->local_tracker();
+ }
+ DCHECK_NOT_NULL(tracker);
+ DCHECK_EQ(length, new_buffer->allocation_length());
+ tracker->Add(new_buffer, length);
}
- DCHECK_NOT_NULL(tracker);
- tracker->Add(new_buffer, it->second);
- if (target_page->InNewSpace()) target_page->mutex()->Unlock();
it = array_buffers_.erase(it);
} else if (result == kRemoveEntry) {
- const size_t len = it->second;
- it->first->FreeBackingStore();
- freed_memory += len;
+ freed_memory += length;
+ old_buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
UNREACHABLE();
}
}
+ retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
@@ -85,6 +92,17 @@ void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
heap->account_external_memory_concurrently_freed();
}
+size_t ArrayBufferTracker::RetainedInNewSpace(Heap* heap) {
+ size_t retained_size = 0;
+ for (Page* page : PageRange(heap->new_space()->ToSpaceStart(),
+ heap->new_space()->ToSpaceEnd())) {
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) continue;
+ retained_size += tracker->retained_size();
+ }
+ return retained_size;
+}
+
void ArrayBufferTracker::FreeDead(Page* page,
const MarkingState& marking_state) {
// Callers need to ensure having the page lock.
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 56f042780e..e1b4dc4e4d 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
-#include <unordered_map>
+#include <unordered_set>
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
@@ -38,6 +38,9 @@ class ArrayBufferTracker : public AllStatic {
// Does not take any locks and can only be called during Scavenge.
static void FreeDeadInNewSpace(Heap* heap);
+ // Number of array buffer bytes retained from new space.
+ static size_t RetainedInNewSpace(Heap* heap);
+
// Frees all backing store pointers for dead JSArrayBuffer on a given page.
// Requires marking information to be present. Requires the page lock to be
// taken by the caller.
@@ -60,17 +63,15 @@ class ArrayBufferTracker : public AllStatic {
// Never use directly but instead always call through |ArrayBufferTracker|.
class LocalArrayBufferTracker {
public:
- typedef JSArrayBuffer* Key;
- typedef size_t Value;
-
enum CallbackResult { kKeepEntry, kUpdateEntry, kRemoveEntry };
enum FreeMode { kFreeDead, kFreeAll };
- explicit LocalArrayBufferTracker(Heap* heap) : heap_(heap) {}
+ explicit LocalArrayBufferTracker(Heap* heap)
+ : heap_(heap), retained_size_(0) {}
~LocalArrayBufferTracker();
- inline void Add(Key key, const Value& value);
- inline Value Remove(Key key);
+ inline void Add(JSArrayBuffer* buffer, size_t length);
+ inline void Remove(JSArrayBuffer* buffer, size_t length);
// Frees up array buffers.
//
@@ -90,17 +91,23 @@ class LocalArrayBufferTracker {
template <typename Callback>
void Process(Callback callback);
- bool IsEmpty() { return array_buffers_.empty(); }
+ bool IsEmpty() const { return array_buffers_.empty(); }
- bool IsTracked(Key key) {
- return array_buffers_.find(key) != array_buffers_.end();
+ bool IsTracked(JSArrayBuffer* buffer) const {
+ return array_buffers_.find(buffer) != array_buffers_.end();
}
+ size_t retained_size() const { return retained_size_; }
+
private:
- typedef std::unordered_map<Key, Value> TrackingData;
+ typedef std::unordered_set<JSArrayBuffer*> TrackingData;
Heap* heap_;
+ // The set contains raw heap pointers which are removed by the GC upon
+ // processing the tracker through its owning page.
TrackingData array_buffers_;
+ // Retained size of array buffers for this tracker in bytes.
+ size_t retained_size_;
};
} // namespace internal
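The tracker changes above replace the unordered_map from buffer to length with an unordered_set of buffer pointers plus a running retained_size_ counter that Add/Remove keep in sync. A minimal sketch of that bookkeeping, with invented names:

    #include <cstddef>
    #include <unordered_set>

    struct Buffer { size_t allocation_length; };

    class ExampleTracker {
     public:
      void Add(Buffer* buffer) {
        retained_size_ += buffer->allocation_length;
        buffers_.insert(buffer);
      }
      void Remove(Buffer* buffer) {
        retained_size_ -= buffer->allocation_length;
        buffers_.erase(buffer);
      }
      size_t retained_size() const { return retained_size_; }

     private:
      std::unordered_set<Buffer*> buffers_;  // raw pointers, owned elsewhere
      size_t retained_size_ = 0;             // sum of tracked buffer lengths
    };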
diff --git a/deps/v8/src/heap/code-stats.h b/deps/v8/src/heap/code-stats.h
index 499c9fa5ac..fa106d6435 100644
--- a/deps/v8/src/heap/code-stats.h
+++ b/deps/v8/src/heap/code-stats.h
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler.h"
-#include "src/heap/spaces.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-
namespace v8 {
namespace internal {
+class Isolate;
+class HeapObject;
+class LargeObjectSpace;
+class PagedSpace;
+class RelocIterator;
+
class CodeStatistics {
public:
// Collect statistics related to code size.
diff --git a/deps/v8/src/heap/concurrent-marking-deque.h b/deps/v8/src/heap/concurrent-marking-deque.h
deleted file mode 100644
index 1490923a2f..0000000000
--- a/deps/v8/src/heap/concurrent-marking-deque.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CONCURRENT_MARKING_DEQUE_
-#define V8_HEAP_CONCURRENT_MARKING_DEQUE_
-
-#include <deque>
-
-#include "src/base/platform/mutex.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class Isolate;
-class HeapObject;
-
-enum class MarkingThread { kMain, kConcurrent };
-
-enum class TargetDeque { kShared, kBailout };
-
-// The concurrent marking deque supports deque operations for two threads:
-// main and concurrent. It is implemented using two deques: shared and bailout.
-//
-// The concurrent thread can use the push and pop operations with the
-// MarkingThread::kConcurrent argument. All other operations are intended
-// to be used by the main thread only.
-//
-// The interface of the concurrent marking deque for the main thread matches
-// that of the sequential marking deque, so they can be easily switched
-// at compile time without updating the main thread call-sites.
-//
-// The shared deque is shared between the main thread and the concurrent
-// thread, so both threads can push to and pop from the shared deque.
-// The bailout deque stores objects that cannot be processed by the concurrent
-// thread. Only the concurrent thread can push to it and only the main thread
-// can pop from it.
-class ConcurrentMarkingDeque {
- public:
- // The heap parameter is needed to match the interface
- // of the sequential marking deque.
- explicit ConcurrentMarkingDeque(Heap* heap) {}
-
- // Pushes the object into the specified deque assuming that the function is
- // called on the specified thread. The main thread can push only to the shared
- // deque. The concurrent thread can push to both deques.
- bool Push(HeapObject* object, MarkingThread thread = MarkingThread::kMain,
- TargetDeque target = TargetDeque::kShared) {
- switch (target) {
- case TargetDeque::kShared:
- shared_deque_.Push(object);
- break;
- case TargetDeque::kBailout:
- bailout_deque_.Push(object);
- break;
- }
- return true;
- }
-
- // Pops an object from the bailout or shared deque assuming that the function
- // is called on the specified thread. The main thread first tries to pop the
- // bailout deque. If the deque is empty then it tries the shared deque.
- // If the shared deque is also empty, then the function returns nullptr.
- // The concurrent thread pops only from the shared deque.
- HeapObject* Pop(MarkingThread thread = MarkingThread::kMain) {
- if (thread == MarkingThread::kMain) {
- HeapObject* result = bailout_deque_.Pop();
- if (result != nullptr) return result;
- }
- return shared_deque_.Pop();
- }
-
- // All the following operations can be used only by the main thread.
- void Clear() {
- bailout_deque_.Clear();
- shared_deque_.Clear();
- }
-
- bool IsFull() { return false; }
-
- bool IsEmpty() { return bailout_deque_.IsEmpty() && shared_deque_.IsEmpty(); }
-
- int Size() { return bailout_deque_.Size() + shared_deque_.Size(); }
-
- // This is used for a large array with a progress bar.
- // For simplicity, unshift to the bailout deque so that the concurrent thread
- // does not see such objects.
- bool Unshift(HeapObject* object) {
- bailout_deque_.Unshift(object);
- return true;
- }
-
- // Calls the specified callback on each element of the deques and replaces
- // the element with the result of the callback. If the callback returns
- // nullptr then the element is removed from the deque.
- // The callback must accept HeapObject* and return HeapObject*.
- template <typename Callback>
- void Update(Callback callback) {
- bailout_deque_.Update(callback);
- shared_deque_.Update(callback);
- }
-
- // These empty functions are needed to match the interface
- // of the sequential marking deque.
- void SetUp() {}
- void TearDown() {}
- void StartUsing() {}
- void StopUsing() {}
- void ClearOverflowed() {}
- void SetOverflowed() {}
- bool overflowed() const { return false; }
-
- private:
- // Simple, slow, and thread-safe deque that forwards all operations to
- // a lock-protected std::deque.
- class Deque {
- public:
- Deque() { cache_padding_[0] = 0; }
- void Clear() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return deque_.clear();
- }
- bool IsEmpty() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return deque_.empty();
- }
- int Size() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- return static_cast<int>(deque_.size());
- }
- void Push(HeapObject* object) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- deque_.push_back(object);
- }
- HeapObject* Pop() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (deque_.empty()) return nullptr;
- HeapObject* result = deque_.back();
- deque_.pop_back();
- return result;
- }
- void Unshift(HeapObject* object) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- deque_.push_front(object);
- }
- template <typename Callback>
- void Update(Callback callback) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- std::deque<HeapObject*> new_deque;
- for (auto object : deque_) {
- HeapObject* new_object = callback(object);
- if (new_object) {
- new_deque.push_back(new_object);
- }
- }
- deque_.swap(new_deque);
- }
-
- private:
- base::Mutex mutex_;
- std::deque<HeapObject*> deque_;
- // Ensure that two deques do not share the same cache line.
- static int const kCachePadding = 64;
- char cache_padding_[kCachePadding];
- };
- Deque bailout_deque_;
- Deque shared_deque_;
- DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkingDeque);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CONCURRENT_MARKING_DEQUE_
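A short usage sketch of the protocol documented in the deleted class comment above, assuming the ConcurrentMarkingDeque declarations from this header: the concurrent thread bails objects it cannot process out to the bailout deque, and the main thread drains bailout entries before shared ones.

    // Concurrent thread: defer an object it cannot handle to the main thread.
    void BailOutObject(ConcurrentMarkingDeque* deque, HeapObject* object) {
      deque->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
    }

    // Main thread: Pop() tries the bailout deque first, then the shared deque.
    HeapObject* NextObjectOnMainThread(ConcurrentMarkingDeque* deque) {
      return deque->Pop(MarkingThread::kMain);
    }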
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f541828e29..d8b1a0895f 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,12 +7,12 @@
#include <stack>
#include <unordered_map>
-#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/locked-queue-inl.h"
#include "src/utils-inl.h"
@@ -48,18 +48,20 @@ class ConcurrentMarkingVisitor final
public:
using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
- explicit ConcurrentMarkingVisitor(ConcurrentMarkingDeque* deque)
- : deque_(deque) {}
+ explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
+ ConcurrentMarking::MarkingWorklist* bailout,
+ int task_id)
+ : shared_(shared, task_id), bailout_(bailout, task_id) {}
- bool ShouldVisit(HeapObject* object) override {
- return ObjectMarking::GreyToBlack<MarkBit::AccessMode::ATOMIC>(
+ bool ShouldVisit(HeapObject* object) {
+ return ObjectMarking::GreyToBlack<AccessMode::ATOMIC>(
object, marking_state(object));
}
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
if (!object->IsHeapObject()) continue;
MarkObject(HeapObject::cast(object));
}
@@ -73,11 +75,18 @@ class ConcurrentMarkingVisitor final
}
}
+ void VisitCodeEntry(JSFunction* host, Address entry_address) override {
+ Address code_entry = base::AsAtomicWord::Relaxed_Load(
+ reinterpret_cast<Address*>(entry_address));
+ Object* code = Code::GetObjectFromCodeEntry(code_entry);
+ VisitPointer(host, &code);
+ }
+
// ===========================================================================
// JS object =================================================================
// ===========================================================================
- int VisitJSObject(Map* map, JSObject* object) override {
+ int VisitJSObject(Map* map, JSObject* object) {
int size = JSObject::BodyDescriptor::SizeOf(map, object);
const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
if (!ShouldVisit(object)) return 0;
@@ -85,29 +94,44 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitJSObjectFast(Map* map, JSObject* object) override {
+ int VisitJSObjectFast(Map* map, JSObject* object) {
return VisitJSObject(map, object);
}
- int VisitJSApiObject(Map* map, JSObject* object) override {
- return VisitJSObject(map, object);
+ int VisitJSApiObject(Map* map, JSObject* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+      // It is OK to iterate the body of a JS API object here because it
+      // does not have unboxed double fields.
+ DCHECK(map->HasFastPointerLayout());
+ JSObject::BodyDescriptor::IterateBody(object, size, this);
+ // The main thread will do wrapper tracing in Blink.
+ bailout_.Push(object);
+ }
+ return 0;
}
// ===========================================================================
// Fixed array object ========================================================
// ===========================================================================
- int VisitFixedArray(Map* map, FixedArray* object) override {
- // TODO(ulan): implement iteration with prefetched length.
- return BaseClass::VisitFixedArray(map, object);
+ int VisitFixedArray(Map* map, FixedArray* object) {
+ int length = object->synchronized_length();
+ int size = FixedArray::SizeFor(length);
+ if (!ShouldVisit(object)) return 0;
+ VisitMapPointer(object, object->map_slot());
+ FixedArray::BodyDescriptor::IterateBody(object, size, this);
+ return size;
}
// ===========================================================================
// Code object ===============================================================
// ===========================================================================
- int VisitCode(Map* map, Code* object) override {
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitCode(Map* map, Code* object) {
+ bailout_.Push(object);
return 0;
}
@@ -115,58 +139,94 @@ class ConcurrentMarkingVisitor final
// Objects with weak fields and/or side-effectiful visitation.
// ===========================================================================
- int VisitBytecodeArray(Map* map, BytecodeArray* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
+ // Aging of bytecode arrays is done on the main thread.
+ bailout_.Push(object);
+ }
return 0;
}
- int VisitJSFunction(Map* map, JSFunction* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
- return 0;
+ int VisitAllocationSite(Map* map, AllocationSite* object) {
+ if (!ShouldVisit(object)) return 0;
+ int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
+ }
+
+ int VisitJSFunction(Map* map, JSFunction* object) {
+ if (!ShouldVisit(object)) return 0;
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ return size;
}
- int VisitMap(Map* map, Map* object) override {
+ int VisitMap(Map* map, Map* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
- int VisitNativeContext(Map* map, Context* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitNativeContext(Map* map, Context* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = Context::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ Context::BodyDescriptorWeak::IterateBody(object, size, this);
+ // TODO(ulan): implement proper weakness for normalized map cache
+ // and remove this bailout.
+ bailout_.Push(object);
+ }
return 0;
}
- int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) override {
- // TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
+ if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ int size = SharedFunctionInfo::BodyDescriptorWeak::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ SharedFunctionInfo::BodyDescriptorWeak::IterateBody(object, size, this);
+ // Resetting of IC age counter is done on the main thread.
+ bailout_.Push(object);
+ }
return 0;
}
- int VisitTransitionArray(Map* map, TransitionArray* object) override {
+ int VisitTransitionArray(Map* map, TransitionArray* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
- int VisitWeakCell(Map* map, WeakCell* object) override {
+ int VisitWeakCell(Map* map, WeakCell* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
- int VisitJSWeakCollection(Map* map, JSWeakCollection* object) override {
+ int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
// TODO(ulan): implement iteration of strong fields.
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ bailout_.Push(object);
return 0;
}
void MarkObject(HeapObject* object) {
- if (ObjectMarking::WhiteToGrey<MarkBit::AccessMode::ATOMIC>(
- object, marking_state(object))) {
- deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kShared);
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race
+ // in mark-bit initialization. See MemoryChunk::Initialize for the
+ // corresponding release store.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ CHECK_NOT_NULL(chunk->synchronized_heap());
+#endif
+ if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
+ shared_.Push(object);
}
}
@@ -183,7 +243,7 @@ class ConcurrentMarkingVisitor final
Object** end) override {
for (Object** p = start; p < end; p++) {
Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
slot_snapshot_->add(p, object);
}
}
@@ -206,82 +266,145 @@ class ConcurrentMarkingVisitor final
return MarkingState::Internal(object);
}
- ConcurrentMarkingDeque* deque_;
+ ConcurrentMarking::MarkingWorklist::View shared_;
+ ConcurrentMarking::MarkingWorklist::View bailout_;
SlotSnapshot slot_snapshot_;
};
class ConcurrentMarking::Task : public CancelableTask {
public:
Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
- base::Semaphore* on_finish)
+ base::Mutex* lock, int task_id)
: CancelableTask(isolate),
concurrent_marking_(concurrent_marking),
- on_finish_(on_finish) {}
+ lock_(lock),
+ task_id_(task_id) {}
virtual ~Task() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
- concurrent_marking_->Run();
- on_finish_->Signal();
+ concurrent_marking_->Run(task_id_, lock_);
}
ConcurrentMarking* concurrent_marking_;
- base::Semaphore* on_finish_;
+ base::Mutex* lock_;
+ int task_id_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
-ConcurrentMarking::ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque)
+ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
+ MarkingWorklist* bailout)
: heap_(heap),
- pending_task_semaphore_(0),
- deque_(deque),
- visitor_(new ConcurrentMarkingVisitor(deque_)),
- is_task_pending_(false) {
- // The runtime flag should be set only if the compile time flag was set.
+ shared_(shared),
+ bailout_(bailout),
+ pending_task_count_(0) {
+// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
+ for (int i = 0; i <= kTasks; i++) {
+ is_pending_[i] = false;
+ }
}
-ConcurrentMarking::~ConcurrentMarking() { delete visitor_; }
-
-void ConcurrentMarking::Run() {
- double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
+void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
+ ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id);
+ double time_ms;
size_t bytes_marked = 0;
- base::Mutex* relocation_mutex = heap_->relocation_mutex();
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Starting concurrent marking task %d\n", task_id);
+ }
{
TimedScope scope(&time_ms);
- HeapObject* object;
- while ((object = deque_->Pop(MarkingThread::kConcurrent)) != nullptr) {
- base::LockGuard<base::Mutex> guard(relocation_mutex);
- bytes_marked += visitor_->Visit(object);
+ while (true) {
+ base::LockGuard<base::Mutex> guard(lock);
+ HeapObject* object;
+ if (!shared_->Pop(task_id, &object)) break;
+ Address new_space_top = heap_->new_space()->original_top();
+ Address new_space_limit = heap_->new_space()->original_limit();
+ Address addr = object->address();
+ if (new_space_top <= addr && addr < new_space_limit) {
+ bailout_->Push(task_id, object);
+ } else {
+ Map* map = object->synchronized_map();
+ bytes_marked += visitor.Visit(map, object);
+ }
+ }
+ {
+ // Take the lock to synchronize with worklist update after
+ // young generation GC.
+ base::LockGuard<base::Mutex> guard(lock);
+ bailout_->FlushToGlobal(task_id);
+ }
+ {
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ is_pending_[task_id] = false;
+ --pending_task_count_;
+ pending_condition_.NotifyAll();
}
}
if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp("concurrently marked %dKB in %.2fms\n",
- static_cast<int>(bytes_marked / KB),
- time_ms);
+ heap_->isolate()->PrintWithTimestamp(
+ "Task %d concurrently marked %dKB in %.2fms\n", task_id,
+ static_cast<int>(bytes_marked / KB), time_ms);
+ }
+}
+
+void ConcurrentMarking::ScheduleTasks() {
+ if (!FLAG_concurrent_marking) return;
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ if (pending_task_count_ < kTasks) {
+ // Task id 0 is for the main thread.
+ for (int i = 1; i <= kTasks; i++) {
+ if (!is_pending_[i]) {
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Scheduling concurrent marking task %d\n", i);
+ }
+ is_pending_[i] = true;
+ ++pending_task_count_;
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new Task(heap_->isolate(), this, &task_lock_[i].lock, i),
+ v8::Platform::kShortRunningTask);
+ }
+ }
}
}
-void ConcurrentMarking::StartTask() {
+void ConcurrentMarking::RescheduleTasksIfNeeded() {
if (!FLAG_concurrent_marking) return;
- is_task_pending_ = true;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new Task(heap_->isolate(), this, &pending_task_semaphore_),
- v8::Platform::kShortRunningTask);
+ {
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ if (pending_task_count_ > 0) return;
+ }
+ if (!shared_->IsGlobalPoolEmpty()) {
+ ScheduleTasks();
+ }
}
-void ConcurrentMarking::WaitForTaskToComplete() {
+void ConcurrentMarking::EnsureCompleted() {
if (!FLAG_concurrent_marking) return;
- pending_task_semaphore_.Wait();
- is_task_pending_ = false;
+ base::LockGuard<base::Mutex> guard(&pending_lock_);
+ while (pending_task_count_ > 0) {
+ pending_condition_.Wait(&pending_lock_);
+ }
}
-void ConcurrentMarking::EnsureTaskCompleted() {
- if (IsTaskPending()) {
- WaitForTaskToComplete();
+ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
+ : concurrent_marking_(concurrent_marking) {
+ if (!FLAG_concurrent_marking) return;
+ for (int i = 1; i <= kTasks; i++) {
+ concurrent_marking_->task_lock_[i].lock.Lock();
+ }
+}
+
+ConcurrentMarking::PauseScope::~PauseScope() {
+ if (!FLAG_concurrent_marking) return;
+ for (int i = kTasks; i >= 1; i--) {
+ concurrent_marking_->task_lock_[i].lock.Unlock();
}
}
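In the rewritten Run() above, each background task drains a per-task view of the shared worklist under its task lock and defers anything it must not touch off the main thread (objects in the new-space allocation area, plus the code, context, and similar bailout cases handled by the visitor) to a bailout worklist that the main thread processes later. A simplified, non-V8 sketch of that drain loop; HeapObjectStub, the worklist type, and both callbacks are placeholders:

#include <cstddef>
#include <deque>

struct HeapObjectStub;  // placeholder for v8::internal::HeapObject

// Placeholder worklist exposing only the Push/Pop surface the loop needs.
struct WorklistStub {
  std::deque<HeapObjectStub*> items;
  bool Pop(HeapObjectStub** out) {
    if (items.empty()) return false;
    *out = items.back();
    items.pop_back();
    return true;
  }
  void Push(HeapObjectStub* object) { items.push_back(object); }
};

// Sketch of the concurrent drain loop: anything the background task must not
// visit is pushed to the bailout list for the main thread instead.
std::size_t DrainShared(WorklistStub* shared, WorklistStub* bailout,
                        bool (*must_bail_out)(HeapObjectStub*),
                        std::size_t (*visit)(HeapObjectStub*)) {
  std::size_t bytes_marked = 0;
  HeapObjectStub* object = nullptr;
  while (shared->Pop(&object)) {
    if (must_bail_out(object)) {
      bailout->Push(object);  // e.g. the object lives in the new-space area
    } else {
      bytes_marked += visit(object);
    }
  }
  return bytes_marked;
}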
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 134fa38f64..5179fc812d 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -7,35 +7,54 @@
#include "src/allocation.h"
#include "src/cancelable-task.h"
+#include "src/heap/worklist.h"
#include "src/utils.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
-class ConcurrentMarkingDeque;
-class ConcurrentMarkingVisitor;
class Heap;
class Isolate;
class ConcurrentMarking {
public:
- ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque_);
- ~ConcurrentMarking();
+ // When the scope is entered, the concurrent marking tasks
+ // are paused and are not looking at the heap objects.
+ class PauseScope {
+ public:
+ explicit PauseScope(ConcurrentMarking* concurrent_marking);
+ ~PauseScope();
- void StartTask();
- void WaitForTaskToComplete();
- bool IsTaskPending() { return is_task_pending_; }
- void EnsureTaskCompleted();
+ private:
+ ConcurrentMarking* concurrent_marking_;
+ };
+
+ static const int kTasks = 4;
+ using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
+
+ ConcurrentMarking(Heap* heap, MarkingWorklist* shared_,
+ MarkingWorklist* bailout_);
+
+ void ScheduleTasks();
+ void EnsureCompleted();
+ void RescheduleTasksIfNeeded();
private:
+ struct TaskLock {
+ base::Mutex lock;
+ char cache_line_padding[64];
+ };
class Task;
- void Run();
+ void Run(int task_id, base::Mutex* lock);
Heap* heap_;
- base::Semaphore pending_task_semaphore_;
- ConcurrentMarkingDeque* deque_;
- ConcurrentMarkingVisitor* visitor_;
- bool is_task_pending_;
+ MarkingWorklist* shared_;
+ MarkingWorklist* bailout_;
+ TaskLock task_lock_[kTasks + 1];
+ base::Mutex pending_lock_;
+ base::ConditionVariable pending_condition_;
+ int pending_task_count_;
+ bool is_pending_[kTasks + 1];
};
} // namespace internal
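The PauseScope declared above stops all concurrent marking tasks for its lifetime by acquiring every per-task lock, which is what lets the main thread move objects during young-generation collection while marking tasks exist. A hedged sketch of the same RAII idea, with a plain mutex array standing in for the task_lock_ member:

#include <mutex>

// Stand-ins for ConcurrentMarking::kTasks and the task_lock_ array.
constexpr int kNumTasks = 4;
std::mutex task_locks[kNumTasks + 1];  // slot 0 is unused, like task id 0

// RAII pause: while an instance is alive it holds every task lock, so no
// background marking task can be inside its Run() loop.
class PauseScopeSketch {
 public:
  PauseScopeSketch() {
    for (int i = 1; i <= kNumTasks; i++) task_locks[i].lock();
  }
  ~PauseScopeSketch() {
    for (int i = kNumTasks; i >= 1; i--) task_locks[i].unlock();
  }
};

A caller wraps the relocating phase in a block scope, as heap.cc does below with ConcurrentMarking::PauseScope at the start of Scavenge() and EvacuateYoungGeneration().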
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 2d11724181..1d20918ef3 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -13,7 +13,7 @@ void LocalEmbedderHeapTracer::TracePrologue() {
if (!InUse()) return;
CHECK(cached_wrappers_to_trace_.empty());
- num_v8_marking_deque_was_empty_ = 0;
+ num_v8_marking_worklist_was_empty_ = 0;
remote_tracer_->TracePrologue();
}
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 5e10d6e2e8..8146a1281c 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -19,7 +19,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
typedef std::pair<void*, void*> WrapperInfo;
LocalEmbedderHeapTracer()
- : remote_tracer_(nullptr), num_v8_marking_deque_was_empty_(0) {}
+ : remote_tracer_(nullptr), num_v8_marking_worklist_was_empty_(0) {}
void SetRemoteTracer(EmbedderHeapTracer* tracer) { remote_tracer_ = tracer; }
bool InUse() { return remote_tracer_ != nullptr; }
@@ -45,12 +45,14 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
// are too many of them.
bool RequiresImmediateWrapperProcessing();
- void NotifyV8MarkingDequeWasEmpty() { num_v8_marking_deque_was_empty_++; }
+ void NotifyV8MarkingWorklistWasEmpty() {
+ num_v8_marking_worklist_was_empty_++;
+ }
bool ShouldFinalizeIncrementalMarking() {
static const size_t kMaxIncrementalFixpointRounds = 3;
return !FLAG_incremental_marking_wrappers || !InUse() ||
NumberOfWrappersToTrace() == 0 ||
- num_v8_marking_deque_was_empty_ > kMaxIncrementalFixpointRounds;
+ num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
private:
@@ -58,7 +60,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
EmbedderHeapTracer* remote_tracer_;
WrapperCache cached_wrappers_to_trace_;
- size_t num_v8_marking_deque_was_empty_;
+ size_t num_v8_marking_worklist_was_empty_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 905514c4bf..6142a0c8e4 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -73,9 +73,11 @@ double GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
}
bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
- int contexts_disposed, double contexts_disposal_rate) {
+ int contexts_disposed, double contexts_disposal_rate,
+ size_t size_of_objects) {
return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
- contexts_disposal_rate < kHighContextDisposalRate;
+ contexts_disposal_rate < kHighContextDisposalRate &&
+ size_of_objects <= kMaxHeapSizeForContextDisposalMarkCompact;
}
bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
@@ -123,9 +125,9 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
if (heap_state.incremental_marking_stopped) {
- if (ShouldDoContextDisposalMarkCompact(
- heap_state.contexts_disposed,
- heap_state.contexts_disposal_rate)) {
+ if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
+ heap_state.contexts_disposal_rate,
+ heap_state.size_of_objects)) {
return GCIdleTimeAction::FullGC();
}
}
@@ -135,7 +137,8 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
// We are in a context disposal GC scenario. Don't do anything if we do not
// get the right idle signal.
if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
- heap_state.contexts_disposal_rate)) {
+ heap_state.contexts_disposal_rate,
+ heap_state.size_of_objects)) {
return NothingOrDone(idle_time_in_ms);
}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index b730a7bbba..722710e11a 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -107,6 +107,8 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
// considered low
static const size_t kLowAllocationThroughput = 1000;
+ static const size_t kMaxHeapSizeForContextDisposalMarkCompact = 100 * MB;
+
// If contexts are disposed at a higher rate a full gc is triggered.
static const double kHighContextDisposalRate;
@@ -136,7 +138,8 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
size_t size_of_objects, double mark_compact_speed_in_bytes_per_ms);
static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
- double contexts_disposal_rate);
+ double contexts_disposal_rate,
+ size_t size_of_objects);
static bool ShouldDoFinalIncrementalMarkCompact(
double idle_time_in_ms, size_t size_of_objects,
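With the extra size_of_objects parameter above, a context-disposal full GC is now taken only while the heap is still small (at most kMaxHeapSizeForContextDisposalMarkCompact, 100 MB). A sketch of the resulting predicate; the disposal-rate threshold is illustrative since its value is defined elsewhere:

#include <cstddef>

// Sketch of the updated decision: only do a full GC on context disposal when
// contexts are disposed slowly enough and the heap is still small.
bool ShouldDoContextDisposalMarkCompactSketch(int contexts_disposed,
                                              double contexts_disposal_rate,
                                              std::size_t size_of_objects) {
  const double kHighContextDisposalRate = 100.0;  // illustrative threshold
  const std::size_t kMaxHeapSizeForContextDisposalMarkCompact =
      100u * 1024u * 1024u;  // the 100 * MB constant added above
  return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
         contexts_disposal_rate < kHighContextDisposalRate &&
         size_of_objects <= kMaxHeapSizeForContextDisposalMarkCompact;
}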
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 46d5bb66ee..d675492a3a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -464,7 +464,6 @@ void GCTracer::PrintNVP() const {
"old_new=%.2f "
"weak=%.2f "
"roots=%.2f "
- "code=%.2f "
"semispace=%.2f "
"steps_count=%d "
"steps_took=%.1f "
@@ -504,7 +503,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
current_.scopes[Scope::SCAVENGER_WEAK],
current_.scopes[Scope::SCAVENGER_ROOTS],
- current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
current_.scopes[Scope::SCAVENGER_SEMISPACE],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
@@ -530,7 +528,6 @@ void GCTracer::PrintNVP() const {
"minor_mc=%.2f "
"finish_sweeping=%.2f "
"mark=%.2f "
- "mark.identify_global_handles=%.2f "
"mark.seed=%.2f "
"mark.roots=%.2f "
"mark.weak=%.2f "
@@ -541,17 +538,14 @@ void GCTracer::PrintNVP() const {
"evacuate=%.2f "
"evacuate.copy=%.2f "
"evacuate.update_pointers=%.2f "
- "evacuate.update_pointers.to_new=%.2f "
- "evacuate.update_pointers.to_new.tospace=%.2f "
- "evacuate.update_pointers.to_new.roots=%.2f "
- "evacuate.update_pointers.to_new.old=%.2f "
+ "evacuate.update_pointers.to_new_roots=%.2f "
+ "evacuate.update_pointers.slots=%.2f "
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_.scopes[Scope::MINOR_MC],
current_.scopes[Scope::MINOR_MC_SWEEPING],
current_.scopes[Scope::MINOR_MC_MARK],
- current_.scopes[Scope::MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES],
current_.scopes[Scope::MINOR_MC_MARK_SEED],
current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
current_.scopes[Scope::MINOR_MC_MARK_WEAK],
@@ -562,12 +556,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MINOR_MC_EVACUATE],
current_.scopes[Scope::MINOR_MC_EVACUATE_COPY],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
- current_
- .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE],
current_
.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
- current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD],
+ current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
@@ -585,7 +576,6 @@ void GCTracer::PrintNVP() const {
"heap.external.epilogue=%.1f "
"heap.external.weak_global_handles=%.1f "
"clear=%1.f "
- "clear.code_flush=%.1f "
"clear.dependent_code=%.1f "
"clear.maps=%.1f "
"clear.slots_buffer=%.1f "
@@ -603,13 +593,12 @@ void GCTracer::PrintNVP() const {
"evacuate.epilogue=%.1f "
"evacuate.rebalance=%.1f "
"evacuate.update_pointers=%.1f "
- "evacuate.update_pointers.to_evacuated=%.1f "
- "evacuate.update_pointers.to_new=%.1f "
+ "evacuate.update_pointers.to_new_roots=%.1f "
+ "evacuate.update_pointers.slots=%.1f "
"evacuate.update_pointers.weak=%.1f "
"finish=%.1f "
"mark=%.1f "
"mark.finish_incremental=%.1f "
- "mark.prepare_code_flush=%.1f "
"mark.roots=%.1f "
"mark.weak_closure=%.1f "
"mark.weak_closure.ephemeral=%.1f "
@@ -671,7 +660,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_CLEAR],
- current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
current_.scopes[Scope::MC_CLEAR_MAPS],
current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
@@ -689,12 +677,11 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE_REBALANCE],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
- current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
current_.scopes[Scope::MC_MARK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL],
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 96b21c6712..6e3e875b94 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -10,6 +10,7 @@
#include "src/base/ring-buffer.h"
#include "src/counters.h"
#include "src/globals.h"
+#include "src/heap/heap.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -34,85 +35,79 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
-#define TRACER_SCOPES(F) \
- INCREMENTAL_SCOPES(F) \
- F(HEAP_EPILOGUE) \
- F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
- F(HEAP_EXTERNAL_EPILOGUE) \
- F(HEAP_EXTERNAL_PROLOGUE) \
- F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
- F(HEAP_PROLOGUE) \
- F(MC_CLEAR) \
- F(MC_CLEAR_CODE_FLUSH) \
- F(MC_CLEAR_DEPENDENT_CODE) \
- F(MC_CLEAR_MAPS) \
- F(MC_CLEAR_SLOTS_BUFFER) \
- F(MC_CLEAR_STORE_BUFFER) \
- F(MC_CLEAR_STRING_TABLE) \
- F(MC_CLEAR_WEAK_CELLS) \
- F(MC_CLEAR_WEAK_COLLECTIONS) \
- F(MC_CLEAR_WEAK_LISTS) \
- F(MC_EPILOGUE) \
- F(MC_EVACUATE) \
- F(MC_EVACUATE_CANDIDATES) \
- F(MC_EVACUATE_CLEAN_UP) \
- F(MC_EVACUATE_COPY) \
- F(MC_EVACUATE_EPILOGUE) \
- F(MC_EVACUATE_PROLOGUE) \
- F(MC_EVACUATE_REBALANCE) \
- F(MC_EVACUATE_UPDATE_POINTERS) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
- F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_FINISH) \
- F(MC_MARK) \
- F(MC_MARK_FINISH_INCREMENTAL) \
- F(MC_MARK_PREPARE_CODE_FLUSH) \
- F(MC_MARK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE) \
- F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE_HARMONY) \
- F(MC_MARK_WRAPPER_EPILOGUE) \
- F(MC_MARK_WRAPPER_PROLOGUE) \
- F(MC_MARK_WRAPPER_TRACING) \
- F(MC_PROLOGUE) \
- F(MC_SWEEP) \
- F(MC_SWEEP_CODE) \
- F(MC_SWEEP_MAP) \
- F(MC_SWEEP_OLD) \
- F(MINOR_MC) \
- F(MINOR_MC_CLEAR) \
- F(MINOR_MC_CLEAR_STRING_TABLE) \
- F(MINOR_MC_CLEAR_WEAK_LISTS) \
- F(MINOR_MC_EVACUATE) \
- F(MINOR_MC_EVACUATE_CLEAN_UP) \
- F(MINOR_MC_EVACUATE_COPY) \
- F(MINOR_MC_EVACUATE_EPILOGUE) \
- F(MINOR_MC_EVACUATE_PROLOGUE) \
- F(MINOR_MC_EVACUATE_REBALANCE) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD) \
- F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MINOR_MC_MARK) \
- F(MINOR_MC_MARK_GLOBAL_HANDLES) \
- F(MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES) \
- F(MINOR_MC_MARK_SEED) \
- F(MINOR_MC_MARK_ROOTS) \
- F(MINOR_MC_MARK_WEAK) \
- F(MINOR_MC_MARKING_DEQUE) \
- F(MINOR_MC_RESET_LIVENESS) \
- F(MINOR_MC_SWEEPING) \
- F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
- F(SCAVENGER_EVACUATE) \
- F(SCAVENGER_OLD_TO_NEW_POINTERS) \
- F(SCAVENGER_ROOTS) \
- F(SCAVENGER_SCAVENGE) \
- F(SCAVENGER_SEMISPACE) \
+#define TRACER_SCOPES(F) \
+ INCREMENTAL_SCOPES(F) \
+ F(HEAP_EPILOGUE) \
+ F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
+ F(HEAP_EXTERNAL_EPILOGUE) \
+ F(HEAP_EXTERNAL_PROLOGUE) \
+ F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
+ F(HEAP_PROLOGUE) \
+ F(MC_CLEAR) \
+ F(MC_CLEAR_DEPENDENT_CODE) \
+ F(MC_CLEAR_MAPS) \
+ F(MC_CLEAR_SLOTS_BUFFER) \
+ F(MC_CLEAR_STORE_BUFFER) \
+ F(MC_CLEAR_STRING_TABLE) \
+ F(MC_CLEAR_WEAK_CELLS) \
+ F(MC_CLEAR_WEAK_COLLECTIONS) \
+ F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EPILOGUE) \
+ F(MC_EVACUATE) \
+ F(MC_EVACUATE_CANDIDATES) \
+ F(MC_EVACUATE_CLEAN_UP) \
+ F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_EPILOGUE) \
+ F(MC_EVACUATE_PROLOGUE) \
+ F(MC_EVACUATE_REBALANCE) \
+ F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MC_FINISH) \
+ F(MC_MARK) \
+ F(MC_MARK_FINISH_INCREMENTAL) \
+ F(MC_MARK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+ F(MC_MARK_WRAPPER_EPILOGUE) \
+ F(MC_MARK_WRAPPER_PROLOGUE) \
+ F(MC_MARK_WRAPPER_TRACING) \
+ F(MC_PROLOGUE) \
+ F(MC_SWEEP) \
+ F(MC_SWEEP_CODE) \
+ F(MC_SWEEP_MAP) \
+ F(MC_SWEEP_OLD) \
+ F(MINOR_MC) \
+ F(MINOR_MC_CLEAR) \
+ F(MINOR_MC_CLEAR_STRING_TABLE) \
+ F(MINOR_MC_CLEAR_WEAK_LISTS) \
+ F(MINOR_MC_EVACUATE) \
+ F(MINOR_MC_EVACUATE_CLEAN_UP) \
+ F(MINOR_MC_EVACUATE_COPY) \
+ F(MINOR_MC_EVACUATE_EPILOGUE) \
+ F(MINOR_MC_EVACUATE_PROLOGUE) \
+ F(MINOR_MC_EVACUATE_REBALANCE) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MINOR_MC_MARK) \
+ F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_SEED) \
+ F(MINOR_MC_MARK_ROOTS) \
+ F(MINOR_MC_MARK_WEAK) \
+ F(MINOR_MC_MARKING_DEQUE) \
+ F(MINOR_MC_RESET_LIVENESS) \
+ F(MINOR_MC_SWEEPING) \
+ F(SCAVENGER_EVACUATE) \
+ F(SCAVENGER_OLD_TO_NEW_POINTERS) \
+ F(SCAVENGER_ROOTS) \
+ F(SCAVENGER_SCAVENGE) \
+ F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
#define TRACE_GC(tracer, scope_id) \
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 87aac8731d..33e31b02b8 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -23,6 +23,8 @@
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"
+#include "src/objects/script-inl.h"
+#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
namespace v8 {
@@ -30,7 +32,7 @@ namespace internal {
AllocationSpace AllocationResult::RetrySpace() {
DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+ return static_cast<AllocationSpace>(Smi::ToInt(object_));
}
HeapObject* AllocationResult::ToObjectChecked() {
@@ -38,83 +40,6 @@ HeapObject* AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
-void PromotionQueue::insert(HeapObject* target, int32_t size) {
- if (emergency_stack_ != NULL) {
- emergency_stack_->Add(Entry(target, size));
- return;
- }
-
- if ((rear_ - 1) < limit_) {
- RelocateQueueHead();
- emergency_stack_->Add(Entry(target, size));
- return;
- }
-
- struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
- entry->obj_ = target;
- entry->size_ = size;
-
-// Assert no overflow into live objects.
-#ifdef DEBUG
- SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
- reinterpret_cast<Address>(rear_));
-#endif
-}
-
-void PromotionQueue::remove(HeapObject** target, int32_t* size) {
- DCHECK(!is_empty());
- if (front_ == rear_) {
- Entry e = emergency_stack_->RemoveLast();
- *target = e.obj_;
- *size = e.size_;
- return;
- }
-
- struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
- *target = entry->obj_;
- *size = entry->size_;
-
- // Assert no underflow.
- SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
- reinterpret_cast<Address>(front_));
-}
-
-Page* PromotionQueue::GetHeadPage() {
- return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
-}
-
-void PromotionQueue::SetNewLimit(Address limit) {
- // If we are already using an emergency stack, we can ignore it.
- if (emergency_stack_) return;
-
- // If the limit is not on the same page, we can ignore it.
- if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
-
- limit_ = reinterpret_cast<struct Entry*>(limit);
-
- if (limit_ <= rear_) {
- return;
- }
-
- RelocateQueueHead();
-}
-
-bool PromotionQueue::IsBelowPromotionQueue(Address to_space_top) {
- // If an emergency stack is used, the to-space address cannot interfere
- // with the promotion queue.
- if (emergency_stack_) return true;
-
- // If the given to-space top pointer and the head of the promotion queue
- // are not on the same page, then the to-space objects are below the
- // promotion queue.
- if (GetHeadPage() != Page::FromAddress(to_space_top)) {
- return true;
- }
-  // If the to-space top pointer is smaller than or equal to the promotion
- // queue head, then the to-space objects are below the promotion queue.
- return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
-}
-
#define ROOT_ACCESSOR(type, name, camel_name) \
type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
ROOT_LIST(ROOT_ACCESSOR)
@@ -292,6 +217,9 @@ AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
return CopyFixedDoubleArrayWithMap(src, src->map());
}
+AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+ return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+}
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationAlignment alignment) {
@@ -367,7 +295,7 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
UpdateAllocationsHash(size_in_bytes);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAlloctionsHash();
+ PrintAllocationsHash();
}
}
@@ -402,7 +330,7 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
UpdateAllocationsHash(size_in_bytes);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAlloctionsHash();
+ PrintAllocationsHash();
}
}
}
@@ -491,7 +419,7 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
-bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+bool Heap::ShouldBePromoted(Address old_address) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
@@ -524,48 +452,15 @@ Address* Heap::store_buffer_top_address() {
return store_buffer()->top_address();
}
-bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
- // Object migration is governed by the following rules:
- //
- // 1) Objects in new-space can be migrated to the old space
- // that matches their target space or they stay in new-space.
- // 2) Objects in old-space stay in the same space when migrating.
- // 3) Fillers (two or more words) can migrate due to left-trimming of
- // fixed arrays in new-space or old space.
- // 4) Fillers (one word) can never migrate, they are skipped by
- // incremental marking explicitly to prevent invalid pattern.
- //
- // Since this function is used for debugging only, we do not place
- // asserts here, but check everything explicitly.
- if (obj->map() == one_pointer_filler_map()) return false;
- InstanceType type = obj->map()->instance_type();
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- AllocationSpace src = chunk->owner()->identity();
- switch (src) {
- case NEW_SPACE:
- return dst == src || dst == OLD_SPACE;
- case OLD_SPACE:
- return dst == src &&
- (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
- case CODE_SPACE:
- return dst == src && type == CODE_TYPE;
- case MAP_SPACE:
- case LO_SPACE:
- return false;
- }
- UNREACHABLE();
- return false;
-}
-
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
static_cast<size_t>(byte_size / kPointerSize));
}
template <Heap::FindMementoMode mode>
-AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
+AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
Address object_address = object->address();
- Address memento_address = object_address + object->Size();
+ Address memento_address = object_address + object->SizeFromMap(map);
Address last_memento_word_address = memento_address + kPointerSize;
// If the memento would be on another page, bail out immediately.
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
@@ -621,11 +516,10 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
template <Heap::UpdateAllocationSiteMode mode>
-void Heap::UpdateAllocationSite(HeapObject* object,
+void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
base::HashMap* pretenuring_feedback) {
DCHECK(InFromSpace(object) ||
(InToSpace(object) &&
@@ -635,9 +529,10 @@ void Heap::UpdateAllocationSite(HeapObject* object,
Page::FromAddress(object->address())
->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
- !AllocationSite::CanTrack(object->map()->instance_type()))
+ !AllocationSite::CanTrack(map->instance_type()))
return;
- AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
+ AllocationMemento* memento_candidate =
+ FindAllocationMemento<kForGC>(map, object);
if (memento_candidate == nullptr) return;
if (mode == kGlobal) {
@@ -673,15 +568,6 @@ void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}
-bool Heap::CollectGarbage(AllocationSpace space,
- GarbageCollectionReason gc_reason,
- const v8::GCCallbackFlags callbackFlags) {
- const char* collector_reason = NULL;
- GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
-}
-
-
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
@@ -754,19 +640,10 @@ void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
#endif
}
-void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }
-
Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
}
-
-void Heap::CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(Smi::kZero);
- set_instanceof_cache_function(Smi::kZero);
-}
-
-
uint32_t Heap::HashSeed() {
uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
DCHECK(FLAG_randomize_hashes || seed == 0);
@@ -858,34 +735,6 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_.Decrement(1);
}
-void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
- Object** end) {
- VerifyPointers(start, end);
-}
-
-void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
- VerifyPointers(start, end);
-}
-
-void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(object->GetIsolate()->heap()->Contains(object));
- CHECK(object->map()->IsMap());
- } else {
- CHECK((*current)->IsSmi());
- }
- }
-}
-
-void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
- for (Object** current = start; current < end; current++) {
- CHECK((*current)->IsSmi());
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index fa47dc825b..20b20024c3 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -4,6 +4,9 @@
#include "src/heap/heap.h"
+#include <unordered_map>
+#include <unordered_set>
+
#include "src/accessors.h"
#include "src/api.h"
#include "src/assembler-inl.h"
@@ -38,6 +41,8 @@
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
+#include "src/objects/object-macros.h"
+#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
@@ -52,7 +57,6 @@
namespace v8 {
namespace internal {
-
struct Heap::StrongRootsList {
Object** start;
Object** end;
@@ -81,7 +85,7 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
- initial_semispace_size_(MB),
+ initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
@@ -112,6 +116,7 @@ Heap::Heap()
raw_allocations_hash_(0),
ms_count_(0),
gc_count_(0),
+ mmap_region_base_(0),
remembered_unmapped_pages_index_(0),
#ifdef DEBUG
allocation_timeout_(0),
@@ -130,7 +135,6 @@ Heap::Heap()
maximum_size_scavenges_(0),
last_idle_notification_time_(0.0),
last_gc_time_(0.0),
- scavenge_collector_(nullptr),
mark_compact_collector_(nullptr),
minor_mark_compact_collector_(nullptr),
memory_allocator_(nullptr),
@@ -146,11 +150,9 @@ Heap::Heap()
new_space_allocation_counter_(0),
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
- gcs_since_last_deopt_(0),
global_pretenuring_feedback_(nullptr),
ring_buffer_full_(false),
ring_buffer_end_(0),
- promotion_queue_(this),
configured_(false),
current_gc_flags_(Heap::kNoGCFlags),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
@@ -161,17 +163,9 @@ Heap::Heap()
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
- use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
-// Allow build-time customization of the max semispace size. Building
-// V8 with snapshots and a non-default max semispace size is much
-// easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
- max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
-#endif
-
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
@@ -378,6 +372,8 @@ void Heap::PrintShortHeapStatistics() {
this->CommittedMemory() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
external_memory_ / KB);
+ PrintIsolate(isolate_, "External memory global %zu KB\n",
+ external_memory_callback_() / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
@@ -556,6 +552,65 @@ class Heap::PretenuringScope {
Heap* heap_;
};
+namespace {
+inline bool MakePretenureDecision(
+ AllocationSite* site, AllocationSite::PretenureDecision current_decision,
+ double ratio, bool maximum_size_scavenge) {
+ // Here we just allow state transitions from undecided or maybe tenure
+ // to don't tenure, maybe tenure, or tenure.
+ if ((current_decision == AllocationSite::kUndecided ||
+ current_decision == AllocationSite::kMaybeTenure)) {
+ if (ratio >= AllocationSite::kPretenureRatio) {
+ // We just transition into tenure state when the semi-space was at
+ // maximum capacity.
+ if (maximum_size_scavenge) {
+ site->set_deopt_dependent_code(true);
+ site->set_pretenure_decision(AllocationSite::kTenure);
+ // Currently we just need to deopt when we make a state transition to
+ // tenure.
+ return true;
+ }
+ site->set_pretenure_decision(AllocationSite::kMaybeTenure);
+ } else {
+ site->set_pretenure_decision(AllocationSite::kDontTenure);
+ }
+ }
+ return false;
+}
+
+inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
+ bool maximum_size_scavenge) {
+ bool deopt = false;
+ int create_count = site->memento_create_count();
+ int found_count = site->memento_found_count();
+ bool minimum_mementos_created =
+ create_count >= AllocationSite::kPretenureMinimumCreated;
+ double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
+ ? static_cast<double>(found_count) / create_count
+ : 0.0;
+ AllocationSite::PretenureDecision current_decision =
+ site->pretenure_decision();
+
+ if (minimum_mementos_created) {
+ deopt = MakePretenureDecision(site, current_decision, ratio,
+ maximum_size_scavenge);
+ }
+
+ if (FLAG_trace_pretenuring_statistics) {
+ PrintIsolate(isolate,
+ "pretenuring: AllocationSite(%p): (created, found, ratio) "
+ "(%d, %d, %f) %s => %s\n",
+ static_cast<void*>(site), create_count, found_count, ratio,
+ site->PretenureDecisionName(current_decision),
+ site->PretenureDecisionName(site->pretenure_decision()));
+ }
+
+ // Clear feedback calculation fields until the next gc.
+ site->set_memento_found_count(0);
+ site->set_memento_create_count(0);
+ return deopt;
+}
+} // namespace
void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
@@ -582,7 +637,7 @@ void Heap::ProcessPretenuringFeedback() {
DCHECK(site->IsAllocationSite());
active_allocation_sites++;
allocation_mementos_found += found_count;
- if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+ if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
trigger_deoptimization = true;
}
if (site->GetPretenureMode() == TENURED) {
@@ -667,15 +722,6 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
- if (FLAG_deopt_every_n_garbage_collections > 0) {
- // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
- // the topmost optimized frame can be deoptimized safely, because it
- // might not have a lazy bailout point right after its current PC.
- if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
- Deoptimizer::DeoptimizeAll(isolate());
- gcs_since_last_deopt_ = 0;
- }
- }
UpdateMaximumCommitted();
@@ -770,7 +816,7 @@ void Heap::PreprocessStackTraces() {
// a stack trace that has already been preprocessed. Guard against this.
if (!maybe_code->IsAbstractCode()) break;
AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
- int offset = Smi::cast(elements->get(j + 3))->value();
+ int offset = Smi::ToInt(elements->get(j + 3));
int pos = abstract_code->SourcePosition(offset);
elements->set(j + 2, Smi::FromInt(pos));
}
@@ -911,7 +957,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
+ if (!CollectGarbage(OLD_SPACE, gc_reason,
v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
@@ -923,45 +969,47 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
void Heap::ReportExternalMemoryPressure() {
+ const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
+ static_cast<GCCallbackFlags>(
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing |
+ kGCCallbackFlagCollectAllExternalMemory);
if (external_memory_ >
(external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kExternalMemoryPressure,
static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
- kGCCallbackFlagCollectAllExternalMemory));
+ kGCCallbackFlagsForExternalMemory));
return;
}
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
- StartIncrementalMarking(
- i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
- static_cast<GCCallbackFlags>(
- kGCCallbackFlagSynchronousPhantomCallbackProcessing |
- kGCCallbackFlagCollectAllExternalMemory));
+ StartIncrementalMarking(i::Heap::kNoGCFlags,
+ GarbageCollectionReason::kExternalMemoryPressure,
+ kGCCallbackFlagsForExternalMemory);
} else {
CollectAllGarbage(i::Heap::kNoGCFlags,
GarbageCollectionReason::kExternalMemoryPressure,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing);
+ kGCCallbackFlagsForExternalMemory);
}
} else {
    // Incremental marking is turned on and has already been started.
- const double pressure =
- static_cast<double>(external_memory_ -
- external_memory_at_last_mark_compact_ -
- kExternalAllocationSoftLimit) /
- external_memory_hard_limit();
- DCHECK_GE(1, pressure);
- const double kMaxStepSizeOnExternalLimit = 25;
- const double deadline = MonotonicallyIncreasingTimeInMs() +
- pressure * kMaxStepSizeOnExternalLimit;
+ const double kMinStepSize = 5;
+ const double kMaxStepSize = 10;
+ const double ms_step =
+ Min(kMaxStepSize,
+ Max(kMinStepSize, static_cast<double>(external_memory_) /
+ external_memory_limit_ * kMinStepSize));
+ const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
+ // Extend the gc callback flags with external memory flags.
+ current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
+ current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceIncrementalMarking(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
}
-
void Heap::EnsureFillerObjectAtTop() {
// There may be an allocation memento behind objects in new space. Upon
// evacuation of a non-full new space (or if we are on the last page) there
@@ -975,14 +1023,16 @@ void Heap::EnsureFillerObjectAtTop() {
}
}
-bool Heap::CollectGarbage(GarbageCollector collector,
+bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
- const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate());
RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC);
+ const char* collector_reason = NULL;
+ GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
// allow at least a few allocations after a collection. The reason
@@ -1065,8 +1115,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
// causes another mark-compact.
if (IsYoungGenerationCollector(collector) &&
!ShouldAbortIncrementalMarking()) {
- StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
- kNoGCCallbackFlags);
+ StartIncrementalMarkingIfAllocationLimitIsReached(
+ kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
}
return next_gc_likely_to_collect_more;
@@ -1130,8 +1180,23 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
if (len == 0) return;
DCHECK(array->map() != fixed_cow_array_map());
- Object** dst_objects = array->data_start() + dst_index;
- MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
+ Object** dst = array->data_start() + dst_index;
+ Object** src = array->data_start() + src_index;
+ if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
+ if (dst < src) {
+ for (int i = 0; i < len; i++) {
+ base::AsAtomicWord::Relaxed_Store(
+ dst + i, base::AsAtomicWord::Relaxed_Load(src + i));
+ }
+ } else {
+ for (int i = len - 1; i >= 0; i--) {
+ base::AsAtomicWord::Relaxed_Store(
+ dst + i, base::AsAtomicWord::Relaxed_Load(src + i));
+ }
+ }
+ } else {
+ MemMove(dst, src, len * kPointerSize);
+ }
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
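The MoveElements change above copies array slots one pointer-sized word at a time with relaxed atomic loads and stores while concurrent marking is running, choosing the copy direction by overlap like memmove, so a background marker never observes a torn slot; the plain MemMove path is kept for the non-concurrent case. A standalone sketch of the concurrent branch using std::atomic, with placeholder types:

#include <atomic>

// Sketch: move `len` pointer-sized slots within one array while a concurrent
// reader may be scanning it. Each slot is read and written with relaxed
// atomics, so a reader always sees either the old or the new pointer value.
void MoveSlotsDuringMarking(std::atomic<void*>* data, int dst_index,
                            int src_index, int len) {
  std::atomic<void*>* dst = data + dst_index;
  std::atomic<void*>* src = data + src_index;
  if (dst < src) {
    // Forward copy is safe when the destination precedes the source.
    for (int i = 0; i < len; i++) {
      dst[i].store(src[i].load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
    }
  } else {
    // Backward copy handles the overlapping case, like memmove.
    for (int i = len - 1; i >= 0; i--) {
      dst[i].store(src[i].load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
    }
  }
}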
@@ -1524,7 +1589,7 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
- mark_compact_collector()->marking_deque()->StopUsing();
+ mark_compact_collector()->marking_worklist()->StopUsing();
}
@@ -1537,8 +1602,6 @@ void Heap::MarkCompactPrologue() {
isolate_->compilation_cache()->MarkCompactPrologue();
- CompletelyClearInstanceofCache();
-
FlushNumberStringCache();
ClearNormalizedMapCaches();
}
@@ -1562,55 +1625,11 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
}
-
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
return heap->InNewSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
-void PromotionQueue::Initialize() {
- // The last to-space page may be used for promotion queue. On promotion
- // conflict, we use the emergency stack.
- DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
- 0);
- front_ = rear_ =
- reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
- limit_ = reinterpret_cast<struct Entry*>(
- Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
- ->area_start());
- emergency_stack_ = NULL;
-}
-
-void PromotionQueue::Destroy() {
- DCHECK(is_empty());
- delete emergency_stack_;
- emergency_stack_ = NULL;
-}
-
-void PromotionQueue::RelocateQueueHead() {
- DCHECK(emergency_stack_ == NULL);
-
- Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
- struct Entry* head_start = rear_;
- struct Entry* head_end =
- Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
-
- int entries_count =
- static_cast<int>(head_end - head_start) / sizeof(struct Entry);
-
- emergency_stack_ = new List<Entry>(2 * entries_count);
-
- while (head_start != head_end) {
- struct Entry* entry = head_start++;
- // New space allocation in SemiSpaceCopyObject marked the region
- // overlapping with promotion queue as uninitialized.
- MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
- emergency_stack_->Add(*entry);
- }
- rear_ = head_end;
-}
-
-
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
@@ -1634,6 +1653,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
+ ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanExpandOldGeneration(new_space()->Size()));
@@ -1673,9 +1693,17 @@ void Heap::EvacuateYoungGeneration() {
SetGCState(NOT_IN_GC);
}
+static bool IsLogging(Isolate* isolate) {
+ return FLAG_verify_predictable || isolate->logger()->is_logging() ||
+ isolate->is_profiling() ||
+ (isolate->heap_profiler() != nullptr &&
+ isolate->heap_profiler()->is_tracking_object_moves());
+}
+
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::LockGuard<base::Mutex> guard(relocation_mutex());
+ ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
  // trigger one during scavenge: scavenge allocations should always succeed.
@@ -1698,37 +1726,26 @@ void Heap::Scavenge() {
// Used for updating survived_since_last_expansion_ at function end.
size_t survived_watermark = PromotedSpaceSizeOfObjects();
- scavenge_collector_->SelectScavengingVisitorsTable();
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
new_space_->ResetAllocationInfo();
- // We need to sweep newly copied objects which can be either in the
- // to space or promoted to the old generation. For to-space
- // objects, we treat the bottom of the to space as a queue. Newly
- // copied and unswept objects lie between a 'front' mark and the
- // allocation pointer.
- //
- // Promoted objects can go into various old-generation spaces, and
- // can be allocated internally in the spaces (from the free list).
- // We treat the top of the to space as a queue of addresses of
- // promoted objects. The addresses of newly promoted and unswept
- // objects lie between a 'front' mark and a 'rear' mark that is
- // updated as a side effect of promoting an object.
- //
- // There is guaranteed to be enough room at the top of the to space
- // for the addresses of promoted objects: every object promoted
- // frees up its size in bytes from the top of the new space, and
- // objects are at least one pointer in size.
- Address new_space_front = new_space_->ToSpaceStart();
- promotion_queue_.Initialize();
-
- RootScavengeVisitor root_scavenge_visitor(this);
+ const int kScavengerTasks = 1;
+ const int kMainThreadId = 0;
+ CopiedList copied_list(kScavengerTasks);
+ PromotionList promotion_list(kScavengerTasks);
+ Scavenger scavenger(this, IsLogging(isolate()),
+ incremental_marking()->IsMarking(), &copied_list,
+ &promotion_list, kMainThreadId);
+ RootScavengeVisitor root_scavenge_visitor(this, &scavenger);
isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &IsUnmodifiedHeapObject);
+ &JSObject::IsUnmodifiedApiObject);
+
+ std::vector<MemoryChunk*> pages;
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ this, [&pages](MemoryChunk* chunk) { pages.push_back(chunk); });
{
// Copy roots.
@@ -1739,23 +1756,29 @@ void Heap::Scavenge() {
{
// Copy objects reachable from the old generation.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::Iterate(
- this, SYNCHRONIZED, [this](Address addr) {
- return Scavenger::CheckAndScavengeObject(this, addr);
- });
-
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- this, SYNCHRONIZED,
- [this](SlotType type, Address host_addr, Address addr) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate(), type, addr, [this](Object** addr) {
- // We expect that objects referenced by code are long living.
- // If we do not force promotion, then we need to clear
- // old_to_new slots in dead code objects after mark-compact.
- return Scavenger::CheckAndScavengeObject(
- this, reinterpret_cast<Address>(addr));
- });
- });
+
+ for (MemoryChunk* chunk : pages) {
+ base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk,
+ [this, &scavenger](Address addr) {
+ return scavenger.CheckAndScavengeObject(this, addr);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk,
+ [this, &scavenger](SlotType type, Address host_addr, Address addr) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate(), type, addr, [this, &scavenger](Object** addr) {
+ // We expect that objects referenced by code are long
+ // living. If we do not force promotion, then we need to
+ // clear old_to_new slots in dead code objects after
+ // mark-compact.
+ return scavenger.CheckAndScavengeObject(
+ this, reinterpret_cast<Address>(addr));
+ });
+ });
+ }
}
{
@@ -1764,19 +1787,8 @@ void Heap::Scavenge() {
}
{
- // Copy objects reachable from the code flushing candidates list.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
- MarkCompactCollector* collector = mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- collector->code_flusher()->VisitListHeads(&root_scavenge_visitor);
- collector->code_flusher()
- ->IteratePointersToFromSpace<StaticScavengeVisitor>();
- }
- }
-
- {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
- new_space_front = DoScavenge(new_space_front);
+ scavenger.Process();
}
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
@@ -1784,25 +1796,27 @@ void Heap::Scavenge() {
isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
&root_scavenge_visitor);
- new_space_front = DoScavenge(new_space_front);
+ scavenger.Process();
+
+ scavenger.Finalize();
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
- promotion_queue_.Destroy();
-
- incremental_marking()->UpdateMarkingDequeAfterScavenge();
+ incremental_marking()->UpdateMarkingWorklistAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
ProcessYoungWeakReferences(&weak_object_retainer);
- DCHECK(new_space_front == new_space_->top());
-
// Set age mark.
new_space_->set_age_mark(new_space_->top());
ArrayBufferTracker::FreeDeadInNewSpace(this);
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ });
+
// Update how much has survived scavenge.
DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
@@ -1992,49 +2006,6 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.IterateAll(&external_string_table_visitor);
}
-Address Heap::DoScavenge(Address new_space_front) {
- do {
- SemiSpace::AssertValidRange(new_space_front, new_space_->top());
- // The addresses new_space_front and new_space_.top() define a
- // queue of unprocessed copied objects. Process them until the
- // queue is empty.
- while (new_space_front != new_space_->top()) {
- if (!Page::IsAlignedToPageSize(new_space_front)) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front +=
- StaticScavengeVisitor::IterateBody(object->map(), object);
- } else {
- new_space_front = Page::FromAllocationAreaAddress(new_space_front)
- ->next_page()
- ->area_start();
- }
- }
-
- // Promote and process all the to-be-promoted objects.
- {
- while (!promotion_queue()->is_empty()) {
- HeapObject* target;
- int32_t size;
- promotion_queue()->remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during old space pointer iteration. Thus we search specifically
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- DCHECK(!target->IsMap());
-
- IterateAndScavengePromotedObject(target, static_cast<int>(size));
- }
- }
-
- // Take another spin if there are now unswept objects in new space
- // (there are currently no more unswept promoted objects).
- } while (new_space_front != new_space_->top());
-
- return new_space_front;
-}
-
-
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
@@ -2122,29 +2093,28 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
- reinterpret_cast<Map*>(result)->set_map_after_allocation(
- reinterpret_cast<Map*>(root(kMetaMapRootIndex)), SKIP_WRITE_BARRIER);
- reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
- reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+ Map* map = reinterpret_cast<Map*>(result);
+ map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
+ SKIP_WRITE_BARRIER);
+ map->set_instance_type(instance_type);
+ map->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
- reinterpret_cast<Map*>(result)->set_visitor_id(
- StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
if (FLAG_unbox_double_fields) {
- reinterpret_cast<Map*>(result)
- ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- reinterpret_cast<Map*>(result)->clear_unused();
- reinterpret_cast<Map*>(result)
- ->set_inobject_properties_or_constructor_function_index(0);
- reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
- reinterpret_cast<Map*>(result)->set_bit_field(0);
- reinterpret_cast<Map*>(result)->set_bit_field2(0);
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ // GetVisitorId requires a properly initialized LayoutDescriptor.
+ map->set_visitor_id(Map::GetVisitorId(map));
+ map->clear_unused();
+ map->set_inobject_properties_or_constructor_function_index(0);
+ map->set_unused_property_fields(0);
+ map->set_bit_field(0);
+ map->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
- reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
- reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
- return result;
+ map->set_bit_field3(bit_field3);
+ map->set_weak_cell_cache(Smi::kZero);
+ return map;
}
@@ -2176,7 +2146,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
- map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
+ map->set_visitor_id(Map::GetVisitorId(map));
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -2270,7 +2240,7 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
- fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
+ fixed_array_map()->set_elements_kind(HOLEY_ELEMENTS);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
@@ -2345,7 +2315,7 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
- fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
+ fixed_cow_array_map()->set_elements_kind(HOLEY_ELEMENTS);
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
@@ -2396,10 +2366,13 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
- fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
+ ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
@@ -2485,6 +2458,12 @@ bool Heap::CreateInitialMaps() {
set_empty_byte_array(byte_array);
}
+ {
+ PropertyArray* property_array;
+ if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
+ set_empty_property_array(property_array);
+ }
+
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
FixedTypedArrayBase* obj; \
@@ -2534,7 +2513,8 @@ AllocationResult Heap::AllocateCell(Object* value) {
return result;
}
-AllocationResult Heap::AllocatePropertyCell() {
+AllocationResult Heap::AllocatePropertyCell(Name* name) {
+ DCHECK(name->IsUniqueName());
int size = PropertyCell::kSize;
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
@@ -2548,6 +2528,7 @@ AllocationResult Heap::AllocatePropertyCell() {
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_property_details(PropertyDetails(Smi::kZero));
+ cell->set_name(name);
cell->set_value(the_hole_value());
return result;
}
@@ -2625,6 +2606,9 @@ void Heap::CreateFixedStubs() {
  // This eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(isolate());
// Create stubs that should be there, so we don't unexpectedly have to
// create them if we need them during the creation of another stub.
@@ -2731,10 +2715,6 @@ void Heap::CreateInitialObjects() {
// expanding the dictionary during bootstrapping.
set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
- set_instanceof_cache_function(Smi::kZero);
- set_instanceof_cache_map(Smi::kZero);
- set_instanceof_cache_answer(Smi::kZero);
-
{
HandleScope scope(isolate());
#define SYMBOL_INIT(name) \
@@ -2768,14 +2748,14 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
}
- Handle<NameDictionary> empty_properties_dictionary =
- NameDictionary::NewEmpty(isolate(), TENURED);
- empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
- set_empty_properties_dictionary(*empty_properties_dictionary);
+ Handle<NameDictionary> empty_property_dictionary =
+ NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+ set_empty_property_dictionary(*empty_property_dictionary);
- set_public_symbol_table(*empty_properties_dictionary);
- set_api_symbol_table(*empty_properties_dictionary);
- set_api_private_symbol_table(*empty_properties_dictionary);
+ set_public_symbol_table(*empty_property_dictionary);
+ set_api_symbol_table(*empty_property_dictionary);
+ set_api_private_symbol_table(*empty_property_dictionary);
set_number_string_cache(
*factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
@@ -2813,9 +2793,7 @@ void Heap::CreateInitialObjects() {
set_detached_contexts(empty_fixed_array());
set_retained_maps(ArrayList::cast(empty_fixed_array()));
- set_weak_object_to_code_table(
- *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
- TENURED));
+ set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
set_weak_new_space_object_to_code_list(
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
@@ -2826,7 +2804,9 @@ void Heap::CreateInitialObjects() {
set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::NewEmpty(isolate(), TENURED);
+ SeededNumberDictionary::New(isolate(), 1, TENURED,
+ USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
@@ -2836,20 +2816,30 @@ void Heap::CreateInitialObjects() {
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
set_next_template_serial_number(Smi::kZero);
+ // Allocate the empty OrderedHashTable.
+ Handle<FixedArray> empty_ordered_hash_table =
+ factory->NewFixedArray(OrderedHashMap::kHashTableStartIndex, TENURED);
+ empty_ordered_hash_table->set_map_no_write_barrier(
+ *factory->ordered_hash_table_map());
+ for (int i = 0; i < empty_ordered_hash_table->length(); ++i) {
+ empty_ordered_hash_table->set(i, Smi::kZero);
+ }
+ set_empty_ordered_hash_table(*empty_ordered_hash_table);
+
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
set_empty_script(*script);
- Handle<PropertyCell> cell = factory->NewPropertyCell();
+ Handle<PropertyCell> cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_protector(*cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(the_hole_value());
set_empty_property_cell(*cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_iterator_protector(*cell);
@@ -2857,11 +2847,11 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
- Handle<Cell> species_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_species_protector(*species_cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_species_protector(*cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_length_protector(*cell);
@@ -2869,7 +2859,7 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_fast_array_iteration_protector(*fast_array_iteration_cell);
- cell = factory->NewPropertyCell();
+ cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
@@ -2929,9 +2919,6 @@ void Heap::CreateInitialObjects() {
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kNumberStringCacheRootIndex:
- case kInstanceofCacheFunctionRootIndex:
- case kInstanceofCacheMapRootIndex:
- case kInstanceofCacheAnswerRootIndex:
case kCodeStubsRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
@@ -2948,6 +2935,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kPublicSymbolTableRootIndex:
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
+ case kMessageListenersRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -2962,23 +2950,10 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
- return !RootCanBeWrittenAfterInitialization(root_index) &&
- !InNewSpace(root(root_index));
-}
-
-bool Heap::IsUnmodifiedHeapObject(Object** p) {
- Object* object = *p;
- if (object->IsSmi()) return false;
- HeapObject* heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
- JSObject* js_object = JSObject::cast(object);
- if (!js_object->WasConstructedFromApiFunction()) return false;
- Object* maybe_constructor = js_object->map()->GetConstructor();
- if (!maybe_constructor->IsJSFunction()) return false;
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
- if (js_object->elements()->length() != 0) return false;
-
- return constructor->initial_map() == heap_object->map();
+ bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
+ !InNewSpace(root(root_index));
+ DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
+ return can_be;
}
int Heap::FullSizeNumberStringCacheLength() {
@@ -3021,7 +2996,6 @@ Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
default:
UNREACHABLE();
- return kUndefinedValueRootIndex;
}
}
@@ -3037,12 +3011,10 @@ Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
#undef ELEMENT_KIND_TO_ROOT_INDEX
default:
UNREACHABLE();
- return kUndefinedValueRootIndex;
}
}
-
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
return FixedTypedArrayBase::cast(
roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
}
@@ -3060,6 +3032,45 @@ AllocationResult Heap::AllocateForeign(Address address,
return result;
}
+AllocationResult Heap::AllocateSmallOrderedHashSet(int capacity,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
+ CHECK_GE(SmallOrderedHashSet::kMaxCapacity, capacity);
+
+ int size = SmallOrderedHashSet::Size(capacity);
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_after_allocation(small_ordered_hash_set_map(),
+ SKIP_WRITE_BARRIER);
+ Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result));
+ table->Initialize(isolate(), capacity);
+ return result;
+}
+
+AllocationResult Heap::AllocateSmallOrderedHashMap(int capacity,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
+ CHECK_GE(SmallOrderedHashMap::kMaxCapacity, capacity);
+
+ int size = SmallOrderedHashMap::Size(capacity);
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_after_allocation(small_ordered_hash_map_map(),
+ SKIP_WRITE_BARRIER);
+ Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result));
+ table->Initialize(isolate(), capacity);
+ return result;
+}
AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
@@ -3130,7 +3141,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
SKIP_WRITE_BARRIER);
- FreeSpace::cast(filler)->nobarrier_set_size(size);
+ FreeSpace::cast(filler)->relaxed_write_size(size);
}
if (mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);
@@ -3172,9 +3183,14 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
+ ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object))) {
DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
+#ifdef V8_CONCURRENT_MARKING
+ MarkingState::Internal(object).IncrementLiveBytes<AccessMode::ATOMIC>(by);
+#else
MarkingState::Internal(object).IncrementLiveBytes(by);
+#endif
}
}
@@ -3206,14 +3222,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
Address old_start = object->address();
Address new_start = old_start + bytes_to_trim;
- // Transfer the mark bits to their new location if the object is not within
- // a black area.
- if (!incremental_marking()->black_allocation() ||
- !Marking::IsBlack(ObjectMarking::MarkBitFrom(
- HeapObject::FromAddress(new_start),
- MarkingState::Internal(HeapObject::FromAddress(new_start))))) {
- incremental_marking()->TransferMark(this, object,
- HeapObject::FromAddress(new_start));
+ if (incremental_marking()->IsMarking()) {
+ incremental_marking()->NotifyLeftTrimming(
+ object, HeapObject::FromAddress(new_start));
}
// Technically in new space this write might be omitted (except for
@@ -3224,10 +3235,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
- Object** former_start = HeapObject::RawField(object, 0);
- int new_start_index = elements_to_trim * (element_size / kPointerSize);
- former_start[new_start_index] = map;
- former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+ RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
+ RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
+ Smi::FromInt(len - elements_to_trim));
FixedArrayBase* new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
@@ -3294,7 +3304,8 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
- ObjectMarking::IsBlackOrGrey(filler, MarkingState::Internal(filler))) {
+ ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
+ filler, MarkingState::Internal(filler))) {
Page* page = Page::FromAddress(new_end);
MarkingState::Internal(page).bitmap()->ClearRange(
page->AddressToMarkbitIndex(new_end),
@@ -3424,8 +3435,6 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
- code->set_gc_metadata(Smi::kZero);
- code->set_ic_age(global_ic_age_);
return code;
}
@@ -3453,7 +3462,7 @@ AllocationResult Heap::CopyCode(Code* code) {
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
- incremental_marking()->IterateBlackObject(new_code);
+ incremental_marking()->ProcessBlackAllocatedObject(new_code);
// Record all references to embedded objects in the new code object.
RecordWritesIntoCode(new_code);
return new_code;
@@ -3505,8 +3514,10 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
HeapObject* result = nullptr;
AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
- // No need for write barrier since object is white and map is in old space.
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ // New space objects are allocated white.
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(map, write_barrier_mode);
if (allocation_site != NULL) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
@@ -3515,10 +3526,9 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
return result;
}
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
Map* map) {
- obj->set_properties(properties);
+ obj->set_raw_properties_or_hash(properties);
obj->initialize_elements();
// TODO(1240798): Initialize the object's body using valid initial values
// according to the object's initial map. For example, if the map's
@@ -3614,6 +3624,10 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
map->instance_type() == JS_ERROR_TYPE ||
map->instance_type() == JS_ARRAY_TYPE ||
map->instance_type() == JS_API_OBJECT_TYPE ||
+ map->instance_type() == WASM_INSTANCE_TYPE ||
+ map->instance_type() == WASM_MEMORY_TYPE ||
+ map->instance_type() == WASM_MODULE_TYPE ||
+ map->instance_type() == WASM_TABLE_TYPE ||
map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
int object_size = map->instance_size();
@@ -3640,7 +3654,6 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
FixedArrayBase* elem = nullptr;
@@ -3648,7 +3661,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
AllocationResult allocation;
if (elements->map() == fixed_cow_array_map()) {
allocation = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
+ } else if (source->HasDoubleElements()) {
allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
} else {
allocation = CopyFixedArray(FixedArray::cast(elements));
@@ -3657,14 +3670,28 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
}
JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
}
+
// Update properties if necessary.
- if (properties->length() > 0) {
+ if (source->HasFastProperties()) {
+ if (source->property_array()->length() > 0) {
+ PropertyArray* properties = source->property_array();
+ PropertyArray* prop = nullptr;
+ {
+ // TODO(gsathya): Do not copy hash code.
+ AllocationResult allocation = CopyPropertyArray(properties);
+ if (!allocation.To(&prop)) return allocation;
+ }
+ JSObject::cast(clone)->set_raw_properties_or_hash(prop,
+ SKIP_WRITE_BARRIER);
+ }
+ } else {
+ FixedArray* properties = FixedArray::cast(source->property_dictionary());
FixedArray* prop = nullptr;
{
AllocationResult allocation = CopyFixedArray(properties);
if (!allocation.To(&prop)) return allocation;
}
- JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
+ JSObject::cast(clone)->set_raw_properties_or_hash(prop, SKIP_WRITE_BARRIER);
}
// Return the new clone.
return clone;
@@ -3877,9 +3904,9 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
return AllocateFixedTypedArray(0, array_type, false, TENURED);
}
-
-AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
- PretenureFlag pretenure) {
+template <typename T>
+AllocationResult Heap::CopyArrayAndGrow(T* src, int grow_by,
+ PretenureFlag pretenure) {
int old_len = src->length();
int new_len = old_len + grow_by;
DCHECK(new_len >= old_len);
@@ -3889,8 +3916,8 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray* result = FixedArray::cast(obj);
+ obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+ T* result = T::cast(obj);
result->set_length(new_len);
// Copy the content.
@@ -3901,6 +3928,12 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
return result;
}
+template AllocationResult Heap::CopyArrayAndGrow(FixedArray* src, int grow_by,
+ PretenureFlag pretenure);
+template AllocationResult Heap::CopyArrayAndGrow(PropertyArray* src,
+ int grow_by,
+ PretenureFlag pretenure);
+
AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
PretenureFlag pretenure) {
if (new_len == 0) return empty_fixed_array();
@@ -3924,7 +3957,8 @@ AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
return result;
}
-AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+template <typename T>
+AllocationResult Heap::CopyArrayWithMap(T* src, Map* map) {
int len = src->length();
HeapObject* obj = nullptr;
{
@@ -3933,14 +3967,14 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- FixedArray* result = FixedArray::cast(obj);
+ T* result = T::cast(obj);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
// Eliminate the write barrier if possible.
if (mode == SKIP_WRITE_BARRIER) {
CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
- FixedArray::SizeFor(len) - kPointerSize);
+ T::SizeFor(len) - kPointerSize);
return obj;
}
@@ -3950,6 +3984,16 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
return result;
}
+template AllocationResult Heap::CopyArrayWithMap(FixedArray* src, Map* map);
+template AllocationResult Heap::CopyArrayWithMap(PropertyArray* src, Map* map);
+
+AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+ return CopyArrayWithMap(src, map);
+}
+
+AllocationResult Heap::CopyPropertyArray(PropertyArray* src) {
+ return CopyArrayWithMap(src, property_array_map());
+}
AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
Map* map) {
@@ -4007,12 +4051,23 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
return array;
}
+AllocationResult Heap::AllocatePropertyArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK(length >= 0);
+ DCHECK(!InNewSpace(undefined_value()));
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+ if (!allocation.To(&result)) return allocation;
+ }
-AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+ result->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
+ PropertyArray* array = PropertyArray::cast(result);
+ array->set_length(length);
+ MemsetPointer(array->data_start(), undefined_value(), length);
+ return result;
}
-
AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
@@ -4095,7 +4150,6 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
#undef MAKE_CASE
default:
UNREACHABLE();
- return exception();
}
int size = map->instance_size();
Struct* result = nullptr;
@@ -4195,7 +4249,7 @@ bool Heap::HasHighFragmentation(size_t used, size_t committed) {
bool Heap::ShouldOptimizeForMemoryUsage() {
return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
- HighMemoryPressure() || IsLowMemoryDevice();
+ HighMemoryPressure();
}
void Heap::ActivateMemoryReducerIfNeeded() {
@@ -4235,11 +4289,11 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- mark_compact_collector()->marking_deque()->IsEmpty() &&
+ mark_compact_collector()->marking_worklist()->IsEmpty() &&
local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarking(gc_reason);
} else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ (mark_compact_collector()->marking_worklist()->IsEmpty() &&
local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
@@ -4262,11 +4316,11 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
- // There might be grey objects due to black to grey transitions in
- // incremental marking. E.g. see VisitNativeContextIncremental.
- DCHECK(ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
- if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
- incremental_marking()->IterateBlackObject(obj);
+ // Objects can have any color because incremental marking can
+ // start in the middle of Heap::ReserveSpace().
+ if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ obj, MarkingState::Internal(obj))) {
+ incremental_marking()->ProcessBlackAllocatedObject(obj);
}
addr += obj->Size();
}
@@ -4278,7 +4332,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// Large object space doesn't use reservations, so it needs custom handling.
for (HeapObject* object : *large_objects) {
- incremental_marking()->IterateBlackObject(object);
+ incremental_marking()->ProcessBlackAllocatedObject(object);
}
}
@@ -4518,10 +4572,12 @@ void Heap::CheckMemoryPressure() {
GarbageCollectionReason::kMemoryPressure);
}
}
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyPossibleGarbage(event);
+ if (memory_reducer_) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kPossibleGarbage;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_->NotifyPossibleGarbage(event);
+ }
}
void Heap::CollectGarbageOnMemoryPressure() {
@@ -4701,7 +4757,6 @@ const char* Heap::GarbageCollectionReasonToString(
return "unknown";
}
UNREACHABLE();
- return "";
}
bool Heap::Contains(HeapObject* value) {
@@ -4743,7 +4798,6 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return lo_space_->Contains(value);
}
UNREACHABLE();
- return false;
}
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
@@ -4765,7 +4819,6 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return lo_space_->ContainsSlow(addr);
}
UNREACHABLE();
- return false;
}
@@ -4929,12 +4982,14 @@ template <RememberedSetType direction>
void CollectSlots(MemoryChunk* chunk, Address start, Address end,
std::set<Address>* untyped,
std::set<std::pair<SlotType, Address> >* typed) {
- RememberedSet<direction>::Iterate(chunk, [start, end, untyped](Address slot) {
- if (start <= slot && slot < end) {
- untyped->insert(slot);
- }
- return KEEP_SLOT;
- });
+ RememberedSet<direction>::Iterate(chunk,
+ [start, end, untyped](Address slot) {
+ if (start <= slot && slot < end) {
+ untyped->insert(slot);
+ }
+ return KEEP_SLOT;
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<direction>::IterateTyped(
chunk, [start, end, typed](SlotType type, Address host, Address slot) {
if (start <= slot && slot < end) {
@@ -4974,91 +5029,19 @@ void Heap::ZapFromSpace() {
}
}
-class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
- public:
- IterateAndScavengePromotedObjectsVisitor(Heap* heap, bool record_slots)
- : heap_(heap), record_slots_(record_slots) {}
-
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) override {
- Address slot_address = reinterpret_cast<Address>(start);
- Page* page = Page::FromAddress(slot_address);
-
- while (slot_address < reinterpret_cast<Address>(end)) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* target = *slot;
-
- if (target->IsHeapObject()) {
- if (heap_->InFromSpace(target)) {
- Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- target = *slot;
- if (heap_->InNewSpace(target)) {
- SLOW_DCHECK(heap_->InToSpace(target));
- SLOW_DCHECK(target->IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
- }
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target)));
- } else if (record_slots_ &&
- MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, slot, target);
- }
- }
-
- slot_address += kPointerSize;
- }
- }
-
- inline void VisitCodeEntry(JSFunction* host,
- Address code_entry_slot) override {
- // Black allocation requires us to process objects referenced by
- // promoted objects.
- if (heap_->incremental_marking()->black_allocation()) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- heap_->incremental_marking()->WhiteToGreyAndPush(code);
- }
- }
-
- private:
- Heap* heap_;
- bool record_slots_;
-};
-
-void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size) {
-  // We are not collecting slots on new space objects during mutation,
-  // thus we have to scan for pointers to evacuation candidates when we
-  // promote objects. But we should not record any slots in non-black
-  // objects. Grey objects' slots would be rescanned.
-  // A white object might not survive until the end of the collection;
-  // it would be a violation of the invariant to record its slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- record_slots =
- ObjectMarking::IsBlack(target, MarkingState::Internal(target));
- }
-
- IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
- if (target->IsJSFunction()) {
- // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
-    // these links are recorded during processing of weak lists.
- JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
- } else {
- target->IterateBody(target->map()->instance_type(), size, &visitor);
- }
-}
-
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
}
void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
+ const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
+ mode == VISIT_ALL_IN_MINOR_MC_MARK ||
+ mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+ if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.IterateAll(v);
}
@@ -5123,6 +5106,9 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
};
void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
+ const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
+ mode == VISIT_ALL_IN_MINOR_MC_MARK ||
+ mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -5153,7 +5139,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_MINOR_MC_UPDATE) {
+ if (!isMinorGC) {
isolate_->builtins()->IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
isolate_->interpreter()->IterateDispatchTable(v);
@@ -5173,8 +5159,11 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
case VISIT_ALL_IN_SCAVENGE:
isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
+ case VISIT_ALL_IN_MINOR_MC_MARK:
+      // Global handles are processed manually by the minor MC.
+ break;
case VISIT_ALL_IN_MINOR_MC_UPDATE:
- isolate_->global_handles()->IterateAllNewSpaceRoots(v);
+      // Global handles are processed manually by the minor MC.
break;
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
@@ -5184,7 +5173,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kGlobalHandles);
// Iterate over eternal handles.
- if (mode == VISIT_ALL_IN_SCAVENGE || mode == VISIT_ALL_IN_MINOR_MC_UPDATE) {
+ if (isMinorGC) {
isolate_->eternal_handles()->IterateNewSpaceRoots(v);
} else {
isolate_->eternal_handles()->IterateAllRoots(v);
@@ -5215,16 +5204,18 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
- size_t code_range_size) {
+bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
+ size_t max_old_generation_size_in_mb,
+ size_t code_range_size_in_mb) {
if (HasBeenSetUp()) return false;
// Overwrite default configuration.
- if (max_semi_space_size != 0) {
- max_semi_space_size_ = max_semi_space_size * MB;
+ if (max_semi_space_size_in_kb != 0) {
+ max_semi_space_size_ =
+ ROUND_UP(max_semi_space_size_in_kb * KB, Page::kPageSize);
}
- if (max_old_space_size != 0) {
- max_old_generation_size_ = max_old_space_size * MB;
+ if (max_old_generation_size_in_mb != 0) {
+ max_old_generation_size_ = max_old_generation_size_in_mb * MB;
}
// If max space size flags are specified overwrite the configuration.
@@ -5252,6 +5243,12 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
static_cast<uint32_t>(max_semi_space_size_));
+ if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
+ // Start with at least 1*MB semi-space on machines with a lot of memory.
+ initial_semispace_size_ =
+ Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
+ }
+
if (FLAG_min_semi_space_size > 0) {
size_t initial_semispace_size =
static_cast<size_t>(FLAG_min_semi_space_size) * MB;
@@ -5295,7 +5292,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
- code_range_size_ = code_range_size * MB;
+ code_range_size_ = code_range_size_in_mb * MB;
configured_ = true;
return true;
@@ -5427,8 +5424,11 @@ const double Heap::kTargetMutatorUtilization = 0.97;
// F * (1 - MU / (R * (1 - MU))) = 1
// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
// F = R * (1 - MU) / (R * (1 - MU) - MU)
-double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
- if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
+double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor) {
+ DCHECK(max_factor >= kMinHeapGrowingFactor);
+ DCHECK(max_factor <= kMaxHeapGrowingFactor);
+ if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
const double mu = kTargetMutatorUtilization;
@@ -5437,13 +5437,39 @@ double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
const double b = speed_ratio * (1 - mu) - mu;
// The factor is a / b, but we need to check for small b first.
- double factor =
- (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
- factor = Min(factor, kMaxHeapGrowingFactor);
+ double factor = (a < b * max_factor) ? a / b : max_factor;
+ factor = Min(factor, max_factor);
factor = Max(factor, kMinHeapGrowingFactor);
return factor;
}
+double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
+ const double min_small_factor = 1.3;
+ const double max_small_factor = 2.0;
+ const double high_factor = 4.0;
+
+ size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
+ max_old_generation_size_in_mb =
+ Max(max_old_generation_size_in_mb,
+ static_cast<size_t>(kMinOldGenerationSize));
+
+ // If we are on a device with lots of memory, we allow a high heap
+ // growing factor.
+ if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
+ return high_factor;
+ }
+
+ DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
+ DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
+
+ // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
+ double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
+ (max_small_factor - min_small_factor) /
+ (kMaxOldGenerationSize - kMinOldGenerationSize) +
+ min_small_factor;
+ return factor;
+}
+
size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
size_t old_gen_size) {
CHECK(factor > 1.0);
@@ -5468,7 +5494,8 @@ size_t Heap::MinimumAllocationLimitGrowingStep() {
void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed) {
- double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+ double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
if (FLAG_trace_gc_verbose) {
isolate_->PrintWithTimestamp(
@@ -5478,10 +5505,6 @@ void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
mutator_speed);
}
- if (IsMemoryConstrainedDevice()) {
- factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
- }
-
if (memory_reducer_->ShouldGrowHeapSlowly() ||
ShouldOptimizeForMemoryUsage()) {
factor = Min(factor, kConservativeHeapGrowingFactor);
@@ -5508,7 +5531,8 @@ void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
double gc_speed,
double mutator_speed) {
- double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+ double max_factor = MaxHeapGrowingFactor(max_old_generation_size_);
+ double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (limit < old_generation_allocation_limit_) {
if (FLAG_trace_gc_verbose) {
@@ -5563,9 +5587,15 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
- if (!incremental_marking()->CanBeActivated() || always_allocate() ||
- PromotedSpaceSizeOfObjects() <=
- IncrementalMarking::kActivationThreshold) {
+ if (!incremental_marking()->CanBeActivated() || always_allocate()) {
+ // Incremental marking is disabled or it is too early to start.
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+ if (FLAG_stress_incremental_marking) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ if (PromotedSpaceSizeOfObjects() <=
+ IncrementalMarking::kActivationThreshold) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -5615,16 +5645,6 @@ void Heap::DisableInlineAllocation() {
}
}
-
-V8_DECLARE_ONCE(initialize_gc_once);
-
-static void InitializeGCOnce() {
- Scavenger::Initialize();
- StaticScavengeVisitor::Initialize();
- MarkCompactCollector::Initialize();
-}
-
-
bool Heap::SetUp() {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
@@ -5642,7 +5662,9 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
- base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
+ mmap_region_base_ =
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ ~kMmapRegionMask;
// Set up memory allocator.
memory_allocator_ = new MemoryAllocator(isolate_);
@@ -5688,15 +5710,16 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
- scavenge_collector_ = new Scavenger(this);
mark_compact_collector_ = new MarkCompactCollector(this);
- incremental_marking_->set_marking_deque(
- mark_compact_collector_->marking_deque());
+ incremental_marking_->set_marking_worklist(
+ mark_compact_collector_->marking_worklist());
#ifdef V8_CONCURRENT_MARKING
- concurrent_marking_ =
- new ConcurrentMarking(this, mark_compact_collector_->marking_deque());
+ MarkCompactCollector::MarkingWorklist* marking_worklist =
+ mark_compact_collector_->marking_worklist();
+ concurrent_marking_ = new ConcurrentMarking(this, marking_worklist->shared(),
+ marking_worklist->bailout());
#else
- concurrent_marking_ = new ConcurrentMarking(this, nullptr);
+ concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr);
#endif
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
@@ -5722,6 +5745,9 @@ bool Heap::SetUp() {
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ SetGetExternallyAllocatedMemoryInBytesCallback(
+ DefaultGetExternallyAllocatedMemoryInBytesCallback);
+
return true;
}
@@ -5769,7 +5795,7 @@ void Heap::ClearStackLimits() {
roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
-void Heap::PrintAlloctionsHash() {
+void Heap::PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
@@ -5824,7 +5850,6 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
- use_tasks_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -5834,16 +5859,13 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
if (FLAG_verify_predictable) {
- PrintAlloctionsHash();
+ PrintAllocationsHash();
}
new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
delete idle_scavenge_observer_;
idle_scavenge_observer_ = nullptr;
- delete scavenge_collector_;
- scavenge_collector_ = nullptr;
-
if (mark_compact_collector_ != nullptr) {
mark_compact_collector_->TearDown();
delete mark_compact_collector_;
@@ -6256,18 +6278,34 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
~UnreachableObjectsFilter() {
- heap_->mark_compact_collector()->ClearMarkbits();
+ for (auto it : reachable_) {
+ delete it.second;
+ it.second = nullptr;
+ }
}
bool SkipObject(HeapObject* object) {
if (object->IsFiller()) return true;
- return ObjectMarking::IsWhite(object, MarkingState::Internal(object));
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (reachable_.count(chunk) == 0) return true;
+ return reachable_[chunk]->count(object) == 0;
}
private:
+ bool MarkAsReachable(HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (reachable_.count(chunk) == 0) {
+ reachable_[chunk] = new std::unordered_set<HeapObject*>();
+ }
+ if (reachable_[chunk]->count(object)) return false;
+ reachable_[chunk]->insert(object);
+ return true;
+ }
+
class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
- MarkingVisitor() : marking_stack_(10) {}
+ explicit MarkingVisitor(UnreachableObjectsFilter* filter)
+ : filter_(filter), marking_stack_(10) {}
void VisitPointers(HeapObject* host, Object** start,
Object** end) override {
@@ -6290,27 +6328,26 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- // Use Marking instead of ObjectMarking to avoid adjusting live bytes
- // counter.
- MarkBit mark_bit =
- ObjectMarking::MarkBitFrom(obj, MarkingState::Internal(obj));
- if (Marking::IsWhite(mark_bit)) {
- Marking::WhiteToBlack(mark_bit);
+ if (filter_->MarkAsReachable(obj)) {
marking_stack_.Add(obj);
}
}
}
+ UnreachableObjectsFilter* filter_;
List<HeapObject*> marking_stack_;
};
+ friend class MarkingVisitor;
+
void MarkReachableObjects() {
- MarkingVisitor visitor;
+ MarkingVisitor visitor(this);
heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
Heap* heap_;
DisallowHeapAllocation no_allocation_;
+ std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
};
HeapIterator::HeapIterator(Heap* heap,
@@ -6472,6 +6509,9 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
+void Heap::AgeInlineCaches() {
+ global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+}
void Heap::RegisterStrongRoots(Object** start, Object** end) {
StrongRootsList* list = new StrongRootsList();
@@ -6559,12 +6599,6 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return false;
}
-
-// static
-int Heap::GetStaticVisitorIdForMap(Map* map) {
- return StaticVisitorBase::GetVisitorId(map);
-}
-
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
@@ -6583,5 +6617,66 @@ const char* AllocationSpaceName(AllocationSpace space) {
return NULL;
}
+void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
+ Object** end) {
+ VerifyPointers(start, end);
+}
+
+void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ VerifyPointers(start, end);
+}
+
+void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ CHECK(object->GetIsolate()->heap()->Contains(object));
+ CHECK(object->map()->IsMap());
+ } else {
+ CHECK((*current)->IsSmi());
+ }
+ }
+}
+
+void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ for (Object** current = start; current < end; current++) {
+ CHECK((*current)->IsSmi());
+ }
+}
+
+bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
+ // Object migration is governed by the following rules:
+ //
+ // 1) Objects in new-space can be migrated to the old space
+ // that matches their target space or they stay in new-space.
+ // 2) Objects in old-space stay in the same space when migrating.
+ // 3) Fillers (two or more words) can migrate due to left-trimming of
+ // fixed arrays in new-space or old space.
+  // 4) Fillers (one word) can never migrate; they are skipped by
+  //    incremental marking explicitly to prevent an invalid pattern.
+ //
+ // Since this function is used for debugging only, we do not place
+ // asserts here, but check everything explicitly.
+ if (obj->map() == one_pointer_filler_map()) return false;
+ InstanceType type = obj->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ AllocationSpace src = chunk->owner()->identity();
+ switch (src) {
+ case NEW_SPACE:
+ return dst == src || dst == OLD_SPACE;
+ case OLD_SPACE:
+ return dst == src &&
+ (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
+ case CODE_SPACE:
+ return dst == src && type == CODE_TYPE;
+ case MAP_SPACE:
+ case LO_SPACE:
+ return false;
+ }
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7f213eff27..b579c0288a 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -91,6 +91,8 @@ using v8::MemoryPressureLevel;
V(Map, ordered_hash_table_map, OrderedHashTableMap) \
V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
+ V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
+ V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
@@ -98,6 +100,7 @@ using v8::MemoryPressureLevel;
V(Map, no_closures_cell_map, NoClosuresCellMap) \
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
+ V(Map, property_array_map, PropertyArrayMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -149,6 +152,7 @@ using v8::MemoryPressureLevel;
V(Map, optimized_out_map, OptimizedOutMap) \
V(Map, stale_register_map, StaleRegisterMap) \
/* Canonical empty values */ \
+ V(PropertyArray, empty_property_array, EmptyPropertyArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
@@ -165,13 +169,14 @@ using v8::MemoryPressureLevel;
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
+ V(FixedArray, empty_ordered_hash_table, EmptyOrderedHashTable) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
/* Protectors */ \
V(PropertyCell, array_protector, ArrayProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(Cell, species_protector, SpeciesProtector) \
+ V(PropertyCell, species_protector, SpeciesProtector) \
V(PropertyCell, string_length_protector, StringLengthProtector) \
V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
@@ -188,11 +193,8 @@ using v8::MemoryPressureLevel;
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- V(Object, instanceof_cache_function, InstanceofCacheFunction) \
- V(Object, instanceof_cache_map, InstanceofCacheMap) \
- V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
/* Lists and dictionaries */ \
- V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
+ V(NameDictionary, empty_property_dictionary, EmptyPropertiesDictionary) \
V(NameDictionary, public_symbol_table, PublicSymbolTable) \
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
@@ -315,6 +317,9 @@ using v8::MemoryPressureLevel;
V(OnePointerFillerMap) \
V(OptimizedOut) \
V(OrderedHashTableMap) \
+ V(PropertyArrayMap) \
+ V(SmallOrderedHashMapMap) \
+ V(SmallOrderedHashSetMap) \
V(ScopeInfoMap) \
V(ScriptContextMap) \
V(SharedFunctionInfoMap) \
@@ -338,6 +343,12 @@ using v8::MemoryPressureLevel;
V(WithContextMap) \
PRIVATE_SYMBOL_LIST(V)
+#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
+ do { \
+ heap->RecordFixedArrayElements(array, start, length); \
+ heap->incremental_marking()->RecordWrites(array); \
+ } while (false)
+
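A hedged sketch of how the new write-barrier macro above might be used after bulk-writing elements; the CopyElements helper is an illustrative assumption, not part of this patch:

    void CopyElements(Heap* heap, FixedArray* dst, int dst_start,
                      FixedArray* src, int src_start, int len) {
      for (int i = 0; i < len; i++) {
        dst->set(dst_start + i, src->get(src_start + i), SKIP_WRITE_BARRIER);
      }
      // One barrier for the whole range: records the updated slots and,
      // via RecordWrites, re-visits the array under incremental marking.
      FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, dst, dst_start, len);
    }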
// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
@@ -414,57 +425,6 @@ enum class YoungGenerationHandling {
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
-// A queue of objects promoted during scavenge. Each object is accompanied by
-// its size to avoid dereferencing a map pointer for scanning. The last page in
-// to-space is used for the promotion queue. On conflict during scavenge, the
-// promotion queue is allocated externally and all entries are copied to the
-// external queue.
-class PromotionQueue {
- public:
- explicit PromotionQueue(Heap* heap)
- : front_(nullptr),
- rear_(nullptr),
- limit_(nullptr),
- emergency_stack_(nullptr),
- heap_(heap) {}
-
- void Initialize();
- void Destroy();
-
- inline void SetNewLimit(Address limit);
- inline bool IsBelowPromotionQueue(Address to_space_top);
-
- inline void insert(HeapObject* target, int32_t size);
- inline void remove(HeapObject** target, int32_t* size);
-
- bool is_empty() {
- return (front_ == rear_) &&
- (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
- }
-
- private:
- struct Entry {
- Entry(HeapObject* obj, int32_t size) : obj_(obj), size_(size) {}
-
- HeapObject* obj_;
- int32_t size_;
- };
-
- inline Page* GetHeadPage();
-
- void RelocateQueueHead();
-
- // The front of the queue is higher in the memory page chain than the rear.
- struct Entry* front_;
- struct Entry* rear_;
- struct Entry* limit_;
-
- List<Entry>* emergency_stack_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
-};
-
class AllocationResult {
public:
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
@@ -613,19 +573,16 @@ class Heap {
static const int kPointerMultiplier = i::kPointerSize / 4;
#endif
- // The new space size has to be a power of 2. Sizes are in MB.
- static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
+ // Semi-space size needs to be a multiple of page size.
+ static const int kMinSemiSpaceSizeInKB =
+ 1 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
+ static const int kMaxSemiSpaceSizeInKB =
+ 16 * kPointerMultiplier * ((1 << kPageSizeBits) / KB);
// The old space size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
- static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeMediumMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;
+ static const int kMinOldGenerationSize = 128 * kPointerMultiplier;
+ static const int kMaxOldGenerationSize = 1024 * kPointerMultiplier;
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -683,8 +640,6 @@ class Heap {
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
- static bool IsUnmodifiedHeapObject(Object** p);
-
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
@@ -718,17 +673,16 @@ class Heap {
return "Unknown collector";
}
+ V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
+ size_t max_old_generation_size);
V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
- double mutator_speed);
+ double mutator_speed,
+ double max_factor);
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- // Determines a static visitor id based on the given {map} that can then be
- // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
- static int GetStaticVisitorIdForMap(Map* map);
-
// Notifies the heap that is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
@@ -738,9 +692,6 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
- // Clear the Instanceof cache (used when a prototype changes).
- inline void ClearInstanceofCache();
-
// FreeSpace objects have a null map after deserialization. Update the map.
void RepairFreeListsAfterDeserialization();
@@ -813,7 +764,7 @@ class Heap {
  // Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
- inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+ bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
void CheckHandleCount();
@@ -831,7 +782,7 @@ class Heap {
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
template <FindMementoMode mode>
- inline AllocationMemento* FindAllocationMemento(HeapObject* object);
+ inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, List<Address>* maps);
@@ -864,16 +815,12 @@ class Heap {
// An object should be promoted if the object has survived a
// scavenge operation.
- inline bool ShouldBePromoted(Address old_address, int object_size);
+ inline bool ShouldBePromoted(Address old_address);
void ClearNormalizedMapCaches();
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
-
inline uint32_t HashSeed();
inline int NextScriptId();
@@ -896,9 +843,7 @@ class Heap {
// disposal. We use it to flush inline caches.
int global_ic_age() { return global_ic_age_; }
- void AgeInlineCaches() {
- global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
- }
+ void AgeInlineCaches();
int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
@@ -950,14 +895,6 @@ class Heap {
bool ShouldOptimizeForMemoryUsage();
- bool IsLowMemoryDevice() {
- return max_old_generation_size_ <= kMaxOldSpaceSizeLowMemoryDevice;
- }
-
- bool IsMemoryConstrainedDevice() {
- return max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice;
- }
-
bool HighMemoryPressure() {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
@@ -990,10 +927,14 @@ class Heap {
// Initialization. ===========================================================
// ===========================================================================
- // Configure heap size in MB before setup. Return false if the heap has been
- // set up already.
- bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
- size_t code_range_size);
+ // Configure heap sizes
+ // max_semi_space_size_in_kb: maximum semi-space size in KB
+ // max_old_generation_size_in_mb: maximum old generation size in MB
+ // code_range_size_in_mb: code range size in MB
+ // Return false if the heap has been set up already.
+ bool ConfigureHeap(size_t max_semi_space_size_in_kb,
+ size_t max_old_generation_size_in_mb,
+ size_t code_range_size_in_mb);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
@@ -1016,8 +957,6 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
- bool use_tasks() const { return use_tasks_; }
-
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -1044,8 +983,6 @@ class Heap {
MemoryAllocator* memory_allocator() { return memory_allocator_; }
- PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
@@ -1138,7 +1075,7 @@ class Heap {
RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
- FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
+ FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
void RegisterStrongRoots(Object** start, Object** end);
void UnregisterStrongRoots(Object** start);
@@ -1161,7 +1098,7 @@ class Heap {
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
- inline bool CollectGarbage(
+ bool CollectGarbage(
AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -1179,6 +1116,14 @@ class Heap {
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();
+ typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
+ GetExternallyAllocatedMemoryInBytesCallback;
+
+ void SetGetExternallyAllocatedMemoryInBytesCallback(
+ GetExternallyAllocatedMemoryInBytesCallback callback) {
+ external_memory_callback_ = callback;
+ }
+
// Invoked when GC was requested via the stack guard.
void HandleGCRequest();
@@ -1196,9 +1141,6 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(RootVisitor* v, VisitMode mode);
- // Iterate pointers of promoted objects.
- void IterateAndScavengePromotedObject(HeapObject* target, int size);
-
// ===========================================================================
// Store buffer API. =========================================================
// ===========================================================================
@@ -1350,6 +1292,30 @@ class Heap {
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ static size_t ComputeMaxOldGenerationSize(uint64_t physical_memory) {
+ const int old_space_physical_memory_factor = 4;
+ int computed_size =
+ static_cast<int>(physical_memory / i::MB /
+ old_space_physical_memory_factor * kPointerMultiplier);
+ return Max(Min(computed_size, kMaxOldGenerationSize),
+ kMinOldGenerationSize);
+ }
+
+ static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
+ const uint64_t min_physical_memory = 512 * MB;
+ const uint64_t max_physical_memory = 2 * static_cast<uint64_t>(GB);
+
+ uint64_t capped_physical_memory =
+ Max(Min(physical_memory, max_physical_memory), min_physical_memory);
+ // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
+ int semi_space_size_in_kb =
+ static_cast<int>(((capped_physical_memory - min_physical_memory) *
+ (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
+ (max_physical_memory - min_physical_memory) +
+ kMinSemiSpaceSizeInKB);
+ return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
+ }
+
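A hedged worked example for the two sizing helpers above; the concrete numbers assume a 64-bit build (kPointerMultiplier == 2) and 512 KB pages, both assumptions rather than facts stated in this patch:

    // Heap::ComputeMaxOldGenerationSize(4ull * GB)
    //   = clamp(4096 MB / 4 * 2, /*min*/ 256 MB, /*max*/ 2048 MB) = 2048 MB.
    // Heap::ComputeMaxSemiSpaceSize(1ull * GB), with kMin = 1024 KB and
    // kMax = 16384 KB:
    //   (1024 MB - 512 MB) / (2048 MB - 512 MB) * (16384 - 1024) + 1024
    //   = 6144 KB, already page-aligned, i.e. about 6 MB per semi-space.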
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
size_t Capacity();
@@ -1502,7 +1468,7 @@ class Heap {
  // in the hash map is created. Otherwise the entry (including the count
// value) is cached on the local pretenuring feedback.
template <UpdateAllocationSiteMode mode>
- inline void UpdateAllocationSite(HeapObject* object,
+ inline void UpdateAllocationSite(Map* map, HeapObject* object,
base::HashMap* pretenuring_feedback);
// Removes an entry from the global pretenuring storage.
@@ -1532,6 +1498,25 @@ class Heap {
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
#endif
+ void* GetRandomMmapAddr() {
+ void* result = base::OS::GetRandomMmapAddr();
+#if V8_TARGET_ARCH_X64
+#if V8_OS_MACOSX
+ // The Darwin kernel [as of macOS 10.12.5] does not clean up page
+ // directory entries [PDE] created from mmap or mach_vm_allocate, even
+ // after the region is destroyed. Using a virtual address space that is
+ // too large causes a leak of about 1 wired [can never be paged out] page
+ // per call to mmap(). The page is only reclaimed when the process is
+ // killed. Confine the hint to a 32-bit section of the virtual address
+ // space. See crbug.com/700928.
+ uintptr_t offset =
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
+ kMmapRegionMask;
+ result = reinterpret_cast<void*>(mmap_region_base_ + offset);
+#endif // V8_OS_MACOSX
+#endif // V8_TARGET_ARCH_X64
+ return result;
+ }
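A hedged numeric illustration of the masking above (the hint value is invented for illustration):

    // kMmapRegionMask == 0xFFFFFFFF, so with e.g.
    //   hint   = 0x00007f3a1c2d4000
    //   offset = hint & kMmapRegionMask = 0x1c2d4000
    //   result = mmap_region_base_ + 0x1c2d4000
    // Every hint therefore lands in the same 4 GB window above
    // mmap_region_base_, bounding the number of distinct page-directory
    // entries the kernel has to create.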
static const char* GarbageCollectionReasonToString(
GarbageCollectionReason gc_reason);
@@ -1647,6 +1632,10 @@ class Heap {
return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
}
+ static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
+ return 0;
+ }
+
#define ROOT_ACCESSOR(type, name, camel_name) \
inline void set_##name(type* value);
ROOT_LIST(ROOT_ACCESSOR)
@@ -1687,14 +1676,6 @@ class Heap {
// over all objects. May cause a GC.
void MakeHeapIterable();
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- bool CollectGarbage(
- GarbageCollector collector, GarbageCollectionReason gc_reason,
- const char* collector_reason,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
@@ -1705,8 +1686,7 @@ class Heap {
inline void UpdateOldSpaceLimits();
// Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
- Map* map);
+ void InitializeJSObjectFromMap(JSObject* obj, Object* properties, Map* map);
// Initializes JSObject body starting at given offset.
void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
@@ -1774,7 +1754,7 @@ class Heap {
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
- void PrintAlloctionsHash();
+ void PrintAllocationsHash();
void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);
@@ -1832,8 +1812,6 @@ class Heap {
void Scavenge();
void EvacuateYoungGeneration();
- Address DoScavenge(Address new_space_front);
-
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -1991,8 +1969,17 @@ class Heap {
CopyBytecodeArray(BytecodeArray* bytecode_array);
// Allocates a fixed array initialized with undefined values
+ MUST_USE_RESULT inline AllocationResult AllocateFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a property array initialized with undefined values
MUST_USE_RESULT AllocationResult
- AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ AllocatePropertyArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashSet(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashMap(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
@@ -2064,8 +2051,13 @@ class Heap {
MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
// Make a copy of src, also grow the copy, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
+ template <typename T>
+ MUST_USE_RESULT AllocationResult CopyArrayAndGrow(T* src, int grow_by,
+ PretenureFlag pretenure);
+
+ // Make a copy of src, also grow the copy, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyPropertyArrayAndGrow(
+ PropertyArray* src, int grow_by, PretenureFlag pretenure);
// Make a copy of src, also grow the copy, and return the copy.
MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
@@ -2073,8 +2065,15 @@ class Heap {
PretenureFlag pretenure);
// Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyFixedArrayWithMap(FixedArray* src, Map* map);
+ template <typename T>
+ MUST_USE_RESULT AllocationResult CopyArrayWithMap(T* src, Map* map);
+
+ // Make a copy of src, set the map, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
+ Map* map);
+
+ // Make a copy of src, set the map, and return the copy.
+ MUST_USE_RESULT AllocationResult CopyPropertyArray(PropertyArray* src);
// Make a copy of src and return it.
MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
@@ -2123,7 +2122,7 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
// Allocate a tenured JS global property cell initialized with the hole.
- MUST_USE_RESULT AllocationResult AllocatePropertyCell();
+ MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
@@ -2224,6 +2223,9 @@ class Heap {
// How many gc happened.
unsigned int gc_count_;
+ static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
+ uintptr_t mmap_region_base_;
+
// For post mortem debugging.
int remembered_unmapped_pages_index_;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];
@@ -2262,6 +2264,8 @@ class Heap {
List<GCCallbackPair> gc_epilogue_callbacks_;
List<GCCallbackPair> gc_prologue_callbacks_;
+ GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;
+
int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
GCTracer* tracer_;
@@ -2291,8 +2295,6 @@ class Heap {
// Last time a garbage collection happened.
double last_gc_time_;
- Scavenger* scavenge_collector_;
-
MarkCompactCollector* mark_compact_collector_;
MinorMarkCompactCollector* minor_mark_compact_collector_;
@@ -2327,11 +2329,6 @@ class Heap {
// The size of objects in old generation after the last MarkCompact GC.
size_t old_generation_size_at_last_gc_;
- // If the --deopt_every_n_garbage_collections flag is set to a positive value,
- // this variable holds the number of garbage collections since the last
- // deoptimization triggered by garbage collection.
- int gcs_since_last_deopt_;
-
// The feedback storage is used to store allocation sites (keys) and how often
// they have been visited (values) by finding a memento behind an object. The
// storage is only alive temporary during a GC. The invariant is that all
@@ -2346,9 +2343,6 @@ class Heap {
bool ring_buffer_full_;
size_t ring_buffer_end_;
- // Shared state read by the scavenge collector and set by ScavengeObject.
- PromotionQueue promotion_queue_;
-
// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is set up.
bool configured_;
@@ -2377,8 +2371,6 @@ class Heap {
bool fast_promotion_mode_;
- bool use_tasks_;
-
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
@@ -2470,21 +2462,18 @@ class AlwaysAllocateScope {
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) override;
- inline void VisitRootPointers(Root root, Object** start,
- Object** end) override;
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
private:
- inline void VerifyPointers(Object** start, Object** end);
+ void VerifyPointers(Object** start, Object** end);
};
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- inline void VisitRootPointers(Root root, Object** start,
- Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
};
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index ee594b2aee..16418bdfcb 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -19,6 +19,14 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
}
}
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+ if (IsMarking()) {
+ if (FLAG_concurrent_marking ||
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
+ RevisitObject(obj);
+ }
+ }
+}
void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value) {
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 47a27faf15..833a40f8a3 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -20,8 +20,6 @@ void IncrementalMarkingJob::Start(Heap* heap) {
ScheduleTask(heap);
}
-void IncrementalMarkingJob::NotifyTask() { task_pending_ = false; }
-
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
if (!task_pending_) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
@@ -48,16 +46,20 @@ void IncrementalMarkingJob::Task::RunInternal() {
isolate(), &RuntimeCallStats::GC_IncrementalMarkingJob);
Heap* heap = isolate()->heap();
- job_->NotifyTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
heap->StartIncrementalMarking(Heap::kNoGCFlags,
GarbageCollectionReason::kIdleTask,
- kNoGCCallbackFlags);
+ kGCCallbackScheduleIdleGarbageCollection);
}
}
+
+ // Clear this flag after the StartIncrementalMarking call to avoid
+ // scheduling a new task when starting incremental marking.
+ job_->task_pending_ = false;
+
if (!incremental_marking->IsStopped()) {
Step(heap);
if (!incremental_marking->IsStopped()) {
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index ccc60c55cb..902989b613 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -21,12 +21,15 @@ class IncrementalMarkingJob {
class Task : public CancelableTask {
public:
explicit Task(Isolate* isolate, IncrementalMarkingJob* job)
- : CancelableTask(isolate), job_(job) {}
+ : CancelableTask(isolate), isolate_(isolate), job_(job) {}
static void Step(Heap* heap);
// CancelableTask overrides.
void RunInternal() override;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
IncrementalMarkingJob* job_;
};
@@ -36,8 +39,6 @@ class IncrementalMarkingJob {
void Start(Heap* heap);
- void NotifyTask();
-
void ScheduleTask(Heap* heap);
private:
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 58731d570b..cdc8881f88 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -33,7 +33,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
- marking_deque_(nullptr),
+ marking_worklist_(nullptr),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
unscanned_bytes_of_large_object_(0),
@@ -55,13 +55,14 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(
value_heap_obj, marking_state(value_heap_obj)));
DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(obj, marking_state(obj)));
- const bool is_black =
+ const bool need_recording =
+ FLAG_concurrent_marking ||
ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj));
- if (is_black && WhiteToGreyAndPush(value_heap_obj)) {
+ if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
RestartIfNotMarking();
}
- return is_compacting_ && is_black;
+ return is_compacting_ && need_recording;
}
@@ -131,7 +132,7 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
if (ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj))) {
- marking_deque()->Push(obj);
+ marking_worklist()->Push(obj);
return true;
}
return false;
@@ -142,33 +143,47 @@ void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj));
if (ObjectMarking::GreyToBlack<kAtomicity>(obj, marking_state(obj))) {
#ifdef V8_CONCURRENT_MARKING
- marking_deque()->Push(obj, MarkingThread::kMain, TargetDeque::kBailout);
+ marking_worklist()->PushBailout(obj);
#else
- if (!marking_deque()->Push(obj)) {
+ if (!marking_worklist()->Push(obj)) {
ObjectMarking::BlackToGrey<kAtomicity>(obj, marking_state(obj));
}
#endif
}
}
-void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
- HeapObject* to) {
+void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
+ DCHECK(IsMarking());
DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
- // This is only used when resizing an object.
- DCHECK(MemoryChunk::FromAddress(from->address()) ==
- MemoryChunk::FromAddress(to->address()));
+ DCHECK_EQ(MemoryChunk::FromAddress(from->address()),
+ MemoryChunk::FromAddress(to->address()));
+ DCHECK_NE(from, to);
- if (!IsMarking()) return;
+ MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from, marking_state(from));
+ MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to, marking_state(to));
- // If the mark doesn't move, we don't check the color of the object.
- // It doesn't matter whether the object is black, since it hasn't changed
- // size, so the adjustment to the live data count will be zero anyway.
- if (from == to) return;
+ if (black_allocation() && Marking::IsBlack(new_mark_bit)) {
+ // Nothing to do if the object is in black area.
+ return;
+ }
- MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to, marking_state(to));
- MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from, marking_state(from));
+ bool marked_black_due_to_left_trimming = false;
+ if (FLAG_concurrent_marking) {
+ // We need to mark the array black before overwriting its map and length
+ // so that the concurrent marker does not observe an inconsistent state.
+ Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
+ if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
+ // The concurrent marker will not mark the array. We need to push the
+ // new array start onto the marking deque to ensure that it will be marked.
+ marked_black_due_to_left_trimming = true;
+ }
+ DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
+ }
- if (Marking::IsBlack<kAtomicity>(old_mark_bit)) {
+ if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
+ !marked_black_due_to_left_trimming) {
+ // The array was black before left trimming or was marked black by the
+ // concurrent marker. Simply transfer the color.
if (from->address() + kPointerSize == to->address()) {
// The old and the new markbits overlap. The |to| object has the
// grey color. To make it black, we need to set the second bit.
@@ -179,12 +194,13 @@ void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
DCHECK(success);
USE(success);
}
- } else if (Marking::IsGrey<kAtomicity>(old_mark_bit)) {
+ } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
+ marked_black_due_to_left_trimming) {
+ // The array was already grey or was marked black by this function.
+ // Mark the new array grey and push it onto the marking deque.
if (from->address() + kPointerSize == to->address()) {
- // The old and the new markbits overlap. The |to| object has the
- // white color. To make it grey, we need to set the first bit.
- // Note that Marking::WhiteToGrey does not work here because
- // old_mark_bit.Next() can be set by the concurrent marker at any time.
+ // The old and the new markbits overlap. The |to| object is either white
+ // or grey. Set the first bit to make sure that it is grey.
new_mark_bit.Set();
DCHECK(!new_mark_bit.Next().Get());
} else {
@@ -192,67 +208,72 @@ void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
DCHECK(success);
USE(success);
}
- marking_deque()->Push(to);
+ marking_worklist()->Push(to);
RestartIfNotMarking();
}
}
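A hedged sketch of the caller side of NotifyLeftTrimming; the trimming routine and the new_start variable below are illustrative assumptions, not code from this patch:

    // Inside a left-trimming routine, after computing the new start of the
    // trimmed FixedArray but before overwriting its map and length. The
    // IsMarking() guard matters because NotifyLeftTrimming now DCHECKs it.
    if (heap->incremental_marking()->IsMarking()) {
      heap->incremental_marking()->NotifyLeftTrimming(
          object, HeapObject::FromAddress(new_start));
    }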
-class IncrementalMarkingMarkingVisitor
- : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
+class IncrementalMarkingMarkingVisitor final
+ : public MarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
- static void Initialize() {
- StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
- table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
- table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
- }
+ typedef MarkingVisitor<IncrementalMarkingMarkingVisitor> Parent;
static const int kProgressBarScanningChunk = 32 * 1024;
- static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+ explicit IncrementalMarkingMarkingVisitor(MarkCompactCollector* collector)
+ : MarkingVisitor<IncrementalMarkingMarkingVisitor>(collector->heap(),
+ collector),
+ incremental_marking_(collector->heap()->incremental_marking()) {}
+
+ V8_INLINE int VisitFixedArray(Map* map, FixedArray* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
DCHECK(!FLAG_use_marking_progress_bar ||
chunk->owner()->identity() == LO_SPACE);
- Heap* heap = map->GetHeap();
// When using a progress bar for large fixed arrays, scan only a chunk of
// the array and try to push it onto the marking deque again until it is
// fully scanned. Fall back to scanning it through to the end in case this
// fails because of a full deque.
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
int start_offset =
Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
- int end_offset =
- Min(object_size, start_offset + kProgressBarScanningChunk);
- int already_scanned_offset = start_offset;
- bool scan_until_end = false;
- do {
- VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
- } while (scan_until_end && start_offset < object_size);
- chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
+#ifdef V8_CONCURRENT_MARKING
+ incremental_marking_->marking_worklist()->PushBailout(object);
+#else
if (ObjectMarking::IsGrey<IncrementalMarking::kAtomicity>(
- object, heap->incremental_marking()->marking_state(object))) {
- heap->incremental_marking()->marking_deque()->Unshift(object);
+ object, incremental_marking_->marking_state(object))) {
+ incremental_marking_->marking_worklist()->Push(object);
} else {
DCHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
- object, heap->incremental_marking()->marking_state(object)));
- heap->mark_compact_collector()->UnshiftBlack(object);
+ object, incremental_marking_->marking_state(object)));
+ collector_->PushBlack(object);
+ }
+#endif
+ int end_offset =
+ Min(object_size, start_offset + kProgressBarScanningChunk);
+ int already_scanned_offset = start_offset;
+ bool scan_until_end = false;
+ do {
+ VisitPointers(object, HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+ scan_until_end = incremental_marking_->marking_worklist()->IsFull();
+ } while (scan_until_end && start_offset < object_size);
+ chunk->set_progress_bar(start_offset);
+ if (start_offset < object_size) {
+ incremental_marking_->NotifyIncompleteScanOfObject(
+ object_size - (start_offset - already_scanned_offset));
}
- heap->incremental_marking()->NotifyIncompleteScanOfObject(
- object_size - (start_offset - already_scanned_offset));
}
} else {
- FixedArrayVisitor::Visit(map, object);
+ FixedArray::BodyDescriptor::IterateBody(object, object_size, this);
}
+ return object_size;
}
- static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
- Context* context = Context::cast(object);
-
+ V8_INLINE int VisitNativeContext(Map* map, Context* context) {
// We will mark cache black with a separate pass when we finish marking.
// Note that GC can happen when the context is not fully initialized,
// so the cache can be undefined.
@@ -262,62 +283,48 @@ class IncrementalMarkingMarkingVisitor
HeapObject* heap_obj = HeapObject::cast(cache);
      // Mark the object grey if it is white; do not enqueue it into the marking
// deque.
- Heap* heap = map->GetHeap();
- bool ignored =
- ObjectMarking::WhiteToGrey<IncrementalMarking::kAtomicity>(
- heap_obj, heap->incremental_marking()->marking_state(heap_obj));
- USE(ignored);
+ ObjectMarking::WhiteToGrey<IncrementalMarking::kAtomicity>(
+ heap_obj, incremental_marking_->marking_state(heap_obj));
}
}
- VisitNativeContext(map, context);
+ return Parent::VisitNativeContext(map, context);
}
- INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+ V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
Object* target = *p;
if (target->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(object, p, target);
- MarkObject(heap, target);
+ collector_->RecordSlot(host, p, target);
+ MarkObject(target);
}
}
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) {
Object* target = *p;
if (target->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(object, p, target);
- MarkObject(heap, target);
+ collector_->RecordSlot(host, p, target);
+ MarkObject(target);
}
}
}
// Marks the object grey and pushes it on the marking stack.
- INLINE(static void MarkObject(Heap* heap, Object* obj)) {
- heap->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
+ V8_INLINE void MarkObject(Object* obj) {
+ incremental_marking_->WhiteToGreyAndPush(HeapObject::cast(obj));
}
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
+ V8_INLINE bool MarkObjectWithoutPush(Object* obj) {
HeapObject* heap_object = HeapObject::cast(obj);
return ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
- heap_object, heap->incremental_marking()->marking_state(heap_object));
+ heap_object, incremental_marking_->marking_state(heap_object));
}
-};
-void IncrementalMarking::IterateBlackObject(HeapObject* object) {
- if (IsMarking() &&
- ObjectMarking::IsBlack<kAtomicity>(object, marking_state(object))) {
- Page* page = Page::FromAddress(object->address());
- if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
- // IterateBlackObject requires us to visit the whole object.
- page->ResetProgressBar();
- }
- Map* map = object->map();
- WhiteToGreyAndPush(map);
- IncrementalMarkingMarkingVisitor::IterateBody(map, object);
- }
-}
+ private:
+ IncrementalMarking* const incremental_marking_;
+};
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
public:
@@ -344,12 +351,6 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
Heap* heap_;
};
-
-void IncrementalMarking::Initialize() {
- IncrementalMarkingMarkingVisitor::Initialize();
-}
-
-
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
bool is_marking,
bool is_compacting) {
@@ -569,7 +570,7 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- marking_deque()->StartUsing();
+ marking_worklist()->StartUsing();
ActivateIncrementalWriteBarrier();
@@ -580,16 +581,18 @@ void IncrementalMarking::StartMarking() {
}
#endif
- heap_->CompletelyClearInstanceofCache();
heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+ if (FLAG_concurrent_marking && !black_allocation_) {
+ StartBlackAllocation();
+ }
+
// Mark strong roots grey.
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
if (FLAG_concurrent_marking) {
- ConcurrentMarking* concurrent_marking = heap_->concurrent_marking();
- concurrent_marking->StartTask();
+ heap_->concurrent_marking()->ScheduleTasks();
}
// Ready to start incremental marking.
@@ -724,7 +727,7 @@ void IncrementalMarking::RetainMaps() {
DCHECK(retained_maps->Get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
if (cell->cleared()) continue;
- int age = Smi::cast(retained_maps->Get(i + 1))->value();
+ int age = Smi::ToInt(retained_maps->Get(i + 1));
int new_age;
Map* map = Map::cast(cell->value());
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
@@ -777,7 +780,7 @@ void IncrementalMarking::FinalizeIncrementally() {
ProcessWeakCells();
int marking_progress =
- heap_->mark_compact_collector()->marking_deque()->Size() +
+ heap_->mark_compact_collector()->marking_worklist()->Size() +
static_cast<int>(
heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
@@ -807,13 +810,13 @@ void IncrementalMarking::FinalizeIncrementally() {
}
}
-
-void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
if (!IsMarking()) return;
Map* filler_map = heap_->one_pointer_filler_map();
- marking_deque()->Update([this, filler_map](HeapObject* obj) -> HeapObject* {
+ marking_worklist()->Update([this, filler_map](HeapObject* obj,
+ HeapObject** out) -> bool {
DCHECK(obj->IsHeapObject());
// Only pointers to from space have to be updated.
if (heap_->InFromSpace(obj)) {
@@ -824,36 +827,44 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
      // If these objects are dead at scavenging time, their marking deque
// entries will not point to forwarding addresses. Hence, we can discard
// them.
- return nullptr;
+ return false;
}
HeapObject* dest = map_word.ToForwardingAddress();
DCHECK_IMPLIES(
ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj)),
obj->IsFiller());
- return dest;
+ *out = dest;
+ return true;
} else if (heap_->InToSpace(obj)) {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
- return ObjectMarking::IsBlack<kAtomicity>(obj,
- MarkingState::External(obj))
- ? obj
- : nullptr;
+ if (ObjectMarking::IsGrey<kAtomicity>(obj, MarkingState::External(obj))) {
+ *out = obj;
+ return true;
+ }
+ return false;
} else {
// The object may be on a page that was moved from new to old space.
if (Page::FromAddress(obj->address())
->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
- return ObjectMarking::IsBlack<kAtomicity>(obj,
- MarkingState::External(obj))
- ? obj
- : nullptr;
+ if (ObjectMarking::IsGrey<kAtomicity>(obj,
+ MarkingState::External(obj))) {
+ *out = obj;
+ return true;
+ }
+ return false;
}
DCHECK_IMPLIES(
ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj)),
obj->IsFiller());
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
- return (obj->map() == filler_map) ? nullptr : obj;
+ if (obj->map() != filler_map) {
+ *out = obj;
+ return true;
+ }
+ return false;
}
});
}
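The callback's new shape can be summarised with a hedged stand-alone sketch; the trivial filter below is an assumption used only to show the contract:

    // Old contract: return the replacement HeapObject*, or nullptr to drop.
    // New contract: return false to drop the entry, or write the
    // replacement into *out and return true.
    marking_worklist()->Update([](HeapObject* obj, HeapObject** out) -> bool {
      if (obj->IsFiller()) return false;  // drop filler entries
      *out = obj;                         // keep (possibly forwarded) object
      return true;
    });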
@@ -881,16 +892,37 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
}
DCHECK(ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj)));
WhiteToGreyAndPush(map);
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector());
+ visitor.Visit(map, obj);
}
-intptr_t IncrementalMarking::ProcessMarkingDeque(
+void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
+ if (IsMarking() &&
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
+ RevisitObject(obj);
+ }
+}
+
+void IncrementalMarking::RevisitObject(HeapObject* obj) {
+ DCHECK(IsMarking());
+ DCHECK(FLAG_concurrent_marking ||
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj)));
+ Page* page = Page::FromAddress(obj->address());
+ if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
+ page->ResetProgressBar();
+ }
+ Map* map = obj->map();
+ WhiteToGreyAndPush(map);
+ IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector());
+ visitor.Visit(map, obj);
+}
+
+intptr_t IncrementalMarking::ProcessMarkingWorklist(
intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
- while (!marking_deque()->IsEmpty() && (bytes_processed < bytes_to_process ||
- completion == FORCE_COMPLETION)) {
- HeapObject* obj = marking_deque()->Pop();
-
+ while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
+ HeapObject* obj = marking_worklist()->Pop();
+ if (obj == nullptr) break;
// Left trimming may result in white, grey, or black filler objects on the
// marking deque. Ignore these objects.
if (obj->IsFiller()) {
@@ -919,7 +951,7 @@ void IncrementalMarking::Hurry() {
// forced e.g. in tests. It should not happen when COMPLETE was set when
// incremental marking finished and a regular GC was triggered after that
// because should_hurry_ will force a full GC.
- if (!marking_deque()->IsEmpty()) {
+ if (!marking_worklist()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
@@ -929,7 +961,7 @@ void IncrementalMarking::Hurry() {
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
- ProcessMarkingDeque(0, FORCE_COMPLETION);
+ ProcessMarkingWorklist(0, FORCE_COMPLETION);
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
@@ -1081,7 +1113,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
- !marking_deque()->IsEmpty());
+ !marking_worklist()->IsEmpty());
return remaining_time_in_ms;
}
@@ -1178,12 +1210,16 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
- bytes_processed = ProcessMarkingDeque(bytes_to_process);
+ if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
+ FLAG_trace_gc_verbose) {
+ marking_worklist()->Print();
+ }
+ bytes_processed = ProcessMarkingWorklist(bytes_to_process);
if (step_origin == StepOrigin::kTask) {
bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (marking_deque()->IsEmpty()) {
+ if (marking_worklist()->IsEmpty()) {
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
if (completion == FORCE_COMPLETION ||
@@ -1197,10 +1233,13 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
IncrementIdleMarkingDelayCounter();
}
} else {
- heap_->local_embedder_heap_tracer()->NotifyV8MarkingDequeWasEmpty();
+ heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
}
}
}
+ if (FLAG_concurrent_marking) {
+ heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ }
double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 4a88ab3fae..6fe5c9768a 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -53,8 +53,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool paused_;
};
- static void Initialize();
-
explicit IncrementalMarking(Heap* heap);
MarkingState marking_state(HeapObject* object) const {
@@ -65,12 +63,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
return MarkingState::Internal(chunk);
}
- // Transfers mark bits without requiring proper object headers.
- void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
+ void NotifyLeftTrimming(HeapObject* from, HeapObject* to);
// Transfers color including live byte count, requiring properly set up
// objects.
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
if (ObjectMarking::IsBlack<access_mode>(to, marking_state(to))) {
DCHECK(black_allocation());
@@ -139,7 +136,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeIncrementally();
- void UpdateMarkingDequeAfterScavenge();
+ void UpdateMarkingWorklistAfterScavenge();
void Hurry();
@@ -183,9 +180,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
#endif
#ifdef V8_CONCURRENT_MARKING
- static const MarkBit::AccessMode kAtomicity = MarkBit::AccessMode::ATOMIC;
+ static const AccessMode kAtomicity = AccessMode::ATOMIC;
#else
- static const MarkBit::AccessMode kAtomicity = MarkBit::AccessMode::NON_ATOMIC;
+ static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
#endif
void FinalizeSweeping();
@@ -212,6 +209,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
+ INLINE(void RecordWrites(HeapObject* obj));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
@@ -248,7 +246,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
- void IterateBlackObject(HeapObject* object);
+ void ProcessBlackAllocatedObject(HeapObject* obj);
Heap* heap() const { return heap_; }
@@ -262,13 +260,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void AbortBlackAllocation();
- MarkingDeque* marking_deque() {
- SLOW_DCHECK(marking_deque_ != nullptr);
- return marking_deque_;
+ MarkCompactCollector::MarkingWorklist* marking_worklist() {
+ SLOW_DCHECK(marking_worklist_ != nullptr);
+ return marking_worklist_;
}
- void set_marking_deque(MarkingDeque* marking_deque) {
- marking_deque_ = marking_deque;
+ void set_marking_worklist(
+ MarkCompactCollector::MarkingWorklist* marking_worklist) {
+ marking_worklist_ = marking_worklist;
}
private:
@@ -311,15 +310,15 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
- INLINE(void ProcessMarkingDeque());
-
- INLINE(intptr_t ProcessMarkingDeque(
+ INLINE(intptr_t ProcessMarkingWorklist(
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
INLINE(bool IsFixedArrayWithProgressBar(HeapObject* object));
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
+ void RevisitObject(HeapObject* obj);
+
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
@@ -328,7 +327,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t StepSizeToMakeProgress();
Heap* heap_;
- MarkingDeque* marking_deque_;
+ MarkCompactCollector::MarkingWorklist* marking_worklist_;
double start_time_ms_;
size_t initial_old_generation_size_;
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 4c2b37e9e6..432d884bda 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -128,13 +128,15 @@ class ItemParallelJob {
// Adds an item to the job. Transfers ownership to the job.
void AddItem(Item* item) { items_.push_back(item); }
+ int NumberOfItems() const { return static_cast<int>(items_.size()); }
+ int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
+
void Run() {
DCHECK_GE(tasks_.size(), 0);
const size_t num_tasks = tasks_.size();
const size_t num_items = items_.size();
const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- CancelableTaskManager::Id* task_ids =
- new CancelableTaskManager::Id[num_tasks];
+ uint32_t* task_ids = new uint32_t[num_tasks];
size_t start_index = 0;
Task* main_task = nullptr;
Task* task = nullptr;
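The items_per_task expression above is plain ceiling division; a hedged worked example with invented numbers:

    // With num_items = 10 and num_tasks = 4:
    //   items_per_task = (10 + 4 - 1) / 4 = 3,
    // so the first three tasks receive 3 items each and the last task 1.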
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
new file mode 100644
index 0000000000..2d7f95909e
--- /dev/null
+++ b/deps/v8/src/heap/local-allocator.h
@@ -0,0 +1,99 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+// Allocator encapsulating thread-local allocation. Assumes that all other
+// allocations also go through LocalAllocator.
+class LocalAllocator {
+ public:
+ static const int kLabSize = 32 * KB;
+ static const int kMaxLabObjectSize = 8 * KB;
+
+ explicit LocalAllocator(Heap* heap)
+ : heap_(heap),
+ new_space_(heap->new_space()),
+ compaction_spaces_(heap),
+ new_space_lab_(LocalAllocationBuffer::InvalidBuffer()) {}
+
+ // Needs to be called from the main thread to finalize this LocalAllocator.
+ void Finalize() {
+ heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
+ // Give back remaining LAB space if this LocalAllocator's new space LAB
+ // sits right next to the new space allocation top.
+ const AllocationInfo info = new_space_lab_.Close();
+ const Address top = new_space_->top();
+ if (info.limit() != nullptr && info.limit() == top) {
+ DCHECK_NOT_NULL(info.top());
+ *new_space_->allocation_top_address() = info.top();
+ }
+ }
+
+ template <AllocationSpace space>
+ AllocationResult Allocate(int object_size, AllocationAlignment alignment) {
+ switch (space) {
+ case NEW_SPACE:
+ return AllocateInNewSpace(object_size, alignment);
+ case OLD_SPACE:
+ return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
+ alignment);
+ default:
+ // Only new and old space supported.
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ private:
+ AllocationResult AllocateInNewSpace(int object_size,
+ AllocationAlignment alignment) {
+ if (object_size > kMaxLabObjectSize) {
+ return new_space_->AllocateRawSynchronized(object_size, alignment);
+ }
+ return AllocateInLAB(object_size, alignment);
+ }
+
+ inline bool NewLocalAllocationBuffer() {
+ LocalAllocationBuffer saved_lab_ = new_space_lab_;
+ AllocationResult result =
+ new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (new_space_lab_.IsValid()) {
+ new_space_lab_.TryMerge(&saved_lab_);
+ return true;
+ }
+ return false;
+ }
+
+ AllocationResult AllocateInLAB(int object_size,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ CHECK(!allocation.IsRetry());
+ }
+ }
+ return allocation;
+ }
+
+ Heap* const heap_;
+ NewSpace* const new_space_;
+ CompactionSpaceCollection compaction_spaces_;
+ LocalAllocationBuffer new_space_lab_;
+};
+
+} // namespace internal
+} // namespace v8
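A minimal usage sketch for the new LocalAllocator, assuming a parallel evacuation context; object_size, the space choice, and the fallback policy are illustrative assumptions:

    LocalAllocator allocator(heap);
    HeapObject* target = nullptr;
    AllocationResult result =
        allocator.Allocate<NEW_SPACE>(object_size, kWordAligned);
    if (!result.To(&target)) {
      // New-space LAB could not be refilled; retry in the compaction space.
      result = allocator.Allocate<OLD_SPACE>(object_size, kWordAligned);
      CHECK(result.To(&target));
    }
    // ... copy the object into |target| ...
    allocator.Finalize();  // main thread only: merges the compaction space
                           // and returns unused new-space LAB memory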
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index b8e4d46fc3..8873d213c2 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -13,23 +13,16 @@ namespace v8 {
namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
+ DCHECK((ObjectMarking::IsBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))));
- if (!marking_deque()->Push(obj)) {
- ObjectMarking::BlackToGrey<MarkBit::NON_ATOMIC>(
+ if (!marking_worklist()->Push(obj)) {
+ ObjectMarking::BlackToGrey<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj));
}
}
-void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
- DCHECK(ObjectMarking::IsBlack(obj, MarkingState::Internal(obj)));
- if (!marking_deque()->Unshift(obj)) {
- ObjectMarking::BlackToGrey(obj, MarkingState::Internal(obj));
- }
-}
-
void MarkCompactCollector::MarkObject(HeapObject* obj) {
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))) {
PushBlack(obj);
}
@@ -48,95 +41,45 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
}
}
-
-void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == nullptr) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
+template <LiveObjectIterationMode mode>
+LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk,
+ MarkingState state, Address start)
+ : chunk_(chunk),
+ one_word_filler_map_(chunk->heap()->one_pointer_filler_map()),
+ two_word_filler_map_(chunk->heap()->two_pointer_filler_map()),
+ free_space_map_(chunk->heap()->free_space_map()),
+ it_(chunk, state) {
+ it_.Advance(Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(start))));
+ if (!it_.Done()) {
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ AdvanceToNextValidObject();
+ } else {
+ current_object_ = nullptr;
}
}
-
-void CodeFlusher::AddCandidate(JSFunction* function) {
- DCHECK(function->code() == function->shared()->code());
- if (function->next_function_link()->IsUndefined(isolate_)) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
-}
-
-
-JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
-}
-
-
-JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
- Object* next_candidate = candidate->next_function_link();
- return reinterpret_cast<JSFunction*>(next_candidate);
-}
-
-
-void CodeFlusher::SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
-}
-
-
-void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- DCHECK(undefined->IsUndefined(candidate->GetIsolate()));
- candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
-}
-
-
-SharedFunctionInfo* CodeFlusher::GetNextCandidate(
- SharedFunctionInfo* candidate) {
- Object* next_candidate = candidate->code()->gc_metadata();
- return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
-}
-
-
-void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
-}
-
-
-void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
- candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
-}
-
-void CodeFlusher::VisitListHeads(RootVisitor* visitor) {
- visitor->VisitRootPointer(
- Root::kCodeFlusher,
- reinterpret_cast<Object**>(&jsfunction_candidates_head_));
- visitor->VisitRootPointer(
- Root::kCodeFlusher,
- reinterpret_cast<Object**>(&shared_function_info_candidates_head_));
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator& LiveObjectRange<mode>::iterator::
+operator++() {
+ AdvanceToNextValidObject();
+ return *this;
}
-template <typename StaticVisitor>
-void CodeFlusher::IteratePointersToFromSpace() {
- Heap* heap = isolate_->heap();
- JSFunction* candidate = jsfunction_candidates_head_;
- while (candidate != nullptr) {
- JSFunction** slot = GetNextCandidateSlot(candidate);
- if (heap->InFromSpace(*slot)) {
- StaticVisitor::VisitPointer(heap, candidate,
- reinterpret_cast<Object**>(slot));
- }
- candidate = GetNextCandidate(candidate);
- }
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::iterator::
+operator++(int) {
+ iterator retval = *this;
+ ++(*this);
+ return retval;
}
-template <LiveObjectIterationMode T>
-HeapObject* LiveObjectIterator<T>::Next() {
- Map* one_word_filler = heap()->one_pointer_filler_map();
- Map* two_word_filler = heap()->two_pointer_filler_map();
- Map* free_space_map = heap()->free_space_map();
+template <LiveObjectIterationMode mode>
+void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
while (!it_.Done()) {
HeapObject* object = nullptr;
+ int size = 0;
while (current_cell_ != 0) {
uint32_t trailing_zeros = base::bits::CountTrailingZeros32(current_cell_);
Address addr = cell_base_ + trailing_zeros * kPointerSize;
@@ -144,10 +87,8 @@ HeapObject* LiveObjectIterator<T>::Next() {
      // Clear the first bit of the found object.
current_cell_ &= ~(1u << trailing_zeros);
- uint32_t second_bit_index = 0;
- if (trailing_zeros < Bitmap::kBitIndexMask) {
- second_bit_index = 1u << (trailing_zeros + 1);
- } else {
+ uint32_t second_bit_index = 1u << (trailing_zeros + 1);
+ if (trailing_zeros >= Bitmap::kBitIndexMask) {
second_bit_index = 0x1;
// The overlapping case; there has to exist a cell after the current
// cell.
@@ -155,11 +96,9 @@ HeapObject* LiveObjectIterator<T>::Next() {
// last word is a one word filler, we are not allowed to advance. In
// that case we can return immediately.
if (!it_.Advance()) {
- DCHECK(HeapObject::FromAddress(addr)->map() ==
- HeapObject::FromAddress(addr)
- ->GetHeap()
- ->one_pointer_filler_map());
- return nullptr;
+ DCHECK(HeapObject::FromAddress(addr)->map() == one_word_filler_map_);
+ current_object_ = nullptr;
+ return;
}
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
@@ -172,7 +111,8 @@ HeapObject* LiveObjectIterator<T>::Next() {
// object ends.
HeapObject* black_object = HeapObject::FromAddress(addr);
map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
- Address end = addr + black_object->SizeFromMap(map) - kPointerSize;
+ size = black_object->SizeFromMap(map);
+ Address end = addr + size - kPointerSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
// Note that we know that we are at a one word filler when
@@ -193,12 +133,13 @@ HeapObject* LiveObjectIterator<T>::Next() {
current_cell_ &= ~(end_index_mask + end_index_mask - 1);
}
- if (T == kBlackObjects || T == kAllLiveObjects) {
+ if (mode == kBlackObjects || mode == kAllLiveObjects) {
object = black_object;
}
- } else if ((T == kGreyObjects || T == kAllLiveObjects)) {
+ } else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
object = HeapObject::FromAddress(addr);
+ size = object->SizeFromMap(map);
}
// We found a live object.
@@ -206,8 +147,8 @@ HeapObject* LiveObjectIterator<T>::Next() {
      // Do not use IsFiller() here. This may cause a data race when reading
      // out the instance type while a new map is concurrently written into
      // this object during iteration.
- if (map == one_word_filler || map == two_word_filler ||
- map == free_space_map) {
+ if (map == one_word_filler_map_ || map == two_word_filler_map_ ||
+ map == free_space_map_) {
// There are two reasons why we can get black or grey fillers:
// 1) Black areas together with slack tracking may result in black one
// word filler objects.
@@ -227,9 +168,23 @@ HeapObject* LiveObjectIterator<T>::Next() {
current_cell_ = *it_.CurrentCell();
}
}
- if (object != nullptr) return object;
+ if (object != nullptr) {
+ current_object_ = object;
+ current_size_ = size;
+ return;
+ }
}
- return nullptr;
+ current_object_ = nullptr;
+}
+
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::begin() {
+ return iterator(chunk_, state_, start_);
+}
+
+template <LiveObjectIterationMode mode>
+typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
+ return iterator(chunk_, state_, end_);
}
} // namespace internal
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index d970e1a50e..cc47333f1d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -4,10 +4,11 @@
#include "src/heap/mark-compact.h"
+#include <unordered_map>
+
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
-#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
@@ -24,9 +25,8 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
-#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/workstealing-marking-deque.h"
+#include "src/heap/worklist.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/tracing/tracing-category-observer.h"
@@ -65,6 +65,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual void VerifyPointers(Object** start, Object** end) = 0;
+ virtual bool IsMarked(HeapObject* object) = 0;
+
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
VerifyPointers(start, end);
}
@@ -96,7 +98,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page& page,
// One word fillers at the end of a black area can be grey.
if (ObjectMarking::IsBlackOrGrey(object, state) &&
object->map() != heap_->one_pointer_filler_map()) {
- CHECK(ObjectMarking::IsBlack(object, state));
+ CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(this);
next_object_must_be_here_or_later = current + object->Size();
@@ -165,6 +167,10 @@ class FullMarkingVerifier : public MarkingVerifier {
return MarkingState::Internal(object);
}
+ bool IsMarked(HeapObject* object) override {
+ return ObjectMarking::IsBlack(object, marking_state(object));
+ }
+
void VerifyPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -202,6 +208,10 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
return MarkingState::External(object);
}
+ bool IsMarked(HeapObject* object) override {
+ return ObjectMarking::IsGrey(object, marking_state(object));
+ }
+
void Run() override {
VerifyRoots(VISIT_ALL_IN_SCAVENGE);
VerifyMarking(heap_->new_space());
@@ -212,7 +222,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
if (!heap_->InNewSpace(object)) return;
- CHECK(ObjectMarking::IsBlackOrGrey(object, marking_state(object)));
+ CHECK(IsMarked(object));
}
}
}
@@ -345,22 +355,42 @@ static int NumberOfAvailableCores() {
}
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
+ DCHECK_GT(pages, 0);
return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
}
-int MarkCompactCollectorBase::NumberOfPointerUpdateTasks(int pages) {
+int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
+ int slots) {
+ DCHECK_GT(pages, 0);
// Limit the number of update tasks as task creation often dominates the
// actual work that is being done.
- static const int kMaxPointerUpdateTasks = 8;
+ const int kMaxPointerUpdateTasks = 8;
+ const int kSlotsPerTask = 600;
+ const int wanted_tasks =
+ (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
return FLAG_parallel_pointer_update
- ? Min(kMaxPointerUpdateTasks, Min(NumberOfAvailableCores(), pages))
+ ? Min(kMaxPointerUpdateTasks,
+ Min(NumberOfAvailableCores(), wanted_tasks))
: 1;
}
-int MinorMarkCompactCollector::NumberOfMarkingTasks() {
- return FLAG_minor_mc_parallel_marking
- ? Min(NumberOfAvailableCores(), kNumMarkers)
- : 1;
+int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
+ int pages) {
+ DCHECK_GT(pages, 0);
+ // No cap needed because all pages we need to process are fully filled with
+ // interesting objects.
+ return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
+ : 1;
+}
+
+int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
+ DCHECK_GT(pages, 0);
+ if (!FLAG_minor_mc_parallel_marking) return 1;
+ // Pages are not private to markers but we can still use them to estimate the
+ // amount of marking that is required.
+ const int kPagesPerTask = 2;
+ const int wanted_tasks = Max(1, pages / kPagesPerTask);
+ return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
}
MarkCompactCollector::MarkCompactCollector(Heap* heap)
@@ -374,9 +404,9 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
- marking_deque_(heap),
- code_flusher_(nullptr),
+ marking_worklist_(heap),
sweeper_(heap) {
+ old_to_new_slots_ = -1;
}
void MarkCompactCollector::SetUp() {
@@ -384,22 +414,14 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- marking_deque()->SetUp();
-
- if (FLAG_flush_code) {
- code_flusher_ = new CodeFlusher(isolate());
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing is now on]\n");
- }
- }
+ marking_worklist()->SetUp();
}
void MinorMarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
- marking_deque()->TearDown();
- delete code_flusher_;
+ marking_worklist()->TearDown();
}
void MinorMarkCompactCollector::TearDown() {}
@@ -407,7 +429,7 @@ void MinorMarkCompactCollector::TearDown() {}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
p->MarkEvacuationCandidate();
- evacuation_candidates_.Add(p);
+ evacuation_candidates_.push_back(p);
}
@@ -422,7 +444,7 @@ static void TraceFragmentation(PagedSpace* space) {
bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
- DCHECK(evacuation_candidates_.length() == 0);
+ DCHECK(evacuation_candidates_.empty());
CollectEvacuationCandidates(heap()->old_space());
@@ -436,7 +458,7 @@ bool MarkCompactCollector::StartCompaction() {
TraceFragmentation(heap()->map_space());
}
- compacting_ = evacuation_candidates_.length() > 0;
+ compacting_ = !evacuation_candidates_.empty();
}
return compacting_;
@@ -547,14 +569,12 @@ void MarkCompactCollector::ClearMarkbits() {
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
}
-class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
+class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
public:
- SweeperTask(Isolate* isolate, Sweeper* sweeper,
- base::Semaphore* pending_sweeper_tasks,
+ SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
base::AtomicNumber<intptr_t>* num_sweeping_tasks,
AllocationSpace space_to_start)
- : CancelableTask(isolate),
- sweeper_(sweeper),
+ : sweeper_(sweeper),
pending_sweeper_tasks_(pending_sweeper_tasks),
num_sweeping_tasks_(num_sweeping_tasks),
space_to_start_(space_to_start) {}
@@ -562,7 +582,8 @@ class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
virtual ~SweeperTask() {}
private:
- void RunInternal() final {
+ // v8::Task overrides.
+ void Run() override {
DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
const int offset = space_to_start_ - FIRST_SPACE;
@@ -577,9 +598,9 @@ class MarkCompactCollector::Sweeper::SweeperTask final : public CancelableTask {
pending_sweeper_tasks_->Signal();
}
- Sweeper* const sweeper_;
- base::Semaphore* const pending_sweeper_tasks_;
- base::AtomicNumber<intptr_t>* const num_sweeping_tasks_;
+ Sweeper* sweeper_;
+ base::Semaphore* pending_sweeper_tasks_;
+ base::AtomicNumber<intptr_t>* num_sweeping_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
@@ -597,19 +618,15 @@ void MarkCompactCollector::Sweeper::StartSweeping() {
}
void MarkCompactCollector::Sweeper::StartSweeperTasks() {
- DCHECK_EQ(0, num_tasks_);
- DCHECK_EQ(0, num_sweeping_tasks_.Value());
if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
num_sweeping_tasks_.Increment(1);
- SweeperTask* task = new SweeperTask(heap_->isolate(), this,
- &pending_sweeper_tasks_semaphore_,
- &num_sweeping_tasks_, space);
- DCHECK_LT(num_tasks_, kMaxSweeperTasks);
- task_ids_[num_tasks_++] = task->id();
+ semaphore_counter_++;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ new SweeperTask(this, &pending_sweeper_tasks_semaphore_,
+ &num_sweeping_tasks_, space),
+ v8::Platform::kShortRunningTask);
});
}
}
@@ -637,8 +654,10 @@ void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
SweptList& list = swept_list_[space->identity()];
- if (list.length() > 0) {
- return list.RemoveLast();
+ if (!list.empty()) {
+ auto last_page = list.back();
+ list.pop_back();
+ return last_page;
}
return nullptr;
}
@@ -652,19 +671,15 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
if (FLAG_concurrent_sweeping) {
- for (int i = 0; i < num_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_sweeper_tasks_semaphore_.Wait();
- }
+ while (semaphore_counter_ > 0) {
+ pending_sweeper_tasks_semaphore_.Wait();
+ semaphore_counter_--;
}
- num_tasks_ = 0;
- num_sweeping_tasks_.SetValue(0);
}
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) {
- swept_list_[NEW_SPACE].Clear();
+ swept_list_[NEW_SPACE].clear();
}
DCHECK(sweeping_list_[space].empty());
});
@@ -889,9 +904,9 @@ void MarkCompactCollector::AbortCompaction() {
p->ClearEvacuationCandidate();
}
compacting_ = false;
- evacuation_candidates_.Rewind(0);
+ evacuation_candidates_.clear();
}
- DCHECK_EQ(0, evacuation_candidates_.length());
+ DCHECK(evacuation_candidates_.empty());
}
@@ -916,7 +931,7 @@ void MarkCompactCollector::Prepare() {
// them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
- heap()->concurrent_marking()->EnsureTaskCompleted();
+ heap()->concurrent_marking()->EnsureCompleted();
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -928,7 +943,7 @@ void MarkCompactCollector::Prepare() {
AbortTransitionArrays();
AbortCompaction();
heap_->local_embedder_heap_tracer()->AbortTracing();
- marking_deque()->Clear();
+ marking_worklist()->Clear();
was_marked_incrementally_ = false;
}
@@ -966,8 +981,7 @@ void MarkCompactCollector::Finish() {
}
// The hashing of weak_object_to_code_table is no longer valid.
- heap()->weak_object_to_code_table()->Rehash(
- heap()->isolate()->factory()->undefined_value());
+ heap()->weak_object_to_code_table()->Rehash();
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
@@ -1023,418 +1037,81 @@ void MarkCompactCollector::Finish() {
// and continue with marking. This process repeats until all reachable
// objects have been marked.
-void CodeFlusher::ProcessJSFunctionCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
- Code* interpreter_entry_trampoline =
- isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- Object* undefined = isolate_->heap()->undefined_value();
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate, undefined);
-
- SharedFunctionInfo* shared = candidate->shared();
-
- Code* code = shared->code();
- if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) {
- if (FLAG_trace_code_flushing && shared->is_compiled()) {
- PrintF("[code-flushing clears: ");
- shared->ShortPrint();
- PrintF(" - age: %d]\n", code->GetAge());
- }
- // Always flush the optimized code.
- if (candidate->has_feedback_vector()) {
- candidate->feedback_vector()->ClearOptimizedCode();
- }
- if (shared->HasBytecodeArray()) {
- shared->set_code(interpreter_entry_trampoline);
- candidate->set_code(interpreter_entry_trampoline);
- } else {
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
- }
- } else {
- DCHECK(ObjectMarking::IsBlack(code, MarkingState::Internal(code)));
- candidate->set_code(code);
- }
-
- // We are in the middle of a GC cycle so the write barrier in the code
- // setter did not record the slot update and we have to do that manually.
- Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
- candidate, slot, target);
-
- Object** shared_code_slot =
- HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- shared, shared_code_slot, *shared_code_slot);
-
- candidate = next_candidate;
- }
-
- jsfunction_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
- Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
- Code* interpreter_entry_trampoline =
- isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate);
-
- Code* code = candidate->code();
- if (ObjectMarking::IsWhite(code, MarkingState::Internal(code))) {
- if (FLAG_trace_code_flushing && candidate->is_compiled()) {
- PrintF("[code-flushing clears: ");
- candidate->ShortPrint();
- PrintF(" - age: %d]\n", code->GetAge());
- }
- if (candidate->HasBytecodeArray()) {
- candidate->set_code(interpreter_entry_trampoline);
- } else {
- candidate->set_code(lazy_compile);
- }
- }
-
- Object** code_slot =
- HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
- *code_slot);
-
- candidate = next_candidate;
- }
-
- shared_function_info_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons function-info: ");
- shared_info->ShortPrint();
- PrintF("]\n");
- }
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- if (candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- shared_function_info_candidates_head_ = next_candidate;
- ClearNextCandidate(shared_info);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(shared_info);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-
-void CodeFlusher::EvictCandidate(JSFunction* function) {
- DCHECK(!function->next_function_link()->IsUndefined(isolate_));
- Object* undefined = isolate_->heap()->undefined_value();
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->IterateBlackObject(function);
- isolate_->heap()->incremental_marking()->IterateBlackObject(
- function->shared());
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons closure: ");
- function->shared()->ShortPrint();
- PrintF("]\n");
- }
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- if (candidate == function) {
- next_candidate = GetNextCandidate(function);
- jsfunction_candidates_head_ = next_candidate;
- ClearNextCandidate(function, undefined);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == function) {
- next_candidate = GetNextCandidate(function);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(function, undefined);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-class MarkCompactMarkingVisitor
- : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
+class MarkCompactMarkingVisitor final
+ : public MarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static void Initialize();
+ explicit MarkCompactMarkingVisitor(MarkCompactCollector* collector)
+ : MarkingVisitor<MarkCompactMarkingVisitor>(collector->heap(),
+ collector) {}
- INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
- MarkObjectByPointer(heap->mark_compact_collector(), object, p);
+ V8_INLINE void VisitPointer(HeapObject* host, Object** p) final {
+ MarkObjectByPointer(host, p);
}
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(heap, object, start, end)) return;
+ if (VisitUnmarkedObjects(host, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
- MarkCompactCollector* collector = heap->mark_compact_collector();
for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(collector, object, p);
+ MarkObjectByPointer(host, p);
}
}
// Marks the object black and pushes it on the marking stack.
- INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
- heap->mark_compact_collector()->MarkObject(object);
+ V8_INLINE void MarkObject(HeapObject* object) {
+ collector_->MarkObject(object);
}
- // Marks the object black without pushing it on the marking stack.
- // Returns true if object needed marking and false otherwise.
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
+ // Marks the object black without pushing it on the marking stack. Returns
+ // true if object needed marking and false otherwise.
+ V8_INLINE bool MarkObjectWithoutPush(HeapObject* object) {
return ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
}
- // Mark object pointed to by p.
- INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- HeapObject* object, Object** p)) {
+ V8_INLINE void MarkObjectByPointer(HeapObject* host, Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* target_object = HeapObject::cast(*p);
- collector->RecordSlot(object, p, target_object);
- collector->MarkObject(target_object);
- }
-
-
- // Visit an unmarked object.
- INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
- HeapObject* obj)) {
-#ifdef DEBUG
- DCHECK(collector->heap()->Contains(obj));
-#endif
- if (ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj))) {
- Map* map = obj->map();
- Heap* heap = obj->GetHeap();
- ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj));
- // Mark the map pointer and the body.
- heap->mark_compact_collector()->MarkObject(map);
- IterateBody(map, obj);
- }
+ collector_->RecordSlot(host, p, target_object);
+ collector_->MarkObject(target_object);
}
+ protected:
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
+ inline bool VisitUnmarkedObjects(HeapObject* host, Object** start,
+ Object** end) {
    // Return false if we are close to the stack limit.
- StackLimitCheck check(heap->isolate());
+ StackLimitCheck check(heap_->isolate());
if (check.HasOverflowed()) return false;
- MarkCompactCollector* collector = heap->mark_compact_collector();
// Visit the unmarked objects.
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (!o->IsHeapObject()) continue;
- collector->RecordSlot(object, p, o);
+ collector_->RecordSlot(host, p, o);
HeapObject* obj = HeapObject::cast(o);
- VisitUnmarkedObject(collector, obj);
+ VisitUnmarkedObject(obj);
}
return true;
}
- private:
- // Code flushing support.
-
- static const int kRegExpCodeThreshold = 5;
-
- static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
- bool is_one_byte) {
- // Make sure that the fixed array is in fact initialized on the RegExp.
- // We could potentially trigger a GC when initializing the RegExp.
- if (HeapObject::cast(re->data())->map()->instance_type() !=
- FIXED_ARRAY_TYPE)
- return;
-
- // Make sure this is a RegExp that actually contains code.
- if (re->TypeTag() != JSRegExp::IRREGEXP) return;
-
- Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
- if (!code->IsSmi() &&
- HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
- // Save a copy that can be reinstated if we need the code again.
- re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
-
- // Saving a copy might create a pointer into compaction candidate
- // that was not observed by marker. This might happen if JSRegExp data
- // was marked through the compilation cache before marker reached JSRegExp
- // object.
- FixedArray* data = FixedArray::cast(re->data());
- if (ObjectMarking::IsBlackOrGrey(data, MarkingState::Internal(data))) {
- Object** slot =
- data->data_start() + JSRegExp::saved_code_index(is_one_byte);
- heap->mark_compact_collector()->RecordSlot(data, slot, code);
- }
-
- // Set a number in the 0-255 range to guarantee no smi overflow.
- re->SetDataAt(JSRegExp::code_index(is_one_byte),
- Smi::FromInt(heap->ms_count() & 0xff));
- } else if (code->IsSmi()) {
- int value = Smi::cast(code)->value();
- // The regexp has not been compiled yet or there was a compilation error.
- if (value == JSRegExp::kUninitializedValue ||
- value == JSRegExp::kCompilationErrorValue) {
- return;
- }
-
- // Check if we should flush now.
- if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
- re->SetDataAt(JSRegExp::code_index(is_one_byte),
- Smi::FromInt(JSRegExp::kUninitializedValue));
- re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
- Smi::FromInt(JSRegExp::kUninitializedValue));
- }
- }
- }
-
-
- // Works by setting the current sweep_generation (as a smi) in the
- // code object place in the data array of the RegExp and keeps a copy
- // around that can be reinstated if we reuse the RegExp before flushing.
- // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
- // we flush the code.
- static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (!collector->is_code_flushing_enabled()) {
- JSObjectVisitor::Visit(map, object);
- return;
- }
- JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
- // Flush code or set age on both one byte and two byte code.
- UpdateRegExpCodeAgeAndFlush(heap, re, true);
- UpdateRegExpCodeAgeAndFlush(heap, re, false);
- // Visit the fields of the RegExp, including the updated FixedArray.
- JSObjectVisitor::Visit(map, object);
- }
-};
-
-
-void MarkCompactMarkingVisitor::Initialize() {
- StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
-
- table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
-}
-
-
-class CodeMarkingVisitor : public ThreadVisitor {
- public:
- explicit CodeMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- collector_->PrepareThreadForCodeFlushing(isolate, top);
- }
-
- private:
- MarkCompactCollector* collector_;
-};
-
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor,
- public RootVisitor {
- public:
- explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) MarkObject(p);
- }
-
- void VisitPointer(HeapObject* host, Object** slot) override {
- MarkObject(slot);
- }
-
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) MarkObject(p);
- }
-
- void VisitRootPointer(Root root, Object** slot) override { MarkObject(slot); }
-
- private:
- void MarkObject(Object** slot) {
- Object* obj = *slot;
- if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- collector_->MarkObject(shared->code());
- collector_->MarkObject(shared);
+ // Visit an unmarked object.
+ V8_INLINE void VisitUnmarkedObject(HeapObject* obj) {
+ DCHECK(heap_->Contains(obj));
+ if (ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj))) {
+ Map* map = obj->map();
+ ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj));
+ // Mark the map pointer and the body.
+ collector_->MarkObject(map);
+ Visit(map, obj);
}
}
- MarkCompactCollector* collector_;
};
-
-void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
- ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- // Note: for the frame that has a pending lazy deoptimization
- // StackFrame::unchecked_code will return a non-optimized code object for
- // the outermost function and StackFrame::LookupCode will return
- // actual optimized code object.
- StackFrame* frame = it.frame();
- Code* code = frame->unchecked_code();
- MarkObject(code);
- if (frame->is_optimized()) {
- Code* optimized_code = frame->LookupCode();
- MarkObject(optimized_code);
- }
- }
-}
-
-
-void MarkCompactCollector::PrepareForCodeFlushing() {
- // If code flushing is disabled, there is no need to prepare for it.
- if (!is_code_flushing_enabled()) return;
-
- // Make sure we are not referencing the code from the stack.
- DCHECK(this == heap()->mark_compact_collector());
- PrepareThreadForCodeFlushing(heap()->isolate(),
- heap()->isolate()->thread_local_top());
-
- // Iterate the archived stacks in all threads to check if
- // the code is referenced.
- CodeMarkingVisitor code_marking_visitor(this);
- heap()->isolate()->thread_manager()->IterateArchivedThreads(
- &code_marking_visitor);
-
- SharedFunctionInfoMarkingVisitor visitor(this);
- heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
- heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
-
- ProcessMarkingDeque();
-}
-
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
for (Page* p : sweep_to_iterate_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
@@ -1452,7 +1129,7 @@ class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor,
public RootVisitor {
public:
explicit RootMarkingVisitor(Heap* heap)
- : collector_(heap->mark_compact_collector()) {}
+ : collector_(heap->mark_compact_collector()), visitor_(collector_) {}
void VisitPointer(HeapObject* host, Object** p) override {
MarkObjectByPointer(p);
@@ -1480,19 +1157,20 @@ class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor,
HeapObject* object = HeapObject::cast(*p);
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
object, MarkingState::Internal(object))) {
Map* map = object->map();
// Mark the map pointer and body, and push them on the marking stack.
collector_->MarkObject(map);
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ visitor_.Visit(map, object);
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
+ collector_->EmptyMarkingWorklist();
}
}
MarkCompactCollector* collector_;
+ MarkCompactMarkingVisitor visitor_;
};
class InternalizedStringTableCleaner : public ObjectVisitor {
@@ -1609,10 +1287,11 @@ class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
HeapObject* heap_object = HeapObject::cast(object);
if (!collector_.heap()->InNewSpace(heap_object)) return object;
- DCHECK(!ObjectMarking::IsGrey(heap_object,
- collector_.marking_state(heap_object)));
- if (ObjectMarking::IsBlack(heap_object,
- collector_.marking_state(heap_object))) {
+ // Young generation marking only marks to grey instead of black.
+ DCHECK(!ObjectMarking::IsBlack(heap_object,
+ collector_.marking_state(heap_object)));
+ if (ObjectMarking::IsGrey(heap_object,
+ collector_.marking_state(heap_object))) {
return object;
}
return nullptr;
@@ -1655,29 +1334,28 @@ template <class T>
void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
// The caller should ensure that the marking stack is initially not full,
// so that we don't waste effort pointlessly scanning for objects.
- DCHECK(!marking_deque()->IsFull());
+ DCHECK(!marking_worklist()->IsFull());
Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
if ((object->map() != filler_map) &&
ObjectMarking::GreyToBlack(object, MarkingState::Internal(object))) {
PushBlack(object);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
}
void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
- DCHECK(!marking_deque()->IsFull());
- LiveObjectIterator<kGreyObjects> it(p, MarkingState::Internal(p));
- HeapObject* object = NULL;
- while ((object = it.Next()) != NULL) {
- bool success =
- ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
+ DCHECK(!marking_worklist()->IsFull());
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(p, marking_state(p))) {
+ HeapObject* const object = object_and_size.first;
+ bool success = ObjectMarking::GreyToBlack(object, marking_state(object));
DCHECK(success);
USE(success);
PushBlack(object);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
@@ -1767,9 +1445,13 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
if (value->IsHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ DCHECK_IMPLIES(p->InToSpace(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate()) {
- RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
}
}
}
@@ -1815,9 +1497,9 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
// Migrate color to old generation marking in case the object survived young
// generation garbage collection.
if (heap_->incremental_marking()->IsMarking()) {
- DCHECK(ObjectMarking::IsWhite(
+ DCHECK(ObjectMarking::IsWhite<AccessMode::ATOMIC>(
dst, mark_compact_collector_->marking_state(dst)));
- heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst);
+ heap_->incremental_marking()->TransferColor<AccessMode::ATOMIC>(src, dst);
}
}
@@ -1865,9 +1547,13 @@ class YoungGenerationRecordMigratedSlotVisitor final
if (value->IsHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ DCHECK_IMPLIES(p->InToSpace(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
} else if (p->IsEvacuationCandidate() && IsLive(host)) {
- RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
}
}
}
@@ -1876,7 +1562,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() {}
- virtual bool Visit(HeapObject* object) = 0;
+ virtual bool Visit(HeapObject* object, int size) = 0;
};
class EvacuateVisitorBase : public HeapObjectVisitor {
@@ -1924,8 +1610,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
- reinterpret_cast<base::AtomicWord>(dst_addr));
+ base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
+ reinterpret_cast<base::AtomicWord>(dst_addr));
}
EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
@@ -1937,11 +1623,10 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
- HeapObject** target_object) {
+ int size, HeapObject** target_object) {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
- int size = object->Size();
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
if (allocation.To(target_object)) {
@@ -2006,19 +1691,18 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
- inline bool Visit(HeapObject* object) override {
- heap_->UpdateAllocationSite<Heap::kCached>(object,
- local_pretenuring_feedback_);
- int size = object->Size();
+ inline bool Visit(HeapObject* object, int size) override {
HeapObject* target_object = nullptr;
- if (heap_->ShouldBePromoted(object->address(), size) &&
- TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
+ if (heap_->ShouldBePromoted(object->address()) &&
+ TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, size,
&target_object)) {
promoted_size_ += size;
return true;
}
+ heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
+ local_pretenuring_feedback_);
HeapObject* target = nullptr;
- AllocationSpace space = AllocateTargetObject(object, &target);
+ AllocationSpace space = AllocateTargetObject(object, size, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
semispace_copied_size_ += size;
return true;
@@ -2034,9 +1718,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
kStickyBailoutOldSpace,
};
- inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+ inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
- const int size = old_object->Size();
AllocationAlignment alignment = old_object->RequiredAlignment();
AllocationResult allocation;
AllocationSpace space_allocated_in = space_to_allocate_;
@@ -2153,16 +1836,18 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
case NEW_TO_OLD: {
page->Unlink();
Page* new_page = Page::ConvertNewToOld(page);
+ DCHECK(!new_page->InNewSpace());
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
break;
}
}
}
- inline bool Visit(HeapObject* object) {
- heap_->UpdateAllocationSite<Heap::kCached>(object,
- local_pretenuring_feedback_);
- if (mode == NEW_TO_OLD) {
+ inline bool Visit(HeapObject* object, int size) {
+ if (mode == NEW_TO_NEW) {
+ heap_->UpdateAllocationSite<Heap::kCached>(object->map(), object,
+ local_pretenuring_feedback_);
+ } else if (mode == NEW_TO_OLD) {
object->IterateBodyFast(record_visitor_);
}
return true;
@@ -2185,11 +1870,11 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
RecordMigratedSlotVisitor* record_visitor)
: EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {}
- inline bool Visit(HeapObject* object) override {
+ inline bool Visit(HeapObject* object, int size) override {
CompactionSpace* target_space = compaction_spaces_->Get(
Page::FromAddress(object->address())->owner()->identity());
HeapObject* target_object = nullptr;
- if (TryEvacuateObject(target_space, object, &target_object)) {
+ if (TryEvacuateObject(target_space, object, size, &target_object)) {
DCHECK(object->map_word().IsForwardingAddress());
return true;
}
@@ -2201,7 +1886,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
- inline bool Visit(HeapObject* object) {
+ inline bool Visit(HeapObject* object, int size) {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
object->IterateBody(&visitor);
return true;
@@ -2214,7 +1899,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
for (Page* p : *space) {
DiscoverGreyObjectsOnPage(p);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
@@ -2223,7 +1908,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
for (Page* page : PageRange(space->bottom(), space->top())) {
DiscoverGreyObjectsOnPage(page);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
}
}
@@ -2242,7 +1927,7 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
MarkingState::Internal(string_table))) {
// Explicitly mark the prefix.
string_table->IteratePrefix(visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
}
@@ -2255,9 +1940,9 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
MarkStringTable(visitor);
// There may be overflowed objects in the heap. Visit them now.
- while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ while (marking_worklist()->overflowed()) {
+ RefillMarkingWorklist();
+ EmptyMarkingWorklist();
}
}
@@ -2265,20 +1950,21 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingDeque() {
- while (!marking_deque()->IsEmpty()) {
- HeapObject* object = marking_deque()->Pop();
-
+void MarkCompactCollector::EmptyMarkingWorklist() {
+ HeapObject* object;
+ MarkCompactMarkingVisitor visitor(this);
+ while ((object = marking_worklist()->Pop()) != nullptr) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
+ DCHECK(!(ObjectMarking::IsWhite<AccessMode::NON_ATOMIC>(
object, MarkingState::Internal(object))));
Map* map = object->map();
MarkObject(map);
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ visitor.Visit(map, object);
}
+ DCHECK(marking_worklist()->IsEmpty());
}
@@ -2287,44 +1973,44 @@ void MarkCompactCollector::EmptyMarkingDeque() {
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
-void MarkCompactCollector::RefillMarkingDeque() {
+void MarkCompactCollector::RefillMarkingWorklist() {
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
- DCHECK(marking_deque()->overflowed());
+ DCHECK(marking_worklist()->overflowed());
DiscoverGreyObjectsInNewSpace();
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
LargeObjectIterator lo_it(heap()->lo_space());
DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_deque()->IsFull()) return;
+ if (marking_worklist()->IsFull()) return;
- marking_deque()->ClearOverflowed();
+ marking_worklist()->ClearOverflowed();
}
// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
-void MarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
- while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+void MarkCompactCollector::ProcessMarkingWorklist() {
+ EmptyMarkingWorklist();
+ while (marking_worklist()->overflowed()) {
+ RefillMarkingWorklist();
+ EmptyMarkingWorklist();
}
- DCHECK(marking_deque()->IsEmpty());
+ DCHECK(marking_worklist()->IsEmpty());
}
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
bool only_process_harmony_weak_collections) {
- DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
+ DCHECK(marking_worklist()->IsEmpty() && !marking_worklist()->overflowed());
bool work_to_do = true;
while (work_to_do) {
if (!only_process_harmony_weak_collections) {
@@ -2344,10 +2030,10 @@ void MarkCompactCollector::ProcessEphemeralMarking(
heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
}
ProcessWeakCollections();
- work_to_do = !marking_deque()->IsEmpty();
- ProcessMarkingDeque();
+ work_to_do = !marking_worklist()->IsEmpty();
+ ProcessMarkingWorklist();
}
- CHECK(marking_deque()->IsEmpty());
+ CHECK(marking_worklist()->IsEmpty());
CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}
@@ -2363,7 +2049,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code, visitor);
}
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
return;
}
}
@@ -2380,7 +2066,7 @@ class ObjectStatsVisitor : public HeapObjectVisitor {
live_collector_.CollectGlobalStatistics();
}
- bool Visit(HeapObject* obj) override {
+ bool Visit(HeapObject* obj, int size) override {
if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
live_collector_.CollectStatistics(obj);
} else {
@@ -2402,7 +2088,7 @@ void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
ObjectIterator* obj_it = it.get();
while ((obj = obj_it->Next()) != nullptr) {
- visitor->Visit(obj);
+ visitor->Visit(obj, obj->Size());
}
}
}
@@ -2433,65 +2119,28 @@ void MarkCompactCollector::RecordObjectStats() {
}
class YoungGenerationMarkingVisitor final
- : public HeapVisitor<void, YoungGenerationMarkingVisitor> {
+ : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
- using BaseClass = HeapVisitor<int, YoungGenerationMarkingVisitor>;
+ YoungGenerationMarkingVisitor(
+ Heap* heap, MinorMarkCompactCollector::MarkingWorklist* global_worklist,
+ int task_id)
+ : heap_(heap), worklist_(global_worklist, task_id) {}
- YoungGenerationMarkingVisitor(Heap* heap,
- WorkStealingMarkingDeque* global_marking_deque,
- int task_id)
- : heap_(heap), marking_deque_(global_marking_deque, task_id) {}
-
- void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- const int kMinRangeForMarkingRecursion = 64;
- if (end - start >= kMinRangeForMarkingRecursion) {
- if (MarkRecursively(host, start, end)) return;
- }
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) {
VisitPointer(host, p);
}
}
- void VisitPointer(HeapObject* host, Object** slot) final {
+ V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
Object* target = *slot;
if (heap_->InNewSpace(target)) {
HeapObject* target_object = HeapObject::cast(target);
- MarkObjectViaMarkingDeque(target_object);
+ MarkObjectViaMarkingWorklist(target_object);
}
}
- // Special cases for young generation. Also see StaticNewSpaceVisitor.
-
- void VisitJSFunction(Map* map, JSFunction* object) final {
- if (!ShouldVisit(object)) return;
- int size = JSFunction::BodyDescriptorWeakCode::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- JSFunction::BodyDescriptorWeakCode::IterateBody(object, size, this);
- return;
- }
-
- void VisitNativeContext(Map* map, Context* object) final {
- if (!ShouldVisit(object)) return;
- int size = Context::ScavengeBodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- Context::ScavengeBodyDescriptor::IterateBody(object, size, this);
- return;
- }
-
- void VisitJSApiObject(Map* map, JSObject* object) final {
- return VisitJSObject(map, object);
- }
-
- void VisitBytecodeArray(Map* map, BytecodeArray* object) final {
- UNREACHABLE();
- return;
- }
-
- void VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) final {
- UNREACHABLE();
- return;
- }
-
private:
inline MarkingState marking_state(HeapObject* object) {
SLOW_DCHECK(
@@ -2500,32 +2149,16 @@ class YoungGenerationMarkingVisitor final
return MarkingState::External(object);
}
- inline void MarkObjectViaMarkingDeque(HeapObject* object) {
- if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(object,
- marking_state(object))) {
+ inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
+ if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
+ marking_state(object))) {
// Marking deque overflow is unsupported for the young generation.
- CHECK(marking_deque_.Push(object));
+ CHECK(worklist_.Push(object));
}
}
- inline bool MarkRecursively(HeapObject* host, Object** start, Object** end) {
- // TODO(mlippautz): Stack check on background tasks. We cannot do a reliable
- // stack check on background tasks yet.
- for (Object** p = start; p < end; p++) {
- Object* target = *p;
- if (heap_->InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(
- target_object, marking_state(target_object))) {
- Visit(target_object);
- }
- }
- }
- return true;
- }
-
Heap* heap_;
- LocalWorkStealingMarkingDeque marking_deque_;
+ MinorMarkCompactCollector::MarkingWorklist::View worklist_;
};
class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
@@ -2555,10 +2188,10 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
if (!collector_->heap()->InNewSpace(object)) return;
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToGrey<AccessMode::NON_ATOMIC>(
object, marking_state(object))) {
- collector_->marking_visitor(kMainMarker)->Visit(object);
- collector_->EmptyMarkingDeque();
+ collector_->main_marking_visitor()->Visit(object);
+ collector_->EmptyMarkingWorklist();
}
}
@@ -2566,6 +2199,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
};
class MarkingItem;
+class GlobalHandlesMarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
@@ -2578,15 +2212,16 @@ class MarkingItem : public ItemParallelJob::Item {
class YoungGenerationMarkingTask : public ItemParallelJob::Task {
public:
- YoungGenerationMarkingTask(Isolate* isolate,
- MinorMarkCompactCollector* collector,
- WorkStealingMarkingDeque* marking_deque,
- YoungGenerationMarkingVisitor* visitor,
- int task_id)
+ YoungGenerationMarkingTask(
+ Isolate* isolate, MinorMarkCompactCollector* collector,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: ItemParallelJob::Task(isolate),
collector_(collector),
- marking_deque_(marking_deque, task_id),
- visitor_(visitor) {}
+ marking_worklist_(global_worklist, task_id),
+ visitor_(isolate->heap(), global_worklist, task_id) {
+ local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
+ Page::kPageSize);
+ }
void RunInParallel() override {
double marking_time = 0.0;
@@ -2596,10 +2231,11 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
while ((item = GetItem<MarkingItem>()) != nullptr) {
item->Process(this);
item->MarkFinished();
- EmptyLocalMarkingDeque();
+ EmptyLocalMarkingWorklist();
}
- EmptyMarkingDeque();
- DCHECK(marking_deque_.IsEmpty());
+ EmptyMarkingWorklist();
+ DCHECK(marking_worklist_.IsLocalEmpty());
+ FlushLiveBytes();
}
if (FLAG_trace_minor_mc_parallel_marking) {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
@@ -2610,32 +2246,50 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
void MarkObject(Object* object) {
if (!collector_->heap()->InNewSpace(object)) return;
HeapObject* heap_object = HeapObject::cast(object);
- if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(
+ if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(
heap_object, collector_->marking_state(heap_object))) {
- visitor_->Visit(heap_object);
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
}
}
private:
- void EmptyLocalMarkingDeque() {
+ MarkingState marking_state(HeapObject* object) {
+ return MarkingState::External(object);
+ }
+
+ void EmptyLocalMarkingWorklist() {
HeapObject* object = nullptr;
- while (marking_deque_.Pop(&object)) {
- visitor_->Visit(object);
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
}
}
- void EmptyMarkingDeque() {
+ void EmptyMarkingWorklist() {
HeapObject* object = nullptr;
- while (marking_deque_.WaitForMoreObjects()) {
- while (marking_deque_.Pop(&object)) {
- visitor_->Visit(object);
- }
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
+ }
+ }
+
+ void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
+ local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
+ bytes;
+ }
+
+ void FlushLiveBytes() {
+ for (auto pair : local_live_bytes_) {
+ collector_->marking_state(pair.first)
+ .IncrementLiveBytes<AccessMode::ATOMIC>(pair.second);
}
}
MinorMarkCompactCollector* collector_;
- LocalWorkStealingMarkingDeque marking_deque_;
- YoungGenerationMarkingVisitor* visitor_;
+ MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
+ YoungGenerationMarkingVisitor visitor_;
+ std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
};
class BatchedRootMarkingItem : public MarkingItem {
@@ -2656,8 +2310,10 @@ class BatchedRootMarkingItem : public MarkingItem {
class PageMarkingItem : public MarkingItem {
public:
- explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- virtual ~PageMarkingItem() {}
+ explicit PageMarkingItem(MemoryChunk* chunk,
+ base::AtomicNumber<intptr_t>* global_slots)
+ : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
+ virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
void Process(YoungGenerationMarkingTask* task) override {
base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
@@ -2669,9 +2325,10 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(chunk_, [this, task](Address slot) {
- return CheckAndMarkObject(task, slot);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
@@ -2696,12 +2353,56 @@ class PageMarkingItem : public MarkingItem {
DCHECK(heap()->InToSpace(object));
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
task->MarkObject(heap_object);
+ slots_++;
return KEEP_SLOT;
}
return REMOVE_SLOT;
}
MemoryChunk* chunk_;
+ base::AtomicNumber<intptr_t>* global_slots_;
+ intptr_t slots_;
+};
+
+class GlobalHandlesMarkingItem : public MarkingItem {
+ public:
+ GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
+ size_t end)
+ : global_handles_(global_handles), start_(start), end_(end) {}
+ virtual ~GlobalHandlesMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ GlobalHandlesRootMarkingVisitor visitor(task);
+ global_handles_
+ ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
+ &visitor, start_, end_);
+ }
+
+ private:
+ class GlobalHandlesRootMarkingVisitor : public RootVisitor {
+ public:
+ explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
+ : task_(task) {}
+
+ void VisitRootPointer(Root root, Object** p) override {
+ DCHECK(Root::kGlobalHandles == root);
+ task_->MarkObject(*p);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ DCHECK(Root::kGlobalHandles == root);
+ for (Object** p = start; p < end; p++) {
+ task_->MarkObject(*p);
+ }
+ }
+
+ private:
+ YoungGenerationMarkingTask* task_;
+ };
+
+ GlobalHandles* global_handles_;
+ size_t start_;
+ size_t end_;
};
// This root visitor walks all roots and creates items bundling objects that
@@ -2738,7 +2439,7 @@ class MinorMarkCompactCollector::RootMarkingVisitorSeedOnly
// Bundling several objects together in items avoids issues with allocating
// and deallocating items; both are operations that are performed on the main
// thread.
- static const int kBufferSize = 32;
+ static const int kBufferSize = 128;
void AddObject(Object* object) {
buffered_objects_.push_back(object);
@@ -2750,73 +2451,81 @@ class MinorMarkCompactCollector::RootMarkingVisitorSeedOnly
};
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
- : MarkCompactCollectorBase(heap), page_parallel_job_semaphore_(0) {
- marking_deque_ = new WorkStealingMarkingDeque();
- for (int i = 0; i < kNumMarkers; i++) {
- marking_visitor_[i] =
- new YoungGenerationMarkingVisitor(heap, marking_deque_, i);
- }
+ : MarkCompactCollectorBase(heap),
+ worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
+ main_marking_visitor_(
+ new YoungGenerationMarkingVisitor(heap, worklist_, kMainMarker)),
+ page_parallel_job_semaphore_(0) {
+ static_assert(
+ kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
+ "more marker tasks than marking deque can handle");
}
MinorMarkCompactCollector::~MinorMarkCompactCollector() {
- for (int i = 0; i < kNumMarkers; i++) {
- DCHECK_NOT_NULL(marking_visitor_[i]);
- delete marking_visitor_[i];
- }
- delete marking_deque_;
-}
-
-SlotCallbackResult MinorMarkCompactCollector::CheckAndMarkObject(
- Heap* heap, Address slot_address) {
- Object* object = *reinterpret_cast<Object**>(slot_address);
- if (heap->InNewSpace(object)) {
- // Marking happens before flipping the young generation, so the object
- // has to be in ToSpace.
- DCHECK(heap->InToSpace(object));
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- const MarkingState state = MarkingState::External(heap_object);
- if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state)) {
- heap->minor_mark_compact_collector()
- ->marking_visitor(kMainMarker)
- ->Visit(heap_object);
- }
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
+ delete worklist_;
+ delete main_marking_visitor_;
}
static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
return heap->InNewSpace(*p) &&
- !ObjectMarking::IsBlack(HeapObject::cast(*p),
- MarkingState::External(HeapObject::cast(*p)));
+ !ObjectMarking::IsGrey(HeapObject::cast(*p),
+ MarkingState::External(HeapObject::cast(*p)));
}
-void MinorMarkCompactCollector::MarkRootSetInParallel() {
- // Seed the root set (roots + old->new set).
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
- RootMarkingVisitorSeedOnly root_seed_visitor(&job);
- heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_SCAVENGE);
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&job](MemoryChunk* chunk) {
- job.AddItem(new PageMarkingItem(chunk));
- });
- root_seed_visitor.FlushObjects();
+template <class ParallelItem>
+static void SeedGlobalHandles(GlobalHandles* global_handles,
+ ItemParallelJob* job) {
+ // Create batches of global handles.
+ const size_t kGlobalHandlesBufferSize = 1000;
+ const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
+ for (size_t start = 0; start < new_space_nodes;
+ start += kGlobalHandlesBufferSize) {
+ size_t end = start + kGlobalHandlesBufferSize;
+ if (end > new_space_nodes) end = new_space_nodes;
+ job->AddItem(new ParallelItem(global_handles, start, end));
}
+}
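The seeding loop above is plain fixed-size chunking of the new-space node count. A self-contained sketch of the same arithmetic (the 1000-element batch mirrors kGlobalHandlesBufferSize; MakeBatches is a hypothetical helper, not a V8 API):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Splits [0, total) into half-open [start, end) batches of at most
// batch_size elements, one batch per parallel marking item.
std::vector<std::pair<size_t, size_t>> MakeBatches(size_t total,
                                                   size_t batch_size) {
  std::vector<std::pair<size_t, size_t>> batches;
  for (size_t start = 0; start < total; start += batch_size) {
    batches.emplace_back(start, std::min(start + batch_size, total));
  }
  return batches;
}

// Example: MakeBatches(2500, 1000) yields {0,1000}, {1000,2000}, {2000,2500}.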
+void MinorMarkCompactCollector::MarkRootSetInParallel() {
+ base::AtomicNumber<intptr_t> slots;
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- const int num_tasks = NumberOfMarkingTasks();
- for (int i = 0; i < num_tasks; i++) {
- job.AddTask(new YoungGenerationMarkingTask(
- isolate(), this, marking_deque(), marking_visitor(i), i));
+ ItemParallelJob job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ // Seed the root set (roots + old->new set).
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ // Create batches of roots.
+ RootMarkingVisitorSeedOnly root_seed_visitor(&job);
+ heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
+ &job);
+ // Create items for each page.
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&job, &slots](MemoryChunk* chunk) {
+ job.AddItem(new PageMarkingItem(chunk, &slots));
+ });
+ // Flush any remaining objects in the seeding visitor.
+ root_seed_visitor.FlushObjects();
+ }
+
+ // Add tasks and run in parallel.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ const int new_space_pages =
+ static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
+ const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
+ for (int i = 0; i < num_tasks; i++) {
+ job.AddTask(
+ new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
+ }
+ job.Run();
+ DCHECK(worklist()->IsGlobalEmpty());
}
- job.Run();
}
+ old_to_new_slots_ = static_cast<int>(slots.Value());
}
void MinorMarkCompactCollector::MarkLiveObjects() {
@@ -2826,20 +2535,13 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
RootMarkingVisitor root_visitor(this);
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES);
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &Heap::IsUnmodifiedHeapObject);
- }
-
MarkRootSetInParallel();
// Mark rest on the main thread.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
heap()->IterateEncounteredWeakCollections(&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
{
@@ -2848,29 +2550,28 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedObjectForYoungGeneration);
isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
}
-void MinorMarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
+void MinorMarkCompactCollector::ProcessMarkingWorklist() {
+ EmptyMarkingWorklist();
}
-void MinorMarkCompactCollector::EmptyMarkingDeque() {
- LocalWorkStealingMarkingDeque local_marking_deque(marking_deque(),
- kMainMarker);
+void MinorMarkCompactCollector::EmptyMarkingWorklist() {
+ MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
HeapObject* object = nullptr;
- while (local_marking_deque.Pop(&object)) {
+ while (marking_worklist.Pop(&object)) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
+ DCHECK(!(ObjectMarking::IsWhite<AccessMode::NON_ATOMIC>(
object, marking_state(object))));
- DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
+ DCHECK((ObjectMarking::IsGrey<AccessMode::NON_ATOMIC>(
object, marking_state(object))));
- marking_visitor(kMainMarker)->Visit(object);
+ main_marking_visitor()->Visit(object);
}
- DCHECK(local_marking_deque.IsEmpty());
+ DCHECK(marking_worklist.IsLocalEmpty());
}
void MinorMarkCompactCollector::CollectGarbage() {
@@ -2899,7 +2600,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
+ heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
}
{
@@ -2922,20 +2623,20 @@ void MinorMarkCompactCollector::MakeIterable(
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- LiveObjectIterator<kBlackObjects> it(p, marking_state(p));
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- DCHECK(ObjectMarking::IsBlack(object, marking_state(object)));
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(p, marking_state(p))) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(ObjectMarking::IsGrey(object, marking_state(object)));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
+ full_collector->marking_state(p).bitmap()->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
- full_collector->marking_state(p).bitmap()->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(free_end));
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2948,11 +2649,11 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
+ full_collector->marking_state(p).bitmap()->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
- full_collector->marking_state(p).bitmap()->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(p->area_end()));
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -2988,7 +2689,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- new_space_evacuation_pages_.Add(p);
+ new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
@@ -3035,7 +2736,7 @@ void MinorMarkCompactCollector::Evacuate() {
sweep_to_iterate_pages_.push_back(p);
}
}
- new_space_evacuation_pages_.Rewind(0);
+ new_space_evacuation_pages_.clear();
}
{
@@ -3066,15 +2767,10 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- marking_deque()->StartUsing();
+ marking_worklist()->StartUsing();
heap_->local_embedder_heap_tracer()->EnterFinalPause();
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
- PrepareForCodeFlushing();
- }
-
RootMarkingVisitor root_visitor(heap());
{
@@ -3107,7 +2803,7 @@ void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
heap()->isolate()->global_handles()->IdentifyWeakHandles(
&IsUnmarkedHeapObject);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
// Then we mark the objects.
@@ -3115,7 +2811,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingWorklist();
}
// Repeat Harmony weak maps marking to mark unmarked objects reachable from
@@ -3161,13 +2857,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
}
- // Flush code from collected candidates.
- if (is_code_flushing_enabled()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
- code_flusher_->ProcessCandidates();
- }
-
-
DependentCode* dependent_code_list;
Object* non_live_map_list;
ClearWeakCells(&non_live_map_list, &dependent_code_list);
@@ -3392,8 +3081,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
- live_enum =
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+ live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -3410,6 +3098,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
+ MarkCompactMarkingVisitor visitor(this);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
@@ -3427,8 +3116,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
RecordSlot(table, key_slot, *key_slot);
Object** value_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
- value_slot);
+ visitor.MarkObjectByPointer(table, value_slot);
}
}
}
@@ -3596,10 +3284,9 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
}
}
+template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(Object** slot) {
- Object* obj = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
+ Object* obj = *slot;
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
MapWord map_word = heap_obj->map_word();
@@ -3609,14 +3296,16 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
Page::FromAddress(heap_obj->address())
->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
HeapObject* target = map_word.ToForwardingAddress();
- base::NoBarrier_CompareAndSwap(
- reinterpret_cast<base::AtomicWord*>(slot),
- reinterpret_cast<base::AtomicWord>(obj),
- reinterpret_cast<base::AtomicWord>(target));
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ *slot = target;
+ } else {
+ base::AsAtomicWord::Release_CompareAndSwap(slot, obj, target);
+ }
DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
}
+ // OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
}
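The template above either stores the forwarding address plainly (single-threaded updating) or publishes it with a compare-and-swap so concurrent updaters of the same slot cannot overwrite each other's result. A rough stand-alone equivalent using std::atomic; V8's base::AsAtomicWord helper is only approximated here, and the Access enum and type names are hypothetical:

#include <atomic>

struct Object;  // stand-in for the heap object type

enum class Access { kNonAtomic, kAtomic };

template <Access access>
void UpdateForwardedSlot(Object** slot, Object* old_value, Object* target) {
  if (access == Access::kNonAtomic) {
    *slot = target;  // only one thread updates this slot, a plain store is fine
  } else {
    // Several tasks may race on the slot; install the forwarding target only
    // if the slot still holds the value that was read.
    auto* atomic_slot = reinterpret_cast<std::atomic<Object*>*>(slot);
    atomic_slot->compare_exchange_strong(old_value, target,
                                         std::memory_order_release,
                                         std::memory_order_relaxed);
  }
}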
@@ -3626,36 +3315,45 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
// never visits code objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- void VisitPointer(HeapObject* host, Object** p) override { UpdateSlot(p); }
+ void VisitPointer(HeapObject* host, Object** p) override {
+ UpdateSlotInternal(p);
+ }
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlot(p);
+ for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
- void VisitRootPointer(Root root, Object** p) override { UpdateSlot(p); }
+ void VisitRootPointer(Root root, Object** p) override {
+ UpdateSlotInternal(p);
+ }
void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlot(p);
+ for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlotInternal);
}
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlotInternal);
}
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlotInternal);
}
void VisitCodeEntry(JSFunction* host, Address entry_address) override {
- UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlotInternal);
}
void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
+ UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlotInternal);
+ }
+
+ private:
+ static inline SlotCallbackResult UpdateSlotInternal(Object** slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
};
@@ -3675,15 +3373,16 @@ void MarkCompactCollector::EvacuatePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- new_space_evacuation_pages_.Add(p);
+ new_space_evacuation_pages_.push_back(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
// Old space.
- DCHECK(old_space_evacuation_pages_.is_empty());
- old_space_evacuation_pages_.Swap(&evacuation_candidates_);
- DCHECK(evacuation_candidates_.is_empty());
+ DCHECK(old_space_evacuation_pages_.empty());
+ old_space_evacuation_pages_ = std::move(evacuation_candidates_);
+ evacuation_candidates_.clear();
+ DCHECK(evacuation_candidates_.empty());
}
void MarkCompactCollector::EvacuateEpilogue() {
@@ -3709,7 +3408,6 @@ class Evacuator : public Malloced {
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
return kPageNewToNew;
if (chunk->InNewSpace()) return kObjectsNewToOld;
- DCHECK(chunk->IsEvacuationCandidate());
return kObjectsOldToOld;
}
@@ -3738,7 +3436,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() {}
- bool EvacuatePage(Page* page);
+ void EvacuatePage(Page* page);
void AddObserver(MigrationObserver* observer) {
new_space_visitor_.AddObserver(observer);
@@ -3756,7 +3454,7 @@ class Evacuator : public Malloced {
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual bool RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+ virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
inline Heap* heap() { return heap_; }
@@ -3784,31 +3482,29 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
-bool Evacuator::EvacuatePage(Page* page) {
- bool success = false;
+void Evacuator::EvacuatePage(Page* page) {
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
- success = RawEvacuatePage(page, &saved_live_bytes);
+ RawEvacuatePage(page, &saved_live_bytes);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- PrintIsolate(heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d "
- "page_evacuation=%d executable=%d contains_age_mark=%d "
- "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
- static_cast<void*>(this), static_cast<void*>(page),
- page->InNewSpace(),
- page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
- page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
- page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
- page->Contains(heap()->new_space()->age_mark()),
- saved_live_bytes, evacuation_time, success);
+ PrintIsolate(
+ heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+ static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
+ page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
+ evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
- return success;
}
void Evacuator::Finalize() {
@@ -3836,64 +3532,49 @@ class FullEvacuator : public Evacuator {
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
protected:
- bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
MarkCompactCollector* collector_;
};
-bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
- bool success = false;
- LiveObjectVisitor object_visitor;
+void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
const MarkingState state = collector_->marking_state(page);
*live_bytes = state.live_bytes();
+ HeapObject* failed_object = nullptr;
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
- DCHECK(success);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ // ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
page, state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
// ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
page, state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
// ArrayBufferTracker will be updated during sweeping.
break;
- case kObjectsOldToOld:
- success = object_visitor.VisitBlackObjects(
- page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+ case kObjectsOldToOld: {
+ const bool success = LiveObjectVisitor::VisitBlackObjects(
+ page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits,
+ &failed_object);
if (!success) {
- // Aborted compaction page. We have to record slots here, since we
- // might not have recorded them in first place.
- // Note: We mark the page as aborted here to be able to record slots
- // for code objects in |RecordMigratedSlotVisitor| and to be able
- // to identify the page later on for post processing.
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
- EvacuateRecordOnlyVisitor record_visitor(heap());
- success = object_visitor.VisitBlackObjects(
- page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
- DCHECK(success);
- success = false;
+ // Aborted compaction page. Actual processing happens on the main
+ // thread for simplicity reasons.
+ collector_->ReportAbortedEvacuationCandidate(failed_object, page);
} else {
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ // ArrayBufferTracker will be updated during pointers updating.
}
break;
+ }
}
- return success;
}
class YoungGenerationEvacuator : public Evacuator {
@@ -3903,76 +3584,95 @@ class YoungGenerationEvacuator : public Evacuator {
: Evacuator(collector->heap(), record_visitor), collector_(collector) {}
protected:
- bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
MinorMarkCompactCollector* collector_;
};
-bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
intptr_t* live_bytes) {
- bool success = false;
- LiveObjectVisitor object_visitor;
const MarkingState state = collector_->marking_state(page);
*live_bytes = state.live_bytes();
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
- DCHECK(success);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ // ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
page, state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
ArrayBufferTracker::FreeDead(page, state);
- if (heap()->ShouldZapGarbage())
+ if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
break;
case kPageNewToNew:
- success = object_visitor.VisitBlackObjects(
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
page, state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
ArrayBufferTracker::FreeDead(page, state);
- if (heap()->ShouldZapGarbage())
+ if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
break;
case kObjectsOldToOld:
UNREACHABLE();
break;
}
- return success;
}
-class EvacuationJobTraits {
+class PageEvacuationItem : public ItemParallelJob::Item {
public:
- struct PageData {
- MarkingState marking_state;
- };
+ explicit PageEvacuationItem(Page* page) : page_(page) {}
+ virtual ~PageEvacuationItem() {}
+ Page* page() const { return page_; }
- typedef PageData PerPageData;
- typedef Evacuator* PerTaskData;
+ private:
+ Page* page_;
+};
- static void ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
- MemoryChunk* chunk, PerPageData) {
- evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
- }
+class PageEvacuationTask : public ItemParallelJob::Task {
+ public:
+ PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
+ : ItemParallelJob::Task(isolate), evacuator_(evacuator) {}
+
+ void RunInParallel() override {
+ PageEvacuationItem* item = nullptr;
+ while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
+ evacuator_->EvacuatePage(item->page());
+ item->MarkFinished();
+ }
+  }
+
+ private:
+ Evacuator* evacuator_;
};
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
- Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+ Collector* collector, ItemParallelJob* job,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes) {
// Used for trace summary.
@@ -3988,15 +3688,16 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
ProfilingMigrationObserver profiling_observer(heap());
const int wanted_num_tasks =
- NumberOfParallelCompactionTasks(job->NumberOfPages());
+ NumberOfParallelCompactionTasks(job->NumberOfItems());
Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i] = new Evacuator(collector, record_visitor);
if (profiling) evacuators[i]->AddObserver(&profiling_observer);
if (migration_observer != nullptr)
evacuators[i]->AddObserver(migration_observer);
+ job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
- job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+ job->Run();
const Address top = heap()->new_space()->top();
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
@@ -4017,7 +3718,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
"wanted_tasks=%d tasks=%d cores=%" PRIuS
" live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
wanted_num_tasks, job->NumberOfTasks(),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
live_bytes, compaction_speed);
@@ -4033,18 +3734,18 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
}
void MarkCompactCollector::EvacuatePagesInParallel() {
- PageParallelJob<EvacuationJobTraits> job(
- heap_, heap_->isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
intptr_t live_bytes = 0;
for (Page* page : old_space_evacuation_pages_) {
live_bytes += MarkingState::Internal(page).live_bytes();
- job.AddPage(page, {marking_state(page)});
+ evacuation_job.AddItem(new PageEvacuationItem(page));
}
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
+ if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
@@ -4053,24 +3754,24 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- job.AddPage(page, {marking_state(page)});
+ evacuation_job.AddItem(new PageEvacuationItem(page));
}
- DCHECK_GE(job.NumberOfPages(), 1);
+ if (evacuation_job.NumberOfItems() == 0) return;
RecordMigratedSlotVisitor record_visitor(this);
- CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor,
- nullptr, live_bytes);
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, &evacuation_job, &record_visitor, nullptr, live_bytes);
PostProcessEvacuationCandidates();
}
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- PageParallelJob<EvacuationJobTraits> job(
- heap_, heap_->isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
intptr_t live_bytes = 0;
for (Page* page : new_space_evacuation_pages_) {
intptr_t live_bytes_on_page = marking_state(page).live_bytes();
+ if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
@@ -4079,16 +3780,16 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- job.AddPage(page, {marking_state(page)});
+ evacuation_job.AddItem(new PageEvacuationItem(page));
}
- DCHECK_GE(job.NumberOfPages(), 1);
+ if (evacuation_job.NumberOfItems() == 0) return;
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
YoungGenerationRecordMigratedSlotVisitor record_visitor(
heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &job, &record_visitor, &observer, live_bytes);
+ this, &evacuation_job, &record_visitor, &observer, live_bytes);
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
@@ -4157,10 +3858,8 @@ int MarkCompactCollector::Sweeper::RawSweep(
intptr_t max_freed_bytes = 0;
int curr_region = -1;
- LiveObjectIterator<kBlackObjects> it(p, state);
- HeapObject* object = NULL;
-
- while ((object = it.Next()) != NULL) {
+ for (auto object_and_size : LiveObjectRange<kBlackObjects>(p, state)) {
+ HeapObject* const object = object_and_size.first;
DCHECK(ObjectMarking::IsBlack(object, state));
Address free_end = object->address();
if (free_end != free_start) {
@@ -4271,34 +3970,25 @@ bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
EvacuateRecordOnlyVisitor visitor(heap());
- LiveObjectVisitor object_visitor;
- object_visitor.VisitBlackObjects(page, MarkingState::Internal(page), &visitor,
- LiveObjectVisitor::kKeepMarking);
+ LiveObjectVisitor::VisitBlackObjectsNoFail(page, MarkingState::Internal(page),
+ &visitor,
+ LiveObjectVisitor::kKeepMarking);
}
template <class Visitor>
bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
const MarkingState& state,
Visitor* visitor,
- IterationMode iteration_mode) {
- LiveObjectIterator<kBlackObjects> it(chunk, state);
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- DCHECK(ObjectMarking::IsBlack(object, state));
- if (!visitor->Visit(object)) {
+ IterationMode iteration_mode,
+ HeapObject** failed_object) {
+ for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
+ HeapObject* const object = object_and_size.first;
+ if (!visitor->Visit(object, object_and_size.second)) {
if (iteration_mode == kClearMarkbits) {
state.bitmap()->ClearRange(
chunk->AddressToMarkbitIndex(chunk->area_start()),
chunk->AddressToMarkbitIndex(object->address()));
- SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>();
- if (slot_set != nullptr) {
- slot_set->RemoveRange(
- 0, static_cast<int>(object->address() - chunk->address()),
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
- RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(),
- object->address());
- RecomputeLiveBytes(chunk, state);
+ *failed_object = object;
}
return false;
}
@@ -4309,13 +3999,45 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
return true;
}
+template <class Visitor>
+void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode) {
+ for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(ObjectMarking::IsBlack(object, state));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
+ if (iteration_mode == kClearMarkbits) {
+ state.ClearLiveness();
+ }
+}
+
+template <class Visitor>
+void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode) {
+ for (auto object_and_size : LiveObjectRange<kGreyObjects>(chunk, state)) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(ObjectMarking::IsGrey(object, state));
+ const bool success = visitor->Visit(object, object_and_size.second);
+ USE(success);
+ DCHECK(success);
+ }
+ if (iteration_mode == kClearMarkbits) {
+ state.ClearLiveness();
+ }
+}
+
void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
const MarkingState& state) {
- LiveObjectIterator<kBlackObjects> it(chunk, state);
int new_live_size = 0;
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- new_live_size += object->Size();
+ for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(chunk, state)) {
+ new_live_size += object_and_size.second;
}
state.SetLiveBytes(new_live_size);
}
@@ -4323,7 +4045,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[space->identity()].Add(page);
+ swept_list_[space->identity()].push_back(page);
}
void MarkCompactCollector::Evacuate() {
@@ -4370,7 +4092,7 @@ void MarkCompactCollector::Evacuate() {
sweeper().AddPage(p->owner()->identity(), p);
}
}
- new_space_evacuation_pages_.Rewind(0);
+ new_space_evacuation_pages_.clear();
for (Page* p : old_space_evacuation_pages_) {
// Important: skip list should be cleared only after roots were updated
@@ -4398,211 +4120,356 @@ void MarkCompactCollector::Evacuate() {
#endif
}
-template <RememberedSetType type>
-class PointerUpdateJobTraits {
+class UpdatingItem : public ItemParallelJob::Item {
public:
- typedef int PerPageData; // Per page data is not used in this job.
- typedef const MarkCompactCollectorBase* PerTaskData;
+ virtual ~UpdatingItem() {}
+ virtual void Process() = 0;
+};
- static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
- MemoryChunk* chunk, PerPageData) {
- UpdateUntypedPointers(heap, chunk, task_data);
- UpdateTypedPointers(heap, chunk, task_data);
+class PointersUpatingTask : public ItemParallelJob::Task {
+ public:
+ explicit PointersUpatingTask(Isolate* isolate)
+ : ItemParallelJob::Task(isolate) {}
+
+ void RunInParallel() override {
+ UpdatingItem* item = nullptr;
+ while ((item = GetItem<UpdatingItem>()) != nullptr) {
+ item->Process();
+ item->MarkFinished();
+ }
+  }
+};
+
+class ToSpaceUpdatingItem : public UpdatingItem {
+ public:
+ explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
+ MarkingState marking_state)
+ : chunk_(chunk),
+ start_(start),
+ end_(end),
+ marking_state_(marking_state) {}
+ virtual ~ToSpaceUpdatingItem() {}
+
+ void Process() override {
+ if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+      // New->new promoted pages contain garbage so they require iteration
+      // using markbits.
+ ProcessVisitLive();
+ } else {
+ ProcessVisitAll();
+ }
}
private:
- static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
- const MarkCompactCollectorBase* collector) {
- base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
- if (type == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk, [heap, collector](Address slot) {
- return CheckAndUpdateOldToNewSlot(heap, slot, collector);
- });
- } else {
- RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
- return UpdateSlot(reinterpret_cast<Object**>(slot));
- });
+ void ProcessVisitAll() {
+ PointersUpdatingVisitor visitor;
+ for (Address cur = start_; cur < end_;) {
+ HeapObject* object = HeapObject::FromAddress(cur);
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, &visitor);
+ cur += size;
}
}
- static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
- const MarkCompactCollectorBase* collector) {
- if (type == OLD_TO_OLD) {
- Isolate* isolate = heap->isolate();
- RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk,
- [isolate](SlotType slot_type, Address host_addr, Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
- slot, UpdateSlot);
- });
- } else {
- Isolate* isolate = heap->isolate();
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk, [isolate, heap, collector](SlotType slot_type,
- Address host_addr, Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [heap, collector](Object** slot) {
- return CheckAndUpdateOldToNewSlot(
- heap, reinterpret_cast<Address>(slot), collector);
- });
- });
+ void ProcessVisitLive() {
+ // For young generation evacuations we want to visit grey objects, for
+ // full MC, we need to visit black objects.
+ PointersUpdatingVisitor visitor;
+ for (auto object_and_size :
+ LiveObjectRange<kAllLiveObjects>(chunk_, marking_state_)) {
+ object_and_size.first->IterateBodyFast(&visitor);
}
}
- static SlotCallbackResult CheckAndUpdateOldToNewSlot(
- Heap* heap, Address slot_address,
- const MarkCompactCollectorBase* collector) {
- // There may be concurrent action on slots in dead objects. Concurrent
- // sweeper threads may overwrite the slot content with a free space object.
- // Moreover, the pointed-to object may also get concurrently overwritten
- // with a free space object. The sweeper always gets priority performing
- // these writes.
- base::NoBarrierAtomicValue<Object*>* slot =
- base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
- Object* slot_reference = slot->Value();
- if (heap->InFromSpace(slot_reference)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
+ MemoryChunk* chunk_;
+ Address start_;
+ Address end_;
+ MarkingState marking_state_;
+};
+
+class RememberedSetUpdatingItem : public UpdatingItem {
+ public:
+ explicit RememberedSetUpdatingItem(Heap* heap,
+ MarkCompactCollectorBase* collector,
+ MemoryChunk* chunk,
+ RememberedSetUpdatingMode updating_mode)
+ : heap_(heap),
+ collector_(collector),
+ chunk_(chunk),
+ updating_mode_(updating_mode) {}
+ virtual ~RememberedSetUpdatingItem() {}
+
+ void Process() override {
+ base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
+ UpdateUntypedPointers();
+ UpdateTypedPointers();
+ }
+
+ private:
+ template <AccessMode access_mode>
+ inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (heap_->InFromSpace(*slot)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
- // There could still be stale pointers in large object space, map space,
- // and old space for pages that have been promoted.
if (map_word.IsForwardingAddress()) {
- // A sweeper thread may concurrently write a size value which looks like
- // a forwarding pointer. We have to ignore these values.
- if (map_word.ToRawValue() < Page::kPageSize) {
- return REMOVE_SLOT;
+ if (access_mode == AccessMode::ATOMIC) {
+ HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
+ base::AsAtomicWord::Relaxed_Store(heap_obj_slot,
+ map_word.ToForwardingAddress());
+ } else {
+ *slot = map_word.ToForwardingAddress();
}
- // Update the corresponding slot only if the slot content did not
- // change in the meantime. This may happen when a concurrent sweeper
- // thread stored a free space object at that memory location.
- slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap->InToSpace(slot->Value())) {
+ if (heap_->InToSpace(*slot)) {
return KEEP_SLOT;
}
- } else if (heap->InToSpace(slot_reference)) {
+ } else if (heap_->InToSpace(*slot)) {
// Slots can point to "to" space if the page has been moved, or if the
- // slot has been recorded multiple times in the remembered set. Since
- // there is no forwarding information present we need to check the
- // markbits to determine liveness.
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
- if (ObjectMarking::IsBlack(heap_object,
- collector->marking_state(heap_object)))
- return KEEP_SLOT;
+ // slot has been recorded multiple times in the remembered set, or
+ // if the slot was already updated during old->old updating.
+ // In case the page has been moved, check markbits to determine liveness
+ // of the slot. In the other case, the slot can just be kept.
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+ // IsBlackOrGrey is required because objects are marked as grey for
+      // the young generation collector while they are black for the full MC.
+ if (Page::FromAddress(heap_object->address())
+ ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ if (ObjectMarking::IsBlackOrGrey(
+ heap_object, collector_->marking_state(heap_object))) {
+ return KEEP_SLOT;
+ } else {
+ return REMOVE_SLOT;
+ }
+ }
+ return KEEP_SLOT;
} else {
- DCHECK(!heap->InNewSpace(slot_reference));
+ DCHECK(!heap_->InNewSpace(*slot));
}
return REMOVE_SLOT;
}
-};
-template <RememberedSetType type>
-void MarkCompactCollectorBase::UpdatePointersInParallel(
- Heap* heap, base::Semaphore* semaphore,
- const MarkCompactCollectorBase* collector) {
- PageParallelJob<PointerUpdateJobTraits<type> > job(
- heap, heap->isolate()->cancelable_task_manager(), semaphore);
- RememberedSet<type>::IterateMemoryChunks(
- heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
- int num_pages = job.NumberOfPages();
- int num_tasks = NumberOfPointerUpdateTasks(num_pages);
- job.Run(num_tasks, [collector](int i) { return collector; });
-}
+ void UpdateUntypedPointers() {
+ // A map slot might point to new space and be required for iterating
+ // an object concurrently by another task. Hence, we need to update
+ // those slots using atomics.
+ if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
+ if (chunk_->owner() == heap_->map_space()) {
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) {
+ return CheckAndUpdateOldToNewSlot<AccessMode::ATOMIC>(slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) {
+ return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+ }
+ if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+ (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
+ if (chunk_->owner() == heap_->map_space()) {
+ RememberedSet<OLD_TO_OLD>::Iterate(
+ chunk_,
+ [](Address slot) {
+ return UpdateSlot<AccessMode::ATOMIC>(
+ reinterpret_cast<Object**>(slot));
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_OLD>::Iterate(
+ chunk_,
+ [](Address slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<Object**>(slot));
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+ }
+ }
-class ToSpacePointerUpdateJobTraits {
- public:
- struct PageData {
- Address start;
- Address end;
- MarkingState marking_state;
- };
+ void UpdateTypedPointers() {
+ Isolate* isolate = heap_->isolate();
+ if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
+ nullptr) {
+ CHECK_NE(chunk_->owner(), heap_->map_space());
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk_,
+ [isolate, this](SlotType slot_type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, [this](Object** slot) {
+ return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<Address>(slot));
+ });
+ });
+ }
+ if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+ (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
+ nullptr)) {
+ CHECK_NE(chunk_->owner(), heap_->map_space());
+ RememberedSet<OLD_TO_OLD>::IterateTyped(
+ chunk_,
+ [isolate](SlotType slot_type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, UpdateSlot<AccessMode::NON_ATOMIC>);
+ });
+ }
+ }
- typedef PageData PerPageData;
- typedef PointersUpdatingVisitor* PerTaskData;
+ Heap* heap_;
+ MarkCompactCollectorBase* collector_;
+ MemoryChunk* chunk_;
+ RememberedSetUpdatingMode updating_mode_;
+};
- static void ProcessPageInParallel(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk, PerPageData page_data) {
- if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
- // New->new promoted pages contain garbage so they require iteration
- // using markbits.
- ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
- } else {
- ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
- }
+class GlobalHandlesUpdatingItem : public UpdatingItem {
+ public:
+ GlobalHandlesUpdatingItem(GlobalHandles* global_handles, size_t start,
+ size_t end)
+ : global_handles_(global_handles), start_(start), end_(end) {}
+ virtual ~GlobalHandlesUpdatingItem() {}
+
+ void Process() override {
+ PointersUpdatingVisitor updating_visitor;
+ global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
}
private:
- static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk,
- PerPageData page_data) {
- for (Address cur = page_data.start; cur < page_data.end;) {
- HeapObject* object = HeapObject::FromAddress(cur);
- Map* map = object->map();
- int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, visitor);
- cur += size;
- }
- }
+ GlobalHandles* global_handles_;
+ size_t start_;
+ size_t end_;
+};
- static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk,
- PerPageData page_data) {
- LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state);
- HeapObject* object = NULL;
- while ((object = it.Next()) != NULL) {
- Map* map = object->map();
- int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, visitor);
- }
+// Update array buffers on a page that has been evacuated by copying objects.
+// Target page exclusivity in old space is guaranteed by the fact that
+// evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
+// free list items of a given page. For new space the tracker will update
+// using a lock.
+class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
+ public:
+ explicit ArrayBufferTrackerUpdatingItem(Page* page) : page_(page) {}
+ virtual ~ArrayBufferTrackerUpdatingItem() {}
+
+ void Process() override {
+ ArrayBufferTracker::ProcessBuffers(
+ page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
}
+
+ private:
+ Page* page_;
};
-template <class MarkingStateProvider>
-void UpdateToSpacePointersInParallel(
- Heap* heap, base::Semaphore* semaphore,
- const MarkingStateProvider& marking_state_provider) {
- PageParallelJob<ToSpacePointerUpdateJobTraits> job(
- heap, heap->isolate()->cancelable_task_manager(), semaphore);
- Address space_start = heap->new_space()->bottom();
- Address space_end = heap->new_space()->top();
+int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
+ ItemParallelJob* job) {
+ // Seed to space pages.
+ const Address space_start = heap()->new_space()->bottom();
+ const Address space_end = heap()->new_space()->top();
+ int pages = 0;
for (Page* page : PageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
- job.AddPage(page, {start, end, marking_state_provider.marking_state(page)});
+ job->AddItem(
+ new ToSpaceUpdatingItem(page, start, end, marking_state(page)));
+ pages++;
+ }
+ if (pages == 0) return 0;
+ return NumberOfParallelToSpacePointerUpdateTasks(pages);
+}
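The per-page start/end computed above is an interval clip against the used part of new space, [bottom(), top()). A tiny stand-alone version of that arithmetic, with Address simplified to uintptr_t and ClipToSpace as a hypothetical helper:

#include <algorithm>
#include <cstdint>

using Address = uintptr_t;

struct Range {
  Address start;
  Address end;
};

// Intersects a page's [area_start, area_end) with the space's currently
// used range [space_start, space_end), matching the start/end choice made
// when seeding ToSpaceUpdatingItems.
Range ClipToSpace(Address area_start, Address area_end, Address space_start,
                  Address space_end) {
  return {std::max(area_start, space_start), std::min(area_end, space_end)};
}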
+
+int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
+ ItemParallelJob* job, RememberedSetUpdatingMode mode) {
+ int pages = 0;
+ if (mode == RememberedSetUpdatingMode::ALL) {
+ RememberedSet<OLD_TO_OLD>::IterateMemoryChunks(
+ heap(), [this, &job, &pages, mode](MemoryChunk* chunk) {
+ job->AddItem(
+ new RememberedSetUpdatingItem(heap(), this, chunk, mode));
+ pages++;
+ });
+ }
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [this, &job, &pages, mode](MemoryChunk* chunk) {
+ const bool contains_old_to_old_slots =
+ chunk->slot_set<OLD_TO_OLD>() != nullptr ||
+ chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
+ if (mode == RememberedSetUpdatingMode::OLD_TO_NEW_ONLY ||
+ !contains_old_to_old_slots) {
+ job->AddItem(
+ new RememberedSetUpdatingItem(heap(), this, chunk, mode));
+ pages++;
+ }
+ });
+ return (pages == 0)
+ ? 0
+ : NumberOfParallelPointerUpdateTasks(pages, old_to_new_slots_);
+}
+
+void MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ for (Page* p : new_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ }
+ }
+}
+
+void MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ for (Page* p : new_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ }
+ }
+}
+
+void MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ for (Page* p : old_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
+ p->IsEvacuationCandidate()) {
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(p));
+ }
}
- PointersUpdatingVisitor visitor;
- int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
- job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+ PointersUpdatingVisitor updating_visitor;
+ ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ CollectOldSpaceArrayBufferTrackerItems(&updating_job);
+
+ const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
+ const int remembered_set_tasks = CollectRememberedSetUpdatingItems(
+ &updating_job, RememberedSetUpdatingMode::ALL);
+ const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ for (int i = 0; i < num_tasks; i++) {
+ updating_job.AddTask(new PointersUpatingTask(isolate()));
+ }
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
- UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
- *this);
- // Update roots.
- PointersUpdatingVisitor updating_visitor;
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
- this);
}
-
{
- Heap* heap = this->heap();
- TRACE_GC(heap->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
- UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_,
- this);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ updating_job.Run();
}
{
@@ -4622,28 +4489,30 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor;
+ ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
+ &updating_job);
+ const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
+ const int remembered_set_tasks = CollectRememberedSetUpdatingItems(
+ &updating_job, RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ for (int i = 0; i < num_tasks; i++) {
+ updating_job.AddTask(new PointersUpatingTask(isolate()));
+ }
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
- {
- TRACE_GC(
- heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE);
- UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
- *this);
- }
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
- }
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
- this);
- }
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+ }
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ updating_job.Run();
}
{
@@ -4660,18 +4529,56 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
+void MarkCompactCollector::ReportAbortedEvacuationCandidate(
+ HeapObject* failed_object, Page* page) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
+}
+
void MarkCompactCollector::PostProcessEvacuationCandidates() {
- int aborted_pages = 0;
+ for (auto object_and_page : aborted_evacuation_candidates_) {
+ HeapObject* failed_object = object_and_page.first;
+ Page* page = object_and_page.second;
+ DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ // Aborted compaction page. We have to record slots here, since we
+    // might not have recorded them in the first place.
+
+ // Remove outdated slots.
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
+ failed_object->address(),
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+ failed_object->address());
+ const MarkingState state = marking_state(page);
+ // Recompute live bytes.
+ LiveObjectVisitor::RecomputeLiveBytes(page, state);
+ // Re-record slots.
+ EvacuateRecordOnlyVisitor record_visitor(heap());
+ LiveObjectVisitor::VisitBlackObjectsNoFail(page, state, &record_visitor,
+ LiveObjectVisitor::kKeepMarking);
+ // Fix up array buffers.
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ }
+ const int aborted_pages =
+ static_cast<int>(aborted_evacuation_candidates_.size());
+ aborted_evacuation_candidates_.clear();
+ int aborted_pages_verified = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+      // After clearing the evacuation candidate flag, the page is again in a
+      // regular state.
p->ClearEvacuationCandidate();
- aborted_pages++;
+ aborted_pages_verified++;
} else {
DCHECK(p->IsEvacuationCandidate());
DCHECK(p->SweepingDone());
p->Unlink();
}
}
+ DCHECK_EQ(aborted_pages_verified, aborted_pages);
if (FLAG_trace_evacuation && (aborted_pages > 0)) {
PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
isolate()->time_millis_since_init(), aborted_pages);
@@ -4686,7 +4593,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
CHECK(p->SweepingDone());
space->ReleasePage(p);
}
- old_space_evacuation_pages_.Rewind(0);
+ old_space_evacuation_pages_.clear();
compacting_ = false;
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
@@ -4741,7 +4648,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
{
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].Add(page);
+ swept_list_[identity].push_back(page);
}
return max_freed;
}
@@ -4774,12 +4681,6 @@ Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
return page;
}
-void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
- Page* page) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- sweeping_list_[space].push_back(page);
-}
-
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
@@ -4793,7 +4694,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (p->IsEvacuationCandidate()) {
// Will be processed in Evacuate.
- DCHECK(evacuation_candidates_.length() > 0);
+ DCHECK(!evacuation_candidates_.empty());
continue;
}
@@ -4863,11 +4764,6 @@ void MarkCompactCollector::StartSweepSpaces() {
heap_->lo_space()->FreeUnmarkedObjects();
}
-void MarkCompactCollector::Initialize() {
- MarkCompactMarkingVisitor::Initialize();
- IncrementalMarking::Initialize();
-}
-
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
Code* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index e32ab4c6f1..937dad1a91 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -6,41 +6,25 @@
#define V8_HEAP_MARK_COMPACT_H_
#include <deque>
+#include <vector>
#include "src/base/bits.h"
-#include "src/base/platform/condition-variable.h"
-#include "src/cancelable-task.h"
-#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/marking.h"
#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
+#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
// Forward declarations.
-class CodeFlusher;
class EvacuationJobTraits;
class HeapObjectVisitor;
-class LocalWorkStealingMarkingDeque;
-class MarkCompactCollector;
-class MinorMarkCompactCollector;
-class MarkingVisitor;
+class ItemParallelJob;
class MigrationObserver;
-template <typename JobTraits>
-class PageParallelJob;
class RecordMigratedSlotVisitor;
-class ThreadLocalTop;
-class WorkStealingMarkingDeque;
class YoungGenerationMarkingVisitor;
-#ifdef V8_CONCURRENT_MARKING
-using MarkingDeque = ConcurrentMarkingDeque;
-#else
-using MarkingDeque = SequentialMarkingDeque;
-#endif
-
class ObjectMarking : public AllStatic {
public:
V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj,
@@ -55,34 +39,34 @@ class ObjectMarking : public AllStatic {
return Marking::Color(ObjectMarking::MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsImpossible(HeapObject* obj,
const MarkingState& state) {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsBlack(HeapObject* obj, const MarkingState& state) {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsWhite(HeapObject* obj, const MarkingState& state) {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsGrey(HeapObject* obj, const MarkingState& state) {
return Marking::IsGrey<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool IsBlackOrGrey(HeapObject* obj,
const MarkingState& state) {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool BlackToGrey(HeapObject* obj,
const MarkingState& state) {
MarkBit markbit = MarkBitFrom(obj, state);
@@ -91,20 +75,20 @@ class ObjectMarking : public AllStatic {
return true;
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool WhiteToGrey(HeapObject* obj,
const MarkingState& state) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj, state));
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool WhiteToBlack(HeapObject* obj,
const MarkingState& state) {
return ObjectMarking::WhiteToGrey<access_mode>(obj, state) &&
ObjectMarking::GreyToBlack<access_mode>(obj, state);
}
- template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE static bool GreyToBlack(HeapObject* obj,
const MarkingState& state) {
MarkBit markbit = MarkBitFrom(obj, state);
@@ -117,86 +101,34 @@ class ObjectMarking : public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
-// CodeFlusher collects candidates for code flushing during marking and
-// processes those candidates after marking has completed in order to
-// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
-// - SharedFunctionInfo references unoptimized code.
-// - JSFunction references either unoptimized or optimized code.
-// We are not allowed to flush unoptimized code for functions that got
-// optimized or inlined into optimized code, because we might bailout
-// into the unoptimized code again during deoptimization.
-class CodeFlusher {
- public:
- explicit CodeFlusher(Isolate* isolate)
- : isolate_(isolate),
- jsfunction_candidates_head_(nullptr),
- shared_function_info_candidates_head_(nullptr) {}
-
- inline void AddCandidate(SharedFunctionInfo* shared_info);
- inline void AddCandidate(JSFunction* function);
-
- void EvictCandidate(SharedFunctionInfo* shared_info);
- void EvictCandidate(JSFunction* function);
-
- void ProcessCandidates() {
- ProcessSharedFunctionInfoCandidates();
- ProcessJSFunctionCandidates();
- }
-
- inline void VisitListHeads(RootVisitor* v);
-
- template <typename StaticVisitor>
- inline void IteratePointersToFromSpace();
-
- private:
- void ProcessJSFunctionCandidates();
- void ProcessSharedFunctionInfoCandidates();
-
- static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
- static inline JSFunction* GetNextCandidate(JSFunction* candidate);
- static inline void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate);
- static inline void ClearNextCandidate(JSFunction* candidate,
- Object* undefined);
-
- static inline SharedFunctionInfo* GetNextCandidate(
- SharedFunctionInfo* candidate);
- static inline void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate);
- static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
-
- Isolate* isolate_;
- JSFunction* jsfunction_candidates_head_;
- SharedFunctionInfo* shared_function_info_candidates_head_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
-
-class MarkBitCellIterator BASE_EMBEDDED {
+class MarkBitCellIterator {
public:
MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
- last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ DCHECK(Bitmap::IsCellAligned(
+ chunk_->AddressToMarkbitIndex(chunk_->area_start())));
+ DCHECK(Bitmap::IsCellAligned(
chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+ last_cell_index_ =
+ Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
cell_base_ = chunk_->area_start();
- cell_index_ = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
+ cell_index_ =
+ Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
cells_ = state.bitmap()->cells();
}
- inline bool Done() { return cell_index_ == last_cell_index_; }
+ inline bool Done() { return cell_index_ >= last_cell_index_; }
inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
inline MarkBit::CellType* CurrentCell() {
- DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
+ DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
return &cells_[cell_index_];
}
inline Address CurrentCellBase() {
- DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
- chunk_->AddressToMarkbitIndex(cell_base_))));
+ DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+ chunk_->AddressToMarkbitIndex(cell_base_))));
return cell_base_;
}
@@ -233,55 +165,105 @@ class MarkBitCellIterator BASE_EMBEDDED {
Address cell_base_;
};
-// Grey objects can happen on black pages when black objects transition to
-// grey e.g. when calling RecordWrites on them.
enum LiveObjectIterationMode {
kBlackObjects,
kGreyObjects,
kAllLiveObjects
};
-template <LiveObjectIterationMode T>
-class LiveObjectIterator BASE_EMBEDDED {
+template <LiveObjectIterationMode mode>
+class LiveObjectRange {
public:
- LiveObjectIterator(MemoryChunk* chunk, MarkingState state)
+ class iterator {
+ public:
+ using value_type = std::pair<HeapObject*, int /* size */>;
+ using pointer = const value_type*;
+ using reference = const value_type&;
+ using iterator_category = std::forward_iterator_tag;
+
+ inline iterator(MemoryChunk* chunk, MarkingState state, Address start);
+
+ inline iterator& operator++();
+ inline iterator operator++(int);
+
+ bool operator==(iterator other) const {
+ return current_object_ == other.current_object_;
+ }
+
+ bool operator!=(iterator other) const { return !(*this == other); }
+
+ value_type operator*() {
+ return std::make_pair(current_object_, current_size_);
+ }
+
+ private:
+ inline void AdvanceToNextValidObject();
+
+ MemoryChunk* const chunk_;
+ Map* const one_word_filler_map_;
+ Map* const two_word_filler_map_;
+ Map* const free_space_map_;
+ MarkBitCellIterator it_;
+ Address cell_base_;
+ MarkBit::CellType current_cell_;
+ HeapObject* current_object_;
+ int current_size_;
+ };
+
+ LiveObjectRange(MemoryChunk* chunk, MarkingState state)
: chunk_(chunk),
- it_(chunk_, state),
- cell_base_(it_.CurrentCellBase()),
- current_cell_(*it_.CurrentCell()) {}
+ state_(state),
+ start_(chunk_->area_start()),
+ end_(chunk->area_end()) {}
- HeapObject* Next();
+ inline iterator begin();
+ inline iterator end();
private:
- inline Heap* heap() { return chunk_->heap(); }
-
- MemoryChunk* chunk_;
- MarkBitCellIterator it_;
- Address cell_base_;
- MarkBit::CellType current_cell_;
+ MemoryChunk* const chunk_;
+ MarkingState state_;
+ Address start_;
+ Address end_;
};
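Editorial note: LiveObjectRange above replaces the old hand-rolled LiveObjectIterator with a standard begin()/end() pair that yields (object, size) tuples, so callers can use range-based for loops. A minimal usage sketch, assuming (as elsewhere in this file) a MemoryChunk* chunk and its MarkingState state; the surrounding names are illustrative:

// Sketch only: walk all black objects on a chunk and total their sizes.
// Each |object_and_size| is a std::pair<HeapObject*, int>.
intptr_t live_bytes = 0;
for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, state)) {
  // ... a real caller would visit object_and_size.first here ...
  live_bytes += object_and_size.second;
}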
-class LiveObjectVisitor BASE_EMBEDDED {
+class LiveObjectVisitor : AllStatic {
public:
enum IterationMode {
kKeepMarking,
kClearMarkbits,
};
- // Visits black objects on a MemoryChunk until the Visitor returns for an
- // object. If IterationMode::kClearMarkbits is passed the markbits and slots
- // for visited objects are cleared for each successfully visited object.
+ // Visits black objects on a MemoryChunk until the Visitor returns |false| for
+  // an object. If IterationMode::kClearMarkbits is passed, the markbits and
+  // slots for visited objects are cleared for each successfully visited object.
template <class Visitor>
- bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
- Visitor* visitor, IterationMode iteration_mode);
+ static bool VisitBlackObjects(MemoryChunk* chunk, const MarkingState& state,
+ Visitor* visitor, IterationMode iteration_mode,
+ HeapObject** failed_object);
- private:
- void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
+ // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
+ // visitation for an object.
+ template <class Visitor>
+ static void VisitBlackObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode);
+
+  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
+ // visitation for an object.
+ template <class Visitor>
+ static void VisitGreyObjectsNoFail(MemoryChunk* chunk,
+ const MarkingState& state,
+ Visitor* visitor,
+ IterationMode iteration_mode);
+
+ static void RecomputeLiveBytes(MemoryChunk* chunk, const MarkingState& state);
};
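Editorial note: the |failed_object| out-parameter of VisitBlackObjects is what feeds ReportAbortedEvacuationCandidate() in mark-compact.cc above. A hedged sketch of the expected calling pattern during evacuation; the visitor and collector variables are placeholders, not the exact code of this patch:

HeapObject* failed_object = nullptr;
if (!LiveObjectVisitor::VisitBlackObjects(page, state, &evacuation_visitor,
                                          LiveObjectVisitor::kClearMarkbits,
                                          &failed_object)) {
  // Evacuation of this page could not be completed (e.g. the target space
  // ran out of memory); remember the page so that
  // PostProcessEvacuationCandidates() can recompute live bytes and
  // re-record its slots later.
  collector->ReportAbortedEvacuationCandidate(failed_object, page);
}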
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum MarkingTreatmentMode { KEEP, CLEAR };
+enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
@@ -301,14 +283,15 @@ class MarkCompactCollectorBase {
inline Isolate* isolate() { return heap()->isolate(); }
protected:
- explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
+ explicit MarkCompactCollectorBase(Heap* heap)
+ : heap_(heap), old_to_new_slots_(0) {}
// Marking operations for objects reachable from roots.
virtual void MarkLiveObjects() = 0;
// Mark objects reachable (transitively) from objects in the marking
// stack.
- virtual void EmptyMarkingDeque() = 0;
- virtual void ProcessMarkingDeque() = 0;
+ virtual void EmptyMarkingWorklist() = 0;
+ virtual void ProcessMarkingWorklist() = 0;
// Clear non-live references held in side data structures.
virtual void ClearNonLiveReferences() = 0;
virtual void EvacuatePrologue() = 0;
@@ -317,26 +300,27 @@ class MarkCompactCollectorBase {
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
- // The number of parallel compaction tasks, including the main thread.
- int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
-
template <class Evacuator, class Collector>
void CreateAndExecuteEvacuationTasks(
- Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+ Collector* collector, ItemParallelJob* job,
RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes);
- template <RememberedSetType type>
- void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore,
- const MarkCompactCollectorBase* collector);
+ int CollectToSpaceUpdatingItems(ItemParallelJob* job);
+ int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
+ RememberedSetUpdatingMode mode);
int NumberOfParallelCompactionTasks(int pages);
- int NumberOfPointerUpdateTasks(int pages);
+ int NumberOfParallelPointerUpdateTasks(int pages, int slots);
+ int NumberOfParallelToSpacePointerUpdateTasks(int pages);
Heap* heap_;
+ // Number of old to new slots. Should be computed during MarkLiveObjects.
+ // -1 indicates that the value couldn't be computed.
+ int old_to_new_slots_;
};
// Collector for young-generation only.
@@ -362,24 +346,23 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void CleanupSweepToIteratePages();
private:
+ using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
class RootMarkingVisitorSeedOnly;
class RootMarkingVisitor;
- static const int kNumMarkers = 4;
+ static const int kNumMarkers = 8;
static const int kMainMarker = 0;
- inline WorkStealingMarkingDeque* marking_deque() { return marking_deque_; }
+ inline MarkingWorklist* worklist() { return worklist_; }
- inline YoungGenerationMarkingVisitor* marking_visitor(int index) {
- DCHECK_LT(index, kNumMarkers);
- return marking_visitor_[index];
+ inline YoungGenerationMarkingVisitor* main_marking_visitor() {
+ return main_marking_visitor_;
}
- SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
void MarkLiveObjects() override;
void MarkRootSetInParallel();
- void ProcessMarkingDeque() override;
- void EmptyMarkingDeque() override;
+ void ProcessMarkingWorklist() override;
+ void EmptyMarkingWorklist() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
@@ -388,15 +371,16 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- int NumberOfMarkingTasks();
+ void CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+
+ int NumberOfParallelMarkingTasks(int pages);
- WorkStealingMarkingDeque* marking_deque_;
- YoungGenerationMarkingVisitor* marking_visitor_[kNumMarkers];
+ MarkingWorklist* worklist_;
+ YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
- List<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
- friend class MarkYoungGenerationJobTraits;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingVisitor;
};
@@ -404,10 +388,111 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
+ // Wrapper for the shared and bailout worklists.
+ class MarkingWorklist {
+ public:
+ using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;
+
+ static const int kMainThread = 0;
+ // The heap parameter is not used but needed to match the sequential case.
+ explicit MarkingWorklist(Heap* heap) {}
+
+ bool Push(HeapObject* object) { return shared_.Push(kMainThread, object); }
+
+ bool PushBailout(HeapObject* object) {
+ return bailout_.Push(kMainThread, object);
+ }
+
+ HeapObject* Pop() {
+ HeapObject* result;
+#ifdef V8_CONCURRENT_MARKING
+ if (bailout_.Pop(kMainThread, &result)) return result;
+#endif
+ if (shared_.Pop(kMainThread, &result)) return result;
+ return nullptr;
+ }
+
+ void Clear() {
+ bailout_.Clear();
+ shared_.Clear();
+ }
+
+ bool IsFull() { return false; }
+
+ bool IsEmpty() {
+ return bailout_.IsLocalEmpty(kMainThread) &&
+ shared_.IsLocalEmpty(kMainThread) &&
+ bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty();
+ }
+
+ int Size() {
+ return static_cast<int>(bailout_.LocalSize(kMainThread) +
+ shared_.LocalSize(kMainThread));
+ }
+
+    // Calls the specified callback on each element of the worklists and replaces
+    // the element with the result of the callback. If the callback returns
+    // nullptr then the element is removed from the worklist.
+ // The callback must accept HeapObject* and return HeapObject*.
+ template <typename Callback>
+ void Update(Callback callback) {
+ bailout_.Update(callback);
+ shared_.Update(callback);
+ }
+
+ ConcurrentMarkingWorklist* shared() { return &shared_; }
+ ConcurrentMarkingWorklist* bailout() { return &bailout_; }
+
+ // These empty functions are needed to match the interface
+ // of the sequential marking deque.
+ void SetUp() {}
+ void TearDown() { Clear(); }
+ void StartUsing() {}
+ void StopUsing() {}
+ void ClearOverflowed() {}
+ void SetOverflowed() {}
+ bool overflowed() const { return false; }
+
+ void Print() {
+ PrintWorklist("shared", &shared_);
+ PrintWorklist("bailout", &bailout_);
+ }
+
+ private:
+ // Prints the stats about the global pool of the worklist.
+ void PrintWorklist(const char* worklist_name,
+ ConcurrentMarkingWorklist* worklist) {
+ std::map<InstanceType, int> count;
+ int total_count = 0;
+ worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
+ ++total_count;
+ count[obj->map()->instance_type()]++;
+ });
+ std::vector<std::pair<int, InstanceType>> rank;
+ for (auto i : count) {
+ rank.push_back(std::make_pair(i.second, i.first));
+ }
+ std::map<InstanceType, std::string> instance_type_name;
+#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
+ INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
+#undef INSTANCE_TYPE_NAME
+ std::sort(rank.begin(), rank.end(),
+ std::greater<std::pair<int, InstanceType>>());
+ PrintF("Worklist %s: %d\n", worklist_name, total_count);
+ for (auto i : rank) {
+ PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
+ }
+ }
+ ConcurrentMarkingWorklist shared_;
+ ConcurrentMarkingWorklist bailout_;
+ };
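Editorial note: the wrapper above pairs the shared marking worklist with a bailout worklist that concurrent markers can push to; note that Pop() drains the bailout list first under V8_CONCURRENT_MARKING so bailed-out objects are revisited on the main thread. A rough sketch of how the main-thread drain loop and an Update() callback use this interface, written against the callback contract documented above; names other than the worklist API are illustrative:

// Illustrative drain loop; the real one lives in EmptyMarkingWorklist().
MarkingWorklist* worklist = collector->marking_worklist();
for (HeapObject* object = worklist->Pop(); object != nullptr;
     object = worklist->Pop()) {
  // ... mark |object| black and push its unmarked children ...
}

// Illustrative Update() callback after evacuation: forward moved objects.
worklist->Update([](HeapObject* object) -> HeapObject* {
  MapWord map_word = object->map_word();
  if (map_word.IsForwardingAddress()) return map_word.ToForwardingAddress();
  return object;  // return nullptr instead to drop the entry entirely
});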
+
class RootMarkingVisitor;
class Sweeper {
public:
+ class SweeperTask;
+
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
@@ -416,15 +501,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
};
typedef std::deque<Page*> SweepingList;
- typedef List<Page*> SweptList;
+ typedef std::vector<Page*> SweptList;
static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
explicit Sweeper(Heap* heap)
: heap_(heap),
- num_tasks_(0),
pending_sweeper_tasks_semaphore_(0),
+ semaphore_counter_(0),
sweeping_in_progress_(false),
num_sweeping_tasks_(0) {}
@@ -450,10 +535,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Page* GetSweptPageSafe(PagedSpace* space);
private:
- class SweeperTask;
-
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
- static const int kMaxSweeperTasks = kAllocationSpaces;
static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
@@ -465,14 +547,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
}
Page* GetSweepingPageSafe(AllocationSpace space);
- void AddSweepingPageSafe(AllocationSpace space, Page* page);
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
- Heap* const heap_;
- int num_tasks_;
- CancelableTaskManager::Id task_ids_[kMaxSweeperTasks];
+ Heap* heap_;
base::Semaphore pending_sweeper_tasks_semaphore_;
+ // Counter is only used for waiting on the semaphore.
+ intptr_t semaphore_counter_;
base::Mutex mutex_;
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
@@ -487,8 +568,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
kClearMarkbits,
};
- static void Initialize();
-
MarkingState marking_state(HeapObject* object) const override {
return MarkingState::Internal(object);
}
@@ -514,9 +593,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
- CodeFlusher* code_flusher() { return code_flusher_; }
- inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
@@ -562,7 +638,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool evacuation() const { return evacuation_; }
- MarkingDeque* marking_deque() { return &marking_deque_; }
+ MarkingWorklist* marking_worklist() { return &marking_worklist_; }
Sweeper& sweeper() { return sweeper_; }
@@ -597,22 +673,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Finishes GC, performs heap verification if enabled.
void Finish();
- // Mark code objects that are active on the stack to prevent them
- // from being flushed.
- void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
-
- void PrepareForCodeFlushing();
-
void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
V8_INLINE void PushBlack(HeapObject* obj);
- // Unshifts a black object into the marking stack and accounts for live bytes.
- // Note that this assumes lives bytes have already been counted.
- V8_INLINE void UnshiftBlack(HeapObject* obj);
-
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
V8_INLINE void MarkObject(HeapObject* obj);
@@ -624,7 +690,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
- void ProcessMarkingDeque() override;
+ void ProcessMarkingWorklist() override;
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
@@ -644,15 +710,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// This function empties the marking stack, but may leave overflowed objects
// in the heap, in which case the marking stack's overflow flag will be set.
- void EmptyMarkingDeque() override;
+ void EmptyMarkingWorklist() override;
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
- void RefillMarkingDeque();
+ void RefillMarkingWorklist();
// Helper methods for refilling the marking stack by discovering grey objects
- // on various pages of the heap. Used by {RefillMarkingDeque} only.
+ // on various pages of the heap. Used by {RefillMarkingWorklist} only.
template <class T>
void DiscoverGreyObjectsWithIterator(T* it);
void DiscoverGreyObjectsOnPage(MemoryChunk* p);
@@ -710,9 +776,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
+ void CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+ void CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+
void ReleaseEvacuationCandidates();
void PostProcessEvacuationCandidates();
+ void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);
+ base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_;
#ifdef DEBUG
@@ -742,29 +813,25 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool have_code_to_deoptimize_;
- MarkingDeque marking_deque_;
-
- CodeFlusher* code_flusher_;
+ MarkingWorklist marking_worklist_;
// Candidates for pages that should be evacuated.
- List<Page*> evacuation_candidates_;
+ std::vector<Page*> evacuation_candidates_;
// Pages that are actually processed during evacuation.
- List<Page*> old_space_evacuation_pages_;
- List<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> old_space_evacuation_pages_;
+ std::vector<Page*> new_space_evacuation_pages_;
+ std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
Sweeper sweeper_;
- friend class CodeMarkingVisitor;
+ friend class FullEvacuator;
friend class Heap;
friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
- friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
- friend class SharedFunctionInfoMarkingVisitor;
- friend class StoreBuffer;
};
-class EvacuationScope BASE_EMBEDDED {
+class EvacuationScope {
public:
explicit EvacuationScope(MarkCompactCollector* collector)
: collector_(collector) {
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
new file mode 100644
index 0000000000..eef3d0a59f
--- /dev/null
+++ b/deps/v8/src/heap/marking.cc
@@ -0,0 +1,201 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+void Bitmap::Clear() {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (int i = 0; i < CellsCount(); i++) {
+ base::Relaxed_Store(cell_base + i, 0);
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+ // clearing stores.
+ base::MemoryFence();
+}
+
+void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 1s.
+ SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ ~(start_index_mask - 1));
+ // Then fill all in between cells with 1s.
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, ~0u);
+ }
+ // Finally, fill all bits until the end address in the last cell with 1s.
+ SetBitsInCell<AccessMode::ATOMIC>(end_cell_index, (end_index_mask - 1));
+ } else {
+ SetBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ end_index_mask - start_index_mask);
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-
+ // bit setting stores.
+ base::MemoryFence();
+}
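Editorial note: as the comments say, only the two boundary cells need atomic updates; every cell in between is written with a plain relaxed store of ~0u. A worked example with kBitsPerCell == 32 (indices are markbit indices):

// SetRange(3, 70):
//   start_cell_index = 3 >> 5  = 0,  start_index_mask = 1u << 3 = 0x00000008
//   end_cell_index   = 70 >> 5 = 2,  end_index_mask   = 1u << 6 = 0x00000040
//   cell 0: SetBitsInCell<ATOMIC>(0, ~(0x8 - 1))  -> bits  3..31 set
//   cell 1: Relaxed_Store(~0u)                    -> bits 32..63 set
//   cell 2: SetBitsInCell<ATOMIC>(2, 0x40 - 1)    -> bits 64..69 set
// ClearRange(3, 70) touches the same cells with the complementary masks.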
+
+void Bitmap::ClearRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 0s.
+ ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ ~(start_index_mask - 1));
+ // Then fill all in between cells with 0s.
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ base::Relaxed_Store(cell_base + i, 0);
+ }
+    // Finally, clear all bits until the end address in the last cell.
+ ClearBitsInCell<AccessMode::ATOMIC>(end_cell_index, (end_index_mask - 1));
+ } else {
+ ClearBitsInCell<AccessMode::ATOMIC>(start_cell_index,
+ (end_index_mask - start_index_mask));
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-
+ // bit clearing stores.
+ base::MemoryFence();
+}
+
+bool Bitmap::AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ MarkBit::CellType matching_mask;
+ if (start_cell_index != end_cell_index) {
+ matching_mask = ~(start_index_mask - 1);
+ if ((cells()[start_cell_index] & matching_mask) != matching_mask) {
+ return false;
+ }
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ if (cells()[i] != ~0u) return false;
+ }
+ matching_mask = (end_index_mask - 1);
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) ||
+ ((cells()[end_cell_index] & matching_mask) == matching_mask);
+ } else {
+ matching_mask = end_index_mask - start_index_mask;
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) ||
+ (cells()[end_cell_index] & matching_mask) == matching_mask;
+ }
+}
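Editorial note: the (matching_mask == 0) guard matters when end_index is cell-aligned: end_index_mask is then 1, end_index_mask - 1 is 0, and end_cell_index may already point one past the last cell of the bitmap. A worked instance, again with kBitsPerCell == 32:

// AllBitsSetInRange(0, 64) on a bitmap with exactly two cells:
//   start_cell_index = 0, start_index_mask = 0x1
//   end_cell_index   = 2, end_index_mask   = 0x1, so matching_mask = 0
//   cells 0 and 1 are checked against ~0u; cells()[2] is never dereferenced,
//   because the final comparison short-circuits on (matching_mask == 0).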
+
+bool Bitmap::AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ MarkBit::CellType matching_mask;
+ if (start_cell_index != end_cell_index) {
+ matching_mask = ~(start_index_mask - 1);
+ if ((cells()[start_cell_index] & matching_mask)) return false;
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ if (cells()[i]) return false;
+ }
+ matching_mask = (end_index_mask - 1);
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
+ } else {
+ matching_mask = end_index_mask - start_index_mask;
+ // Check against a mask of 0 to avoid dereferencing the cell after the
+ // end of the bitmap.
+ return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
+ }
+}
+
+namespace {
+
+void PrintWord(uint32_t word, uint32_t himask = 0) {
+ for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+ if ((mask & himask) != 0) PrintF("[");
+ PrintF((mask & word) ? "1" : "0");
+ if ((mask & himask) != 0) PrintF("]");
+ }
+}
+
+class CellPrinter {
+ public:
+ CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+
+ void Print(uint32_t pos, uint32_t cell) {
+ if (cell == seq_type) {
+ seq_length++;
+ return;
+ }
+
+ Flush();
+
+ if (IsSeq(cell)) {
+ seq_start = pos;
+ seq_length = 0;
+ seq_type = cell;
+ return;
+ }
+
+ PrintF("%d: ", pos);
+ PrintWord(cell);
+ PrintF("\n");
+ }
+
+ void Flush() {
+ if (seq_length > 0) {
+ PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
+ seq_length * Bitmap::kBitsPerCell);
+ seq_length = 0;
+ }
+ }
+
+ static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+ private:
+ uint32_t seq_start;
+ uint32_t seq_type;
+ uint32_t seq_length;
+};
+
+} // anonymous namespace
+
+void Bitmap::Print() {
+ CellPrinter printer;
+ for (int i = 0; i < CellsCount(); i++) {
+ printer.Print(i, cells()[i]);
+ }
+ printer.Flush();
+ PrintF("\n");
+}
+
+bool Bitmap::IsClean() {
+ for (int i = 0; i < CellsCount(); i++) {
+ if (cells()[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index ab98a124bc..c76302218f 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -16,11 +16,7 @@ class MarkBit {
typedef uint32_t CellType;
STATIC_ASSERT(sizeof(CellType) == sizeof(base::Atomic32));
- enum AccessMode { ATOMIC, NON_ATOMIC };
-
- inline MarkBit(base::Atomic32* cell, CellType mask) : cell_(cell) {
- mask_ = static_cast<base::Atomic32>(mask);
- }
+ inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
#ifdef DEBUG
bool operator==(const MarkBit& other) {
@@ -40,19 +36,19 @@ class MarkBit {
  // The function returns true if it succeeded in
  // transitioning the bit from 0 to 1.
- template <AccessMode mode = NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline bool Set();
- template <AccessMode mode = NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline bool Get();
  // The function returns true if it succeeded in
  // transitioning the bit from 1 to 0.
- template <AccessMode mode = NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline bool Clear();
- base::Atomic32* cell_;
- base::Atomic32 mask_;
+ CellType* cell_;
+ CellType mask_;
friend class IncrementalMarking;
friend class ConcurrentMarkingMarkbits;
@@ -60,57 +56,41 @@ class MarkBit {
};
template <>
-inline bool MarkBit::Set<MarkBit::NON_ATOMIC>() {
- base::Atomic32 old_value = *cell_;
+inline bool MarkBit::Set<AccessMode::NON_ATOMIC>() {
+ CellType old_value = *cell_;
*cell_ = old_value | mask_;
return (old_value & mask_) == 0;
}
template <>
-inline bool MarkBit::Set<MarkBit::ATOMIC>() {
- base::Atomic32 old_value;
- base::Atomic32 new_value;
- do {
- old_value = base::NoBarrier_Load(cell_);
- if (old_value & mask_) return false;
- new_value = old_value | mask_;
- } while (base::Release_CompareAndSwap(cell_, old_value, new_value) !=
- old_value);
- return true;
+inline bool MarkBit::Set<AccessMode::ATOMIC>() {
+ return base::AsAtomic32::SetBits(cell_, mask_, mask_);
}
template <>
-inline bool MarkBit::Get<MarkBit::NON_ATOMIC>() {
- return (base::NoBarrier_Load(cell_) & mask_) != 0;
+inline bool MarkBit::Get<AccessMode::NON_ATOMIC>() {
+ return (*cell_ & mask_) != 0;
}
template <>
-inline bool MarkBit::Get<MarkBit::ATOMIC>() {
- return (base::Acquire_Load(cell_) & mask_) != 0;
+inline bool MarkBit::Get<AccessMode::ATOMIC>() {
+ return (base::AsAtomic32::Acquire_Load(cell_) & mask_) != 0;
}
template <>
-inline bool MarkBit::Clear<MarkBit::NON_ATOMIC>() {
- base::Atomic32 old_value = *cell_;
+inline bool MarkBit::Clear<AccessMode::NON_ATOMIC>() {
+ CellType old_value = *cell_;
*cell_ = old_value & ~mask_;
return (old_value & mask_) == mask_;
}
template <>
-inline bool MarkBit::Clear<MarkBit::ATOMIC>() {
- base::Atomic32 old_value;
- base::Atomic32 new_value;
- do {
- old_value = base::NoBarrier_Load(cell_);
- if (!(old_value & mask_)) return false;
- new_value = old_value & ~mask_;
- } while (base::Release_CompareAndSwap(cell_, old_value, new_value) !=
- old_value);
- return true;
+inline bool MarkBit::Clear<AccessMode::ATOMIC>() {
+ return base::AsAtomic32::SetBits(cell_, 0u, mask_);
}
// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
+class V8_EXPORT_PRIVATE Bitmap {
public:
static const uint32_t kBitsPerCell = 32;
static const uint32_t kBitsPerCellLog2 = 5;
@@ -129,11 +109,7 @@ class Bitmap {
int CellsCount() { return CellsForLength(kLength); }
- static int SizeFor(int cells_count) {
- return sizeof(MarkBit::CellType) * cells_count;
- }
-
- INLINE(static uint32_t IndexToCell(uint32_t index)) {
+ V8_INLINE static uint32_t IndexToCell(uint32_t index) {
return index >> kBitsPerCellLog2;
}
@@ -141,204 +117,85 @@ class Bitmap {
return index & kBitIndexMask;
}
- INLINE(static uint32_t CellToIndex(uint32_t index)) {
- return index << kBitsPerCellLog2;
+  // Aligns the given markbit index down to the start of its cell.
+ V8_INLINE static uint32_t CellAlignIndex(uint32_t index) {
+ return index & ~kBitIndexMask;
}
- INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
- return (index + kBitIndexMask) & ~kBitIndexMask;
+ V8_INLINE static bool IsCellAligned(uint32_t index) {
+ return (index & kBitIndexMask) == 0;
}
- INLINE(MarkBit::CellType* cells()) {
+ V8_INLINE MarkBit::CellType* cells() {
return reinterpret_cast<MarkBit::CellType*>(this);
}
- INLINE(Address address()) { return reinterpret_cast<Address>(this); }
-
- INLINE(static Bitmap* FromAddress(Address addr)) {
+ V8_INLINE static Bitmap* FromAddress(Address addr) {
return reinterpret_cast<Bitmap*>(addr);
}
inline MarkBit MarkBitFromIndex(uint32_t index) {
MarkBit::CellType mask = 1u << IndexInCell(index);
MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(reinterpret_cast<base::Atomic32*>(cell), mask);
- }
-
- void Clear() {
- for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
- }
-
- // Sets all bits in the range [start_index, end_index).
- void SetRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- if (start_cell_index != end_cell_index) {
- // Firstly, fill all bits from the start address to the end of the first
- // cell with 1s.
- cells()[start_cell_index] |= ~(start_index_mask - 1);
- // Then fill all in between cells with 1s.
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- cells()[i] = ~0u;
- }
- // Finally, fill all bits until the end address in the last cell with 1s.
- cells()[end_cell_index] |= (end_index_mask - 1);
- } else {
- cells()[start_cell_index] |= end_index_mask - start_index_mask;
- }
+ return MarkBit(cell, mask);
}
- // Clears all bits in the range [start_index, end_index).
- void ClearRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- if (start_cell_index != end_cell_index) {
- // Firstly, fill all bits from the start address to the end of the first
- // cell with 0s.
- cells()[start_cell_index] &= (start_index_mask - 1);
- // Then fill all in between cells with 0s.
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- cells()[i] = 0;
- }
- // Finally, set all bits until the end address in the last cell with 0s.
- cells()[end_cell_index] &= ~(end_index_mask - 1);
- } else {
- cells()[start_cell_index] &= ~(end_index_mask - start_index_mask);
- }
- }
+ void Clear();
- // Returns true if all bits in the range [start_index, end_index) are set.
- bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- MarkBit::CellType matching_mask;
- if (start_cell_index != end_cell_index) {
- matching_mask = ~(start_index_mask - 1);
- if ((cells()[start_cell_index] & matching_mask) != matching_mask) {
- return false;
- }
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- if (cells()[i] != ~0u) return false;
- }
- matching_mask = (end_index_mask - 1);
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) ||
- ((cells()[end_cell_index] & matching_mask) == matching_mask);
- } else {
- matching_mask = end_index_mask - start_index_mask;
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) ||
- (cells()[end_cell_index] & matching_mask) == matching_mask;
- }
- }
-
- // Returns true if all bits in the range [start_index, end_index) are cleared.
- bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
- unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
-
- unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
-
- MarkBit::CellType matching_mask;
- if (start_cell_index != end_cell_index) {
- matching_mask = ~(start_index_mask - 1);
- if ((cells()[start_cell_index] & matching_mask)) return false;
- for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
- if (cells()[i]) return false;
- }
- matching_mask = (end_index_mask - 1);
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
- } else {
- matching_mask = end_index_mask - start_index_mask;
- // Check against a mask of 0 to avoid dereferencing the cell after the
- // end of the bitmap.
- return (matching_mask == 0) || !(cells()[end_cell_index] & matching_mask);
- }
- }
+ // Clears bits in the given cell. The mask specifies bits to clear: if a
+ // bit is set in the mask then the corresponding bit is cleared in the cell.
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
+ void ClearBitsInCell(uint32_t cell_index, uint32_t mask);
- static void PrintWord(uint32_t word, uint32_t himask = 0) {
- for (uint32_t mask = 1; mask != 0; mask <<= 1) {
- if ((mask & himask) != 0) PrintF("[");
- PrintF((mask & word) ? "1" : "0");
- if ((mask & himask) != 0) PrintF("]");
- }
- }
+ // Sets bits in the given cell. The mask specifies bits to set: if a
+ // bit is set in the mask then the corresponding bit is set in the cell.
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
+ void SetBitsInCell(uint32_t cell_index, uint32_t mask);
- class CellPrinter {
- public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+ // Sets all bits in the range [start_index, end_index). The cells at the
+  // boundary of the range are updated with an atomic compare-and-swap
+  // operation. The inner cells are updated with relaxed writes.
+ void SetRange(uint32_t start_index, uint32_t end_index);
- void Print(uint32_t pos, uint32_t cell) {
- if (cell == seq_type) {
- seq_length++;
- return;
- }
+ // Clears all bits in the range [start_index, end_index). The cells at the
+  // boundary of the range are updated with an atomic compare-and-swap
+  // operation. The inner cells are updated with relaxed writes.
+ void ClearRange(uint32_t start_index, uint32_t end_index);
- Flush();
+ // Returns true if all bits in the range [start_index, end_index) are set.
+ bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);
- if (IsSeq(cell)) {
- seq_start = pos;
- seq_length = 0;
- seq_type = cell;
- return;
- }
+ // Returns true if all bits in the range [start_index, end_index) are cleared.
+ bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index);
- PrintF("%d: ", pos);
- PrintWord(cell);
- PrintF("\n");
- }
+ void Print();
- void Flush() {
- if (seq_length > 0) {
- PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
- seq_length * kBitsPerCell);
- seq_length = 0;
- }
- }
+ bool IsClean();
+};
- static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+template <>
+inline void Bitmap::SetBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ cells()[cell_index] |= mask;
+}
- private:
- uint32_t seq_start;
- uint32_t seq_type;
- uint32_t seq_length;
- };
+template <>
+inline void Bitmap::SetBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
+}
- void Print() {
- CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
- printer.Print(i, cells()[i]);
- }
- printer.Flush();
- PrintF("\n");
- }
+template <>
+inline void Bitmap::ClearBitsInCell<AccessMode::NON_ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ cells()[cell_index] &= ~mask;
+}
- bool IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) {
- return false;
- }
- }
- return true;
- }
-};
+template <>
+inline void Bitmap::ClearBitsInCell<AccessMode::ATOMIC>(uint32_t cell_index,
+ uint32_t mask) {
+ base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
+}
class Marking : public AllStatic {
public:
@@ -348,9 +205,9 @@ class Marking : public AllStatic {
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsImpossible(MarkBit mark_bit)) {
- if (mode == MarkBit::NON_ATOMIC) {
+ if (mode == AccessMode::NON_ATOMIC) {
return !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
// If we are in concurrent mode we can only tell if an object has the
@@ -366,36 +223,36 @@ class Marking : public AllStatic {
// Black markbits: 11
static const char* kBlackBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsBlack(MarkBit mark_bit)) {
return mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsWhite(MarkBit mark_bit)) {
- DCHECK(!IsImpossible(mark_bit));
+ DCHECK(!IsImpossible<mode>(mark_bit));
return !mark_bit.Get<mode>();
}
// Grey markbits: 10
static const char* kGreyBitPattern;
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsGrey(MarkBit mark_bit)) {
return mark_bit.Get<mode>() && !mark_bit.Next().Get<mode>();
}
// IsBlackOrGrey assumes that the first bit is set for black or grey
// objects.
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) {
return mark_bit.Get<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static void MarkWhite(MarkBit markbit)) {
- STATIC_ASSERT(mode == MarkBit::NON_ATOMIC);
+ STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
markbit.Clear<mode>();
markbit.Next().Clear<mode>();
}
@@ -403,30 +260,30 @@ class Marking : public AllStatic {
// Warning: this method is not safe in general in concurrent scenarios.
// If you know that nobody else will change the bits on the given location
// then you may use it.
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static void MarkBlack(MarkBit markbit)) {
markbit.Set<mode>();
markbit.Next().Set<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool BlackToGrey(MarkBit markbit)) {
- STATIC_ASSERT(mode == MarkBit::NON_ATOMIC);
+ STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
DCHECK(IsBlack(markbit));
return markbit.Next().Clear<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool WhiteToGrey(MarkBit markbit)) {
return markbit.Set<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool WhiteToBlack(MarkBit markbit)) {
return markbit.Set<mode>() && markbit.Next().Set<mode>();
}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
INLINE(static bool GreyToBlack(MarkBit markbit)) {
return markbit.Get<mode>() && markbit.Next().Set<mode>();
}
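Editorial note: for reference, the two-bit colour encoding behind these predicates, with the first bit being the mark bit itself and the second bit its Next() neighbour, summarised from the checks above:

//   colour       mark bit   Next() bit   reached e.g. via
//   white           0           0        MarkWhite() or a cleared bitmap
//   grey            1           0        WhiteToGrey(): set the mark bit
//   black           1           1        GreyToBlack(): also set Next()
//   impossible      0           1        never produced by these transitions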
@@ -457,7 +314,6 @@ class Marking : public AllStatic {
if (IsWhite(mark_bit)) return WHITE_OBJECT;
if (IsGrey(mark_bit)) return GREY_OBJECT;
UNREACHABLE();
- return IMPOSSIBLE_COLOR;
}
private:
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 46b7b576d2..0e1449bb92 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -197,7 +197,6 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
}
}
UNREACHABLE();
- return State(kDone, 0, 0, 0.0, 0); // Make the compiler happy.
}
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index a9f50cdfbf..66c864b945 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -320,7 +320,7 @@ void ObjectStatsCollector::CollectGlobalStatistics() {
OBJECT_TO_CODE_SUB_TYPE);
RecordHashTableHelper(nullptr, heap_->code_stubs(),
CODE_STUBS_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->empty_properties_dictionary(),
+ RecordHashTableHelper(nullptr, heap_->empty_property_dictionary(),
EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
CompilationCacheTableVisitor v(this);
@@ -335,7 +335,7 @@ static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
array != heap->empty_sloppy_arguments_elements() &&
array != heap->empty_slow_element_dictionary() &&
array != heap->empty_descriptor_array() &&
- array != heap->empty_properties_dictionary();
+ array != heap->empty_property_dictionary();
}
static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
@@ -393,27 +393,29 @@ void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements);
RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
} else {
- if (IsFastHoleyElementsKind(object->GetElementsKind())) {
+ if (IsHoleyElementsKind(object->GetElementsKind())) {
int used = object->GetFastElementsUsage() * kPointerSize;
- if (object->GetElementsKind() == FAST_HOLEY_DOUBLE_ELEMENTS) used *= 2;
+ if (object->GetElementsKind() == HOLEY_DOUBLE_ELEMENTS) used *= 2;
CHECK_GE(elements->Size(), used);
overhead = elements->Size() - used - FixedArray::kHeaderSize;
}
- stats_->RecordFixedArraySubTypeStats(elements, FAST_ELEMENTS_SUB_TYPE,
+ stats_->RecordFixedArraySubTypeStats(elements, PACKED_ELEMENTS_SUB_TYPE,
elements->Size(), overhead);
}
}
- overhead = 0;
- FixedArrayBase* properties = object->properties();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties) && !IsCowArray(heap_, properties)) {
- if (properties->IsDictionary()) {
- NameDictionary* dict = NameDictionary::cast(properties);
- RecordHashTableHelper(object, dict, DICTIONARY_PROPERTIES_SUB_TYPE);
- } else {
- stats_->RecordFixedArraySubTypeStats(properties, FAST_PROPERTIES_SUB_TYPE,
- properties->Size(), overhead);
+ if (object->IsJSGlobalObject()) {
+ GlobalDictionary* properties =
+ JSGlobalObject::cast(object)->global_dictionary();
+ if (CanRecordFixedArray(heap_, properties) &&
+ SameLiveness(object, properties)) {
+ RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
+ }
+ } else if (!object->HasFastProperties()) {
+ NameDictionary* properties = object->property_dictionary();
+ if (CanRecordFixedArray(heap_, properties) &&
+ SameLiveness(object, properties)) {
+ RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
}
}
}
@@ -462,8 +464,8 @@ void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
}
}
- if (map_obj->has_code_cache()) {
- FixedArray* code_cache = map_obj->code_cache();
+ FixedArray* code_cache = map_obj->code_cache();
+ if (code_cache->length() > 0) {
if (code_cache->IsCodeCacheHashTable()) {
RecordHashTableHelper(map_obj, CodeCacheHashTable::cast(code_cache),
MAP_CODE_CACHE_SUB_TYPE);
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 11bf679ec4..ad3ddbc52c 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -5,9 +5,11 @@
#ifndef V8_OBJECTS_VISITING_INL_H_
#define V8_OBJECTS_VISITING_INL_H_
+#include "src/heap/objects-visiting.h"
+
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
#include "src/objects-body-descriptors-inl.h"
@@ -15,461 +17,302 @@
namespace v8 {
namespace internal {
-
-template <typename Callback>
-Callback VisitorDispatchTable<Callback>::GetVisitor(Map* map) {
- return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
+ return Visit(object->map(), object);
}
-
-template <typename StaticVisitor>
-void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
- table_.Register(
- kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
-
- table_.Register(
- kVisitConsString,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
-
- table_.Register(
- kVisitThinString,
- &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor, int>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
- int>::Visit);
-
- table_.Register(
- kVisitSymbol,
- &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, int>::Visit);
-
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor, int>::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
- table_.Register(
- kVisitFixedTypedArrayBase,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- int>::Visit);
-
- table_.Register(
- kVisitFixedFloat64Array,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- int>::Visit);
-
- table_.Register(
- kVisitNativeContext,
- &FixedBodyVisitor<StaticVisitor, Context::ScavengeBodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitByteArray, &VisitByteArray);
-
- table_.Register(
- kVisitSharedFunctionInfo,
- &FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
-
- table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
- // Don't visit code entry. We are using this visitor only during scavenges.
- table_.Register(
- kVisitJSFunction,
- &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
- int>::Visit);
-
- table_.Register(
- kVisitJSArrayBuffer,
- &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitFreeSpace, &VisitFreeSpace);
-
- table_.Register(
- kVisitJSWeakCollection,
- &FlexibleBodyVisitor<StaticVisitor, JSWeakCollection::BodyDescriptor,
- int>::Visit);
-
- table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitDataObject, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSObjectFast, &JSObjectFastVisitor::Visit);
- table_.Register(kVisitJSObject, &JSObjectVisitor::Visit);
-
- // Not using specialized Api object visitor for newspace.
- table_.Register(kVisitJSApiObject, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitStruct, &StructVisitor::Visit);
-
- table_.Register(kVisitBytecodeArray, &UnreachableVisitor);
- table_.Register(kVisitSharedFunctionInfo, &UnreachableVisitor);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
+ HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ switch (static_cast<VisitorId>(map->visitor_id())) {
+#define CASE(type) \
+ case kVisit##type: \
+ return visitor->Visit##type(map, type::cast(object));
+ TYPED_VISITOR_ID_LIST(CASE)
+#undef CASE
+ case kVisitShortcutCandidate:
+ return visitor->VisitShortcutCandidate(map, ConsString::cast(object));
+ case kVisitNativeContext:
+ return visitor->VisitNativeContext(map, Context::cast(object));
+ case kVisitDataObject:
+ return visitor->VisitDataObject(map, HeapObject::cast(object));
+ case kVisitJSObjectFast:
+ return visitor->VisitJSObjectFast(map, JSObject::cast(object));
+ case kVisitJSApiObject:
+ return visitor->VisitJSApiObject(map, JSObject::cast(object));
+ case kVisitStruct:
+ return visitor->VisitStruct(map, HeapObject::cast(object));
+ case kVisitFreeSpace:
+ return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
+ case kVisitorIdCount:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ // Make the compiler happy.
+ return ResultType();
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::Initialize() {
- table_.Register(kVisitShortcutCandidate,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitConsString,
- &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitThinString,
- &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitSlicedString,
- &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
- void>::Visit);
-
- table_.Register(
- kVisitSymbol,
- &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, void>::Visit);
-
- table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
-
- table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
-
- table_.Register(
- kVisitFixedTypedArrayBase,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- void>::Visit);
-
- table_.Register(
- kVisitFixedFloat64Array,
- &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitNativeContext, &VisitNativeContext);
-
- table_.Register(
- kVisitAllocationSite,
- &FixedBodyVisitor<StaticVisitor, AllocationSite::MarkingBodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
-
- table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSWeakCollection, &VisitWeakCollection);
-
- table_.Register(
- kVisitOddball,
- &FixedBodyVisitor<StaticVisitor, Oddball::BodyDescriptor, void>::Visit);
-
- table_.Register(kVisitMap, &VisitMap);
-
- table_.Register(kVisitCode, &VisitCode);
-
- table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
- table_.Register(kVisitJSFunction, &VisitJSFunction);
-
- table_.Register(
- kVisitJSArrayBuffer,
- &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
- table_.Register(
- kVisitCell,
- &FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit);
-
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticVisitor, PropertyCell::BodyDescriptor,
- void>::Visit);
-
- table_.Register(kVisitWeakCell, &VisitWeakCell);
-
- table_.Register(kVisitTransitionArray, &VisitTransitionArray);
-
- table_.Register(kVisitDataObject, &DataObjectVisitor::Visit);
-
- table_.Register(kVisitJSObjectFast, &JSObjectFastVisitor::Visit);
- table_.Register(kVisitJSObject, &JSObjectVisitor::Visit);
-
- table_.Register(kVisitJSApiObject, &JSApiObjectVisitor::Visit);
-
- table_.Register(kVisitStruct, &StructObjectVisitor::Visit);
+template <typename ResultType, typename ConcreteVisitor>
+void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
+ HeapObject* host, HeapObject** map) {
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(
+ host, reinterpret_cast<Object**>(map));
}
+#define VISIT(type) \
+ template <typename ResultType, typename ConcreteVisitor> \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
+ Map* map, type* object) { \
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
+ if (!visitor->ShouldVisit(object)) return ResultType(); \
+ int size = type::BodyDescriptor::SizeOf(map, object); \
+ if (visitor->ShouldVisitMapPointer()) \
+ visitor->VisitMapPointer(object, object->map_slot()); \
+ type::BodyDescriptor::IterateBody(object, size, visitor); \
+ return static_cast<ResultType>(size); \
+ }
+TYPED_VISITOR_ID_LIST(VISIT)
+#undef VISIT
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
- Heap* heap, HeapObject* object, Address entry_address) {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- heap->mark_compact_collector()->RecordCodeEntrySlot(object, entry_address,
- code);
- StaticVisitor::MarkObject(heap, code);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
+ Map* map, ConsString* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = ConsString::BodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ ConsString::BodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
- Heap* heap, RelocInfo* rinfo) {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject* object = HeapObject::cast(rinfo->target_object());
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
- // TODO(ulan): It could be better to record slots only for strongly embedded
- // objects here and record slots for weakly embedded object during clearing
- // of non-live references in mark-compact.
- if (!host->IsWeakObject(object)) {
- StaticVisitor::MarkObject(heap, object);
- }
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
+ Map* map, Context* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = Context::BodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ Context::BodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
- RelocInfo* rinfo) {
- DCHECK(rinfo->rmode() == RelocInfo::CELL);
- Cell* cell = rinfo->target_cell();
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, cell);
- if (!host->IsWeakObject(cell)) {
- StaticVisitor::MarkObject(heap, cell);
- }
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
+ Map* map, HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = map->instance_size();
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ return static_cast<ResultType>(size);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
- RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence());
- Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
- StaticVisitor::MarkObject(heap, target);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
+ Map* map, JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ JSObject::FastBodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
+ Map* map, JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ JSObject::BodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
+}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
- RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
- StaticVisitor::MarkObject(heap, target);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
+ Map* map, HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = map->instance_size();
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ StructBodyDescriptor::IterateBody(object, size, visitor);
+ return static_cast<ResultType>(size);
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
- Heap* heap, RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Code* target = rinfo->code_age_stub();
- DCHECK(target != NULL);
- Code* host = rinfo->host();
- heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
- StaticVisitor::MarkObject(heap, target);
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
+ Map* map, FreeSpace* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ if (visitor->ShouldVisitMapPointer())
+ visitor->VisitMapPointer(object, object->map_slot());
+ return static_cast<ResultType>(FreeSpace::cast(object)->size());
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
- void>::Visit(map, object);
- BytecodeArray::cast(object)->MakeOlder();
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
+ JSFunction* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
- void>::Visit(map, object);
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
+ Context* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = Context::BodyDescriptor::SizeOf(map, object);
+ Context::BodyDescriptor::IterateBody(object, size, visitor);
+ return size;
}
+template <typename ConcreteVisitor>
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
+ JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ return visitor->VisitJSObject(map, object);
+}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- Map* map_object = Map::cast(object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
+ JSFunction* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ return size;
+}
- // Clears the cache of ICs related to this map.
- if (FLAG_cleanup_code_caches_at_gc) {
- map_object->ClearCodeCache(heap);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitTransitionArray(
+ Map* map, TransitionArray* array) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ // Visit strong references.
+ if (array->HasPrototypeTransitions()) {
+ visitor->VisitPointer(array, array->GetPrototypeTransitionsSlot());
}
-
- // When map collection is enabled we have to mark through map's transitions
- // and back pointers in a special way to make these links weak.
- if (map_object->CanTransition()) {
- MarkMapContents(heap, map_object);
- } else {
- StaticVisitor::VisitPointers(
- heap, object,
- HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+ int num_transitions = TransitionArray::NumberOfTransitions(array);
+ for (int i = 0; i < num_transitions; ++i) {
+ visitor->VisitPointer(array, array->GetKeySlot(i));
}
+ // Enqueue the array in linked list of encountered transition arrays if it is
+ // not already in the list.
+ if (array->next_link()->IsUndefined(heap_->isolate())) {
+ array->set_next_link(heap_->encountered_transition_arrays(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ heap_->set_encountered_transition_arrays(array);
+ }
+ return TransitionArray::BodyDescriptor::SizeOf(map, array);
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- WeakCell* weak_cell = reinterpret_cast<WeakCell*>(object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitWeakCell(Map* map,
+ WeakCell* weak_cell) {
// Enqueue weak cell in linked list of encountered weak collections.
// We can ignore weak cells with cleared values because they will always
// contain smi zero.
if (weak_cell->next_cleared() && !weak_cell->cleared()) {
HeapObject* value = HeapObject::cast(weak_cell->value());
- if (ObjectMarking::IsBlackOrGrey(value, MarkingState::Internal(value))) {
+ if (ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
+ value, collector_->marking_state(value))) {
// Weak cells with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- map->GetHeap()->mark_compact_collector()->RecordSlot(weak_cell, slot,
- *slot);
+ collector_->RecordSlot(weak_cell, slot, *slot);
} else {
// If we do not know about liveness of values of weak cells, we have to
// process them when we know the liveness of the whole transitive
// closure.
- weak_cell->set_next(heap->encountered_weak_cells(),
+ weak_cell->set_next(heap_->encountered_weak_cells(),
UPDATE_WEAK_WRITE_BARRIER);
- heap->set_encountered_weak_cells(weak_cell);
+ heap_->set_encountered_weak_cells(weak_cell);
}
}
+ return WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
- Map* map, HeapObject* object) {
- TransitionArray* array = TransitionArray::cast(object);
- Heap* heap = array->GetHeap();
- // Visit strong references.
- if (array->HasPrototypeTransitions()) {
- StaticVisitor::VisitPointer(heap, array,
- array->GetPrototypeTransitionsSlot());
- }
- int num_transitions = TransitionArray::NumberOfTransitions(array);
- for (int i = 0; i < num_transitions; ++i) {
- StaticVisitor::VisitPointer(heap, array, array->GetKeySlot(i));
- }
- // Enqueue the array in linked list of encountered transition arrays if it is
- // not already in the list.
- if (array->next_link()->IsUndefined(heap->isolate())) {
- Heap* heap = map->GetHeap();
- array->set_next_link(heap->encountered_transition_arrays(),
- UPDATE_WEAK_WRITE_BARRIER);
- heap->set_encountered_transition_arrays(array);
- }
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
+ Context* context) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = Context::BodyDescriptorWeak::SizeOf(map, context);
+ Context::BodyDescriptorWeak::IterateBody(context, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSWeakCollection::BodyDescriptorWeak,
- void> JSWeakCollectionBodyVisitor;
- Heap* heap = map->GetHeap();
- JSWeakCollection* weak_collection =
- reinterpret_cast<JSWeakCollection*>(object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitJSWeakCollection(
+ Map* map, JSWeakCollection* weak_collection) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
// Enqueue weak collection in linked list of encountered weak collections.
- if (weak_collection->next() == heap->undefined_value()) {
- weak_collection->set_next(heap->encountered_weak_collections());
- heap->set_encountered_weak_collections(weak_collection);
+ if (weak_collection->next() == heap_->undefined_value()) {
+ weak_collection->set_next(heap_->encountered_weak_collections());
+ heap_->set_encountered_weak_collections(weak_collection);
}
// Skip visiting the backing hash table containing the mappings and the
// pointer to the other enqueued weak collections, both are post-processed.
- JSWeakCollectionBodyVisitor::Visit(map, object);
+ int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
+ JSWeakCollection::BodyDescriptorWeak::IterateBody(weak_collection, size,
+ visitor);
// Partially initialized weak collection is enqueued, but table is ignored.
- if (!weak_collection->table()->IsHashTable()) return;
+ if (!weak_collection->table()->IsHashTable()) return size;
// Mark the backing hash table without pushing it on the marking stack.
- Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
+ Object** slot =
+ HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(object, slot, obj);
- StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ collector_->RecordSlot(weak_collection, slot, obj);
+ visitor->MarkObjectWithoutPush(obj);
+ return size;
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
- HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor, Code::BodyDescriptor, void>
- CodeBodyVisitor;
- Heap* heap = map->GetHeap();
- Code* code = Code::cast(object);
- if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
- code->MakeOlder();
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitSharedFunctionInfo(
+ Map* map, SharedFunctionInfo* sfi) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (sfi->ic_age() != heap_->global_ic_age()) {
+ sfi->ResetForNewContext(heap_->global_ic_age());
}
- CodeBodyVisitor::Visit(map, object);
+ int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, sfi);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(sfi, size, visitor);
+ return size;
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (IsFlushable(heap, shared)) {
- // This function's code looks flushable. But we have to postpone
- // the decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would also make the non-optimized version of the code
- // non-flushable, because it is required for bailing out from
- // optimized code.
- collector->code_flusher()->AddCandidate(shared);
- // Treat the reference to the code object weakly.
- VisitSharedFunctionInfoWeakCode(map, object);
- return;
- }
- }
- VisitSharedFunctionInfoStrongCode(map, object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitBytecodeArray(Map* map,
+ BytecodeArray* array) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
+ BytecodeArray::BodyDescriptor::IterateBody(array, size, visitor);
+ array->MakeOlder();
+ return size;
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
- HeapObject* object) {
- Heap* heap = map->GetHeap();
- JSFunction* function = JSFunction::cast(object);
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (IsFlushable(heap, function)) {
- // This function's code looks flushable. But we have to postpone
- // the decision until we see all functions that point to the same
- // SharedFunctionInfo because some of them might be optimized.
- // That would also make the non-optimized version of the code
- // non-flushable, because it is required for bailing out from
- // optimized code.
- collector->code_flusher()->AddCandidate(function);
- // Treat the reference to the code object weakly.
- VisitJSFunctionWeakCode(map, object);
- return;
- } else {
- // Visit all unoptimized code objects to prevent flushing them.
- StaticVisitor::MarkObject(heap, function->shared()->code());
- }
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitCode(Map* map, Code* code) {
+ if (FLAG_age_code && !heap_->isolate()->serializer_enabled()) {
+ code->MakeOlder();
}
- VisitJSFunctionStrongCode(map, object);
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = Code::BodyDescriptor::SizeOf(map, code);
+ Code::BodyDescriptor::IterateBody(code, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
- Map* map) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::MarkMapContents(Map* map) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a non-empty
// descriptor array is marked, its header is also visited. The slot holding
@@ -478,296 +321,134 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
// just mark the entire descriptor array.
if (!map->is_prototype_map()) {
DescriptorArray* descriptors = map->instance_descriptors();
- if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+ if (visitor->MarkObjectWithoutPush(descriptors) &&
descriptors->length() > 0) {
- StaticVisitor::VisitPointers(heap, descriptors,
- descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
+ visitor->VisitPointers(descriptors, descriptors->GetFirstElementAddress(),
+ descriptors->GetDescriptorEndSlot(0));
}
int start = 0;
int end = map->NumberOfOwnDescriptors();
if (start < end) {
- StaticVisitor::VisitPointers(heap, descriptors,
- descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
+ visitor->VisitPointers(descriptors,
+ descriptors->GetDescriptorStartSlot(start),
+ descriptors->GetDescriptorEndSlot(end));
}
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
- StaticVisitor::VisitPointers(
- heap, map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ visitor->VisitPointers(
+ map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitMap(Map* map, Map* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
-inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
- Object* undefined = heap->undefined_value();
- return (info->script() != undefined) &&
- (reinterpret_cast<Script*>(info->script())->source() != undefined);
-}
-
-
-template <typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
- JSFunction* function) {
- SharedFunctionInfo* shared_info = function->shared();
-
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- if (ObjectMarking::IsBlackOrGrey(function->code(),
- MarkingState::Internal(function->code()))) {
- return false;
- }
-
- // We do not (yet) flush code for optimized functions.
- if (function->code() != shared_info->code()) {
- return false;
+ // Clears the cache of ICs related to this map.
+ if (FLAG_cleanup_code_caches_at_gc) {
+ object->ClearCodeCache(heap_);
}
- // Check age of optimized code.
- if (FLAG_age_code && !function->code()->IsOld()) {
- return false;
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (object->CanTransition()) {
+ MarkMapContents(object);
+ } else {
+ visitor->VisitPointers(
+ object, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
}
-
- return IsFlushable(heap, shared_info);
+ return Map::BodyDescriptor::SizeOf(map, object);
}
-
-template <typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
- Heap* heap, SharedFunctionInfo* shared_info) {
- // Code is either on stack, in compilation cache or referenced
- // by optimized version of function.
- if (ObjectMarking::IsBlackOrGrey(
- shared_info->code(), MarkingState::Internal(shared_info->code()))) {
- return false;
- }
-
- // The function must be compiled and have the source code available,
- // to be able to recompile it in case we need the function again.
- if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
- return false;
- }
-
- // We never flush code for API functions.
- if (shared_info->IsApiFunction()) {
- return false;
- }
-
- // Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) {
- return false;
- }
-
- // Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) {
- return false;
- }
-
- // We do not (yet?) flush code for generator functions, or async functions,
- // because we don't know if there are still live activations
- // (generator objects) on the heap.
- if (IsResumableFunction(shared_info->kind())) {
- return false;
- }
-
- // If this is a full script wrapped in a function we do not flush the code.
- if (shared_info->is_toplevel()) {
- return false;
- }
-
- // The function must be user code.
- if (!shared_info->IsUserJavaScript()) {
- return false;
- }
-
- // Maintain debug break slots in the code.
- if (shared_info->HasDebugCode()) {
- return false;
- }
-
- // If this is a function initialized with %SetCode then the one-to-one
- // relation between SharedFunctionInfo and Code is broken.
- if (shared_info->dont_flush()) {
- return false;
- }
-
- // Check age of code. If code aging is disabled we never flush.
- if (!FLAG_age_code || !shared_info->code()->IsOld()) {
- return false;
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
+ JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ DCHECK(object->IsJSObject());
+ heap_->TracePossibleWrapper(object);
}
-
- return true;
-}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
- Map* map, HeapObject* object) {
- FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
- void>::Visit(map, object);
-}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
- Map* map, HeapObject* object) {
- // Skip visiting kCodeOffset as it is treated weakly here.
- STATIC_ASSERT(SharedFunctionInfo::kCodeOffset <
- SharedFunctionInfo::BodyDescriptorWeakCode::kStartOffset);
- FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptorWeakCode,
- void>::Visit(map, object);
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ JSObject::BodyDescriptor::IterateBody(object, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor,
- JSFunction::BodyDescriptorStrongCode,
- void> JSFunctionStrongCodeBodyVisitor;
- JSFunctionStrongCodeBodyVisitor::Visit(map, object);
+template <typename ConcreteVisitor>
+int MarkingVisitor<ConcreteVisitor>::VisitAllocationSite(
+ Map* map, AllocationSite* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
+ AllocationSite::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ return size;
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
- void> JSFunctionWeakCodeBodyVisitor;
- JSFunctionWeakCodeBodyVisitor::Visit(map, object);
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCodeEntry(JSFunction* host,
+ Address entry_address) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ collector_->RecordCodeEntrySlot(host, entry_address, code);
+ visitor->MarkObject(code);
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
- Map* map = object->map();
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitEmbeddedPointer(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- switch (static_cast<VisitorId>(map->visitor_id())) {
-#define CASE(type) \
- case kVisit##type: \
- return visitor->Visit##type(map, type::cast(object));
- TYPED_VISITOR_ID_LIST(CASE)
-#undef CASE
- case kVisitShortcutCandidate:
- return visitor->VisitShortcutCandidate(map, ConsString::cast(object));
- case kVisitNativeContext:
- return visitor->VisitNativeContext(map, Context::cast(object));
- case kVisitDataObject:
- return visitor->VisitDataObject(map, HeapObject::cast(object));
- case kVisitJSObjectFast:
- return visitor->VisitJSObjectFast(map, JSObject::cast(object));
- case kVisitJSApiObject:
- return visitor->VisitJSApiObject(map, JSObject::cast(object));
- case kVisitStruct:
- return visitor->VisitStruct(map, HeapObject::cast(object));
- case kVisitFreeSpace:
- return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
- case kVisitorIdCount:
- UNREACHABLE();
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ collector_->RecordRelocSlot(host, rinfo, object);
+ if (!host->IsWeakObject(object)) {
+ visitor->MarkObject(object);
}
- UNREACHABLE();
- // Make the compiler happy.
- return ResultType();
-}
-
-template <typename ResultType, typename ConcreteVisitor>
-void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject* host, HeapObject** map) {
- static_cast<ConcreteVisitor*>(this)->VisitPointer(
- host, reinterpret_cast<Object**>(map));
-}
-
-template <typename ResultType, typename ConcreteVisitor>
-bool HeapVisitor<ResultType, ConcreteVisitor>::ShouldVisit(HeapObject* object) {
- return true;
}
-#define VISIT(type) \
- template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
- Map* map, type* object) { \
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
- if (!visitor->ShouldVisit(object)) return ResultType(); \
- int size = type::BodyDescriptor::SizeOf(map, object); \
- visitor->VisitMapPointer(object, object->map_slot()); \
- type::BodyDescriptor::IterateBody(object, size, visitor); \
- return static_cast<ResultType>(size); \
- }
-TYPED_VISITOR_ID_LIST(VISIT)
-#undef VISIT
-
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
- Map* map, ConsString* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCellPointer(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = ConsString::BodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- ConsString::BodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
+ DCHECK(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = rinfo->target_cell();
+ collector_->RecordRelocSlot(host, rinfo, cell);
+ if (!host->IsWeakObject(cell)) {
+ visitor->MarkObject(cell);
+ }
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
- Map* map, Context* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitDebugTarget(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = Context::BodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ collector_->RecordRelocSlot(host, rinfo, target);
+ visitor->MarkObject(target);
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
- Map* map, HeapObject* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCodeTarget(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = map->instance_size();
- visitor->VisitMapPointer(object, object->map_slot());
- return static_cast<ResultType>(size);
+ DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ collector_->RecordRelocSlot(host, rinfo, target);
+ visitor->MarkObject(target);
}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
- Map* map, JSObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- JSObject::FastBodyDescriptor::IterateBody(
- object, size, static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
-}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
- Map* map, JSObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- visitor->VisitMapPointer(object, object->map_slot());
- JSObject::BodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
-}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
- Map* map, HeapObject* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = map->instance_size();
- visitor->VisitMapPointer(object, object->map_slot());
- StructBodyDescriptor::IterateBody(object, size,
- static_cast<ConcreteVisitor*>(this));
- return static_cast<ResultType>(size);
-}
-template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
- Map* map, FreeSpace* object) {
+template <typename ConcreteVisitor>
+void MarkingVisitor<ConcreteVisitor>::VisitCodeAgeSequence(Code* host,
+ RelocInfo* rinfo) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- visitor->VisitMapPointer(object, object->map_slot());
- return static_cast<ResultType>(FreeSpace::cast(object)->size());
+ DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Code* target = rinfo->code_age_stub();
+ DCHECK_NOT_NULL(target);
+ collector_->RecordRelocSlot(host, rinfo, target);
+ visitor->MarkObject(target);
}
} // namespace internal
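As a rough illustration of the pattern used by the new objects-visiting-inl.h code above, here is a self-contained toy HeapVisitor: a CRTP base that switches on a per-object visitor id and forwards to the concrete visitor's typed Visit methods. The object model and the two visitor ids are invented for the example; only the dispatch shape mirrors HeapVisitor<ResultType, ConcreteVisitor>::Visit.

#include <cstdio>

// Hypothetical stand-ins for HeapObject/Map and a two-entry visitor id list.
enum VisitorId { kVisitFixedArray, kVisitCode };

struct HeapObject {
  VisitorId id;
  int size;
};

// CRTP base: dispatch on the visitor id and forward to the concrete class.
template <typename ResultType, typename ConcreteVisitor>
class HeapVisitor {
 public:
  ResultType Visit(HeapObject* object) {
    ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
    switch (object->id) {
      case kVisitFixedArray:
        return visitor->VisitFixedArray(object);
      case kVisitCode:
        return visitor->VisitCode(object);
    }
    return ResultType();
  }
  // Defaults that a concrete visitor may override.
  ResultType VisitFixedArray(HeapObject* object) { return object->size; }
  ResultType VisitCode(HeapObject* object) { return object->size; }
};

// A concrete visitor only overrides the cases it cares about.
class SizeLoggingVisitor : public HeapVisitor<int, SizeLoggingVisitor> {
 public:
  int VisitCode(HeapObject* object) {
    std::printf("visiting code of %d bytes\n", object->size);
    return object->size;
  }
};

int main() {
  HeapObject array{kVisitFixedArray, 64};
  HeapObject code{kVisitCode, 128};
  SizeLoggingVisitor visitor;
  int total = visitor.Visit(&array) + visitor.Visit(&code);
  std::printf("total visited: %d bytes\n", total);
  return 0;
}

Because the concrete visitor is reached through a static_cast rather than a function-pointer table, overrides such as VisitCode resolve at compile time, which is the property the templated visitors rely on in place of the runtime VisitorDispatchTable removed further down.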
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 5849fcb882..e6e59e1f77 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -11,201 +11,6 @@
namespace v8 {
namespace internal {
-VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
- return GetVisitorId(map->instance_type(), map->instance_size(),
- FLAG_unbox_double_fields && !map->HasFastPointerLayout());
-}
-
-VisitorId StaticVisitorBase::GetVisitorId(int instance_type, int instance_size,
- bool has_unboxed_fields) {
- if (instance_type < FIRST_NONSTRING_TYPE) {
- switch (instance_type & kStringRepresentationMask) {
- case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqOneByteString;
- } else {
- return kVisitSeqTwoByteString;
- }
-
- case kConsStringTag:
- if (IsShortcutCandidate(instance_type)) {
- return kVisitShortcutCandidate;
- } else {
- return kVisitConsString;
- }
-
- case kSlicedStringTag:
- return kVisitSlicedString;
-
- case kExternalStringTag:
- return kVisitDataObject;
-
- case kThinStringTag:
- return kVisitThinString;
- }
- UNREACHABLE();
- }
-
- switch (instance_type) {
- case BYTE_ARRAY_TYPE:
- return kVisitByteArray;
-
- case BYTECODE_ARRAY_TYPE:
- return kVisitBytecodeArray;
-
- case FREE_SPACE_TYPE:
- return kVisitFreeSpace;
-
- case FIXED_ARRAY_TYPE:
- return kVisitFixedArray;
-
- case FIXED_DOUBLE_ARRAY_TYPE:
- return kVisitFixedDoubleArray;
-
- case ODDBALL_TYPE:
- return kVisitOddball;
-
- case MAP_TYPE:
- return kVisitMap;
-
- case CODE_TYPE:
- return kVisitCode;
-
- case CELL_TYPE:
- return kVisitCell;
-
- case PROPERTY_CELL_TYPE:
- return kVisitPropertyCell;
-
- case WEAK_CELL_TYPE:
- return kVisitWeakCell;
-
- case TRANSITION_ARRAY_TYPE:
- return kVisitTransitionArray;
-
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- return kVisitJSWeakCollection;
-
- case JS_REGEXP_TYPE:
- return kVisitJSRegExp;
-
- case SHARED_FUNCTION_INFO_TYPE:
- return kVisitSharedFunctionInfo;
-
- case JS_PROXY_TYPE:
- return kVisitStruct;
-
- case SYMBOL_TYPE:
- return kVisitSymbol;
-
- case JS_ARRAY_BUFFER_TYPE:
- return kVisitJSArrayBuffer;
-
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
- case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_NAMESPACE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
- case JS_STRING_ITERATOR_TYPE:
-
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
-
- case JS_PROMISE_CAPABILITY_TYPE:
- case JS_PROMISE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
- return has_unboxed_fields ? kVisitJSObject : kVisitJSObjectFast;
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- return kVisitJSApiObject;
-
- case JS_FUNCTION_TYPE:
- return kVisitJSFunction;
-
- case FILLER_TYPE:
- case FOREIGN_TYPE:
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- return kVisitDataObject;
-
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- return kVisitFixedTypedArrayBase;
-
- case FIXED_FLOAT64_ARRAY_TYPE:
- return kVisitFixedFloat64Array;
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- if (instance_type == ALLOCATION_SITE_TYPE) {
- return kVisitAllocationSite;
- }
-
- return kVisitStruct;
-
- default:
- UNREACHABLE();
- return kVisitorIdCount;
- }
-}
-
-
// We don't record weak slots during marking or scavenges. Instead we do it
// once when we complete mark-compact cycle. Note that write barrier has no
// effect if we are already in the middle of compacting mark-sweep cycle and we
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index c578a42d64..efb1c32f1c 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -6,348 +6,14 @@
#define V8_OBJECTS_VISITING_H_
#include "src/allocation.h"
-#include "src/heap/embedder-tracing.h"
#include "src/heap/heap.h"
-#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
-
-// This file provides base classes and auxiliary methods for defining
-// static object visitors used during GC.
-// Visiting HeapObject body with a normal ObjectVisitor requires performing
-// two switches on object's instance type to determine object size and layout
-// and one or more virtual method calls on visitor itself.
-// Static visitor is different: it provides a dispatch table which contains
-// pointers to specialized visit functions. Each map has the visitor_id
-// field which contains an index of specialized visitor to use.
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
-#define VISITOR_ID_LIST(V) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(ShortcutCandidate) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(FreeSpace) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedTypedArrayBase) \
- V(FixedFloat64Array) \
- V(NativeContext) \
- V(AllocationSite) \
- V(DataObject) \
- V(JSObjectFast) \
- V(JSObject) \
- V(JSApiObject) \
- V(Struct) \
- V(ConsString) \
- V(SlicedString) \
- V(ThinString) \
- V(Symbol) \
- V(Oddball) \
- V(Code) \
- V(Map) \
- V(Cell) \
- V(PropertyCell) \
- V(WeakCell) \
- V(TransitionArray) \
- V(SharedFunctionInfo) \
- V(JSFunction) \
- V(JSWeakCollection) \
- V(JSArrayBuffer) \
- V(JSRegExp)
-
-// For data objects, JS objects and structs along with generic visitor which
-// can visit object of any size we provide visitors specialized by
-// object size in words.
-// Ids of specialized visitors are declared in a linear order (without
-// holes) starting from the id of visitor specialized for 2 words objects
-// (base visitor id) and ending with the id of generic visitor.
-// Method GetVisitorIdForSize depends on this ordering to calculate visitor
-// id of specialized visitor from given instance size, base visitor id and
-// generic visitor's id.
-enum VisitorId {
-#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
- VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
-#undef VISITOR_ID_ENUM_DECL
- kVisitorIdCount
-};
-
-// Base class for all static visitors.
-class StaticVisitorBase : public AllStatic {
- public:
- // Visitor ID should fit in one byte.
- STATIC_ASSERT(kVisitorIdCount <= 256);
-
- // Determine which specialized visitor should be used for given instance type
-  // and instance size.
- static VisitorId GetVisitorId(int instance_type, int instance_size,
- bool has_unboxed_fields);
-
- // Determine which specialized visitor should be used for given map.
- static VisitorId GetVisitorId(Map* map);
-};
-
-
-template <typename Callback>
-class VisitorDispatchTable {
- public:
- void CopyFrom(VisitorDispatchTable* other) {
- // We are not using memcpy to guarantee that during update
- // every element of callbacks_ array will remain correct
- // pointer (memcpy might be implemented as a byte copying loop).
- for (int i = 0; i < kVisitorIdCount; i++) {
- base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
- }
- }
-
- inline Callback GetVisitor(Map* map);
-
- inline Callback GetVisitorById(VisitorId id) {
- return reinterpret_cast<Callback>(callbacks_[id]);
- }
-
- void Register(VisitorId id, Callback callback) {
- DCHECK(id < kVisitorIdCount); // id is unsigned.
- callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
- }
-
- private:
- base::AtomicWord callbacks_[kVisitorIdCount];
-};
-
-
-template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public AllStatic {
- public:
- INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- int object_size = BodyDescriptor::SizeOf(map, object);
- BodyDescriptor::template IterateBody<StaticVisitor>(object, object_size);
- return static_cast<ReturnType>(object_size);
- }
-};
-
-
-template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public AllStatic {
- public:
- INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- BodyDescriptor::template IterateBody<StaticVisitor>(object);
- return static_cast<ReturnType>(BodyDescriptor::kSize);
- }
-};
-
-
-// Base class for visitors used for a linear new space iteration.
-// IterateBody returns size of visited object.
-// Certain types of objects (i.e. Code objects) are not handled
-// by dispatch table of this visitor because they cannot appear
-// in the new space.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern
-// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
-// We use CRTP to guarantee aggressive compile time optimizations (i.e.
-// inlining and specialization of StaticVisitor::VisitPointers methods).
-template <typename StaticVisitor>
-class StaticNewSpaceVisitor : public StaticVisitorBase {
- public:
- static void Initialize();
-
- INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
- return table_.GetVisitor(map)(map, obj);
- }
-
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
- Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) {
- StaticVisitor::VisitPointer(heap, object, p);
- }
- }
-
- // Although we are using the JSFunction body descriptor which does not
- // visit the code entry, compiler wants it to be accessible.
- // See JSFunction::BodyDescriptorImpl.
- inline static void VisitCodeEntry(Heap* heap, HeapObject* object,
- Address entry_address) {
- UNREACHABLE();
- }
-
- private:
- inline static int UnreachableVisitor(Map* map, HeapObject* object) {
- UNREACHABLE();
- return 0;
- }
-
- INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
- return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- }
-
- INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- return FixedDoubleArray::SizeFor(length);
- }
-
- INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
- return SeqOneByteString::cast(object)
- ->SeqOneByteStringSize(map->instance_type());
- }
-
- INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
- return SeqTwoByteString::cast(object)
- ->SeqTwoByteStringSize(map->instance_type());
- }
-
- INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
- return FreeSpace::cast(object)->size();
- }
-
- class DataObjectVisitor {
- public:
- template <int object_size>
- static inline int VisitSpecialized(Map* map, HeapObject* object) {
- return object_size;
- }
-
- INLINE(static int Visit(Map* map, HeapObject* object)) {
- return map->instance_size();
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, int>
- StructVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, int>
- JSObjectVisitor;
-
- // Visitor for JSObjects without unboxed double fields.
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::FastBodyDescriptor, int>
- JSObjectFastVisitor;
-
- typedef int (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template <typename StaticVisitor>
-VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
- StaticNewSpaceVisitor<StaticVisitor>::table_;
-
-
-// Base class for visitors used to transitively mark the entire heap.
-// IterateBody returns nothing.
-// Certain types of objects might not be handled by this base class and
-// no visitor function is registered by the generic initialization. A
-// specialized visitor function needs to be provided by the inheriting
-// class itself for those cases.
-//
-// This class is intended to be used in the following way:
-//
-// class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
-// ...
-// }
-//
-// This is an example of Curiously recurring template pattern.
-template <typename StaticVisitor>
-class StaticMarkingVisitor : public StaticVisitorBase {
- public:
- static void Initialize();
-
- INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
- table_.GetVisitor(map)(map, obj);
- }
-
- INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
- INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
- INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
- Address entry_address));
- INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
- INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {}
- INLINE(static void VisitInternalReference(RelocInfo* rinfo)) {}
- INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {}
- // Skip the weak next code link in a code object.
- INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
-
- protected:
- INLINE(static void VisitMap(Map* map, HeapObject* object));
- INLINE(static void VisitCode(Map* map, HeapObject* object));
- INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
- INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
- INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
- INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
- INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
-
- // Mark pointers in a Map treating some elements of the descriptor array weak.
- static void MarkMapContents(Heap* heap, Map* map);
-
- // Code flushing support.
- INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
- INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
-
- // Helpers used by code flushing support that visit pointer fields and treat
- // references to code objects either strongly or weakly.
- static void VisitSharedFunctionInfoStrongCode(Map* map, HeapObject* object);
- static void VisitSharedFunctionInfoWeakCode(Map* map, HeapObject* object);
- static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
- static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
-
- class DataObjectVisitor {
- public:
- template <int size>
- static inline void VisitSpecialized(Map* map, HeapObject* object) {}
-
- INLINE(static void Visit(Map* map, HeapObject* object)) {}
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor, FixedArray::BodyDescriptor, void>
- FixedArrayVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::FastBodyDescriptor, void>
- JSObjectFastVisitor;
- typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void>
- JSObjectVisitor;
-
- class JSApiObjectVisitor : AllStatic {
- public:
- INLINE(static void Visit(Map* map, HeapObject* object)) {
- TracePossibleWrapper(object);
- JSObjectVisitor::Visit(map, object);
- }
-
- private:
- INLINE(static void TracePossibleWrapper(HeapObject* object)) {
- if (object->GetHeap()->local_embedder_heap_tracer()->InUse()) {
- DCHECK(object->IsJSObject());
- object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
- }
- }
- };
-
- typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void>
- StructObjectVisitor;
-
- typedef void (*Callback)(Map* map, HeapObject* object);
-
- static VisitorDispatchTable<Callback> table_;
-};
-
-
-template <typename StaticVisitor>
-VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
- StaticMarkingVisitor<StaticVisitor>::table_;
-
#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite) \
V(ByteArray) \
@@ -366,20 +32,21 @@ VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
V(JSWeakCollection) \
V(Map) \
V(Oddball) \
+ V(PropertyArray) \
V(PropertyCell) \
V(SeqOneByteString) \
V(SeqTwoByteString) \
V(SharedFunctionInfo) \
V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
V(Symbol) \
- V(TransitionArray) \
V(ThinString) \
+ V(TransitionArray) \
V(WeakCell)
-// The base class for visitors that need to dispatch on object type.
-// It is similar to StaticVisitor except it uses virtual dispatch
-// instead of static dispatch table. The default behavour of all
-// visit functions is to iterate body of the given object using
+// The base class for visitors that need to dispatch on object type. The default
+// behavior of all visit functions is to iterate the body of the given object
// the BodyDescriptor of the object.
//
// The visit functions return the size of the object cast to ResultType.
@@ -389,32 +56,95 @@ VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
// class SomeVisitor : public HeapVisitor<ResultType, SomeVisitor> {
// ...
// }
-//
-// This is an example of Curiously recurring template pattern.
-// TODO(ulan): replace static visitors with the HeapVisitor.
template <typename ResultType, typename ConcreteVisitor>
class HeapVisitor : public ObjectVisitor {
public:
- ResultType Visit(HeapObject* object);
+ V8_INLINE ResultType Visit(HeapObject* object);
+ V8_INLINE ResultType Visit(Map* map, HeapObject* object);
protected:
// A guard predicate for visiting the object.
// If it returns false then the default implementations of the Visit*
// functions bailout from iterating the object pointers.
- virtual bool ShouldVisit(HeapObject* object);
+ V8_INLINE bool ShouldVisit(HeapObject* object) { return true; }
+  // Guard predicate for visiting the object's map pointer separately.
+ V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- virtual void VisitMapPointer(HeapObject* host, HeapObject** map);
+ V8_INLINE void VisitMapPointer(HeapObject* host, HeapObject** map);
-#define VISIT(type) virtual ResultType Visit##type(Map* map, type* object);
+#define VISIT(type) V8_INLINE ResultType Visit##type(Map* map, type* object);
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
- virtual ResultType VisitShortcutCandidate(Map* map, ConsString* object);
- virtual ResultType VisitNativeContext(Map* map, Context* object);
- virtual ResultType VisitDataObject(Map* map, HeapObject* object);
- virtual ResultType VisitJSObjectFast(Map* map, JSObject* object);
- virtual ResultType VisitJSApiObject(Map* map, JSObject* object);
- virtual ResultType VisitStruct(Map* map, HeapObject* object);
- virtual ResultType VisitFreeSpace(Map* map, FreeSpace* object);
+ V8_INLINE ResultType VisitShortcutCandidate(Map* map, ConsString* object);
+ V8_INLINE ResultType VisitNativeContext(Map* map, Context* object);
+ V8_INLINE ResultType VisitDataObject(Map* map, HeapObject* object);
+ V8_INLINE ResultType VisitJSObjectFast(Map* map, JSObject* object);
+ V8_INLINE ResultType VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE ResultType VisitStruct(Map* map, HeapObject* object);
+ V8_INLINE ResultType VisitFreeSpace(Map* map, FreeSpace* object);
+};
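Illustrative sketch only, not part of this patch: a minimal concrete visitor built on the CRTP HeapVisitor above. The class name and counters are hypothetical, and any further pure-virtual ObjectVisitor hooks are assumed to be satisfied elsewhere.

class ExampleSizeVisitor : public HeapVisitor<int, ExampleSizeVisitor> {
 public:
  // Pointer slots funnel through this hook; here we only count them.
  void VisitPointers(HeapObject* host, Object** start, Object** end) final {
    pointer_slots_ += static_cast<int>(end - start);
  }

  // Overriding one of the typed Visit* hooks adds custom handling for that
  // type; everything else keeps the default body iteration via the object's
  // BodyDescriptor and returns the object size as ResultType (int).
  int VisitByteArray(Map* map, ByteArray* object) {
    byte_array_bytes_ += object->Size();
    return object->Size();
  }

 private:
  int pointer_slots_ = 0;
  int byte_array_bytes_ = 0;
};

// visitor.Visit(object) dispatches on the object's map and returns its size.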
+
+template <typename ConcreteVisitor>
+class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
+ public:
+ V8_INLINE bool ShouldVisitMapPointer() { return false; }
+
+ void VisitCodeEntry(JSFunction* host, Address code_entry) final {
+ // Code is not in new space.
+ }
+
+ // Special cases for young generation.
+
+ V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
+ V8_INLINE int VisitNativeContext(Map* map, Context* object);
+ V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+
+ int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ UNREACHABLE();
+ return 0;
+ }
+
+ int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
+ UNREACHABLE();
+ return 0;
+ }
+};
+
+template <typename ConcreteVisitor>
+class MarkingVisitor : public HeapVisitor<int, ConcreteVisitor> {
+ public:
+ explicit MarkingVisitor(Heap* heap, MarkCompactCollector* collector)
+ : heap_(heap), collector_(collector) {}
+
+ V8_INLINE bool ShouldVisitMapPointer() { return false; }
+
+ V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
+ V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
+ V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
+ V8_INLINE int VisitNativeContext(Map* map, Context* object);
+ V8_INLINE int VisitJSWeakCollection(Map* map, JSWeakCollection* object);
+ V8_INLINE int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object);
+ V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
+ V8_INLINE int VisitCode(Map* map, Code* object);
+ V8_INLINE int VisitMap(Map* map, Map* object);
+ V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
+
+ // ObjectVisitor implementation.
+ V8_INLINE void VisitCodeEntry(JSFunction* host, Address entry_address) final;
+ V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCellPointer(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitDebugTarget(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final;
+ // Skip weak next code link.
+ V8_INLINE void VisitNextCodeLink(Code* host, Object** p) final {}
+
+ protected:
+ V8_INLINE void MarkMapContents(Map* map);
+
+ Heap* heap_;
+ MarkCompactCollector* collector_;
};
class WeakObjectRetainer;
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
deleted file mode 100644
index 939bdb3b3b..0000000000
--- a/deps/v8/src/heap/page-parallel-job.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_PAGE_PARALLEL_JOB_
-#define V8_HEAP_PAGE_PARALLEL_JOB_
-
-#include "src/allocation.h"
-#include "src/cancelable-task.h"
-#include "src/utils.h"
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-
-class Heap;
-class Isolate;
-
-// This class manages background tasks that process set of pages in parallel.
-// The JobTraits class needs to define:
-// - PerPageData type - state associated with each page.
-// - PerTaskData type - state associated with each task.
-// - static void ProcessPageInParallel(Heap* heap,
-// PerTaskData task_data,
-// MemoryChunk* page,
-// PerPageData page_data)
-template <typename JobTraits>
-class PageParallelJob {
- public:
- // PageParallelJob cannot dynamically create a semaphore because of a bug in
- // glibc. See http://crbug.com/609249 and
- // https://sourceware.org/bugzilla/show_bug.cgi?id=12674.
- // The caller must provide a semaphore with value 0 and ensure that
- // the lifetime of the semaphore is the same as the lifetime of the Isolate.
- // It is guaranteed that the semaphore value will be 0 after Run() call.
- PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* semaphore)
- : heap_(heap),
- cancelable_task_manager_(cancelable_task_manager),
- items_(nullptr),
- num_items_(0),
- num_tasks_(0),
- pending_tasks_(semaphore) {}
-
- ~PageParallelJob() {
- Item* item = items_;
- while (item != nullptr) {
- Item* next = item->next;
- delete item;
- item = next;
- }
- }
-
- void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
- Item* item = new Item(chunk, data, items_);
- items_ = item;
- ++num_items_;
- }
-
- int NumberOfPages() const { return num_items_; }
-
- // Returns the number of tasks that were spawned when running the job.
- int NumberOfTasks() const { return num_tasks_; }
-
- // Runs the given number of tasks in parallel and processes the previously
- // added pages. This function blocks until all tasks finish.
- // The callback takes the index of a task and returns data for that task.
- template <typename Callback>
- void Run(int num_tasks, Callback per_task_data_callback) {
- if (num_items_ == 0) return;
- DCHECK_GE(num_tasks, 1);
- CancelableTaskManager::Id task_ids[kMaxNumberOfTasks];
- const int max_num_tasks = Min(
- kMaxNumberOfTasks,
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
- num_tasks_ = Max(1, Min(num_tasks, max_num_tasks));
- int items_per_task = (num_items_ + num_tasks_ - 1) / num_tasks_;
- int start_index = 0;
- Task* main_task = nullptr;
- for (int i = 0; i < num_tasks_; i++, start_index += items_per_task) {
- if (start_index >= num_items_) {
- start_index -= num_items_;
- }
- Task* task = new Task(heap_, items_, num_items_, start_index,
- pending_tasks_, per_task_data_callback(i));
- task_ids[i] = task->id();
- if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- } else {
- main_task = task;
- }
- }
- // Contribute on main thread.
- main_task->Run();
- delete main_task;
- // Wait for background tasks.
- for (int i = 0; i < num_tasks_; i++) {
- if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_tasks_->Wait();
- }
- }
- }
-
- private:
- static const int kMaxNumberOfTasks = 32;
-
- enum ProcessingState { kAvailable, kProcessing, kFinished };
-
- struct Item : public Malloced {
- Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
- : chunk(chunk), state(kAvailable), data(data), next(next) {}
- MemoryChunk* chunk;
- base::AtomicValue<ProcessingState> state;
- typename JobTraits::PerPageData data;
- Item* next;
- };
-
- class Task : public CancelableTask {
- public:
- Task(Heap* heap, Item* items, int num_items, int start_index,
- base::Semaphore* on_finish, typename JobTraits::PerTaskData data)
- : CancelableTask(heap->isolate()),
- heap_(heap),
- items_(items),
- num_items_(num_items),
- start_index_(start_index),
- on_finish_(on_finish),
- data_(data) {}
-
- virtual ~Task() {}
-
- private:
- // v8::internal::CancelableTask overrides.
- void RunInternal() override {
- // Each task starts at a different index to improve parallelization.
- Item* current = items_;
- int skip = start_index_;
- while (skip-- > 0) {
- current = current->next;
- }
- for (int i = 0; i < num_items_; i++) {
- if (current->state.TrySetValue(kAvailable, kProcessing)) {
- JobTraits::ProcessPageInParallel(heap_, data_, current->chunk,
- current->data);
- current->state.SetValue(kFinished);
- }
- current = current->next;
- // Wrap around if needed.
- if (current == nullptr) {
- current = items_;
- }
- }
- on_finish_->Signal();
- }
-
- Heap* heap_;
- Item* items_;
- int num_items_;
- int start_index_;
- base::Semaphore* on_finish_;
- typename JobTraits::PerTaskData data_;
- DISALLOW_COPY_AND_ASSIGN(Task);
- };
-
- Heap* heap_;
- CancelableTaskManager* cancelable_task_manager_;
- Item* items_;
- int num_items_;
- int num_tasks_;
- base::Semaphore* pending_tasks_;
- DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_PAGE_PARALLEL_JOB_
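For reference only: the removed PageParallelJob was driven through a traits class of the shape described in its comment. The names below (ExampleSweepTraits, the per-page counter) are hypothetical placeholders, not code from the V8 sources.

struct ExampleSweepTraits {
  typedef int PerPageData;    // per-page state, e.g. a counter
  typedef void* PerTaskData;  // no per-task state needed in this sketch

  static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
                                    MemoryChunk* page, PerPageData page_data) {
    // Process a single page here.
  }
};

// PageParallelJob<ExampleSweepTraits> job(heap, task_manager, &semaphore);
// job.AddPage(page, 0);
// job.Run(num_tasks, [](int task_index) -> void* { return nullptr; });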
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index b60cd451ee..5908940d9e 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -21,14 +21,16 @@ class RememberedSet : public AllStatic {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
+ template <AccessMode access_mode = AccessMode::ATOMIC>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
+ SlotSet* slot_set = chunk->slot_set<type, access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSlotSet<type>();
}
uintptr_t offset = slot_addr - chunk->address();
- slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
+ slot_set[offset / Page::kPageSize].Insert<access_mode>(offset %
+ Page::kPageSize);
}
// Given a page and a slot in that page, this function returns true if
@@ -127,15 +129,18 @@ class RememberedSet : public AllStatic {
// Iterates and filters the remembered set in the given memory chunk with
// the given callback. The callback should take (Address slot) and return
// SlotCallbackResult.
+ //
+ // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
+ // threads concurrently inserting slots.
template <typename Callback>
- static void Iterate(MemoryChunk* chunk, Callback callback) {
+ static void Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
SlotSet* slots = chunk->slot_set<type>();
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
- new_count +=
- slots[page].Iterate(callback, SlotSet::PREFREE_EMPTY_BUCKETS);
+ new_count += slots[page].Iterate(callback, mode);
}
// Only old-to-old slot sets are released eagerly. Old-new-slot sets are
// released by the sweeper threads.
@@ -145,6 +150,17 @@ class RememberedSet : public AllStatic {
}
}
+ static void PreFreeEmptyBuckets(MemoryChunk* chunk) {
+ DCHECK(type == OLD_TO_NEW);
+ SlotSet* slots = chunk->slot_set<type>();
+ if (slots != nullptr) {
+ size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+ for (size_t page = 0; page < pages; page++) {
+ slots[page].PreFreeEmptyBuckets();
+ }
+ }
+ }
+
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
@@ -341,7 +357,6 @@ class UpdateTypedSlotHelper {
break;
}
UNREACHABLE();
- return REMOVE_SLOT;
}
};
@@ -356,7 +371,6 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
return DEBUG_TARGET_SLOT;
}
UNREACHABLE();
- return CLEARED_SLOT;
}
} // namespace internal
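A usage sketch, not taken from the patch: inserting a slot with the new access-mode parameter and iterating with an explicit empty-bucket mode. It assumes AccessMode also provides NON_ATOMIC and that KEEP_SLOT and SlotSet::KEEP_EMPTY_BUCKETS carry their usual meanings; chunk and slot_addr are placeholders.

// Single-threaded insertion can skip the atomic path.
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot_addr);

// Iterate all recorded slots; keep every slot and keep empty buckets, which
// stays safe if other threads are still inserting concurrently.
RememberedSet<OLD_TO_NEW>::Iterate(
    chunk, [](Address slot) { return KEEP_SLOT; },
    SlotSet::KEEP_EMPTY_BUCKETS);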
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index f7fbfc1480..c13e8e9205 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -22,11 +22,14 @@ class V8_EXPORT_PRIVATE ScavengeJob {
class IdleTask : public CancelableIdleTask {
public:
explicit IdleTask(Isolate* isolate, ScavengeJob* job)
- : CancelableIdleTask(isolate), job_(job) {}
+ : CancelableIdleTask(isolate), isolate_(isolate), job_(job) {}
// CancelableIdleTask overrides.
void RunInternal(double deadline_in_seconds) override;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
ScavengeJob* job_;
};
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 4cc215a83e..38b3ef2a8f 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -6,12 +6,187 @@
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
+namespace {
+
+// Whitelist of object types that are guaranteed to contain only data.
+bool ContainsOnlyData(VisitorId visitor_id) {
+ switch (visitor_id) {
+ case kVisitSeqOneByteString:
+ return true;
+ case kVisitSeqTwoByteString:
+ return true;
+ case kVisitByteArray:
+ return true;
+ case kVisitFixedDoubleArray:
+ return true;
+ case kVisitDataObject:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+} // namespace
+
+void Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+ int size) {
+ // Copy the content of source to target.
+ heap()->CopyBlock(target->address(), source->address(), size);
+
+ // Set the forwarding address.
+ source->set_map_word(MapWord::FromForwardingAddress(target));
+
+ if (V8_UNLIKELY(is_logging_)) {
+ // Update NewSpace stats if necessary.
+ RecordCopiedObject(target);
+ heap()->OnMoveEvent(target, source, size);
+ }
+
+ if (is_incremental_marking_) {
+ heap()->incremental_marking()->TransferColor(source, target);
+ }
+ heap()->UpdateAllocationSite<Heap::kCached>(map, source,
+ &local_pretenuring_feedback_);
+}
+
+bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation =
+ allocator_.Allocate<NEW_SPACE>(object_size, alignment);
+
+ HeapObject* target = nullptr;
+ if (allocation.To(&target)) {
+ DCHECK(ObjectMarking::IsWhite(
+ target, heap()->mark_compact_collector()->marking_state(target)));
+ MigrateObject(map, object, target, object_size);
+ *slot = target;
+
+ copied_list_.Insert(target, object_size);
+ copied_size_ += object_size;
+ return true;
+ }
+ return false;
+}
+
+bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
+ int object_size) {
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation =
+ allocator_.Allocate<OLD_SPACE>(object_size, alignment);
+
+ HeapObject* target = nullptr;
+ if (allocation.To(&target)) {
+ DCHECK(ObjectMarking::IsWhite(
+ target, heap()->mark_compact_collector()->marking_state(target)));
+ MigrateObject(map, object, target, object_size);
+ *slot = target;
+
+ if (!ContainsOnlyData(static_cast<VisitorId>(map->visitor_id()))) {
+ promotion_list_.Push(ObjectAndSize(target, object_size));
+ }
+ promoted_size_ += object_size;
+ return true;
+ }
+ return false;
+}
+
+void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
+ SLOW_DCHECK(object->SizeFromMap(map) == object_size);
+
+ if (!heap()->ShouldBePromoted(object->address())) {
+ // A semi-space copy may fail due to fragmentation. In that case, we
+ // try to promote the object.
+ if (SemiSpaceCopyObject(map, slot, object, object_size)) {
+ return;
+ }
+ }
+
+ if (PromoteObject(map, slot, object, object_size)) {
+ return;
+ }
+
+ // If promotion failed, we try to copy the object to the other semi-space
+ if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
+
+ FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
+}
+
+void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
+ ThinString* object, int object_size) {
+ if (!is_incremental_marking_) {
+ HeapObject* actual = object->actual();
+ *slot = actual;
+ // ThinStrings always refer to internalized strings, which are
+ // always in old space.
+ DCHECK(!heap()->InNewSpace(actual));
+ object->set_map_word(MapWord::FromForwardingAddress(actual));
+ return;
+ }
+
+ EvacuateObjectDefault(map, slot, object, object_size);
+}
+
+void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+ ConsString* object, int object_size) {
+ DCHECK(IsShortcutCandidate(map->instance_type()));
+ if (!is_incremental_marking_ &&
+ object->unchecked_second() == heap()->empty_string()) {
+ HeapObject* first = HeapObject::cast(object->unchecked_first());
+
+ *slot = first;
+
+ if (!heap()->InNewSpace(first)) {
+ object->set_map_word(MapWord::FromForwardingAddress(first));
+ return;
+ }
+
+ MapWord first_word = first->map_word();
+ if (first_word.IsForwardingAddress()) {
+ HeapObject* target = first_word.ToForwardingAddress();
+
+ *slot = target;
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+ return;
+ }
+
+ EvacuateObject(slot, first_word.ToMap(), first);
+ object->set_map_word(MapWord::FromForwardingAddress(*slot));
+ return;
+ }
+
+ EvacuateObjectDefault(map, slot, object, object_size);
+}
+
+void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
+ HeapObject* source) {
+ SLOW_DCHECK(heap_->InFromSpace(source));
+ SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
+ int size = source->SizeFromMap(map);
+ switch (static_cast<VisitorId>(map->visitor_id())) {
+ case kVisitThinString:
+ EvacuateThinString(map, slot, ThinString::cast(source), size);
+ break;
+ case kVisitShortcutCandidate:
+ EvacuateShortcutCandidate(map, slot, ConsString::cast(source), size);
+ break;
+ default:
+ EvacuateObjectDefault(map, slot, source, size);
+ break;
+ }
+}
+
void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
- DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+ DCHECK(heap()->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
@@ -28,13 +203,11 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- object->GetHeap()->UpdateAllocationSite<Heap::kGlobal>(
- object, object->GetHeap()->global_pretenuring_feedback_);
-
+ Map* map = first_word.ToMap();
// AllocationMementos are unrooted and shouldn't survive a scavenge
- DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
+ DCHECK_NE(heap()->allocation_memento_map(), map);
// Call the slow part of scavenge object.
- return ScavengeObjectSlow(p, object);
+ EvacuateObject(p, map, object);
}
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
@@ -61,13 +234,14 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
return REMOVE_SLOT;
}
-// static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
- Object** p) {
- Object* object = *p;
- if (!heap->InNewSpace(object)) return;
- Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
+void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
+ Object** end) {
+ for (Object** p = start; p < end; p++) {
+ Object* object = *p;
+ if (!heap_->InNewSpace(object)) continue;
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index e211388729..41c6176ee3 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,459 +4,140 @@
#include "src/heap/scavenger.h"
-#include "src/contexts.h"
#include "src/heap/heap-inl.h"
-#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
-#include "src/isolate.h"
-#include "src/log.h"
-#include "src/profiler/heap-profiler.h"
+#include "src/objects-body-descriptors-inl.h"
namespace v8 {
namespace internal {
-enum LoggingAndProfiling {
- LOGGING_AND_PROFILING_ENABLED,
- LOGGING_AND_PROFILING_DISABLED
-};
-
-
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-template <MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
+class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
- static void Initialize() {
- table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
- table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
- table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
- table_.Register(kVisitThinString, &EvacuateThinString);
- table_.Register(kVisitByteArray, &EvacuateByteArray);
- table_.Register(kVisitFixedArray, &EvacuateFixedArray);
- table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
- table_.Register(kVisitFixedTypedArrayBase, &EvacuateFixedTypedArray);
- table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
- table_.Register(kVisitJSArrayBuffer,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(
- kVisitNativeContext,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- Context::kSize>);
-
- table_.Register(
- kVisitConsString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- ConsString::kSize>);
-
- table_.Register(
- kVisitSlicedString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- SlicedString::kSize>);
-
- table_.Register(
- kVisitSymbol,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- Symbol::kSize>);
-
- table_.Register(
- kVisitSharedFunctionInfo,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- SharedFunctionInfo::kSize>);
-
- table_.Register(kVisitJSWeakCollection,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSRegExp,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
-
- table_.Register(kVisitDataObject,
- &ObjectEvacuationStrategy<DATA_OBJECT>::Visit);
-
- table_.Register(kVisitJSObjectFast,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- table_.Register(kVisitJSObject,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSApiObject,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitStruct,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- }
-
- static VisitorDispatchTable<ScavengingCallback>* GetTable() {
- return &table_;
- }
-
- static void EvacuateThinStringNoShortcut(Map* map, HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- ThinString::kSize);
- }
-
- private:
- enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
-
- static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
- bool should_record = false;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
- should_record = should_record || FLAG_log_gc;
- if (should_record) {
- if (heap->new_space()->Contains(obj)) {
- heap->new_space()->RecordAllocation(obj);
- } else {
- heap->new_space()->RecordPromotion(obj);
+ IterateAndScavengePromotedObjectsVisitor(Heap* heap, Scavenger* scavenger,
+ bool record_slots)
+ : heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}
+
+ inline void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
+ for (Address slot_address = reinterpret_cast<Address>(start);
+ slot_address < reinterpret_cast<Address>(end);
+ slot_address += kPointerSize) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* target = *slot;
+
+ if (target->IsHeapObject()) {
+ if (heap_->InFromSpace(target)) {
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(slot),
+ HeapObject::cast(target));
+ target = *slot;
+ if (heap_->InNewSpace(target)) {
+ SLOW_DCHECK(target->IsHeapObject());
+ SLOW_DCHECK(heap_->InToSpace(target));
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
+ slot_address);
+ }
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target)));
+ } else if (record_slots_ &&
+ MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target))) {
+ heap_->mark_compact_collector()->RecordSlot(host, slot, target);
+ }
}
}
}
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
- HeapObject* target, int size)) {
- // If we migrate into to-space, then the to-space top pointer should be
- // right after the target object. Incorporate double alignment
- // over-allocation.
- DCHECK(!heap->InToSpace(target) ||
- target->address() + size == heap->new_space()->top() ||
- target->address() + size + kPointerSize == heap->new_space()->top());
-
- // Make sure that we do not overwrite the promotion queue which is at
- // the end of to-space.
- DCHECK(!heap->InToSpace(target) ||
- heap->promotion_queue()->IsBelowPromotionQueue(
- heap->new_space()->top()));
-
- // Copy the content of source to target.
- heap->CopyBlock(target->address(), source->address(), size);
-
- // Set the forwarding address.
- source->set_map_word(MapWord::FromForwardingAddress(target));
-
- if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
- // Update NewSpace stats if necessary.
- RecordCopiedObject(heap, target);
- heap->OnMoveEvent(target, source, size);
- }
-
- if (marks_handling == TRANSFER_MARKS) {
- heap->incremental_marking()->TransferColor(source, target);
- }
- }
-
- template <AllocationAlignment alignment>
- static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- Heap* heap = map->GetHeap();
-
- DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation =
- heap->new_space()->AllocateRaw(object_size, alignment);
-
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- // Order is important here: Set the promotion limit before storing a
- // filler for double alignment or migrating the object. Otherwise we
- // may end up overwriting promotion queue entries when we migrate the
- // object.
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-
- MigrateObject(heap, object, target, object_size);
-
- // Update slot to new target.
- *slot = target;
-
- heap->IncrementSemiSpaceCopiedObjectSize(object_size);
- return true;
- }
- return false;
- }
-
-
- template <ObjectContents object_contents, AllocationAlignment alignment>
- static inline bool PromoteObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- Heap* heap = map->GetHeap();
-
- AllocationResult allocation =
- heap->old_space()->AllocateRaw(object_size, alignment);
-
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- DCHECK(ObjectMarking::IsWhite(
- target, heap->mark_compact_collector()->marking_state(target)));
- MigrateObject(heap, object, target, object_size);
-
- // Update slot to new target using CAS. A concurrent sweeper thread my
- // filter the slot concurrently.
- HeapObject* old = *slot;
- base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot),
- reinterpret_cast<base::AtomicWord>(old),
- reinterpret_cast<base::AtomicWord>(target));
-
- if (object_contents == POINTER_OBJECT) {
- heap->promotion_queue()->insert(target, object_size);
- }
- heap->IncrementPromotedObjectsSize(object_size);
- return true;
- }
- return false;
- }
-
- template <ObjectContents object_contents, AllocationAlignment alignment>
- static inline void EvacuateObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
- SLOW_DCHECK(object->Size() == object_size);
- Heap* heap = map->GetHeap();
-
- if (!heap->ShouldBePromoted(object->address(), object_size)) {
- // A semi-space copy may fail due to fragmentation. In that case, we
- // try to promote the object.
- if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
- return;
- }
- }
-
- if (PromoteObject<object_contents, alignment>(map, slot, object,
- object_size)) {
- return;
- }
+ inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final {
+ // Black allocation is not enabled during Scavenges.
+ DCHECK(!heap_->incremental_marking()->black_allocation());
- // If promotion failed, we try to copy the object to the other semi-space
- if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
-
- FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
- }
-
- static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
-
- if (marks_handling == IGNORE_MARKS) return;
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- HeapObject* target = map_word.ToForwardingAddress();
-
- // TODO(mlippautz): Notify collector of this object so we don't have to
- // retrieve the state our of thin air.
- if (ObjectMarking::IsBlack(target, MarkingState::Internal(target))) {
- // This object is black and it might not be rescanned by marker.
- // We should explicitly record code entry slot for compaction because
- // promotion queue processing (IteratePromotedObjectPointers) will
- // miss it as it is not HeapObject-tagged.
- Address code_entry_slot =
- target->address() + JSFunction::kCodeEntryOffset;
+ if (ObjectMarking::IsBlack(host, MarkingState::Internal(host))) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
- target, code_entry_slot, code);
+ heap_->mark_compact_collector()->RecordCodeEntrySlot(
+ host, code_entry_slot, code);
}
}
- static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
- int object_size = FixedArray::SizeFor(length);
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
- }
-
- static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateByteArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
-
- static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqOneByteString::cast(object)
- ->SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
+ private:
+ Heap* const heap_;
+ Scavenger* const scavenger_;
+ const bool record_slots_;
+};
- static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)
- ->SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
+  // We are not collecting slots on new space objects during mutation, thus we
+  // have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects. Grey
+  // objects' slots would be rescanned anyway, and a white object might not
+  // survive until the end of the collection, so recording its slots would
+  // violate the invariant.
+ const bool record_slots =
+ heap()->incremental_marking()->IsCompacting() &&
+ ObjectMarking::IsBlack(target, MarkingState::Internal(target));
+ IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
+ if (target->IsJSFunction()) {
+    // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots
+    // for these links are recorded during the processing of weak lists.
+ JSFunction::BodyDescriptorWeak::IterateBody(target, size, &visitor);
+ } else {
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
}
+}
- static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
- HeapObject* object) {
- DCHECK(IsShortcutCandidate(map->instance_type()));
-
- Heap* heap = map->GetHeap();
-
- if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
- HeapObject* first =
- HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
- *slot = first;
-
- if (!heap->InNewSpace(first)) {
- object->set_map_word(MapWord::FromForwardingAddress(first));
- return;
- }
-
- MapWord first_word = first->map_word();
- if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
-
- *slot = target;
- object->set_map_word(MapWord::FromForwardingAddress(target));
- return;
+void Scavenger::Process() {
+  // Threshold for switching to processing the promotion list, to avoid
+  // allocating too much backing store in the worklist.
+ const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
+ ScavengeVisitor scavenge_visitor(heap(), this);
+
+ bool done;
+ do {
+ done = true;
+ AddressRange range;
+ while ((promotion_list_.LocalPushSegmentSize() <
+ kProcessPromotionListThreshold) &&
+ copied_list_.Pop(&range)) {
+ for (Address current = range.first; current < range.second;) {
+ HeapObject* object = HeapObject::FromAddress(current);
+ int size = object->Size();
+ scavenge_visitor.Visit(object);
+ current += size;
}
-
- Scavenger::ScavengeObjectSlow(slot, first);
- object->set_map_word(MapWord::FromForwardingAddress(*slot));
- return;
- }
-
- int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void EvacuateThinString(Map* map, HeapObject** slot,
- HeapObject* object) {
- if (marks_handling == IGNORE_MARKS) {
- HeapObject* actual = ThinString::cast(object)->actual();
- *slot = actual;
- // ThinStrings always refer to internalized strings, which are
- // always in old space.
- DCHECK(!map->GetHeap()->InNewSpace(actual));
- object->set_map_word(MapWord::FromForwardingAddress(actual));
- return;
- }
-
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- ThinString::kSize);
- }
-
- template <ObjectContents object_contents>
- class ObjectEvacuationStrategy {
- public:
- template <int object_size>
- static inline void VisitSpecialized(Map* map, HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<object_contents, kWordAligned>(map, slot, object,
- object_size);
+ done = false;
}
-
- static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
- int object_size = map->instance_size();
- EvacuateObject<object_contents, kWordAligned>(map, slot, object,
- object_size);
+ ObjectAndSize object_and_size;
+ while (promotion_list_.Pop(&object_and_size)) {
+ HeapObject* target = object_and_size.first;
+ int size = object_and_size.second;
+ DCHECK(!target->IsMap());
+ IterateAndScavengePromotedObject(target, size);
+ done = false;
}
- };
-
- static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-template <MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
-// static
-void Scavenger::Initialize() {
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ } while (!done);
}
-
-// static
-void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
- MapWord first_word = object->map_word();
- SLOW_DCHECK(!first_word.IsForwardingAddress());
- Map* map = first_word.ToMap();
- Scavenger* scavenger = map->GetHeap()->scavenge_collector_;
- scavenger->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
-}
-
-
-void Scavenger::SelectScavengingVisitorsTable() {
- bool logging_and_profiling =
- FLAG_verify_predictable || isolate()->logger()->is_logging() ||
- isolate()->is_profiling() ||
- (isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_tracking_object_moves());
-
- if (!heap()->incremental_marking()->IsMarking()) {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
- } else {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
+void Scavenger::RecordCopiedObject(HeapObject* obj) {
+ bool should_record = FLAG_log_gc;
+#ifdef DEBUG
+ should_record = FLAG_heap_stats;
+#endif
+ if (should_record) {
+ if (heap()->new_space()->Contains(obj)) {
+ heap()->new_space()->RecordAllocation(obj);
} else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
-
- if (heap()->incremental_marking()->IsCompacting()) {
- // When compacting forbid short-circuiting of cons-strings.
- // Scavenging code relies on the fact that new space object
- // can't be evacuated into evacuation candidate but
- // short-circuiting violates this assumption.
- scavenging_visitors_table_.Register(
- kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(kVisitConsString));
- scavenging_visitors_table_.Register(
- kVisitThinString,
- &ScavengingVisitor<TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::
- EvacuateThinStringNoShortcut);
+ heap()->new_space()->RecordPromotion(obj);
}
}
}
-
-Isolate* Scavenger::isolate() { return heap()->isolate(); }
+void Scavenger::Finalize() {
+ heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+ heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
+ heap()->IncrementPromotedObjectsSize(promoted_size_);
+ allocator_.Finalize();
+}
void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
ScavengePointer(p);
@@ -472,8 +153,8 @@ void RootScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
if (!heap_->InNewSpace(object)) return;
- Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
}
} // namespace internal
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 09f2955651..869e4ad5f3 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -5,67 +5,173 @@
#ifndef V8_HEAP_SCAVENGER_H_
#define V8_HEAP_SCAVENGER_H_
+#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/slot-set.h"
+#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
-typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
- HeapObject* object);
+static const int kCopiedListSegmentSize = 64;
+static const int kPromotionListSegmentSize = 64;
+
+using AddressRange = std::pair<Address, Address>;
+using CopiedList = Worklist<AddressRange, kCopiedListSegmentSize>;
+using ObjectAndSize = std::pair<HeapObject*, int>;
+using PromotionList = Worklist<ObjectAndSize, kPromotionListSegmentSize>;
+
+// A list of copied ranges. Keeps the last consecutive range local and announces
+// all other ranges to a global work list.
+class CopiedRangesList {
+ public:
+ CopiedRangesList(CopiedList* copied_list, int task_id)
+ : current_start_(nullptr),
+ current_end_(nullptr),
+ copied_list_(copied_list, task_id) {}
+
+ ~CopiedRangesList() {
+ CHECK_NULL(current_start_);
+ CHECK_NULL(current_end_);
+ }
+
+ void Insert(HeapObject* object, int size) {
+ const Address object_address = object->address();
+ if (current_end_ != object_address) {
+ if (current_start_ != nullptr) {
+ copied_list_.Push(AddressRange(current_start_, current_end_));
+ }
+ current_start_ = object_address;
+ current_end_ = current_start_ + size;
+ return;
+ }
+ DCHECK_EQ(current_end_, object_address);
+ current_end_ += size;
+ return;
+ }
+
+ bool Pop(AddressRange* entry) {
+ if (copied_list_.Pop(entry)) {
+ return true;
+ } else if (current_start_ != nullptr) {
+ *entry = AddressRange(current_start_, current_end_);
+ current_start_ = current_end_ = nullptr;
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ Address current_start_;
+ Address current_end_;
+ CopiedList::View copied_list_;
+};
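A sketch of the coalescing behavior, not part of the patch; the objects and task id below are placeholders.

CopiedList copied_list;
CopiedRangesList ranges(&copied_list, 0 /* task_id */);

// Back-to-back copies extend the same local range; a non-adjacent copy
// publishes the finished range to the shared worklist and starts a new one.
ranges.Insert(first, first->Size());
ranges.Insert(adjacent, adjacent->Size());
ranges.Insert(elsewhere, elsewhere->Size());

AddressRange range;
while (ranges.Pop(&range)) {
  // Process objects in [range.first, range.second).
}

Pop drains the global list first and hands out the pending local range last, so the destructor's CHECKs hold once the loop finishes.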
class Scavenger {
public:
- explicit Scavenger(Heap* heap) : heap_(heap) {}
+ Scavenger(Heap* heap, bool is_logging, bool is_incremental_marking,
+ CopiedList* copied_list, PromotionList* promotion_list, int task_id)
+ : heap_(heap),
+ promotion_list_(promotion_list, task_id),
+ copied_list_(copied_list, task_id),
+ local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
+ copied_size_(0),
+ promoted_size_(0),
+ allocator_(heap),
+ is_logging_(is_logging),
+ is_incremental_marking_(is_incremental_marking) {}
+
+ // Scavenges an object |object| referenced from slot |p|. |object| is required
+ // to be in from space.
+ inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+ // Potentially scavenges an object referenced from |slot_address| if it is
+ // indeed a HeapObject and resides in from space.
+ inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+ Address slot_address);
+
+ // Processes remaining work (=objects) after single objects have been
+ // manually scavenged using ScavengeObject or CheckAndScavengeObject.
+ void Process();
+
+ // Finalize the Scavenger. Needs to be called from the main thread.
+ void Finalize();
- // Initializes static visitor dispatch tables.
- static void Initialize();
+ private:
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256;
- // Callback function passed to Heap::Iterate etc. Copies an object if
- // necessary, the object might be promoted to an old space. The caller must
- // ensure the precondition that the object is (a) a heap object and (b) in
- // the heap's from space.
- static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- static inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
- Address slot_address);
+ inline Heap* heap() { return heap_; }
- // Slow part of {ScavengeObject} above.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+ // Copies |source| to |target| and sets the forwarding pointer in |source|.
+ V8_INLINE void MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+ int size);
- // Chooses an appropriate static visitor table depending on the current state
- // of the heap (i.e. incremental marking, logging and profiling).
- void SelectScavengingVisitorsTable();
+ V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size);
- Isolate* isolate();
- Heap* heap() { return heap_; }
+ V8_INLINE bool PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
+ int object_size);
- private:
- Heap* heap_;
- VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+ V8_INLINE void EvacuateObject(HeapObject** slot, Map* map,
+ HeapObject* source);
+
+ // Different cases for object evacuation.
+
+ V8_INLINE void EvacuateObjectDefault(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size);
+
+ V8_INLINE void EvacuateJSFunction(Map* map, HeapObject** slot,
+ JSFunction* object, int object_size);
+
+ inline void EvacuateThinString(Map* map, HeapObject** slot,
+ ThinString* object, int object_size);
+
+ inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+ ConsString* object, int object_size);
+
+ void IterateAndScavengePromotedObject(HeapObject* target, int size);
+
+ void RecordCopiedObject(HeapObject* obj);
+
+ Heap* const heap_;
+ PromotionList::View promotion_list_;
+ CopiedRangesList copied_list_;
+ base::HashMap local_pretenuring_feedback_;
+ size_t copied_size_;
+ size_t promoted_size_;
+ LocalAllocator allocator_;
+ bool is_logging_;
+ bool is_incremental_marking_;
};
// Helper class for turning the scavenger into an object visitor that is also
// filtering out non-HeapObjects and objects which do not reside in new space.
-class RootScavengeVisitor : public RootVisitor {
+class RootScavengeVisitor final : public RootVisitor {
public:
- explicit RootScavengeVisitor(Heap* heap) : heap_(heap) {}
+ RootScavengeVisitor(Heap* heap, Scavenger* scavenger)
+ : heap_(heap), scavenger_(scavenger) {}
- void VisitRootPointer(Root root, Object** p) override;
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointer(Root root, Object** p) final;
+ void VisitRootPointers(Root root, Object** start, Object** end) final;
private:
- inline void ScavengePointer(Object** p);
+ void ScavengePointer(Object** p);
- Heap* heap_;
+ Heap* const heap_;
+ Scavenger* const scavenger_;
};
-
-// Helper class for turning the scavenger into an object visitor that is also
-// filtering out non-HeapObjects and objects which do not reside in new space.
-class StaticScavengeVisitor
- : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
- static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
+ ScavengeVisitor(Heap* heap, Scavenger* scavenger)
+ : heap_(heap), scavenger_(scavenger) {}
+
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final;
+
+ private:
+ Heap* const heap_;
+ Scavenger* const scavenger_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/sequential-marking-deque.cc b/deps/v8/src/heap/sequential-marking-deque.cc
index a715b3fd85..034ad67dfe 100644
--- a/deps/v8/src/heap/sequential-marking-deque.cc
+++ b/deps/v8/src/heap/sequential-marking-deque.cc
@@ -13,7 +13,8 @@ namespace v8 {
namespace internal {
void SequentialMarkingDeque::SetUp() {
- backing_store_ = new base::VirtualMemory(kMaxSize);
+ backing_store_ =
+ new base::VirtualMemory(kMaxSize, heap_->GetRandomMmapAddr());
backing_store_committed_size_ = 0;
if (backing_store_ == nullptr) {
V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
@@ -35,8 +36,7 @@ void SequentialMarkingDeque::StartUsing() {
size_t size = FLAG_force_marking_deque_overflows
? 64 * kPointerSize
: backing_store_committed_size_;
- DCHECK(
- base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+ DCHECK(base::bits::IsPowerOfTwo(static_cast<uint32_t>(size / kPointerSize)));
mask_ = static_cast<int>((size / kPointerSize) - 1);
top_ = bottom_ = 0;
overflowed_ = false;
diff --git a/deps/v8/src/heap/sequential-marking-deque.h b/deps/v8/src/heap/sequential-marking-deque.h
index 86098dd730..2ae99c887b 100644
--- a/deps/v8/src/heap/sequential-marking-deque.h
+++ b/deps/v8/src/heap/sequential-marking-deque.h
@@ -72,25 +72,12 @@ class SequentialMarkingDeque {
}
INLINE(HeapObject* Pop()) {
- DCHECK(!IsEmpty());
+ if (IsEmpty()) return nullptr;
top_ = ((top_ - 1) & mask_);
HeapObject* object = array_[top_];
return object;
}
- // Unshift the object into the marking stack if there is room, otherwise mark
- // the deque as overflowed and wait for a rescan of the heap.
- INLINE(bool Unshift(HeapObject* object)) {
- if (IsFull()) {
- SetOverflowed();
- return false;
- } else {
- bottom_ = ((bottom_ - 1) & mask_);
- array_[bottom_] = object;
- return true;
- }
- }
-
// Calls the specified callback on each element of the deque and replaces
// the element with the result of the callback. If the callback returns
// nullptr then the element is removed from the deque.
@@ -100,9 +87,7 @@ class SequentialMarkingDeque {
int i = bottom_;
int new_top = bottom_;
while (i != top_) {
- HeapObject* object = callback(array_[i]);
- if (object) {
- array_[new_top] = object;
+ if (callback(array_[i], &array_[new_top])) {
new_top = (new_top + 1) & mask_;
}
i = (i + 1) & mask_;
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 7612199c3c..64ba266f21 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -36,7 +36,7 @@ class SlotSet : public Malloced {
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
- bucket[i].SetValue(nullptr);
+ StoreBucket(&buckets_[i], nullptr);
}
}
@@ -52,16 +52,28 @@ class SlotSet : public Malloced {
// The slot offset specifies a slot at address page_start_ + slot_offset.
// This method should only be called on the main thread because concurrent
// allocation of the bucket is not thread-safe.
+ //
+  // AccessMode defines whether there can be concurrent access to the buckets
+  // or not.
+ template <AccessMode access_mode = AccessMode::ATOMIC>
void Insert(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
- if (current_bucket == nullptr) {
- current_bucket = AllocateBucket();
- bucket[bucket_index].SetValue(current_bucket);
+ Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
+ if (bucket == nullptr) {
+ bucket = AllocateBucket();
+ if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
+ DeleteArray<uint32_t>(bucket);
+ bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
+ }
}
- if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
- current_bucket[cell_index].SetBit(bit_index);
+ // Check that monotonicity is preserved, i.e., once a bucket is set we do
+ // not free it concurrently.
+ DCHECK_NOT_NULL(bucket);
+ DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
+ uint32_t mask = 1u << bit_index;
+ if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
+ SetCellBits<access_mode>(&bucket[cell_index], mask);
}
}
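A minimal sketch of the reworked bucket API, not from the patch; the offset is arbitrary and NON_ATOMIC is assumed to be the single-threaded counterpart of ATOMIC.

SlotSet slots;
int offset = 24 * kPointerSize;         // slot at page_start_ + offset
slots.Insert<AccessMode::NON_ATOMIC>(offset);
bool present = slots.Contains(offset);  // true
slots.Remove(offset);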
@@ -70,25 +82,21 @@ class SlotSet : public Malloced {
bool Contains(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
- if (current_bucket == nullptr) {
- return false;
- }
- return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket == nullptr) return false;
+ return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
- if (current_bucket != nullptr) {
- uint32_t cell = current_bucket[cell_index].Value();
- if (cell) {
- uint32_t bit_mask = 1u << bit_index;
- if (cell & bit_mask) {
- current_bucket[cell_index].ClearBit(bit_index);
- }
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
+ uint32_t cell = LoadCell(&bucket[cell_index]);
+ uint32_t bit_mask = 1u << bit_index;
+ if (cell & bit_mask) {
+ ClearCellBits(&bucket[cell_index], bit_mask);
}
}
}
@@ -104,18 +112,24 @@ class SlotSet : public Malloced {
SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
uint32_t start_mask = (1u << start_bit) - 1;
uint32_t end_mask = ~((1u << end_bit) - 1);
+ Bucket bucket;
if (start_bucket == end_bucket && start_cell == end_cell) {
- ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
+ bucket = LoadBucket(&buckets_[start_bucket]);
+ if (bucket != nullptr) {
+ ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
+ }
return;
}
int current_bucket = start_bucket;
int current_cell = start_cell;
- ClearCell(current_bucket, current_cell, ~start_mask);
+ bucket = LoadBucket(&buckets_[current_bucket]);
+ if (bucket != nullptr) {
+ ClearCellBits(&bucket[current_cell], ~start_mask);
+ }
current_cell++;
- base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
if (current_bucket < end_bucket) {
- if (bucket_ptr != nullptr) {
- ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
+ if (bucket != nullptr) {
+ ClearBucket(bucket, current_cell, kCellsPerBucket);
}
// The rest of the current bucket is cleared.
// Move on to the next bucket.
@@ -131,37 +145,35 @@ class SlotSet : public Malloced {
ReleaseBucket(current_bucket);
} else {
DCHECK(mode == KEEP_EMPTY_BUCKETS);
- bucket_ptr = bucket[current_bucket].Value();
- if (bucket_ptr) {
- ClearBucket(bucket_ptr, 0, kCellsPerBucket);
+ bucket = LoadBucket(&buckets_[current_bucket]);
+ if (bucket != nullptr) {
+ ClearBucket(bucket, 0, kCellsPerBucket);
}
}
current_bucket++;
}
// All buckets between start_bucket and end_bucket are cleared.
- bucket_ptr = bucket[current_bucket].Value();
+ bucket = LoadBucket(&buckets_[current_bucket]);
DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets || bucket_ptr == nullptr) {
+ if (current_bucket == kBuckets || bucket == nullptr) {
return;
}
while (current_cell < end_cell) {
- bucket_ptr[current_cell].SetValue(0);
+ StoreCell(&bucket[current_cell], 0);
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
DCHECK(current_bucket == end_bucket && current_cell == end_cell);
- ClearCell(end_bucket, end_cell, ~end_mask);
+ ClearCellBits(&bucket[end_cell], ~end_mask);
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
bool Lookup(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- if (bucket[bucket_index].Value() != nullptr) {
- uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
- return (cell & (1u << bit_index)) != 0;
- }
- return false;
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket == nullptr) return false;
+ return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
}
// Iterate over all slots in the set and for each slot invoke the callback.
@@ -178,14 +190,13 @@ class SlotSet : public Malloced {
int Iterate(Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- base::AtomicValue<uint32_t>* current_bucket =
- bucket[bucket_index].Value();
- if (current_bucket != nullptr) {
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
int in_bucket_count = 0;
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
- if (current_bucket[i].Value()) {
- uint32_t cell = current_bucket[i].Value();
+ uint32_t cell = LoadCell(&bucket[i]);
+ if (cell) {
uint32_t old_cell = cell;
uint32_t mask = 0;
while (cell) {
@@ -201,15 +212,7 @@ class SlotSet : public Malloced {
}
uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
- while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
- // If TrySetValue fails, the cell must have changed. We just
- // have to read the current value of the cell, & it with the
- // computed value, and retry. We can do this, because this
- // method will only be called on the main thread and filtering
- // threads will only remove slots.
- old_cell = current_bucket[i].Value();
- new_cell = old_cell & ~mask;
- }
+ ClearCellBits(&bucket[i], mask);
}
}
}
@@ -222,16 +225,36 @@ class SlotSet : public Malloced {
return new_count;
}
+ void PreFreeEmptyBuckets() {
+ for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
+ bool found_non_empty_cell = false;
+ int cell_offset = bucket_index * kBitsPerBucket;
+ for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
+ if (LoadCell(&bucket[i])) {
+ found_non_empty_cell = true;
+ break;
+ }
+ }
+ if (!found_non_empty_cell) {
+ PreFreeEmptyBucket(bucket_index);
+ }
+ }
+ }
+ }
+
void FreeToBeFreedBuckets() {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
while (!to_be_freed_buckets_.empty()) {
- base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
+ Bucket top = to_be_freed_buckets_.top();
to_be_freed_buckets_.pop();
- DeleteArray<base::AtomicValue<uint32_t>>(top);
+ DeleteArray<uint32_t>(top);
}
}
private:
+ typedef uint32_t* Bucket;
static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
@@ -241,51 +264,88 @@ class SlotSet : public Malloced {
static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
- base::AtomicValue<uint32_t>* AllocateBucket() {
- base::AtomicValue<uint32_t>* result =
- NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
+ Bucket AllocateBucket() {
+ Bucket result = NewArray<uint32_t>(kCellsPerBucket);
for (int i = 0; i < kCellsPerBucket; i++) {
- result[i].SetValue(0);
+ result[i] = 0;
}
return result;
}
- void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
- int end_cell) {
+ void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
DCHECK_GE(start_cell, 0);
DCHECK_LE(end_cell, kCellsPerBucket);
int current_cell = start_cell;
while (current_cell < kCellsPerBucket) {
- bucket[current_cell].SetValue(0);
+ StoreCell(&bucket[current_cell], 0);
current_cell++;
}
}
void PreFreeEmptyBucket(int bucket_index) {
- base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
- if (bucket_ptr != nullptr) {
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ if (bucket != nullptr) {
base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
- to_be_freed_buckets_.push(bucket_ptr);
- bucket[bucket_index].SetValue(nullptr);
+ to_be_freed_buckets_.push(bucket);
+ StoreBucket(&buckets_[bucket_index], nullptr);
}
}
void ReleaseBucket(int bucket_index) {
- DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
- bucket[bucket_index].SetValue(nullptr);
+ Bucket bucket = LoadBucket(&buckets_[bucket_index]);
+ StoreBucket(&buckets_[bucket_index], nullptr);
+ DeleteArray<uint32_t>(bucket);
}
- void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
- if (bucket_index < kBuckets) {
- base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
- if (cells != nullptr) {
- uint32_t cell = cells[cell_index].Value();
- if (cell) cells[cell_index].SetBits(0, mask);
- }
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ Bucket LoadBucket(Bucket* bucket) {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicWord::Acquire_Load(bucket);
+ return *bucket;
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void StoreBucket(Bucket* bucket, Bucket value) {
+ if (access_mode == AccessMode::ATOMIC) {
+ base::AsAtomicWord::Release_Store(bucket, value);
+ } else {
+ *bucket = value;
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ bool SwapInNewBucket(Bucket* bucket, Bucket value) {
+ if (access_mode == AccessMode::ATOMIC) {
+ return base::AsAtomicWord::Release_CompareAndSwap(bucket, nullptr,
+ value) == nullptr;
+ } else {
+ DCHECK_NULL(*bucket);
+ *bucket = value;
+ return true;
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ uint32_t LoadCell(uint32_t* cell) {
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomic32::Acquire_Load(cell);
+ return *cell;
+ }
+
+ void StoreCell(uint32_t* cell, uint32_t value) {
+ base::AsAtomic32::Release_Store(cell, value);
+ }
+
+ void ClearCellBits(uint32_t* cell, uint32_t mask) {
+ base::AsAtomic32::SetBits(cell, 0u, mask);
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void SetCellBits(uint32_t* cell, uint32_t mask) {
+ if (access_mode == AccessMode::ATOMIC) {
+ base::AsAtomic32::SetBits(cell, mask, mask);
} else {
- // GCC bug 59124: Emits wrong warnings
- // "array subscript is above array bounds"
- UNREACHABLE();
+ *cell = (*cell & ~mask) | mask;
}
}
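ClearCellBits() and SetCellBits() above lean on base::AsAtomic32::SetBits(cell, bits, mask). The snippet below shows, with std::atomic, the compare-and-swap loop such a helper is assumed to perform; it is a stand-in for illustration, not V8's implementation:

#include <atomic>
#include <cstdint>

// Atomically makes the bits selected by |mask| equal to |bits| and leaves the
// remaining bits untouched, retrying on contention.
inline void SetBitsSketch(std::atomic<uint32_t>* cell, uint32_t bits,
                          uint32_t mask) {
  uint32_t old_value = cell->load(std::memory_order_relaxed);
  uint32_t new_value;
  do {
    new_value = (old_value & ~mask) | (bits & mask);
    if (new_value == old_value) return;  // nothing to change
  } while (!cell->compare_exchange_weak(old_value, new_value,
                                        std::memory_order_acq_rel));
}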
@@ -300,10 +360,10 @@ class SlotSet : public Malloced {
*bit_index = slot & (kBitsPerCell - 1);
}
- base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
+ Bucket buckets_[kBuckets];
Address page_start_;
base::Mutex to_be_freed_buckets_mutex_;
- std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
+ std::stack<uint32_t*> to_be_freed_buckets_;
};
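Plugging in the constants above gives a sense of the footprint. Assuming a 64-bit build with 512 KB pages (kPageSizeBits == 19, kPointerSize == 8; consistent with the page-size comment further down in spaces.h):

  kMaxSlots  = (1 << 19) / 8         = 65536 slots per page
  kBuckets   = 65536 / 32 / 32       = 64 bucket pointers
  buckets_   = 64 * sizeof(uint32_t*) = 512 bytes inline per SlotSet
  one bucket = 32 cells * 4 bytes     = 128 bytes, covering 1024 slots (8 KB of the page)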
enum SlotType {
@@ -330,62 +390,65 @@ class TypedSlotSet {
typedef std::pair<SlotType, uint32_t> TypeAndOffset;
struct TypedSlot {
- TypedSlot() {
- type_and_offset_.SetValue(0);
- host_offset_.SetValue(0);
- }
+ TypedSlot() : type_and_offset_(0), host_offset_(0) {}
- TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
- type_and_offset_.SetValue(TypeField::encode(type) |
- OffsetField::encode(offset));
- host_offset_.SetValue(host_offset);
- }
+ TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
+ : type_and_offset_(TypeField::encode(type) |
+ OffsetField::encode(offset)),
+ host_offset_(host_offset) {}
bool operator==(const TypedSlot other) {
- return type_and_offset_.Value() == other.type_and_offset_.Value() &&
- host_offset_.Value() == other.host_offset_.Value();
+ return type_and_offset() == other.type_and_offset() &&
+ host_offset() == other.host_offset();
}
bool operator!=(const TypedSlot other) { return !(*this == other); }
- SlotType type() { return TypeField::decode(type_and_offset_.Value()); }
+ SlotType type() const { return TypeField::decode(type_and_offset()); }
- uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }
+ uint32_t offset() const { return OffsetField::decode(type_and_offset()); }
- TypeAndOffset GetTypeAndOffset() {
- uint32_t type_and_offset = type_and_offset_.Value();
- return std::make_pair(TypeField::decode(type_and_offset),
- OffsetField::decode(type_and_offset));
+ TypeAndOffset GetTypeAndOffset() const {
+ uint32_t t_and_o = type_and_offset();
+ return std::make_pair(TypeField::decode(t_and_o),
+ OffsetField::decode(t_and_o));
}
- uint32_t host_offset() { return host_offset_.Value(); }
+ uint32_t type_and_offset() const {
+ return base::AsAtomic32::Acquire_Load(&type_and_offset_);
+ }
+
+ uint32_t host_offset() const {
+ return base::AsAtomic32::Acquire_Load(&host_offset_);
+ }
void Set(TypedSlot slot) {
- type_and_offset_.SetValue(slot.type_and_offset_.Value());
- host_offset_.SetValue(slot.host_offset_.Value());
+ base::AsAtomic32::Release_Store(&type_and_offset_,
+ slot.type_and_offset());
+ base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
}
void Clear() {
- type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
- OffsetField::encode(0));
- host_offset_.SetValue(0);
+ base::AsAtomic32::Release_Store(
+ &type_and_offset_,
+ TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
+ base::AsAtomic32::Release_Store(&host_offset_, 0);
}
- base::AtomicValue<uint32_t> type_and_offset_;
- base::AtomicValue<uint32_t> host_offset_;
+ uint32_t type_and_offset_;
+ uint32_t host_offset_;
};
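type_and_offset_ packs the slot type and the in-page offset into a single uint32_t. Given kMaxOffset = 1 << 29 below and the STATIC_ASSERT(CLEARED_SLOT < 8) in Iterate(), the split is presumably 29 offset bits plus 3 type bits; only that split follows from the constants here, and the field order in this sketch is a guess for illustration:

#include <cstdint>

constexpr uint32_t kOffsetBits = 29;
constexpr uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

inline uint32_t PackSketch(uint32_t type, uint32_t offset) {
  return (type << kOffsetBits) | (offset & kOffsetMask);
}
inline uint32_t UnpackTypeSketch(uint32_t packed) { return packed >> kOffsetBits; }
inline uint32_t UnpackOffsetSketch(uint32_t packed) { return packed & kOffsetMask; }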
static const int kMaxOffset = 1 << 29;
- explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
- chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
- }
+ explicit TypedSlotSet(Address page_start)
+ : page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}
~TypedSlotSet() {
- Chunk* chunk = chunk_.Value();
+ Chunk* chunk = load_top();
while (chunk != nullptr) {
- Chunk* next = chunk->next.Value();
+ Chunk* n = chunk->next();
delete chunk;
- chunk = next;
+ chunk = n;
}
FreeToBeFreedChunks();
}
@@ -394,16 +457,16 @@ class TypedSlotSet {
// This method can only be called on the main thread.
void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
TypedSlot slot(type, host_offset, offset);
- Chunk* top_chunk = chunk_.Value();
+ Chunk* top_chunk = load_top();
if (!top_chunk) {
top_chunk = new Chunk(nullptr, kInitialBufferSize);
- chunk_.SetValue(top_chunk);
+ set_top(top_chunk);
}
if (!top_chunk->AddSlot(slot)) {
Chunk* new_top_chunk =
- new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
+ new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
bool added = new_top_chunk->AddSlot(slot);
- chunk_.SetValue(new_top_chunk);
+ set_top(new_top_chunk);
DCHECK(added);
USE(added);
}
@@ -421,18 +484,17 @@ class TypedSlotSet {
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
STATIC_ASSERT(CLEARED_SLOT < 8);
- Chunk* chunk = chunk_.Value();
+ Chunk* chunk = load_top();
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer.Value();
- int count = chunk->count.Value();
+ TypedSlot* buf = chunk->buffer();
bool empty = true;
- for (int i = 0; i < count; i++) {
+ for (int i = 0; i < chunk->count(); i++) {
// Order is important here. We have to read out the slot type last to
// observe the concurrent removal case consistently.
- Address host_addr = page_start_ + buffer[i].host_offset();
- TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
+ Address host_addr = page_start_ + buf[i].host_offset();
+ TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
SlotType type = type_and_offset.first;
if (type != CLEARED_SLOT) {
Address addr = page_start_ + type_and_offset.second;
@@ -440,26 +502,26 @@ class TypedSlotSet {
new_count++;
empty = false;
} else {
- buffer[i].Clear();
+ buf[i].Clear();
}
}
}
- Chunk* next = chunk->next.Value();
+ Chunk* n = chunk->next();
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
// We remove the chunk from the list but let it still point its next
// chunk to allow concurrent iteration.
if (previous) {
- previous->next.SetValue(next);
+ previous->set_next(n);
} else {
- chunk_.SetValue(next);
+ set_top(n);
}
base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
to_be_freed_chunks_.push(chunk);
} else {
previous = chunk;
}
- chunk = next;
+ chunk = n;
}
return new_count;
}
@@ -474,12 +536,11 @@ class TypedSlotSet {
}
void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
- Chunk* chunk = chunk_.Value();
+ Chunk* chunk = load_top();
while (chunk != nullptr) {
- TypedSlot* buffer = chunk->buffer.Value();
- int count = chunk->count.Value();
- for (int i = 0; i < count; i++) {
- uint32_t host_offset = buffer[i].host_offset();
+ TypedSlot* buf = chunk->buffer();
+ for (int i = 0; i < chunk->count(); i++) {
+ uint32_t host_offset = buf[i].host_offset();
std::map<uint32_t, uint32_t>::iterator upper_bound =
invalid_ranges.upper_bound(host_offset);
if (upper_bound == invalid_ranges.begin()) continue;
@@ -488,10 +549,10 @@ class TypedSlotSet {
upper_bound--;
DCHECK_LE(upper_bound->first, host_offset);
if (upper_bound->second > host_offset) {
- buffer[i].Clear();
+ buf[i].Clear();
}
}
- chunk = chunk->next.Value();
+ chunk = chunk->next();
}
}
@@ -508,31 +569,55 @@ class TypedSlotSet {
struct Chunk : Malloced {
explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
- count.SetValue(0);
- capacity.SetValue(chunk_capacity);
- buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
- next.SetValue(next_chunk);
+ next_ = next_chunk;
+ buffer_ = NewArray<TypedSlot>(chunk_capacity);
+ capacity_ = chunk_capacity;
+ count_ = 0;
}
+
+ ~Chunk() { DeleteArray(buffer_); }
+
bool AddSlot(TypedSlot slot) {
- int current_count = count.Value();
- if (current_count == capacity.Value()) return false;
- TypedSlot* current_buffer = buffer.Value();
+ int current_count = count();
+ if (current_count == capacity()) return false;
+ TypedSlot* current_buffer = buffer();
// Order is important here. We have to write the slot first before
// increasing the counter to guarantee that a consistent state is
// observed by concurrent threads.
current_buffer[current_count].Set(slot);
- count.SetValue(current_count + 1);
+ set_count(current_count + 1);
return true;
}
- ~Chunk() { DeleteArray(buffer.Value()); }
- base::AtomicValue<Chunk*> next;
- base::AtomicValue<int> count;
- base::AtomicValue<int> capacity;
- base::AtomicValue<TypedSlot*> buffer;
+
+ Chunk* next() const { return base::AsAtomicWord::Acquire_Load(&next_); }
+
+ void set_next(Chunk* n) {
+ return base::AsAtomicWord::Release_Store(&next_, n);
+ }
+
+ TypedSlot* buffer() const { return buffer_; }
+
+ int32_t capacity() const { return capacity_; }
+
+ int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }
+
+ void set_count(int32_t new_value) {
+ base::AsAtomic32::Release_Store(&count_, new_value);
+ }
+
+ private:
+ Chunk* next_;
+ TypedSlot* buffer_;
+ int32_t capacity_;
+ int32_t count_;
};
+ Chunk* load_top() { return base::AsAtomicWord::Acquire_Load(&top_); }
+
+ void set_top(Chunk* c) { base::AsAtomicWord::Release_Store(&top_, c); }
+
Address page_start_;
- base::AtomicValue<Chunk*> chunk_;
+ Chunk* top_;
base::Mutex to_be_freed_chunks_mutex_;
std::stack<Chunk*> to_be_freed_chunks_;
};
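AddSlot() and the readers in Iterate() follow a release/acquire publication protocol: the slot payload is written first, then count_ is release-stored, and readers acquire-load count_ before touching buffer_[0..count). A generic sketch of the same pattern with std::atomic, purely for illustration (a single writer is assumed, matching the main-thread Insert() path):

#include <atomic>

inline void Use(int) {}  // placeholder consumer

struct PublishSketch {
  int payload[16] = {};
  std::atomic<int> count{0};

  bool Add(int value) {                              // single writer
    int n = count.load(std::memory_order_relaxed);
    if (n == 16) return false;
    payload[n] = value;                              // 1. write the data
    count.store(n + 1, std::memory_order_release);   // 2. then publish it
    return true;
  }

  void ReadAll() {                                   // any number of readers
    int n = count.load(std::memory_order_acquire);   // pairs with the release store
    for (int i = 0; i < n; i++) Use(payload[i]);     // entries below n are fully visible
  }
};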
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 5b44d1dc10..0fef117b7e 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -7,10 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
#include "src/msan.h"
-#include "src/profiler/heap-profiler.h"
-#include "src/v8memory.h"
namespace v8 {
namespace internal {
@@ -94,36 +91,6 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
}
// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
- base::OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start, size_t size,
- Executability executable) {
- base::OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- base::OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-// -----------------------------------------------------------------------------
// SemiSpace
bool SemiSpace::Contains(HeapObject* o) {
@@ -169,61 +136,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
- SemiSpace* owner) {
- DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
- bool in_to_space = (owner->id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
- : MemoryChunk::IN_FROM_SPACE);
- DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
- : MemoryChunk::IN_TO_SPACE));
- Page* page = static_cast<Page*>(chunk);
- heap->incremental_marking()->SetNewSpacePageFlags(page);
- page->AllocateLocalTracker();
- if (FLAG_minor_mc) {
- page->AllocateYoungGenerationBitmap();
- MarkingState::External(page).ClearLiveness();
- }
- return page;
-}
-
-// --------------------------------------------------------------------------
-// PagedSpace
-
-template <Page::InitializationMode mode>
-Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
- PagedSpace* owner) {
- Page* page = reinterpret_cast<Page*>(chunk);
- DCHECK(page->area_size() <= kAllocatableMemory);
- DCHECK(chunk->owner() == owner);
-
- owner->IncreaseCapacity(page->area_size());
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
- // Make sure that categories are initialized before freeing the area.
- page->InitializeFreeListCategories();
- // In the case we do not free the memory, we effectively account for the whole
- // page as allocated memory that cannot be used for further allocations.
- if (mode == kFreeMemory) {
- owner->Free(page->area_start(), page->area_size());
- }
-
- return page;
-}
-
-Page* Page::ConvertNewToOld(Page* old_page) {
- DCHECK(!old_page->is_anchor());
- DCHECK(old_page->InNewSpace());
- OldSpace* old_space = old_page->heap()->old_space();
- old_page->set_owner(old_space);
- old_page->SetFlags(0, static_cast<uintptr_t>(~0));
- old_space->AccountCommitted(old_page->size());
- Page* new_page = Page::Initialize<kDoNotFreeMemory>(
- old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
- new_page->InsertAfter(old_space->anchor()->prev_page());
- return new_page;
-}
-
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
@@ -334,7 +246,6 @@ MemoryChunk* MemoryChunkIterator::next() {
break;
}
UNREACHABLE();
- return nullptr;
}
Page* FreeListCategory::page() const {
@@ -576,27 +487,6 @@ MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, Space* owner) {
- if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
- STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
- FATAL("Code page is too large.");
- }
- heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
-
- // Initialize the owner field for each contained page (except the first, which
- // is initialized by MemoryChunk::Initialize).
- for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
- addr < chunk->area_end(); addr += Page::kPageSize) {
- // Clear out kPageHeaderTag.
- Memory::Address_at(addr) = 0;
- }
-
- return static_cast<LargePage*>(chunk);
-}
-
size_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->memory_allocator()->Available());
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 3e67788828..6f4546c816 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -123,8 +123,10 @@ bool CodeRange::SetUp(size_t requested) {
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
code_range_ = new base::VirtualMemory(
- requested, Max(kCodeRangeAreaAlignment,
- static_cast<size_t>(base::OS::AllocateAlignment())));
+ requested,
+ Max(kCodeRangeAreaAlignment,
+ static_cast<size_t>(base::OS::AllocateAlignment())),
+ base::OS::GetRandomMmapAddr());
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
@@ -300,7 +302,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
highest_ever_allocated_(reinterpret_cast<void*>(0)),
- unmapper_(isolate->heap(), this) {}
+ unmapper_(this) {}
bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
@@ -332,46 +334,40 @@ void MemoryAllocator::TearDown() {
code_range_ = nullptr;
}
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate), unmapper_(unmapper) {}
+ explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
private:
- void RunInternal() override {
+ // v8::Task overrides.
+ void Run() override {
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
- Unmapper* const unmapper_;
+ Unmapper* unmapper_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
ReconsiderDelayedChunks();
- if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
- if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
- return;
- }
- UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
- DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
- task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
+ if (FLAG_concurrent_sweeping) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+ concurrent_unmapping_tasks_active_++;
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
-void MemoryAllocator::Unmapper::WaitUntilCompleted() {
- for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- concurrent_unmapping_tasks_active_ = 0;
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+ bool waited = false;
+ while (concurrent_unmapping_tasks_active_ > 0) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ concurrent_unmapping_tasks_active_--;
+ waited = true;
}
+ return waited;
}
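The reworked Unmapper tracks outstanding background tasks with a plain counter plus a semaphore: FreeQueuedChunks() bumps the counter when it posts a task, each UnmapFreeMemoryTask::Run() signals once, and WaitUntilCompleted() waits once per posted task. A stripped-down sketch of that handshake, using C++20 primitives as stand-ins for the v8::base ones:

#include <semaphore>
#include <thread>

struct UnmapHandshakeSketch {
  std::counting_semaphore<> pending{0};
  int active_tasks = 0;              // touched only by the coordinating thread

  void PostTask() {
    active_tasks++;
    std::thread([this] {
      // ... perform the queued unmapping work here ...
      pending.release();             // mirrors pending_unmapping_tasks_semaphore_.Signal()
    }).detach();
  }

  bool WaitUntilCompleted() {        // mirrors the loop in the hunk above
    bool waited = false;
    while (active_tasks > 0) {
      pending.acquire();             // mirrors .Wait()
      active_tasks--;
      waited = true;
    }
    return waited;
  }
};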
template <MemoryAllocator::Unmapper::FreeMode mode>
@@ -398,7 +394,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, concurrent_unmapping_tasks_active_);
+ WaitUntilCompleted();
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
@@ -422,7 +418,7 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
// because the memory chunk can be in the queue of a sweeper task.
// Chunks in old generation are unmapped if they are empty.
DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
- return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
+ return !chunk->InNewSpace() || mc == nullptr ||
!mc->sweeper().sweeping_in_progress();
}
@@ -466,23 +462,29 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
}
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
+ void* hint,
base::VirtualMemory* controller) {
- base::VirtualMemory reservation(size, alignment);
+ base::VirtualMemory reservation(size, alignment, hint);
- if (!reservation.IsReserved()) return NULL;
- size_.Increment(reservation.size());
- Address base =
+ if (!reservation.IsReserved()) return nullptr;
+ const Address base =
RoundUp(static_cast<Address>(reservation.address()), alignment);
+ if (base + size != reservation.end()) {
+ const Address unused_start = RoundUp(base + size, GetCommitPageSize());
+ reservation.ReleasePartial(unused_start);
+ }
+ size_.Increment(reservation.size());
controller->TakeControl(&reservation);
return base;
}
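The new tail release keeps only the part of an over-sized reservation that is actually needed for alignment plus the requested size. A worked example with illustrative numbers (the actual over-reservation depends on the platform allocator):

  requested size = 256 KB, alignment = 512 KB, commit page size = 4 KB
  reservation    = [0x10020000, 0x100E0000)                 // 768 KB handed back
  base           = RoundUp(0x10020000, 512 KB) = 0x10080000
  base + size    = 0x100C0000                               // != reservation end
  unused_start   = RoundUp(0x100C0000, 4 KB)   = 0x100C0000
  ReleasePartial(unused_start) drops [0x100C0000, 0x100E0000), and size_ is
  incremented afterwards, i.e. by the reservation size that is actually kept.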
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, base::VirtualMemory* controller) {
+ Executability executable, void* hint, base::VirtualMemory* controller) {
DCHECK(commit_size <= reserve_size);
base::VirtualMemory reservation;
- Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+ Address base =
+ ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
@@ -518,6 +520,23 @@ void Page::InitializeAsAnchor(Space* space) {
SetFlag(ANCHOR);
}
+Heap* MemoryChunk::synchronized_heap() {
+ return reinterpret_cast<Heap*>(
+ base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+}
+
+void MemoryChunk::InitializationMemoryFence() {
+ base::MemoryFence();
+#ifdef THREAD_SANITIZER
+ // Since TSAN does not process memory fences, we use the following annotation
+ // to tell TSAN that there is no data race when emitting an
+ // InitializationMemoryFence. Note that the other thread still needs to
+ // perform MemoryChunk::synchronized_heap().
+ base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
+ reinterpret_cast<base::AtomicWord>(heap_));
+#endif
+}
+
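InitializationMemoryFence() makes all the plain stores performed while setting up a chunk visible before the chunk is handed to another thread; the consumer is expected to go through synchronized_heap(), whose acquire-load (plus the extra TSAN release-store above) forms the matching edge. A generic std::atomic analogue of the pattern, for illustration only:

#include <atomic>

struct ChunkSketch {
  int field_a = 0;
  int field_b = 0;
};

std::atomic<ChunkSketch*> published{nullptr};

void Producer(ChunkSketch* c) {
  c->field_a = 1;                                       // plain initializing stores
  c->field_b = 2;
  std::atomic_thread_fence(std::memory_order_release);  // ~ InitializationMemoryFence()
  published.store(c, std::memory_order_relaxed);        // publish the pointer
}

void Consumer() {
  ChunkSketch* c = published.load(std::memory_order_acquire);  // ~ synchronized_heap()
  if (c != nullptr) {
    int sum = c->field_a + c->field_b;  // stores before the fence are visible here
    (void)sum;
  }
}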
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
@@ -533,10 +552,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
- chunk->slot_set_[OLD_TO_NEW].SetValue(nullptr);
- chunk->slot_set_[OLD_TO_OLD].SetValue(nullptr);
- chunk->typed_slot_set_[OLD_TO_NEW].SetValue(nullptr);
- chunk->typed_slot_set_[OLD_TO_OLD].SetValue(nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
+ nullptr);
+ base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
+ nullptr);
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -560,10 +581,83 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
-
return chunk;
}
+template <Page::InitializationMode mode>
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ PagedSpace* owner) {
+ Page* page = reinterpret_cast<Page*>(chunk);
+ DCHECK(page->area_size() <= kAllocatableMemory);
+ DCHECK(chunk->owner() == owner);
+
+ owner->IncreaseCapacity(page->area_size());
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ // Make sure that categories are initialized before freeing the area.
+ page->InitializeFreeListCategories();
+ // In the case we do not free the memory, we effectively account for the whole
+ // page as allocated memory that cannot be used for further allocations.
+ if (mode == kFreeMemory) {
+ owner->Free(page->area_start(), page->area_size());
+ }
+ page->InitializationMemoryFence();
+ return page;
+}
+
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ SemiSpace* owner) {
+ DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+ bool in_to_space = (owner->id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+ : MemoryChunk::IN_FROM_SPACE);
+ DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+ : MemoryChunk::IN_TO_SPACE));
+ Page* page = static_cast<Page*>(chunk);
+ heap->incremental_marking()->SetNewSpacePageFlags(page);
+ page->AllocateLocalTracker();
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ MarkingState::External(page).ClearLiveness();
+ }
+ page->InitializationMemoryFence();
+ return page;
+}
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner) {
+ if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+ FATAL("Code page is too large.");
+ }
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+ // Initialize the owner field for each contained page (except the first, which
+ // is initialized by MemoryChunk::Initialize).
+ for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
+ addr < chunk->area_end(); addr += Page::kPageSize) {
+ // Clear out kPageHeaderTag.
+ Memory::Address_at(addr) = 0;
+ }
+ LargePage* page = static_cast<LargePage*>(chunk);
+ page->InitializationMemoryFence();
+ return page;
+}
+
+Page* Page::ConvertNewToOld(Page* old_page) {
+ DCHECK(!old_page->is_anchor());
+ DCHECK(old_page->InNewSpace());
+ OldSpace* old_space = old_page->heap()->old_space();
+ old_page->set_owner(old_space);
+ old_page->SetFlags(0, static_cast<uintptr_t>(~0));
+ old_space->AccountCommitted(old_page->size());
+ Page* new_page = Page::Initialize<kDoNotFreeMemory>(
+ old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
+ new_page->InsertAfter(old_space->anchor()->prev_page());
+ return new_page;
+}
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
@@ -640,22 +734,6 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
-void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
- DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
- DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
- Address free_start = chunk->area_end_ - bytes_to_shrink;
- // Don't adjust the size of the page. The area is just uncomitted but not
- // released.
- chunk->area_end_ -= bytes_to_shrink;
- UncommitBlock(free_start, bytes_to_shrink);
- if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- if (chunk->reservation_.IsReserved())
- chunk->reservation_.Guard(chunk->area_end_);
- else
- base::OS::Guard(chunk->area_end_, GetCommitPageSize());
- }
-}
-
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -668,6 +746,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
base::VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
+ void* address_hint = heap->GetRandomMmapAddr();
//
// MemoryChunk layout:
@@ -727,7 +806,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
- &reservation);
+ address_hint, &reservation);
if (base == NULL) return NULL;
// Update executable memory size.
size_executable_.Increment(reservation.size());
@@ -748,7 +827,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, &reservation);
+ executable, address_hint, &reservation);
if (base == NULL) return NULL;
@@ -803,6 +882,11 @@ size_t Page::AvailableInFreeList() {
}
size_t Page::ShrinkToHighWaterMark() {
+ // Shrinking only makes sense outside of the CodeRange, where we don't care
+ // about address space fragmentation.
+ base::VirtualMemory* reservation = reserved_memory();
+ if (!reservation->IsReserved()) return 0;
+
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
@@ -832,6 +916,7 @@ size_t Page::ShrinkToHighWaterMark() {
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
+ DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
reinterpret_cast<void*>(this),
@@ -842,7 +927,8 @@ size_t Page::ShrinkToHighWaterMark() {
filler->address(),
static_cast<int>(area_end() - filler->address() - unused),
ClearRecordedSlots::kNo);
- heap()->memory_allocator()->ShrinkChunk(this, unused);
+ heap()->memory_allocator()->PartialFreeMemory(
+ this, address() + size() - unused, unused, area_end() - unused);
CHECK(filler->IsFiller());
CHECK_EQ(filler->address() + filler->Size(), area_end());
}
@@ -856,8 +942,9 @@ void Page::CreateBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(end - 1), this);
MarkingState::Internal(this).bitmap()->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
- MarkingState::Internal(this).IncrementLiveBytes(
- static_cast<int>(end - start));
+ MarkingState::Internal(this)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ static_cast<int>(end - start));
}
void Page::DestroyBlackArea(Address start, Address end) {
@@ -867,29 +954,33 @@ void Page::DestroyBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(end - 1), this);
MarkingState::Internal(this).bitmap()->ClearRange(
AddressToMarkbitIndex(start), AddressToMarkbitIndex(end));
- MarkingState::Internal(this).IncrementLiveBytes(
- -static_cast<int>(end - start));
+ MarkingState::Internal(this)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ -static_cast<int>(end - start));
}
-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
- Address start_free) {
- // We do not allow partial shrink for code.
- DCHECK(chunk->executable() == NOT_EXECUTABLE);
-
- intptr_t size;
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free,
+ Address new_area_end) {
base::VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
- size = static_cast<intptr_t>(reservation->size());
-
- size_t to_free_size = size - (start_free - chunk->address());
-
- DCHECK(size_.Value() >= to_free_size);
- size_.Decrement(to_free_size);
+ chunk->size_ -= bytes_to_free;
+ chunk->area_end_ = new_area_end;
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
+ static_cast<uintptr_t>(GetCommitPageSize()));
+ DCHECK_EQ(chunk->address() + chunk->size(),
+ chunk->area_end() + CodePageGuardSize());
+ reservation->Guard(chunk->area_end_);
+ }
+ // On e.g. Windows, a reservation may be larger than a page, and releasing
+ // it partially starting at |start_free| will also release the potentially
+ // unused part behind the current page.
+ const size_t released_bytes = reservation->ReleasePartial(start_free);
+ DCHECK_GE(size_.Value(), released_bytes);
+ size_.Decrement(released_bytes);
isolate_->counters()->memory_allocated()->Decrement(
- static_cast<int>(to_free_size));
- chunk->set_size(size - to_free_size);
-
- reservation->ReleasePartial(start_free);
+ static_cast<int>(released_bytes));
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
@@ -1077,7 +1168,7 @@ size_t MemoryAllocator::CodePageAreaEndOffset() {
intptr_t MemoryAllocator::GetCommitPageSize() {
if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+ DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
return base::OS::CommitPageSize();
@@ -1117,6 +1208,10 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
+bool MemoryChunk::contains_array_buffers() {
+ return local_tracker() != nullptr && !local_tracker()->IsEmpty();
+}
+
void MemoryChunk::ReleaseAllocatedMemory() {
if (skip_list_ != nullptr) {
delete skip_list_;
@@ -1150,12 +1245,13 @@ template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
- if (!slot_set_[type].TrySetValue(nullptr, slot_set)) {
+ SlotSet* old_slot_set = base::AsAtomicWord::Release_CompareAndSwap(
+ &slot_set_[type], nullptr, slot_set);
+ if (old_slot_set != nullptr) {
delete[] slot_set;
- slot_set = slot_set_[type].Value();
- DCHECK(slot_set);
- return slot_set;
+ slot_set = old_slot_set;
}
+ DCHECK(slot_set);
return slot_set;
}
@@ -1164,10 +1260,10 @@ template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
- SlotSet* slot_set = slot_set_[type].Value();
+ SlotSet* slot_set = slot_set_[type];
if (slot_set) {
+ slot_set_[type] = nullptr;
delete[] slot_set;
- slot_set_[type].SetValue(nullptr);
}
}
@@ -1176,14 +1272,15 @@ template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
- TypedSlotSet* slot_set = new TypedSlotSet(address());
- if (!typed_slot_set_[type].TrySetValue(nullptr, slot_set)) {
- delete slot_set;
- slot_set = typed_slot_set_[type].Value();
- DCHECK(slot_set);
- return slot_set;
+ TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
+ TypedSlotSet* old_value = base::AsAtomicWord::Release_CompareAndSwap(
+ &typed_slot_set_[type], nullptr, typed_slot_set);
+ if (old_value != nullptr) {
+ delete typed_slot_set;
+ typed_slot_set = old_value;
}
- return slot_set;
+ DCHECK(typed_slot_set);
+ return typed_slot_set;
}
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -1191,10 +1288,10 @@ template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
- TypedSlotSet* typed_slot_set = typed_slot_set_[type].Value();
+ TypedSlotSet* typed_slot_set = typed_slot_set_[type];
if (typed_slot_set) {
+ typed_slot_set_[type] = nullptr;
delete typed_slot_set;
- typed_slot_set_[type].SetValue(nullptr);
}
}
@@ -1315,6 +1412,8 @@ void PagedSpace::RefillFreeList() {
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+ base::LockGuard<base::Mutex> guard(mutex());
+
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
@@ -1409,9 +1508,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- // Do not account for the unused space as uncommitted because the counter
- // is kept in sync with page size which is also not adjusted for those
- // chunks.
+ AccountUncommitted(unused);
}
}
@@ -1502,8 +1599,9 @@ void PagedSpace::EmptyAllocationInfo() {
MarkingState::Internal(page).bitmap()->ClearRange(
page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
- MarkingState::Internal(page).IncrementLiveBytes(
- -static_cast<int>(current_limit - current_top));
+ MarkingState::Internal(page)
+ .IncrementLiveBytes<IncrementalMarking::kAtomicity>(
+ -static_cast<int>(current_limit - current_top));
}
}
@@ -1582,14 +1680,16 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
- if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
+ if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object))) {
black_size += size;
}
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
- CHECK_LE(black_size, MarkingState::Internal(page).live_bytes());
+ CHECK_LE(black_size,
+ MarkingState::Internal(page).live_bytes<AccessMode::ATOMIC>());
}
CHECK(allocation_pointer_found_in_space);
}
@@ -1601,7 +1701,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
bool NewSpace::SetUp(size_t initial_semispace_capacity,
size_t maximum_semispace_capacity) {
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo32(
+ DCHECK(base::bits::IsPowerOfTwo(
static_cast<uint32_t>(maximum_semispace_capacity)));
to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1687,7 +1787,6 @@ void NewSpace::Shrink() {
}
bool NewSpace::Rebalance() {
- CHECK(heap()->promotion_queue()->is_empty());
// Order here is important to make use of the page pool.
return to_space_.EnsureCurrentCapacity() &&
from_space_.EnsureCurrentCapacity();
@@ -1783,6 +1882,8 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
+ original_top_.SetValue(top());
+ original_limit_.SetValue(limit());
UpdateInlineAllocationLimit(0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1830,10 +1931,6 @@ bool NewSpace::AddFreshPage() {
// Clear remainder of current page.
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
- if (heap()->gc_state() == Heap::SCAVENGE) {
- heap()->promotion_queue()->SetNewLimit(limit);
- }
-
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
UpdateAllocationInfo();
@@ -2220,7 +2317,6 @@ void SemiSpace::set_age_mark(Address mark) {
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
// Use the NewSpace::NewObjectIterator to iterate the ToSpace.
UNREACHABLE();
- return std::unique_ptr<ObjectIterator>();
}
#ifdef DEBUG
@@ -2708,7 +2804,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
owner_->EmptyAllocationInfo();
owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kNoGCCallbackFlags);
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
@@ -2844,7 +2940,7 @@ size_t FreeListCategory::SumFreeList() {
FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
- sum += cur->nobarrier_size();
+ sum += cur->relaxed_read_size();
cur = cur->next();
}
return sum;
@@ -3146,15 +3242,16 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
AllocationStep(object->address(), object_size);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
- ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
+ ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
+ object, MarkingState::Internal(object));
}
return object;
}
@@ -3246,18 +3343,24 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
}
void LargeObjectSpace::FreeUnmarkedObjects() {
- LargePage* previous = NULL;
+ LargePage* previous = nullptr;
LargePage* current = first_page_;
- while (current != NULL) {
+ while (current != nullptr) {
HeapObject* object = current->GetObject();
DCHECK(!ObjectMarking::IsGrey(object, MarkingState::Internal(object)));
if (ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
Address free_start;
if ((free_start = current->GetAddressToShrink()) != 0) {
- // TODO(hpayer): Perform partial free concurrently.
+ DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
RemoveChunkMapEntries(current, free_start);
- heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+ const size_t bytes_to_free =
+ current->size() - (free_start - current->address());
+ heap()->memory_allocator()->PartialFreeMemory(
+ current, free_start, bytes_to_free,
+ current->area_start() + object->Size());
+ size_ -= bytes_to_free;
+ AccountUncommitted(bytes_to_free);
}
previous = current;
current = current->next_page();
@@ -3265,7 +3368,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* page = current;
// Cut the chunk out from the chunk list.
current = current->next_page();
- if (previous == NULL) {
+ if (previous == nullptr) {
first_page_ = current;
} else {
previous->set_next_page(current);
@@ -3326,7 +3429,8 @@ void LargeObjectSpace::Verify() {
CHECK(object->IsAbstractCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsThinString() ||
object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsByteArray() || object->IsFreeSpace());
+ object->IsPropertyArray() || object->IsByteArray() ||
+ object->IsFreeSpace());
// The object itself should look OK.
object->ObjectVerify();
@@ -3349,6 +3453,16 @@ void LargeObjectSpace::Verify() {
CHECK(element_object->map()->IsMap());
}
}
+ } else if (object->IsPropertyArray()) {
+ PropertyArray* array = PropertyArray::cast(object);
+ for (int j = 0; j < array->length(); j++) {
+ Object* property = array->get(j);
+ if (property->IsHeapObject()) {
+ HeapObject* property_object = HeapObject::cast(property);
+ CHECK(heap()->Contains(property_object));
+ CHECK(property_object->map()->IsMap());
+ }
+ }
}
}
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 5c37482ac2..a8394dd486 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -14,8 +14,8 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
+#include "src/base/iterator.h"
#include "src/base/platform/mutex.h"
-#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
@@ -237,6 +237,13 @@ class FreeListCategory {
// any heap object.
class MemoryChunk {
public:
+ // Use with std data structures.
+ struct Hasher {
+ size_t operator()(Page* const p) const {
+ return reinterpret_cast<size_t>(p) >> kPageSizeBits;
+ }
+ };
+
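The new Hasher lets Page* be used directly as a key in the standard unordered containers, hashing by page index rather than by raw pointer bits. A hypothetical use, not part of this patch:

#include <unordered_set>

void TrackSketch(Page* page) {
  std::unordered_set<Page*, MemoryChunk::Hasher> shrinkable_pages;
  shrinkable_pages.insert(page);
  bool tracked = shrinkable_pages.count(page) > 0;  // hashed by page index
  (void)tracked;
}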
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
@@ -434,32 +441,42 @@ class MemoryChunk {
inline Heap* heap() const { return heap_; }
+ Heap* synchronized_heap();
+
inline SkipList* skip_list() { return skip_list_; }
inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
- template <RememberedSetType type>
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
- return slot_set_[type].Value();
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicWord::Acquire_Load(&slot_set_[type]);
+ return slot_set_[type];
}
- template <RememberedSetType type>
+ template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
- return typed_slot_set_[type].Value();
+ if (access_mode == AccessMode::ATOMIC)
+ return base::AsAtomicWord::Acquire_Load(&typed_slot_set_[type]);
+ return typed_slot_set_[type];
}
- inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
-
template <RememberedSetType type>
SlotSet* AllocateSlotSet();
+ // Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
+ // Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseTypedSlotSet();
+
void AllocateLocalTracker();
void ReleaseLocalTracker();
+ inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
+ bool contains_array_buffers();
+
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
@@ -581,6 +598,10 @@ class MemoryChunk {
base::VirtualMemory* reserved_memory() { return &reservation_; }
+ // Emits a memory barrier. For TSAN builds the other thread needs to perform
+ // MemoryChunk::synchronized_heap() to simulate the barrier.
+ void InitializationMemoryFence();
+
size_t size_;
Flags flags_;
@@ -608,9 +629,8 @@ class MemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
- base::AtomicValue<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- base::AtomicValue<TypedSlotSet*>
- typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
SkipList* skip_list_;
@@ -675,7 +695,7 @@ class MarkingState {
MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
: bitmap_(bitmap), live_bytes_(live_bytes) {}
- template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
inline void IncrementLiveBytes(intptr_t by) const;
void SetLiveBytes(intptr_t value) const {
@@ -688,7 +708,9 @@ class MarkingState {
}
Bitmap* bitmap() const { return bitmap_; }
- intptr_t live_bytes() const { return *live_bytes_; }
+
+ template <AccessMode mode = AccessMode::NON_ATOMIC>
+ inline intptr_t live_bytes() const;
private:
Bitmap* bitmap_;
@@ -696,19 +718,29 @@ class MarkingState {
};
template <>
-inline void MarkingState::IncrementLiveBytes<MarkBit::NON_ATOMIC>(
+inline void MarkingState::IncrementLiveBytes<AccessMode::NON_ATOMIC>(
intptr_t by) const {
*live_bytes_ += by;
}
template <>
-inline void MarkingState::IncrementLiveBytes<MarkBit::ATOMIC>(
+inline void MarkingState::IncrementLiveBytes<AccessMode::ATOMIC>(
intptr_t by) const {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
}
+template <>
+inline intptr_t MarkingState::live_bytes<AccessMode::NON_ATOMIC>() const {
+ return *live_bytes_;
+}
+
+template <>
+inline intptr_t MarkingState::live_bytes<AccessMode::ATOMIC>() const {
+ return reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Value();
+}
+
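live_bytes() is now templated the same way as IncrementLiveBytes(), so each caller states its access mode explicitly. An illustrative call site (the atomic form matches the Verify() hunk in spaces.cc above):

intptr_t LiveBytesSketch(Page* page, bool racing_with_marker) {
  if (racing_with_marker) {
    // Atomic read, for code that may race with incremental/concurrent marking:
    return MarkingState::Internal(page).live_bytes<AccessMode::ATOMIC>();
  }
  // Non-atomic read, safe when no concurrent marker touches the counter:
  return MarkingState::Internal(page).live_bytes();
}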
// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 1MB. Large object pages may be larger.
+// A page is a memory chunk of a size 512K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -722,8 +754,6 @@ class Page : public MemoryChunk {
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- static inline Page* ConvertNewToOld(Page* old_page);
-
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
@@ -754,6 +784,8 @@ class Page : public MemoryChunk {
kObjectStartOffset;
}
+ static Page* ConvertNewToOld(Page* old_page);
+
inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
// Create a Page object that is only used as anchor for the doubly-linked
@@ -835,10 +867,10 @@ class Page : public MemoryChunk {
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
template <InitializationMode mode = kFreeMemory>
- static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, PagedSpace* owner);
- static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, SemiSpace* owner);
+ static Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, PagedSpace* owner);
+ static Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, SemiSpace* owner);
inline void InitializeFreeListCategories();
@@ -870,8 +902,8 @@ class LargePage : public MemoryChunk {
static const int kMaxCodePageSize = 512 * MB;
private:
- static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, Space* owner);
+ static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner);
friend class MemoryAllocator;
};
@@ -954,6 +986,8 @@ class Space : public Malloced {
committed_ -= bytes;
}
+ V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -1150,9 +1184,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
public:
class UnmapFreeMemoryTask;
- Unmapper(Heap* heap, MemoryAllocator* allocator)
- : heap_(heap),
- allocator_(allocator),
+ explicit Unmapper(MemoryAllocator* allocator)
+ : allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
concurrent_unmapping_tasks_active_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
@@ -1186,14 +1219,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
}
void FreeQueuedChunks();
- void WaitUntilCompleted();
+ bool WaitUntilCompleted();
void TearDown();
bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
private:
static const int kReservedQueueingSlots = 64;
- static const int kMaxUnmapperTasks = 24;
enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
@@ -1232,15 +1264,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
- Heap* const heap_;
- MemoryAllocator* const allocator_;
base::Mutex mutex_;
+ MemoryAllocator* allocator_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk.
std::list<MemoryChunk*> delayed_regular_chunks_;
- CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_;
@@ -1332,20 +1362,24 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space);
- void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
-
- Address ReserveAlignedMemory(size_t requested, size_t alignment,
+ Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
- base::VirtualMemory* controller);
+ void* hint, base::VirtualMemory* controller);
bool CommitMemory(Address addr, size_t size, Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
- void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
void FreeMemory(Address addr, size_t size, Executability executable);
+ // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
+ // internally memory is freed from |start_free| to the end of the reservation.
+ // Additional memory beyond the page is not accounted for, though, so
+ // |bytes_to_free| must be computed by the caller.
+ void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free, Address new_area_end);
+
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
@@ -1460,7 +1494,7 @@ class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
template <class PAGE_TYPE>
class PageIteratorImpl
- : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
+ : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
public:
explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
@@ -1540,22 +1574,14 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
// space.
class AllocationInfo {
public:
- AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
- AllocationInfo(Address top, Address limit)
- : original_top_(top), top_(top), limit_(limit) {}
+ AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+ AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
void Reset(Address top, Address limit) {
- original_top_ = top;
set_top(top);
set_limit(limit);
}
- Address original_top() {
- SLOW_DCHECK(top_ == NULL ||
- (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
- return original_top_;
- }
-
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
(reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
@@ -1589,8 +1615,6 @@ class AllocationInfo {
#endif
private:
- // The original top address when the allocation info was initialized.
- Address original_top_;
// Current allocation top.
Address top_;
// Current allocation limit.
@@ -2341,14 +2365,12 @@ class SemiSpace : public Space {
size_t Size() override {
UNREACHABLE();
- return 0;
}
size_t SizeOfObjects() override { return Size(); }
size_t Available() override {
UNREACHABLE();
- return 0;
}
iterator begin() { return iterator(anchor_.next_page()); }
@@ -2440,10 +2462,10 @@ class NewSpace : public Space {
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ top_on_previous_step_(0),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- top_on_previous_step_(0),
allocated_histogram_(nullptr),
promoted_histogram_(nullptr) {}
@@ -2577,6 +2599,10 @@ class NewSpace : public Space {
return allocation_info_.limit();
}
+ Address original_top() { return original_top_.Value(); }
+
+ Address original_limit() { return original_limit_.Value(); }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2707,16 +2733,20 @@ class NewSpace : public Space {
base::Mutex mutex_;
+ // Allocation pointer and limit for normal allocation and allocation during
+ // mark-compact collection.
+ AllocationInfo allocation_info_;
+ Address top_on_previous_step_;
+ // The top and the limit at the time of setting the allocation info.
+ // These values can be accessed by background tasks.
+ base::AtomicValue<Address> original_top_;
+ base::AtomicValue<Address> original_limit_;
+
// The semispaces.
SemiSpace to_space_;
SemiSpace from_space_;
base::VirtualMemory reservation_;
- // Allocation pointer and limit for normal allocation and allocation during
- // mark-compact collection.
- AllocationInfo allocation_info_;
-
- Address top_on_previous_step_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2785,7 +2815,6 @@ class CompactionSpaceCollection : public Malloced {
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
private:
@@ -2824,7 +2853,7 @@ class MapSpace : public PagedSpace {
: PagedSpace(heap, id, NOT_EXECUTABLE) {}
int RoundSizeDownToObjectAlignment(int size) override {
- if (base::bits::IsPowerOfTwo32(Map::kSize)) {
+ if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index b803b10d06..31333e6437 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -35,7 +35,8 @@ void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
+ virtual_memory_ =
+ new base::VirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr());
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_[0] =
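
The comment in this hunk relies on a small alignment argument: a 3x reservation always contains a start address aligned to 2x kStoreBufferSize with a full buffer after it, and that alignment is what makes the end of the area detectable with a single bit test. A standalone sketch of the arithmetic, with illustrative constants only (not V8 code):

// Illustrative sketch only: why a 3x reservation allows the store buffer
// limit to be detected with a bit test, assuming kStoreBufferSize is a
// power of two.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kStoreBufferSize = 1u << 16;   // assumed size, power of two
  const uintptr_t reservation = 0x12345000;      // pretend mmap() result

  // Align the usable start up to 2 * kStoreBufferSize; the 3x reservation
  // guarantees a full buffer still fits after the aligned start.
  const uintptr_t start =
      (reservation + 2 * kStoreBufferSize - 1) & ~(2 * kStoreBufferSize - 1);
  assert(start + kStoreBufferSize <= reservation + 3 * kStoreBufferSize);

  // Because start is a multiple of 2 * kStoreBufferSize, the one-past-the-end
  // address is the only in-range address with the kStoreBufferSize bit set,
  // so "addr & kStoreBufferSize" tests for the end of the area.
  const uintptr_t limit = start + kStoreBufferSize;
  assert((start & kStoreBufferSize) == 0);
  assert((limit & kStoreBufferSize) != 0);
  return 0;
}
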
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
new file mode 100644
index 0000000000..b6856b4849
--- /dev/null
+++ b/deps/v8/src/heap/worklist.h
@@ -0,0 +1,354 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_WORKLIST_
+#define V8_HEAP_WORKLIST_
+
+#include <cstddef>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace internal {
+
+// A concurrent worklist based on segments. Each task gets private
+// push and pop segments. Empty pop segments are swapped with their
+// corresponding push segments. Full push segments are published to a global
+// pool of segments and replaced with empty segments.
+//
+// Work stealing is best effort, i.e., there is no way to inform other tasks
+// of the need for items.
+template <typename EntryType, int SEGMENT_SIZE>
+class Worklist {
+ public:
+ class View {
+ public:
+ View(Worklist<EntryType, SEGMENT_SIZE>* worklist, int task_id)
+ : worklist_(worklist), task_id_(task_id) {}
+
+ // Pushes an entry onto the worklist.
+ bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
+
+ // Pops an entry from the worklist.
+ bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
+
+ // Returns true if the local portion of the worklist is empty.
+ bool IsLocalEmpty() { return worklist_->IsLocalEmpty(task_id_); }
+
+ // Returns true if the worklist is empty. Can only be used from the main
+ // thread without concurrent access.
+ bool IsGlobalEmpty() { return worklist_->IsGlobalEmpty(); }
+
+ bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }
+
+ size_t LocalPushSegmentSize() {
+ return worklist_->LocalPushSegmentSize(task_id_);
+ }
+
+ private:
+ Worklist<EntryType, SEGMENT_SIZE>* worklist_;
+ int task_id_;
+ };
+
+ static const int kMaxNumTasks = 8;
+ static const int kSegmentCapacity = SEGMENT_SIZE;
+
+ Worklist() : Worklist(kMaxNumTasks) {}
+
+ explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_push_segment(i) = new Segment();
+ private_pop_segment(i) = new Segment();
+ }
+ }
+
+ ~Worklist() {
+ CHECK(IsGlobalEmpty());
+ for (int i = 0; i < num_tasks_; i++) {
+ DCHECK_NOT_NULL(private_push_segment(i));
+ DCHECK_NOT_NULL(private_pop_segment(i));
+ delete private_push_segment(i);
+ delete private_pop_segment(i);
+ }
+ }
+
+ bool Push(int task_id, EntryType entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_push_segment(task_id));
+ if (!private_push_segment(task_id)->Push(entry)) {
+ PublishPushSegmentToGlobal(task_id);
+ bool success = private_push_segment(task_id)->Push(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool Pop(int task_id, EntryType* entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_pop_segment(task_id));
+ if (!private_pop_segment(task_id)->Pop(entry)) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ Segment* tmp = private_pop_segment(task_id);
+ private_pop_segment(task_id) = private_push_segment(task_id);
+ private_push_segment(task_id) = tmp;
+ } else if (!StealPopSegmentFromGlobal(task_id)) {
+ return false;
+ }
+ bool success = private_pop_segment(task_id)->Pop(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ size_t LocalPushSegmentSize(int task_id) {
+ return private_push_segment(task_id)->Size();
+ }
+
+ bool IsLocalEmpty(int task_id) {
+ return private_pop_segment(task_id)->IsEmpty() &&
+ private_push_segment(task_id)->IsEmpty();
+ }
+
+ bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
+
+ bool IsGlobalEmpty() {
+ for (int i = 0; i < num_tasks_; i++) {
+ if (!IsLocalEmpty(i)) return false;
+ }
+ return global_pool_.IsEmpty();
+ }
+
+ size_t LocalSize(int task_id) {
+ return private_pop_segment(task_id)->Size() +
+ private_push_segment(task_id)->Size();
+ }
+
+ // Clears all segments. Frees the global segment pool.
+ //
+ // Assumes that no other tasks are running.
+ void Clear() {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Clear();
+ private_push_segment(i)->Clear();
+ }
+ global_pool_.Clear();
+ }
+
+ // Calls the specified callback on each element of the worklist and replaces
+ // the element with the result of the callback.
+ // The signature of the callback is
+ // bool Callback(EntryType old, EntryType* new).
+ // If the callback returns |false| then the element is removed from the
+ // worklist. Otherwise the |new| entry is updated.
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Update(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Update(callback);
+ private_push_segment(i)->Update(callback);
+ }
+ global_pool_.Update(callback);
+ }
+
+ template <typename Callback>
+ void IterateGlobalPool(Callback callback) {
+ global_pool_.Iterate(callback);
+ }
+
+ void FlushToGlobal(int task_id) {
+ PublishPushSegmentToGlobal(task_id);
+ PublishPopSegmentToGlobal(task_id);
+ }
+
+ private:
+ FRIEND_TEST(WorkListTest, SegmentCreate);
+ FRIEND_TEST(WorkListTest, SegmentPush);
+ FRIEND_TEST(WorkListTest, SegmentPushPop);
+ FRIEND_TEST(WorkListTest, SegmentIsEmpty);
+ FRIEND_TEST(WorkListTest, SegmentIsFull);
+ FRIEND_TEST(WorkListTest, SegmentClear);
+ FRIEND_TEST(WorkListTest, SegmentFullPushFails);
+ FRIEND_TEST(WorkListTest, SegmentEmptyPopFails);
+ FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
+ FRIEND_TEST(WorkListTest, SegmentUpdate);
+
+ class Segment {
+ public:
+ static const int kCapacity = kSegmentCapacity;
+
+ Segment() : index_(0) {}
+
+ bool Push(EntryType entry) {
+ if (IsFull()) return false;
+ entries_[index_++] = entry;
+ return true;
+ }
+
+ bool Pop(EntryType* entry) {
+ if (IsEmpty()) return false;
+ *entry = entries_[--index_];
+ return true;
+ }
+
+ size_t Size() const { return index_; }
+ bool IsEmpty() const { return index_ == 0; }
+ bool IsFull() const { return index_ == kCapacity; }
+ void Clear() { index_ = 0; }
+
+ template <typename Callback>
+ void Update(Callback callback) {
+ size_t new_index = 0;
+ for (size_t i = 0; i < index_; i++) {
+ if (callback(entries_[i], &entries_[new_index])) {
+ new_index++;
+ }
+ }
+ index_ = new_index;
+ }
+
+ template <typename Callback>
+ void Iterate(Callback callback) const {
+ for (size_t i = 0; i < index_; i++) {
+ callback(entries_[i]);
+ }
+ }
+
+ Segment* next() const { return next_; }
+ void set_next(Segment* segment) { next_ = segment; }
+
+ private:
+ Segment* next_;
+ size_t index_;
+ EntryType entries_[kCapacity];
+ };
+
+ struct PrivateSegmentHolder {
+ Segment* private_push_segment;
+ Segment* private_pop_segment;
+ char cache_line_padding[64];
+ };
+
+ class GlobalPool {
+ public:
+ GlobalPool() : top_(nullptr) {}
+
+ V8_INLINE void Push(Segment* segment) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ segment->set_next(top_);
+ top_ = segment;
+ }
+
+ V8_INLINE bool Pop(Segment** segment) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (top_ != nullptr) {
+ *segment = top_;
+ top_ = top_->next();
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE bool IsEmpty() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ return top_ == nullptr;
+ }
+
+ void Clear() {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ Segment* current = top_;
+ while (current != nullptr) {
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ }
+ top_ = nullptr;
+ }
+
+ // See Worklist::Update.
+ template <typename Callback>
+ void Update(Callback callback) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ Segment* prev = nullptr;
+ Segment* current = top_;
+ while (current != nullptr) {
+ current->Update(callback);
+ if (current->IsEmpty()) {
+ if (prev == nullptr) {
+ top_ = current->next();
+ } else {
+ prev->set_next(current->next());
+ }
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ } else {
+ prev = current;
+ current = current->next();
+ }
+ }
+ }
+
+ // See Worklist::IterateGlobalPool.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ for (Segment* current = top_; current != nullptr;
+ current = current->next()) {
+ current->Iterate(callback);
+ }
+ }
+
+ private:
+ base::Mutex lock_;
+ Segment* top_;
+ };
+
+ V8_INLINE Segment*& private_push_segment(int task_id) {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ V8_INLINE Segment*& private_pop_segment(int task_id) {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_push_segment(task_id));
+ private_push_segment(task_id) = new Segment();
+ }
+ }
+
+ V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
+ if (!private_pop_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_pop_segment(task_id));
+ private_pop_segment(task_id) = new Segment();
+ }
+ }
+
+ V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
+ Segment* new_segment = nullptr;
+ if (global_pool_.Pop(&new_segment)) {
+ delete private_pop_segment(task_id);
+ private_pop_segment(task_id) = new_segment;
+ return true;
+ }
+ return false;
+ }
+
+ PrivateSegmentHolder private_segments_[kMaxNumTasks];
+ GlobalPool global_pool_;
+ int num_tasks_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_WORKLIST_
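
A minimal usage sketch of the Worklist added above, assuming src/heap/worklist.h is on the include path; the function name, the void* entry type, and the segment size are illustrative, and this is not an actual V8 call site:

// Minimal usage sketch for the new Worklist (illustrative only).
#include "src/heap/worklist.h"

namespace v8 {
namespace internal {

void WorklistUsageSketch() {
  Worklist<void*, 64> worklist;                       // up to kMaxNumTasks views
  Worklist<void*, 64>::View main_view(&worklist, 0);  // task 0
  Worklist<void*, 64>::View task_view(&worklist, 1);  // task 1

  int dummy = 0;
  main_view.Push(&dummy);     // lands in task 0's private push segment
  worklist.FlushToGlobal(0);  // publish it so other tasks can steal it

  void* entry = nullptr;
  // Task 1's private segments are empty, so Pop() steals the published
  // segment from the global pool and pops the entry from it.
  bool success = task_view.Pop(&entry);
  CHECK(success);
  CHECK_EQ(static_cast<void*>(&dummy), entry);

  // Everything is drained again; the Worklist destructor CHECKs this.
  CHECK(worklist.IsGlobalEmpty());
}

}  // namespace internal
}  // namespace v8

Pop() first drains the private pop segment, then swaps it with the push segment, and only then steals from the global pool, which is why the explicit FlushToGlobal(0) is needed before another task can observe the entry.
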
diff --git a/deps/v8/src/heap/workstealing-marking-deque.h b/deps/v8/src/heap/workstealing-marking-deque.h
deleted file mode 100644
index 1a3dc865e4..0000000000
--- a/deps/v8/src/heap/workstealing-marking-deque.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_WORKSTEALING_MARKING_DEQUE_
-#define V8_HEAP_WORKSTEALING_MARKING_DEQUE_
-
-#include <cstddef>
-
-#include "src/base/logging.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-class HeapObject;
-
-class StackSegment {
- public:
- static const int kNumEntries = 64;
-
- StackSegment(StackSegment* next, StackSegment* prev)
- : next_(next), prev_(prev), index_(0) {}
-
- bool Push(HeapObject* object) {
- if (IsFull()) return false;
-
- objects_[index_++] = object;
- return true;
- }
-
- bool Pop(HeapObject** object) {
- if (IsEmpty()) return false;
-
- *object = objects_[--index_];
- return true;
- }
-
- size_t Size() { return index_; }
- bool IsEmpty() { return index_ == 0; }
- bool IsFull() { return index_ == kNumEntries; }
- void Clear() { index_ = 0; }
-
- StackSegment* next() { return next_; }
- StackSegment* prev() { return prev_; }
- void set_next(StackSegment* next) { next_ = next; }
- void set_prev(StackSegment* prev) { prev_ = prev; }
-
- void Unlink() {
- if (next() != nullptr) next()->set_prev(prev());
- if (prev() != nullptr) prev()->set_next(next());
- }
-
- private:
- StackSegment* next_;
- StackSegment* prev_;
- size_t index_;
- HeapObject* objects_[kNumEntries];
-};
-
-class SegmentedStack {
- public:
- SegmentedStack()
- : front_(new StackSegment(nullptr, nullptr)), back_(front_) {}
-
- ~SegmentedStack() {
- CHECK(IsEmpty());
- delete front_;
- }
-
- bool Push(HeapObject* object) {
- if (!front_->Push(object)) {
- NewFront();
- bool success = front_->Push(object);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- bool Pop(HeapObject** object) {
- if (!front_->Pop(object)) {
- if (IsEmpty()) return false;
- DeleteFront();
- bool success = front_->Pop(object);
- USE(success);
- DCHECK(success);
- }
- return object;
- }
-
- bool IsEmpty() { return front_ == back_ && front_->IsEmpty(); }
-
- private:
- void NewFront() {
- StackSegment* s = new StackSegment(front_, nullptr);
- front_->set_prev(s);
- front_ = s;
- }
-
- void DeleteFront() { delete Unlink(front_); }
-
- StackSegment* Unlink(StackSegment* segment) {
- CHECK_NE(front_, back_);
- if (segment == front_) front_ = front_->next();
- if (segment == back_) back_ = back_->prev();
- segment->Unlink();
- return segment;
- }
-
- StackSegment* front_;
- StackSegment* back_;
-};
-
-// TODO(mlippautz): Implement actual work stealing.
-class WorkStealingMarkingDeque {
- public:
- static const int kMaxNumTasks = 4;
-
- bool Push(int task_id, HeapObject* object) {
- DCHECK_LT(task_id, kMaxNumTasks);
- return private_stacks_[task_id].Push(object);
- }
-
- bool Pop(int task_id, HeapObject** object) {
- DCHECK_LT(task_id, kMaxNumTasks);
- return private_stacks_[task_id].Pop(object);
- }
-
- bool IsLocalEmpty(int task_id) { return private_stacks_[task_id].IsEmpty(); }
-
- private:
- SegmentedStack private_stacks_[kMaxNumTasks];
-};
-
-class LocalWorkStealingMarkingDeque {
- public:
- LocalWorkStealingMarkingDeque(WorkStealingMarkingDeque* deque, int task_id)
- : deque_(deque), task_id_(task_id) {}
-
- // Pushes an object onto the marking deque.
- bool Push(HeapObject* object) { return deque_->Push(task_id_, object); }
-
- // Pops an object onto the marking deque.
- bool Pop(HeapObject** object) { return deque_->Pop(task_id_, object); }
-
- // Returns true if the local portion of the marking deque is empty.
- bool IsEmpty() { return deque_->IsLocalEmpty(task_id_); }
-
- // Blocks if there are no more objects available. Returns execution with
- // |true| once new objects are available and |false| otherwise.
- bool WaitForMoreObjects() {
- // Return false once the local portion of the marking deque is drained.
- // TODO(mlippautz): Implement a barrier that can be used to synchronize
- // work stealing and emptiness.
- return !IsEmpty();
- }
-
- private:
- WorkStealingMarkingDeque* deque_;
- int task_id_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_WORKSTEALING_MARKING_DEQUE_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index d83e02522a..52243796c2 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -93,7 +93,6 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
- return NULL;
}
@@ -296,50 +295,40 @@ void RelocInfo::Visit(Heap* heap) {
Immediate::Immediate(int x) {
- x_ = x;
+ value_.immediate = x;
rmode_ = RelocInfo::NONE32;
}
Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
- x_ = reinterpret_cast<int32_t>(x);
+ value_.immediate = reinterpret_cast<int32_t>(x);
rmode_ = rmode;
}
Immediate::Immediate(const ExternalReference& ext) {
- x_ = reinterpret_cast<int32_t>(ext.address());
+ value_.immediate = reinterpret_cast<int32_t>(ext.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Immediate::Immediate(Label* internal_offset) {
- x_ = reinterpret_cast<int32_t>(internal_offset);
+ value_.immediate = reinterpret_cast<int32_t>(internal_offset);
rmode_ = RelocInfo::INTERNAL_REFERENCE;
}
-
-Immediate::Immediate(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- x_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
+Immediate::Immediate(Handle<HeapObject> handle) {
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
Immediate::Immediate(Smi* value) {
- x_ = reinterpret_cast<intptr_t>(value);
+ value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
Immediate::Immediate(Address addr) {
- x_ = reinterpret_cast<int32_t>(addr);
+ value_.immediate = reinterpret_cast<int32_t>(addr);
rmode_ = RelocInfo::NONE32;
}
@@ -355,48 +344,36 @@ void Assembler::emit_q(uint64_t x) {
pc_ += sizeof(uint64_t);
}
-
-void Assembler::emit(Handle<Object> handle) {
- AllowDeferredHandleDereference heap_object_check;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- emit(reinterpret_cast<intptr_t>(handle.location()),
- RelocInfo::EMBEDDED_OBJECT);
- } else {
- // no relocation needed
- emit(reinterpret_cast<intptr_t>(obj));
- }
+void Assembler::emit(Handle<HeapObject> handle) {
+ emit(reinterpret_cast<intptr_t>(handle.address()),
+ RelocInfo::EMBEDDED_OBJECT);
}
-
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
- if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)
- && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
+ if (!RelocInfo::IsNone(rmode) && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
}
-
-void Assembler::emit(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id) {
- AllowDeferredHandleDereference embedding_raw_address;
- emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
+void Assembler::emit(Handle<Code> code, RelocInfo::Mode rmode) {
+ emit(reinterpret_cast<intptr_t>(code.address()), rmode);
}
void Assembler::emit(const Immediate& x) {
if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
- Label* label = reinterpret_cast<Label*>(x.x_);
+ Label* label = reinterpret_cast<Label*>(x.immediate());
emit_code_relative_offset(label);
return;
}
if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
- emit(x.x_);
+ if (x.is_heap_object_request()) {
+ RequestHeapObject(x.heap_object_request());
+ emit(0);
+ } else {
+ emit(x.immediate());
+ }
}
@@ -412,13 +389,13 @@ void Assembler::emit_code_relative_offset(Label* label) {
void Assembler::emit_b(Immediate x) {
DCHECK(x.is_int8() || x.is_uint8());
- uint8_t value = static_cast<uint8_t>(x.x_);
+ uint8_t value = static_cast<uint8_t>(x.immediate());
*pc_++ = value;
}
void Assembler::emit_w(const Immediate& x) {
DCHECK(RelocInfo::IsNone(x.rmode_));
- uint16_t value = static_cast<uint16_t>(x.x_);
+ uint16_t value = static_cast<uint16_t>(x.immediate());
reinterpret_cast<uint16_t*>(pc_)[0] = value;
pc_ += sizeof(uint16_t);
}
@@ -545,7 +522,7 @@ Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
Operand::Operand(Immediate imm) {
// [disp/r]
set_modrm(0, ebp);
- set_dispr(imm.x_, imm.rmode_);
+ set_dispr(imm.immediate(), imm.rmode_);
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 5ef07489e9..78c8cd816e 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -49,6 +49,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -56,6 +57,22 @@
namespace v8 {
namespace internal {
+Immediate Immediate::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Immediate(Smi::FromInt(smi));
+ Immediate result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
+}
+
+Immediate Immediate::EmbeddedCode(CodeStub* stub) {
+ Immediate result(0, RelocInfo::CODE_TARGET);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(stub);
+ return result;
+}
+
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
@@ -113,6 +130,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cross_compile) return;
if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
+ if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3;
if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
OSHasAVXSupport()) {
@@ -137,13 +155,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() {
printf(
- "SSE3=%d SSE4_1=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d POPCNT=%d "
- "ATOM=%d\n",
- CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
- CpuFeatures::IsSupported(AVX), CpuFeatures::IsSupported(FMA3),
- CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
- CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
- CpuFeatures::IsSupported(ATOM));
+ "SSE3=%d SSSE3=%d SSE4_1=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d "
+ "POPCNT=%d ATOM=%d\n",
+ CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSSE3),
+ CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(AVX),
+ CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(BMI1),
+ CpuFeatures::IsSupported(BMI2), CpuFeatures::IsSupported(LZCNT),
+ CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
}
@@ -298,6 +316,23 @@ Register Operand::reg() const {
return Register::from_code(buf_[0] & 0x07);
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ Memory::Object_Handle_at(pc) = object;
+ }
+}
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@@ -320,11 +355,13 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
}
-
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -338,7 +375,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(base::bits::IsPowerOfTwo32(m));
+ DCHECK(base::bits::IsPowerOfTwo(m));
int mask = m - 1;
int addr = pc_offset();
Nop((m - (addr & mask)) & mask);
@@ -459,7 +496,7 @@ void Assembler::push(const Immediate& x) {
EnsureSpace ensure_space(this);
if (x.is_int8()) {
EMIT(0x6a);
- EMIT(x.x_);
+ EMIT(x.immediate());
} else {
EMIT(0x68);
emit(x);
@@ -527,7 +564,7 @@ void Assembler::mov_b(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(src.x_));
+ EMIT(static_cast<int8_t>(src.immediate()));
}
@@ -560,8 +597,8 @@ void Assembler::mov_w(const Operand& dst, const Immediate& src) {
EMIT(0x66);
EMIT(0xC7);
emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(src.x_ & 0xff));
- EMIT(static_cast<int8_t>(src.x_ >> 8));
+ EMIT(static_cast<int8_t>(src.immediate() & 0xff));
+ EMIT(static_cast<int8_t>(src.immediate() >> 8));
}
@@ -578,8 +615,7 @@ void Assembler::mov(Register dst, const Immediate& x) {
emit(x);
}
-
-void Assembler::mov(Register dst, Handle<Object> handle) {
+void Assembler::mov(Register dst, Handle<HeapObject> handle) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(handle);
@@ -607,8 +643,7 @@ void Assembler::mov(const Operand& dst, const Immediate& x) {
emit(x);
}
-
-void Assembler::mov(const Operand& dst, Handle<Object> handle) {
+void Assembler::mov(const Operand& dst, Handle<HeapObject> handle) {
EnsureSpace ensure_space(this);
EMIT(0xC7);
emit_operand(eax, dst);
@@ -870,8 +905,7 @@ void Assembler::cmp(Register reg, int32_t imm32) {
emit_arith(7, Operand(reg), Immediate(imm32));
}
-
-void Assembler::cmp(Register reg, Handle<Object> handle) {
+void Assembler::cmp(Register reg, Handle<HeapObject> handle) {
EnsureSpace ensure_space(this);
emit_arith(7, Operand(reg), Immediate(handle));
}
@@ -894,8 +928,7 @@ void Assembler::cmp(const Operand& op, const Immediate& imm) {
emit_arith(7, op, imm);
}
-
-void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+void Assembler::cmp(const Operand& op, Handle<HeapObject> handle) {
EnsureSpace ensure_space(this);
emit_arith(7, op, Immediate(handle));
}
@@ -1304,7 +1337,7 @@ void Assembler::test_b(Register reg, Immediate imm8) {
EMIT(0xA8);
emit_b(imm8);
} else if (reg.is_byte_register()) {
- emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.x_));
+ emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.immediate()));
} else {
EMIT(0x66);
EMIT(0xF7);
@@ -1580,17 +1613,19 @@ int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
}
-
-void Assembler::call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode)
|| rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
- emit(code, rmode, ast_id);
+ emit(code, rmode);
}
+void Assembler::call(CodeStub* stub) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xE8);
+ emit(Immediate::EmbeddedCode(stub));
+}
void Assembler::jmp(Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
@@ -2672,8 +2707,26 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::pshufb(XMMRegister dst, const Operand& src) {
+ DCHECK(IsEnabled(SSSE3));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x38);
+ EMIT(0x00);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x70);
+ emit_sse_operand(dst, src);
+ EMIT(shuffle);
+}
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2682,6 +2735,27 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EMIT(shuffle);
}
+void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t offset) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x14);
+ emit_sse_operand(src, dst);
+ EMIT(offset);
+}
+
+void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t offset) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x15);
+ emit_sse_operand(src, dst);
+ EMIT(offset);
+}
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
@@ -2694,6 +2768,17 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
+void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x20);
+ emit_sse_operand(dst, src);
+ EMIT(offset);
+}
+
void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t offset) {
DCHECK(is_uint8(offset));
EnsureSpace ensure_space(this);
@@ -2873,6 +2958,49 @@ void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
EMIT(imm8);
}
+void Assembler::vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+ vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
+ EMIT(shuffle);
+}
+
+void Assembler::vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+ vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
+ EMIT(shuffle);
+}
+
+void Assembler::vpextrb(const Operand& dst, XMMRegister src, int8_t offset) {
+ vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
+void Assembler::vpextrw(const Operand& dst, XMMRegister src, int8_t offset) {
+ vinstr(0x15, src, xmm0, dst, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
+void Assembler::vpextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+ vinstr(0x16, src, xmm0, dst, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
+void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset) {
+ vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
+void Assembler::vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset) {
+ vinstr(0xC4, dst, src1, src2, k66, k0F, kWIG);
+ EMIT(offset);
+}
+
+void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset) {
+ vinstr(0x22, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(offset);
+}
+
void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
@@ -3015,9 +3143,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -3074,7 +3200,7 @@ void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
if (x.is_int8()) {
EMIT(0x83); // using a sign-extended 8-bit immediate.
emit_operand(ireg, dst);
- EMIT(x.x_ & 0xFF);
+ EMIT(x.immediate() & 0xFF);
} else if (dst.is_reg(eax)) {
EMIT((sel << 3) | 0x05); // short form if the destination is eax.
emit(x);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index cbb8ba2761..27c53ae951 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -262,7 +262,6 @@ enum RoundingMode {
kRoundToZero = 0x3
};
-
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -270,33 +269,61 @@ class Immediate BASE_EMBEDDED {
public:
inline explicit Immediate(int x);
inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
+ inline explicit Immediate(Handle<HeapObject> handle);
inline explicit Immediate(Smi* value);
inline explicit Immediate(Address addr);
inline explicit Immediate(Address x, RelocInfo::Mode rmode);
+ static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Immediate EmbeddedCode(CodeStub* code);
+
static Immediate CodeRelativeOffset(Label* label) {
return Immediate(label);
}
- bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
+ bool is_heap_object_request() const {
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(is_heap_object_request());
+ return value_.heap_object_request;
+ }
+
+ int immediate() const {
+ DCHECK(!is_heap_object_request());
+ return value_.immediate;
+ }
+
+ bool is_zero() const { return RelocInfo::IsNone(rmode_) && immediate() == 0; }
bool is_int8() const {
- return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
+ return RelocInfo::IsNone(rmode_) && i::is_int8(immediate());
}
bool is_uint8() const {
- return v8::internal::is_uint8(x_) && RelocInfo::IsNone(rmode_);
+ return RelocInfo::IsNone(rmode_) && i::is_uint8(immediate());
}
bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
+ return RelocInfo::IsNone(rmode_) && i::is_int16(immediate());
}
+
bool is_uint16() const {
- return v8::internal::is_uint16(x_) && RelocInfo::IsNone(rmode_);
+ return RelocInfo::IsNone(rmode_) && i::is_uint16(immediate());
}
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
inline explicit Immediate(Label* value);
- int x_;
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request;
+ int immediate;
+ } value_;
+ bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Operand;
@@ -369,13 +396,11 @@ class Operand BASE_EMBEDDED {
}
static Operand ForCell(Handle<Cell> cell) {
- AllowDeferredHandleDereference embedding_raw_address;
- return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::CELL);
+ return Operand(reinterpret_cast<int32_t>(cell.address()), RelocInfo::CELL);
}
static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
- return Operand(base, imm.x_, imm.rmode_);
+ return Operand(base, imm.value_.immediate, imm.rmode_);
}
// Returns true if this Operand is a wrapper for the specified register.
@@ -404,7 +429,6 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode_;
friend class Assembler;
- friend class MacroAssembler;
};
@@ -488,12 +512,12 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
- virtual ~Assembler() { }
+ virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
@@ -616,11 +640,11 @@ class Assembler : public AssemblerBase {
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
+ void mov(Register dst, Handle<HeapObject> handle);
void mov(Register dst, const Operand& src);
void mov(Register dst, Register src);
void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
+ void mov(const Operand& dst, Handle<HeapObject> handle);
void mov(const Operand& dst, Register src);
void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
@@ -693,13 +717,13 @@ class Assembler : public AssemblerBase {
void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
void cmpw(const Operand& dst, Register src);
void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg, Handle<HeapObject> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, Register reg);
void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<Object> handle);
+ void cmp(const Operand& op, Handle<HeapObject> handle);
void dec_b(Register dst);
void dec_b(const Operand& dst);
@@ -841,9 +865,8 @@ class Assembler : public AssemblerBase {
void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
- void call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
+ void call(Handle<Code> code, RelocInfo::Mode rmode);
+ void call(CodeStub* stub);
// Jumps
// unconditional jump to L
@@ -1105,11 +1128,37 @@ class Assembler : public AssemblerBase {
void psllq(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+
+ // pshufb is an SSSE3 instruction
+ void pshufb(XMMRegister dst, XMMRegister src) { pshufb(dst, Operand(src)); }
+ void pshufb(XMMRegister dst, const Operand& src);
+ void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ pshuflw(dst, Operand(src), shuffle);
+ }
+ void pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ pshufd(dst, Operand(src), shuffle);
+ }
+ void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+
+ void pextrb(Register dst, XMMRegister src, int8_t offset) {
+ pextrb(Operand(dst), src, offset);
+ }
+ void pextrb(const Operand& dst, XMMRegister src, int8_t offset);
+ // Use SSE4_1 encoding for pextrw reg, xmm, imm8 for consistency
+ void pextrw(Register dst, XMMRegister src, int8_t offset) {
+ pextrw(Operand(dst), src, offset);
+ }
+ void pextrw(const Operand& dst, XMMRegister src, int8_t offset);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+
+ void pinsrb(XMMRegister dst, Register src, int8_t offset) {
+ pinsrb(dst, Operand(src), offset);
+ }
+ void pinsrb(XMMRegister dst, const Operand& src, int8_t offset);
void pinsrw(XMMRegister dst, Register src, int8_t offset) {
pinsrw(dst, Operand(src), offset);
}
@@ -1362,6 +1411,53 @@ class Assembler : public AssemblerBase {
void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8);
+ void vpshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vpshufb(dst, src1, Operand(src2));
+ }
+ void vpshufb(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vinstr(0x00, dst, src1, src2, k66, k0F38, kW0);
+ }
+ void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ vpshuflw(dst, Operand(src), shuffle);
+ }
+ void vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void vpshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ vpshufd(dst, Operand(src), shuffle);
+ }
+ void vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+
+ void vpextrb(Register dst, XMMRegister src, int8_t offset) {
+ vpextrb(Operand(dst), src, offset);
+ }
+ void vpextrb(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrw(Register dst, XMMRegister src, int8_t offset) {
+ vpextrw(Operand(dst), src, offset);
+ }
+ void vpextrw(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrd(Register dst, XMMRegister src, int8_t offset) {
+ vpextrd(Operand(dst), src, offset);
+ }
+ void vpextrd(const Operand& dst, XMMRegister src, int8_t offset);
+
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2,
+ int8_t offset) {
+ vpinsrb(dst, src1, Operand(src2), offset);
+ }
+ void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset);
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2,
+ int8_t offset) {
+ vpinsrw(dst, src1, Operand(src2), offset);
+ }
+ void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset);
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2,
+ int8_t offset) {
+ vpinsrd(dst, src1, Operand(src2), offset);
+ }
+ void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t offset);
+
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
}
@@ -1375,6 +1471,15 @@ class Assembler : public AssemblerBase {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vmovd(XMMRegister dst, Register src) { vmovd(dst, Operand(src)); }
+ void vmovd(XMMRegister dst, const Operand& src) {
+ vinstr(0x6E, dst, xmm0, src, k66, k0F, kWIG);
+ }
+ void vmovd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+ void vmovd(const Operand& dst, XMMRegister src) {
+ vinstr(0x7E, src, xmm0, dst, k66, k0F, kWIG);
+ }
+
// BMI instruction
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
@@ -1635,13 +1740,9 @@ class Assembler : public AssemblerBase {
// code emission
void GrowBuffer();
inline void emit(uint32_t x);
- inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
- inline void emit(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
+ inline void emit(Handle<HeapObject> handle);
+ inline void emit(uint32_t x, RelocInfo::Mode rmode);
+ inline void emit(Handle<Code> code, RelocInfo::Mode rmode);
inline void emit(const Immediate& x);
inline void emit_b(Immediate x);
inline void emit_w(const Immediate& x);
@@ -1709,6 +1810,19 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
+
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void RequestHeapObject(HeapObjectRequest request);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
};
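
A minimal sketch of the deferred heap-object flow described in the comment above; the function name and buffer size are illustrative, and this is not a real V8 call site:

// Illustrative sketch of emitting a deferred embedded heap number.
#include "src/ia32/assembler-ia32.h"

namespace v8 {
namespace internal {

void DeferredHeapObjectSketch(Isolate* isolate) {
  byte buffer[256];
  Assembler masm(isolate, buffer, sizeof(buffer));

  // 1.5 is not a Smi, so this emits a placeholder word and records a
  // HeapObjectRequest::kHeapNumber at the current pc offset.
  masm.mov(eax, Immediate::EmbeddedNumber(1.5));

  // Only now is an Isolate needed: AllocateAndInstallRequestedHeapObjects()
  // allocates the HeapNumber and patches the placeholder with its handle.
  CodeDesc desc;
  masm.GetCode(isolate, &desc);
}

}  // namespace internal
}  // namespace v8

Until GetCode() runs, the generated code contains only a placeholder word at the recorded pc offset, which is what allows the assembly phase itself to avoid allocating embedded heap objects.
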
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 6550d6e016..56f7d94ec8 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -34,28 +34,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- eax.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments
- for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetRegisterParameter(i));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ ret(0);
-}
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
@@ -946,13 +924,10 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1058,7 +1033,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
Label okay;
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
__ j(equal, &okay, Label::kNear);
@@ -1075,15 +1050,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set eax to
// contain the current pending exception, don't clobber it.
@@ -1133,7 +1108,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push marker in two places.
StackFrame::Type marker = type();
__ push(Immediate(StackFrame::TypeToMarker(marker))); // marker
- ExternalReference context_address(Isolate::kContextAddress, isolate());
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
+ isolate());
__ push(Operand::StaticVariable(context_address)); // context
// Save callee-saved registers (C calling conventions).
__ push(edi);
@@ -1141,11 +1117,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ push(ebx);
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference c_entry_fp(IsolateAddressId::kCEntryFPAddress, isolate());
__ push(Operand::StaticVariable(c_entry_fp));
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ j(not_equal, &not_outermost_js, Label::kNear);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
@@ -1161,8 +1137,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
+ ExternalReference pending_exception(
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ mov(Operand::StaticVariable(pending_exception), eax);
__ mov(eax, Immediate(isolate()->factory()->exception()));
__ jmp(&exit);
@@ -1202,8 +1178,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
- __ pop(Operand::StaticVariable(ExternalReference(
- Isolate::kCEntryFPAddress, isolate())));
+ __ pop(Operand::StaticVariable(
+ ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
// Restore callee-saved registers (C calling conventions).
__ pop(ebx);
@@ -1437,34 +1413,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : left
- // -- eax : right
- // -- esp[0] : return address
- // -----------------------------------
-
- // Load ecx with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ mov(ecx, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_equal, kExpectedAllocationSite);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- isolate()->factory()->allocation_site_map());
- __ Assert(equal, kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -1919,7 +1867,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
NEGATIVE_LOOKUP);
- __ push(Immediate(Handle<Object>(name)));
+ __ push(Immediate(name));
__ push(Immediate(name->Hash()));
__ CallStub(&stub);
__ test(r0, r0);
@@ -2118,8 +2066,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label object_is_black, need_incremental, need_incremental_pop_object;
+ Label need_incremental, need_incremental_pop_object;
+#ifndef V8_CONCURRENT_MARKING
+ Label object_is_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
@@ -2137,6 +2087,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&object_is_black);
+#endif
// Get the value from the slot.
__ mov(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -2188,19 +2139,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ mov(ebx, MemOperand(ebp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(ecx);
- int additional_offset =
- function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
- __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
- __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ }
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -2252,8 +2195,8 @@ static void CreateArrayDispatch(MacroAssembler* masm,
mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -2280,24 +2223,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// edi - constructor?
// esp[0] - return address
// esp[4] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ test_b(edx, Immediate(1));
- __ j(not_zero, &normal_sequence);
- }
-
- // look at the first argument
- __ mov(ecx, Operand(esp, kPointerSize));
- __ test(ecx, ecx);
- __ j(zero, &normal_sequence);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2307,13 +2238,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ // is the low bit set? If so, we are holey and that is good.
+ Label normal_sequence;
+ __ test_b(edx, Immediate(1));
+ __ j(not_zero, &normal_sequence);
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry.
__ inc(edx);
@@ -2329,12 +2259,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
- Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ add(
+ FieldOperand(ebx, AllocationSite::kTransitionInfoOrBoilerplateOffset),
+ Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -2355,13 +2286,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2376,7 +2307,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2443,7 +2374,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &no_info);
// Only look at the lower 16 bits of the transition info.
- __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
+ __ mov(edx,
+ FieldOperand(ebx, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(edx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
@@ -2532,21 +2464,21 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ cmp(ecx, Immediate(PACKED_ELEMENTS));
__ j(equal, &done);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ cmp(ecx, Immediate(HOLEY_ELEMENTS));
__ Assert(equal,
kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ cmp(ecx, Immediate(PACKED_ELEMENTS));
__ j(equal, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
// Generates an Operand for saving parameters after PrepareCallApiFunction.
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 649e2ccf16..8b0a3e4ebb 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -293,7 +293,6 @@ class RecordWriteStub: public PlatformCodeStub {
}
}
UNREACHABLE();
- return no_reg;
}
friend class RecordWriteStub;
};
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 02a16de85f..366359f543 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -55,7 +55,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
}
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
@@ -468,7 +468,7 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
MemMoveEmitPopAndReturn(&masm);
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 1f6785352a..e1a518b4eb 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -166,23 +166,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(eax.code(), params);
- output_frame->SetRegister(ebx.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@@ -213,7 +196,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ pushad();
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference c_entry_fp_address(IsolateAddressId::kCEntryFPAddress,
+ isolate());
__ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
const int kSavedRegistersAreaSize =
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 36acd1e05d..d4f84786c0 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -738,6 +738,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x00:
+ AppendToBuffer("vpshufb %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x99:
AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
@@ -817,6 +822,48 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
default:
UnimplementedInstruction();
}
+ } else if (vex_66() && vex_0f3a()) {
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x14:
+ AppendToBuffer("vpextrb ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
+ *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
+ case 0x15:
+ AppendToBuffer("vpextrw ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
+ *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
+ case 0x16:
+ AppendToBuffer("vpextrd ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
+ *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
+ case 0x20:
+ AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
+ case 0x22:
+ AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
+ default:
+ UnimplementedInstruction();
+ }
} else if (vex_f2() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
@@ -851,6 +898,12 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x70:
+ AppendToBuffer("vpshuflw %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
default:
UnimplementedInstruction();
}
@@ -1101,6 +1154,16 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x6E:
+ AppendToBuffer("vmovd %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ break;
+ case 0x70:
+ AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
case 0x71:
AppendToBuffer("vps%sw %s,%s", sf_str[regop / 2],
NameOfXMMRegister(vvvv), NameOfXMMRegister(rm));
@@ -1113,6 +1176,18 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current++;
AppendToBuffer(",%u", *current++);
break;
+ case 0x7E:
+ AppendToBuffer("vmovd ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0xC4:
+ AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ current++;
+ break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
@@ -1810,6 +1885,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
switch (op) {
+ case 0x00:
+ AppendToBuffer("pshufb %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ break;
case 0x17:
AppendToBuffer("ptest %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
@@ -1847,16 +1926,33 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x14) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pextrb ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
+ *reinterpret_cast<int8_t*>(data));
+ data++;
+ } else if (*data == 0x15) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pextrw ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
+ *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (*data == 0x16) {
data++;
int mod, regop, rm;
- get_modrm(*data, &mod, &rm, &regop);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pextrd %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pextrd ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
+ *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (*data == 0x17) {
data++;
int mod, regop, rm;
@@ -1867,16 +1963,22 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x20) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pinsrb %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pinsrd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm),
- static_cast<int>(imm8));
- data += 2;
+ AppendToBuffer("pinsrd %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else {
UnimplementedInstruction();
}
@@ -1942,12 +2044,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pshufd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
+ AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else if (*data == 0x90) {
data++;
AppendToBuffer("nop"); // 2 byte nop.
@@ -2127,6 +2227,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
+ } else if (b2 == 0x70) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pshuflw %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ data++;
} else {
const char* mnem = "?";
switch (b2) {
@@ -2407,7 +2515,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameInCode(byte* addr) const {
// IA32 does not embed debug strings at the moment.
UNREACHABLE();
- return "";
}
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 255bdbba01..4e5e1057d1 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -18,15 +18,6 @@ Register JavaScriptFrame::fp_register() { return ebp; }
Register JavaScriptFrame::context_register() { return esi; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return ebp; }
-Register StubFailureTrampolineFrame::context_register() { return esi; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index b8547d0194..8b8dae5220 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -47,6 +47,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return edi; }
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return eax; }
+
const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
@@ -154,6 +156,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // ebx : arguments list (FixedArray)
+ // ecx : arguments list length (untagged)
+ Register registers[] = {edi, eax, ebx, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
@@ -163,6 +175,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // ebx : the object to spread
+ Register registers[] = {edi, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // edi : the target to call
+ // ebx : the arguments list
+ Register registers[] = {edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // edx : the new target
+ // ebx : arguments list (FixedArray)
+ // ecx : arguments list length (untagged)
+ Register registers[] = {edi, edx, eax, ebx, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
@@ -173,6 +213,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // edx : the new target
+ // ebx : the object to spread
+ Register registers[] = {edi, edx, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // edi : the target to call
+ // edx : the new target
+ // ebx : the arguments list
+ Register registers[] = {edi, edx, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
@@ -372,8 +431,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
eax, // the value to pass to the generator
ebx, // the JSGeneratorObject to resume
- edx, // the resume mode (tagged)
- ecx // SuspendFlags (tagged)
+ edx // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a87b2425fb..d2fd5d5823 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -23,18 +23,11 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- isolate_(isolate),
+ : TurboAssembler(isolate, buffer, size, create_code_object),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
}
@@ -73,7 +66,12 @@ void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
- mov(destination, isolate()->heap()->root_handle(index));
+ Handle<Object> object = isolate()->heap()->root_handle(index);
+ if (object->IsHeapObject()) {
+ mov(destination, Handle<HeapObject>::cast(object));
+ } else {
+ mov(destination, Immediate(Smi::cast(*object)));
+ }
return;
}
ExternalReference roots_array_start =
@@ -111,20 +109,29 @@ void MacroAssembler::CompareRoot(Register with,
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- cmp(with, isolate()->heap()->root_handle(index));
+ Handle<Object> object = isolate()->heap()->root_handle(index);
+ if (object->IsHeapObject()) {
+ cmp(with, Handle<HeapObject>::cast(object));
+ } else {
+ cmp(with, Immediate(Smi::cast(*object)));
+ }
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- cmp(with, isolate()->heap()->root_handle(index));
+ Handle<Object> object = isolate()->heap()->root_handle(index);
+ if (object->IsHeapObject()) {
+ cmp(with, Handle<HeapObject>::cast(object));
+ } else {
+ cmp(with, Immediate(Smi::cast(*object)));
+ }
}
-
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Push(isolate()->heap()->root_handle(index));
+ PushObject(isolate()->heap()->root_handle(index));
}
#define REG(Name) \
@@ -265,12 +272,17 @@ void MacroAssembler::ClampUint8(Register reg) {
bind(&done);
}
+void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
+ Register input_reg, int offset) {
+ CallStubDelayed(
+ new (zone) DoubleToIStub(nullptr, input_reg, result_reg, offset, true));
+}
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
- call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallStub(&stub);
}
@@ -383,8 +395,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
bind(&done);
}
-
-void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
+void TurboAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
@@ -650,13 +661,12 @@ void MacroAssembler::MaybeDropFrames() {
RelocInfo::CODE_TARGET);
}
-void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
xorps(dst, dst);
cvtsi2sd(dst, src);
}
-
-void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
+void TurboAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
Label msb_set_src;
Label jmp_return;
test(src, src);
@@ -674,7 +684,7 @@ void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
bind(&jmp_return);
}
-void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
if (shift >= 32) {
mov(high, low);
shl(high, shift - 32);
@@ -685,7 +695,7 @@ void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
}
}
-void MacroAssembler::ShlPair_cl(Register high, Register low) {
+void TurboAssembler::ShlPair_cl(Register high, Register low) {
shld_cl(high, low);
shl_cl(low);
Label done;
@@ -696,7 +706,7 @@ void MacroAssembler::ShlPair_cl(Register high, Register low) {
bind(&done);
}
-void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
if (shift >= 32) {
mov(low, high);
shr(low, shift - 32);
@@ -707,7 +717,7 @@ void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
}
}
-void MacroAssembler::ShrPair_cl(Register high, Register low) {
+void TurboAssembler::ShrPair_cl(Register high, Register low) {
shrd_cl(low, high);
shr_cl(high);
Label done;
@@ -718,7 +728,7 @@ void MacroAssembler::ShrPair_cl(Register high, Register low) {
bind(&done);
}
-void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
+void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
if (shift >= 32) {
mov(low, high);
sar(low, shift - 32);
@@ -729,7 +739,7 @@ void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
}
}
-void MacroAssembler::SarPair_cl(Register high, Register low) {
+void TurboAssembler::SarPair_cl(Register high, Register low) {
shrd_cl(low, high);
sar_cl(high);
Label done;
@@ -743,13 +753,13 @@ void MacroAssembler::SarPair_cl(Register high, Register low) {
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
if (!RelocInfo::IsNone(x.rmode_)) return false;
- return !is_intn(x.x_, kMaxImmediateBits);
+ return !is_intn(x.immediate(), kMaxImmediateBits);
}
void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Move(dst, Immediate(x.x_ ^ jit_cookie()));
+ Move(dst, Immediate(x.immediate() ^ jit_cookie()));
xor_(dst, jit_cookie());
} else {
Move(dst, x);
@@ -759,7 +769,7 @@ void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
void MacroAssembler::SafePush(const Immediate& x) {
if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- push(Immediate(x.x_ ^ jit_cookie()));
+ push(Immediate(x.immediate() ^ jit_cookie()));
xor_(Operand(esp, 0), Immediate(jit_cookie()));
} else {
push(x);
@@ -821,6 +831,16 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAFixedArray);
+ Push(object);
+ CmpObjectType(object, FIXED_ARRAY_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAFixedArray);
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -845,8 +865,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
test(object, Immediate(kSmiTagMask));
@@ -859,18 +878,13 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
// Load map
mov(map, FieldOperand(object, HeapObject::kMapOffset));
- Label async, do_check;
- test(flags, Immediate(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
- j(not_zero, &async, Label::kNear);
-
+ Label do_check;
// Check if JSGeneratorObject
CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
- jmp(&do_check, Label::kNear);
+ j(equal, &do_check, Label::kNear);
- bind(&async);
// Check if JSAsyncGeneratorObject
CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
- jmp(&do_check, Label::kNear);
bind(&do_check);
Pop(object);
@@ -900,19 +914,19 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::StubPrologue(StackFrame::Type type) {
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
push(ebp); // Caller's frame pointer.
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
}
-void MacroAssembler::Prologue(bool code_pre_aging) {
+void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(this,
kNoCodeAgeSequenceLength);
if (code_pre_aging) {
- // Pre-age the code.
+ // Pre-age the code.
call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
+ RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
} else {
push(ebp); // Caller's frame pointer.
@@ -928,15 +942,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
mov(vector, FieldOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on ia32.
- UNREACHABLE();
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
@@ -949,8 +955,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
}
}
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
@@ -995,9 +1000,12 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
+ ExternalReference c_entry_fp_address(IsolateAddressId::kCEntryFPAddress,
+ isolate());
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
+ isolate());
+ ExternalReference c_function_address(IsolateAddressId::kCFunctionAddress,
+ isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
mov(Operand::StaticVariable(c_function_address), ebx);
@@ -1022,7 +1030,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
+ DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
and_(esp, -kFrameAlignment);
}
@@ -1081,7 +1089,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
+ isolate());
if (restore_context) {
mov(esi, Operand::StaticVariable(context_address));
}
@@ -1090,7 +1099,7 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
+ ExternalReference c_entry_fp_address(IsolateAddressId::kCEntryFPAddress,
isolate());
mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
}
@@ -1110,7 +1119,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
+ isolate());
push(Operand::StaticVariable(handler_address));
// Set this new handler as the current one.
@@ -1120,7 +1130,8 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
+ isolate());
pop(Operand::StaticVariable(handler_address));
add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -1226,7 +1237,6 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1276,10 +1286,7 @@ void MacroAssembler::Allocate(int object_size,
cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
- }
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
if (top_reg.is(result)) {
sub(result, Immediate(object_size - kHeapObjectTag));
@@ -1301,8 +1308,6 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
- DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1375,7 +1380,6 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1426,61 +1430,9 @@ void MacroAssembler::Allocate(Register object_size,
DCHECK(kHeapObjectTag == 1);
inc(result);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- UpdateAllocationTopHelper(result_end, scratch, flags);
- }
-}
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register result_end, AllocationFlags flags) {
- DCHECK(!result.is(result_end));
- // Load address of new object into result.
- LoadAllocationTopHelper(result, no_reg, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- lea(result_end, Operand(result, object_size));
- UpdateAllocationTopHelper(result_end, no_reg, flags);
-
- DCHECK(kHeapObjectTag == 1);
- inc(result);
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, AllocationFlags flags) {
- DCHECK(!result.is(result_end));
- // Load address of new object into result.
- LoadAllocationTopHelper(result, no_reg, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- lea(result_end, Operand(result, object_size, times_1, 0));
- UpdateAllocationTopHelper(result_end, no_reg, flags);
-
- DCHECK(kHeapObjectTag == 1);
- inc(result);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
-
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
@@ -1513,7 +1465,7 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch);
mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ mov(FieldOperand(result, JSObject::kPropertiesOrHashOffset), scratch);
mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
mov(FieldOperand(result, JSValue::kValueOffset), value);
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -1537,7 +1489,7 @@ void MacroAssembler::BooleanBitTest(Register object,
int field_offset,
int bit_index) {
bit_index += kSmiTagSize + kSmiShiftSize;
- DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
+ DCHECK(base::bits::IsPowerOfTwo(kBitsPerByte));
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
test_b(FieldOperand(object, field_offset + byte_index),
@@ -1557,20 +1509,22 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+void MacroAssembler::CallStub(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+void TurboAssembler::CallStubDelayed(CodeStub* stub) {
+ DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ call(stub);
+}
void MacroAssembler::TailCallStub(CodeStub* stub) {
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -1591,6 +1545,17 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
CallStub(&ces);
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Move(eax, Immediate(f->nargs));
+ mov(ebx, Immediate(ExternalReference(f, isolate())));
+ CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
+}
void MacroAssembler::CallExternalReference(ExternalReference ref,
int num_arguments) {
@@ -1634,7 +1599,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::PrepareForTailCall(
+void TurboAssembler::PrepareForTailCall(
const ParameterCount& callee_args_count, Register caller_args_count_reg,
Register scratch0, Register scratch1, ReturnAddressState ra_state,
int number_of_temp_values_after_return_address) {
@@ -1767,8 +1732,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call(adaptor, RelocInfo::CODE_TARGET);
@@ -1878,7 +1842,6 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
- SmiUntag(ebx);
ParameterCount expected(ebx);
InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
@@ -1905,7 +1868,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- LoadHeapObject(edi, function);
+ Move(edi, function);
InvokeFunction(edi, expected, actual, flag, call_wrapper);
}
@@ -1995,17 +1958,17 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- mov(result, object);
-}
-
-
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
cmp(reg, object);
}
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { Push(object); }
+void MacroAssembler::PushObject(Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ Push(Handle<HeapObject>::cast(object));
+ } else {
+ Push(Smi::cast(*object));
+ }
+}
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, cell);
@@ -2019,13 +1982,9 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
JumpIfSmi(value, miss);
}
+void TurboAssembler::Ret() { ret(0); }
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
+void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
@@ -2043,29 +2002,30 @@ void MacroAssembler::Drop(int stack_elements) {
}
}
-
-void MacroAssembler::Move(Register dst, Register src) {
+void TurboAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
mov(dst, src);
}
}
-
-void MacroAssembler::Move(Register dst, const Immediate& x) {
- if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
+void TurboAssembler::Move(Register dst, const Immediate& x) {
+ if (!x.is_heap_object_request() && x.is_zero() &&
+ RelocInfo::IsNone(x.rmode())) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else {
mov(dst, x);
}
}
-
-void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+void TurboAssembler::Move(const Operand& dst, const Immediate& x) {
mov(dst, x);
}
+void TurboAssembler::Move(Register dst, Handle<HeapObject> object) {
+ mov(dst, object);
+}
-void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
+void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
@@ -2089,8 +2049,7 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
}
}
-
-void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
+void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
pxor(dst, dst);
} else {
@@ -2127,10 +2086,85 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
+void TurboAssembler::Pxor(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpxor(dst, dst, src);
+ } else {
+ pxor(dst, src);
+ }
+}
+
+void TurboAssembler::Pshuflw(XMMRegister dst, const Operand& src,
+ uint8_t shuffle) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpshuflw(dst, src, shuffle);
+ } else {
+ pshuflw(dst, src, shuffle);
+ }
+}
+
+void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
+ uint8_t shuffle) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpshufd(dst, src, shuffle);
+ } else {
+ pshufd(dst, src, shuffle);
+ }
+}
-void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Pshufb(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpshufb(dst, dst, src);
+ return;
+ }
+ if (CpuFeatures::IsSupported(SSSE3)) {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrb(dst, src, imm8);
+ return;
+ }
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrb(dst, src, imm8);
+ return;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrw(dst, src, imm8);
+ return;
+ }
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrw(dst, src, imm8);
+ return;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
if (imm8 == 0) {
- movd(dst, src);
+ Movd(dst, src);
+ return;
+ }
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrd(dst, src, imm8);
return;
}
if (CpuFeatures::IsSupported(SSE4_1)) {
@@ -2143,7 +2177,7 @@ void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
movd(dst, xmm0);
}
-void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
+void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
bool is_64_bits) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -2171,8 +2205,7 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
}
}
-
-void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
+void TurboAssembler::Lzcnt(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcnt(dst, src);
@@ -2186,8 +2219,7 @@ void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
-
-void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
+void TurboAssembler::Tzcnt(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcnt(dst, src);
@@ -2200,8 +2232,7 @@ void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
bind(&not_zero_src);
}
-
-void MacroAssembler::Popcnt(Register dst, const Operand& src) {
+void TurboAssembler::Popcnt(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcnt(dst, src);
@@ -2273,14 +2304,15 @@ void MacroAssembler::DecrementCounter(Condition cc,
}
}
-
-void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
+void TurboAssembler::AssertUnreachable(BailoutReason reason) {
+ if (emit_debug_code()) Abort(reason);
+}
-
-void MacroAssembler::Check(Condition cc, BailoutReason reason) {
+void TurboAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L);
Abort(reason);
@@ -2288,12 +2320,11 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason) {
bind(&L);
}
-
-void MacroAssembler::CheckStackAlignment() {
+void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
@@ -2303,8 +2334,7 @@ void MacroAssembler::CheckStackAlignment() {
}
}
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
@@ -2318,13 +2348,10 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
Move(edx, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
+ if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
@@ -2453,15 +2480,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiUntag(index);
}
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
@@ -2469,17 +2495,14 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
mov(eax, Immediate(function));
CallCFunction(eax, num_arguments);
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
@@ -2545,14 +2568,9 @@ CodePatcher::~CodePatcher() {
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+ Condition cc, Label* condition_met,
+ Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
if (scratch.is(object)) {
and_(scratch, Immediate(~Page::kPageAlignmentMask));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e754a87128..42b910788c 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -51,16 +51,242 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg8 = no_reg);
#endif
+class TurboAssembler : public Assembler {
+ public:
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
+
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+
+ Isolate* isolate() const { return isolate_; }
+
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on ia32.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason);
+
+ // Like Assert(), but without condition.
+ // Use --debug_code to enable.
+ void AssertUnreachable(BailoutReason reason);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason);
+
+ // Check that the stack is aligned.
+ void CheckStackAlignment();
+
+ // Nop, because ia32 does not have a root register.
+ void InitializeRootRegister() {}
+
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& x);
+
+ void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+
+ // Move if the registers are not identical.
+ void Move(Register target, Register source);
+
+ void Move(const Operand& dst, const Immediate& x);
+
+ // Move an immediate into an XMM register.
+ void Move(XMMRegister dst, uint32_t src);
+ void Move(XMMRegister dst, uint64_t src);
+ void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
+ void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+
+ void Move(Register dst, Handle<HeapObject> handle);
+
+ void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+ void Call(Label* target) { call(target); }
+
+ inline bool AllowThisStubCall(CodeStub* stub);
+ void CallStubDelayed(CodeStub* stub);
+
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ // Jump the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
+ }
+ // Jump if the operand is a smi.
+ inline void JumpIfSmi(Operand value, Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
+ }
+
+ void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
+
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // |ra_state| defines whether return address is already pushed to stack or
+ // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed. |number_of_temp_values_after_return_address| specifies
+ // the number of words pushed to the stack after the return address. This is
+ // to allow "allocation" of scratch registers that this function requires
+ // by saving their values on the stack.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1, ReturnAddressState ra_state,
+ int number_of_temp_values_after_return_address);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in esp[0], esp[4],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
+ void ShlPair(Register high, Register low, uint8_t imm8);
+ void ShlPair_cl(Register high, Register low);
+ void ShrPair(Register high, Register low, uint8_t imm8);
+ void ShrPair_cl(Register high, Register src);
+ void SarPair(Register high, Register low, uint8_t imm8);
+ void SarPair_cl(Register high, Register low);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue(bool code_pre_aging);
+
+ void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
+ void Lzcnt(Register dst, const Operand& src);
+
+ void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
+ void Tzcnt(Register dst, const Operand& src);
+
+ void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
+ void Popcnt(Register dst, const Operand& src);
+
+ void Ret();
+
+ // Return and drop arguments from stack, where the number of arguments
+ // may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
+ void Pxor(XMMRegister dst, XMMRegister src) { Pxor(dst, Operand(src)); }
+ void Pxor(XMMRegister dst, const Operand& src);
+
+ void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ Pshuflw(dst, Operand(src), shuffle);
+ }
+ void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ Pshufd(dst, Operand(src), shuffle);
+ }
+ void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+
+// SSE/SSE2 instructions with AVX version.
+#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
+ void macro_name(dst_type dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, src); \
+ } else { \
+ name(dst, src); \
+ } \
+ }
+
+ AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
+ AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movd, movd, const Operand&, XMMRegister)
+
+#undef AVX_OP2_WITH_TYPE
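
  For reference, the first Movd instantiation above expands (modulo formatting) to a helper that picks the AVX encoding when available and otherwise falls back to the SSE2 form; this is a literal expansion of the macro, shown only for illustration:

      // Expansion of AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register).
      void Movd(XMMRegister dst, Register src) {
        if (CpuFeatures::IsSupported(AVX)) {
          CpuFeatureScope scope(this, AVX);
          vmovd(dst, src);   // AVX encoding
        } else {
          movd(dst, src);    // SSE2 fallback
        }
      }
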
+
+ // Non-SSE2 instructions.
+ void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
+ void Pshufb(XMMRegister dst, const Operand& src);
+
+ void Pextrb(Register dst, XMMRegister src, int8_t imm8);
+ void Pextrw(Register dst, XMMRegister src, int8_t imm8);
+ void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
+ bool is_64_bits = false) {
+ Pinsrd(dst, Operand(src), imm8, is_64_bits);
+ }
+ void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
+ bool is_64_bits = false);
+
+ void LoadUint32(XMMRegister dst, Register src) {
+ LoadUint32(dst, Operand(src));
+ }
+ void LoadUint32(XMMRegister dst, const Operand& src);
+
+ // Expression support
+  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+  // register, which hinders register renaming and makes dependence chains
+  // longer. So we use xorps to clear the dst register before cvtsi2sd to
+  // solve this issue.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
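
  The implementation lives in macro-assembler-ia32.cc; based on the comment above, the non-AVX path presumably amounts to something like the following sketch (inferred, not copied from the patch):

      void TurboAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
        xorps(dst, dst);     // break the false dependence on the old dst value
        cvtsi2sd(dst, src);  // only writes the low 64 bits of dst
      }
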
+
+ void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
+
+ void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
+ Register input_reg,
+ int offset = HeapNumber::kValueOffset -
+ kHeapObjectTag);
+
+ void Push(Register src) { push(src); }
+ void Push(const Operand& src) { push(src); }
+ void Push(Immediate value) { push(value); }
+ void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
+ void Push(Smi* smi) { Push(Immediate(smi)); }
+
+ private:
+ bool has_frame_ = false;
+ Isolate* const isolate_;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+};
+
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
+class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
int jit_cookie() const { return jit_cookie_; }
- Isolate* isolate() const { return isolate_; }
-
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
@@ -133,10 +359,6 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
void CheckPageFlagForMap(
Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
@@ -235,10 +457,6 @@ class MacroAssembler: public Assembler {
// Frame restart support
void MaybeDropFrames();
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue(bool code_pre_aging);
-
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -278,28 +496,15 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // Nop, because ia32 does not have a root register.
- void InitializeRootRegister() {}
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
void CmpHeapObject(Register reg, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, Immediate(object));
- }
- }
+ void PushObject(Handle<Object> object);
void CmpObject(Register reg, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
CmpHeapObject(reg, Handle<HeapObject>::cast(object));
} else {
- cmp(reg, Immediate(object));
+ cmp(reg, Immediate(Smi::cast(*object)));
}
}
@@ -312,19 +517,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // |ra_state| defines whether return address is already pushed to stack or
- // not. Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed. |number_of_temp_values_after_return_address| specifies
- // the number of words pushed to the stack after the return address. This is
- // to allow "allocation" of scratch registers that this function requires
- // by saving their values on the stack.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1, ReturnAddressState ra_state,
- int number_of_temp_values_after_return_address);
// Invoke the JavaScript function code by either calling or jumping.
@@ -353,22 +545,6 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Expression support
- // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
- // hinders register renaming and makes dependence chains longer. So we use
- // xorps to clear the dst register before cvtsi2sd to solve this issue.
- void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
- void Cvtsi2sd(XMMRegister dst, const Operand& src);
-
- void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
-
- void ShlPair(Register high, Register low, uint8_t imm8);
- void ShlPair_cl(Register high, Register low);
- void ShrPair(Register high, Register low, uint8_t imm8);
- void ShrPair_cl(Register high, Register src);
- void SarPair(Register high, Register low, uint8_t imm8);
- void SarPair_cl(Register high, Register low);
-
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeMove(Register dst, const Immediate& x);
@@ -425,35 +601,15 @@ class MacroAssembler: public Assembler {
STATIC_ASSERT(kSmiTagSize == 1);
add(reg, reg);
}
- void SmiUntag(Register reg) {
- sar(reg, kSmiTagSize);
- }
// Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, Label* is_smi) {
+ void UntagSmi(Register reg, Label* is_smi) {
STATIC_ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
STATIC_ASSERT(kSmiTag == 0);
j(not_carry, is_smi);
}
- void LoadUint32(XMMRegister dst, Register src) {
- LoadUint32(dst, Operand(src));
- }
- void LoadUint32(XMMRegister dst, const Operand& src);
-
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value, Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
  // Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
@@ -521,6 +677,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -528,9 +687,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register suspend_flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -574,14 +733,6 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register result_end,
- AllocationFlags flags);
- void FastAllocate(Register object_size, Register result, Register result_end,
- AllocationFlags flags);
-
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
@@ -614,7 +765,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void CallStub(CodeStub* stub);
// Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
@@ -646,23 +797,6 @@ class MacroAssembler: public Assembler {
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);
@@ -670,12 +804,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Utilities
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
@@ -698,63 +826,12 @@ class MacroAssembler: public Assembler {
// from the stack, clobbering only the esp register.
void Drop(int element_count);
- void Call(Label* target) { call(target); }
- void Call(Handle<Code> target, RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None()) {
- call(target, rmode, id);
- }
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
- void Push(Register src) { push(src); }
- void Push(const Operand& src) { push(src); }
- void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
void Pop(const Operand& dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
- // Non-SSE2 instructions.
- void Pextrd(Register dst, XMMRegister src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, int8_t imm8,
- bool is_64_bits = false) {
- Pinsrd(dst, Operand(src), imm8, is_64_bits);
- }
- void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
- bool is_64_bits = false);
-
- void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
- void Lzcnt(Register dst, const Operand& src);
-
- void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
- void Tzcnt(Register dst, const Operand& src);
-
- void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
- void Popcnt(Register dst, const Operand& src);
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Move a constant into a destination using the most efficient encoding.
- void Move(Register dst, const Immediate& x);
- void Move(const Operand& dst, const Immediate& x);
-
- // Move an immediate into an XMM register.
- void Move(XMMRegister dst, uint32_t src);
- void Move(XMMRegister dst, uint64_t src);
- void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
- void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
-
- void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
- void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
-
- // Push a handle value.
- void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Immediate(smi)); }
-
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
// Emit code for a truncating division by a constant. The dividend register is
// unchanged, the result is in edx, and eax gets clobbered.
void TruncatingDiv(Register dividend, int32_t divisor);
@@ -769,29 +846,6 @@ class MacroAssembler: public Assembler {
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // ---------------------------------------------------------------------------
// String utilities.
// Checks if both objects are sequential one-byte strings, and jumps to label
@@ -819,11 +873,6 @@ class MacroAssembler: public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
- void LeaveFrame(StackFrame::Type type);
-
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -842,11 +891,6 @@ class MacroAssembler: public Assembler {
Label* no_memento_found);
private:
- bool generating_stub_;
- bool has_frame_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
int jit_cookie_;
// Helper functions for generating invokes.
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 3581afece3..fa1291f6f3 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -5,3 +5,5 @@ ishell@chromium.org
jkummerow@chromium.org
mvstanton@chromium.org
verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 6508169558..f3d9f09ca4 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -34,8 +34,7 @@ Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
// Adding |header_size| with a separate IntPtrAdd rather than passing it
// into ElementOffsetFromIndex() allows it to be folded into a single
// [base, index, offset] indirect memory access on x64.
- Node* offset =
- ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+ Node* offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, SMI_PARAMETERS);
Node* feedback = Load(MachineType::AnyTagged(), vector,
IntPtrAdd(offset, IntPtrConstant(header_size)));
@@ -250,7 +249,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
GotoIfNot(
WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ SmiConstant(Isolate::kProtectorValid)),
miss);
exit_point->Return(UndefinedConstant());
}
@@ -408,8 +407,7 @@ void AccessorAssembler::HandleLoadICProtoHandlerCase(
GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
&validity_cell_check_done);
Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
miss);
Goto(&validity_cell_check_done);
@@ -712,8 +710,7 @@ void AccessorAssembler::HandleStoreICElementHandlerCase(
Comment("HandleStoreICElementHandlerCase");
Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
miss);
Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
@@ -742,8 +739,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
&validity_cell_check_done);
Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
miss);
Goto(&validity_cell_check_done);
@@ -1062,7 +1058,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
  // capacity even for a map that thinks it doesn't have any unused fields.
// Perform a bounds check to see if we actually have to grow the array.
Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
- Node* size = ElementOffsetFromIndex(length, FAST_ELEMENTS, mode,
+ Node* size = ElementOffsetFromIndex(length, PACKED_ELEMENTS, mode,
FixedArray::kHeaderSize);
GotoIf(UintPtrLessThan(offset, size), &done);
@@ -1070,9 +1066,8 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
// Grow properties array.
- ElementsKind kind = FAST_ELEMENTS;
DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
- FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+ FixedArrayBase::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
CSA_ASSERT(this,
@@ -1082,17 +1077,16 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
kMaxNumberOfDescriptors + JSObject::kFieldsAdded, mode),
mode));
- Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+ Node* new_properties = AllocatePropertyArray(new_capacity, mode);
- FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
- Heap::kUndefinedValueRootIndex, mode);
+ FillPropertyArrayWithUndefined(new_properties, length, new_capacity, mode);
// |new_properties| is guaranteed to be in new space, so we can skip
// the write barrier.
- CopyFixedArrayElements(kind, properties, new_properties, length,
- SKIP_WRITE_BARRIER, mode);
+ CopyPropertyArrayValues(properties, new_properties, length,
+ SKIP_WRITE_BARRIER, mode);
- StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+ StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
Comment("] Extend storage");
Goto(&done);
@@ -1195,20 +1189,20 @@ void AccessorAssembler::EmitElementLoad(
EmitFastElementsBoundsCheck(object, elements, intptr_index,
is_jsarray_condition, out_of_bounds);
int32_t kinds[] = {// Handled by if_fast_packed.
- FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
// Handled by if_fast_holey.
- FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
// Handled by if_fast_double.
- FAST_DOUBLE_ELEMENTS,
+ PACKED_DOUBLE_ELEMENTS,
// Handled by if_fast_holey_double.
- FAST_HOLEY_DOUBLE_ELEMENTS};
+ HOLEY_DOUBLE_ELEMENTS};
Label* labels[] = {// FAST_{SMI,}_ELEMENTS
&if_fast_packed, &if_fast_packed,
// FAST_HOLEY_{SMI,}_ELEMENTS
&if_fast_holey, &if_fast_holey,
- // FAST_DOUBLE_ELEMENTS
+ // PACKED_DOUBLE_ELEMENTS
&if_fast_double,
- // FAST_HOLEY_DOUBLE_ELEMENTS
+ // HOLEY_DOUBLE_ELEMENTS
&if_fast_holey_double};
Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
arraysize(kinds));
@@ -1469,7 +1463,7 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
}
void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
- Node* instance_type, Node* key,
+ Node* instance_type,
const LoadICParameters* p,
Label* slow,
UseStubCache use_stub_cache) {
@@ -1502,7 +1496,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
VARIABLE(var_name_index, MachineType::PointerRepresentation());
Label* notfound =
use_stub_cache == kUseStubCache ? &stub_cache : &lookup_prototype_chain;
- DescriptorLookup(key, descriptors, bitfield3, &if_descriptor_found,
+ DescriptorLookup(p->name, descriptors, bitfield3, &if_descriptor_found,
&var_name_index, notfound);
BIND(&if_descriptor_found);
@@ -1518,7 +1512,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Comment("stub cache probe for fast property load");
VARIABLE(var_handler, MachineRepresentation::kTagged);
Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name,
&found_handler, &var_handler, &stub_cache_miss);
BIND(&found_handler);
{
@@ -1544,7 +1538,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
VARIABLE(var_name_index, MachineType::PointerRepresentation());
Label dictionary_found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+ NameDictionaryLookup<NameDictionary>(properties, p->name, &dictionary_found,
&var_name_index,
&lookup_prototype_chain);
BIND(&dictionary_found);
@@ -1574,7 +1568,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
var_holder_map.Bind(receiver_map);
var_holder_instance_type.Bind(instance_type);
// Private symbols must not be looked up on the prototype chain.
- GotoIf(IsPrivateSymbol(key), &return_undefined);
+ GotoIf(IsPrivateSymbol(p->name), &return_undefined);
Goto(&loop);
BIND(&loop);
{
@@ -1590,7 +1584,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
var_holder_instance_type.Bind(proto_instance_type);
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
TryGetOwnProperty(p->context, receiver, proto, proto_map,
- proto_instance_type, key, &return_value, &var_value,
+ proto_instance_type, p->name, &return_value, &var_value,
&next_proto, &goto_slow);
// This trampoline and the next are required to appease Turbofan's
@@ -1776,7 +1770,8 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Comment("LoadIC_BytecodeHandler_noninlined");
// Call into the stub that implements the non-inlined parts of LoadIC.
- Callable ic = CodeFactory::LoadICInOptimizedCode_Noninlined(isolate());
+ Callable ic =
+ Builtins::CallableFor(isolate(), Builtins::kLoadIC_Noninlined);
Node* code_target = HeapConstant(ic.code());
exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context,
p->receiver, p->name, p->slot, p->vector);
@@ -1859,9 +1854,9 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
GotoIfNot(
WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
miss);
- exit_point->ReturnCallStub(CodeFactory::LoadIC_Uninitialized(isolate()),
- p->context, p->receiver, p->name, p->slot,
- p->vector);
+ exit_point->ReturnCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
+ p->context, p->receiver, p->name, p->slot, p->vector);
}
}
@@ -1891,7 +1886,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
BIND(&not_function_prototype);
}
- GenericPropertyLoad(receiver, receiver_map, instance_type, p->name, p, &miss,
+ GenericPropertyLoad(receiver, receiver_map, instance_type, p, &miss,
kDontUseStubCache);
BIND(&miss);
@@ -2061,8 +2056,9 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
// TODO(jkummerow): Inline this? Or some of it?
- TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
- p->receiver, p->name, p->slot, p->vector);
+ TailCallStub(
+ Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC_Megamorphic),
+ p->context, p->receiver, p->name, p->slot, p->vector);
}
BIND(&try_polymorphic_name);
{
@@ -2106,8 +2102,9 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
BIND(&if_unique_name);
{
- GenericPropertyLoad(receiver, receiver_map, instance_type,
- var_unique.value(), p, &slow);
+ LoadICParameters pp = *p;
+ pp.name = var_unique.value();
+ GenericPropertyLoad(receiver, receiver_map, instance_type, &pp, &slow);
}
BIND(&if_notunique);
@@ -2326,8 +2323,7 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
- TailCallStub(callable, context, receiver, name, slot, vector);
+ TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
}
void AccessorAssembler::GenerateLoadICProtoArray(
@@ -2416,8 +2412,8 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- TailCallStub(callable, context, receiver, name, slot, vector);
+ TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
+ vector);
}
void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 5644fa8ae8..c771b2ff5a 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -182,8 +182,8 @@ class AccessorAssembler : public CodeStubAssembler {
enum UseStubCache { kUseStubCache, kDontUseStubCache };
void GenericPropertyLoad(Node* receiver, Node* receiver_map,
- Node* instance_type, Node* key,
- const LoadICParameters* p, Label* slow,
+ Node* instance_type, const LoadICParameters* p,
+ Label* slow,
UseStubCache use_stub_cache = kUseStubCache);
// Low-level helpers.
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 317a95146f..c17670d921 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -134,7 +134,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -143,8 +144,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ b(ne, miss_label);
// Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -165,8 +166,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
__ b(ne, miss);
}
@@ -199,9 +199,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -209,10 +207,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ ldr(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
@@ -402,17 +396,22 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(receiver()); // receiver
__ push(holder_reg);
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ mov(ip, Operand(callback));
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ mov(ip, Operand(cell));
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
+ __ mov(scratch, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ mov(scratch, Operand(cell));
+ }
+ __ push(scratch);
+ __ mov(scratch, Operand(name));
+ __ Push(scratch, value());
}
- __ push(ip);
- __ mov(ip, Operand(name));
- __ Push(ip, value());
__ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 5cf9dc46ae..7eddd42298 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -29,7 +29,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index db6dc639a1..cc42c030e3 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -74,7 +74,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
@@ -134,9 +135,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Mov(holder, receiver);
@@ -144,10 +143,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ Ldr(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ Ldr(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ Ldr(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index f77bb8af5b..8e9a7f5d2b 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -29,7 +29,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return al;
}
}
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index 29df4bf082..0690d8e528 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -13,7 +13,9 @@ using compiler::Node;
Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* rhs, Node* slot_id,
- Node* feedback_vector) {
+ Node* feedback_vector,
+ Node* function,
+ bool rhs_is_smi) {
// Shared entry for floating point addition.
Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
check_rhsisoddball(this, Label::kDeferred),
@@ -25,24 +27,51 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
VARIABLE(var_result, MachineRepresentation::kTagged);
// Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_lhsissmi(this);
+  // If rhs is known to be a Smi we want to fast-path the Smi operation. This
+  // is the case for the AddSmi operation. For the normal Add operation, we
+  // want to fast-path both Smi and Number operations, so this path should not
+  // be marked as Deferred.
+ Label if_lhsisnotsmi(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);
BIND(&if_lhsissmi);
{
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ Comment("lhs is Smi");
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if the {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+
+ var_fadd_lhs.Bind(SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
+ }
+
+ BIND(&if_rhsissmi);
+ }
- BIND(&if_rhsissmi);
{
+ Comment("perform smi operation");
// Try fast Smi addition first.
Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
BitcastTaggedToWord(rhs));
Node* overflow = Projection(1, pair);
      // Check if the Smi addition overflowed.
- Label if_overflow(this), if_notoverflow(this);
+      // If rhs is known to be a Smi we want to fast-path the Smi operation.
+      // This is the case for the AddSmi operation. For the normal Add
+      // operation, we want to fast-path both Smi and Number operations, so
+      // this path should not be marked as Deferred.
+ Label if_overflow(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred),
+ if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_overflow);
@@ -60,50 +89,33 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Goto(&end);
}
}
-
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- var_fadd_lhs.Bind(SmiToFloat64(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fadd);
- }
}
BIND(&if_lhsisnotsmi);
{
- // Load the map of {lhs}.
- Node* lhs_map = LoadMap(lhs);
-
// Check if {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(lhs_map), &if_lhsisnotnumber);
+ GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
- // Check if the {rhs} is Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fadd);
- }
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if the {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
- // Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
+ var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
+ }
+ BIND(&if_rhsissmi);
+ }
+ {
var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ var_fadd_rhs.Bind(SmiToFloat64(rhs));
Goto(&do_fadd);
}
}
@@ -130,11 +142,8 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
{
GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
// Check if {rhs} is a HeapNumber.
- Branch(IsHeapNumberMap(rhs_map), &call_with_oddball_feedback,
+ Branch(IsHeapNumber(rhs), &call_with_oddball_feedback,
&check_rhsisoddball);
}
@@ -189,122 +198,105 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
BIND(&call_add_stub);
{
- Callable callable = CodeFactory::Add(isolate());
- var_result.Bind(CallStub(callable, context, lhs, rhs));
+ var_result.Bind(CallBuiltin(Builtins::kAdd, context, lhs, rhs));
Goto(&end);
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id, function);
return var_result.value();
}
-Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector) {
- // Shared entry for floating point subtraction.
- Label do_fsub(this), end(this), call_subtract_stub(this),
- if_lhsisnotnumber(this), check_rhsisoddball(this),
- call_with_any_feedback(this);
- VARIABLE(var_fsub_lhs, MachineRepresentation::kFloat64);
- VARIABLE(var_fsub_rhs, MachineRepresentation::kFloat64);
+Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
+ Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+ Node* function, const SmiOperation& smiOperation,
+ const FloatOperation& floatOperation, Token::Value opcode,
+ bool rhs_is_smi) {
+ Label do_float_operation(this), end(this), call_stub(this),
+ check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
+ if_lhsisnotnumber(this, Label::kDeferred);
+ VARIABLE(var_float_lhs, MachineRepresentation::kFloat64);
+ VARIABLE(var_float_rhs, MachineRepresentation::kFloat64);
VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
VARIABLE(var_result, MachineRepresentation::kTagged);
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(this), if_lhsisnotsmi(this);
- Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ Label if_lhsissmi(this);
+  // If rhs is known to be a Smi (in the SubSmi, MulSmi, DivSmi, ModSmi
+  // bytecode handlers) we want to fast-path the Smi operation. For the normal
+  // operation, we want to fast-path both Smi and Number operations, so this
+  // path should not be marked as Deferred.
+ Label if_lhsisnotsmi(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);
+ // Check if the {lhs} is a Smi or a HeapObject.
BIND(&if_lhsissmi);
{
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
-
- BIND(&if_rhsissmi);
- {
- // Try a fast Smi subtraction first.
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
- BitcastTaggedToWord(rhs));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(this), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
-
- BIND(&if_overflow);
+ Comment("lhs is Smi");
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ BIND(&if_rhsisnotsmi);
{
- // lhs, rhs - smi and result - number. combined - number.
- // The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
+ // Check if {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
+
+ // Perform a floating point operation.
+ var_float_lhs.Bind(SmiToFloat64(lhs));
+ var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_float_operation);
}
- BIND(&if_notoverflow);
- // lhs, rhs, result smi. combined - smi.
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
+ BIND(&if_rhsissmi);
}
- BIND(&if_rhsisnotsmi);
{
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(SmiToFloat64(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
+ Comment("perform smi operation");
+ var_result.Bind(smiOperation(lhs, rhs, &var_type_feedback));
+ Goto(&end);
}
}
BIND(&if_lhsisnotsmi);
{
- // Load the map of the {lhs}.
- Node* lhs_map = LoadMap(lhs);
-
+ Comment("lhs is not Smi");
// Check if the {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(lhs_map), &if_lhsisnotnumber);
+ GotoIfNot(IsHeapNumber(lhs), &if_lhsisnotnumber);
- // Check if the {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisnotsmi(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ if (!rhs_is_smi) {
+ // Check if the {rhs} is a Smi.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- BIND(&if_rhsissmi);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fsub);
- }
+ BIND(&if_rhsisnotsmi);
+ {
+ // Check if the {rhs} is a HeapNumber.
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
- BIND(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ // Perform a floating point operation.
+ var_float_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_float_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_float_operation);
+ }
- // Check if the {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
+ BIND(&if_rhsissmi);
+ }
+ {
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fsub);
+ var_float_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_float_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_float_operation);
}
}
- BIND(&do_fsub);
+ BIND(&do_float_operation);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* lhs_value = var_fsub_lhs.value();
- Node* rhs_value = var_fsub_rhs.value();
- Node* value = Float64Sub(lhs_value, rhs_value);
+ Node* lhs_value = var_float_lhs.value();
+ Node* rhs_value = var_float_rhs.value();
+ Node* value = floatOperation(lhs_value, rhs_value);
var_result.Bind(AllocateHeapNumberWithValue(value));
Goto(&end);
}
@@ -325,20 +317,17 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
{
var_type_feedback.Bind(
SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_subtract_stub);
+ Goto(&call_stub);
}
BIND(&if_rhsisnotsmi);
{
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
// Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
+ GotoIfNot(IsHeapNumber(rhs), &check_rhsisoddball);
var_type_feedback.Bind(
SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_subtract_stub);
+ Goto(&call_stub);
}
}
@@ -353,474 +342,160 @@ Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
var_type_feedback.Bind(
SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_subtract_stub);
+ Goto(&call_stub);
}
BIND(&call_with_any_feedback);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_subtract_stub);
- }
-
- BIND(&call_subtract_stub);
- {
- Callable callable = CodeFactory::Subtract(isolate());
- var_result.Bind(CallStub(callable, context, lhs, rhs));
- Goto(&end);
- }
-
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
- return var_result.value();
-}
-
-Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
- Node* rhs, Node* slot_id,
- Node* feedback_vector) {
- // Shared entry point for floating point multiplication.
- Label do_fmul(this), if_lhsisnotnumber(this, Label::kDeferred),
- check_rhsisoddball(this, Label::kDeferred),
- call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_multiply_stub(this), end(this);
- VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-
- Label lhs_is_smi(this), lhs_is_not_smi(this);
- Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
-
- BIND(&lhs_is_smi);
- {
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
- // in case of overflow.
- var_result.Bind(SmiMul(lhs, rhs));
- var_type_feedback.Bind(
- SelectSmiConstant(TaggedIsSmi(var_result.value()),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber));
- Goto(&end);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- // Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(SmiToFloat64(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
+ Goto(&call_stub);
+ }
+
+ BIND(&call_stub);
+ {
+ Node* result;
+ switch (opcode) {
+ case Token::SUB:
+ result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
+ break;
+ case Token::MUL:
+ result = CallBuiltin(Builtins::kMultiply, context, lhs, rhs);
+ break;
+ case Token::DIV:
+ result = CallBuiltin(Builtins::kDivide, context, lhs, rhs);
+ break;
+ case Token::MOD:
+ result = CallBuiltin(Builtins::kModulus, context, lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
}
- }
-
- BIND(&lhs_is_not_smi);
- {
- Node* lhs_map = LoadMap(lhs);
-
- // Check if {lhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(lhs_map), &if_lhsisnotnumber);
-
- // Check if {rhs} is a Smi.
- Label rhs_is_smi(this), rhs_is_not_smi(this);
- Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
-
- BIND(&rhs_is_smi);
- {
- // Convert {rhs} to a double and multiply it with the value of {lhs}.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(SmiToFloat64(rhs));
- Goto(&do_fmul);
- }
-
- BIND(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(rhs_map), &check_rhsisoddball);
-
- // Both {lhs} and {rhs} are HeapNumbers. Load their values and
- // multiply them.
- var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fmul);
- }
- }
-
- BIND(&do_fmul);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
Goto(&end);
}
- BIND(&if_lhsisnotnumber);
- {
- // No checks on rhs are done yet. We just know lhs is not a number or Smi.
- // Check if lhs is an oddball.
- Node* lhs_instance_type = LoadInstanceType(lhs);
- Node* lhs_is_oddball =
- Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
-
- GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);
-
- // Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Branch(IsHeapNumberMap(rhs_map), &call_with_oddball_feedback,
- &check_rhsisoddball);
- }
-
- BIND(&check_rhsisoddball);
- {
- // Check if rhs is an oddball. At this point we know lhs is either a
- // Smi or number or oddball and rhs is not a number or Smi.
- Node* rhs_instance_type = LoadInstanceType(rhs);
- Node* rhs_is_oddball =
- Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(rhs_is_oddball, &call_with_oddball_feedback,
- &call_with_any_feedback);
- }
-
- BIND(&call_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_multiply_stub);
- }
-
- BIND(&call_with_any_feedback);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_multiply_stub);
- }
-
- BIND(&call_multiply_stub);
- {
- Callable callable = CodeFactory::Multiply(isolate());
- var_result.Bind(CallStub(callable, context, lhs, rhs));
- Goto(&end);
- }
-
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id, function);
return var_result.value();
}
-Node* BinaryOpAssembler::Generate_DivideWithFeedback(Node* context,
- Node* dividend,
- Node* divisor,
- Node* slot_id,
- Node* feedback_vector) {
- // Shared entry point for floating point division.
- Label do_fdiv(this), dividend_is_not_number(this, Label::kDeferred),
- check_divisor_for_oddball(this, Label::kDeferred),
- call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_divide_stub(this), end(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
-
- BIND(&dividend_is_smi);
- {
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
+Node* BinaryOpAssembler::Generate_SubtractWithFeedback(Node* context, Node* lhs,
+ Node* rhs, Node* slot_id,
+ Node* feedback_vector,
+ Node* function,
+ bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ // Try a fast Smi subtraction first.
+ Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
+ BitcastTaggedToWord(rhs));
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_notoverflow(this), end(this);
+    // If rhs is known to be a Smi (for SubSmi) we want to fast-path the Smi
+    // operation. For the normal Sub operation, we want to fast-path both Smi
+    // and Number operations, so this path should not be marked as Deferred.
+ Label if_overflow(this,
+ rhs_is_smi ? Label::kDeferred : Label::kNonDeferred);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ BIND(&if_notoverflow);
{
- Label bailout(this);
-
- // Try to perform Smi division if possible.
- var_result.Bind(TrySmiDiv(dividend, divisor, &bailout));
- var_type_feedback.Bind(
+ var_type_feedback->Bind(
SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
-
- // Bailout: convert {dividend} and {divisor} to double and do double
- // division.
- BIND(&bailout);
- {
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
}
- BIND(&divisor_is_not_smi);
+ BIND(&if_overflow);
{
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&dividend_is_not_smi);
- {
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(dividend_map), &dividend_is_not_number);
-
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fdiv);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fdiv);
- }
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ Node* value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&end);
}
- }
-
- BIND(&do_fdiv);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* value =
- Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
- }
-
- BIND(&dividend_is_not_number);
- {
- // We just know dividend is not a number or Smi. No checks on divisor yet.
- // Check if dividend is an oddball.
- Node* dividend_instance_type = LoadInstanceType(dividend);
- Node* dividend_is_oddball =
- Word32Equal(dividend_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
-
- GotoIf(TaggedIsSmi(divisor), &call_with_oddball_feedback);
-
- // Load the map of the {divisor}.
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Branch(IsHeapNumberMap(divisor_map), &call_with_oddball_feedback,
- &check_divisor_for_oddball);
- }
-
- BIND(&check_divisor_for_oddball);
- {
- // Check if divisor is an oddball. At this point we know dividend is either
- // a Smi or number or oddball and divisor is not a number or Smi.
- Node* divisor_instance_type = LoadInstanceType(divisor);
- Node* divisor_is_oddball =
- Word32Equal(divisor_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(divisor_is_oddball, &call_with_oddball_feedback,
- &call_with_any_feedback);
- }
-
- BIND(&call_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_divide_stub);
- }
- BIND(&call_with_any_feedback);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_divide_stub);
- }
-
- BIND(&call_divide_stub);
- {
- Callable callable = CodeFactory::Divide(isolate());
- var_result.Bind(CallStub(callable, context, dividend, divisor));
- Goto(&end);
- }
-
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
- return var_result.value();
+ BIND(&end);
+ return var_result.value();
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Sub(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, lhs, rhs, slot_id, feedback_vector, function, smiFunction,
+ floatFunction, Token::SUB, rhs_is_smi);
}
-Node* BinaryOpAssembler::Generate_ModulusWithFeedback(Node* context,
- Node* dividend,
- Node* divisor,
- Node* slot_id,
- Node* feedback_vector) {
- // Shared entry point for floating point division.
- Label do_fmod(this), dividend_is_not_number(this, Label::kDeferred),
- check_divisor_for_oddball(this, Label::kDeferred),
- call_with_oddball_feedback(this), call_with_any_feedback(this),
- call_modulus_stub(this), end(this);
- VARIABLE(var_dividend_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_divisor_float64, MachineRepresentation::kFloat64);
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_type_feedback, MachineRepresentation::kTaggedSigned);
-
- Label dividend_is_smi(this), dividend_is_not_smi(this);
- Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
+Node* BinaryOpAssembler::Generate_MultiplyWithFeedback(Node* context, Node* lhs,
+ Node* rhs, Node* slot_id,
+ Node* feedback_vector,
+ Node* function,
+ bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ Node* result = SmiMul(lhs, rhs);
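+    // SmiMul falls back to a HeapNumber result on overflow, so select the
+    // feedback based on whether the product is still a Smi.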
+ var_type_feedback->Bind(SelectSmiConstant(
+ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
+ return result;
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Mul(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, lhs, rhs, slot_id, feedback_vector, function, smiFunction,
+ floatFunction, Token::MUL, rhs_is_smi);
+}
- BIND(&dividend_is_smi);
- {
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
+Node* BinaryOpAssembler::Generate_DivideWithFeedback(
+ Node* context, Node* dividend, Node* divisor, Node* slot_id,
+ Node* feedback_vector, Node* function, bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+    // If rhs is known to be a Smi (for DivSmi) we want to fast path the Smi
+    // operation. For the normal Div operation, we want to fast path both Smi
+    // and Number operations, so this path should not be marked as Deferred.
+ Label bailout(this, rhs_is_smi ? Label::kDeferred : Label::kNonDeferred),
+ end(this);
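+    // TrySmiDiv jumps to {bailout} whenever the quotient is not representable
+    // as a Smi (e.g. inexact division, division by zero, or a -0 result).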
+ var_result.Bind(TrySmiDiv(lhs, rhs, &bailout));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ Goto(&end);
- BIND(&divisor_is_smi);
+ BIND(&bailout);
{
- var_result.Bind(SmiMod(dividend, divisor));
- var_type_feedback.Bind(
- SelectSmiConstant(TaggedIsSmi(var_result.value()),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNumber));
+ Node* value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
+ var_result.Bind(AllocateHeapNumberWithValue(value));
Goto(&end);
}
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(SmiToFloat64(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
- }
-
- BIND(&dividend_is_not_smi);
- {
- Node* dividend_map = LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(dividend_map), &dividend_is_not_number);
-
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(this), divisor_is_not_smi(this);
- Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
-
- BIND(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(SmiToFloat64(divisor));
- Goto(&do_fmod);
- }
-
- BIND(&divisor_is_not_smi);
- {
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- GotoIfNot(IsHeapNumberMap(divisor_map), &check_divisor_for_oddball);
-
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
- Goto(&do_fmod);
- }
- }
-
- BIND(&do_fmod);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
- Node* value =
- Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
- var_result.Bind(AllocateHeapNumberWithValue(value));
- Goto(&end);
- }
-
- BIND(&dividend_is_not_number);
- {
- // No checks on divisor yet. We just know dividend is not a number or Smi.
- // Check if dividend is an oddball.
- Node* dividend_instance_type = LoadInstanceType(dividend);
- Node* dividend_is_oddball =
- Word32Equal(dividend_instance_type, Int32Constant(ODDBALL_TYPE));
- GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
-
- GotoIf(TaggedIsSmi(divisor), &call_with_oddball_feedback);
-
- // Load the map of the {divisor}.
- Node* divisor_map = LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Branch(IsHeapNumberMap(divisor_map), &call_with_oddball_feedback,
- &check_divisor_for_oddball);
- }
-
- BIND(&check_divisor_for_oddball);
- {
- // Check if divisor is an oddball. At this point we know dividend is either
- // a Smi or number or oddball and divisor is not a number or Smi.
- Node* divisor_instance_type = LoadInstanceType(divisor);
- Node* divisor_is_oddball =
- Word32Equal(divisor_instance_type, Int32Constant(ODDBALL_TYPE));
- Branch(divisor_is_oddball, &call_with_oddball_feedback,
- &call_with_any_feedback);
- }
-
- BIND(&call_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
- Goto(&call_modulus_stub);
- }
-
- BIND(&call_with_any_feedback);
- {
- var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- Goto(&call_modulus_stub);
- }
-
- BIND(&call_modulus_stub);
- {
- Callable callable = CodeFactory::Modulus(isolate());
- var_result.Bind(CallStub(callable, context, dividend, divisor));
- Goto(&end);
- }
+ BIND(&end);
+ return var_result.value();
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Div(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, dividend, divisor, slot_id, feedback_vector, function,
+ smiFunction, floatFunction, Token::DIV, rhs_is_smi);
+}
- BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_id);
- return var_result.value();
+Node* BinaryOpAssembler::Generate_ModulusWithFeedback(
+ Node* context, Node* dividend, Node* divisor, Node* slot_id,
+ Node* feedback_vector, Node* function, bool rhs_is_smi) {
+ auto smiFunction = [=](Node* lhs, Node* rhs, Variable* var_type_feedback) {
+ Node* result = SmiMod(lhs, rhs);
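+    // SmiMod can produce a HeapNumber (e.g. for a -0 result), so select the
+    // feedback based on the representation of the result.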
+ var_type_feedback->Bind(SelectSmiConstant(
+ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
+ return result;
+ };
+ auto floatFunction = [=](Node* lhs, Node* rhs) {
+ return Float64Mod(lhs, rhs);
+ };
+ return Generate_BinaryOperationWithFeedback(
+ context, dividend, divisor, slot_id, feedback_vector, function,
+ smiFunction, floatFunction, Token::MOD, rhs_is_smi);
}
} // namespace internal
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index 849dfc29dc..bb37298447 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -5,6 +5,7 @@
#ifndef V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
#define V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
+#include <functional>
#include "src/code-stub-assembler.h"
namespace v8 {
@@ -22,21 +23,36 @@ class BinaryOpAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
Node* Generate_AddWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector);
+ Node* slot_id, Node* feedback_vector,
+ Node* function, bool rhs_is_smi);
Node* Generate_SubtractWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector);
+ Node* slot_id, Node* feedback_vector,
+ Node* function, bool rhs_is_smi);
Node* Generate_MultiplyWithFeedback(Node* context, Node* lhs, Node* rhs,
- Node* slot_id, Node* feedback_vector);
+ Node* slot_id, Node* feedback_vector,
+ Node* function, bool rhs_is_smi);
Node* Generate_DivideWithFeedback(Node* context, Node* dividend,
Node* divisor, Node* slot_id,
- Node* feedback_vector);
+ Node* feedback_vector, Node* function,
+ bool rhs_is_smi);
Node* Generate_ModulusWithFeedback(Node* context, Node* dividend,
Node* divisor, Node* slot_id,
- Node* feedback_vector);
+ Node* feedback_vector, Node* function,
+ bool rhs_is_smi);
+
+ private:
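+  // Per-operation callbacks: a Smi fast path that also records type feedback,
+  // and a Float64 fallback used by the shared generator below.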
+ typedef std::function<Node*(Node*, Node*, Variable*)> SmiOperation;
+ typedef std::function<Node*(Node*, Node*)> FloatOperation;
+
+ Node* Generate_BinaryOperationWithFeedback(
+ Node* context, Node* lhs, Node* rhs, Node* slot_id, Node* feedback_vector,
+ Node* function, const SmiOperation& smiOperation,
+ const FloatOperation& floatOperation, Token::Value opcode,
+ bool rhs_is_smi);
};
} // namespace internal
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 6780ac4ca4..975f789596 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -20,10 +20,8 @@ CallOptimization::CallOptimization(Handle<Object> function) {
}
}
-
Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
- Handle<Map> object_map, HolderLookup* holder_lookup,
- int* holder_depth_in_prototype_chain) const {
+ Handle<Map> object_map, HolderLookup* holder_lookup) const {
DCHECK(is_simple_api_call());
if (!object_map->IsJSObjectMap()) {
*holder_lookup = kHolderNotFound;
@@ -34,15 +32,11 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
*holder_lookup = kHolderIsReceiver;
return Handle<JSObject>::null();
}
- for (int depth = 1; true; depth++) {
- if (!object_map->has_hidden_prototype()) break;
+ if (object_map->has_hidden_prototype()) {
Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
object_map = handle(prototype->map());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
- if (holder_depth_in_prototype_chain != NULL) {
- *holder_depth_in_prototype_chain = depth;
- }
return prototype;
}
}
@@ -84,7 +78,6 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
break;
}
UNREACHABLE();
- return false;
}
void CallOptimization::Initialize(
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index efabd3387c..8ca8cde112 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -38,8 +38,7 @@ class CallOptimization BASE_EMBEDDED {
enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
Handle<JSObject> LookupHolderOfExpectedType(
- Handle<Map> receiver_map, HolderLookup* holder_lookup,
- int* holder_depth_in_prototype_chain = NULL) const;
+ Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
// Check if the api holder is between the receiver and the holder.
bool IsCompatibleReceiver(Handle<Object> receiver,
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 69ba39768a..b4aff8ec55 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -30,7 +30,7 @@ Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
// Create code object in the heap.
CodeDesc desc;
- masm()->GetCode(&desc);
+ masm()->GetCode(isolate(), &desc);
Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
#ifdef ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 4eb1b464c3..788e4bc0ed 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -27,7 +27,6 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
Label* miss) {
UNREACHABLE();
- return receiver();
}
virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 5f31d15d46..6c75b76ac7 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -122,7 +122,6 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
break;
default:
UNREACHABLE();
- return Handle<Smi>::null();
}
DCHECK(kind == kStoreField || kind == kTransitionToField ||
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 2cfa49b15b..e65f2ea8ff 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -84,7 +84,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
@@ -139,9 +139,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -149,10 +147,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
__ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ mov(holder, FieldOperand(holder, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index c4b4cdcc2b..2d2017d595 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return greater_equal;
default:
UNREACHABLE();
- return no_condition;
}
}
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 8ac3bd99da..fb86528ff8 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -58,9 +58,8 @@ void IC::SetTargetAtAddress(Address address, Code* target,
Address constant_pool) {
if (AddressIsDeoptimizedCode(target->GetIsolate(), address)) return;
- // Only these three old-style ICs still do code patching.
- DCHECK(target->is_binary_op_stub() || target->is_compare_ic_stub() ||
- target->is_to_boolean_ic_stub());
+  // Only one old-style IC still does code patching.
+ DCHECK(target->is_compare_ic_stub());
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index a217b115fd..74a59d8f25 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -4,7 +4,6 @@
#include "src/ic/ic-state.h"
-#include "src/ast/ast-types.h"
#include "src/feedback-vector.h"
#include "src/ic/ic.h"
#include "src/objects-inl.h"
@@ -19,339 +18,6 @@ void ICUtility::Clear(Isolate* isolate, Address address,
}
-// static
-STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN;
-
-
-// static
-STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::LAST_TOKEN;
-
-
-BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
- : fixed_right_arg_(
- HasFixedRightArgField::decode(extra_ic_state)
- ? Just(1 << FixedRightArgValueField::decode(extra_ic_state))
- : Nothing<int>()),
- isolate_(isolate) {
- op_ =
- static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
- left_kind_ = LeftKindField::decode(extra_ic_state);
- right_kind_ = fixed_right_arg_.IsJust()
- ? (Smi::IsValid(fixed_right_arg_.FromJust()) ? SMI : INT32)
- : RightKindField::decode(extra_ic_state);
- result_kind_ = ResultKindField::decode(extra_ic_state);
- DCHECK_LE(FIRST_TOKEN, op_);
- DCHECK_LE(op_, LAST_TOKEN);
-}
-
-
-ExtraICState BinaryOpICState::GetExtraICState() const {
- ExtraICState extra_ic_state =
- OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
- ResultKindField::encode(result_kind_) |
- HasFixedRightArgField::encode(fixed_right_arg_.IsJust());
- if (fixed_right_arg_.IsJust()) {
- extra_ic_state = FixedRightArgValueField::update(
- extra_ic_state, WhichPowerOf2(fixed_right_arg_.FromJust()));
- } else {
- extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
- }
- return extra_ic_state;
-}
-
-std::string BinaryOpICState::ToString() const {
- std::string ret = "(";
- ret += Token::Name(op_);
- if (CouldCreateAllocationMementos()) ret += "_CreateAllocationMementos";
- ret += ":";
- ret += BinaryOpICState::KindToString(left_kind_);
- ret += "*";
- if (fixed_right_arg_.IsJust()) {
- ret += fixed_right_arg_.FromJust();
- } else {
- ret += BinaryOpICState::KindToString(right_kind_);
- }
- ret += "->";
- ret += BinaryOpICState::KindToString(result_kind_);
- ret += ")";
- return ret;
-}
-
-// static
-void BinaryOpICState::GenerateAheadOfTime(
- Isolate* isolate, void (*Generate)(Isolate*, const BinaryOpICState&)) {
-// TODO(olivf) We should investigate why adding stubs to the snapshot is so
-// expensive at runtime. When solved we should be able to add most binops to
-// the snapshot instead of hand-picking them.
-// Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind) \
- do { \
- BinaryOpICState state(isolate, op); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_ = Nothing<int>(); \
- state.right_kind_ = right_kind; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::ADD, INT32, INT32, INT32);
- GENERATE(Token::ADD, INT32, INT32, NUMBER);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER);
- GENERATE(Token::ADD, INT32, SMI, INT32);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER);
- GENERATE(Token::ADD, SMI, INT32, INT32);
- GENERATE(Token::ADD, SMI, INT32, NUMBER);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER);
- GENERATE(Token::ADD, SMI, SMI, INT32);
- GENERATE(Token::ADD, SMI, SMI, SMI);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI);
- GENERATE(Token::BIT_AND, NUMBER, INT32, INT32);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI);
- GENERATE(Token::BIT_AND, SMI, INT32, INT32);
- GENERATE(Token::BIT_AND, SMI, INT32, SMI);
- GENERATE(Token::BIT_AND, SMI, NUMBER, SMI);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32);
- GENERATE(Token::BIT_OR, INT32, INT32, SMI);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32);
- GENERATE(Token::BIT_OR, SMI, INT32, SMI);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI);
- GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32);
- GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32);
- GENERATE(Token::BIT_XOR, SMI, INT32, SMI);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI);
- GENERATE(Token::DIV, INT32, INT32, INT32);
- GENERATE(Token::DIV, INT32, INT32, NUMBER);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER);
- GENERATE(Token::DIV, INT32, SMI, INT32);
- GENERATE(Token::DIV, INT32, SMI, NUMBER);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER);
- GENERATE(Token::DIV, SMI, INT32, INT32);
- GENERATE(Token::DIV, SMI, INT32, NUMBER);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER);
- GENERATE(Token::DIV, SMI, SMI, NUMBER);
- GENERATE(Token::DIV, SMI, SMI, SMI);
- GENERATE(Token::MOD, NUMBER, SMI, NUMBER);
- GENERATE(Token::MOD, SMI, SMI, SMI);
- GENERATE(Token::MUL, INT32, INT32, INT32);
- GENERATE(Token::MUL, INT32, INT32, NUMBER);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER);
- GENERATE(Token::MUL, INT32, SMI, INT32);
- GENERATE(Token::MUL, INT32, SMI, NUMBER);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER);
- GENERATE(Token::MUL, SMI, INT32, INT32);
- GENERATE(Token::MUL, SMI, INT32, NUMBER);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER);
- GENERATE(Token::MUL, SMI, SMI, INT32);
- GENERATE(Token::MUL, SMI, SMI, NUMBER);
- GENERATE(Token::MUL, SMI, SMI, SMI);
- GENERATE(Token::SAR, INT32, SMI, INT32);
- GENERATE(Token::SAR, INT32, SMI, SMI);
- GENERATE(Token::SAR, NUMBER, SMI, SMI);
- GENERATE(Token::SAR, SMI, SMI, SMI);
- GENERATE(Token::SHL, INT32, SMI, INT32);
- GENERATE(Token::SHL, INT32, SMI, SMI);
- GENERATE(Token::SHL, NUMBER, SMI, SMI);
- GENERATE(Token::SHL, SMI, SMI, INT32);
- GENERATE(Token::SHL, SMI, SMI, SMI);
- GENERATE(Token::SHR, INT32, SMI, SMI);
- GENERATE(Token::SHR, NUMBER, SMI, INT32);
- GENERATE(Token::SHR, NUMBER, SMI, SMI);
- GENERATE(Token::SHR, SMI, SMI, SMI);
- GENERATE(Token::SUB, INT32, INT32, INT32);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER);
- GENERATE(Token::SUB, INT32, SMI, INT32);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER);
- GENERATE(Token::SUB, SMI, INT32, INT32);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER);
- GENERATE(Token::SUB, SMI, SMI, SMI);
-#undef GENERATE
-#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind) \
- do { \
- BinaryOpICState state(isolate, op); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_ = Just(fixed_right_arg_value); \
- state.right_kind_ = SMI; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::MOD, SMI, 2, SMI);
- GENERATE(Token::MOD, SMI, 4, SMI);
- GENERATE(Token::MOD, SMI, 8, SMI);
- GENERATE(Token::MOD, SMI, 16, SMI);
- GENERATE(Token::MOD, SMI, 32, SMI);
- GENERATE(Token::MOD, SMI, 2048, SMI);
-#undef GENERATE
-}
-
-AstType* BinaryOpICState::GetResultType() const {
- Kind result_kind = result_kind_;
- if (HasSideEffects()) {
- result_kind = NONE;
- } else if (result_kind == GENERIC && op_ == Token::ADD) {
- return AstType::NumberOrString();
- } else if (result_kind == NUMBER && op_ == Token::SHR) {
- return AstType::Unsigned32();
- }
- DCHECK_NE(GENERIC, result_kind);
- return KindToType(result_kind);
-}
-
-
-std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
- os << "(" << Token::Name(s.op_);
- if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
- os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
- if (s.fixed_right_arg_.IsJust()) {
- os << s.fixed_right_arg_.FromJust();
- } else {
- os << BinaryOpICState::KindToString(s.right_kind_);
- }
- return os << "->" << BinaryOpICState::KindToString(s.result_kind_) << ")";
-}
-
-
-void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
- Handle<Object> result) {
- ExtraICState old_extra_ic_state = GetExtraICState();
-
- left_kind_ = UpdateKind(left, left_kind_);
- right_kind_ = UpdateKind(right, right_kind_);
-
- int32_t fixed_right_arg_value = 0;
- bool has_fixed_right_arg =
- op_ == Token::MOD && right->ToInt32(&fixed_right_arg_value) &&
- fixed_right_arg_value > 0 &&
- base::bits::IsPowerOfTwo32(fixed_right_arg_value) &&
- FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
- (left_kind_ == SMI || left_kind_ == INT32) &&
- (result_kind_ == NONE || !fixed_right_arg_.IsJust());
- fixed_right_arg_ =
- has_fixed_right_arg ? Just(fixed_right_arg_value) : Nothing<int32_t>();
- result_kind_ = UpdateKind(result, result_kind_);
-
- if (!Token::IsTruncatingBinaryOp(op_)) {
- Kind input_kind = Max(left_kind_, right_kind_);
- if (result_kind_ < input_kind && input_kind <= NUMBER) {
- result_kind_ = input_kind;
- }
- }
-
- // We don't want to distinguish INT32 and NUMBER for string add (because
- // NumberToString can't make use of this anyway).
- if (left_kind_ == STRING && right_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- right_kind_ = NUMBER;
- } else if (right_kind_ == STRING && left_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- left_kind_ = NUMBER;
- }
-
- if (old_extra_ic_state == GetExtraICState()) {
- // Tagged operations can lead to non-truncating HChanges
- if (left->IsOddball()) {
- left_kind_ = GENERIC;
- } else {
- DCHECK(right->IsOddball());
- right_kind_ = GENERIC;
- }
- }
-}
-
-
-BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
- Kind kind) const {
- Kind new_kind = GENERIC;
- bool is_truncating = Token::IsTruncatingBinaryOp(op());
- if (object->IsOddball() && is_truncating) {
- // Oddballs will be automatically truncated by HChange.
- new_kind = INT32;
- } else if (object->IsUndefined(isolate_)) {
- // Undefined will be automatically truncated by HChange.
- new_kind = is_truncating ? INT32 : NUMBER;
- } else if (object->IsSmi()) {
- new_kind = SMI;
- } else if (object->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(object)->value();
- new_kind = IsInt32Double(value) ? INT32 : NUMBER;
- } else if (object->IsString() && op() == Token::ADD) {
- new_kind = STRING;
- }
- if (new_kind == INT32 && SmiValuesAre32Bits()) {
- new_kind = NUMBER;
- }
- if (kind != NONE && ((new_kind <= NUMBER && kind > NUMBER) ||
- (new_kind > NUMBER && kind <= NUMBER))) {
- new_kind = GENERIC;
- }
- return Max(kind, new_kind);
-}
-
-
-// static
-const char* BinaryOpICState::KindToString(Kind kind) {
- switch (kind) {
- case NONE:
- return "None";
- case SMI:
- return "Smi";
- case INT32:
- return "Int32";
- case NUMBER:
- return "Number";
- case STRING:
- return "String";
- case GENERIC:
- return "Generic";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// static
-AstType* BinaryOpICState::KindToType(Kind kind) {
- switch (kind) {
- case NONE:
- return AstType::None();
- case SMI:
- return AstType::SignedSmall();
- case INT32:
- return AstType::Signed32();
- case NUMBER:
- return AstType::Number();
- case STRING:
- return AstType::String();
- case GENERIC:
- return AstType::Any();
- }
- UNREACHABLE();
- return NULL;
-}
-
-
const char* CompareICState::GetStateName(State state) {
switch (state) {
case UNINITIALIZED:
@@ -376,34 +42,6 @@ const char* CompareICState::GetStateName(State state) {
return "GENERIC";
}
UNREACHABLE();
- return NULL;
-}
-
-AstType* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
- switch (state) {
- case UNINITIALIZED:
- return AstType::None();
- case BOOLEAN:
- return AstType::Boolean();
- case SMI:
- return AstType::SignedSmall();
- case NUMBER:
- return AstType::Number();
- case STRING:
- return AstType::String();
- case INTERNALIZED_STRING:
- return AstType::InternalizedString();
- case UNIQUE_NAME:
- return AstType::UniqueName();
- case RECEIVER:
- return AstType::Receiver();
- case KNOWN_RECEIVER:
- return map.is_null() ? AstType::Receiver() : AstType::Class(map, zone);
- case GENERIC:
- return AstType::Any();
- }
- UNREACHABLE();
- return NULL;
}
@@ -522,7 +160,6 @@ CompareICState::State CompareICState::TargetState(
return GENERIC;
}
UNREACHABLE();
- return GENERIC; // Make the compiler happy.
}
} // namespace internal
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 16651c5623..7a7c7578e5 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -11,8 +11,6 @@
namespace v8 {
namespace internal {
-class AstType;
-
const int kMaxKeyedPolymorphism = 4;
@@ -23,130 +21,6 @@ class ICUtility : public AllStatic {
};
-class BinaryOpICState final BASE_EMBEDDED {
- public:
- BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
- BinaryOpICState(Isolate* isolate, Token::Value op)
- : op_(op),
- left_kind_(NONE),
- right_kind_(NONE),
- result_kind_(NONE),
- fixed_right_arg_(Nothing<int>()),
- isolate_(isolate) {
- DCHECK_LE(FIRST_TOKEN, op);
- DCHECK_LE(op, LAST_TOKEN);
- }
-
- InlineCacheState GetICState() const {
- if (Max(left_kind_, right_kind_) == NONE) {
- return ::v8::internal::UNINITIALIZED;
- }
- if (Max(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::MEGAMORPHIC;
- }
- if (Min(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::GENERIC;
- }
- return ::v8::internal::MONOMORPHIC;
- }
-
- ExtraICState GetExtraICState() const;
- std::string ToString() const;
-
- static void GenerateAheadOfTime(Isolate*,
- void (*Generate)(Isolate*,
- const BinaryOpICState&));
-
- // Returns true if the IC _could_ create allocation mementos.
- bool CouldCreateAllocationMementos() const {
- if (left_kind_ == STRING || right_kind_ == STRING) {
- DCHECK_EQ(Token::ADD, op_);
- return true;
- }
- return false;
- }
-
- // Returns true if the IC _should_ create allocation mementos.
- bool ShouldCreateAllocationMementos() const {
- return FLAG_allocation_site_pretenuring && CouldCreateAllocationMementos();
- }
-
- bool HasSideEffects() const {
- return Max(left_kind_, right_kind_) == GENERIC;
- }
-
- // Returns true if the IC should enable the inline smi code (i.e. if either
- // parameter may be a smi).
- bool UseInlinedSmiCode() const {
- return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
- }
-
- static const int FIRST_TOKEN = Token::BIT_OR;
- static const int LAST_TOKEN = Token::MOD;
-
- Token::Value op() const { return op_; }
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-
- AstType* GetLeftType() const { return KindToType(left_kind_); }
- AstType* GetRightType() const { return KindToType(right_kind_); }
- AstType* GetResultType() const;
-
- void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
-
- Isolate* isolate() const { return isolate_; }
-
- enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
- Kind kind() const {
- return KindGeneralize(KindGeneralize(left_kind_, right_kind_),
- result_kind_);
- }
-
- private:
- friend std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s);
-
- Kind UpdateKind(Handle<Object> object, Kind kind) const;
-
- static const char* KindToString(Kind kind);
- static AstType* KindToType(Kind kind);
- static bool KindMaybeSmi(Kind kind) {
- return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
- }
- static bool KindLessGeneralThan(Kind kind1, Kind kind2) {
- if (kind1 == NONE) return true;
- if (kind1 == kind2) return true;
- if (kind2 == GENERIC) return true;
- if (kind2 == STRING) return false;
- return kind1 <= kind2;
- }
- static Kind KindGeneralize(Kind kind1, Kind kind2) {
- if (KindLessGeneralThan(kind1, kind2)) return kind2;
- if (KindLessGeneralThan(kind2, kind1)) return kind1;
- return GENERIC;
- }
-
- // We truncate the last bit of the token.
- STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
- class OpField : public BitField<int, 0, 4> {};
- class ResultKindField : public BitField<Kind, 4, 3> {};
- class LeftKindField : public BitField<Kind, 7, 3> {};
- // When fixed right arg is set, we don't need to store the right kind.
- // Thus the two fields can overlap.
- class HasFixedRightArgField : public BitField<bool, 10, 1> {};
- class FixedRightArgValueField : public BitField<int, 11, 4> {};
- class RightKindField : public BitField<Kind, 11, 3> {};
-
- Token::Value op_;
- Kind left_kind_;
- Kind right_kind_;
- Kind result_kind_;
- Maybe<int> fixed_right_arg_;
- Isolate* isolate_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s);
-
-
class CompareICState {
public:
// The type/state lattice is defined by the following inequations:
@@ -169,9 +43,6 @@ class CompareICState {
GENERIC
};
- static AstType* StateToType(Zone* zone, State state,
- Handle<Map> map = Handle<Map>());
-
static State NewInputState(State old_state, Handle<Object> value);
static const char* GetStateName(CompareICState::State state);
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
index de2529fcd9..c305209d48 100644
--- a/deps/v8/src/ic/ic-stats.cc
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -17,21 +17,21 @@ base::LazyInstance<ICStats>::type ICStats::instance_ =
LAZY_INSTANCE_INITIALIZER;
ICStats::ICStats() : ic_infos_(MAX_IC_INFO), pos_(0) {
- base::NoBarrier_Store(&enabled_, 0);
+ base::Relaxed_Store(&enabled_, 0);
}
void ICStats::Begin() {
if (V8_LIKELY(!FLAG_ic_stats)) return;
- base::NoBarrier_Store(&enabled_, 1);
+ base::Relaxed_Store(&enabled_, 1);
}
void ICStats::End() {
- if (base::NoBarrier_Load(&enabled_) != 1) return;
+ if (base::Relaxed_Load(&enabled_) != 1) return;
++pos_;
if (pos_ == MAX_IC_INFO) {
Dump();
}
- base::NoBarrier_Store(&enabled_, 0);
+ base::Relaxed_Store(&enabled_, 0);
}
void ICStats::Reset() {
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 5dca55ed3e..2684d0ba36 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -50,7 +50,6 @@ char IC::TransitionMarkFromState(IC::State state) {
return 'G';
}
UNREACHABLE();
- return 0;
}
@@ -190,9 +189,9 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack or a
- // StubFailureTrampoline, we need to look one frame further down the stack to
- // find the frame pointer and the return address stack slot.
+  // If there's another JavaScript frame on the stack, we need to look one
+  // frame further down the stack to find the frame pointer and the return
+  // address stack slot.
if (depth == EXTRA_CALL_FRAME) {
if (FLAG_enable_embedded_constant_pool) {
constant_pool = reinterpret_cast<Address*>(
@@ -230,12 +229,8 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
} else {
Code* target = this->target();
Code::Kind kind = target->kind();
- if (kind == Code::BINARY_OP_IC) {
- kind_ = FeedbackSlotKind::kBinaryOp;
- } else if (kind == Code::COMPARE_IC) {
+ if (kind == Code::COMPARE_IC) {
kind_ = FeedbackSlotKind::kCompareOp;
- } else if (kind == Code::TO_BOOLEAN_IC) {
- kind_ = FeedbackSlotKind::kToBoolean;
} else {
UNREACHABLE();
kind_ = FeedbackSlotKind::kInvalid;
@@ -262,22 +257,13 @@ bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
InlineCacheState IC::StateFromCode(Code* code) {
Isolate* isolate = code->GetIsolate();
switch (code->kind()) {
- case Code::BINARY_OP_IC: {
- BinaryOpICState state(isolate, code->extra_ic_state());
- return state.GetICState();
- }
case Code::COMPARE_IC: {
CompareICStub stub(isolate, code->extra_ic_state());
return stub.GetICState();
}
- case Code::TO_BOOLEAN_IC: {
- ToBooleanICStub stub(isolate, code->extra_ic_state());
- return stub.GetICState();
- }
default:
if (code->is_debug_stub()) return UNINITIALIZED;
UNREACHABLE();
- return UNINITIALIZED;
}
}
@@ -428,23 +414,15 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
// static
void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
- Code* host = host_function->shared()->code();
-
- if (host->kind() == Code::FUNCTION) {
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- host->set_profiler_ticks(0);
- } else if (host_function->IsInterpreted()) {
- if (FLAG_trace_opt_verbose) {
- if (host_function->shared()->profiler_ticks() != 0) {
- PrintF("[resetting ticks for ");
- host_function->PrintName();
- PrintF(" due from %d due to IC change]\n",
- host_function->shared()->profiler_ticks());
- }
+ if (FLAG_trace_opt_verbose) {
+ if (host_function->shared()->profiler_ticks() != 0) {
+ PrintF("[resetting ticks for ");
+ host_function->PrintName();
+ PrintF(" due from %d due to IC change]\n",
+ host_function->shared()->profiler_ticks());
}
- host_function->shared()->set_profiler_ticks(0);
}
+ host_function->shared()->set_profiler_ticks(0);
isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -454,9 +432,7 @@ void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
void IC::PostPatching(Address address, Code* target, Code* old_target) {
// Type vector based ICs update these statistics at a different time because
// they don't always patch on state change.
- DCHECK(target->kind() == Code::BINARY_OP_IC ||
- target->kind() == Code::COMPARE_IC ||
- target->kind() == Code::TO_BOOLEAN_IC);
+ DCHECK(target->kind() == Code::COMPARE_IC);
DCHECK(old_target->is_inline_cache_stub());
DCHECK(target->is_inline_cache_stub());
@@ -480,10 +456,14 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
info->change_ic_with_type_info_count(polymorphic_delta);
info->change_ic_generic_count(generic_delta);
}
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
}
- host->set_profiler_ticks(0);
+
+  // TODO(leszeks): Normally we would reset profiler ticks here -- but we don't
+  // currently have access to the feedback vector from the IC. In practice,
+ // this is not an issue, as these ICs are only used by asm.js, which shouldn't
+ // have too many IC changes. This inconsistency should go away once these
+ // Crankshaft/hydrogen code stubs go away.
+
isolate->runtime_profiler()->NotifyICChanged();
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -571,6 +551,10 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
PatchCache(name, slow_stub());
TRACE_IC("LoadIC", name);
}
+
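+  // Failed loads of @@iterator get the dedicated iterator error rather than
+  // the generic non-object property load TypeError below.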
+ if (*name == isolate()->heap()->iterator_symbol()) {
+ return Runtime::ThrowIteratorError(isolate(), object);
+ }
return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
}
@@ -882,8 +866,7 @@ Handle<WeakCell> HolderCell(Isolate* isolate, Handle<JSObject> holder,
GlobalDictionary* dict = global->global_dictionary();
int number = dict->FindEntry(name);
DCHECK_NE(NameDictionary::kNotFound, number);
- Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(number)),
- isolate);
+ Handle<PropertyCell> cell(dict->CellAt(number), isolate);
return isolate->factory()->NewWeakCell(cell);
}
return Map::GetOrCreatePrototypeWeakCell(holder, isolate);
@@ -1160,7 +1143,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// When debugging we need to go the slow path to flood the accessor.
- if (GetHostFunction()->shared()->HasDebugInfo()) {
+ if (GetHostFunction()->shared()->HasBreakInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
@@ -1296,7 +1279,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup) {
Handle<Object> accessors = lookup->GetAccessors();
DCHECK(accessors->IsAccessorPair());
DCHECK(holder->HasFastProperties());
- DCHECK(!GetHostFunction()->shared()->HasDebugInfo());
+ DCHECK(!GetHostFunction()->shared()->HasBreakInfo());
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
isolate());
CallOptimization call_optimization(getter);
@@ -1316,7 +1299,7 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
if (key->IsHeapNumber()) {
double value = Handle<HeapNumber>::cast(key)->value();
if (std::isnan(value)) {
- key = isolate->factory()->nan_string();
+ key = isolate->factory()->NaN_string();
} else {
int int_value = FastD2I(value);
if (value == int_value && Smi::IsValid(int_value)) {
@@ -1430,9 +1413,9 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
- // TODO(jkummerow): Use IsHoleyElementsKind(elements_kind).
+ // TODO(jkummerow): Use IsHoleyOrDictionaryElementsKind(elements_kind).
bool convert_hole_to_undefined =
- is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
+ is_js_array && elements_kind == HOLEY_ELEMENTS &&
*receiver_map == isolate()->get_initial_js_array_map(elements_kind);
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadElementDH);
return LoadHandler::LoadElement(isolate(), elements_kind,
@@ -2045,8 +2028,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
List<Handle<Object>> handlers(static_cast<int>(target_receiver_maps.size()));
StoreElementPolymorphicHandlers(&target_receiver_maps, &handlers, store_mode);
- DCHECK_LE(1, target_receiver_maps.size());
- if (target_receiver_maps.size() == 1) {
+ if (target_receiver_maps.size() == 0) {
+ ConfigureVectorState(PREMONOMORPHIC, Handle<Name>());
+ } else if (target_receiver_maps.size() == 1) {
ConfigureVectorState(Handle<Name>(), target_receiver_maps[0],
handlers.at(0));
} else {
@@ -2060,16 +2044,16 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
switch (store_mode) {
case STORE_TRANSITION_TO_OBJECT:
case STORE_AND_GROW_TRANSITION_TO_OBJECT: {
- ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
return Map::TransitionElementsTo(map, kind);
}
case STORE_TRANSITION_TO_DOUBLE:
case STORE_AND_GROW_TRANSITION_TO_DOUBLE: {
- ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
+ ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
+ ? HOLEY_DOUBLE_ELEMENTS
+ : PACKED_DOUBLE_ELEMENTS;
return Map::TransitionElementsTo(map, kind);
}
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
@@ -2081,7 +2065,6 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
return map;
}
UNREACHABLE();
- return MaybeHandle<Map>().ToHandleChecked();
}
Handle<Object> KeyedStoreIC::StoreElementHandler(
@@ -2205,14 +2188,14 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
!receiver->WouldConvertToSlowElements(index);
if (allow_growth) {
// Handle growing array in stub if necessary.
- if (receiver->HasFastSmiElements()) {
+ if (receiver->HasSmiElements()) {
if (value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
if (value->IsHeapObject()) {
return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
- } else if (receiver->HasFastDoubleElements()) {
+ } else if (receiver->HasDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
@@ -2220,13 +2203,13 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
- if (receiver->HasFastSmiElements()) {
+ if (receiver->HasSmiElements()) {
if (value->IsHeapNumber()) {
return STORE_TRANSITION_TO_DOUBLE;
} else if (value->IsHeapObject()) {
return STORE_TRANSITION_TO_OBJECT;
}
- } else if (receiver->HasFastDoubleElements()) {
+ } else if (receiver->HasDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_TRANSITION_TO_OBJECT;
}
@@ -2282,6 +2265,10 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
return store_handle;
}
+ if (state() != UNINITIALIZED) {
+ JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
+ }
+
bool use_ic = FLAG_use_ic && !object->IsStringWrapper() &&
!object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy();
if (use_ic && !object->IsSmi()) {
@@ -2304,9 +2291,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
old_receiver_map = handle(receiver->map(), isolate());
is_arguments = receiver->IsJSArgumentsObject();
if (!is_arguments) {
- key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
+ key_is_valid_index = key->IsSmi() && Smi::ToInt(*key) >= 0;
if (key_is_valid_index) {
- uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
+ uint32_t index = static_cast<uint32_t>(Smi::ToInt(*key));
store_mode = GetStoreMode(receiver, index, value);
}
}
@@ -2560,158 +2547,6 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
}
-MaybeHandle<Object> BinaryOpIC::Transition(
- Handle<AllocationSite> allocation_site, Handle<Object> left,
- Handle<Object> right) {
- BinaryOpICState state(isolate(), extra_ic_state());
-
- // Compute the actual result using the builtin for the binary operation.
- Handle<Object> result;
- switch (state.op()) {
- default:
- UNREACHABLE();
- case Token::ADD:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::Add(isolate(), left, right), Object);
- break;
- case Token::SUB:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Subtract(isolate(), left, right), Object);
- break;
- case Token::MUL:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Multiply(isolate(), left, right), Object);
- break;
- case Token::DIV:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Divide(isolate(), left, right), Object);
- break;
- case Token::MOD:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::Modulus(isolate(), left, right), Object);
- break;
- case Token::BIT_OR:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::BitwiseOr(isolate(), left, right), Object);
- break;
- case Token::BIT_AND:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::BitwiseAnd(isolate(), left, right),
- Object);
- break;
- case Token::BIT_XOR:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::BitwiseXor(isolate(), left, right),
- Object);
- break;
- case Token::SAR:
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::ShiftRight(isolate(), left, right),
- Object);
- break;
- case Token::SHR:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::ShiftRightLogical(isolate(), left, right),
- Object);
- break;
- case Token::SHL:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::ShiftLeft(isolate(), left, right), Object);
- break;
- }
-
- // Do not try to update the target if the code was marked for lazy
- // deoptimization. (Since we do not relocate addresses in these
- // code objects, an attempt to access the target could fail.)
- if (AddressIsDeoptimizedCode()) {
- return result;
- }
-
- // Compute the new state.
- BinaryOpICState old_state(isolate(), target()->extra_ic_state());
- state.Update(left, right, result);
-
- // Check if we have a string operation here.
- Handle<Code> new_target;
- if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
- // Setup the allocation site on-demand.
- if (allocation_site.is_null()) {
- allocation_site = isolate()->factory()->NewAllocationSite();
- }
-
- // Install the stub with an allocation site.
- BinaryOpICWithAllocationSiteStub stub(isolate(), state);
- new_target = stub.GetCodeCopyFromTemplate(allocation_site);
-
- // Sanity check the trampoline stub.
- DCHECK_EQ(*allocation_site, new_target->FindFirstAllocationSite());
- } else {
- // Install the generic stub.
- BinaryOpICStub stub(isolate(), state);
- new_target = stub.GetCode();
-
- // Sanity check the generic stub.
- DCHECK_NULL(new_target->FindFirstAllocationSite());
- }
- set_target(*new_target);
-
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- auto ic_stats = ICStats::instance();
- ic_stats->Begin();
- ICInfo& ic_info = ic_stats->Current();
- ic_info.type = "BinaryOpIC";
- ic_info.state = old_state.ToString();
- ic_info.state += " => ";
- ic_info.state += state.ToString();
- JavaScriptFrame::CollectTopFrameForICStats(isolate());
- ic_stats->End();
- } else if (FLAG_ic_stats) {
- int line;
- int column;
- Address pc = GetAbstractPC(&line, &column);
- LOG(isolate(),
- BinaryOpIC(pc, line, column, *new_target, old_state.ToString().c_str(),
- state.ToString().c_str(),
- allocation_site.is_null() ? nullptr : *allocation_site));
- }
-
- // Patch the inlined smi code as necessary.
- if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK);
- } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(isolate(), address(), DISABLE_INLINED_SMI_CHECK);
- }
-
- return result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- typedef BinaryOpDescriptor Descriptor;
- Handle<Object> left = args.at(Descriptor::kLeft);
- Handle<Object> right = args.at(Descriptor::kRight);
- BinaryOpIC ic(isolate);
- RETURN_RESULT_OR_FAILURE(
- isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
- Handle<AllocationSite> allocation_site =
- args.at<AllocationSite>(Descriptor::kAllocationSite);
- Handle<Object> left = args.at(Descriptor::kLeft);
- Handle<Object> right = args.at(Descriptor::kRight);
- BinaryOpIC ic(isolate);
- RETURN_RESULT_OR_FAILURE(isolate,
- ic.Transition(allocation_site, left, right));
-}
-
Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED,
@@ -2800,51 +2635,6 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
}
-Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanICStub stub(isolate(), extra_ic_state());
- ToBooleanHints old_hints = stub.hints();
- bool to_boolean_value = stub.UpdateStatus(object);
- ToBooleanHints new_hints = stub.hints();
- Handle<Code> code = stub.GetCode();
- set_target(*code);
-
- // Note: Although a no-op transition is semantically OK, it is hinting at a
- // bug somewhere in our state transition machinery.
- DCHECK_NE(old_hints, new_hints);
- if (V8_UNLIKELY(FLAG_ic_stats)) {
- if (FLAG_ic_stats &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
- auto ic_stats = ICStats::instance();
- ic_stats->Begin();
- ICInfo& ic_info = ic_stats->Current();
- ic_info.type = "ToBooleanIC";
- ic_info.state = ToString(old_hints);
- ic_info.state += "=>";
- ic_info.state += ToString(new_hints);
- ic_stats->End();
- } else {
- int line;
- int column;
- Address pc = GetAbstractPC(&line, &column);
- LOG(isolate(),
- ToBooleanIC(pc, line, column, *code, ToString(old_hints).c_str(),
- ToString(new_hints).c_str()));
- }
- }
-
- return isolate()->factory()->ToBoolean(to_boolean_value);
-}
-
-
-RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- Handle<Object> object = args.at(0);
- ToBooleanIC ic(isolate);
- return *ic.ToBoolean(object);
-}
-
-
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 9ea8905757..bb8dca540c 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -122,11 +122,9 @@ class IC {
Handle<Object> ComputeHandler(LookupIterator* lookup);
virtual Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) {
UNREACHABLE();
- return Handle<Code>::null();
}
virtual Handle<Code> CompileHandler(LookupIterator* lookup) {
UNREACHABLE();
- return Handle<Code>::null();
}
void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
@@ -423,17 +421,6 @@ class KeyedStoreIC : public StoreIC {
};
-// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class BinaryOpIC : public IC {
- public:
- explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
- MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
- Handle<Object> left,
- Handle<Object> right) WARN_UNUSED_RESULT;
-};
-
-
class CompareIC : public IC {
public:
CompareIC(Isolate* isolate, Token::Value op)
@@ -461,16 +448,7 @@ class CompareIC : public IC {
friend class IC;
};
-
-class ToBooleanIC : public IC {
- public:
- explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
- Handle<Object> ToBoolean(Handle<Object> object);
-};
-
-
-// Helper for BinaryOpIC and CompareIC.
+// Helper for CompareIC.
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Isolate* isolate, Address address,
InlinedSmiCheck check);
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 19c7e47caa..79b7f83eaf 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -132,7 +132,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
DCHECK(IsFastPackedElementsKind(from_kind));
ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
- if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(from_kind, to_kind)) {
TrapAllocationMemento(receiver, bailout);
}
Label perform_transition(this), check_holey_map(this);
@@ -161,8 +161,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
// Found a supported transition target map, perform the transition!
BIND(&perform_transition);
{
- if (IsFastDoubleElementsKind(from_kind) !=
- IsFastDoubleElementsKind(to_kind)) {
+ if (IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind)) {
Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
capacity, INTPTR_PARAMETERS, bailout);
@@ -178,8 +177,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
Node* packed_map =
LoadContextElement(native_context, Context::ArrayMapIndex(packed_kind));
GotoIf(WordNotEqual(receiver_map, packed_map), map_mismatch);
- if (AllocationSite::GetMode(packed_kind, holey_kind) ==
- TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(packed_kind, holey_kind)) {
TrapAllocationMemento(receiver, bailout);
}
Node* holey_map =
@@ -263,7 +261,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// FixedArray backing store -> Smi or object elements.
{
- Node* offset = ElementOffsetFromIndex(intptr_index, FAST_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(intptr_index, PACKED_ELEMENTS,
INTPTR_PARAMETERS, kHeaderSize);
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
@@ -288,7 +286,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're about to introduce holes, ensure holey elements.
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
- FAST_SMI_ELEMENTS, FAST_ELEMENTS, slow);
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, slow);
}
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
value);
@@ -300,14 +298,14 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// Check if we already have object elements; just do the store if so.
{
Label must_transition(this);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ Int32Constant(HOLEY_SMI_ELEMENTS)),
&must_transition);
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
- FAST_ELEMENTS, slow);
+ PACKED_ELEMENTS, slow);
}
Store(elements, offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
@@ -326,14 +324,15 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're adding holes at the end, always transition to a holey
// elements kind, otherwise try to remain packed.
ElementsKind target_kind = update_length == kBumpLengthWithGap
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
+ ? HOLEY_DOUBLE_ELEMENTS
+ : PACKED_DOUBLE_ELEMENTS;
TryRewriteElements(receiver, receiver_map, elements, native_context,
- FAST_SMI_ELEMENTS, target_kind, slow);
+ PACKED_SMI_ELEMENTS, target_kind, slow);
// Reload migrated elements.
Node* double_elements = LoadElements(receiver);
- Node* double_offset = ElementOffsetFromIndex(
- intptr_index, FAST_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ Node* double_offset =
+ ElementOffsetFromIndex(intptr_index, PACKED_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, kHeaderSize);
// Make sure we do not store signalling NaNs into double arrays.
Node* double_value = Float64SilenceNaN(LoadHeapNumberValue(value));
StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
@@ -347,10 +346,10 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're adding holes at the end, always transition to a holey
// elements kind, otherwise try to remain packed.
ElementsKind target_kind = update_length == kBumpLengthWithGap
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
TryRewriteElements(receiver, receiver_map, elements, native_context,
- FAST_SMI_ELEMENTS, target_kind, slow);
+ PACKED_SMI_ELEMENTS, target_kind, slow);
// The elements backing store didn't change, no reload necessary.
CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
Store(elements, offset, value);
@@ -366,7 +365,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
&check_cow_elements);
// FixedDoubleArray backing store -> double elements.
{
- Node* offset = ElementOffsetFromIndex(intptr_index, FAST_DOUBLE_ELEMENTS,
+ Node* offset = ElementOffsetFromIndex(intptr_index, PACKED_DOUBLE_ELEMENTS,
INTPTR_PARAMETERS, kHeaderSize);
// Check if we're about to overwrite the hole. We can safely do that
// only if there can be no setters on the prototype chain.
@@ -396,7 +395,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// If we're about to introduce holes, ensure holey elements.
if (update_length == kBumpLengthWithGap) {
TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
- FAST_DOUBLE_ELEMENTS, slow);
+ PACKED_DOUBLE_ELEMENTS, slow);
}
StoreNoWriteBarrier(MachineRepresentation::kFloat64, elements, offset,
double_value);
@@ -409,14 +408,14 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
{
Node* native_context = LoadNativeContext(context);
ElementsKind target_kind = update_length == kBumpLengthWithGap
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
TryRewriteElements(receiver, receiver_map, elements, native_context,
- FAST_DOUBLE_ELEMENTS, target_kind, slow);
+ PACKED_DOUBLE_ELEMENTS, target_kind, slow);
// Reload migrated elements.
Node* fast_elements = LoadElements(receiver);
Node* fast_offset = ElementOffsetFromIndex(
- intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ intptr_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
Store(fast_elements, fast_offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
}
@@ -488,7 +487,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
Goto(slow);
}
- // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further dispatch.
+ // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further
+ // dispatch.
BIND(&if_nonfast);
{
STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
@@ -866,8 +866,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&not_callable);
{
if (language_mode == STRICT) {
- Node* message =
- SmiConstant(Smi::FromInt(MessageTemplate::kNoSetterInCallback));
+ Node* message = SmiConstant(MessageTemplate::kNoSetterInCallback);
TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
var_accessor_holder.value());
} else {
@@ -880,8 +879,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&readonly);
{
if (language_mode == STRICT) {
- Node* message =
- SmiConstant(Smi::FromInt(MessageTemplate::kStrictReadOnlyProperty));
+ Node* message = SmiConstant(MessageTemplate::kStrictReadOnlyProperty);
Node* type = Typeof(p->receiver);
TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
type, p->receiver);
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 9f0174f44d..0f92191e4a 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -126,7 +126,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -134,8 +135,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
- __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -189,9 +190,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -199,10 +198,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ lw(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ lw(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ lw(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index fd39972f0e..d299fb52e0 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 99638f5493..171ed2eee8 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -126,7 +126,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ Ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -134,7 +135,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
- __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -188,9 +190,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -198,10 +198,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ Ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ Ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 0e2032a41d..41cb2c6dbc 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 877e3996e0..333cae8d68 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -128,7 +128,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
@@ -137,8 +138,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ bne(miss_label);
// Restore the temporarily used register.
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -194,9 +195,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -204,10 +203,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 0f25846870..14ad5c5b77 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 718b24d608..26730a9ad5 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -125,14 +125,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ CompareRoot(map, Heap::kHashTableMapRootIndex);
__ bne(miss_label);
// Restore the temporarily used register.
- __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -186,9 +188,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -196,10 +196,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index 494a4cd1d7..9be3878a58 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -27,7 +27,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return kNoCondition;
}
}
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 6396c57061..46ac580a70 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -9,7 +9,6 @@
#include "src/counters.h"
#include "src/heap/heap.h"
#include "src/ic/ic-inl.h"
-#include "src/type-info.h"
namespace v8 {
namespace internal {
@@ -22,8 +21,8 @@ StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
}
void StubCache::Initialize() {
- DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
- DCHECK(base::bits::IsPowerOfTwo32(kSecondaryTableSize));
+ DCHECK(base::bits::IsPowerOfTwo(kPrimaryTableSize));
+ DCHECK(base::bits::IsPowerOfTwo(kSecondaryTableSize));
Clear();
}
@@ -41,12 +40,10 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
if (handler) {
DCHECK(IC::IsHandler(handler));
if (handler->IsCode()) {
- Code* code = Code::cast(handler);
- Code::Flags expected_flags =
- Code::ComputeHandlerFlags(stub_cache->ic_kind());
- Code::Flags flags = code->flags();
- DCHECK_EQ(expected_flags, flags);
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+ Code::Flags code_flags = Code::cast(handler)->flags();
+ Code::Kind ic_code_kind = stub_cache->ic_kind();
+ DCHECK_EQ(ic_code_kind, Code::ExtractExtraICStateFromFlags(code_flags));
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code_flags));
}
}
return true;
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index ffb0a398ad..74b5715883 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -71,7 +71,6 @@ class StubCache {
return StubCache::secondary_;
}
UNREACHABLE();
- return nullptr;
}
Isolate* isolate() { return isolate_; }
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index eeddd55a7b..51bb791712 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -69,7 +69,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Load properties array.
Register properties = scratch0;
- __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(properties,
+ FieldOperand(receiver, JSObject::kPropertiesOrHashOffset));
// Check that the properties array is a dictionary.
__ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
@@ -117,9 +118,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
@@ -127,10 +126,6 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
case CallOptimization::kHolderFound:
__ movp(holder, FieldOperand(receiver, HeapObject::kMapOffset));
__ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ movp(holder, FieldOperand(holder, HeapObject::kMapOffset));
- __ movp(holder, FieldOperand(holder, Map::kPrototypeOffset));
- }
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 3b87bc9b6a..96468b1a16 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -28,7 +28,6 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return greater_equal;
default:
UNREACHABLE();
- return no_condition;
}
}
diff --git a/deps/v8/src/ic/x87/OWNERS b/deps/v8/src/ic/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/ic/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
deleted file mode 100644
index d1867553cd..0000000000
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-void PropertyAccessCompiler::InitializePlatformSpecific(
- AccessCompilerData* data) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- // Load calling convention.
- // receiver, name, scratch1, scratch2, scratch3.
- Register load_registers[] = {receiver, name, ebx, eax, edi};
-
- // Store calling convention.
- // receiver, name, scratch1, scratch2.
- Register store_registers[] = {receiver, name, ebx, edi};
-
- data->Initialize(arraysize(load_registers), load_registers,
- arraysize(store_registers), store_registers);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
deleted file mode 100644
index dc572a19cc..0000000000
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/handler-compiler.h"
-
-#include "src/api-arguments.h"
-#include "src/field-type.h"
-#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(
- MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- // Restore context register.
- __ pop(esi);
- }
- __ ret(0);
-}
-
-
-void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
- Register slot) {
- MacroAssembler* masm = this->masm();
- STATIC_ASSERT(LoadWithVectorDescriptor::kSlot <
- LoadWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreWithVectorDescriptor::kSlot <
- StoreWithVectorDescriptor::kVector);
- STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
- StoreTransitionDescriptor::kVector);
- __ push(slot);
- __ push(vector);
-}
-
-
-void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
- MacroAssembler* masm = this->masm();
- __ pop(vector);
- __ pop(slot);
-}
-
-
-void PropertyHandlerCompiler::DiscardVectorAndSlot() {
- MacroAssembler* masm = this->masm();
- // Remove vector and slot.
- __ add(esp, Immediate(2 * kPointerSize));
-}
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
- properties, name, scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-// Generate call to api function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that should will be removed
-// when api call ICs are generated in hydrogen.
-void PropertyHandlerCompiler::GenerateApiAccessorCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, Register store_parameter, Register accessor_holder,
- int accessor_index) {
- DCHECK(!accessor_holder.is(scratch));
- // Copy return value.
- __ pop(scratch);
-
- if (is_store) {
- // Discard stack arguments.
- __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
- kPointerSize));
- }
- // Write the receiver and arguments to stack frame.
- __ push(receiver);
- if (is_store) {
- DCHECK(!AreAliased(receiver, scratch, store_parameter));
- __ push(store_parameter);
- }
- __ push(scratch);
- // Stack now matches JSFunction abi.
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiCallbackStub.
- Register callee = edi;
- Register data = ebx;
- Register holder = ecx;
- Register api_function_address = edx;
- scratch = no_reg;
-
- // Put callee in place.
- __ LoadAccessor(callee, accessor_holder, accessor_index,
- is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- int holder_depth = 0;
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
- &holder_depth);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ mov(holder, FieldOperand(receiver, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- for (int i = 1; i < holder_depth; i++) {
- __ mov(holder, FieldOperand(holder, HeapObject::kMapOffset));
- __ mov(holder, FieldOperand(holder, Map::kPrototypeOffset));
- }
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
- // Put call data in place.
- if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
- __ mov(data, Immediate(isolate->factory()->undefined_value()));
- } else {
- if (optimization.is_constant_call()) {
- __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
- } else {
- __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
- }
- __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ mov(api_function_address, Immediate(function_address));
-
- // Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
- __ TailCallStub(&stub);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
- global, name, PropertyCellType::kInvalidated);
- Isolate* isolate = masm->isolate();
- DCHECK(cell->value()->IsTheHole(isolate));
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
- __ LoadWeakValue(scratch, weak_cell, miss);
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(isolate->factory()->the_hole_value()));
- __ j(not_equal, miss);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
- int accessor_index, int expected_arguments, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[12] : value
- // -- esp[8] : slot
- // -- esp[4] : vector
- // -- esp[0] : return address
- // -----------------------------------
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save context register
- __ push(esi);
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (accessor_index >= 0) {
- DCHECK(!holder.is(scratch));
- DCHECK(!receiver.is(scratch));
- DCHECK(!value().is(scratch));
- // Call the JavaScript setter with receiver and value on the stack.
- if (map->IsJSGlobalObjectMap()) {
- __ mov(scratch,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- receiver = scratch;
- }
- __ push(receiver);
- __ push(value());
- __ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
- __ Set(eax, 1);
- __ Call(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
- // Restore context register.
- __ pop(esi);
- }
- if (accessor_index >= 0) {
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
- } else {
- // If we generate a global code snippet for deoptimization only, don't try
- // to drop stack arguments for the StoreIC because they are not a part of
- // expression stack and deoptimizer does not reconstruct them.
- __ ret(0);
- }
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Immediate(name));
- }
-}
-
-void PropertyHandlerCompiler::GenerateAccessCheck(
- Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
- Label* miss, bool compare_native_contexts_only) {
- Label done;
- // Load current native context.
- __ mov(scratch1, NativeContextOperand());
- // Load expected native context.
- __ LoadWeakValue(scratch2, native_context_cell, miss);
- __ cmp(scratch1, scratch2);
-
- if (!compare_native_contexts_only) {
- __ j(equal, &done);
-
- // Compare security tokens of current and expected native contexts.
- __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
- __ cmp(scratch1, scratch2);
- }
- __ j(not_equal, miss);
-
- __ bind(&done);
-}
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss) {
- Handle<Map> receiver_map = map();
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
- !scratch2.is(scratch1));
-
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
- // Operand::ForCell(...) points to the cell's payload!
- __ cmp(Operand::ForCell(validity_cell),
- Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
- __ j(not_equal, miss);
- }
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (receiver_map->IsJSGlobalObjectMap()) {
- current = isolate()->global_object();
- }
-
- Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
- isolate());
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (current_map->is_dictionary_map()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- DCHECK(name->IsUniqueName());
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- if (depth > 1) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
- scratch2);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
- // Go to the next object in the prototype chain.
- current = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(current->map());
- }
-
- DCHECK(!current_map->IsJSGlobalProxyMap());
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0) {
- Handle<WeakCell> weak_cell =
- Map::GetOrCreatePrototypeWeakCell(current, isolate());
- __ LoadWeakValue(reg, weak_cell, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- __ bind(miss);
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- DCHECK(kind() == Code::LOAD_IC);
- PopVectorAndSlot();
- }
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- GenerateRestoreName(miss, name);
- DCHECK(!IC::ShouldPushPopSlotAndVector(kind()));
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-void NamedStoreHandlerCompiler::ZapStackArgumentsRegisterAliases() {
- // Zap register aliases of the arguments passed on the stack to ensure they
- // are properly loaded by the handler (debug-only).
- STATIC_ASSERT(Descriptor::kPassLastArgsOnStack);
- STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
- __ mov(Descriptor::ValueRegister(), Immediate(kDebugZapValue));
- __ mov(Descriptor::SlotRegister(), Immediate(kDebugZapValue));
- __ mov(Descriptor::VectorRegister(), Immediate(kDebugZapValue));
-}
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
- LanguageMode language_mode) {
- Register holder_reg = Frontend(name);
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-
- __ pop(scratch1()); // remove the return address
- // Discard stack arguments.
- __ add(esp, Immediate(StoreWithVectorDescriptor::kStackArgumentsCount *
- kPointerSize));
- __ push(receiver());
- __ push(holder_reg);
- // If the callback cannot leak, then push the callback directly,
- // otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
- __ Push(callback);
- } else {
- Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
- __ Push(cell);
- }
- __ Push(name);
- __ push(value());
- __ push(Immediate(Smi::FromInt(language_mode)));
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty);
-
- // Return the generated code.
- return GetCode(kind(), name);
-}
-
-
-Register NamedStoreHandlerCompiler::value() {
- return StoreDescriptor::ValueRegister();
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
deleted file mode 100644
index 7564c006b8..0000000000
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Isolate* isolate, Address address,
- InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- DCHECK(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
- if (FLAG_trace_ic) {
- LOG(isolate, PatchIC(address, test_instruction_address, delta));
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
- Address jmp_address = test_instruction_address - delta;
- DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc =
- (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index 5633347292..b652d6a6db 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -76,7 +76,6 @@ int IdentityMapBase::InsertKey(Object* address) {
Resize(capacity_ * kResizeFactor);
}
UNREACHABLE();
- return -1;
}
void* IdentityMapBase::DeleteIndex(int index) {
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index e6742c09f7..eb3aaec105 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -132,8 +132,6 @@ v8_source_set("inspector") {
sources += get_target_outputs(":inspector_injected_script")
sources += get_target_outputs(":inspector_debugger_script")
sources += [
- "injected-script-native.cc",
- "injected-script-native.h",
"injected-script.cc",
"injected-script.h",
"inspected-context.cc",
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index b69626cdb8..85b506a956 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -6,6 +6,7 @@ include_rules = [
"+src/base/logging.h",
"+src/base/platform/platform.h",
"+src/conversions.h",
+ "+src/flags.h",
"+src/unicode-cache.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index 2c4bd8d24b..82ee735d55 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -3,7 +3,6 @@ set noparent
alph@chromium.org
caseq@chromium.org
dgozman@chromium.org
-jochen@chromium.org
kozyatinskiy@chromium.org
pfeldman@chromium.org
yangguo@chromium.org
@@ -13,3 +12,5 @@ yangguo@chromium.org
per-file js_protocol.json=set noparent
per-file js_protocol.json=dgozman@chromium.org
per-file js_protocol.json=pfeldman@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/PRESUBMIT.py b/deps/v8/src/inspector/PRESUBMIT.py
index 491564b2d9..5945d7a1ed 100644
--- a/deps/v8/src/inspector/PRESUBMIT.py
+++ b/deps/v8/src/inspector/PRESUBMIT.py
@@ -53,3 +53,16 @@ def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CompileScripts(input_api, output_api))
return results
+
+def PostUploadHook(cl, change, output_api):
+ """git cl upload will call this hook after the issue is created/modified.
+
+ This hook adds extra try bots to the CL description in order to run layout
+ tests in addition to CQ try bots.
+ """
+ return output_api.EnsureCQIncludeTrybotsAreAdded(
+ cl,
+ [
+ 'master.tryserver.blink:linux_trusty_blink_rel'
+ ],
+ 'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/inspector/debugger-script.js b/deps/v8/src/inspector/debugger-script.js
index 89f0d75903..9ca6cd0a64 100644
--- a/deps/v8/src/inspector/debugger-script.js
+++ b/deps/v8/src/inspector/debugger-script.js
@@ -108,7 +108,7 @@ DebuggerScript.getGeneratorScopes = function(gen)
*/
DebuggerScript.setBreakpoint = function(execState, info)
{
- var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.BreakPosition);
+ var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined);
var locations = Debug.findBreakPointActualLocations(breakId);
if (!locations.length)
return undefined;
diff --git a/deps/v8/src/inspector/debugger_script_externs.js b/deps/v8/src/inspector/debugger_script_externs.js
index 656bada862..e87d2e96c4 100644
--- a/deps/v8/src/inspector/debugger_script_externs.js
+++ b/deps/v8/src/inspector/debugger_script_externs.js
@@ -55,9 +55,8 @@ Debug.scripts = function() {}
* @param {number=} column
* @param {string=} condition
* @param {string=} groupId
- * @param {Debug.BreakPositionAlignment=} positionAlignment
*/
-Debug.setScriptBreakPointById = function(scriptId, line, column, condition, groupId, positionAlignment) {}
+Debug.setScriptBreakPointById = function(scriptId, line, column, condition, groupId) {}
/**
* @param {number} breakId
@@ -72,13 +71,6 @@ Debug.findBreakPointActualLocations = function(breakId) {}
*/
Debug.findBreakPoint = function(breakId, remove) {}
-/** @enum */
-const BreakPositionAlignment = {
- Statement: 0,
- BreakPosition: 1
-};
-Debug.BreakPositionAlignment = BreakPositionAlignment;
-
/** @const */
var LiveEdit = {}
diff --git a/deps/v8/src/inspector/injected-script-native.cc b/deps/v8/src/inspector/injected-script-native.cc
deleted file mode 100644
index 5d0136b3b6..0000000000
--- a/deps/v8/src/inspector/injected-script-native.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/inspector/injected-script-native.h"
-
-namespace v8_inspector {
-
-InjectedScriptNative::InjectedScriptNative(v8::Isolate* isolate)
- : m_lastBoundObjectId(1), m_isolate(isolate) {}
-
-static const char privateKeyName[] = "v8-inspector#injectedScript";
-
-InjectedScriptNative::~InjectedScriptNative() {}
-
-void InjectedScriptNative::setOnInjectedScriptHost(
- v8::Local<v8::Object> injectedScriptHost) {
- v8::HandleScope handleScope(m_isolate);
- v8::Local<v8::External> external = v8::External::New(m_isolate, this);
- v8::Local<v8::Private> privateKey = v8::Private::ForApi(
- m_isolate, v8::String::NewFromUtf8(m_isolate, privateKeyName,
- v8::NewStringType::kInternalized)
- .ToLocalChecked());
- injectedScriptHost->SetPrivate(m_isolate->GetCurrentContext(), privateKey,
- external);
-}
-
-InjectedScriptNative* InjectedScriptNative::fromInjectedScriptHost(
- v8::Isolate* isolate, v8::Local<v8::Object> injectedScriptObject) {
- v8::HandleScope handleScope(isolate);
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::Local<v8::Private> privateKey = v8::Private::ForApi(
- isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
- v8::NewStringType::kInternalized)
- .ToLocalChecked());
- v8::Local<v8::Value> value =
- injectedScriptObject->GetPrivate(context, privateKey).ToLocalChecked();
- DCHECK(value->IsExternal());
- v8::Local<v8::External> external = value.As<v8::External>();
- return static_cast<InjectedScriptNative*>(external->Value());
-}
-
-int InjectedScriptNative::bind(v8::Local<v8::Value> value,
- const String16& groupName) {
- if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
- int id = m_lastBoundObjectId++;
- m_idToWrappedObject.insert(
- std::make_pair(id, v8::Global<v8::Value>(m_isolate, value)));
- addObjectToGroup(id, groupName);
- return id;
-}
-
-void InjectedScriptNative::unbind(int id) {
- m_idToWrappedObject.erase(id);
- m_idToObjectGroupName.erase(id);
-}
-
-v8::Local<v8::Value> InjectedScriptNative::objectForId(int id) {
- auto iter = m_idToWrappedObject.find(id);
- return iter != m_idToWrappedObject.end() ? iter->second.Get(m_isolate)
- : v8::Local<v8::Value>();
-}
-
-void InjectedScriptNative::addObjectToGroup(int objectId,
- const String16& groupName) {
- if (groupName.isEmpty()) return;
- if (objectId <= 0) return;
- m_idToObjectGroupName[objectId] = groupName;
- m_nameToObjectGroup[groupName].push_back(
- objectId); // Creates an empty vector if key is not there
-}
-
-void InjectedScriptNative::releaseObjectGroup(const String16& groupName) {
- if (groupName.isEmpty()) return;
- NameToObjectGroup::iterator groupIt = m_nameToObjectGroup.find(groupName);
- if (groupIt == m_nameToObjectGroup.end()) return;
- for (int id : groupIt->second) unbind(id);
- m_nameToObjectGroup.erase(groupIt);
-}
-
-String16 InjectedScriptNative::groupName(int objectId) const {
- if (objectId <= 0) return String16();
- IdToObjectGroupName::const_iterator iterator =
- m_idToObjectGroupName.find(objectId);
- return iterator != m_idToObjectGroupName.end() ? iterator->second
- : String16();
-}
-
-} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/injected-script-native.h b/deps/v8/src/inspector/injected-script-native.h
deleted file mode 100644
index c0b93013fe..0000000000
--- a/deps/v8/src/inspector/injected-script-native.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
-#define V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
-
-#include <vector>
-
-#include "src/inspector/protocol/Protocol.h"
-
-#include "include/v8.h"
-
-namespace v8_inspector {
-
-class InjectedScriptNative final {
- public:
- explicit InjectedScriptNative(v8::Isolate*);
- ~InjectedScriptNative();
-
- void setOnInjectedScriptHost(v8::Local<v8::Object>);
- static InjectedScriptNative* fromInjectedScriptHost(v8::Isolate* isolate,
- v8::Local<v8::Object>);
-
- int bind(v8::Local<v8::Value>, const String16& groupName);
- void unbind(int id);
- v8::Local<v8::Value> objectForId(int id);
-
- void releaseObjectGroup(const String16& groupName);
- String16 groupName(int objectId) const;
-
- private:
- void addObjectToGroup(int objectId, const String16& groupName);
-
- int m_lastBoundObjectId;
- v8::Isolate* m_isolate;
- protocol::HashMap<int, v8::Global<v8::Value>> m_idToWrappedObject;
- typedef protocol::HashMap<int, String16> IdToObjectGroupName;
- IdToObjectGroupName m_idToObjectGroupName;
- typedef protocol::HashMap<String16, std::vector<int>> NameToObjectGroup;
- NameToObjectGroup m_nameToObjectGroup;
-};
-
-} // namespace v8_inspector
-
-#endif // V8_INSPECTOR_INJECTEDSCRIPTNATIVE_H_
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index b4183a735a..b71fb4313e 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -30,7 +30,6 @@
#include "src/inspector/injected-script.h"
-#include "src/inspector/injected-script-native.h"
#include "src/inspector/injected-script-source.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -48,6 +47,10 @@
namespace v8_inspector {
+namespace {
+static const char privateKeyName[] = "v8-inspector#injectedScript";
+}
+
using protocol::Array;
using protocol::Runtime::PropertyDescriptor;
using protocol::Runtime::InternalPropertyDescriptor;
@@ -55,17 +58,13 @@ using protocol::Runtime::RemoteObject;
using protocol::Maybe;
std::unique_ptr<InjectedScript> InjectedScript::create(
- InspectedContext* inspectedContext) {
+ InspectedContext* inspectedContext, int sessionId) {
v8::Isolate* isolate = inspectedContext->isolate();
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = inspectedContext->context();
v8::Context::Scope scope(context);
-
- std::unique_ptr<InjectedScriptNative> injectedScriptNative(
- new InjectedScriptNative(isolate));
- v8::Local<v8::Object> scriptHostWrapper =
- V8InjectedScriptHost::create(context, inspectedContext->inspector());
- injectedScriptNative->setOnInjectedScriptHost(scriptHostWrapper);
+ v8::MicrotasksScope microtasksScope(isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
// Inject javascript into the context. The compiled script is supposed to
// evaluate into
@@ -87,13 +86,13 @@ std::unique_ptr<InjectedScript> InjectedScript::create(
.ToLocal(&value))
return nullptr;
DCHECK(value->IsFunction());
+ v8::Local<v8::Object> scriptHostWrapper =
+ V8InjectedScriptHost::create(context, inspectedContext->inspector());
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(value);
v8::Local<v8::Object> windowGlobal = context->Global();
v8::Local<v8::Value> info[] = {
scriptHostWrapper, windowGlobal,
v8::Number::New(isolate, inspectedContext->contextId())};
- v8::MicrotasksScope microtasksScope(isolate,
- v8::MicrotasksScope::kDoNotRunMicrotasks);
int contextGroupId = inspectedContext->contextGroupId();
int contextId = inspectedContext->contextId();
@@ -105,17 +104,23 @@ std::unique_ptr<InjectedScript> InjectedScript::create(
if (inspector->getContext(contextGroupId, contextId) != inspectedContext)
return nullptr;
if (!injectedScriptValue->IsObject()) return nullptr;
- return std::unique_ptr<InjectedScript>(
- new InjectedScript(inspectedContext, injectedScriptValue.As<v8::Object>(),
- std::move(injectedScriptNative)));
+
+ std::unique_ptr<InjectedScript> injectedScript(new InjectedScript(
+ inspectedContext, injectedScriptValue.As<v8::Object>(), sessionId));
+ v8::Local<v8::Private> privateKey = v8::Private::ForApi(
+ isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked());
+ scriptHostWrapper->SetPrivate(
+ context, privateKey, v8::External::New(isolate, injectedScript.get()));
+ return injectedScript;
}
-InjectedScript::InjectedScript(
- InspectedContext* context, v8::Local<v8::Object> object,
- std::unique_ptr<InjectedScriptNative> injectedScriptNative)
+InjectedScript::InjectedScript(InspectedContext* context,
+ v8::Local<v8::Object> object, int sessionId)
: m_context(context),
m_value(context->isolate(), object),
- m_native(std::move(injectedScriptNative)) {}
+ m_sessionId(sessionId) {}
InjectedScript::~InjectedScript() {}
@@ -165,7 +170,7 @@ void InjectedScript::releaseObject(const String16& objectId) {
if (!object) return;
int boundId = 0;
if (!object->getInteger("id", &boundId)) return;
- m_native->unbind(boundId);
+ unbindObject(boundId);
}
Response InjectedScript::wrapObject(
@@ -266,19 +271,26 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
Response InjectedScript::findObject(const RemoteObjectId& objectId,
v8::Local<v8::Value>* outObject) const {
- *outObject = m_native->objectForId(objectId.id());
- if (outObject->IsEmpty())
+ auto it = m_idToWrappedObject.find(objectId.id());
+ if (it == m_idToWrappedObject.end())
return Response::Error("Could not find object with given id");
+ *outObject = it->second.Get(m_context->isolate());
return Response::OK();
}
String16 InjectedScript::objectGroupName(const RemoteObjectId& objectId) const {
- return m_native->groupName(objectId.id());
+ if (objectId.id() <= 0) return String16();
+ auto it = m_idToObjectGroupName.find(objectId.id());
+ return it != m_idToObjectGroupName.end() ? it->second : String16();
}
void InjectedScript::releaseObjectGroup(const String16& objectGroup) {
- m_native->releaseObjectGroup(objectGroup);
if (objectGroup == "console") m_lastEvaluationResult.Reset();
+ if (objectGroup.isEmpty()) return;
+ auto it = m_nameToObjectGroup.find(objectGroup);
+ if (it == m_nameToObjectGroup.end()) return;
+ for (int id : it->second) unbindObject(id);
+ m_nameToObjectGroup.erase(it);
}
void InjectedScript::setCustomObjectFormatterEnabled(bool enabled) {
@@ -410,27 +422,26 @@ v8::Local<v8::Object> InjectedScript::commandLineAPI() {
m_commandLineAPI.Reset(
m_context->isolate(),
m_context->inspector()->console()->createCommandLineAPI(
- m_context->context()));
+ m_context->context(), m_sessionId));
}
return m_commandLineAPI.Get(m_context->isolate());
}
-InjectedScript::Scope::Scope(V8InspectorImpl* inspector, int contextGroupId)
- : m_inspector(inspector),
- m_contextGroupId(contextGroupId),
+InjectedScript::Scope::Scope(V8InspectorSessionImpl* session)
+ : m_inspector(session->inspector()),
m_injectedScript(nullptr),
- m_handleScope(inspector->isolate()),
- m_tryCatch(inspector->isolate()),
+ m_handleScope(m_inspector->isolate()),
+ m_tryCatch(m_inspector->isolate()),
m_ignoreExceptionsAndMuteConsole(false),
m_previousPauseOnExceptionsState(v8::debug::NoBreakOnException),
- m_userGesture(false) {}
+ m_userGesture(false),
+ m_contextGroupId(session->contextGroupId()),
+ m_sessionId(session->sessionId()) {}
Response InjectedScript::Scope::initialize() {
cleanup();
- // TODO(dgozman): what if we reattach to the same context group during
- // evaluate? Introduce a session id?
V8InspectorSessionImpl* session =
- m_inspector->sessionForContextGroup(m_contextGroupId);
+ m_inspector->sessionById(m_contextGroupId, m_sessionId);
if (!session) return Response::InternalError();
Response response = findInjectedScript(session);
if (!response.isSuccess()) return response;
@@ -489,10 +500,9 @@ InjectedScript::Scope::~Scope() {
cleanup();
}
-InjectedScript::ContextScope::ContextScope(V8InspectorImpl* inspector,
- int contextGroupId,
+InjectedScript::ContextScope::ContextScope(V8InspectorSessionImpl* session,
int executionContextId)
- : InjectedScript::Scope(inspector, contextGroupId),
+ : InjectedScript::Scope(session),
m_executionContextId(executionContextId) {}
InjectedScript::ContextScope::~ContextScope() {}
@@ -502,11 +512,9 @@ Response InjectedScript::ContextScope::findInjectedScript(
return session->findInjectedScript(m_executionContextId, m_injectedScript);
}
-InjectedScript::ObjectScope::ObjectScope(V8InspectorImpl* inspector,
- int contextGroupId,
+InjectedScript::ObjectScope::ObjectScope(V8InspectorSessionImpl* session,
const String16& remoteObjectId)
- : InjectedScript::Scope(inspector, contextGroupId),
- m_remoteObjectId(remoteObjectId) {}
+ : InjectedScript::Scope(session), m_remoteObjectId(remoteObjectId) {}
InjectedScript::ObjectScope::~ObjectScope() {}
@@ -525,11 +533,9 @@ Response InjectedScript::ObjectScope::findInjectedScript(
return Response::OK();
}
-InjectedScript::CallFrameScope::CallFrameScope(V8InspectorImpl* inspector,
- int contextGroupId,
+InjectedScript::CallFrameScope::CallFrameScope(V8InspectorSessionImpl* session,
const String16& remoteObjectId)
- : InjectedScript::Scope(inspector, contextGroupId),
- m_remoteCallFrameId(remoteObjectId) {}
+ : InjectedScript::Scope(session), m_remoteCallFrameId(remoteObjectId) {}
InjectedScript::CallFrameScope::~CallFrameScope() {}
@@ -542,4 +548,37 @@ Response InjectedScript::CallFrameScope::findInjectedScript(
return session->findInjectedScript(remoteId.get(), m_injectedScript);
}
+InjectedScript* InjectedScript::fromInjectedScriptHost(
+ v8::Isolate* isolate, v8::Local<v8::Object> injectedScriptObject) {
+ v8::HandleScope handleScope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Private> privateKey = v8::Private::ForApi(
+ isolate, v8::String::NewFromUtf8(isolate, privateKeyName,
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked());
+ v8::Local<v8::Value> value =
+ injectedScriptObject->GetPrivate(context, privateKey).ToLocalChecked();
+ DCHECK(value->IsExternal());
+ v8::Local<v8::External> external = value.As<v8::External>();
+ return static_cast<InjectedScript*>(external->Value());
+}
+
+int InjectedScript::bindObject(v8::Local<v8::Value> value,
+ const String16& groupName) {
+ if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
+ int id = m_lastBoundObjectId++;
+ m_idToWrappedObject[id].Reset(m_context->isolate(), value);
+
+ if (!groupName.isEmpty() && id > 0) {
+ m_idToObjectGroupName[id] = groupName;
+ m_nameToObjectGroup[groupName].push_back(id);
+ }
+ return id;
+}
+
+void InjectedScript::unbindObject(int id) {
+ m_idToWrappedObject.erase(id);
+ m_idToObjectGroupName.erase(id);
+}
+
} // namespace v8_inspector
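
The bindObject()/unbindObject() pair added above takes over the object-id bookkeeping that previously lived in InjectedScriptNative: ids are handed out from m_lastBoundObjectId, each bound object is kept alive in m_idToWrappedObject, and the group maps let a whole object group be dropped at once. A minimal sketch of that registry, with a plain string standing in for the v8::Global<v8::Value> handles (the types and the releaseGroup helper here are illustrative, not the inspector's API):

    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // Placeholder for the wrapped v8::Global<v8::Value> handle.
    using Value = std::string;

    class ObjectRegistry {
     public:
      // Mirrors InjectedScript::bindObject: hand out a fresh positive id and
      // remember the object's group so the group can be released later.
      int bind(Value value, const std::string& groupName) {
        if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
        int id = m_lastBoundObjectId++;
        m_idToObject[id] = std::move(value);
        if (!groupName.empty()) {
          m_idToGroupName[id] = groupName;
          m_nameToGroup[groupName].push_back(id);
        }
        return id;
      }

      // Mirrors InjectedScript::unbindObject: forget a single id.
      void unbind(int id) {
        m_idToObject.erase(id);
        m_idToGroupName.erase(id);
      }

      // Release every id that was bound under a group name.
      void releaseGroup(const std::string& groupName) {
        auto it = m_nameToGroup.find(groupName);
        if (it == m_nameToGroup.end()) return;
        for (int id : it->second) unbind(id);
        m_nameToGroup.erase(it);
      }

     private:
      int m_lastBoundObjectId = 1;
      std::unordered_map<int, Value> m_idToObject;
      std::unordered_map<int, std::string> m_idToGroupName;
      std::unordered_map<std::string, std::vector<int>> m_nameToGroup;
    };

    int main() {
      ObjectRegistry registry;
      int id = registry.bind("{} /* remote object */", "console");
      registry.releaseGroup("console");
      registry.unbind(id);  // no-op: already released with its group
    }
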
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 9e6680a7e3..7b64efcd24 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -31,8 +31,9 @@
#ifndef V8_INSPECTOR_INJECTEDSCRIPT_H_
#define V8_INSPECTOR_INJECTEDSCRIPT_H_
+#include <unordered_map>
+
#include "src/base/macros.h"
-#include "src/inspector/injected-script-native.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
@@ -53,8 +54,11 @@ using protocol::Response;
class InjectedScript final {
public:
- static std::unique_ptr<InjectedScript> create(InspectedContext*);
+ static std::unique_ptr<InjectedScript> create(InspectedContext*,
+ int sessionId);
~InjectedScript();
+ static InjectedScript* fromInjectedScriptHost(v8::Isolate* isolate,
+ v8::Local<v8::Object>);
InspectedContext* context() const { return m_context; }
@@ -99,6 +103,8 @@ class InjectedScript final {
Maybe<protocol::Runtime::ExceptionDetails>*);
v8::Local<v8::Value> lastEvaluationResult() const;
+ int bindObject(v8::Local<v8::Value>, const String16& groupName);
+
class Scope {
public:
Response initialize();
@@ -110,12 +116,11 @@ class InjectedScript final {
const v8::TryCatch& tryCatch() const { return m_tryCatch; }
protected:
- Scope(V8InspectorImpl*, int contextGroupId);
+ explicit Scope(V8InspectorSessionImpl*);
virtual ~Scope();
virtual Response findInjectedScript(V8InspectorSessionImpl*) = 0;
V8InspectorImpl* m_inspector;
- int m_contextGroupId;
InjectedScript* m_injectedScript;
private:
@@ -130,11 +135,13 @@ class InjectedScript final {
bool m_ignoreExceptionsAndMuteConsole;
v8::debug::ExceptionBreakState m_previousPauseOnExceptionsState;
bool m_userGesture;
+ int m_contextGroupId;
+ int m_sessionId;
};
class ContextScope : public Scope {
public:
- ContextScope(V8InspectorImpl*, int contextGroupId, int executionContextId);
+ ContextScope(V8InspectorSessionImpl*, int executionContextId);
~ContextScope();
private:
@@ -146,8 +153,7 @@ class InjectedScript final {
class ObjectScope : public Scope {
public:
- ObjectScope(V8InspectorImpl*, int contextGroupId,
- const String16& remoteObjectId);
+ ObjectScope(V8InspectorSessionImpl*, const String16& remoteObjectId);
~ObjectScope();
const String16& objectGroupName() const { return m_objectGroupName; }
v8::Local<v8::Value> object() const { return m_object; }
@@ -163,8 +169,7 @@ class InjectedScript final {
class CallFrameScope : public Scope {
public:
- CallFrameScope(V8InspectorImpl*, int contextGroupId,
- const String16& remoteCallFrameId);
+ CallFrameScope(V8InspectorSessionImpl*, const String16& remoteCallFrameId);
~CallFrameScope();
size_t frameOrdinal() const { return m_frameOrdinal; }
@@ -177,19 +182,23 @@ class InjectedScript final {
};
private:
- InjectedScript(InspectedContext*, v8::Local<v8::Object>,
- std::unique_ptr<InjectedScriptNative>);
+ InjectedScript(InspectedContext*, v8::Local<v8::Object>, int sessionId);
v8::Local<v8::Value> v8Value() const;
Response wrapValue(v8::Local<v8::Value>, const String16& groupName,
bool forceValueType, bool generatePreview,
v8::Local<v8::Value>* result) const;
v8::Local<v8::Object> commandLineAPI();
+ void unbindObject(int id);
InspectedContext* m_context;
v8::Global<v8::Value> m_value;
+ int m_sessionId;
v8::Global<v8::Value> m_lastEvaluationResult;
- std::unique_ptr<InjectedScriptNative> m_native;
v8::Global<v8::Object> m_commandLineAPI;
+ int m_lastBoundObjectId = 1;
+ std::unordered_map<int, v8::Global<v8::Value>> m_idToWrappedObject;
+ std::unordered_map<int, String16> m_idToObjectGroupName;
+ std::unordered_map<String16, std::vector<int>> m_nameToObjectGroup;
DISALLOW_COPY_AND_ASSIGN(InjectedScript);
};
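
The Scope class above no longer caches a session pointer: it records the contextGroupId and sessionId at construction and re-resolves the session through sessionById() inside initialize(), so a session that disconnected while the scope was alive is reported as an InternalError instead of being dereferenced. A small sketch of that look-up-by-id-on-use pattern (the Registry and Session types are stand-ins, not the inspector's classes):

    #include <memory>
    #include <unordered_map>

    struct Session { int contextGroupId; int id; };

    // Illustrative session registry; sessions may be removed at any time.
    struct Registry {
      std::unordered_map<int, std::unique_ptr<Session>> sessions;  // by sessionId
      Session* sessionById(int /*contextGroupId*/, int sessionId) {
        auto it = sessions.find(sessionId);
        return it == sessions.end() ? nullptr : it->second.get();
      }
    };

    class Scope {
     public:
      Scope(Registry* registry, const Session& session)
          : m_registry(registry),
            m_contextGroupId(session.contextGroupId),
            m_sessionId(session.id) {}

      // Like InjectedScript::Scope::initialize(): look the session up again by
      // id right before use, and fail cleanly if it is gone.
      bool initialize(Session** out) {
        *out = m_registry->sessionById(m_contextGroupId, m_sessionId);
        return *out != nullptr;
      }

     private:
      Registry* m_registry;
      int m_contextGroupId;
      int m_sessionId;
    };

    int main() {
      Registry registry;
      registry.sessions[5] = std::make_unique<Session>(Session{1, 5});
      Scope scope(&registry, *registry.sessions[5]);
      registry.sessions.erase(5);  // session disconnects while the scope lives
      Session* session = nullptr;
      return scope.initialize(&session) ? 1 : 0;  // initialize fails cleanly
    }
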
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 1a5f49c0b5..d7a2f810db 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -15,39 +15,6 @@
namespace v8_inspector {
-class InspectedContext::WeakCallbackData {
- public:
- WeakCallbackData(InspectedContext* context, V8InspectorImpl* inspector,
- int groupId, int contextId)
- : m_context(context),
- m_inspector(inspector),
- m_groupId(groupId),
- m_contextId(contextId) {}
-
- static void resetContext(const v8::WeakCallbackInfo<WeakCallbackData>& data) {
- // InspectedContext is alive here because weak handler is still alive.
- data.GetParameter()->m_context->m_weakCallbackData = nullptr;
- data.GetParameter()->m_context->m_context.Reset();
- data.SetSecondPassCallback(&callContextCollected);
- }
-
- static void callContextCollected(
- const v8::WeakCallbackInfo<WeakCallbackData>& data) {
- // InspectedContext can be dead here since anything can happen between first
- // and second pass callback.
- WeakCallbackData* callbackData = data.GetParameter();
- callbackData->m_inspector->contextCollected(callbackData->m_groupId,
- callbackData->m_contextId);
- delete callbackData;
- }
-
- private:
- InspectedContext* m_context;
- V8InspectorImpl* m_inspector;
- int m_groupId;
- int m_contextId;
-};
-
InspectedContext::InspectedContext(V8InspectorImpl* inspector,
const V8ContextInfo& info, int contextId)
: m_inspector(inspector),
@@ -56,14 +23,8 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
m_contextGroupId(info.contextGroupId),
m_origin(toString16(info.origin)),
m_humanReadableName(toString16(info.humanReadableName)),
- m_auxData(toString16(info.auxData)),
- m_reported(false) {
+ m_auxData(toString16(info.auxData)) {
v8::debug::SetContextId(info.context, contextId);
- m_weakCallbackData =
- new WeakCallbackData(this, m_inspector, m_contextGroupId, m_contextId);
- m_context.SetWeak(m_weakCallbackData,
- &InspectedContext::WeakCallbackData::resetContext,
- v8::WeakCallbackType::kParameter);
if (!info.hasMemoryOnConsole) return;
v8::Context::Scope contextScope(info.context);
v8::Local<v8::Object> global = info.context->Global();
@@ -77,9 +38,6 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
}
InspectedContext::~InspectedContext() {
- // If we destroy InspectedContext before weak callback is invoked then we need
- // to delete data here.
- if (!m_context.IsEmpty()) delete m_weakCallbackData;
}
// static
@@ -95,15 +53,34 @@ v8::Isolate* InspectedContext::isolate() const {
return m_inspector->isolate();
}
-bool InspectedContext::createInjectedScript() {
- DCHECK(!m_injectedScript);
- std::unique_ptr<InjectedScript> injectedScript = InjectedScript::create(this);
+bool InspectedContext::isReported(int sessionId) const {
+ return m_reportedSessionIds.find(sessionId) != m_reportedSessionIds.cend();
+}
+
+void InspectedContext::setReported(int sessionId, bool reported) {
+ if (reported)
+ m_reportedSessionIds.insert(sessionId);
+ else
+ m_reportedSessionIds.erase(sessionId);
+}
+
+InjectedScript* InspectedContext::getInjectedScript(int sessionId) {
+ auto it = m_injectedScripts.find(sessionId);
+ return it == m_injectedScripts.end() ? nullptr : it->second.get();
+}
+
+bool InspectedContext::createInjectedScript(int sessionId) {
+ DCHECK(m_injectedScripts.find(sessionId) == m_injectedScripts.end());
+ std::unique_ptr<InjectedScript> injectedScript =
+ InjectedScript::create(this, sessionId);
// InjectedScript::create can destroy |this|.
if (!injectedScript) return false;
- m_injectedScript = std::move(injectedScript);
+ m_injectedScripts[sessionId] = std::move(injectedScript);
return true;
}
-void InspectedContext::discardInjectedScript() { m_injectedScript.reset(); }
+void InspectedContext::discardInjectedScript(int sessionId) {
+ m_injectedScripts.erase(sessionId);
+}
} // namespace v8_inspector
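
With the change above, InspectedContext tracks one InjectedScript per attached session (keyed by sessionId) and a set of session ids it has been reported to, replacing the single m_injectedScript and the m_reported flag. The bookkeeping reduces to the sketch below, with a placeholder Script type standing in for InjectedScript:

    #include <memory>
    #include <unordered_map>
    #include <unordered_set>

    struct Script {};  // stands in for InjectedScript

    class ContextState {
     public:
      bool isReported(int sessionId) const {
        return m_reported.count(sessionId) != 0;
      }
      void setReported(int sessionId, bool reported) {
        if (reported)
          m_reported.insert(sessionId);
        else
          m_reported.erase(sessionId);
      }
      Script* getScript(int sessionId) {
        auto it = m_scripts.find(sessionId);
        return it == m_scripts.end() ? nullptr : it->second.get();
      }
      bool createScript(int sessionId) {
        // One script per session; creating twice for the same id is a caller bug.
        if (m_scripts.count(sessionId)) return false;
        m_scripts[sessionId] = std::make_unique<Script>();
        return true;
      }
      void discardScript(int sessionId) { m_scripts.erase(sessionId); }

     private:
      std::unordered_set<int> m_reported;
      std::unordered_map<int, std::unique_ptr<Script>> m_scripts;
    };
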
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index 422725046a..b32263bc2e 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -5,6 +5,9 @@
#ifndef V8_INSPECTOR_INSPECTEDCONTEXT_H_
#define V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#include <unordered_map>
+#include <unordered_set>
+
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
@@ -30,22 +33,20 @@ class InspectedContext {
String16 humanReadableName() const { return m_humanReadableName; }
String16 auxData() const { return m_auxData; }
- bool isReported() const { return m_reported; }
- void setReported(bool reported) { m_reported = reported; }
+ bool isReported(int sessionId) const;
+ void setReported(int sessionId, bool reported);
v8::Isolate* isolate() const;
V8InspectorImpl* inspector() const { return m_inspector; }
- InjectedScript* getInjectedScript() { return m_injectedScript.get(); }
- bool createInjectedScript();
- void discardInjectedScript();
+ InjectedScript* getInjectedScript(int sessionId);
+ bool createInjectedScript(int sessionId);
+ void discardInjectedScript(int sessionId);
private:
friend class V8InspectorImpl;
InspectedContext(V8InspectorImpl*, const V8ContextInfo&, int contextId);
- class WeakCallbackData;
-
V8InspectorImpl* m_inspector;
v8::Global<v8::Context> m_context;
int m_contextId;
@@ -53,9 +54,8 @@ class InspectedContext {
const String16 m_origin;
const String16 m_humanReadableName;
const String16 m_auxData;
- bool m_reported;
- std::unique_ptr<InjectedScript> m_injectedScript;
- WeakCallbackData* m_weakCallbackData;
+ std::unordered_set<int> m_reportedSessionIds;
+ std::unordered_map<int, std::unique_ptr<InjectedScript>> m_injectedScripts;
DISALLOW_COPY_AND_ASSIGN(InspectedContext);
};
diff --git a/deps/v8/src/inspector/inspector.gypi b/deps/v8/src/inspector/inspector.gypi
index 8aff49d0ea..634c73f1e2 100644
--- a/deps/v8/src/inspector/inspector.gypi
+++ b/deps/v8/src/inspector/inspector.gypi
@@ -38,8 +38,6 @@
'../../include/v8-inspector-protocol.h',
'inspector/injected-script.cc',
'inspector/injected-script.h',
- 'inspector/injected-script-native.cc',
- 'inspector/injected-script-native.h',
'inspector/inspected-context.cc',
'inspector/inspected-context.h',
'inspector/java-script-call-frame.cc',
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index ce84b7c714..125a248919 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -12,7 +12,7 @@
{
"domain": "Runtime",
"async": ["evaluate", "awaitPromise", "callFunctionOn", "runScript"],
- "exported": ["StackTrace", "RemoteObject"]
+ "exported": ["StackTrace", "RemoteObject", "ExecutionContextId"]
},
{
"domain": "Debugger",
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index 62545cd80d..b61ce88bb3 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -388,7 +388,8 @@
{ "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
{ "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
{ "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." },
+ { "name": "context", "type": "string", "optional": true, "experimental": true, "description": "Console context descriptor for calls on non-default console context (not console.*): 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call on named context." }
]
},
{
@@ -862,7 +863,8 @@
"description": "Coverage data for a JavaScript function.",
"properties": [
{ "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." }
+ { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." },
+ { "name": "isBlockCoverage", "type": "boolean", "description": "Whether coverage data for this function has block granularity." }
],
"experimental": true
},
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 6544646d71..30dd7dd14c 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -362,6 +362,41 @@ static inline void putUTF8Triple(char*& buffer, UChar ch) {
} // namespace
+String16::String16() {}
+
+String16::String16(const String16& other)
+ : m_impl(other.m_impl), hash_code(other.hash_code) {}
+
+String16::String16(String16&& other)
+ : m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
+
+String16::String16(const UChar* characters, size_t size)
+ : m_impl(characters, size) {}
+
+String16::String16(const UChar* characters) : m_impl(characters) {}
+
+String16::String16(const char* characters)
+ : String16(characters, std::strlen(characters)) {}
+
+String16::String16(const char* characters, size_t size) {
+ m_impl.resize(size);
+ for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i];
+}
+
+String16::String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
+
+String16& String16::operator=(const String16& other) {
+ m_impl = other.m_impl;
+ hash_code = other.hash_code;
+ return *this;
+}
+
+String16& String16::operator=(String16&& other) {
+ m_impl = std::move(other.m_impl);
+ hash_code = other.hash_code;
+ return *this;
+}
+
// static
String16 String16::fromInteger(int number) {
char arr[50];
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 0270f5117a..1140092374 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -20,32 +20,17 @@ class String16 {
public:
static const size_t kNotFound = static_cast<size_t>(-1);
- String16() {}
- String16(const String16& other)
- : m_impl(other.m_impl), hash_code(other.hash_code) {}
- String16(String16&& other)
- : m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
- String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
- String16(const UChar* characters) // NOLINT(runtime/explicit)
- : m_impl(characters) {}
- String16(const char* characters) // NOLINT(runtime/explicit)
- : String16(characters, std::strlen(characters)) {}
- String16(const char* characters, size_t size) {
- m_impl.resize(size);
- for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i];
- }
- explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
-
- String16& operator=(const String16& other) {
- m_impl = other.m_impl;
- hash_code = other.hash_code;
- return *this;
- }
- String16& operator=(String16&& other) {
- m_impl = std::move(other.m_impl);
- hash_code = other.hash_code;
- return *this;
- }
+ String16();
+ String16(const String16& other);
+ String16(String16&& other);
+ String16(const UChar* characters, size_t size);
+ String16(const UChar* characters); // NOLINT(runtime/explicit)
+ String16(const char* characters); // NOLINT(runtime/explicit)
+ String16(const char* characters, size_t size);
+ explicit String16(const std::basic_string<UChar>& impl);
+
+ String16& operator=(const String16& other);
+ String16& operator=(String16&& other);
static String16 fromInteger(int);
static String16 fromInteger(size_t);
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 22fe18137e..fa740bbc4e 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -321,10 +321,13 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
arguments->addItem(std::move(messageArg));
}
}
+ Maybe<String16> consoleContext;
+ if (!m_consoleContext.isEmpty()) consoleContext = m_consoleContext;
frontend->consoleAPICalled(
consoleAPITypeValue(m_type), std::move(arguments), m_contextId,
m_timestamp,
- m_stackTrace ? m_stackTrace->buildInspectorObjectImpl() : nullptr);
+ m_stackTrace ? m_stackTrace->buildInspectorObjectImpl() : nullptr,
+ std::move(consoleContext));
return;
}
UNREACHABLE();
@@ -356,6 +359,7 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
v8::Local<v8::Context> v8Context, int contextId, int groupId,
V8InspectorImpl* inspector, double timestamp, ConsoleAPIType type,
const std::vector<v8::Local<v8::Value>>& arguments,
+ const String16& consoleContext,
std::unique_ptr<V8StackTraceImpl> stackTrace) {
v8::Isolate* isolate = v8Context->GetIsolate();
@@ -367,6 +371,7 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
message->m_columnNumber = stackTrace->topColumnNumber();
}
message->m_stackTrace = std::move(stackTrace);
+ message->m_consoleContext = consoleContext;
message->m_type = type;
message->m_contextId = contextId;
for (size_t i = 0; i < arguments.size(); ++i) {
@@ -459,13 +464,12 @@ void V8ConsoleMessageStorage::addMessage(
V8InspectorImpl* inspector = m_inspector;
if (message->type() == ConsoleAPIType::kClear) clear();
- V8InspectorSessionImpl* session =
- inspector->sessionForContextGroup(contextGroupId);
- if (session) {
- if (message->origin() == V8MessageOrigin::kConsole)
- session->consoleAgent()->messageAdded(message.get());
- session->runtimeAgent()->messageAdded(message.get());
- }
+ inspector->forEachSession(
+ contextGroupId, [&message](V8InspectorSessionImpl* session) {
+ if (message->origin() == V8MessageOrigin::kConsole)
+ session->consoleAgent()->messageAdded(message.get());
+ session->runtimeAgent()->messageAdded(message.get());
+ });
if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
DCHECK(m_messages.size() <= maxConsoleMessageCount);
@@ -486,10 +490,10 @@ void V8ConsoleMessageStorage::addMessage(
void V8ConsoleMessageStorage::clear() {
m_messages.clear();
m_estimatedSize = 0;
- if (V8InspectorSessionImpl* session =
- m_inspector->sessionForContextGroup(m_contextGroupId)) {
- session->releaseObjectGroup("console");
- }
+ m_inspector->forEachSession(m_contextGroupId,
+ [](V8InspectorSessionImpl* session) {
+ session->releaseObjectGroup("console");
+ });
m_data.clear();
}
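
addMessage() and clear() now fan their notifications out to every session attached to the context group via forEachSession() instead of addressing a single session. Assuming the inspector keeps a per-group collection of sessions (the container below is an illustration, not V8InspectorImpl's actual layout), the dispatch amounts to:

    #include <functional>
    #include <unordered_map>
    #include <vector>

    struct SessionImpl { /* agents live here */ };

    class Inspector {
     public:
      void connect(int contextGroupId, SessionImpl* session) {
        m_sessions[contextGroupId].push_back(session);
      }

      // Invoke |callback| for every session attached to |contextGroupId|,
      // matching the call sites in v8-console-message.cc above.
      void forEachSession(int contextGroupId,
                          const std::function<void(SessionImpl*)>& callback) {
        auto it = m_sessions.find(contextGroupId);
        if (it == m_sessions.end()) return;
        for (SessionImpl* session : it->second) callback(session);
      }

     private:
      std::unordered_map<int, std::vector<SessionImpl*>> m_sessions;
    };

    int main() {
      Inspector inspector;
      SessionImpl a, b;
      inspector.connect(1, &a);
      inspector.connect(1, &b);
      int notified = 0;
      inspector.forEachSession(1, [&notified](SessionImpl*) { ++notified; });
      return notified == 2 ? 0 : 1;
    }
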
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index 3c7cc78856..57f692f6db 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -49,7 +49,7 @@ class V8ConsoleMessage {
v8::Local<v8::Context> v8Context, int contextId, int groupId,
V8InspectorImpl* inspector, double timestamp, ConsoleAPIType,
const std::vector<v8::Local<v8::Value>>& arguments,
- std::unique_ptr<V8StackTraceImpl>);
+ const String16& consoleContext, std::unique_ptr<V8StackTraceImpl>);
static std::unique_ptr<V8ConsoleMessage> createForException(
double timestamp, const String16& detailedMessage, const String16& url,
@@ -99,6 +99,7 @@ class V8ConsoleMessage {
int m_v8Size = 0;
Arguments m_arguments;
String16 m_detailedMessage;
+ String16 m_consoleContext;
};
class V8ConsoleMessageStorage {
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 0d3c03a4da..4d71adfd47 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -23,11 +23,20 @@ namespace v8_inspector {
namespace {
+String16 consoleContextToString(
+ const v8::debug::ConsoleContext& consoleContext) {
+ if (consoleContext.id() == 0) return String16();
+ return toProtocolString(consoleContext.name()) + "#" +
+ String16::fromInteger(consoleContext.id());
+}
+
class ConsoleHelper {
public:
ConsoleHelper(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext,
V8InspectorImpl* inspector)
: m_info(info),
+ m_consoleContext(consoleContext),
m_isolate(inspector->isolate()),
m_context(m_isolate->GetCurrentContext()),
m_inspector(inspector),
@@ -37,10 +46,14 @@ class ConsoleHelper {
int contextId() const { return m_contextId; }
int groupId() const { return m_groupId; }
- InjectedScript* injectedScript() {
+ InjectedScript* injectedScript(int sessionId) {
InspectedContext* context = m_inspector->getContext(m_groupId, m_contextId);
if (!context) return nullptr;
- return context->getInjectedScript();
+ return context->getInjectedScript(sessionId);
+ }
+
+ V8InspectorSessionImpl* session(int sessionId) {
+ return m_inspector->sessionById(m_groupId, sessionId);
}
V8ConsoleMessageStorage* consoleMessageStorage() {
@@ -75,6 +88,7 @@ class ConsoleHelper {
V8ConsoleMessage::createForConsoleAPI(
m_context, m_contextId, m_groupId, m_inspector,
m_inspector->client()->currentTimeMS(), type, arguments,
+ consoleContextToString(m_consoleContext),
m_inspector->debugger()->captureStackTrace(false));
consoleMessageStorage()->addMessage(std::move(message));
}
@@ -98,6 +112,7 @@ class ConsoleHelper {
String16 firstArgToString(const String16& defaultValue) {
if (m_info.Length() < 1) return defaultValue;
v8::Local<v8::String> titleValue;
+ v8::TryCatch tryCatch(m_context->GetIsolate());
if (m_info[0]->IsObject()) {
if (!m_info[0].As<v8::Object>()->ObjectProtoToString(m_context).ToLocal(
&titleValue))
@@ -124,28 +139,13 @@ class ConsoleHelper {
return func;
}
- V8ProfilerAgentImpl* profilerAgent() {
- if (V8InspectorSessionImpl* session = currentSession()) {
- if (session && session->profilerAgent()->enabled())
- return session->profilerAgent();
- }
- return nullptr;
- }
-
- V8DebuggerAgentImpl* debuggerAgent() {
- if (V8InspectorSessionImpl* session = currentSession()) {
- if (session && session->debuggerAgent()->enabled())
- return session->debuggerAgent();
- }
- return nullptr;
- }
-
- V8InspectorSessionImpl* currentSession() {
- return m_inspector->sessionForContextGroup(m_groupId);
+ void forEachSession(std::function<void(V8InspectorSessionImpl*)> callback) {
+ m_inspector->forEachSession(m_groupId, callback);
}
private:
const v8::debug::ConsoleCallArguments& m_info;
+ const v8::debug::ConsoleContext& m_consoleContext;
v8::Isolate* m_isolate;
v8::Local<v8::Context> m_context;
V8InspectorImpl* m_inspector = nullptr;
@@ -190,72 +190,95 @@ void createBoundFunctionProperty(v8::Local<v8::Context> context,
V8Console::V8Console(V8InspectorImpl* inspector) : m_inspector(inspector) {}
-void V8Console::Debug(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kDebug);
+void V8Console::Debug(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kDebug);
}
-void V8Console::Error(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kError);
+void V8Console::Error(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kError);
}
-void V8Console::Info(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kInfo);
+void V8Console::Info(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kInfo);
}
-void V8Console::Log(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kLog);
+void V8Console::Log(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kLog);
}
-void V8Console::Warn(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kWarning);
+void V8Console::Warn(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kWarning);
}
-void V8Console::Dir(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kDir);
+void V8Console::Dir(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kDir);
}
-void V8Console::DirXml(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kDirXML);
+void V8Console::DirXml(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kDirXML);
}
-void V8Console::Table(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kTable);
+void V8Console::Table(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
+ .reportCall(ConsoleAPIType::kTable);
}
-void V8Console::Trace(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::Trace(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kTrace,
String16("console.trace"));
}
-void V8Console::Group(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::Group(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kStartGroup,
String16("console.group"));
}
-void V8Console::GroupCollapsed(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::GroupCollapsed(
+ const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kStartGroupCollapsed,
String16("console.groupCollapsed"));
}
-void V8Console::GroupEnd(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::GroupEnd(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kEndGroup,
String16("console.groupEnd"));
}
-void V8Console::Clear(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper helper(info, m_inspector);
+void V8Console::Clear(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper helper(info, consoleContext, m_inspector);
if (!helper.groupId()) return;
m_inspector->client()->consoleClear(helper.groupId());
helper.reportCallWithDefaultArgument(ConsoleAPIType::kClear,
String16("console.clear"));
}
-void V8Console::Count(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper helper(info, m_inspector);
+void V8Console::Count(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper helper(info, consoleContext, m_inspector);
String16 title = helper.firstArgToString(String16());
String16 identifier;
if (title.isEmpty()) {
@@ -268,6 +291,7 @@ void V8Console::Count(const v8::debug::ConsoleCallArguments& info) {
} else {
identifier = title + "@";
}
+ identifier = consoleContextToString(consoleContext) + "@" + identifier;
int count =
helper.consoleMessageStorage()->count(helper.contextId(), identifier);
@@ -277,8 +301,9 @@ void V8Console::Count(const v8::debug::ConsoleCallArguments& info) {
title.isEmpty() ? countString : (title + ": " + countString));
}
-void V8Console::Assert(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper helper(info, m_inspector);
+void V8Console::Assert(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper helper(info, consoleContext, m_inspector);
DCHECK(!helper.firstArgToBoolean(false));
std::vector<v8::Local<v8::Value>> arguments;
@@ -287,82 +312,96 @@ void V8Console::Assert(const v8::debug::ConsoleCallArguments& info) {
arguments.push_back(
toV8String(m_inspector->isolate(), String16("console.assert")));
helper.reportCall(ConsoleAPIType::kAssert, arguments);
-
- if (V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent())
- debuggerAgent->breakProgramOnException(
- protocol::Debugger::Paused::ReasonEnum::Assert, nullptr);
+ m_inspector->debugger()->breakProgramOnAssert(helper.groupId());
}
-void V8Console::MarkTimeline(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::MarkTimeline(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportDeprecatedCall("V8Console#markTimelineDeprecated",
"'console.markTimeline' is "
"deprecated. Please use "
"'console.timeStamp' instead.");
- TimeStamp(info);
+ TimeStamp(info, consoleContext);
}
-void V8Console::Profile(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper helper(info, m_inspector);
- if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
- profilerAgent->consoleProfile(helper.firstArgToString(String16()));
+void V8Console::Profile(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper helper(info, consoleContext, m_inspector);
+ helper.forEachSession([&helper](V8InspectorSessionImpl* session) {
+ session->profilerAgent()->consoleProfile(
+ helper.firstArgToString(String16()));
+ });
}
-void V8Console::ProfileEnd(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper helper(info, m_inspector);
- if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
- profilerAgent->consoleProfileEnd(helper.firstArgToString(String16()));
+void V8Console::ProfileEnd(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper helper(info, consoleContext, m_inspector);
+ helper.forEachSession([&helper](V8InspectorSessionImpl* session) {
+ session->profilerAgent()->consoleProfileEnd(
+ helper.firstArgToString(String16()));
+ });
}
static void timeFunction(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext,
bool timelinePrefix, V8InspectorImpl* inspector) {
- ConsoleHelper helper(info, inspector);
+ ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default");
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
inspector->client()->consoleTime(toStringView(protocolTitle));
- helper.consoleMessageStorage()->time(helper.contextId(), protocolTitle);
+ helper.consoleMessageStorage()->time(
+ helper.contextId(),
+ protocolTitle + "@" + consoleContextToString(consoleContext));
}
static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext,
bool timelinePrefix, V8InspectorImpl* inspector) {
- ConsoleHelper helper(info, inspector);
+ ConsoleHelper helper(info, consoleContext, inspector);
String16 protocolTitle = helper.firstArgToString("default");
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
inspector->client()->consoleTimeEnd(toStringView(protocolTitle));
- double elapsed = helper.consoleMessageStorage()->timeEnd(helper.contextId(),
- protocolTitle);
+ double elapsed = helper.consoleMessageStorage()->timeEnd(
+ helper.contextId(),
+ protocolTitle + "@" + consoleContextToString(consoleContext));
String16 message =
protocolTitle + ": " + String16::fromDouble(elapsed) + "ms";
helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
}
-void V8Console::Timeline(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::Timeline(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportDeprecatedCall("V8Console#timeline",
"'console.timeline' is deprecated. Please use "
"'console.time' instead.");
- timeFunction(info, true, m_inspector);
+ timeFunction(info, consoleContext, true, m_inspector);
}
-void V8Console::TimelineEnd(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper(info, m_inspector)
+void V8Console::TimelineEnd(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper(info, consoleContext, m_inspector)
.reportDeprecatedCall("V8Console#timelineEnd",
"'console.timelineEnd' is "
"deprecated. Please use "
"'console.timeEnd' instead.");
- timeEndFunction(info, true, m_inspector);
+ timeEndFunction(info, consoleContext, true, m_inspector);
}
-void V8Console::Time(const v8::debug::ConsoleCallArguments& info) {
- timeFunction(info, false, m_inspector);
+void V8Console::Time(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ timeFunction(info, consoleContext, false, m_inspector);
}
-void V8Console::TimeEnd(const v8::debug::ConsoleCallArguments& info) {
- timeEndFunction(info, false, m_inspector);
+void V8Console::TimeEnd(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ timeEndFunction(info, consoleContext, false, m_inspector);
}
-void V8Console::TimeStamp(const v8::debug::ConsoleCallArguments& info) {
- ConsoleHelper helper(info, m_inspector);
+void V8Console::TimeStamp(const v8::debug::ConsoleCallArguments& info,
+ const v8::debug::ConsoleContext& consoleContext) {
+ ConsoleHelper helper(info, consoleContext, m_inspector);
String16 title = helper.firstArgToString(String16());
m_inspector->client()->consoleTimeStamp(toStringView(title));
}
@@ -385,12 +424,13 @@ void V8Console::memorySetterCallback(
// setter just ignores the passed value. http://crbug.com/468611
}
-void V8Console::keysCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::keysCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
v8::Isolate* isolate = info.GetIsolate();
info.GetReturnValue().Set(v8::Array::New(isolate));
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Object> obj;
if (!helper.firstArgAsObject().ToLocal(&obj)) return;
v8::Local<v8::Array> names;
@@ -399,13 +439,13 @@ void V8Console::keysCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(names);
}
-void V8Console::valuesCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::valuesCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
v8::Isolate* isolate = info.GetIsolate();
info.GetReturnValue().Set(v8::Array::New(isolate));
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Object> obj;
if (!helper.firstArgAsObject().ToLocal(&obj)) return;
v8::Local<v8::Array> names;
@@ -422,52 +462,55 @@ void V8Console::valuesCallback(
info.GetReturnValue().Set(values);
}
-static void setFunctionBreakpoint(ConsoleHelper& helper,
+static void setFunctionBreakpoint(ConsoleHelper& helper, int sessionId,
v8::Local<v8::Function> function,
V8DebuggerAgentImpl::BreakpointSource source,
const String16& condition, bool enable) {
- V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent();
- if (!debuggerAgent) return;
String16 scriptId = String16::fromInteger(function->ScriptId());
int lineNumber = function->GetScriptLineNumber();
int columnNumber = function->GetScriptColumnNumber();
if (lineNumber == v8::Function::kLineOffsetNotFound ||
columnNumber == v8::Function::kLineOffsetNotFound)
return;
- if (enable)
- debuggerAgent->setBreakpointAt(scriptId, lineNumber, columnNumber, source,
- condition);
- else
- debuggerAgent->removeBreakpointAt(scriptId, lineNumber, columnNumber,
- source);
+
+ if (V8InspectorSessionImpl* session = helper.session(sessionId)) {
+ if (!session->debuggerAgent()->enabled()) return;
+ if (enable) {
+ session->debuggerAgent()->setBreakpointAt(
+ scriptId, lineNumber, columnNumber, source, condition);
+ } else {
+ session->debuggerAgent()->removeBreakpointAt(scriptId, lineNumber,
+ columnNumber, source);
+ }
+ }
}
void V8Console::debugFunctionCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+ const v8::FunctionCallbackInfo<v8::Value>& info, int sessionId) {
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
- setFunctionBreakpoint(helper, function,
+ setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::DebugCommandBreakpointSource,
String16(), true);
}
void V8Console::undebugFunctionCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+ const v8::FunctionCallbackInfo<v8::Value>& info, int sessionId) {
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
- setFunctionBreakpoint(helper, function,
+ setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::DebugCommandBreakpointSource,
String16(), false);
}
void V8Console::monitorFunctionCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+ const v8::FunctionCallbackInfo<v8::Value>& info, int sessionId) {
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
v8::Local<v8::Value> name = function->GetName();
@@ -483,39 +526,40 @@ void V8Console::monitorFunctionCallback(
builder.append(
" called\" + (arguments.length > 0 ? \" with arguments: \" + "
"Array.prototype.join.call(arguments, \", \") : \"\")) && false");
- setFunctionBreakpoint(helper, function,
+ setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
builder.toString(), true);
}
void V8Console::unmonitorFunctionCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+ const v8::FunctionCallbackInfo<v8::Value>& info, int sessionId) {
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
- setFunctionBreakpoint(helper, function,
+ setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
String16(), false);
}
void V8Console::lastEvaluationResultCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+ const v8::FunctionCallbackInfo<v8::Value>& info, int sessionId) {
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
- InjectedScript* injectedScript = helper.injectedScript();
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
+ InjectedScript* injectedScript = helper.injectedScript(sessionId);
if (!injectedScript) return;
info.GetReturnValue().Set(injectedScript->lastEvaluationResult());
}
static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
- bool copyToClipboard, V8InspectorImpl* inspector) {
+ int sessionId, bool copyToClipboard,
+ V8InspectorImpl* inspector) {
if (info.Length() < 1) return;
if (!copyToClipboard) info.GetReturnValue().Set(info[0]);
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, inspector);
- InjectedScript* injectedScript = helper.injectedScript();
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), inspector);
+ InjectedScript* injectedScript = helper.injectedScript(sessionId);
if (!injectedScript) return;
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject;
protocol::Response response =
@@ -526,27 +570,28 @@ static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
std::unique_ptr<protocol::DictionaryValue> hints =
protocol::DictionaryValue::create();
if (copyToClipboard) hints->setBoolean("copyToClipboard", true);
- if (V8InspectorSessionImpl* session = helper.currentSession()) {
+ if (V8InspectorSessionImpl* session = helper.session(sessionId)) {
session->runtimeAgent()->inspect(std::move(wrappedObject),
std::move(hints));
}
}
-void V8Console::inspectCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectImpl(info, false, m_inspector);
+void V8Console::inspectCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectImpl(info, sessionId, false, m_inspector);
}
-void V8Console::copyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectImpl(info, true, m_inspector);
+void V8Console::copyCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectImpl(info, sessionId, true, m_inspector);
}
void V8Console::inspectedObject(const v8::FunctionCallbackInfo<v8::Value>& info,
- unsigned num) {
+ int sessionId, unsigned num) {
DCHECK(num < V8InspectorSessionImpl::kInspectedObjectBufferSize);
v8::debug::ConsoleCallArguments args(info);
- ConsoleHelper helper(args, m_inspector);
- if (V8InspectorSessionImpl* session = helper.currentSession()) {
+ ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
+ if (V8InspectorSessionImpl* session = helper.session(sessionId)) {
V8InspectorSession::Inspectable* object = session->inspectedObject(num);
v8::Isolate* isolate = info.GetIsolate();
if (object)
@@ -574,7 +619,7 @@ void V8Console::installMemoryGetter(v8::Local<v8::Context> context,
}
v8::Local<v8::Object> V8Console::createCommandLineAPI(
- v8::Local<v8::Context> context) {
+ v8::Local<v8::Context> context, int sessionId) {
v8::Isolate* isolate = context->GetIsolate();
v8::MicrotasksScope microtasksScope(isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
@@ -585,7 +630,9 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI(
DCHECK(success);
USE(success);
- v8::Local<v8::External> data = v8::External::New(isolate, this);
+ // TODO(dgozman): this CommandLineAPIData instance leaks. Use PodArray maybe?
+ v8::Local<v8::External> data =
+ v8::External::New(isolate, new CommandLineAPIData(this, sessionId));
createBoundFunctionProperty(context, commandLineAPI, data, "dir",
&V8Console::call<&V8Console::Dir>,
"function dir(value) { [Command Line API] }");
@@ -715,6 +762,8 @@ V8Console::CommandLineAPIScope::CommandLineAPIScope(
m_global(global),
m_installedMethods(v8::Set::New(context->GetIsolate())),
m_cleanup(false) {
+ v8::MicrotasksScope microtasksScope(context->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::Local<v8::Array> names;
if (!m_commandLineAPI->GetOwnPropertyNames(context).ToLocal(&names)) return;
v8::Local<v8::External> externalThis =
@@ -740,6 +789,8 @@ V8Console::CommandLineAPIScope::CommandLineAPIScope(
}
V8Console::CommandLineAPIScope::~CommandLineAPIScope() {
+ v8::MicrotasksScope microtasksScope(m_context->GetIsolate(),
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
m_cleanup = true;
v8::Local<v8::Array> names = m_installedMethods->AsArray();
for (uint32_t i = 0; i < names->Length(); ++i) {
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index e31133c4e1..b0e4beb2e6 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -19,7 +19,8 @@ class V8InspectorImpl;
// https://console.spec.whatwg.org/#console-interface
class V8Console : public v8::debug::ConsoleDelegate {
public:
- v8::Local<v8::Object> createCommandLineAPI(v8::Local<v8::Context> context);
+ v8::Local<v8::Object> createCommandLineAPI(v8::Local<v8::Context> context,
+ int sessionId);
void installMemoryGetter(v8::Local<v8::Context> context,
v8::Local<v8::Object> console);
@@ -49,29 +50,52 @@ class V8Console : public v8::debug::ConsoleDelegate {
explicit V8Console(V8InspectorImpl* inspector);
private:
- void Debug(const v8::debug::ConsoleCallArguments&) override;
- void Error(const v8::debug::ConsoleCallArguments&) override;
- void Info(const v8::debug::ConsoleCallArguments&) override;
- void Log(const v8::debug::ConsoleCallArguments&) override;
- void Warn(const v8::debug::ConsoleCallArguments&) override;
- void Dir(const v8::debug::ConsoleCallArguments&) override;
- void DirXml(const v8::debug::ConsoleCallArguments&) override;
- void Table(const v8::debug::ConsoleCallArguments&) override;
- void Trace(const v8::debug::ConsoleCallArguments&) override;
- void Group(const v8::debug::ConsoleCallArguments&) override;
- void GroupCollapsed(const v8::debug::ConsoleCallArguments&) override;
- void GroupEnd(const v8::debug::ConsoleCallArguments&) override;
- void Clear(const v8::debug::ConsoleCallArguments&) override;
- void Count(const v8::debug::ConsoleCallArguments&) override;
- void Assert(const v8::debug::ConsoleCallArguments&) override;
- void MarkTimeline(const v8::debug::ConsoleCallArguments&) override;
- void Profile(const v8::debug::ConsoleCallArguments&) override;
- void ProfileEnd(const v8::debug::ConsoleCallArguments&) override;
- void Timeline(const v8::debug::ConsoleCallArguments&) override;
- void TimelineEnd(const v8::debug::ConsoleCallArguments&) override;
- void Time(const v8::debug::ConsoleCallArguments&) override;
- void TimeEnd(const v8::debug::ConsoleCallArguments&) override;
- void TimeStamp(const v8::debug::ConsoleCallArguments&) override;
+ void Debug(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Error(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Info(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Log(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Warn(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Dir(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void DirXml(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Table(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Trace(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Group(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void GroupCollapsed(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void GroupEnd(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Clear(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Count(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Assert(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void MarkTimeline(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Profile(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void ProfileEnd(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Timeline(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void TimelineEnd(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void Time(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void TimeEnd(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
+ void TimeStamp(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext& consoleContext) override;
template <void (V8Console::*func)(const v8::FunctionCallbackInfo<v8::Value>&)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
@@ -79,12 +103,21 @@ class V8Console : public v8::debug::ConsoleDelegate {
static_cast<V8Console*>(info.Data().As<v8::External>()->Value());
(console->*func)(info);
}
- template <void (V8Console::*func)(const v8::debug::ConsoleCallArguments&)>
+ using CommandLineAPIData = std::pair<V8Console*, int>;
+ template <void (V8Console::*func)(const v8::FunctionCallbackInfo<v8::Value>&,
+ int)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
- V8Console* console =
- static_cast<V8Console*>(info.Data().As<v8::External>()->Value());
+ CommandLineAPIData* data = static_cast<CommandLineAPIData*>(
+ info.Data().As<v8::External>()->Value());
+ (data->first->*func)(info, data->second);
+ }
+ template <void (V8Console::*func)(const v8::debug::ConsoleCallArguments&,
+ const v8::debug::ConsoleContext&)>
+ static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CommandLineAPIData* data = static_cast<CommandLineAPIData*>(
+ info.Data().As<v8::External>()->Value());
v8::debug::ConsoleCallArguments args(info);
- (console->*func)(args);
+ (data->first->*func)(args, v8::debug::ConsoleContext());
}
// TODO(foolip): There is no spec for the Memory Info API, see blink-dev:
@@ -93,31 +126,43 @@ class V8Console : public v8::debug::ConsoleDelegate {
void memorySetterCallback(const v8::FunctionCallbackInfo<v8::Value>&);
// CommandLineAPI
- void keysCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void valuesCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void debugFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void undebugFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void monitorFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void unmonitorFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void lastEvaluationResultCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void inspectCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void copyCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ void keysCallback(const v8::FunctionCallbackInfo<v8::Value>&, int sessionId);
+ void valuesCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void debugFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void undebugFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void monitorFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void unmonitorFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void lastEvaluationResultCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void inspectCallback(const v8::FunctionCallbackInfo<v8::Value>&,
+ int sessionId);
+ void copyCallback(const v8::FunctionCallbackInfo<v8::Value>&, int sessionId);
void inspectedObject(const v8::FunctionCallbackInfo<v8::Value>&,
- unsigned num);
- void inspectedObject0(const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectedObject(info, 0);
+ int sessionId, unsigned num);
+ void inspectedObject0(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectedObject(info, sessionId, 0);
}
- void inspectedObject1(const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectedObject(info, 1);
+ void inspectedObject1(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectedObject(info, sessionId, 1);
}
- void inspectedObject2(const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectedObject(info, 2);
+ void inspectedObject2(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectedObject(info, sessionId, 2);
}
- void inspectedObject3(const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectedObject(info, 3);
+ void inspectedObject3(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectedObject(info, sessionId, 3);
}
- void inspectedObject4(const v8::FunctionCallbackInfo<v8::Value>& info) {
- inspectedObject(info, 4);
+ void inspectedObject4(const v8::FunctionCallbackInfo<v8::Value>& info,
+ int sessionId) {
+ inspectedObject(info, sessionId, 4);
}
V8InspectorImpl* m_inspector;
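
Each command-line-API function is now bound with a CommandLineAPIData, i.e. a std::pair<V8Console*, int> carrying the session id, stashed behind a v8::External in createCommandLineAPI(); the static call<> templates recover the pair and forward to the member function together with the session id. The trampoline pattern, reduced to plain C++ with a stand-in for v8::FunctionCallbackInfo (a sketch of the mechanism, not the actual V8 embedding):

    #include <cstdio>
    #include <utility>

    struct CallbackInfo { void* data; };  // stands in for v8::FunctionCallbackInfo

    class Console {
     public:
      using CommandLineAPIData = std::pair<Console*, int>;  // (console, sessionId)

      void keysCallback(const CallbackInfo&, int sessionId) {
        std::printf("keys for session %d\n", sessionId);
      }

      // Mirrors V8Console::call<>: a capture-free static function usable as a
      // raw callback; it unpacks the bound pair and forwards both parts.
      template <void (Console::*func)(const CallbackInfo&, int)>
      static void call(const CallbackInfo& info) {
        auto* data = static_cast<CommandLineAPIData*>(info.data);
        (data->first->*func)(info, data->second);
      }
    };

    int main() {
      Console console;
      Console::CommandLineAPIData data(&console, 7);
      CallbackInfo info{&data};
      // In the real code this pointer is what createCommandLineAPI() binds as
      // the function's data; invoking routes back to the member with the id.
      Console::call<&Console::keysCallback>(info);
    }
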
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 7b03c96c0a..fb6274d84b 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -211,13 +211,10 @@ void V8DebuggerAgentImpl::enableImpl() {
for (size_t i = 0; i < compiledScripts.size(); i++)
didParseSource(std::move(compiledScripts[i]), true);
- // FIXME(WK44513): breakpoints activated flag should be synchronized between
- // all front-ends
- m_debugger->setBreakpointsActivated(true);
+ m_breakpointsActive = true;
+ m_debugger->setBreakpointsActive(true);
}
-bool V8DebuggerAgentImpl::enabled() { return m_enabled; }
-
Response V8DebuggerAgentImpl::enable() {
if (enabled()) return Response::OK();
@@ -238,6 +235,10 @@ Response V8DebuggerAgentImpl::disable() {
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
if (isPaused()) m_debugger->continueProgram(m_session->contextGroupId());
+ if (m_breakpointsActive) {
+ m_debugger->setBreakpointsActive(false);
+ m_breakpointsActive = false;
+ }
m_debugger->disable();
JavaScriptCallFrames emptyCallFrames;
m_pausedCallFrames.swap(emptyCallFrames);
@@ -286,7 +287,9 @@ void V8DebuggerAgentImpl::restore() {
Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- m_debugger->setBreakpointsActivated(active);
+ if (m_breakpointsActive == active) return Response::OK();
+ m_breakpointsActive = active;
+ m_debugger->setBreakpointsActive(active);
if (!active && !m_breakReason.empty()) {
clearBreakDetails();
m_debugger->setPauseOnNextStatement(false, m_session->contextGroupId());
@@ -461,6 +464,8 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
v8::Local<v8::Context> debuggerContext =
v8::debug::GetDebugContext(m_isolate);
v8::Context::Scope contextScope(debuggerContext);
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::TryCatch tryCatch(m_isolate);
it->second->getPossibleBreakpoints(
v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations);
@@ -527,6 +532,10 @@ bool V8DebuggerAgentImpl::isFunctionBlackboxed(const String16& scriptId,
std::distance(ranges.begin(), itStartRange) % 2;
}
+bool V8DebuggerAgentImpl::acceptsPause(bool isOOMBreak) const {
+ return enabled() && (isOOMBreak || !m_skipAllPauses);
+}
+
std::unique_ptr<protocol::Debugger::Location>
V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
const ScriptBreakpoint& breakpoint,
@@ -629,8 +638,7 @@ Response V8DebuggerAgentImpl::restartFrame(
std::unique_ptr<Array<CallFrame>>* newCallFrames,
Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) {
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
- InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
- callFrameId);
+ InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
if (!response.isSuccess()) return response;
if (scope.frameOrdinal() >= m_pausedCallFrames.size())
@@ -683,7 +691,7 @@ void V8DebuggerAgentImpl::clearBreakDetails() {
void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data) {
- if (!enabled() || isPaused() || !m_debugger->breakpointsActivated()) return;
+ if (isPaused() || !acceptsPause(false) || !m_breakpointsActive) return;
if (m_breakReason.empty()) {
m_debugger->setPauseOnNextStatement(true, m_session->contextGroupId());
}
@@ -691,7 +699,7 @@ void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
}
void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
- if (!enabled() || isPaused() || !m_debugger->breakpointsActivated()) return;
+ if (isPaused() || !acceptsPause(false) || !m_breakpointsActive) return;
if (m_breakReason.size() == 1) {
m_debugger->setPauseOnNextStatement(false, m_session->contextGroupId());
}
@@ -765,6 +773,8 @@ Response V8DebuggerAgentImpl::setPauseOnExceptions(
}
void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(int pauseState) {
+ // TODO(dgozman): this changes the global state and forces all context groups
+ // to pause. We should make this flag be per-context-group.
m_debugger->setPauseOnExceptionsState(
static_cast<v8::debug::ExceptionBreakState>(pauseState));
m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
@@ -777,8 +787,7 @@ Response V8DebuggerAgentImpl::evaluateOnCallFrame(
Maybe<bool> throwOnSideEffect, std::unique_ptr<RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
- InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
- callFrameId);
+ InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
if (!response.isSuccess()) return response;
if (scope.frameOrdinal() >= m_pausedCallFrames.size())
@@ -808,8 +817,7 @@ Response V8DebuggerAgentImpl::setVariableValue(
const String16& callFrameId) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
- InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
- callFrameId);
+ InjectedScript::CallFrameScope scope(m_session, callFrameId);
Response response = scope.initialize();
if (!response.isSuccess()) return response;
v8::Local<v8::Value> newValue;
@@ -924,6 +932,8 @@ Response V8DebuggerAgentImpl::currentCallFrames(
v8::Local<v8::Context> debuggerContext =
v8::debug::GetDebugContext(m_isolate);
v8::Context::Scope contextScope(debuggerContext);
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
@@ -1034,7 +1044,9 @@ V8DebuggerAgentImpl::currentAsyncStackTrace() {
m_debugger->maxAsyncCallChainDepth() - 1);
}
-bool V8DebuggerAgentImpl::isPaused() const { return m_debugger->isPaused(); }
+bool V8DebuggerAgentImpl::isPaused() const {
+ return m_debugger->isPausedInContextGroup(m_session->contextGroupId());
+}
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
@@ -1140,7 +1152,7 @@ void V8DebuggerAgentImpl::didPause(int contextId,
v8::Local<v8::Value> exception,
const std::vector<String16>& hitBreakpoints,
bool isPromiseRejection, bool isUncaught,
- bool isOOMBreak) {
+ bool isOOMBreak, bool isAssert) {
JavaScriptCallFrames frames = m_debugger->currentCallFrames();
m_pausedCallFrames.swap(frames);
v8::HandleScope handles(m_isolate);
@@ -1150,6 +1162,9 @@ void V8DebuggerAgentImpl::didPause(int contextId,
if (isOOMBreak) {
hitReasons.push_back(
std::make_pair(protocol::Debugger::Paused::ReasonEnum::OOM, nullptr));
+ } else if (isAssert) {
+ hitReasons.push_back(std::make_pair(
+ protocol::Debugger::Paused::ReasonEnum::Assert, nullptr));
} else if (!exception.IsEmpty()) {
InjectedScript* injectedScript = nullptr;
m_session->findInjectedScript(contextId, injectedScript);
@@ -1237,11 +1252,19 @@ void V8DebuggerAgentImpl::didContinue() {
void V8DebuggerAgentImpl::breakProgram(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data) {
- if (!enabled() || !m_debugger->canBreakProgram() || m_skipAllPauses) return;
+ if (!enabled() || m_skipAllPauses || !m_debugger->canBreakProgram()) return;
std::vector<BreakReason> currentScheduledReason;
currentScheduledReason.swap(m_breakReason);
pushBreakDetails(breakReason, std::move(data));
- if (!m_debugger->breakProgram(m_session->contextGroupId())) return;
+
+ int contextGroupId = m_session->contextGroupId();
+ int sessionId = m_session->sessionId();
+ V8InspectorImpl* inspector = m_inspector;
+ m_debugger->breakProgram(contextGroupId);
+ // Check that session and |this| are still around.
+ if (!inspector->sessionById(contextGroupId, sessionId)) return;
+ if (!enabled()) return;
+
popBreakDetails();
m_breakReason.swap(currentScheduledReason);
if (!m_breakReason.empty()) {
@@ -1249,15 +1272,6 @@ void V8DebuggerAgentImpl::breakProgram(
}
}
-void V8DebuggerAgentImpl::breakProgramOnException(
- const String16& breakReason,
- std::unique_ptr<protocol::DictionaryValue> data) {
- if (!enabled() ||
- m_debugger->getPauseOnExceptionsState() == v8::debug::NoBreakOnException)
- return;
- breakProgram(breakReason, std::move(data));
-}
-
void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source,
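
Note on the breakProgram() change above: because the nested pause loop can disconnect the session, the agent records contextGroupId and sessionId before breaking and looks the session up again via sessionById() before touching any member state. A minimal standalone sketch of that pattern, assuming hypothetical Registry/Session types rather than the real inspector classes:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <memory>

    // Hypothetical stand-ins for V8InspectorImpl and V8InspectorSessionImpl.
    struct Session {
      int groupId;
      int id;
    };

    struct Registry {
      // groupId -> sessionId -> session
      std::map<int, std::map<int, std::unique_ptr<Session>>> sessions;

      Session* sessionById(int groupId, int sessionId) {
        auto git = sessions.find(groupId);
        if (git == sessions.end()) return nullptr;
        auto sit = git->second.find(sessionId);
        return sit == git->second.end() ? nullptr : sit->second.get();
      }
    };

    // Mirrors the shape of V8DebuggerAgentImpl::breakProgram: capture the ids,
    // run a reentrant operation that may drop the session, then re-resolve it
    // before touching any further state.
    void breakProgram(Registry& registry, Session* session,
                      const std::function<void()>& runNestedLoop) {
      int groupId = session->groupId;
      int sessionId = session->id;
      runNestedLoop();  // May disconnect |session| while we are paused.
      if (!registry.sessionById(groupId, sessionId)) {
        std::cout << "session gone, bail out\n";
        return;
      }
      std::cout << "session still alive, finish break handling\n";
    }

    int main() {
      Registry registry;
      auto owned = std::make_unique<Session>(Session{1, 7});
      Session* raw = owned.get();
      registry.sessions[1][7] = std::move(owned);
      // Simulate the front-end disconnecting while the message loop is nested.
      breakProgram(registry, raw, [&] { registry.sessions[1].erase(7); });
    }

Only the captured ids survive the reentrant call; the raw pointer is never dereferenced afterwards.
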
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index c9433e20f6..edf996704d 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -109,7 +109,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
positions) override;
- bool enabled();
+ bool enabled() const { return m_enabled; }
void setBreakpointAt(const String16& scriptId, int lineNumber,
int columnNumber, BreakpointSource,
@@ -122,15 +122,14 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void cancelPauseOnNextStatement();
void breakProgram(const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data);
- void breakProgramOnException(const String16& breakReason,
- std::unique_ptr<protocol::DictionaryValue> data);
void reset();
// Interface for V8InspectorImpl
void didPause(int contextId, v8::Local<v8::Value> exception,
const std::vector<String16>& hitBreakpoints,
- bool isPromiseRejection, bool isUncaught, bool isOOMBreak);
+ bool isPromiseRejection, bool isUncaught, bool isOOMBreak,
+ bool isAssert);
void didContinue();
void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
@@ -138,7 +137,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
const v8::debug::Location& start,
const v8::debug::Location& end);
- bool skipAllPauses() const { return m_skipAllPauses; }
+ bool acceptsPause(bool isOOMBreak) const;
v8::Isolate* isolate() { return m_isolate; }
@@ -195,6 +194,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void popBreakDetails();
bool m_skipAllPauses = false;
+ bool m_breakpointsActive = false;
std::unique_ptr<V8Regex> m_blackboxPattern;
protocol::HashMap<String16, std::vector<std::pair<int, int>>>
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 86a48401a6..03bf57cf7d 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -11,7 +11,9 @@
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-internal-value-type.h"
+#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-copier.h"
@@ -27,15 +29,6 @@ inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
return value ? v8::True(isolate) : v8::False(isolate);
}
-V8DebuggerAgentImpl* agentForScript(V8InspectorImpl* inspector,
- v8::Local<v8::debug::Script> script) {
- int contextId;
- if (!script->ContextId().To(&contextId)) return nullptr;
- int contextGroupId = inspector->contextGroupId(contextId);
- if (!contextGroupId) return nullptr;
- return inspector->enabledDebuggerAgentForGroup(contextGroupId);
-}
-
v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
v8::Local<v8::Value> value) {
v8::Isolate* isolate = context->GetIsolate();
@@ -168,7 +161,6 @@ V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
: m_isolate(isolate),
m_inspector(inspector),
m_enableCount(0),
- m_breakpointsActivated(true),
m_ignoreScriptParsedEventsCounter(0),
m_maxAsyncCallStacks(kMaxAsyncTaskStacks),
m_maxAsyncCallStackDepth(0),
@@ -205,6 +197,10 @@ void V8Debugger::disable() {
m_isolate->RestoreOriginalHeapLimit();
}
+bool V8Debugger::isPausedInContextGroup(int contextGroupId) const {
+ return isPaused() && m_pausedContextGroupId == contextGroupId;
+}
+
bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
void V8Debugger::getCompiledScripts(
@@ -233,6 +229,8 @@ String16 V8Debugger::setBreakpoint(const ScriptBreakpoint& breakpoint,
v8::HandleScope scope(m_isolate);
v8::Local<v8::Context> context = debuggerContext();
v8::Context::Scope contextScope(context);
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::Local<v8::Object> info = v8::Object::New(m_isolate);
bool success = false;
@@ -280,6 +278,8 @@ void V8Debugger::removeBreakpoint(const String16& breakpointId) {
v8::HandleScope scope(m_isolate);
v8::Local<v8::Context> context = debuggerContext();
v8::Context::Scope contextScope(context);
+ v8::MicrotasksScope microtasks(m_isolate,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::Local<v8::Object> info = v8::Object::New(m_isolate);
bool success = false;
@@ -312,13 +312,13 @@ void V8Debugger::clearBreakpoints() {
v8::debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
}
-void V8Debugger::setBreakpointsActivated(bool activated) {
+void V8Debugger::setBreakpointsActive(bool active) {
if (!enabled()) {
UNREACHABLE();
return;
}
- v8::debug::SetBreakPointsActive(m_isolate, activated);
- m_breakpointsActivated = activated;
+ m_breakpointsActiveCount += active ? 1 : -1;
+ v8::debug::SetBreakPointsActive(m_isolate, m_breakpointsActiveCount);
}
v8::debug::ExceptionBreakState V8Debugger::getPauseOnExceptionsState() {
@@ -350,18 +350,16 @@ void V8Debugger::setPauseOnNextStatement(bool pause, int targetContextGroupId) {
}
bool V8Debugger::canBreakProgram() {
- if (!m_breakpointsActivated) return false;
return !v8::debug::AllFramesOnStackAreBlackboxed(m_isolate);
}
-bool V8Debugger::breakProgram(int targetContextGroupId) {
+void V8Debugger::breakProgram(int targetContextGroupId) {
+ DCHECK(canBreakProgram());
// Don't allow nested breaks.
- if (isPaused()) return true;
- if (!canBreakProgram()) return true;
+ if (isPaused()) return;
DCHECK(targetContextGroupId);
m_targetContextGroupId = targetContextGroupId;
v8::debug::BreakRightNow(m_isolate);
- return m_inspector->enabledDebuggerAgentForGroup(targetContextGroupId);
}
void V8Debugger::continueProgram(int targetContextGroupId) {
@@ -371,6 +369,18 @@ void V8Debugger::continueProgram(int targetContextGroupId) {
m_executionState.Clear();
}
+void V8Debugger::breakProgramOnAssert(int targetContextGroupId) {
+ if (!enabled()) return;
+ if (m_pauseOnExceptionsState == v8::debug::NoBreakOnException) return;
+ // Don't allow nested breaks.
+ if (isPaused()) return;
+ if (!canBreakProgram()) return;
+ DCHECK(targetContextGroupId);
+ m_targetContextGroupId = targetContextGroupId;
+ m_scheduledAssertBreak = true;
+ v8::debug::BreakRightNow(m_isolate);
+}
+
void V8Debugger::stepIntoStatement(int targetContextGroupId) {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
@@ -605,9 +615,17 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
m_stepIntoAsyncCallback.reset();
}
m_breakRequested = false;
- V8DebuggerAgentImpl* agent = m_inspector->enabledDebuggerAgentForGroup(
- m_inspector->contextGroupId(pausedContext));
- if (!agent || (agent->skipAllPauses() && !m_scheduledOOMBreak)) return;
+
+ bool scheduledOOMBreak = m_scheduledOOMBreak;
+ bool scheduledAssertBreak = m_scheduledAssertBreak;
+ bool hasAgents = false;
+ m_inspector->forEachSession(
+ contextGroupId,
+ [&scheduledOOMBreak, &hasAgents](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->acceptsPause(scheduledOOMBreak))
+ hasAgents = true;
+ });
+ if (!hasAgents) return;
std::vector<String16> breakpointIds;
if (!hitBreakpointNumbers.IsEmpty()) {
@@ -627,27 +645,39 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
}
clearContinueToLocation();
+ DCHECK(contextGroupId);
m_pausedContext = pausedContext;
m_executionState = executionState;
m_pausedContextGroupId = contextGroupId;
- agent->didPause(InspectedContext::contextId(pausedContext), exception,
- breakpointIds, isPromiseRejection, isUncaught,
- m_scheduledOOMBreak);
- int groupId = m_inspector->contextGroupId(pausedContext);
- DCHECK(groupId);
+
+ m_inspector->forEachSession(
+ contextGroupId, [&pausedContext, &exception, &breakpointIds,
+ &isPromiseRejection, &isUncaught, &scheduledOOMBreak,
+ &scheduledAssertBreak](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->acceptsPause(scheduledOOMBreak)) {
+ session->debuggerAgent()->didPause(
+ InspectedContext::contextId(pausedContext), exception,
+ breakpointIds, isPromiseRejection, isUncaught, scheduledOOMBreak,
+ scheduledAssertBreak);
+ }
+ });
{
v8::Context::Scope scope(pausedContext);
v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
CHECK(!context.IsEmpty() &&
context != v8::debug::GetDebugContext(m_isolate));
- m_inspector->client()->runMessageLoopOnPause(groupId);
+ m_inspector->client()->runMessageLoopOnPause(contextGroupId);
m_pausedContextGroupId = 0;
}
- // The agent may have been removed in the nested loop.
- agent = m_inspector->enabledDebuggerAgentForGroup(groupId);
- if (agent) agent->didContinue();
+ m_inspector->forEachSession(contextGroupId,
+ [](V8InspectorSessionImpl* session) {
+ if (session->debuggerAgent()->enabled())
+ session->debuggerAgent()->didContinue();
+ });
+
if (m_scheduledOOMBreak) m_isolate->RestoreOriginalHeapLimit();
m_scheduledOOMBreak = false;
+ m_scheduledAssertBreak = false;
m_pausedContext.Clear();
m_executionState.Clear();
}
@@ -664,14 +694,28 @@ void V8Debugger::v8OOMCallback(void* data) {
void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
bool has_compile_error) {
- V8DebuggerAgentImpl* agent = agentForScript(m_inspector, script);
- if (!agent) return;
+ int contextId;
+ if (!script->ContextId().To(&contextId)) return;
if (script->IsWasm()) {
- m_wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent);
+ WasmTranslation* wasmTranslation = &m_wasmTranslation;
+ m_inspector->forEachSession(
+ m_inspector->contextGroupId(contextId),
+ [&script, &wasmTranslation](V8InspectorSessionImpl* session) {
+ if (!session->debuggerAgent()->enabled()) return;
+ wasmTranslation->AddScript(script.As<v8::debug::WasmScript>(),
+ session->debuggerAgent());
+ });
} else if (m_ignoreScriptParsedEventsCounter == 0) {
- agent->didParseSource(
- V8DebuggerScript::Create(m_isolate, script, inLiveEditScope),
- !has_compile_error);
+ v8::Isolate* isolate = m_isolate;
+ m_inspector->forEachSession(
+ m_inspector->contextGroupId(contextId),
+ [&isolate, &script,
+ &has_compile_error](V8InspectorSessionImpl* session) {
+ if (!session->debuggerAgent()->enabled()) return;
+ session->debuggerAgent()->didParseSource(
+ V8DebuggerScript::Create(isolate, script, inLiveEditScope),
+ !has_compile_error);
+ });
}
}
@@ -702,10 +746,21 @@ void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
const v8::debug::Location& start,
const v8::debug::Location& end) {
- V8DebuggerAgentImpl* agent = agentForScript(m_inspector, script);
- if (!agent) return false;
- return agent->isFunctionBlackboxed(String16::fromInteger(script->Id()), start,
- end);
+ int contextId;
+ if (!script->ContextId().To(&contextId)) return false;
+ bool hasAgents = false;
+ bool allBlackboxed = true;
+ String16 scriptId = String16::fromInteger(script->Id());
+ m_inspector->forEachSession(
+ m_inspector->contextGroupId(contextId),
+ [&hasAgents, &allBlackboxed, &scriptId, &start,
+ &end](V8InspectorSessionImpl* session) {
+ V8DebuggerAgentImpl* agent = session->debuggerAgent();
+ if (!agent->enabled()) return;
+ hasAgents = true;
+ allBlackboxed &= agent->isFunctionBlackboxed(scriptId, start, end);
+ });
+ return hasAgents && allBlackboxed;
}
void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
@@ -787,7 +842,6 @@ v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
ScopeTargetKind kind) {
if (!enabled()) {
UNREACHABLE();
- return v8::Local<v8::Value>::New(m_isolate, v8::Undefined(m_isolate));
}
v8::Local<v8::Value> argv[] = {value};
v8::Local<v8::Value> scopesValue;
@@ -909,6 +963,7 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
}
if (m_maxAsyncCallStackDepth == maxAsyncCallStackDepth) return;
+ // TODO(dgozman): ideally, this should be per context group.
m_maxAsyncCallStackDepth = maxAsyncCallStackDepth;
if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
}
@@ -1082,8 +1137,14 @@ std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
if (!contextGroupId) return nullptr;
int stackSize = 1;
- if (fullStack || m_inspector->enabledRuntimeAgentForGroup(contextGroupId)) {
+ if (fullStack) {
stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
+ } else {
+ m_inspector->forEachSession(
+ contextGroupId, [&stackSize](V8InspectorSessionImpl* session) {
+ if (session->runtimeAgent()->enabled())
+ stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
+ });
}
return V8StackTraceImpl::capture(this, contextGroupId, stackSize);
}
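
The IsFunctionBlackboxed() rewrite above only reports a range as blackboxed when at least one enabled agent exists and every enabled agent in the context group agrees. A small sketch of that consensus rule, using a hypothetical Agent struct instead of V8DebuggerAgentImpl:

    #include <iostream>
    #include <vector>

    // Hypothetical agent: whether it is enabled and whether it blackboxes the
    // range in question.
    struct Agent {
      bool enabled;
      bool blackboxesRange;
    };

    // Mirrors V8Debugger::IsFunctionBlackboxed: no enabled agent -> not
    // blackboxed; otherwise blackboxed only if all enabled agents say so.
    bool isBlackboxed(const std::vector<Agent>& agents) {
      bool hasAgents = false;
      bool allBlackboxed = true;
      for (const Agent& agent : agents) {
        if (!agent.enabled) continue;
        hasAgents = true;
        allBlackboxed &= agent.blackboxesRange;
      }
      return hasAgents && allBlackboxed;
    }

    int main() {
      std::cout << isBlackboxed({}) << "\n";                           // 0
      std::cout << isBlackboxed({{true, true}, {true, true}}) << "\n";   // 1
      std::cout << isBlackboxed({{true, true}, {true, false}}) << "\n";  // 0
      std::cout << isBlackboxed({{false, false}, {true, true}}) << "\n"; // 1
    }
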
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index a15c288c0d..7c660f8095 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -44,14 +44,14 @@ class V8Debugger : public v8::debug::DebugDelegate {
String16 setBreakpoint(const ScriptBreakpoint&, int* actualLineNumber,
int* actualColumnNumber);
void removeBreakpoint(const String16& breakpointId);
- void setBreakpointsActivated(bool);
- bool breakpointsActivated() const { return m_breakpointsActivated; }
+ void setBreakpointsActive(bool);
v8::debug::ExceptionBreakState getPauseOnExceptionsState();
void setPauseOnExceptionsState(v8::debug::ExceptionBreakState);
bool canBreakProgram();
- bool breakProgram(int targetContextGroupId);
+ void breakProgram(int targetContextGroupId);
void continueProgram(int targetContextGroupId);
+ void breakProgramOnAssert(int targetContextGroupId);
void setPauseOnNextStatement(bool, int targetContextGroupId);
void stepIntoStatement(int targetContextGroupId);
@@ -82,6 +82,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
void disable();
bool isPaused() const { return m_pausedContextGroupId; }
+ bool isPausedInContextGroup(int contextGroupId) const;
v8::Local<v8::Context> pausedContext() { return m_pausedContext; }
int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
@@ -181,13 +182,14 @@ class V8Debugger : public v8::debug::DebugDelegate {
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
int m_enableCount;
- bool m_breakpointsActivated;
+ int m_breakpointsActiveCount = 0;
v8::Global<v8::Object> m_debuggerScript;
v8::Global<v8::Context> m_debuggerContext;
v8::Local<v8::Object> m_executionState;
v8::Local<v8::Context> m_pausedContext;
int m_ignoreScriptParsedEventsCounter;
bool m_scheduledOOMBreak = false;
+ bool m_scheduledAssertBreak = false;
int m_targetContextGroupId = 0;
int m_pausedContextGroupId = 0;
String16 m_continueToLocationBreakpointId;
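
With several sessions per context group, the single m_breakpointsActivated flag above becomes m_breakpointsActiveCount: each agent contributes +1 or -1, and breakpoints stay active in the engine while the count is positive. A standalone sketch of that reference count, with FakeEngine standing in for v8::debug::SetBreakPointsActive:

    #include <cassert>
    #include <iostream>

    // Stand-in for the engine-side breakpoint switch.
    struct FakeEngine {
      bool breakpointsActive = false;
    };

    class Debugger {
     public:
      explicit Debugger(FakeEngine* engine) : engine_(engine) {}

      // Mirrors V8Debugger::setBreakpointsActive: each enabled agent adds or
      // removes one reference; the engine flag follows "count > 0".
      void setBreakpointsActive(bool active) {
        count_ += active ? 1 : -1;
        assert(count_ >= 0);
        engine_->breakpointsActive = count_ > 0;
      }

     private:
      FakeEngine* engine_;
      int count_ = 0;
    };

    int main() {
      FakeEngine engine;
      Debugger debugger(&engine);
      debugger.setBreakpointsActive(true);   // session A enables
      debugger.setBreakpointsActive(true);   // session B enables
      debugger.setBreakpointsActive(false);  // session A disables
      std::cout << engine.breakpointsActive << "\n";  // 1: B still holds a ref
      debugger.setBreakpointsActive(false);  // session B disables
      std::cout << engine.breakpointsActive << "\n";  // 0
    }
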
diff --git a/deps/v8/src/inspector/v8-function-call.cc b/deps/v8/src/inspector/v8-function-call.cc
index 0fcca70cb7..ebeb7a3d07 100644
--- a/deps/v8/src/inspector/v8-function-call.cc
+++ b/deps/v8/src/inspector/v8-function-call.cc
@@ -99,6 +99,7 @@ v8::Local<v8::Value> V8FunctionCall::callWithoutExceptionHandling() {
}
v8::MicrotasksScope microtasksScope(m_context->GetIsolate(),
v8::MicrotasksScope::kDoNotRunMicrotasks);
+ v8::Isolate::AllowJavascriptExecutionScope allowScript(m_context->GetIsolate());
v8::MaybeLocal<v8::Value> maybeResult = function->Call(
m_context, thisObject, static_cast<int>(m_arguments.size()), info.get());
if (contextGroupId) {
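
Note on the AllowJavascriptExecutionScope line above: a C++ RAII guard only has an effect while the object is alive, so it has to be bound to a named local; an unnamed temporary is destroyed at the end of its statement and the following Call() would run without it. A tiny illustration with a hypothetical Guard class:

    #include <iostream>

    // Global flag toggled by the guard; stands in for "JavaScript execution
    // allowed" state on the isolate.
    bool g_allowed = false;

    // Hypothetical RAII guard in the spirit of
    // v8::Isolate::AllowJavascriptExecutionScope.
    class Guard {
     public:
      Guard() { g_allowed = true; }
      ~Guard() { g_allowed = false; }
    };

    int main() {
      Guard();                         // unnamed temporary: destroyed right away
      std::cout << g_allowed << "\n";  // 0 - the guard is already gone
      Guard guard;                     // named local: alive until main() returns
      std::cout << g_allowed << "\n";  // 1
    }
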
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index 663e9e7b02..47c51e809c 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -5,7 +5,7 @@
#include "src/inspector/v8-injected-script-host.h"
#include "src/base/macros.h"
-#include "src/inspector/injected-script-native.h"
+#include "src/inspector/injected-script.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
@@ -309,16 +309,15 @@ void V8InjectedScriptHost::objectHasOwnPropertyCallback(
void V8InjectedScriptHost::bindCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
if (info.Length() < 2 || !info[1]->IsString()) return;
- InjectedScriptNative* injectedScriptNative =
- InjectedScriptNative::fromInjectedScriptHost(info.GetIsolate(),
- info.Holder());
- if (!injectedScriptNative) return;
+ InjectedScript* injectedScript =
+ InjectedScript::fromInjectedScriptHost(info.GetIsolate(), info.Holder());
+ if (!injectedScript) return;
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
v8::Local<v8::String> v8groupName =
info[1]->ToString(context).ToLocalChecked();
String16 groupName = toProtocolStringWithTypeCheck(v8groupName);
- int id = injectedScriptNative->bind(info[0], groupName);
+ int id = injectedScript->bindObject(info[0], groupName);
info.GetReturnValue().Set(id);
}
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 56ed0babf0..6b8e7324f5 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -30,6 +30,8 @@
#include "src/inspector/v8-inspector-impl.h"
+#include <vector>
+
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console-agent-impl.h"
@@ -74,27 +76,6 @@ int V8InspectorImpl::contextGroupId(int contextId) {
return it != m_contextIdToGroupIdMap.end() ? it->second : 0;
}
-V8DebuggerAgentImpl* V8InspectorImpl::enabledDebuggerAgentForGroup(
- int contextGroupId) {
- V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
- V8DebuggerAgentImpl* agent = session ? session->debuggerAgent() : nullptr;
- return agent && agent->enabled() ? agent : nullptr;
-}
-
-V8RuntimeAgentImpl* V8InspectorImpl::enabledRuntimeAgentForGroup(
- int contextGroupId) {
- V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
- V8RuntimeAgentImpl* agent = session ? session->runtimeAgent() : nullptr;
- return agent && agent->enabled() ? agent : nullptr;
-}
-
-V8ProfilerAgentImpl* V8InspectorImpl::enabledProfilerAgentForGroup(
- int contextGroupId) {
- V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
- V8ProfilerAgentImpl* agent = session ? session->profilerAgent() : nullptr;
- return agent && agent->enabled() ? agent : nullptr;
-}
-
v8::MaybeLocal<v8::Value> V8InspectorImpl::compileAndRunInternalScript(
v8::Local<v8::Context> context, v8::Local<v8::String> source) {
v8::Local<v8::UnboundScript> unboundScript;
@@ -170,16 +151,18 @@ std::unique_ptr<V8StackTrace> V8InspectorImpl::createStackTrace(
std::unique_ptr<V8InspectorSession> V8InspectorImpl::connect(
int contextGroupId, V8Inspector::Channel* channel,
const StringView& state) {
- DCHECK(m_sessions.find(contextGroupId) == m_sessions.cend());
+ int sessionId = ++m_lastSessionId;
std::unique_ptr<V8InspectorSessionImpl> session =
- V8InspectorSessionImpl::create(this, contextGroupId, channel, state);
- m_sessions[contextGroupId] = session.get();
+ V8InspectorSessionImpl::create(this, contextGroupId, sessionId, channel,
+ state);
+ m_sessions[contextGroupId][sessionId] = session.get();
return std::move(session);
}
void V8InspectorImpl::disconnect(V8InspectorSessionImpl* session) {
- DCHECK(m_sessions.find(session->contextGroupId()) != m_sessions.end());
- m_sessions.erase(session->contextGroupId());
+ auto& map = m_sessions[session->contextGroupId()];
+ map.erase(session->sessionId());
+ if (map.empty()) m_sessions.erase(session->contextGroupId());
}
InspectedContext* V8InspectorImpl::getContext(int groupId,
@@ -211,18 +194,15 @@ void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
DCHECK(contextById->find(contextId) == contextById->cend());
(*contextById)[contextId].reset(context);
- SessionMap::iterator sessionIt = m_sessions.find(info.contextGroupId);
- if (sessionIt != m_sessions.end())
- sessionIt->second->runtimeAgent()->reportExecutionContextCreated(context);
+ forEachSession(
+ info.contextGroupId, [&context](V8InspectorSessionImpl* session) {
+ session->runtimeAgent()->reportExecutionContextCreated(context);
+ });
}
void V8InspectorImpl::contextDestroyed(v8::Local<v8::Context> context) {
int contextId = InspectedContext::contextId(context);
int groupId = contextGroupId(context);
- contextCollected(groupId, contextId);
-}
-
-void V8InspectorImpl::contextCollected(int groupId, int contextId) {
m_contextIdToGroupIdMap.erase(contextId);
ConsoleStorageMap::iterator storageIt = m_consoleStorageMap.find(groupId);
@@ -232,31 +212,34 @@ void V8InspectorImpl::contextCollected(int groupId, int contextId) {
InspectedContext* inspectedContext = getContext(groupId, contextId);
if (!inspectedContext) return;
- SessionMap::iterator iter = m_sessions.find(groupId);
- if (iter != m_sessions.end())
- iter->second->runtimeAgent()->reportExecutionContextDestroyed(
- inspectedContext);
+ forEachSession(groupId, [&inspectedContext](V8InspectorSessionImpl* session) {
+ session->runtimeAgent()->reportExecutionContextDestroyed(inspectedContext);
+ });
discardInspectedContext(groupId, contextId);
}
void V8InspectorImpl::resetContextGroup(int contextGroupId) {
m_consoleStorageMap.erase(contextGroupId);
m_muteExceptionsMap.erase(contextGroupId);
- SessionMap::iterator session = m_sessions.find(contextGroupId);
- if (session != m_sessions.end()) session->second->reset();
+ forEachSession(contextGroupId,
+ [](V8InspectorSessionImpl* session) { session->reset(); });
m_contexts.erase(contextGroupId);
m_debugger->wasmTranslation()->Clear();
}
void V8InspectorImpl::idleStarted() {
- for (auto it = m_sessions.begin(); it != m_sessions.end(); ++it) {
- if (it->second->profilerAgent()->idleStarted()) return;
+ for (auto& it : m_sessions) {
+ for (auto& it2 : it.second) {
+ if (it2.second->profilerAgent()->idleStarted()) return;
+ }
}
}
void V8InspectorImpl::idleFinished() {
- for (auto it = m_sessions.begin(); it != m_sessions.end(); ++it) {
- if (it->second->profilerAgent()->idleFinished()) return;
+ for (auto& it : m_sessions) {
+ for (auto& it2 : it.second) {
+ if (it2.second->profilerAgent()->idleFinished()) return;
+ }
}
}
@@ -331,17 +314,12 @@ void V8InspectorImpl::discardInspectedContext(int contextGroupId,
if (m_contexts[contextGroupId]->empty()) m_contexts.erase(contextGroupId);
}
-const V8InspectorImpl::ContextByIdMap* V8InspectorImpl::contextGroup(
- int contextGroupId) {
- ContextsByGroupMap::iterator iter = m_contexts.find(contextGroupId);
- return iter == m_contexts.end() ? nullptr : iter->second.get();
-}
-
-V8InspectorSessionImpl* V8InspectorImpl::sessionForContextGroup(
- int contextGroupId) {
- if (!contextGroupId) return nullptr;
- SessionMap::iterator iter = m_sessions.find(contextGroupId);
- return iter == m_sessions.end() ? nullptr : iter->second;
+V8InspectorSessionImpl* V8InspectorImpl::sessionById(int contextGroupId,
+ int sessionId) {
+ auto it = m_sessions.find(contextGroupId);
+ if (it == m_sessions.end()) return nullptr;
+ auto it2 = it->second.find(sessionId);
+ return it2 == it->second.end() ? nullptr : it2->second;
}
V8Console* V8InspectorImpl::console() {
@@ -349,4 +327,38 @@ V8Console* V8InspectorImpl::console() {
return m_console.get();
}
+void V8InspectorImpl::forEachContext(
+ int contextGroupId, std::function<void(InspectedContext*)> callback) {
+ auto it = m_contexts.find(contextGroupId);
+ if (it == m_contexts.end()) return;
+ std::vector<int> ids;
+ ids.reserve(it->second->size());
+ for (auto& contextIt : *(it->second)) ids.push_back(contextIt.first);
+
+ // Retrieve by ids each time since |callback| may destroy some contexts.
+ for (auto& contextId : ids) {
+ it = m_contexts.find(contextGroupId);
+ if (it == m_contexts.end()) continue;
+ auto contextIt = it->second->find(contextId);
+ if (contextIt != it->second->end()) callback(contextIt->second.get());
+ }
+}
+
+void V8InspectorImpl::forEachSession(
+ int contextGroupId, std::function<void(V8InspectorSessionImpl*)> callback) {
+ auto it = m_sessions.find(contextGroupId);
+ if (it == m_sessions.end()) return;
+ std::vector<int> ids;
+ ids.reserve(it->second.size());
+ for (auto& sessionIt : it->second) ids.push_back(sessionIt.first);
+
+ // Retrieve by ids each time since |callback| may destroy some sessions.
+ for (auto& sessionId : ids) {
+ it = m_sessions.find(contextGroupId);
+ if (it == m_sessions.end()) continue;
+ auto sessionIt = it->second.find(sessionId);
+ if (sessionIt != it->second.end()) callback(sessionIt->second);
+ }
+}
+
} // namespace v8_inspector
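
forEachContext() and forEachSession() above deliberately snapshot the ids first and then re-find each entry, because the callback is allowed to remove entries from the map mid-iteration. The same pattern over a plain std::map, with a hypothetical Item payload:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    using Item = std::string;

    // Mirrors V8InspectorImpl::forEachSession: snapshot the keys, then look
    // each one up again so entries removed by |callback| are skipped safely.
    void forEach(std::map<int, Item>& items,
                 const std::function<void(Item&)>& callback) {
      std::vector<int> ids;
      ids.reserve(items.size());
      for (auto& it : items) ids.push_back(it.first);
      for (int id : ids) {
        auto it = items.find(id);
        if (it != items.end()) callback(it->second);
      }
    }

    int main() {
      std::map<int, Item> items = {{1, "a"}, {2, "b"}, {3, "c"}};
      // The callback erases entry 3 while the loop is still running; the
      // snapshot-and-refind pattern simply skips it.
      forEach(items, [&items](Item& item) {
        std::cout << item << "\n";
        items.erase(3);
      });
    }
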
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 804804e0ab..3effb39f7c 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -31,7 +31,7 @@
#ifndef V8_INSPECTOR_V8INSPECTORIMPL_H_
#define V8_INSPECTOR_V8INSPECTORIMPL_H_
-#include <vector>
+#include <functional>
#include "src/base/macros.h"
#include "src/inspector/protocol/Protocol.h"
@@ -74,7 +74,6 @@ class V8InspectorImpl : public V8Inspector {
const StringView& state) override;
void contextCreated(const V8ContextInfo&) override;
void contextDestroyed(v8::Local<v8::Context>) override;
- void contextCollected(int contextGroupId, int contextId);
void resetContextGroup(int contextGroupId) override;
void idleStarted() override;
void idleFinished() override;
@@ -103,17 +102,15 @@ class V8InspectorImpl : public V8Inspector {
void unmuteExceptions(int contextGroupId);
V8ConsoleMessageStorage* ensureConsoleMessageStorage(int contextGroupId);
bool hasConsoleMessageStorage(int contextGroupId);
- using ContextByIdMap =
- protocol::HashMap<int, std::unique_ptr<InspectedContext>>;
void discardInspectedContext(int contextGroupId, int contextId);
- const ContextByIdMap* contextGroup(int contextGroupId);
void disconnect(V8InspectorSessionImpl*);
- V8InspectorSessionImpl* sessionForContextGroup(int contextGroupId);
+ V8InspectorSessionImpl* sessionById(int contextGroupId, int sessionId);
InspectedContext* getContext(int groupId, int contextId) const;
- V8DebuggerAgentImpl* enabledDebuggerAgentForGroup(int contextGroupId);
- V8RuntimeAgentImpl* enabledRuntimeAgentForGroup(int contextGroupId);
- V8ProfilerAgentImpl* enabledProfilerAgentForGroup(int contextGroupId);
V8Console* console();
+ void forEachContext(int contextGroupId,
+ std::function<void(InspectedContext*)> callback);
+ void forEachSession(int contextGroupId,
+ std::function<void(V8InspectorSessionImpl*)> callback);
private:
v8::Isolate* m_isolate;
@@ -123,16 +120,20 @@ class V8InspectorImpl : public V8Inspector {
int m_capturingStackTracesCount;
unsigned m_lastExceptionId;
int m_lastContextId;
+ int m_lastSessionId = 0;
using MuteExceptionsMap = protocol::HashMap<int, int>;
MuteExceptionsMap m_muteExceptionsMap;
+ using ContextByIdMap =
+ protocol::HashMap<int, std::unique_ptr<InspectedContext>>;
using ContextsByGroupMap =
protocol::HashMap<int, std::unique_ptr<ContextByIdMap>>;
ContextsByGroupMap m_contexts;
- using SessionMap = protocol::HashMap<int, V8InspectorSessionImpl*>;
- SessionMap m_sessions;
+ // contextGroupId -> sessionId -> session
+ protocol::HashMap<int, protocol::HashMap<int, V8InspectorSessionImpl*>>
+ m_sessions;
using ConsoleStorageMap =
protocol::HashMap<int, std::unique_ptr<V8ConsoleMessageStorage>>;
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 2674fc2f63..f0bbc3b3c7 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -43,17 +43,19 @@ int V8ContextInfo::executionContextId(v8::Local<v8::Context> context) {
}
std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
- V8InspectorImpl* inspector, int contextGroupId,
+ V8InspectorImpl* inspector, int contextGroupId, int sessionId,
V8Inspector::Channel* channel, const StringView& state) {
- return std::unique_ptr<V8InspectorSessionImpl>(
- new V8InspectorSessionImpl(inspector, contextGroupId, channel, state));
+ return std::unique_ptr<V8InspectorSessionImpl>(new V8InspectorSessionImpl(
+ inspector, contextGroupId, sessionId, channel, state));
}
V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
int contextGroupId,
+ int sessionId,
V8Inspector::Channel* channel,
const StringView& savedState)
: m_contextGroupId(contextGroupId),
+ m_sessionId(sessionId),
m_inspector(inspector),
m_channel(channel),
m_customObjectFormatterEnabled(false),
@@ -181,46 +183,27 @@ void V8InspectorSessionImpl::reset() {
void V8InspectorSessionImpl::discardInjectedScripts() {
m_inspectedObjects.clear();
- const V8InspectorImpl::ContextByIdMap* contexts =
- m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) return;
-
- std::vector<int> keys;
- keys.reserve(contexts->size());
- for (auto& idContext : *contexts) keys.push_back(idContext.first);
- for (auto& key : keys) {
- contexts = m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) continue;
- auto contextIt = contexts->find(key);
- if (contextIt != contexts->end())
- contextIt->second
- ->discardInjectedScript(); // This may destroy some contexts.
- }
+ int sessionId = m_sessionId;
+ m_inspector->forEachContext(m_contextGroupId,
+ [&sessionId](InspectedContext* context) {
+ context->discardInjectedScript(sessionId);
+ });
}
Response V8InspectorSessionImpl::findInjectedScript(
int contextId, InjectedScript*& injectedScript) {
injectedScript = nullptr;
- if (!contextId)
- return Response::Error("Cannot find context with specified id");
-
- const V8InspectorImpl::ContextByIdMap* contexts =
- m_inspector->contextGroup(m_contextGroupId);
- if (!contexts)
- return Response::Error("Cannot find context with specified id");
-
- auto contextsIt = contexts->find(contextId);
- if (contextsIt == contexts->end())
- return Response::Error("Cannot find context with specified id");
-
- const std::unique_ptr<InspectedContext>& context = contextsIt->second;
- if (!context->getInjectedScript()) {
- if (!context->createInjectedScript())
+ InspectedContext* context =
+ m_inspector->getContext(m_contextGroupId, contextId);
+ if (!context) return Response::Error("Cannot find context with specified id");
+ injectedScript = context->getInjectedScript(m_sessionId);
+ if (!injectedScript) {
+ if (!context->createInjectedScript(m_sessionId))
return Response::Error("Cannot access specified execution context");
+ injectedScript = context->getInjectedScript(m_sessionId);
if (m_customObjectFormatterEnabled)
- context->getInjectedScript()->setCustomObjectFormatterEnabled(true);
+ injectedScript->setCustomObjectFormatterEnabled(true);
}
- injectedScript = context->getInjectedScript();
return Response::OK();
}
@@ -234,22 +217,12 @@ void V8InspectorSessionImpl::releaseObjectGroup(const StringView& objectGroup) {
}
void V8InspectorSessionImpl::releaseObjectGroup(const String16& objectGroup) {
- const V8InspectorImpl::ContextByIdMap* contexts =
- m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) return;
-
- std::vector<int> keys;
- for (auto& idContext : *contexts) keys.push_back(idContext.first);
- for (auto& key : keys) {
- contexts = m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) continue;
- auto contextsIt = contexts->find(key);
- if (contextsIt == contexts->end()) continue;
- InjectedScript* injectedScript = contextsIt->second->getInjectedScript();
- if (injectedScript)
- injectedScript->releaseObjectGroup(
- objectGroup); // This may destroy some contexts.
- }
+ int sessionId = m_sessionId;
+ m_inspector->forEachContext(
+ m_contextGroupId, [&objectGroup, &sessionId](InspectedContext* context) {
+ InjectedScript* injectedScript = context->getInjectedScript(sessionId);
+ if (injectedScript) injectedScript->releaseObjectGroup(objectGroup);
+ });
}
bool V8InspectorSessionImpl::unwrapObject(
@@ -319,22 +292,20 @@ V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
void V8InspectorSessionImpl::setCustomObjectFormatterEnabled(bool enabled) {
m_customObjectFormatterEnabled = enabled;
- const V8InspectorImpl::ContextByIdMap* contexts =
- m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) return;
- for (auto& idContext : *contexts) {
- InjectedScript* injectedScript = idContext.second->getInjectedScript();
- if (injectedScript)
- injectedScript->setCustomObjectFormatterEnabled(enabled);
- }
+ int sessionId = m_sessionId;
+ m_inspector->forEachContext(
+ m_contextGroupId, [&enabled, &sessionId](InspectedContext* context) {
+ InjectedScript* injectedScript = context->getInjectedScript(sessionId);
+ if (injectedScript)
+ injectedScript->setCustomObjectFormatterEnabled(enabled);
+ });
}
void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
- const V8InspectorImpl::ContextByIdMap* contexts =
- m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) return;
- for (auto& idContext : *contexts)
- agent->reportExecutionContextCreated(idContext.second.get());
+ m_inspector->forEachContext(m_contextGroupId,
+ [&agent](InspectedContext* context) {
+ agent->reportExecutionContextCreated(context);
+ });
}
void V8InspectorSessionImpl::dispatchProtocolMessage(
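
Since findInjectedScript() above now asks the context for a script keyed by m_sessionId, each InspectedContext effectively owns one injected script per connected session. A rough sketch of that ownership, using a hypothetical Script placeholder instead of the real InjectedScript:

    #include <iostream>
    #include <map>
    #include <memory>

    // Placeholder for InjectedScript.
    struct Script {
      explicit Script(int sessionId) : sessionId(sessionId) {}
      int sessionId;
    };

    // Placeholder for InspectedContext: one script per connected session.
    class Context {
     public:
      Script* getScript(int sessionId) {
        auto it = scripts_.find(sessionId);
        return it == scripts_.end() ? nullptr : it->second.get();
      }

      Script* ensureScript(int sessionId) {
        Script* script = getScript(sessionId);
        if (!script) {
          auto created = std::make_unique<Script>(sessionId);
          script = created.get();
          scripts_[sessionId] = std::move(created);
        }
        return script;
      }

      void discardScript(int sessionId) { scripts_.erase(sessionId); }

     private:
      std::map<int, std::unique_ptr<Script>> scripts_;
    };

    int main() {
      Context context;
      Script* a = context.ensureScript(1);
      Script* b = context.ensureScript(2);
      std::cout << (a != b) << "\n";                           // 1: per session
      context.discardScript(1);
      std::cout << (context.getScript(1) == nullptr) << "\n";  // 1
      std::cout << (context.getScript(2) == b) << "\n";        // 1
    }
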
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 7a59e1cead..adac6f1a85 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -32,8 +32,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
public protocol::FrontendChannel {
public:
static std::unique_ptr<V8InspectorSessionImpl> create(
- V8InspectorImpl*, int contextGroupId, V8Inspector::Channel*,
- const StringView& state);
+ V8InspectorImpl*, int contextGroupId, int sessionId,
+ V8Inspector::Channel*, const StringView& state);
~V8InspectorSessionImpl();
V8InspectorImpl* inspector() const { return m_inspector; }
@@ -43,6 +43,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
V8ProfilerAgentImpl* profilerAgent() { return m_profilerAgent.get(); }
V8RuntimeAgentImpl* runtimeAgent() { return m_runtimeAgent.get(); }
int contextGroupId() const { return m_contextGroupId; }
+ int sessionId() const { return m_sessionId; }
Response findInjectedScript(int contextId, InjectedScript*&);
Response findInjectedScript(RemoteObjectIdBase*, InjectedScript*&);
@@ -91,7 +92,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
static const unsigned kInspectedObjectBufferSize = 5;
private:
- V8InspectorSessionImpl(V8InspectorImpl*, int contextGroupId,
+ V8InspectorSessionImpl(V8InspectorImpl*, int contextGroupId, int sessionId,
V8Inspector::Channel*, const StringView& state);
protocol::DictionaryValue* agentState(const String16& name);
@@ -103,6 +104,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
void flushProtocolNotifications() override;
int m_contextGroupId;
+ int m_sessionId;
V8InspectorImpl* m_inspector;
V8Inspector::Channel* m_channel;
bool m_customObjectFormatterEnabled;
diff --git a/deps/v8/src/inspector/v8-internal-value-type.cc b/deps/v8/src/inspector/v8-internal-value-type.cc
index 46f5dac1ac..54e839e64e 100644
--- a/deps/v8/src/inspector/v8-internal-value-type.cc
+++ b/deps/v8/src/inspector/v8-internal-value-type.cc
@@ -29,7 +29,6 @@ v8::Local<v8::String> subtypeForInternalType(v8::Isolate* isolate,
return toV8StringInternalized(isolate, "internal#scopeList");
}
UNREACHABLE();
- return v8::Local<v8::String>();
}
} // namespace
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 8aa55a3414..026b6c5925 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -7,6 +7,7 @@
#include <vector>
#include "src/base/atomicops.h"
+#include "src/flags.h" // TODO(jgruber): Remove include and DEPS entry.
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger.h"
@@ -279,9 +280,19 @@ Response V8ProfilerAgentImpl::startPreciseCoverage(Maybe<bool> callCount) {
m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, true);
m_state->setBoolean(ProfilerAgentState::preciseCoverageCallCount,
callCountValue);
- v8::debug::Coverage::SelectMode(
- m_isolate, callCountValue ? v8::debug::Coverage::kPreciseCount
- : v8::debug::Coverage::kPreciseBinary);
+ // BlockCount is a superset of PreciseCount. It includes block-granularity
+ // coverage data if it exists (at the time of writing, that's the case for
+ // each function recompiled after the BlockCount mode has been set); and
+ // function-granularity coverage data otherwise.
+ // TODO(jgruber): Implement block binary coverage.
+ v8::debug::Coverage::Mode count_mode =
+ v8::internal::FLAG_block_coverage ? v8::debug::Coverage::kBlockCount
+ : v8::debug::Coverage::kPreciseCount;
+ v8::debug::Coverage::Mode binary_mode =
+ v8::internal::FLAG_block_coverage ? v8::debug::Coverage::kBlockBinary
+ : v8::debug::Coverage::kPreciseBinary;
+ v8::debug::Coverage::SelectMode(m_isolate,
+ callCountValue ? count_mode : binary_mode);
return Response::OK();
}
@@ -294,6 +305,15 @@ Response V8ProfilerAgentImpl::stopPreciseCoverage() {
}
namespace {
+std::unique_ptr<protocol::Profiler::CoverageRange> createCoverageRange(
+ int start, int end, int count) {
+ return protocol::Profiler::CoverageRange::create()
+ .setStartOffset(start)
+ .setEndOffset(end)
+ .setCount(count)
+ .build();
+}
+
Response coverageToProtocol(
v8::Isolate* isolate, const v8::debug::Coverage& coverage,
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
@@ -311,18 +331,27 @@ Response coverageToProtocol(
script_data.GetFunctionData(j);
std::unique_ptr<protocol::Array<protocol::Profiler::CoverageRange>>
ranges = protocol::Array<protocol::Profiler::CoverageRange>::create();
- // At this point we only have per-function coverage data, so there is
- // only one range per function.
- ranges->addItem(protocol::Profiler::CoverageRange::create()
- .setStartOffset(function_data.StartOffset())
- .setEndOffset(function_data.EndOffset())
- .setCount(function_data.Count())
- .build());
+
+ // Add function range.
+ ranges->addItem(createCoverageRange(function_data.StartOffset(),
+ function_data.EndOffset(),
+ function_data.Count()));
+
+ // Process inner blocks.
+ for (size_t k = 0; k < function_data.BlockCount(); k++) {
+ v8::debug::Coverage::BlockData block_data =
+ function_data.GetBlockData(k);
+ ranges->addItem(createCoverageRange(block_data.StartOffset(),
+ block_data.EndOffset(),
+ block_data.Count()));
+ }
+
functions->addItem(
protocol::Profiler::FunctionCoverage::create()
.setFunctionName(toProtocolString(
function_data.Name().FromMaybe(v8::Local<v8::String>())))
.setRanges(std::move(ranges))
+ .setIsBlockCoverage(function_data.HasBlockCoverage())
.build());
}
String16 url;
@@ -364,7 +393,7 @@ Response V8ProfilerAgentImpl::getBestEffortCoverage(
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
- v8::base::NoBarrier_AtomicIncrement(&s_lastProfileId, 1));
+ v8::base::Relaxed_AtomicIncrement(&s_lastProfileId, 1));
}
void V8ProfilerAgentImpl::startProfiling(const String16& title) {
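
When block coverage is available, the coverageToProtocol() change above emits the function-wide range first and then one range per collected block. A simplified sketch of that flattening, with hypothetical FunctionData/Block structs standing in for v8::debug::Coverage:

    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for v8::debug::Coverage::FunctionData/BlockData.
    struct Block {
      int start;
      int end;
      int count;
    };

    struct FunctionData {
      int start;
      int end;
      int count;
      std::vector<Block> blocks;  // empty unless block coverage was collected
    };

    struct CoverageRange {
      int start;
      int end;
      int count;
    };

    // Mirrors coverageToProtocol: the function-wide range comes first, then
    // one range per inner block.
    std::vector<CoverageRange> toRanges(const FunctionData& fn) {
      std::vector<CoverageRange> ranges;
      ranges.push_back({fn.start, fn.end, fn.count});
      for (const Block& block : fn.blocks)
        ranges.push_back({block.start, block.end, block.count});
      return ranges;
    }

    int main() {
      FunctionData fn{0, 100, 3, {{10, 20, 3}, {30, 40, 0}}};
      for (const CoverageRange& r : toRanges(fn))
        std::cout << r.start << "-" << r.end << ": " << r.count << "\n";
    }
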
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 9b4944aa57..c0e5ac14d8 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -61,12 +61,12 @@ namespace {
template <typename Callback>
class ProtocolPromiseHandler {
public:
- static void add(V8InspectorImpl* inspector, v8::Local<v8::Context> context,
+ static void add(V8InspectorSessionImpl* session,
+ v8::Local<v8::Context> context,
v8::MaybeLocal<v8::Value> value,
- const String16& notPromiseError, int contextGroupId,
- int executionContextId, const String16& objectGroup,
- bool returnByValue, bool generatePreview,
- std::unique_ptr<Callback> callback) {
+ const String16& notPromiseError, int executionContextId,
+ const String16& objectGroup, bool returnByValue,
+ bool generatePreview, std::unique_ptr<Callback> callback) {
if (value.IsEmpty()) {
callback->sendFailure(Response::InternalError());
return;
@@ -75,14 +75,15 @@ class ProtocolPromiseHandler {
callback->sendFailure(Response::Error(notPromiseError));
return;
}
+ V8InspectorImpl* inspector = session->inspector();
v8::MicrotasksScope microtasks_scope(inspector->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
v8::Local<v8::Promise> promise =
v8::Local<v8::Promise>::Cast(value.ToLocalChecked());
Callback* rawCallback = callback.get();
ProtocolPromiseHandler<Callback>* handler = new ProtocolPromiseHandler(
- inspector, contextGroupId, executionContextId, objectGroup,
- returnByValue, generatePreview, std::move(callback));
+ session, executionContextId, objectGroup, returnByValue,
+ generatePreview, std::move(callback));
v8::Local<v8::Value> wrapper = handler->m_wrapper.Get(inspector->isolate());
v8::Local<v8::Function> thenCallbackFunction =
@@ -134,12 +135,27 @@ class ProtocolPromiseHandler {
handler->wrapObject(value));
if (!wrappedValue) return;
- std::unique_ptr<V8StackTraceImpl> stack =
- handler->m_inspector->debugger()->captureStackTrace(true);
+ String16 message;
+ std::unique_ptr<V8StackTraceImpl> stack;
+ if (value->IsNativeError()) {
+ message =
+ " " +
+ toProtocolString(
+ value->ToDetailString(info.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked());
+ v8::Local<v8::StackTrace> stackTrace = v8::debug::GetDetailedStackTrace(
+ info.GetIsolate(), v8::Local<v8::Object>::Cast(value));
+ if (!stackTrace.IsEmpty()) {
+ stack = handler->m_inspector->debugger()->createStackTrace(stackTrace);
+ }
+ }
+ if (!stack) {
+ stack = handler->m_inspector->debugger()->captureStackTrace(true);
+ }
std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
protocol::Runtime::ExceptionDetails::create()
.setExceptionId(handler->m_inspector->nextExceptionId())
- .setText("Uncaught (in promise)")
+ .setText("Uncaught (in promise)" + message)
.setLineNumber(stack && !stack->isEmpty() ? stack->topLineNumber()
: 0)
.setColumnNumber(
@@ -154,19 +170,20 @@ class ProtocolPromiseHandler {
std::move(exceptionDetails));
}
- ProtocolPromiseHandler(V8InspectorImpl* inspector, int contextGroupId,
+ ProtocolPromiseHandler(V8InspectorSessionImpl* session,
int executionContextId, const String16& objectGroup,
bool returnByValue, bool generatePreview,
std::unique_ptr<Callback> callback)
- : m_inspector(inspector),
- m_contextGroupId(contextGroupId),
+ : m_inspector(session->inspector()),
+ m_sessionId(session->sessionId()),
+ m_contextGroupId(session->contextGroupId()),
m_executionContextId(executionContextId),
m_objectGroup(objectGroup),
m_returnByValue(returnByValue),
m_generatePreview(generatePreview),
m_callback(std::move(callback)),
- m_wrapper(inspector->isolate(),
- v8::External::New(inspector->isolate(), this)) {
+ m_wrapper(m_inspector->isolate(),
+ v8::External::New(m_inspector->isolate(), this)) {
m_wrapper.SetWeak(this, cleanup, v8::WeakCallbackType::kParameter);
}
@@ -184,8 +201,13 @@ class ProtocolPromiseHandler {
std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
v8::Local<v8::Value> value) {
- InjectedScript::ContextScope scope(m_inspector, m_contextGroupId,
- m_executionContextId);
+ V8InspectorSessionImpl* session =
+ m_inspector->sessionById(m_contextGroupId, m_sessionId);
+ if (!session) {
+ m_callback->sendFailure(Response::Error("No session"));
+ return nullptr;
+ }
+ InjectedScript::ContextScope scope(session, m_executionContextId);
Response response = scope.initialize();
if (!response.isSuccess()) {
m_callback->sendFailure(response);
@@ -203,6 +225,7 @@ class ProtocolPromiseHandler {
}
V8InspectorImpl* m_inspector;
+ int m_sessionId;
int m_contextGroupId;
int m_executionContextId;
String16 m_objectGroup;
@@ -276,8 +299,7 @@ void V8RuntimeAgentImpl::evaluate(
return;
}
- InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
- contextId);
+ InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
if (!response.isSuccess()) {
callback->sendFailure(response);
@@ -320,8 +342,8 @@ void V8RuntimeAgentImpl::evaluate(
return;
}
ProtocolPromiseHandler<EvaluateCallback>::add(
- m_inspector, scope.context(), maybeResultValue,
- "Result of the evaluation is not a promise", m_session->contextGroupId(),
+ m_session, scope.context(), maybeResultValue,
+ "Result of the evaluation is not a promise",
scope.injectedScript()->context()->contextId(), objectGroup.fromMaybe(""),
returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
std::move(callback));
@@ -331,16 +353,15 @@ void V8RuntimeAgentImpl::awaitPromise(
const String16& promiseObjectId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview,
std::unique_ptr<AwaitPromiseCallback> callback) {
- InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
- promiseObjectId);
+ InjectedScript::ObjectScope scope(m_session, promiseObjectId);
Response response = scope.initialize();
if (!response.isSuccess()) {
callback->sendFailure(response);
return;
}
ProtocolPromiseHandler<AwaitPromiseCallback>::add(
- m_inspector, scope.context(), scope.object(),
- "Could not find promise with given id", m_session->contextGroupId(),
+ m_session, scope.context(), scope.object(),
+ "Could not find promise with given id",
scope.injectedScript()->context()->contextId(), scope.objectGroupName(),
returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
std::move(callback));
@@ -352,8 +373,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
Maybe<bool> userGesture, Maybe<bool> awaitPromise,
std::unique_ptr<CallFunctionOnCallback> callback) {
- InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
- objectId);
+ InjectedScript::ObjectScope scope(m_session, objectId);
Response response = scope.initialize();
if (!response.isSuccess()) {
callback->sendFailure(response);
@@ -438,9 +458,8 @@ void V8RuntimeAgentImpl::callFunctionOn(
}
ProtocolPromiseHandler<CallFunctionOnCallback>::add(
- m_inspector, scope.context(), maybeResultValue,
+ m_session, scope.context(), maybeResultValue,
"Result of the function call is not a promise",
- m_session->contextGroupId(),
scope.injectedScript()->context()->contextId(), scope.objectGroupName(),
returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
std::move(callback));
@@ -456,12 +475,13 @@ Response V8RuntimeAgentImpl::getProperties(
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
using protocol::Runtime::InternalPropertyDescriptor;
- InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
- objectId);
+ InjectedScript::ObjectScope scope(m_session, objectId);
Response response = scope.initialize();
if (!response.isSuccess()) return response;
scope.ignoreExceptionsAndMuteConsole();
+ v8::MicrotasksScope microtasks_scope(m_inspector->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
if (!scope.object()->IsObject())
return Response::Error("Value with given id is not an object");
@@ -507,8 +527,7 @@ Response V8RuntimeAgentImpl::getProperties(
}
Response V8RuntimeAgentImpl::releaseObject(const String16& objectId) {
- InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
- objectId);
+ InjectedScript::ObjectScope scope(m_session, objectId);
Response response = scope.initialize();
if (!response.isSuccess()) return response;
scope.injectedScript()->releaseObject(objectId);
@@ -550,8 +569,7 @@ Response V8RuntimeAgentImpl::compileScript(
Response response = ensureContext(m_inspector, m_session->contextGroupId(),
std::move(executionContextId), &contextId);
if (!response.isSuccess()) return response;
- InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
- contextId);
+ InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
if (!response.isSuccess()) return response;
@@ -607,8 +625,7 @@ void V8RuntimeAgentImpl::runScript(
return;
}
- InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
- contextId);
+ InjectedScript::ContextScope scope(m_session, contextId);
response = scope.initialize();
if (!response.isSuccess()) {
callback->sendFailure(response);
@@ -650,9 +667,8 @@ void V8RuntimeAgentImpl::runScript(
return;
}
ProtocolPromiseHandler<RunScriptCallback>::add(
- m_inspector, scope.context(), maybeResultValue.ToLocalChecked(),
+ m_session, scope.context(), maybeResultValue.ToLocalChecked(),
"Result of the script execution is not a promise",
- m_session->contextGroupId(),
scope.injectedScript()->context()->contextId(), objectGroup.fromMaybe(""),
returnByValue.fromMaybe(false), generatePreview.fromMaybe(false),
std::move(callback));
@@ -700,10 +716,11 @@ Response V8RuntimeAgentImpl::disable() {
void V8RuntimeAgentImpl::reset() {
m_compiledScripts.clear();
if (m_enabled) {
- if (const V8InspectorImpl::ContextByIdMap* contexts =
- m_inspector->contextGroup(m_session->contextGroupId())) {
- for (auto& idContext : *contexts) idContext.second->setReported(false);
- }
+ int sessionId = m_session->sessionId();
+ m_inspector->forEachContext(m_session->contextGroupId(),
+ [&sessionId](InspectedContext* context) {
+ context->setReported(sessionId, false);
+ });
m_frontend.executionContextsCleared();
}
}
@@ -711,7 +728,7 @@ void V8RuntimeAgentImpl::reset() {
void V8RuntimeAgentImpl::reportExecutionContextCreated(
InspectedContext* context) {
if (!m_enabled) return;
- context->setReported(true);
+ context->setReported(m_session->sessionId(), true);
std::unique_ptr<protocol::Runtime::ExecutionContextDescription> description =
protocol::Runtime::ExecutionContextDescription::create()
.setId(context->contextId())
@@ -726,8 +743,8 @@ void V8RuntimeAgentImpl::reportExecutionContextCreated(
void V8RuntimeAgentImpl::reportExecutionContextDestroyed(
InspectedContext* context) {
- if (m_enabled && context->isReported()) {
- context->setReported(false);
+ if (m_enabled && context->isReported(m_session->sessionId())) {
+ context->setReported(m_session->sessionId(), false);
m_frontend.executionContextDestroyed(context->contextId());
}
}
diff --git a/deps/v8/src/inspector/v8-value-copier.cc b/deps/v8/src/inspector/v8-value-copier.cc
index fcaeb618ca..49756c63d1 100644
--- a/deps/v8/src/inspector/v8-value-copier.cc
+++ b/deps/v8/src/inspector/v8-value-copier.cc
@@ -79,7 +79,6 @@ protocol::Response toProtocolValue(v8::Local<v8::Context> context,
using protocol::Response;
if (value.IsEmpty()) {
UNREACHABLE();
- return Response::InternalError();
}
if (!maxDepth) return Response::Error("Object reference chain is too long");
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index f44ee619d9..25f0332c13 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -258,12 +258,36 @@ void StringCompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void StringConcatDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ArgumentsCountRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void StringConcatDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kArgumentsCount
+ MachineType machine_types[] = {MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
void TypeConversionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ArgumentRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void TypeConversionStackParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
+void TypeConversionStackParameterDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformIndependent(data->register_param_count(), 1, NULL);
+}
+
void MathPowTaggedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {exponent()};
@@ -345,10 +369,10 @@ void StoreWithVectorDescriptor::InitializePlatformSpecific(
void BinaryOpWithVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kLeft, kRight, kSlot, kVector
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::AnyTagged()};
+ // kLeft, kRight, kSlot, kVector, kFunction
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged(), MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
@@ -445,6 +469,16 @@ void CallTrampolineDescriptor::InitializePlatformIndependent(
machine_types);
}
+void CallVarargsDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kActualArgumentsCount, kArgumentsList, kArgumentsLength
+ MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kActualArgumentsCount, kStartIndex
@@ -454,6 +488,35 @@ void CallForwardVarargsDescriptor::InitializePlatformIndependent(
machine_types);
}
+void CallWithSpreadDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kArgumentsCount, kArgumentsList
+ MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kArgumentsList
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kArgumentsList,
+ // kArgumentsLength
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged(), MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kStartIndex
@@ -464,6 +527,26 @@ void ConstructForwardVarargsDescriptor::InitializePlatformIndependent(
machine_types);
}
+void ConstructWithSpreadDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kArgumentsCount, kSpread
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kArgumentsList
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
void ConstructStubDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kNewTarget, kActualArgumentsCount, kAllocationSite
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 127e156a82..0cd4cbd193 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -35,6 +35,7 @@ class PlatformInterfaceDescriptor;
V(FastNewObject) \
V(FastNewArguments) \
V(TypeConversion) \
+ V(TypeConversionStackParameter) \
V(Typeof) \
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
@@ -44,11 +45,17 @@ class PlatformInterfaceDescriptor;
V(CallFunction) \
V(CallIC) \
V(CallICTrampoline) \
+ V(CallVarargs) \
V(CallForwardVarargs) \
+ V(CallWithSpread) \
+ V(CallWithArrayLike) \
V(CallConstruct) \
V(CallTrampoline) \
V(ConstructStub) \
+ V(ConstructVarargs) \
V(ConstructForwardVarargs) \
+ V(ConstructWithSpread) \
+ V(ConstructWithArrayLike) \
V(ConstructTrampoline) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
@@ -67,6 +74,7 @@ class PlatformInterfaceDescriptor;
V(StringCharAt) \
V(StringCharCodeAt) \
V(StringCompare) \
+ V(StringConcat) \
V(SubString) \
V(ForInPrepare) \
V(GetProperty) \
@@ -99,13 +107,12 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
PlatformInterfaceDescriptor* platform_descriptor = NULL);
// if machine_types is null, then an array of size
- // (register_parameter_count + extra_parameter_count) will be created
- // with MachineType::AnyTagged() for each member.
+ // (parameter_count + extra_parameter_count) will be created with
+ // MachineType::AnyTagged() for each member.
//
// if machine_types is not null, then it should be of the size
- // register_parameter_count. Those members of the parameter array
- // will be initialized from {machine_types}, and the rest initialized
- // to MachineType::AnyTagged().
+ // parameter_count. Those members of the parameter array will be initialized
+ // from {machine_types}, and the rest initialized to MachineType::AnyTagged().
void InitializePlatformIndependent(int parameter_count,
int extra_parameter_count,
const MachineType* machine_types);
@@ -385,7 +392,7 @@ class StoreDescriptor : public CallInterfaceDescriptor {
static const Register ValueRegister();
static const Register SlotRegister();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
static const bool kPassLastArgsOnStack = true;
#else
static const bool kPassLastArgsOnStack = false;
@@ -507,6 +514,14 @@ class TypeConversionDescriptor final : public CallInterfaceDescriptor {
static const Register ArgumentRegister();
};
+class TypeConversionStackParameterDescriptor final
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kArgument)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ TypeConversionStackParameterDescriptor, CallInterfaceDescriptor)
+};
+
class ForInPrepareDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject)
@@ -574,6 +589,14 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class CallVarargsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kArgumentsList,
+ kArgumentsLength)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallVarargsDescriptor,
+ CallInterfaceDescriptor)
+};
+
class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kStartIndex)
@@ -581,6 +604,28 @@ class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallWithSpreadDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsList)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallWithArrayLikeDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kArgumentsList,
+ kArgumentsLength)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructVarargsDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kStartIndex)
@@ -588,6 +633,20 @@ class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
};
+class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsCount, kSpread)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructWithSpreadDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructWithArrayLikeDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
@@ -690,7 +749,6 @@ class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
ArrayNArgumentsConstructorDescriptor, CallInterfaceDescriptor)
};
-
class CompareDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kLeft, kRight)
@@ -714,7 +772,7 @@ class BinaryOpWithAllocationSiteDescriptor : public CallInterfaceDescriptor {
class BinaryOpWithVectorDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kLeft, kRight, kSlot, kVector)
+ DEFINE_PARAMETERS(kLeft, kRight, kSlot, kVector, kFunction)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BinaryOpWithVectorDescriptor,
CallInterfaceDescriptor)
};
@@ -753,6 +811,15 @@ class StringCompareDescriptor : public CallInterfaceDescriptor {
static const Register RightRegister();
};
+class StringConcatDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kArgumentsCount)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringConcatDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register ArgumentsCountRegister();
+};
+
class SubStringDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kString, kFrom, kTo)
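
Each descriptor added above pairs a DEFINE_PARAMETERS list in interface-descriptors.h with a matching InitializePlatformIndependent definition in interface-descriptors.cc that supplies one MachineType per named parameter, in order. A sketch of a hypothetical extra descriptor following the same pattern (FooWithCountDescriptor and its parameters are invented purely for illustration):

    // Header side: parameter names and boilerplate declaration.
    class FooWithCountDescriptor : public CallInterfaceDescriptor {
     public:
      DEFINE_PARAMETERS(kTarget, kArgumentsCount)
      DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FooWithCountDescriptor,
                                                   CallInterfaceDescriptor)
    };

    // Source side: one MachineType per parameter, in declaration order.
    void FooWithCountDescriptor::InitializePlatformIndependent(
        CallInterfaceDescriptorData* data) {
      // kTarget, kArgumentsCount
      MachineType machine_types[] = {MachineType::AnyTagged(),
                                     MachineType::Int32()};
      data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                          machine_types);
    }
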
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 0f2165c647..e985bda102 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -5,3 +5,5 @@ leszeks@chromium.org
mstarzinger@chromium.org
mythria@chromium.org
rmcilroy@chromium.org
+
+# COMPONENT: Blink>JavaScript>Interpreter
diff --git a/deps/v8/src/interpreter/block-coverage-builder.h b/deps/v8/src/interpreter/block-coverage-builder.h
new file mode 100644
index 0000000000..dc1b4d704b
--- /dev/null
+++ b/deps/v8/src/interpreter/block-coverage-builder.h
@@ -0,0 +1,68 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BLOCK_COVERAGE_BUILDER_H_
+#define V8_INTERPRETER_BLOCK_COVERAGE_BUILDER_H_
+
+#include "src/ast/ast-source-ranges.h"
+#include "src/interpreter/bytecode-array-builder.h"
+
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Used to generate IncBlockCounter bytecodes and the {source range, slot}
+// mapping for block coverage.
+class BlockCoverageBuilder final : public ZoneObject {
+ public:
+ BlockCoverageBuilder(Zone* zone, BytecodeArrayBuilder* builder,
+ SourceRangeMap* source_range_map)
+ : slots_(0, zone),
+ builder_(builder),
+ source_range_map_(source_range_map) {
+ DCHECK_NOT_NULL(builder);
+ DCHECK_NOT_NULL(source_range_map);
+ }
+
+ static constexpr int kNoCoverageArraySlot = -1;
+
+ int AllocateBlockCoverageSlot(AstNode* node, SourceRangeKind kind) {
+ AstNodeSourceRanges* ranges = source_range_map_->Find(node);
+ if (ranges == nullptr) return kNoCoverageArraySlot;
+
+ SourceRange range = ranges->GetRange(kind);
+ if (range.IsEmpty()) return kNoCoverageArraySlot;
+
+ const int slot = static_cast<int>(slots_.size());
+ slots_.emplace_back(range);
+ return slot;
+ }
+
+ void IncrementBlockCounter(int coverage_array_slot) {
+ if (coverage_array_slot == kNoCoverageArraySlot) return;
+ builder_->IncBlockCounter(coverage_array_slot);
+ }
+
+ void IncrementBlockCounter(AstNode* node, SourceRangeKind kind) {
+ int slot = AllocateBlockCoverageSlot(node, kind);
+ IncrementBlockCounter(slot);
+ }
+
+ const ZoneVector<SourceRange>& slots() const { return slots_; }
+
+ private:
+ // Contains source range information for allocated block coverage counter
+ // slots. Slot i covers range slots_[i].
+ ZoneVector<SourceRange> slots_;
+ BytecodeArrayBuilder* builder_;
+ SourceRangeMap* source_range_map_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BLOCK_COVERAGE_BUILDER_H_
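
The new BlockCoverageBuilder is shown in full above; a short usage sketch, assuming a zone, a BytecodeArrayBuilder and a SourceRangeMap are already in hand as they are in BytecodeGenerator, and that `node` is an AST node with recorded source ranges:

    // Sketch of how the generator drives the builder (names are placeholders).
    BlockCoverageBuilder* coverage =
        new (zone) BlockCoverageBuilder(zone, builder, source_range_map);

    // Reserve a counter slot for the node's "then" range and emit the bump.
    int slot = coverage->AllocateBlockCoverageSlot(node, SourceRangeKind::kThen);
    coverage->IncrementBlockCounter(slot);  // no-op if slot == kNoCoverageArraySlot

    // At finalization the recorded ranges feed the CoverageInfo object, as in
    // the FinalizeBytecode hunk further down.
    const ZoneVector<SourceRange>& ranges = coverage->slots();
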
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index c3a0b3cb9e..f597981514 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -142,7 +142,8 @@ int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
Bytecodes::GetOperandTypes(current_bytecode());
OperandType operand_type = operand_types[operand_index];
DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- if (operand_type == OperandType::kRegList) {
+ if (operand_type == OperandType::kRegList ||
+ operand_type == OperandType::kRegOutList) {
return GetRegisterCountOperand(operand_index + 1);
} else {
return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
@@ -191,7 +192,6 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
return GetAbsoluteOffset(smi->value());
} else {
UNREACHABLE();
- return kMinInt;
}
}
@@ -260,7 +260,7 @@ JumpTableTargetOffsets::iterator::iterator(
JumpTableTargetOffset JumpTableTargetOffsets::iterator::operator*() {
DCHECK_LT(table_offset_, table_end_);
DCHECK(current_->IsSmi());
- return {index_, accessor_->GetAbsoluteOffset(Smi::cast(*current_)->value())};
+ return {index_, accessor_->GetAbsoluteOffset(Smi::ToInt(*current_))};
}
JumpTableTargetOffsets::iterator& JumpTableTargetOffsets::iterator::
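
The last hunk above swaps Smi::cast(*current_)->value() for Smi::ToInt(*current_); both read the integer payload out of a tagged value known to be a Smi, the second in a single call. A sketch of the equivalence, assuming `current_` points at a Smi as the DCHECK above guarantees:

    // Two spellings of the same read; the diff standardizes on the second.
    int via_cast = Smi::cast(*current_)->value();
    int via_to_int = Smi::ToInt(*current_);
    DCHECK_EQ(via_cast, via_to_int);
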
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 80c59e4c47..1dd5c93d8f 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -61,8 +61,6 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
zone, &register_allocator_, fixed_register_count(), parameter_count,
new (zone) RegisterTransferWriter(this));
}
-
- return_position_ = literal ? literal->return_position() : kNoSourcePosition;
}
Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
@@ -121,22 +119,18 @@ BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
void BytecodeArrayBuilder::SetDeferredSourceInfo(
BytecodeSourceInfo source_info) {
if (!source_info.is_valid()) return;
- if (deferred_source_info_.is_valid()) {
- // Emit any previous deferred source info now as a nop.
- BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
- bytecode_array_writer_.Write(&node);
- }
deferred_source_info_ = source_info;
}
void BytecodeArrayBuilder::AttachOrEmitDeferredSourceInfo(BytecodeNode* node) {
if (!deferred_source_info_.is_valid()) return;
-
if (!node->source_info().is_valid()) {
node->set_source_info(deferred_source_info_);
- } else {
- BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
- bytecode_array_writer_.Write(&node);
+ } else if (deferred_source_info_.is_statement() &&
+ node->source_info().is_expression()) {
+ BytecodeSourceInfo source_position = node->source_info();
+ source_position.MakeStatementPosition(source_position.source_position());
+ node->set_source_info(source_position);
}
deferred_source_info_.set_invalid();
}
@@ -203,7 +197,6 @@ class UnsignedOperandHelper {
return value <= kMaxUInt32;
default:
UNREACHABLE();
- return false;
}
}
};
@@ -263,6 +256,15 @@ class OperandHelper<OperandType::kRegOut> {
};
template <>
+class OperandHelper<OperandType::kRegOutList> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ return builder->GetOutputRegisterListOperand(reg_list);
+ }
+};
+
+template <>
class OperandHelper<OperandType::kRegOutPair> {
public:
INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
@@ -634,7 +636,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
if (register_optimizer_) {
// Defer source info so that if we elide the bytecode transfer, we attach
- // the source info to a subsequent bytecode or to a nop.
+ // the source info to a subsequent bytecode if it exists.
SetDeferredSourceInfo(CurrentSourcePosition(Bytecode::kLdar));
register_optimizer_->DoLdar(reg);
} else {
@@ -647,7 +649,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
if (register_optimizer_) {
// Defer source info so that if we elide the bytecode transfer, we attach
- // the source info to a subsequent bytecode or to a nop.
+ // the source info to a subsequent bytecode if it exists.
SetDeferredSourceInfo(CurrentSourcePosition(Bytecode::kStar));
register_optimizer_->DoStar(reg);
} else {
@@ -661,7 +663,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
DCHECK(from != to);
if (register_optimizer_) {
// Defer source info so that if we elide the bytecode transfer, we attach
- // the source info to a subsequent bytecode or to a nop.
+ // the source info to a subsequent bytecode if it exists.
SetDeferredSourceInfo(CurrentSourcePosition(Bytecode::kMov));
register_optimizer_->DoMov(from, to);
} else {
@@ -772,14 +774,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
- const AstRawString* name, LanguageMode language_mode) {
+ const AstRawString* name, LanguageMode language_mode,
+ LookupHoistingMode lookup_hoisting_mode) {
size_t name_index = GetConstantPoolEntry(name);
- if (language_mode == SLOPPY) {
- OutputStaLookupSlotSloppy(name_index);
- } else {
- DCHECK_EQ(language_mode, STRICT);
- OutputStaLookupSlotStrict(name_index);
- }
+ uint8_t flags =
+ StoreLookupSlotFlags::Encode(language_mode, lookup_hoisting_mode);
+ OutputStaLookupSlot(name_index, flags);
return *this;
}
@@ -978,24 +978,34 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToObject(
- Register out) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToObject(Register out) {
OutputToObject(out);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToName(
- Register out) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToName(Register out) {
OutputToName(out);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToNumber(
- Register out, int feedback_slot) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumber(Register out,
+ int feedback_slot) {
OutputToNumber(out, feedback_slot);
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToPrimitiveToString(
+ Register out, int feedback_slot) {
+ OutputToPrimitiveToString(out, feedback_slot);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StringConcat(
+ RegisterList operand_registers) {
+ OutputStringConcat(operand_registers, operand_registers.register_count());
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
// Flush the register optimizer when binding a label to ensure all
// expected registers are valid when jumping to this label.
@@ -1115,13 +1125,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotNil(BytecodeLabel* label,
}
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
- BytecodeLabel* label) {
- DCHECK(!label->is_bound());
- OutputJumpIfNotHole(label, 0);
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
BytecodeLabel* label) {
DCHECK(!label->is_bound());
@@ -1176,17 +1179,39 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
- SetReturnPosition();
OutputReturn();
return_seen_in_block_ = true;
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowReferenceErrorIfHole(
+ const AstRawString* name) {
+ size_t entry = GetConstantPoolEntry(name);
+ OutputThrowReferenceErrorIfHole(entry);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowSuperNotCalledIfHole() {
+ OutputThrowSuperNotCalledIfHole();
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowSuperAlreadyCalledIfNotHole() {
+ OutputThrowSuperAlreadyCalledIfNotHole();
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
OutputDebugger();
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::IncBlockCounter(
+ int coverage_array_slot) {
+ OutputIncBlockCounter(coverage_array_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
Register receiver, RegisterList cache_info_triple) {
DCHECK_EQ(3, cache_info_triple.register_count());
@@ -1226,15 +1251,21 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadModuleVariable(int cell_index,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
- Register generator, SuspendFlags flags) {
- OutputSuspendGenerator(generator,
- SuspendGeneratorBytecodeFlags::Encode(flags));
+ Register generator, RegisterList registers) {
+ OutputSuspendGenerator(generator, registers, registers.register_count());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
+BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorState(
Register generator) {
- OutputResumeGenerator(generator);
+ OutputRestoreGeneratorState(generator);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorRegisters(
+ Register generator, RegisterList registers) {
+ OutputRestoreGeneratorRegisters(generator, registers,
+ registers.register_count());
return *this;
}
@@ -1300,13 +1331,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallAnyReceiver(Register callable,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::TailCall(Register callable,
- RegisterList args,
- int feedback_slot) {
- OutputTailCall(callable, args, args.register_count(), feedback_slot);
- return *this;
-}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CallWithSpread(Register callable,
RegisterList args) {
OutputCallWithSpread(callable, args, args.register_count());
@@ -1426,11 +1450,6 @@ void BytecodeArrayBuilder::SetDeferredConstantPoolEntry(size_t entry,
constant_array_builder()->SetDeferredAt(entry, object);
}
-void BytecodeArrayBuilder::SetReturnPosition() {
- if (return_position_ == kNoSourcePosition) return;
- latest_source_info_.MakeStatementPosition(return_position_);
-}
-
bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
if (!reg.is_valid()) {
return false;
@@ -1506,7 +1525,6 @@ std::ostream& operator<<(std::ostream& os,
return os << "ConvertToBoolean";
}
UNREACHABLE();
- return os;
}
} // namespace interpreter
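
The renamed conversion helpers and the new string-concatenation and hole-check emitters above all keep BytecodeArrayBuilder's fluent, chaining style. A sketch of emitting the new bytecodes from generator code; the builder, operand register list, feedback slot and variable name are placeholders for values owned by the caller:

    // Convert the accumulator to a string in the first operand register, then
    // concatenate every register in `operands` back into the accumulator.
    Register first = operands.first_register();
    builder->ToPrimitiveToString(first, feedback_slot)
        .StringConcat(operands)
        .StoreAccumulatorInRegister(first);

    // Hole checks are now dedicated bytecodes rather than jump sequences:
    builder->ThrowReferenceErrorIfHole(variable_name);
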
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index fa336cde13..b82d0e28a3 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -183,8 +183,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
int feedback_slot, int depth);
// Store value in the accumulator into the variable with |name|.
- BytecodeArrayBuilder& StoreLookupSlot(const AstRawString* name,
- LanguageMode language_mode);
+ BytecodeArrayBuilder& StoreLookupSlot(
+ const AstRawString* name, LanguageMode language_mode,
+ LookupHoistingMode lookup_hoisting_mode);
// Create a new closure for a SharedFunctionInfo which will be inserted at
// constant pool index |shared_function_info_entry|.
@@ -343,10 +344,16 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
TestTypeOfFlags::LiteralFlag literal_flag);
// Converts accumulator and stores result in register |out|.
- BytecodeArrayBuilder& ConvertAccumulatorToObject(Register out);
- BytecodeArrayBuilder& ConvertAccumulatorToName(Register out);
- BytecodeArrayBuilder& ConvertAccumulatorToNumber(Register out,
- int feedback_slot);
+ BytecodeArrayBuilder& ToObject(Register out);
+ BytecodeArrayBuilder& ToName(Register out);
+ BytecodeArrayBuilder& ToNumber(Register out, int feedback_slot);
+
+ // Converts accumulator to a primitive and then to a string, and stores result
+ // in register |out|.
+ BytecodeArrayBuilder& ToPrimitiveToString(Register out, int feedback_slot);
+ // Concatenate all the string values in |operand_registers| into a string
+ // and store result in the accumulator.
+ BytecodeArrayBuilder& StringConcat(RegisterList operand_registers);
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -380,10 +387,16 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
+ BytecodeArrayBuilder& ThrowReferenceErrorIfHole(const AstRawString* name);
+ BytecodeArrayBuilder& ThrowSuperNotCalledIfHole();
+ BytecodeArrayBuilder& ThrowSuperAlreadyCalledIfNotHole();
// Debugger.
BytecodeArrayBuilder& Debugger();
+ // Increment the block counter at the given slot (block code coverage).
+ BytecodeArrayBuilder& IncBlockCounter(int slot);
+
// Complex flow control.
BytecodeArrayBuilder& ForInPrepare(Register receiver,
RegisterList cache_info_triple);
@@ -395,8 +408,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Generators.
BytecodeArrayBuilder& SuspendGenerator(Register generator,
- SuspendFlags flags);
- BytecodeArrayBuilder& ResumeGenerator(Register generator);
+ RegisterList registers);
+ BytecodeArrayBuilder& RestoreGeneratorState(Register generator);
+ BytecodeArrayBuilder& RestoreGeneratorRegisters(Register generator,
+ RegisterList registers);
// Exception handling.
BytecodeArrayBuilder& MarkHandler(int handler_id,
@@ -446,6 +461,14 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
latest_source_info_.MakeStatementPosition(expr->position());
}
+ void SetReturnPosition(int source_position, FunctionLiteral* literal) {
+ if (source_position != kNoSourcePosition) {
+ latest_source_info_.MakeStatementPosition(source_position);
+ } else if (literal->return_position() != kNoSourcePosition) {
+ latest_source_info_.MakeStatementPosition(literal->return_position());
+ }
+ }
+
bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
// Returns the raw operand value for the given register or register list.
@@ -497,9 +520,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
bool RegisterIsValid(Register reg) const;
bool RegisterListIsValid(RegisterList reg_list) const;
- // Set position for return.
- void SetReturnPosition();
-
// Sets a deferred source info which should be emitted before any future
// source info (either attached to a following bytecode or as a nop).
void SetDeferredSourceInfo(BytecodeSourceInfo source_info);
@@ -543,7 +563,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
bool return_seen_in_block_;
int parameter_count_;
int local_register_count_;
- int return_position_;
BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
BytecodeRegisterOptimizer* register_optimizer_;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index d3cc0204d4..67a52dcdcb 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -244,8 +244,6 @@ Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
return Bytecode::kJumpIfToBooleanTrueConstant;
case Bytecode::kJumpIfToBooleanFalse:
return Bytecode::kJumpIfToBooleanFalseConstant;
- case Bytecode::kJumpIfNotHole:
- return Bytecode::kJumpIfNotHoleConstant;
case Bytecode::kJumpIfNull:
return Bytecode::kJumpIfNullConstant;
case Bytecode::kJumpIfNotNull:
@@ -258,7 +256,6 @@ Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
return Bytecode::kJumpIfJSReceiverConstant;
default:
UNREACHABLE();
- return Bytecode::kIllegal;
}
}
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index aa0ef2796f..d5b64629f7 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -80,7 +80,6 @@ const char* NameForRuntimeId(uint32_t idx) {
#undef CASE
default:
UNREACHABLE();
- return nullptr;
}
}
} // anonymous namespace
@@ -175,6 +174,7 @@ std::ostream& BytecodeDecoder::Decode(std::ostream& os,
<< reg_list.last_register().ToString(parameter_count);
break;
}
+ case interpreter::OperandType::kRegOutList:
case interpreter::OperandType::kRegList: {
DCHECK_LT(i, number_of_operands - 1);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, i + 1),
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 4d50bf69c3..39f313f249 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -18,7 +18,7 @@ namespace interpreter {
uint8_t CreateArrayLiteralFlags::Encode(bool use_fast_shallow_clone,
int runtime_flags) {
uint8_t result = FlagsBits::encode(runtime_flags);
- result |= FastShallowCloneBit::encode(use_fast_shallow_clone);
+ result |= FastCloneSupportedBit::encode(use_fast_shallow_clone);
return result;
}
@@ -75,13 +75,12 @@ TestTypeOfFlags::LiteralFlag TestTypeOfFlags::Decode(uint8_t raw_flag) {
}
// static
-uint8_t SuspendGeneratorBytecodeFlags::Encode(SuspendFlags flags) {
- return FlagsBits::encode(flags);
-}
-
-// static
-SuspendFlags SuspendGeneratorBytecodeFlags::Decode(uint8_t flags) {
- return FlagsBits::decode(flags);
+uint8_t StoreLookupSlotFlags::Encode(LanguageMode language_mode,
+ LookupHoistingMode lookup_hoisting_mode) {
+ DCHECK_IMPLIES(lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy,
+ language_mode == SLOPPY);
+ return LanguageModeBit::encode(language_mode) |
+ LookupHoistingModeBit::encode(static_cast<bool>(lookup_hoisting_mode));
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 76e5f868c5..fb08420a10 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -18,8 +18,8 @@ namespace interpreter {
class CreateArrayLiteralFlags {
public:
- class FlagsBits : public BitField8<int, 0, 4> {};
- class FastShallowCloneBit : public BitField8<bool, FlagsBits::kNext, 1> {};
+ class FlagsBits : public BitField8<int, 0, 5> {};
+ class FastCloneSupportedBit : public BitField8<bool, FlagsBits::kNext, 1> {};
static uint8_t Encode(bool use_fast_shallow_clone, int runtime_flags);
@@ -29,7 +29,7 @@ class CreateArrayLiteralFlags {
class CreateObjectLiteralFlags {
public:
- class FlagsBits : public BitField8<int, 0, 4> {};
+ class FlagsBits : public BitField8<int, 0, 5> {};
class FastCloneSupportedBit : public BitField8<bool, FlagsBits::kNext, 1> {};
static uint8_t Encode(int runtime_flags, bool fast_clone_supported);
@@ -76,17 +76,17 @@ class TestTypeOfFlags {
DISALLOW_IMPLICIT_CONSTRUCTORS(TestTypeOfFlags);
};
-class SuspendGeneratorBytecodeFlags {
+class StoreLookupSlotFlags {
public:
- class FlagsBits
- : public BitField8<SuspendFlags, 0,
- static_cast<int>(SuspendFlags::kBitWidth)> {};
+ class LanguageModeBit : public BitField8<bool, 0, 1> {};
+ class LookupHoistingModeBit
+ : public BitField8<bool, LanguageModeBit::kNext, 1> {};
- static uint8_t Encode(SuspendFlags suspend_type);
- static SuspendFlags Decode(uint8_t flags);
+ static uint8_t Encode(LanguageMode language_mode,
+ LookupHoistingMode lookup_hoisting_mode);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SuspendGeneratorBytecodeFlags);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StoreLookupSlotFlags);
};
} // namespace interpreter
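
StoreLookupSlotFlags packs the store's language mode and lookup hoisting mode into the two low bits of one flag byte: LanguageModeBit at bit 0 and LookupHoistingModeBit at bit 1, with the DCHECK in the .cc hunk forbidding legacy-sloppy hoisting outside sloppy mode. A worked example of the encoding, assuming SLOPPY/STRICT map to 0/1 and LookupHoistingMode::kNormal/kLegacySloppy map to 0/1:

    //   Encode(SLOPPY, kNormal)        == 0b00  (0)
    //   Encode(STRICT, kNormal)        == 0b01  (1)
    //   Encode(SLOPPY, kLegacySloppy)  == 0b10  (2)
    //   Encode(STRICT, kLegacySloppy)  -> fails the DCHECK_IMPLIES above
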
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 7ca2c37607..f96480d69a 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-generator.h"
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
@@ -28,14 +29,12 @@ namespace interpreter {
// popping of the current {context_register} during visitation.
class BytecodeGenerator::ContextScope BASE_EMBEDDED {
public:
- ContextScope(BytecodeGenerator* generator, Scope* scope,
- bool should_pop_context = true)
+ ContextScope(BytecodeGenerator* generator, Scope* scope)
: generator_(generator),
scope_(scope),
outer_(generator_->execution_context()),
register_(Register::current_context()),
- depth_(0),
- should_pop_context_(should_pop_context) {
+ depth_(0) {
DCHECK(scope->NeedsContext() || outer_ == nullptr);
if (outer_) {
depth_ = outer_->depth_ + 1;
@@ -50,7 +49,7 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
}
~ContextScope() {
- if (outer_ && should_pop_context_) {
+ if (outer_) {
DCHECK_EQ(register_.index(), Register::current_context().index());
generator_->builder()->PopContext(outer_->reg());
outer_->set_register(register_);
@@ -78,7 +77,6 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
}
Register reg() const { return register_; }
- bool ShouldPopContext() { return should_pop_context_; }
private:
const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
@@ -90,7 +88,6 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
ContextScope* outer_;
Register register_;
int depth_;
- bool should_pop_context_;
};
// Scoped class for tracking control statements entered by the
@@ -104,11 +101,18 @@ class BytecodeGenerator::ControlScope BASE_EMBEDDED {
}
virtual ~ControlScope() { generator_->set_execution_control(outer()); }
- void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
- void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
- void ReturnAccumulator() { PerformCommand(CMD_RETURN, nullptr); }
- void AsyncReturnAccumulator() { PerformCommand(CMD_ASYNC_RETURN, nullptr); }
- void ReThrowAccumulator() { PerformCommand(CMD_RETHROW, nullptr); }
+ void Break(Statement* stmt) {
+ PerformCommand(CMD_BREAK, stmt, kNoSourcePosition);
+ }
+ void Continue(Statement* stmt) {
+ PerformCommand(CMD_CONTINUE, stmt, kNoSourcePosition);
+ }
+ void ReturnAccumulator(int source_position = kNoSourcePosition) {
+ PerformCommand(CMD_RETURN, nullptr, source_position);
+ }
+ void AsyncReturnAccumulator(int source_position = kNoSourcePosition) {
+ PerformCommand(CMD_ASYNC_RETURN, nullptr, source_position);
+ }
class DeferredCommands;
@@ -120,8 +124,15 @@ class BytecodeGenerator::ControlScope BASE_EMBEDDED {
CMD_ASYNC_RETURN,
CMD_RETHROW
};
- void PerformCommand(Command command, Statement* statement);
- virtual bool Execute(Command command, Statement* statement) = 0;
+ void PerformCommand(Command command, Statement* statement,
+ int source_position);
+ virtual bool Execute(Command command, Statement* statement,
+ int source_position) = 0;
+
+ // Helper to pop the context chain to a depth expected by this control scope.
+ // Note that it is the responsibility of each individual {Execute} method to
+ // trigger this when commands are handled and control-flow continues locally.
+ void PopContextToExpectedDepth();
BytecodeGenerator* generator() const { return generator_; }
ControlScope* outer() const { return outer_; }
@@ -207,7 +218,8 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &fall_through);
builder()->LoadAccumulatorWithRegister(result_register_);
- execution_control()->PerformCommand(entry.command, entry.statement);
+ execution_control()->PerformCommand(entry.command, entry.statement,
+ kNoSourcePosition);
} else {
// For multiple entries, build a jump table and switch on the token,
// jumping to the fallthrough if none of them match.
@@ -222,7 +234,8 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
builder()
->Bind(jump_table, entry.token)
.LoadAccumulatorWithRegister(result_register_);
- execution_control()->PerformCommand(entry.command, entry.statement);
+ execution_control()->PerformCommand(entry.command, entry.statement,
+ kNoSourcePosition);
}
}
@@ -294,18 +307,22 @@ class BytecodeGenerator::ControlScopeForTopLevel final
: ControlScope(generator) {}
protected:
- bool Execute(Command command, Statement* statement) override {
+ bool Execute(Command command, Statement* statement,
+ int source_position) override {
switch (command) {
case CMD_BREAK: // We should never see break/continue in top-level.
case CMD_CONTINUE:
UNREACHABLE();
case CMD_RETURN:
- generator()->BuildReturn();
+ // No need to pop contexts, execution leaves the method body.
+ generator()->BuildReturn(source_position);
return true;
case CMD_ASYNC_RETURN:
- generator()->BuildAsyncReturn();
+ // No need to pop contexts, execution leaves the method body.
+ generator()->BuildAsyncReturn(source_position);
return true;
case CMD_RETHROW:
+ // No need to pop contexts, execution leaves the method body.
generator()->BuildReThrow();
return true;
}
@@ -325,10 +342,13 @@ class BytecodeGenerator::ControlScopeForBreakable final
control_builder_(control_builder) {}
protected:
- bool Execute(Command command, Statement* statement) override {
+ bool Execute(Command command, Statement* statement,
+ int source_position) override {
+ control_builder_->set_needs_continuation_counter();
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
+ PopContextToExpectedDepth();
control_builder_->Break();
return true;
case CMD_CONTINUE:
@@ -361,13 +381,16 @@ class BytecodeGenerator::ControlScopeForIteration final
~ControlScopeForIteration() { generator()->loop_depth_--; }
protected:
- bool Execute(Command command, Statement* statement) override {
+ bool Execute(Command command, Statement* statement,
+ int source_position) override {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
+ PopContextToExpectedDepth();
loop_builder_->Break();
return true;
case CMD_CONTINUE:
+ PopContextToExpectedDepth();
loop_builder_->Continue();
return true;
case CMD_RETURN:
@@ -392,7 +415,8 @@ class BytecodeGenerator::ControlScopeForTryCatch final
: ControlScope(generator) {}
protected:
- bool Execute(Command command, Statement* statement) override {
+ bool Execute(Command command, Statement* statement,
+ int source_position) override {
switch (command) {
case CMD_BREAK:
case CMD_CONTINUE:
@@ -400,6 +424,8 @@ class BytecodeGenerator::ControlScopeForTryCatch final
case CMD_ASYNC_RETURN:
break;
case CMD_RETHROW:
+ // No need to pop contexts, execution re-enters the method body via the
+ // stack unwinding mechanism which itself restores contexts correctly.
generator()->BuildReThrow();
return true;
}
@@ -419,13 +445,20 @@ class BytecodeGenerator::ControlScopeForTryFinally final
commands_(commands) {}
protected:
- bool Execute(Command command, Statement* statement) override {
+ bool Execute(Command command, Statement* statement,
+ int source_position) override {
switch (command) {
case CMD_BREAK:
case CMD_CONTINUE:
case CMD_RETURN:
case CMD_ASYNC_RETURN:
case CMD_RETHROW:
+ PopContextToExpectedDepth();
+        // We don't record source_position here because we don't generate the
+        // return bytecode at this point; it is emitted later as part of the
+        // finally block. Each return bytecode generated in the finally block
+        // gets its own return source position from the corresponding return
+        // statement, or from the end of the function if no return statement
+        // is present.
commands_->RecordCommand(command, statement);
try_finally_builder_->LeaveTry();
return true;
@@ -439,39 +472,50 @@ class BytecodeGenerator::ControlScopeForTryFinally final
};
void BytecodeGenerator::ControlScope::PerformCommand(Command command,
- Statement* statement) {
+ Statement* statement,
+ int source_position) {
ControlScope* current = this;
- ContextScope* context = generator()->execution_context();
- // Pop context to the expected depth but do not pop the outermost context.
- if (context != current->context() && context->ShouldPopContext()) {
- generator()->builder()->PopContext(current->context()->reg());
- }
do {
- if (current->Execute(command, statement)) {
+ if (current->Execute(command, statement, source_position)) {
return;
}
current = current->outer();
- if (current->context() != context && context->ShouldPopContext()) {
- // Pop context to the expected depth.
- // TODO(rmcilroy): Only emit a single context pop.
- generator()->builder()->PopContext(current->context()->reg());
- }
} while (current != nullptr);
UNREACHABLE();
}
-class BytecodeGenerator::RegisterAllocationScope {
+void BytecodeGenerator::ControlScope::PopContextToExpectedDepth() {
+ // Pop context to the expected depth. Note that this can in fact pop multiple
+ // contexts at once because the {PopContext} bytecode takes a saved register.
+ if (generator()->execution_context() != context()) {
+ generator()->builder()->PopContext(context()->reg());
+ }
+}
+
+class BytecodeGenerator::RegisterAllocationScope final {
public:
explicit RegisterAllocationScope(BytecodeGenerator* generator)
: generator_(generator),
outer_next_register_index_(
generator->register_allocator()->next_register_index()) {}
- virtual ~RegisterAllocationScope() {
+ ~RegisterAllocationScope() {
generator_->register_allocator()->ReleaseRegisters(
outer_next_register_index_);
}
+ // Enable the registers in |register_list| to escape into the parent register
+ // allocation scope.
+ void EscapeRegisterListToParent(RegisterList* register_list) {
+ DCHECK_LT(register_list->last_register().index(),
+ generator_->register_allocator()->next_register_index());
+ DCHECK_LE(register_list->first_register().index(),
+ outer_next_register_index_);
+ DCHECK_GE(register_list->last_register().index(),
+ outer_next_register_index_);
+ outer_next_register_index_ = register_list->last_register().index() + 1;
+ }
+
private:
BytecodeGenerator* generator_;
int outer_next_register_index_;
@@ -483,7 +527,9 @@ class BytecodeGenerator::RegisterAllocationScope {
// used.
class BytecodeGenerator::ExpressionResultScope {
public:
- ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
+ enum class Kind { kEffect, kValue, kTest, kAddition };
+
+ ExpressionResultScope(BytecodeGenerator* generator, Kind kind)
: generator_(generator),
outer_(generator->execution_result()),
allocator_(generator),
@@ -496,28 +542,37 @@ class BytecodeGenerator::ExpressionResultScope {
generator_->set_execution_result(outer_);
}
- bool IsEffect() const { return kind_ == Expression::kEffect; }
- bool IsValue() const { return kind_ == Expression::kValue; }
- bool IsTest() const { return kind_ == Expression::kTest; }
+ bool IsEffect() const { return kind_ == Kind::kEffect; }
+ bool IsValue() const { return kind_ == Kind::kValue; }
+ bool IsTest() const { return kind_ == Kind::kTest; }
+ bool IsAddition() const { return kind_ == Kind::kAddition; }
TestResultScope* AsTest() {
DCHECK(IsTest());
return reinterpret_cast<TestResultScope*>(this);
}
- // Specify expression always returns a Boolean result value.
- void SetResultIsBoolean() {
+ AdditionResultScope* AsAddition() {
+ DCHECK(IsAddition());
+ return reinterpret_cast<AdditionResultScope*>(this);
+ }
+
+ // Specify expression always returns a result value of type |hint|.
+ void set_type_hint(TypeHint hint) {
DCHECK(type_hint_ == TypeHint::kAny);
- type_hint_ = TypeHint::kBoolean;
+ type_hint_ = hint;
}
TypeHint type_hint() const { return type_hint_; }
+ protected:
+ RegisterAllocationScope* allocation_scope() { return &allocator_; }
+
private:
BytecodeGenerator* generator_;
ExpressionResultScope* outer_;
RegisterAllocationScope allocator_;
- Expression::Context kind_;
+ Kind kind_;
TypeHint type_hint_;
DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
@@ -529,7 +584,7 @@ class BytecodeGenerator::EffectResultScope final
: public ExpressionResultScope {
public:
explicit EffectResultScope(BytecodeGenerator* generator)
- : ExpressionResultScope(generator, Expression::kEffect) {}
+ : ExpressionResultScope(generator, Kind::kEffect) {}
};
// Scoped class used when the result of the current expression to be
@@ -537,7 +592,7 @@ class BytecodeGenerator::EffectResultScope final
class BytecodeGenerator::ValueResultScope final : public ExpressionResultScope {
public:
explicit ValueResultScope(BytecodeGenerator* generator)
- : ExpressionResultScope(generator, Expression::kValue) {}
+ : ExpressionResultScope(generator, Kind::kValue) {}
};
// Scoped class used when the result of the current expression to be
@@ -546,11 +601,11 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
public:
TestResultScope(BytecodeGenerator* generator, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough)
- : ExpressionResultScope(generator, Expression::kTest),
- then_labels_(then_labels),
- else_labels_(else_labels),
+ : ExpressionResultScope(generator, Kind::kTest),
+ result_consumed_by_test_(false),
fallthrough_(fallthrough),
- result_consumed_by_test_(false) {}
+ then_labels_(then_labels),
+ else_labels_(else_labels) {}
// Used when code special cases for TestResultScope and consumes any
// possible value by testing and jumping to a then/else label.
@@ -559,12 +614,26 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
}
bool result_consumed_by_test() { return result_consumed_by_test_; }
+ // Inverts the control flow of the operation, swapping the then and else
+ // labels and the fallthrough.
+ void InvertControlFlow() {
+ std::swap(then_labels_, else_labels_);
+ fallthrough_ = inverted_fallthrough();
+ }
+
BytecodeLabel* NewThenLabel() { return then_labels_->New(); }
BytecodeLabel* NewElseLabel() { return else_labels_->New(); }
BytecodeLabels* then_labels() const { return then_labels_; }
BytecodeLabels* else_labels() const { return else_labels_; }
+ void set_then_labels(BytecodeLabels* then_labels) {
+ then_labels_ = then_labels;
+ }
+ void set_else_labels(BytecodeLabels* else_labels) {
+ else_labels_ = else_labels;
+ }
+
TestFallthrough fallthrough() const { return fallthrough_; }
TestFallthrough inverted_fallthrough() const {
switch (fallthrough_) {
@@ -576,16 +645,50 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
return TestFallthrough::kNone;
}
}
+ void set_fallthrough(TestFallthrough fallthrough) {
+ fallthrough_ = fallthrough;
+ }
private:
+ bool result_consumed_by_test_;
+ TestFallthrough fallthrough_;
BytecodeLabels* then_labels_;
BytecodeLabels* else_labels_;
- TestFallthrough fallthrough_;
- bool result_consumed_by_test_;
DISALLOW_COPY_AND_ASSIGN(TestResultScope);
};
+// Scoped class used when the result of the current expression to be evaluated
+// will be used for an addition operation.
+class BytecodeGenerator::AdditionResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit AdditionResultScope(BytecodeGenerator* generator,
+ RegisterList* operand_registers)
+ : ExpressionResultScope(generator, Kind::kAddition),
+ result_deferred_until_concat_(false),
+ operand_registers_(operand_registers) {}
+
+ RegisterList* operand_registers() { return operand_registers_; }
+
+  // Used when code special-cases string concatenation: the values have been
+  // consumed as operand registers and the result won't be computed until the
+  // concat operation is performed.
+ void SetResultDeferredUntilConcat() {
+ // Ensure registers allocated on the growable operand register list are
+ // kept live when we return from this result scope.
+ allocation_scope()->EscapeRegisterListToParent(operand_registers_);
+ result_deferred_until_concat_ = true;
+ }
+ bool result_deferred_until_concat() { return result_deferred_until_concat_; }
+
+ private:
+ bool result_deferred_until_concat_;
+ RegisterList* operand_registers_;
+
+ DISALLOW_COPY_AND_ASSIGN(AdditionResultScope);
+};
+
// Used to build a list of global declaration initial value pairs.
class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
public:
@@ -707,6 +810,7 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
closure_scope_(info->scope()),
current_scope_(info->scope()),
globals_builder_(new (zone()) GlobalDeclarationsBuilder(info->zone())),
+ block_coverage_builder_(nullptr),
global_declarations_(0, info->zone()),
function_literals_(0, info->zone()),
native_function_literals_(0, info->zone()),
@@ -716,13 +820,28 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
execution_context_(nullptr),
execution_result_(nullptr),
generator_jump_table_(nullptr),
+ generator_object_(),
generator_state_(),
- loop_depth_(0) {
+ loop_depth_(0),
+ catch_prediction_(HandlerTable::UNCAUGHT) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
+ if (info->is_block_coverage_enabled()) {
+ DCHECK(FLAG_block_coverage);
+ block_coverage_builder_ = new (zone()) BlockCoverageBuilder(
+ zone(), builder(), info->parse_info()->source_range_map());
+ }
}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+
AllocateDeferredConstants(isolate);
+
+ if (info()->is_block_coverage_enabled()) {
+ info()->set_coverage_info(
+ isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots()));
+ }
+
if (HasStackOverflow()) return Handle<BytecodeArray>();
return builder()->ToBytecodeArray(isolate);
}
@@ -788,7 +907,7 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
InitializeAstVisitor(stack_limit);
// Initialize the incoming context.
- ContextScope incoming_context(this, closure_scope(), false);
+ ContextScope incoming_context(this, closure_scope());
// Initialize control scope.
ControlScopeForTopLevel control(this);
@@ -802,19 +921,14 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
if (closure_scope()->NeedsContext()) {
// Push a new inner context scope for the function.
BuildNewLocalActivationContext();
- ContextScope local_function_context(this, closure_scope(), false);
+ ContextScope local_function_context(this, closure_scope());
BuildLocalActivationContextInitialization();
GenerateBytecodeBody();
} else {
GenerateBytecodeBody();
}
- // Emit an implicit return instruction in case control flow can fall off the
- // end of the function without an explicit return being present on all paths.
- if (builder()->RequiresImplicitReturn()) {
- builder()->LoadUndefined();
- BuildReturn();
- }
+ // Check that we are not falling off the end.
DCHECK(!builder()->RequiresImplicitReturn());
}
@@ -862,18 +976,31 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Visit statements in the function body.
VisitStatements(info()->literal()->body());
+
+ // Emit an implicit return instruction in case control flow can fall off the
+ // end of the function without an explicit return being present on all paths.
+ if (builder()->RequiresImplicitReturn()) {
+ builder()->LoadUndefined();
+ BuildReturn();
+ }
}
void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
LoopBuilder* loop_builder) {
- // Recall that stmt->yield_count() is always zero inside ordinary
- // (i.e. non-generator) functions.
- if (stmt->suspend_count() == 0) {
+ VisitIterationHeader(stmt->first_suspend_id(), stmt->suspend_count(),
+ loop_builder);
+}
+
+void BytecodeGenerator::VisitIterationHeader(int first_suspend_id,
+ int suspend_count,
+ LoopBuilder* loop_builder) {
+ // Recall that suspend_count is always zero inside ordinary (i.e.
+ // non-generator) functions.
+ if (suspend_count == 0) {
loop_builder->LoopHeader();
} else {
- loop_builder->LoopHeaderInGenerator(
- &generator_jump_table_, static_cast<int>(stmt->first_suspend_id()),
- static_cast<int>(stmt->suspend_count()));
+ loop_builder->LoopHeaderInGenerator(&generator_jump_table_,
+ first_suspend_id, suspend_count);
// Perform state dispatch on the generator state, assuming this is a resume.
builder()
@@ -900,6 +1027,7 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
void BytecodeGenerator::BuildGeneratorPrologue() {
DCHECK_GT(info()->literal()->suspend_count(), 0);
+ generator_object_ = register_allocator()->NewRegister();
generator_state_ = register_allocator()->NewRegister();
generator_jump_table_ =
builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0);
@@ -908,21 +1036,25 @@ void BytecodeGenerator::BuildGeneratorPrologue() {
// indicate that this is a resume call and to pass in the generator object.
// In ordinary calls, new.target is always undefined because generator
// functions are non-constructable.
- Register generator_object = Register::new_target();
+ builder()->MoveRegister(Register::new_target(), generator_object_);
+
BytecodeLabel regular_call;
builder()
- ->LoadAccumulatorWithRegister(generator_object)
+ ->LoadAccumulatorWithRegister(generator_object_)
.JumpIfUndefined(&regular_call);
// This is a resume call. Restore the current context and the registers,
// then perform state dispatch.
- Register generator_context = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object)
- .PushContext(generator_context)
- .ResumeGenerator(generator_object)
- .StoreAccumulatorInRegister(generator_state_)
- .SwitchOnSmiNoFeedback(generator_jump_table_);
+ {
+ RegisterAllocationScope register_scope(this);
+ Register generator_context = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object_)
+ .PushContext(generator_context)
+ .RestoreGeneratorState(generator_object_)
+ .StoreAccumulatorInRegister(generator_state_)
+ .SwitchOnSmiNoFeedback(generator_jump_table_);
+ }
// We fall through when the generator state is not in the jump table.
// TODO(leszeks): Only generate this for debug builds.
BuildAbort(BailoutReason::kInvalidJumpTableIndex);
@@ -950,13 +1082,13 @@ void BytecodeGenerator::VisitBlock(Block* stmt) {
}
void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
- BlockBuilder block_builder(builder());
+ BlockBuilder block_builder(builder(), block_coverage_builder_, stmt);
ControlScopeForBreakable execution_control(this, stmt, &block_builder);
if (stmt->scope() != nullptr) {
VisitDeclarations(stmt->scope()->declarations());
}
VisitStatements(stmt->statements());
- if (stmt->labels() != nullptr) block_builder.EndBlock();
+ block_builder.EndBlock();
}
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
@@ -1122,12 +1254,20 @@ void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
builder()->SetStatementPosition(stmt);
+
+ int then_slot =
+ AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kThen);
+ int else_slot =
+ AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kElse);
+
if (stmt->condition()->ToBooleanIsTrue()) {
// Generate then block unconditionally as always true.
+ BuildIncrementBlockCoverageCounterIfEnabled(then_slot);
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
// Generate else block unconditionally if it exists.
if (stmt->HasElseStatement()) {
+ BuildIncrementBlockCoverageCounterIfEnabled(else_slot);
Visit(stmt->else_statement());
}
} else {
@@ -1140,17 +1280,21 @@ void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
TestFallthrough::kThen);
then_labels.Bind(builder());
+ BuildIncrementBlockCoverageCounterIfEnabled(then_slot);
Visit(stmt->then_statement());
if (stmt->HasElseStatement()) {
builder()->Jump(&end_label);
else_labels.Bind(builder());
+ BuildIncrementBlockCoverageCounterIfEnabled(else_slot);
Visit(stmt->else_statement());
} else {
else_labels.Bind(builder());
}
builder()->Bind(&end_label);
}
+ BuildIncrementBlockCoverageCounterIfEnabled(stmt,
+ SourceRangeKind::kContinuation);
}
void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
@@ -1159,22 +1303,25 @@ void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
}
void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
builder()->SetStatementPosition(stmt);
execution_control()->Continue(stmt->target());
}
void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
builder()->SetStatementPosition(stmt);
execution_control()->Break(stmt->target());
}
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ AllocateBlockCoverageSlotIfEnabled(stmt, SourceRangeKind::kContinuation);
builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
if (stmt->is_async_return()) {
- execution_control()->AsyncReturnAccumulator();
+ execution_control()->AsyncReturnAccumulator(stmt->end_position());
} else {
- execution_control()->ReturnAccumulator();
+ execution_control()->ReturnAccumulator(stmt->end_position());
}
}
@@ -1229,9 +1376,12 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
switch_builder.SetCaseTarget(i);
+ BuildIncrementBlockCoverageCounterIfEnabled(clause, SourceRangeKind::kBody);
VisitStatements(clause->statements());
}
switch_builder.BindBreakTarget();
+ BuildIncrementBlockCoverageCounterIfEnabled(stmt,
+ SourceRangeKind::kContinuation);
}
void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
@@ -1241,6 +1391,7 @@ void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
+ loop_builder->LoopBody();
ControlScopeForIteration execution_control(this, stmt, loop_builder);
builder()->StackCheck(stmt->position());
Visit(stmt->body());
@@ -1248,7 +1399,7 @@ void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- LoopBuilder loop_builder(builder());
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
if (stmt->cond()->ToBooleanIsFalse()) {
VisitIterationBody(stmt, &loop_builder);
} else if (stmt->cond()->ToBooleanIsTrue()) {
@@ -1268,12 +1419,13 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
+
if (stmt->cond()->ToBooleanIsFalse()) {
// If the condition is false there is no need to generate the loop.
return;
}
- LoopBuilder loop_builder(builder());
VisitIterationHeader(stmt, &loop_builder);
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
@@ -1287,6 +1439,8 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
+
if (stmt->init() != nullptr) {
Visit(stmt->init());
}
@@ -1296,7 +1450,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
return;
}
- LoopBuilder loop_builder(builder());
VisitIterationHeader(stmt, &loop_builder);
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
@@ -1395,7 +1548,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->JumpIfUndefined(&subject_undefined_label);
builder()->JumpIfNull(&subject_null_label);
Register receiver = register_allocator()->NewRegister();
- builder()->ConvertAccumulatorToObject(receiver);
+ builder()->ToObject(receiver);
// Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
RegisterList triple = register_allocator()->NewRegisterList(3);
@@ -1409,7 +1562,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
{
- LoopBuilder loop_builder(builder());
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
@@ -1429,7 +1582,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- LoopBuilder loop_builder(builder());
+ LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
@@ -1446,7 +1599,13 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- TryCatchBuilder try_control_builder(builder(), stmt->catch_prediction());
+ // Update catch prediction tracking. The updated catch_prediction value lasts
+ // until the end of the try_block in the AST node, and does not apply to the
+ // catch_block.
+ HandlerTable::CatchPrediction outer_catch_prediction = catch_prediction();
+ set_catch_prediction(stmt->GetCatchPrediction(outer_catch_prediction));
+
+ TryCatchBuilder try_control_builder(builder(), catch_prediction());
// Preserve the context in a dedicated register, so that it can be restored
// when the handler is entered by the stack-unwinding machinery.
@@ -1460,6 +1619,7 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
{
ControlScopeForTryCatch scope(this, &try_control_builder);
Visit(stmt->try_block());
+ set_catch_prediction(outer_catch_prediction);
}
try_control_builder.EndTry();
@@ -1468,7 +1628,7 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
builder()->StoreAccumulatorInRegister(context);
// If requested, clear message object as we enter the catch block.
- if (stmt->clear_pending_message()) {
+ if (stmt->ShouldClearPendingException(outer_catch_prediction)) {
builder()->LoadTheHole().SetPendingMessage();
}
@@ -1476,12 +1636,15 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
builder()->LoadAccumulatorWithRegister(context);
// Evaluate the catch-block.
+ BuildIncrementBlockCoverageCounterIfEnabled(stmt, SourceRangeKind::kCatch);
VisitInScope(stmt->catch_block(), stmt->scope());
try_control_builder.EndCatch();
}
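As a rough JavaScript illustration of the prediction scoping above (function names are hypothetical), the adjusted prediction covers only code lexically inside the try block:
// try {
//   mayThrow();  // compiled with the prediction derived from this
//                // TryCatchStatement
// } catch (e) {
//   handle(e);   // compiled with the outer prediction again; a throw
// }              // here is not considered handled by this catch block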
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- TryFinallyBuilder try_control_builder(builder(), stmt->catch_prediction());
+ // We can't know whether the finally block will override ("catch") an
+  // exception thrown in the try block, so we just adopt the outer prediction.
+ TryFinallyBuilder try_control_builder(builder(), catch_prediction());
// We keep a record of all paths that enter the finally-block to be able to
// dispatch to the correct continuation point after the statements in the
@@ -1532,6 +1695,7 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
message);
// Evaluate the finally-block.
+ BuildIncrementBlockCoverageCounterIfEnabled(stmt, SourceRangeKind::kFinally);
Visit(stmt->finally_block());
try_control_builder.EndFinally();
@@ -1687,7 +1851,7 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
void BytecodeGenerator::BuildClassLiteralNameProperty(ClassLiteral* expr,
Register literal) {
if (!expr->has_name_static_property() &&
- !expr->constructor()->raw_name()->IsEmpty()) {
+ expr->constructor()->has_shared_name()) {
Runtime::FunctionId runtime_id =
expr->has_static_computed_names()
? Runtime::kInstallClassNameAccessorWithCheck
@@ -1710,12 +1874,19 @@ void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
}
void BytecodeGenerator::VisitConditional(Conditional* expr) {
+ int then_slot =
+ AllocateBlockCoverageSlotIfEnabled(expr, SourceRangeKind::kThen);
+ int else_slot =
+ AllocateBlockCoverageSlotIfEnabled(expr, SourceRangeKind::kElse);
+
if (expr->condition()->ToBooleanIsTrue()) {
// Generate then block unconditionally as always true.
VisitForAccumulatorValue(expr->then_expression());
+ BuildIncrementBlockCoverageCounterIfEnabled(then_slot);
} else if (expr->condition()->ToBooleanIsFalse()) {
// Generate else block unconditionally if it exists.
VisitForAccumulatorValue(expr->else_expression());
+ BuildIncrementBlockCoverageCounterIfEnabled(else_slot);
} else {
BytecodeLabel end_label;
BytecodeLabels then_labels(zone()), else_labels(zone());
@@ -1724,10 +1895,12 @@ void BytecodeGenerator::VisitConditional(Conditional* expr) {
TestFallthrough::kThen);
then_labels.Bind(builder());
+ BuildIncrementBlockCoverageCounterIfEnabled(then_slot);
VisitForAccumulatorValue(expr->then_expression());
builder()->Jump(&end_label);
else_labels.Bind(builder());
+ BuildIncrementBlockCoverageCounterIfEnabled(else_slot);
VisitForAccumulatorValue(expr->else_expression());
builder()->Bind(&end_label);
}
@@ -1737,8 +1910,10 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
if (!execution_result()->IsEffect()) {
const AstValue* raw_value = expr->raw_value();
builder()->LoadLiteral(raw_value);
- if (raw_value->IsTrue() || raw_value->IsFalse()) {
- execution_result()->SetResultIsBoolean();
+ if (raw_value->IsString()) {
+ execution_result()->set_type_hint(TypeHint::kString);
+ } else if (raw_value->IsTrue() || raw_value->IsFalse()) {
+ execution_result()->set_type_hint(TypeHint::kBoolean);
}
}
}
@@ -1831,6 +2006,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// __proto__:null is handled by CreateObjectLiteral.
if (property->IsNullPrototype()) break;
DCHECK(property->emit_store());
+ DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
VisitForRegisterValue(property->value(), args[1]);
@@ -1883,6 +2059,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// __proto__:null is handled by CreateObjectLiteral.
if (property->IsNullPrototype()) continue;
DCHECK(property->emit_store());
+ DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
VisitForRegisterValue(property->value(), args[1]);
@@ -2105,7 +2282,7 @@ void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
}
-void BytecodeGenerator::BuildReturn() {
+void BytecodeGenerator::BuildReturn(int source_position) {
if (FLAG_trace) {
RegisterAllocationScope register_scope(this);
Register result = register_allocator()->NewRegister();
@@ -2116,10 +2293,25 @@ void BytecodeGenerator::BuildReturn() {
if (info()->literal()->feedback_vector_spec()->HasTypeProfileSlot()) {
builder()->CollectTypeProfile(info()->literal()->return_position());
}
+ if (IsAsyncGeneratorFunction(info()->literal()->kind())) {
+ // Mark the generator as closed if returning from an async generator
+ // function. Note that non-async generators are closed by the
+ // generator-resume builtin.
+
+ // TODO(jarin,caitp) Move the async generator closing to the resume
+ // builtin.
+ RegisterAllocationScope register_scope(this);
+ Register result = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(result)
+ .CallRuntime(Runtime::kInlineGeneratorClose, generator_object_)
+ .LoadAccumulatorWithRegister(result);
+ }
+ builder()->SetReturnPosition(source_position, info()->literal());
builder()->Return();
}
-void BytecodeGenerator::BuildAsyncReturn() {
+void BytecodeGenerator::BuildAsyncReturn(int source_position) {
RegisterAllocationScope register_scope(this);
if (IsAsyncGeneratorFunction(info()->literal()->kind())) {
@@ -2158,7 +2350,7 @@ void BytecodeGenerator::BuildAsyncReturn() {
.LoadAccumulatorWithRegister(promise);
}
- BuildReturn();
+ BuildReturn(source_position);
}
void BytecodeGenerator::BuildReThrow() { builder()->ReThrow(); }
@@ -2172,25 +2364,14 @@ void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
.CallRuntime(Runtime::kAbort, reason);
}
-void BytecodeGenerator::BuildThrowReferenceError(const AstRawString* name) {
- RegisterAllocationScope register_scope(this);
- Register name_reg = register_allocator()->NewRegister();
- builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
- Runtime::kThrowReferenceError, name_reg);
-}
void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
- BytecodeLabel no_reference_error;
- builder()->JumpIfNotHole(&no_reference_error);
-
if (variable->is_this()) {
DCHECK(variable->mode() == CONST);
- builder()->CallRuntime(Runtime::kThrowSuperNotCalled);
+ builder()->ThrowSuperNotCalledIfHole();
} else {
- BuildThrowReferenceError(variable->raw_name());
+ builder()->ThrowReferenceErrorIfHole(variable->raw_name());
}
-
- builder()->Bind(&no_reference_error);
}
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
@@ -2199,13 +2380,7 @@ void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
- BytecodeLabel no_reference_error, reference_error;
- builder()
- ->JumpIfNotHole(&reference_error)
- .Jump(&no_reference_error)
- .Bind(&reference_error)
- .CallRuntime(Runtime::kThrowSuperAlreadyCalledError)
- .Bind(&no_reference_error);
+ builder()->ThrowSuperAlreadyCalledIfNotHole();
} else {
// Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
@@ -2214,10 +2389,9 @@ void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
}
}
-void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
- Token::Value op,
- FeedbackSlot slot,
- HoleCheckMode hole_check_mode) {
+void BytecodeGenerator::BuildVariableAssignment(
+ Variable* variable, Token::Value op, FeedbackSlot slot,
+ HoleCheckMode hole_check_mode, LookupHoistingMode lookup_hoisting_mode) {
VariableMode mode = variable->mode();
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
@@ -2290,7 +2464,8 @@ void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
break;
}
case VariableLocation::LOOKUP: {
- builder()->StoreLookupSlot(variable->raw_name(), language_mode());
+ builder()->StoreLookupSlot(variable->raw_name(), language_mode(),
+ lookup_hoisting_mode);
break;
}
case VariableLocation::MODULE: {
@@ -2428,7 +2603,8 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Is the value in the accumulator safe? Yes, but scary.
VariableProxy* proxy = expr->target()->AsVariableProxy();
BuildVariableAssignment(proxy->var(), expr->op(), slot,
- proxy->hole_check_mode());
+ proxy->hole_check_mode(),
+ expr->lookup_hoisting_mode());
break;
}
case NAMED_PROPERTY:
@@ -2454,145 +2630,472 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void BytecodeGenerator::BuildGeneratorSuspend(Suspend* expr,
- Register generator) {
+void BytecodeGenerator::BuildGeneratorSuspend(Suspend* expr, Register value,
+ RegisterList registers_to_save) {
RegisterAllocationScope register_scope(this);
- builder()->SetExpressionPosition(expr);
- Register value = VisitForRegisterValue(expr->expression());
-
// Save context, registers, and state. Then return.
builder()
->LoadLiteral(Smi::FromInt(expr->suspend_id()))
- .SuspendGenerator(generator, expr->flags());
+ .SuspendGenerator(generator_object_, registers_to_save);
- if (expr->IsNonInitialAsyncGeneratorYield()) {
- // AsyncGenerator yields (with the exception of the initial yield) delegate
- // to AsyncGeneratorResolve(), implemented via the runtime call below.
+ if (expr->IsInitialYield() || !expr->IsYield()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ } else if (IsAsyncGeneratorFunction(function_kind())) {
+ // AsyncGenerator yields (with the exception of the initial yield)
+ // delegate to AsyncGeneratorResolve(), implemented via the runtime call
+ // below.
RegisterList args = register_allocator()->NewRegisterList(3);
-
- // AsyncGeneratorYield:
- // perform AsyncGeneratorResolve(<generator>, <value>, false).
builder()
- ->MoveRegister(generator, args[0])
+ ->MoveRegister(generator_object_, args[0])
.MoveRegister(value, args[1])
.LoadFalse()
.StoreAccumulatorInRegister(args[2])
.CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
- builder()->LoadAccumulatorWithRegister(value);
+ // Generator yields (with the exception of the initial yield) wrap the
+ // value into IteratorResult.
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(value, args[0])
+ .LoadFalse()
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
}
+ builder()->SetReturnPosition(kNoSourcePosition, info()->literal());
builder()->Return(); // Hard return (ignore any finally blocks).
}
-void BytecodeGenerator::BuildGeneratorResume(Suspend* expr,
- Register generator) {
+void BytecodeGenerator::BuildGeneratorResume(
+ Suspend* expr, RegisterList registers_to_restore) {
RegisterAllocationScope register_scope(this);
+ // Clobbers all registers.
+ builder()->RestoreGeneratorRegisters(generator_object_, registers_to_restore);
+
// Update state to indicate that we have finished resuming. Loop headers
// rely on this.
builder()
->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
.StoreAccumulatorInRegister(generator_state_);
+ // When resuming execution of a generator, module or async function, the sent
+ // value is in the [[input_or_debug_pos]] slot.
+ builder()->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
+ generator_object_);
+}
+
+void BytecodeGenerator::BuildAbruptResume(Suspend* expr) {
+ RegisterAllocationScope register_scope(this);
+
Register input = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(input);
- // When resuming an Async Generator from an Await expression, the sent
- // value is in the [[await_input_or_debug_pos]] slot. Otherwise, the sent
- // value is in the [[input_or_debug_pos]] slot.
- Runtime::FunctionId get_generator_input =
- expr->is_async_generator() && expr->is_await()
- ? Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos
- : Runtime::kInlineGeneratorGetInputOrDebugPos;
+ builder()->CallRuntime(Runtime::kInlineGeneratorGetResumeMode,
+ generator_object_);
- DCHECK(generator.is_valid());
- builder()
- ->CallRuntime(get_generator_input, generator)
- .StoreAccumulatorInRegister(input);
+ // Now dispatch on resume mode.
+ STATIC_ASSERT(JSGeneratorObject::kNext + 1 == JSGeneratorObject::kReturn);
+ BytecodeJumpTable* jump_table =
+ builder()->AllocateJumpTable(2, JSGeneratorObject::kNext);
+ builder()->SwitchOnSmiNoFeedback(jump_table);
+
+ {
+ // Resume with throw (switch fallthrough).
+ // TODO(leszeks): Add a debug-only check that the accumulator is
+ // JSGeneratorObject::kThrow.
+ builder()->SetExpressionPosition(expr);
+ builder()->LoadAccumulatorWithRegister(input);
+ if (expr->rethrow_on_exception()) {
+ builder()->ReThrow();
+ } else {
+ builder()->Throw();
+ }
+ }
+
+ {
+ // Resume with return.
+ builder()->Bind(jump_table, JSGeneratorObject::kReturn);
+ builder()->LoadAccumulatorWithRegister(input);
+ if (IsAsyncGeneratorFunction(function_kind())) {
+ // Async generator methods will produce the iter result object.
+ execution_control()->AsyncReturnAccumulator();
+ } else {
+ execution_control()->ReturnAccumulator();
+ }
+ }
+
+ {
+ // Resume with next.
+ builder()->Bind(jump_table, JSGeneratorObject::kNext);
+ builder()->LoadAccumulatorWithRegister(input);
+ }
+}
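The three resume modes map onto the ways a suspended generator can be re-entered from user code; a minimal sketch (illustrative only):
// function* g() { const v = yield 1; }
// const it = g();
// it.next();              // runs to the yield and suspends
// // Resuming then takes one of three forms:
// it.next(42);            // kNext:   v becomes 42, execution continues
// it.return(7);           // kReturn: acts like `return 7` at the yield
// it.throw(new Error());  // kThrow:  acts like `throw` at the yield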
+
+void BytecodeGenerator::VisitYield(Yield* expr) {
+ RegisterList registers(0, register_allocator()->next_register_index());
+
+ {
+ RegisterAllocationScope scope(this);
+ builder()->SetExpressionPosition(expr);
+ Register value = VisitForRegisterValue(expr->expression());
+
+ BuildGeneratorSuspend(expr, value, registers);
+ }
+ builder()->Bind(generator_jump_table_, static_cast<int>(expr->suspend_id()));
+ // Upon resume, we continue here.
+ BuildGeneratorResume(expr, registers);
+ if (expr->on_abrupt_resume() != Suspend::kNoControl) BuildAbruptResume(expr);
+}
+
+// Desugaring of (yield* iterable)
+//
+// do {
+// const kNext = 0;
+// const kReturn = 1;
+// const kThrow = 2;
+//
+// let output; // uninitialized
+//
+// let iterator = GetIterator(iterable);
+// let input = undefined;
+// let resumeMode = kNext;
+//
+// while (true) {
+// // From the generator to the iterator:
+// // Forward input according to resumeMode and obtain output.
+// switch (resumeMode) {
+// case kNext:
+// output = iterator.next(input);
+// break;
+// case kReturn:
+// let iteratorReturn = iterator.return;
+// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+// output = %_Call(iteratorReturn, iterator, input);
+// break;
+// case kThrow:
+// let iteratorThrow = iterator.throw;
+// if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
+// let iteratorReturn = iterator.return;
+// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+// output = %_Call(iteratorReturn, iterator);
+// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+// }
+// throw MakeTypeError(kThrowMethodMissing);
+// }
+// output = %_Call(iteratorThrow, iterator, input);
+// break;
+// }
+// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+// if (output.done) break;
+//
+// // From the generator to its user:
+// // Forward output, receive new input, and determine resume mode.
+// input = Suspend(output);
+// resumeMode = %GeneratorGetResumeMode();
+// }
+//
+// if (resumeMode === kReturn) {
+// return output.value;
+// }
+// output.value
+// }
+void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
+ // TODO(tebbi): Also desugar async generator yield* in the BytecodeGenerator.
+ DCHECK(!IsAsyncGeneratorFunction(function_kind()));
+
+ Register output = register_allocator()->NewRegister();
Register resume_mode = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
- .StoreAccumulatorInRegister(resume_mode);
- // Now dispatch on resume mode.
+ {
+ RegisterAllocationScope register_scope(this);
+
+ RegisterList iterator_and_input = register_allocator()->NewRegisterList(2);
+
+ Register iterator = iterator_and_input[0];
+ BuildGetIterator(expr->expression(), IteratorType::kNormal,
+ expr->load_iterable_iterator_slot(),
+ expr->call_iterable_iterator_slot(),
+ FeedbackSlot::Invalid(), FeedbackSlot::Invalid());
+ builder()->StoreAccumulatorInRegister(iterator);
+ Register input = iterator_and_input[1];
+ builder()->LoadUndefined().StoreAccumulatorInRegister(input);
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+ .StoreAccumulatorInRegister(resume_mode);
+
+ {
+ LoopBuilder loop(builder(), block_coverage_builder_, expr);
+ VisitIterationHeader(expr->suspend_id(), 1, &loop);
+
+ {
+ BytecodeLabels after_switch(zone());
+ BytecodeJumpTable* switch_jump_table =
+ builder()->AllocateJumpTable(2, 1);
+
+ builder()
+ ->LoadAccumulatorWithRegister(resume_mode)
+ .SwitchOnSmiNoFeedback(switch_jump_table);
+
+ // Fallthrough to default case.
+ // TODO(tebbi): Add debug code to check that {resume_mode} really is
+ // {JSGeneratorObject::kNext} in this case.
+ STATIC_ASSERT(JSGeneratorObject::kNext == 0);
+ {
+ RegisterAllocationScope register_scope(this);
+ // output = iterator.next(input);
+ Register iterator_next = register_allocator()->NewRegister();
+ builder()
+ ->LoadNamedProperty(
+ iterator, ast_string_constants()->next_string(),
+ feedback_index(expr->load_iterator_next_slot()))
+ .StoreAccumulatorInRegister(iterator_next)
+ .CallProperty(iterator_next, iterator_and_input,
+ feedback_index(expr->call_iterator_next_slot()))
+ .Jump(after_switch.New());
+ }
+
+ STATIC_ASSERT(JSGeneratorObject::kReturn == 1);
+ builder()->Bind(switch_jump_table, JSGeneratorObject::kReturn);
+ {
+ RegisterAllocationScope register_scope(this);
+ BytecodeLabels return_input(zone());
+ // Trigger return from within the inner iterator.
+ Register iterator_return = register_allocator()->NewRegister();
+ builder()
+ ->LoadNamedProperty(
+ iterator, ast_string_constants()->return_string(),
+ feedback_index(expr->load_iterator_return_slot()))
+ .JumpIfUndefined(return_input.New())
+ .JumpIfNull(return_input.New())
+ .StoreAccumulatorInRegister(iterator_return)
+ .CallProperty(iterator_return, iterator_and_input,
+ feedback_index(expr->call_iterator_return_slot1()))
+ .Jump(after_switch.New());
+
+ return_input.Bind(builder());
+ {
+ builder()->LoadAccumulatorWithRegister(input);
+ execution_control()->ReturnAccumulator();
+ }
+ }
+
+ STATIC_ASSERT(JSGeneratorObject::kThrow == 2);
+ builder()->Bind(switch_jump_table, JSGeneratorObject::kThrow);
+ {
+ BytecodeLabels iterator_throw_is_undefined(zone());
+ {
+ RegisterAllocationScope register_scope(this);
+ // If the inner iterator has a throw method, use it to trigger an
+ // exception inside.
+ Register iterator_throw = register_allocator()->NewRegister();
+ builder()
+ ->LoadNamedProperty(
+ iterator, ast_string_constants()->throw_string(),
+ feedback_index(expr->load_iterator_throw_slot()))
+ .JumpIfUndefined(iterator_throw_is_undefined.New())
+ .JumpIfNull(iterator_throw_is_undefined.New())
+ .StoreAccumulatorInRegister(iterator_throw);
+ builder()
+ ->CallProperty(iterator_throw, iterator_and_input,
+ feedback_index(expr->call_iterator_throw_slot()))
+ .Jump(after_switch.New());
+ }
- BytecodeLabel resume_with_next;
- BytecodeLabel resume_with_throw;
+ iterator_throw_is_undefined.Bind(builder());
+ {
+ RegisterAllocationScope register_scope(this);
+ BytecodeLabels throw_throw_method_missing(zone());
+ Register iterator_return = register_allocator()->NewRegister();
+ // If iterator.throw does not exist, try to use iterator.return to
+ // inform the iterator that it should stop.
+ builder()
+ ->LoadNamedProperty(
+ iterator, ast_string_constants()->return_string(),
+ feedback_index(expr->load_iterator_return_slot()))
+ .StoreAccumulatorInRegister(iterator_return);
+ builder()
+ ->JumpIfUndefined(throw_throw_method_missing.New())
+ .JumpIfNull(throw_throw_method_missing.New())
+ .CallProperty(
+ iterator_return, RegisterList(iterator),
+ feedback_index(expr->call_iterator_return_slot2()))
+ .JumpIfJSReceiver(throw_throw_method_missing.New())
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, output);
+
+ throw_throw_method_missing.Bind(builder());
+ builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
+ }
+ }
+ after_switch.Bind(builder());
+ }
+
+ // Check that output is an object.
+ BytecodeLabel check_if_done;
+ builder()
+ ->StoreAccumulatorInRegister(output)
+ .JumpIfJSReceiver(&check_if_done)
+ .CallRuntime(Runtime::kThrowIteratorResultNotAnObject, output);
+
+ builder()->Bind(&check_if_done);
+ // Break once output.done is true.
+ builder()->LoadNamedProperty(
+ output, ast_string_constants()->done_string(),
+ feedback_index(expr->load_output_done_slot()));
+
+ loop.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
+
+ // Suspend the current generator.
+ RegisterList registers(0, register_allocator()->next_register_index());
+ BuildGeneratorSuspend(expr, output, registers);
+ builder()->Bind(generator_jump_table_,
+ static_cast<int>(expr->suspend_id()));
+ // Upon resume, we continue here.
+ BuildGeneratorResume(expr, registers);
+ builder()->StoreAccumulatorInRegister(input);
+ builder()
+ ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode,
+ generator_object_)
+ .StoreAccumulatorInRegister(resume_mode);
+
+ loop.BindContinueTarget();
+ loop.JumpToHeader(loop_depth_);
+ }
+ }
+
+ // Decide if we trigger a return or if the yield* expression should just
+ // produce a value.
+ BytecodeLabel completion_is_output_value;
+ Register output_value = register_allocator()->NewRegister();
builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
- .CompareOperation(Token::EQ_STRICT, resume_mode)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_next)
- .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
+ ->LoadNamedProperty(output, ast_string_constants()->value_string(),
+ feedback_index(expr->load_output_value_slot()))
+ .StoreAccumulatorInRegister(output_value)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kReturn))
.CompareOperation(Token::EQ_STRICT, resume_mode)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_throw);
- // Fall through for resuming with return.
+ .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &completion_is_output_value)
+ .LoadAccumulatorWithRegister(output_value);
+ execution_control()->ReturnAccumulator();
+
+ builder()->Bind(&completion_is_output_value);
+ builder()->LoadAccumulatorWithRegister(output_value);
+}
+
+void BytecodeGenerator::VisitAwait(Await* expr) {
+ // Rather than HandlerTable::UNCAUGHT, async functions use
+ // HandlerTable::ASYNC_AWAIT to communicate that top-level exceptions are
+ // transformed into promise rejections. This is necessary to prevent emitting
+  // multiple debug events for the same uncaught exception. There is no point
+ // in the body of an async function where catch prediction is
+ // HandlerTable::UNCAUGHT.
+ DCHECK(catch_prediction() != HandlerTable::UNCAUGHT);
+
+ builder()->SetExpressionPosition(expr);
+ Register operand = VisitForRegisterValue(expr->expression());
+ RegisterList registers(0, operand.index());
+
+ {
+ // Await(operand) and suspend.
+ RegisterAllocationScope register_scope(this);
+
+ int await_builtin_context_index;
+ RegisterList args;
+ if (IsAsyncGeneratorFunction(function_kind())) {
+ await_builtin_context_index =
+ catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT
+ : Context::ASYNC_GENERATOR_AWAIT_CAUGHT;
+ args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(generator_object_, args[0])
+ .MoveRegister(operand, args[1]);
+ } else {
+ await_builtin_context_index =
+ catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX
+ : Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX;
+ args = register_allocator()->NewRegisterList(3);
+ builder()
+ ->MoveRegister(generator_object_, args[0])
+ .MoveRegister(operand, args[1]);
+
+ // AsyncFunction Await builtins require a 3rd parameter to hold the outer
+ // promise.
+ Variable* var_promise = closure_scope()->promise_var();
+ BuildVariableLoadForAccumulatorValue(var_promise, FeedbackSlot::Invalid(),
+ HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(args[2]);
+ }
- if (expr->is_async_generator()) {
- // Async generator methods will produce the iter result object.
- builder()->LoadAccumulatorWithRegister(input);
- execution_control()->AsyncReturnAccumulator();
- } else {
- RegisterList args = register_allocator()->NewRegisterList(2);
builder()
- ->MoveRegister(input, args[0])
- .LoadTrue()
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
- execution_control()->ReturnAccumulator();
+ ->CallJSRuntime(await_builtin_context_index, args)
+ .StoreAccumulatorInRegister(operand);
+
+ BuildGeneratorSuspend(expr, operand, registers);
}
- builder()->Bind(&resume_with_throw);
+ builder()->Bind(generator_jump_table_, static_cast<int>(expr->suspend_id()));
+
+ // Upon resume, we continue here, with received value in accumulator.
+ BuildGeneratorResume(expr, registers);
+
+ Register input = register_allocator()->NewRegister();
+ Register resume_mode = register_allocator()->NewRegister();
+
+ // Now dispatch on resume mode.
+ BytecodeLabel resume_next;
+ builder()
+ ->StoreAccumulatorInRegister(input)
+ .CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator_object_)
+ .StoreAccumulatorInRegister(resume_mode)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_next);
+
+ // Resume with "throw" completion (rethrow the received value).
+ // TODO(leszeks): Add a debug-only check that the accumulator is
+ // JSGeneratorObject::kThrow.
builder()->SetExpressionPosition(expr);
- builder()->LoadAccumulatorWithRegister(input);
- if (expr->rethrow_on_exception()) {
- builder()->ReThrow();
- } else {
- builder()->Throw();
- }
+ builder()->LoadAccumulatorWithRegister(input).ReThrow();
- builder()->Bind(&resume_with_next);
+ // Resume with next.
+ builder()->Bind(&resume_next);
builder()->LoadAccumulatorWithRegister(input);
}
-void BytecodeGenerator::VisitSuspend(Suspend* expr) {
- Register generator = VisitForRegisterValue(expr->generator_object());
- BuildGeneratorSuspend(expr, generator);
- builder()->Bind(generator_jump_table_, static_cast<int>(expr->suspend_id()));
- // Upon resume, we continue here.
- BuildGeneratorResume(expr, generator);
-}
-
void BytecodeGenerator::VisitThrow(Throw* expr) {
+ AllocateBlockCoverageSlotIfEnabled(expr, SourceRangeKind::kContinuation);
VisitForAccumulatorValue(expr->exception());
builder()->SetExpressionPosition(expr);
builder()->Throw();
}
-void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
- LhsKind property_kind = Property::GetAssignType(expr);
- FeedbackSlot slot = expr->PropertyFeedbackSlot();
- builder()->SetExpressionPosition(expr);
+void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
+ LhsKind property_kind = Property::GetAssignType(property);
+ FeedbackSlot slot = property->PropertyFeedbackSlot();
switch (property_kind) {
case VARIABLE:
UNREACHABLE();
case NAMED_PROPERTY: {
+ builder()->SetExpressionPosition(property);
builder()->LoadNamedProperty(
- obj, expr->key()->AsLiteral()->AsRawPropertyName(),
+ obj, property->key()->AsLiteral()->AsRawPropertyName(),
feedback_index(slot));
break;
}
case KEYED_PROPERTY: {
- VisitForAccumulatorValue(expr->key());
+ VisitForAccumulatorValue(property->key());
+ builder()->SetExpressionPosition(property);
builder()->LoadKeyedProperty(obj, feedback_index(slot));
break;
}
case NAMED_SUPER_PROPERTY:
- VisitNamedSuperPropertyLoad(expr, Register::invalid_value());
+ VisitNamedSuperPropertyLoad(property, Register::invalid_value());
break;
case KEYED_SUPER_PROPERTY:
- VisitKeyedSuperPropertyLoad(expr, Register::invalid_value());
+ VisitKeyedSuperPropertyLoad(property, Register::invalid_value());
break;
}
}
@@ -2613,6 +3116,8 @@ void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
RegisterList args = register_allocator()->NewRegisterList(3);
VisitForRegisterValue(super_property->this_var(), args[0]);
VisitForRegisterValue(super_property->home_object(), args[1]);
+
+ builder()->SetExpressionPosition(property);
builder()
->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
.StoreAccumulatorInRegister(args[2])
@@ -2632,6 +3137,8 @@ void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
VisitForRegisterValue(super_property->this_var(), args[0]);
VisitForRegisterValue(super_property->home_object(), args[1]);
VisitForRegisterValue(property->key(), args[2]);
+
+ builder()->SetExpressionPosition(property);
builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, args);
if (opt_receiver_out.is_valid()) {
@@ -2674,7 +3181,6 @@ void BytecodeGenerator::VisitCall(Call* expr) {
RegisterList args = register_allocator()->NewGrowableRegisterList();
bool implicit_undefined_receiver = false;
- bool is_tail_call = (expr->tail_call_mode() == TailCallMode::kAllow);
// When a call contains a spread, a Call AST node is only created if there is
// exactly one spread, and it is the last argument.
bool is_spread_call = expr->only_last_arg_is_spread();
@@ -2695,7 +3201,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
- if (!is_tail_call && !is_spread_call) {
+ if (!is_spread_call) {
implicit_undefined_receiver = true;
} else {
// TODO(leszeks): There's no special bytecode for tail calls or spread
@@ -2733,7 +3239,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::OTHER_CALL: {
// Receiver is undefined for other calls.
- if (!is_tail_call && !is_spread_call) {
+ if (!is_spread_call) {
implicit_undefined_receiver = true;
} else {
// TODO(leszeks): There's no special bytecode for tail calls or spread
@@ -2800,12 +3306,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
if (is_spread_call) {
- DCHECK(!is_tail_call);
DCHECK(!implicit_undefined_receiver);
builder()->CallWithSpread(callee, args);
- } else if (is_tail_call) {
- DCHECK(!implicit_undefined_receiver);
- builder()->TailCall(callee, args, feedback_slot_index);
} else if (call_type == Call::NAMED_PROPERTY_CALL ||
call_type == Call::KEYED_PROPERTY_CALL) {
DCHECK(!implicit_undefined_receiver);
@@ -2916,19 +3418,18 @@ void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
if (execution_result()->IsEffect()) {
VisitForEffect(expr->expression());
} else if (execution_result()->IsTest()) {
+    // No actual logical negation happens here; we just swap the control flow by
+ // swapping the target labels and the fallthrough branch, and visit in the
+ // same test result context.
TestResultScope* test_result = execution_result()->AsTest();
- // No actual logical negation happening, we just swap the control flow by
- // swapping the target labels and the fallthrough branch.
- VisitForTest(expr->expression(), test_result->else_labels(),
- test_result->then_labels(),
- test_result->inverted_fallthrough());
- test_result->SetResultConsumedByTest();
+ test_result->InvertControlFlow();
+ VisitInSameTestExecutionScope(expr->expression());
} else {
TypeHint type_hint = VisitForAccumulatorValue(expr->expression());
builder()->LogicalNot(ToBooleanModeFromTypeHint(type_hint));
+ // Always returns a boolean value.
+ execution_result()->set_type_hint(TypeHint::kBoolean);
}
- // Always returns a boolean value.
- execution_result()->SetResultIsBoolean();
}
void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
@@ -3090,7 +3591,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// TODO(ignition): Think about adding proper PostInc/PostDec bytecodes
// instead of this ToNumber + Inc/Dec dance.
builder()
- ->ConvertAccumulatorToNumber(old_value, feedback_index(count_slot))
+ ->ToNumber(old_value, feedback_index(count_slot))
.LoadAccumulatorWithRegister(old_value);
}
@@ -3210,25 +3711,92 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
}
// Always returns a boolean value.
- execution_result()->SetResultIsBoolean();
+ execution_result()->set_type_hint(TypeHint::kBoolean);
+}
+
+void BytecodeGenerator::BuildAddExpression(BinaryOperation* expr,
+ RegisterList* operand_registers) {
+ int initial_operand_length = operand_registers->register_count();
+ USE(initial_operand_length);
+ Register lhs, rhs;
+ TypeHint lhs_hint = VisitForAddOperand(expr->left(), operand_registers, &lhs);
+ TypeHint rhs_hint =
+ VisitForAddOperand(expr->right(), operand_registers, &rhs);
+ DCHECK_GE(operand_registers->register_count(), initial_operand_length + 2);
+ builder()->SetExpressionPosition(expr);
+
+ bool is_chained_addition = execution_result()->IsAddition() ||
+ operand_registers->register_count() > 2;
+ // Use string concatenation if one of the sides is a string, and we have more
+ // than two additions chained together.
+ if (FLAG_ignition_string_concat && is_chained_addition &&
+ (lhs_hint == TypeHint::kString || rhs_hint == TypeHint::kString)) {
+    // One of the sides is a string: convert the other operand to primitive and
+    // then to string, and perform a single StringConcat operation once the
+ // addition chain is complete.
+ if (lhs_hint != TypeHint::kString) {
+ builder()->LoadAccumulatorWithRegister(lhs).ToPrimitiveToString(
+ lhs, feedback_index(expr->BinaryOperationFeedbackSlot()));
+ } else if (rhs_hint != TypeHint::kString) {
+ builder()->LoadAccumulatorWithRegister(rhs).ToPrimitiveToString(
+ rhs, feedback_index(expr->BinaryOperationFeedbackSlot()));
+ }
+ if (execution_result()->IsAddition()) {
+ execution_result()->AsAddition()->SetResultDeferredUntilConcat();
+ }
+ execution_result()->set_type_hint(TypeHint::kString);
+ } else {
+ // Otherwise just remove the operands from the operand register list and
+ // perform a normal addition.
+ builder()->LoadAccumulatorWithRegister(rhs);
+ Register popped_rhs =
+ register_allocator()->ShrinkRegisterList(operand_registers);
+ CHECK_EQ(rhs.index(), popped_rhs.index());
+
+ builder()->BinaryOperation(
+ expr->op(), lhs, feedback_index(expr->BinaryOperationFeedbackSlot()));
+ Register popped_lhs =
+ register_allocator()->ShrinkRegisterList(operand_registers);
+ CHECK_EQ(lhs.index(), popped_lhs.index());
+ DCHECK_EQ(initial_operand_length, operand_registers->register_count());
+ }
}
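A sketch of the kind of chain this folding targets, assuming FLAG_ignition_string_concat is enabled (variable names are illustrative):
// const s = a + "-" + b + "-" + c;
// // Instead of a chain of Add bytecodes, the non-string operands are passed
// // through ToPrimitiveToString, all operands are collected in a register
// // list, and one StringConcat over that list produces the result.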
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
// TODO(rmcilroy): Special case "x * 1.0" and "x * -1" which are generated for
// +x and -x by the parser.
- FeedbackSlot slot = expr->BinaryOperationFeedbackSlot();
Expression* subexpr;
Smi* literal;
if (expr->IsSmiLiteralOperation(&subexpr, &literal)) {
VisitForAccumulatorValue(subexpr);
builder()->SetExpressionPosition(expr);
- builder()->BinaryOperationSmiLiteral(expr->op(), literal,
- feedback_index(slot));
+ builder()->BinaryOperationSmiLiteral(
+ expr->op(), literal,
+ feedback_index(expr->BinaryOperationFeedbackSlot()));
+ } else if (expr->op() == Token::ADD) {
+ // Special case addition to enable folding of string concatenations.
+ if (execution_result()->IsAddition()) {
+ BuildAddExpression(expr,
+ execution_result()->AsAddition()->operand_registers());
+ } else {
+ RegisterList operand_registers =
+ register_allocator()->NewGrowableRegisterList();
+ BuildAddExpression(expr, &operand_registers);
+ // If there are any registers in operand_registers then we need to
+ // StringConcat them together.
+ if (operand_registers.register_count() != 0) {
+ DCHECK(FLAG_ignition_string_concat);
+ // There must be more than 2 operands to the concatenation.
+ DCHECK_GT(operand_registers.register_count(), 2);
+ builder()->StringConcat(operand_registers);
+ }
+ }
} else {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
builder()->SetExpressionPosition(expr);
- builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
+ builder()->BinaryOperation(
+ expr->op(), lhs, feedback_index(expr->BinaryOperationFeedbackSlot()));
}
}
@@ -3246,20 +3814,19 @@ void BytecodeGenerator::VisitImportCallExpression(ImportCallExpression* expr) {
.CallRuntime(Runtime::kDynamicImportCall, args);
}
-void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
- FeedbackSlot load_slot = expr->IteratorPropertyFeedbackSlot();
- FeedbackSlot call_slot = expr->IteratorCallFeedbackSlot();
-
+void BytecodeGenerator::BuildGetIterator(Expression* iterable,
+ IteratorType hint,
+ FeedbackSlot load_slot,
+ FeedbackSlot call_slot,
+ FeedbackSlot async_load_slot,
+ FeedbackSlot async_call_slot) {
RegisterList args = register_allocator()->NewRegisterList(1);
Register method = register_allocator()->NewRegister();
Register obj = args[0];
- VisitForAccumulatorValue(expr->iterable());
-
- if (expr->hint() == IteratorType::kAsync) {
- FeedbackSlot async_load_slot = expr->AsyncIteratorPropertyFeedbackSlot();
- FeedbackSlot async_call_slot = expr->AsyncIteratorCallFeedbackSlot();
+ VisitForAccumulatorValue(iterable);
+ if (hint == IteratorType::kAsync) {
// Set method to GetMethod(obj, @@asyncIterator)
builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
obj, feedback_index(async_load_slot));
@@ -3313,6 +3880,15 @@ void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
}
}
+void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
+ builder()->SetExpressionPosition(expr);
+ BuildGetIterator(expr->iterable(), expr->hint(),
+ expr->IteratorPropertyFeedbackSlot(),
+ expr->IteratorCallFeedbackSlot(),
+ expr->AsyncIteratorPropertyFeedbackSlot(),
+ expr->AsyncIteratorCallFeedbackSlot());
+}
+
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
@@ -3332,24 +3908,43 @@ void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
Visit(binop->right());
}
+void BytecodeGenerator::BuildLogicalTest(Token::Value token, Expression* left,
+ Expression* right) {
+ DCHECK(token == Token::OR || token == Token::AND);
+ TestResultScope* test_result = execution_result()->AsTest();
+ BytecodeLabels* then_labels = test_result->then_labels();
+ BytecodeLabels* else_labels = test_result->else_labels();
+ TestFallthrough fallthrough = test_result->fallthrough();
+ {
+ // Visit the left side using current TestResultScope.
+ BytecodeLabels test_right(zone());
+ if (token == Token::OR) {
+ test_result->set_fallthrough(TestFallthrough::kElse);
+ test_result->set_else_labels(&test_right);
+ } else {
+ DCHECK_EQ(Token::AND, token);
+ test_result->set_fallthrough(TestFallthrough::kThen);
+ test_result->set_then_labels(&test_right);
+ }
+ VisitInSameTestExecutionScope(left);
+ test_right.Bind(builder());
+ }
+ // Visit the right side in a new TestResultScope.
+ VisitForTest(right, then_labels, else_labels, fallthrough);
+}
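Roughly, for a short-circuit condition the left operand is evaluated in the surrounding test context with only its labels and fallthrough rewired; a sketch for && (function names are hypothetical):
// if (a && b) { doThen(); } else { doElse(); }
// //  a: falls through to testing b when truthy, otherwise jumps straight
// //     to the else labels
// //  b: tested with the original then/else labels and fallthrough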
+
void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
-
if (left->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
} else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
} else {
- BytecodeLabels test_right(zone());
- VisitForTest(left, test_result->then_labels(), &test_right,
- TestFallthrough::kElse);
- test_right.Bind(builder());
- VisitForTest(right, test_result->then_labels(),
- test_result->else_labels(), test_result->fallthrough());
+ BuildLogicalTest(Token::OR, left, right);
}
test_result->SetResultConsumedByTest();
} else {
@@ -3373,18 +3968,12 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
-
if (left->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
} else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
} else {
- BytecodeLabels test_right(zone());
- VisitForTest(left, &test_right, test_result->else_labels(),
- TestFallthrough::kThen);
- test_right.Bind(builder());
- VisitForTest(right, test_result->then_labels(),
- test_result->else_labels(), test_result->fallthrough());
+ BuildLogicalTest(Token::AND, left, right);
}
test_result->SetResultConsumedByTest();
} else {
@@ -3497,7 +4086,7 @@ void BytecodeGenerator::BuildNewLocalWithContext(Scope* scope) {
Register extension_object = register_allocator()->NewRegister();
- builder()->ConvertAccumulatorToObject(extension_object);
+ builder()->ToObject(extension_object);
VisitFunctionClosureForContext();
builder()->CreateWithContext(extension_object, scope);
}
@@ -3596,7 +4185,8 @@ void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
builder()
->MoveRegister(Register::function_closure(), args[0])
.MoveRegister(builder()->Receiver(), args[1])
- .CallRuntime(Runtime::kInlineCreateJSGeneratorObject, args);
+ .CallRuntime(Runtime::kInlineCreateJSGeneratorObject, args)
+ .StoreAccumulatorInRegister(generator_object_);
BuildVariableAssignment(closure_scope()->generator_object_var(), Token::INIT,
FeedbackSlot::Invalid(), HoleCheckMode::kElided);
}
@@ -3628,6 +4218,42 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
}
}
+void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
+ RegisterList* reg_list) {
+ Register reg = register_allocator()->GrowRegisterList(reg_list);
+ builder()->LoadUndefined().StoreAccumulatorInRegister(reg);
+}
+
+void BytecodeGenerator::BuildLoadPropertyKey(LiteralProperty* property,
+ Register out_reg) {
+ if (property->key()->IsStringLiteral()) {
+ VisitForRegisterValue(property->key(), out_reg);
+ } else {
+ VisitForAccumulatorValue(property->key());
+ builder()->ToName(out_reg);
+ }
+}
+
+int BytecodeGenerator::AllocateBlockCoverageSlotIfEnabled(
+ AstNode* node, SourceRangeKind kind) {
+ return (block_coverage_builder_ == nullptr)
+ ? BlockCoverageBuilder::kNoCoverageArraySlot
+ : block_coverage_builder_->AllocateBlockCoverageSlot(node, kind);
+}
+
+void BytecodeGenerator::BuildIncrementBlockCoverageCounterIfEnabled(
+ AstNode* node, SourceRangeKind kind) {
+ if (block_coverage_builder_ == nullptr) return;
+ block_coverage_builder_->IncrementBlockCounter(node, kind);
+}
+
+void BytecodeGenerator::BuildIncrementBlockCoverageCounterIfEnabled(
+ int coverage_array_slot) {
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(coverage_array_slot);
+ }
+}
+
// Visits the expression |expr| and places the result in the accumulator.
BytecodeGenerator::TypeHint BytecodeGenerator::VisitForAccumulatorValue(
Expression* expr) {
@@ -3668,6 +4294,25 @@ void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
builder()->StoreAccumulatorInRegister(destination);
}
+// Visits the expression |expr| as an addition operand value and places the
+// result in the accumulator.
+BytecodeGenerator::TypeHint BytecodeGenerator::VisitForAddOperand(
+ Expression* expr, RegisterList* operand_registers, Register* out_register) {
+ TypeHint type_hint;
+ bool result_deferred_until_concat;
+ {
+ AdditionResultScope add_scope(this, operand_registers);
+ Visit(expr);
+ type_hint = add_scope.type_hint();
+ result_deferred_until_concat = add_scope.result_deferred_until_concat();
+ }
+ if (!result_deferred_until_concat) {
+ *out_register = register_allocator()->GrowRegisterList(operand_registers);
+ builder()->StoreAccumulatorInRegister(*out_register);
+ }
+ return type_hint;
+}
+
// Visits the expression |expr| and pushes the result into a new register
// added to the end of |reg_list|.
void BytecodeGenerator::VisitAndPushIntoRegisterList(Expression* expr,
@@ -3684,19 +4329,20 @@ void BytecodeGenerator::VisitAndPushIntoRegisterList(Expression* expr,
builder()->StoreAccumulatorInRegister(destination);
}
-void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
- RegisterList* reg_list) {
- Register reg = register_allocator()->GrowRegisterList(reg_list);
- builder()->LoadUndefined().StoreAccumulatorInRegister(reg);
-}
-
-void BytecodeGenerator::BuildLoadPropertyKey(LiteralProperty* property,
- Register out_reg) {
- if (property->key()->IsStringLiteral()) {
- VisitForRegisterValue(property->key(), out_reg);
- } else {
- VisitForAccumulatorValue(property->key());
- builder()->ConvertAccumulatorToName(out_reg);
+void BytecodeGenerator::BuildTest(ToBooleanMode mode,
+ BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels,
+ TestFallthrough fallthrough) {
+ switch (fallthrough) {
+ case TestFallthrough::kThen:
+ builder()->JumpIfFalse(mode, else_labels->New());
+ break;
+ case TestFallthrough::kElse:
+ builder()->JumpIfTrue(mode, then_labels->New());
+ break;
+ case TestFallthrough::kNone:
+ builder()->JumpIfTrue(mode, then_labels->New());
+ builder()->Jump(else_labels->New());
}
}
@@ -3716,20 +4362,30 @@ void BytecodeGenerator::VisitForTest(Expression* expr,
Visit(expr);
result_consumed = test_result.result_consumed_by_test();
type_hint = test_result.type_hint();
+ // Labels and fallthrough might have been mutated, so update based on
+ // TestResultScope.
+ then_labels = test_result.then_labels();
+ else_labels = test_result.else_labels();
+ fallthrough = test_result.fallthrough();
}
if (!result_consumed) {
- ToBooleanMode mode(ToBooleanModeFromTypeHint(type_hint));
- switch (fallthrough) {
- case TestFallthrough::kThen:
- builder()->JumpIfFalse(mode, else_labels->New());
- break;
- case TestFallthrough::kElse:
- builder()->JumpIfTrue(mode, then_labels->New());
- break;
- case TestFallthrough::kNone:
- builder()->JumpIfTrue(mode, then_labels->New());
- builder()->Jump(else_labels->New());
- }
+ BuildTest(ToBooleanModeFromTypeHint(type_hint), then_labels, else_labels,
+ fallthrough);
+ }
+}
+
+void BytecodeGenerator::VisitInSameTestExecutionScope(Expression* expr) {
+ DCHECK(execution_result()->IsTest());
+ {
+ RegisterAllocationScope reg_scope(this);
+ Visit(expr);
+ }
+ if (!execution_result()->AsTest()->result_consumed_by_test()) {
+ TestResultScope* result_scope = execution_result()->AsTest();
+ BuildTest(ToBooleanModeFromTypeHint(result_scope->type_hint()),
+ result_scope->then_labels(), result_scope->else_labels(),
+ result_scope->fallthrough());
+ result_scope->SetResultConsumedByTest();
}
}
@@ -3740,10 +4396,8 @@ void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
Visit(stmt);
}
-BytecodeArrayBuilder::ToBooleanMode
-BytecodeGenerator::ToBooleanModeFromTypeHint(TypeHint type_hint) {
- return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean
- : ToBooleanMode::kConvertToBoolean;
+FunctionKind BytecodeGenerator::function_kind() const {
+ return info()->literal()->kind();
}
LanguageMode BytecodeGenerator::language_mode() const {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 6e277e3799..85ceb548ee 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -14,13 +14,16 @@
namespace v8 {
namespace internal {
+class AstNodeSourceRanges;
class AstStringConstants;
class CompilationInfo;
+enum class SourceRangeKind;
namespace interpreter {
class GlobalDeclarationsBuilder;
class LoopBuilder;
+class BlockCoverageBuilder;
class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
@@ -39,6 +42,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitStatements(ZoneList<Statement*>* statments);
private:
+ class AdditionResultScope;
class ContextScope;
class ControlScope;
class ControlScopeForBreakable;
@@ -57,7 +61,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
enum class TestFallthrough { kThen, kElse, kNone };
- enum class TypeHint { kAny, kBoolean };
+ enum class TypeHint { kAny, kString, kBoolean };
void GenerateBytecodeBody();
void AllocateDeferredConstants(Isolate* isolate);
@@ -110,18 +114,18 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildVariableLoadForAccumulatorValue(
Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void BuildVariableAssignment(Variable* variable, Token::Value op,
- FeedbackSlot slot,
- HoleCheckMode hole_check_mode);
+ void BuildVariableAssignment(
+ Variable* variable, Token::Value op, FeedbackSlot slot,
+ HoleCheckMode hole_check_mode,
+ LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
void BuildLiteralCompareNil(Token::Value compare_op, NilValue nil);
- void BuildReturn();
- void BuildAsyncReturn();
+ void BuildReturn(int source_position = kNoSourcePosition);
+ void BuildAsyncReturn(int source_position = kNoSourcePosition);
void BuildAsyncGeneratorReturn();
void BuildReThrow();
void BuildAbort(BailoutReason bailout_reason);
- void BuildThrowIfHole(Variable* variable);
- void BuildThrowReferenceError(const AstRawString* name);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
+ void BuildThrowIfHole(Variable* variable);
// Build jump to targets[value], where
// start_index <= value < start_index + size.
@@ -135,8 +139,14 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalWithContext(Scope* scope);
void BuildGeneratorPrologue();
- void BuildGeneratorSuspend(Suspend* expr, Register generator);
- void BuildGeneratorResume(Suspend* expr, Register generator);
+ void BuildGeneratorSuspend(Suspend* expr, Register value,
+ RegisterList registers_to_save);
+ void BuildGeneratorResume(Suspend* expr, RegisterList registers_to_restore);
+ void BuildAbruptResume(Suspend* expr);
+ void BuildGetIterator(Expression* iterable, IteratorType hint,
+ FeedbackSlot load_slot, FeedbackSlot call_slot,
+ FeedbackSlot async_load_slot,
+ FeedbackSlot async_call_slot);
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
@@ -158,9 +168,23 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitForInAssignment(Expression* expr, FeedbackSlot slot);
void VisitModuleNamespaceImports();
+ // Builds a logical OR/AND within a test context by rewiring the jumps based
+ // on the expression values.
+ void BuildLogicalTest(Token::Value token, Expression* left,
+ Expression* right);
+
+ // Builds an addition expression. If the result is a known string addition,
+ // then rather than emitting the add, the operands will be converted to
+ // primitive, then to string, and stored in registers in the
+ // |operand_registers| list for later concatenation.
+ void BuildAddExpression(BinaryOperation* expr,
+ RegisterList* operand_registers);
+
// Visit the header/body of a loop iteration.
void VisitIterationHeader(IterationStatement* stmt,
LoopBuilder* loop_builder);
+ void VisitIterationHeader(int first_suspend_id, int suspend_count,
+ LoopBuilder* loop_builder);
void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
// Visit a statement and switch scopes, the context is in the accumulator.
@@ -170,24 +194,39 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildLoadPropertyKey(LiteralProperty* property, Register out_reg);
+ int AllocateBlockCoverageSlotIfEnabled(AstNode* node, SourceRangeKind kind);
+ void BuildIncrementBlockCoverageCounterIfEnabled(AstNode* node,
+ SourceRangeKind kind);
+ void BuildIncrementBlockCoverageCounterIfEnabled(int coverage_array_slot);
+
+ void BuildTest(ToBooleanMode mode, BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels, TestFallthrough fallthrough);
+
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect. Some visitors return a TypeHint which
// specifies the type of the result of the visited expression.
TypeHint VisitForAccumulatorValue(Expression* expr);
void VisitForAccumulatorValueOrTheHole(Expression* expr);
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
- void VisitForRegisterValue(Expression* expr, Register destination);
+ INLINE(void VisitForRegisterValue(Expression* expr, Register destination));
void VisitAndPushIntoRegisterList(Expression* expr, RegisterList* reg_list);
void VisitForEffect(Expression* expr);
void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough);
+ INLINE(TypeHint VisitForAddOperand(Expression* expr,
+ RegisterList* operand_registers,
+ Register* out_register));
+ void VisitInSameTestExecutionScope(Expression* expr);
// Returns the runtime function id for a store to super for the function's
// language mode.
inline Runtime::FunctionId StoreToSuperRuntimeId();
inline Runtime::FunctionId StoreKeyedToSuperRuntimeId();
- ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint);
+ static constexpr ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint) {
+ return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean
+ : ToBooleanMode::kConvertToBoolean;
+ }
inline BytecodeArrayBuilder* builder() const { return builder_; }
inline Zone* zone() const { return zone_; }
@@ -221,8 +260,16 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return globals_builder_;
}
inline LanguageMode language_mode() const;
+ inline FunctionKind function_kind() const;
int feedback_index(FeedbackSlot slot) const;
+ inline HandlerTable::CatchPrediction catch_prediction() const {
+ return catch_prediction_;
+ }
+ inline void set_catch_prediction(HandlerTable::CatchPrediction value) {
+ catch_prediction_ = value;
+ }
+
Zone* zone_;
BytecodeArrayBuilder* builder_;
CompilationInfo* info_;
@@ -231,6 +278,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Scope* current_scope_;
GlobalDeclarationsBuilder* globals_builder_;
+ BlockCoverageBuilder* block_coverage_builder_;
ZoneVector<GlobalDeclarationsBuilder*> global_declarations_;
ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
@@ -243,8 +291,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ExpressionResultScope* execution_result_;
BytecodeJumpTable* generator_jump_table_;
+ Register generator_object_;
Register generator_state_;
int loop_depth_;
+
+ HandlerTable::CatchPrediction catch_prediction_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc
index ef32bdd104..da607a2927 100644
--- a/deps/v8/src/interpreter/bytecode-label.cc
+++ b/deps/v8/src/interpreter/bytecode-label.cc
@@ -13,7 +13,7 @@ namespace interpreter {
BytecodeLabel* BytecodeLabels::New() {
DCHECK(!is_bound());
- labels_.push_back(BytecodeLabel());
+ labels_.emplace_back(BytecodeLabel());
return &labels_.back();
}
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index 4ef6265eb2..ef031efa1c 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -65,7 +65,7 @@ class V8_EXPORT_PRIVATE BytecodeLabels {
void BindToLabel(BytecodeArrayBuilder* builder, const BytecodeLabel& target);
bool is_bound() const {
- bool is_bound = !labels_.empty() && labels_.at(0).is_bound();
+ bool is_bound = !labels_.empty() && labels_.front().is_bound();
DCHECK(!is_bound ||
std::all_of(labels_.begin(), labels_.end(),
[](const BytecodeLabel& l) { return l.is_bound(); }));
@@ -75,7 +75,7 @@ class V8_EXPORT_PRIVATE BytecodeLabels {
bool empty() const { return labels_.empty(); }
private:
- ZoneVector<BytecodeLabel> labels_;
+ ZoneLinkedList<BytecodeLabel> labels_;
DISALLOW_COPY_AND_ASSIGN(BytecodeLabels);
};
diff --git a/deps/v8/src/interpreter/bytecode-operands.cc b/deps/v8/src/interpreter/bytecode-operands.cc
index 6be81fe62e..5ebf66be38 100644
--- a/deps/v8/src/interpreter/bytecode-operands.cc
+++ b/deps/v8/src/interpreter/bytecode-operands.cc
@@ -24,7 +24,6 @@ const char* AccumulatorUseToString(AccumulatorUse accumulator_use) {
return "ReadWrite";
}
UNREACHABLE();
- return "";
}
const char* OperandTypeToString(OperandType operand_type) {
@@ -36,7 +35,6 @@ const char* OperandTypeToString(OperandType operand_type) {
#undef CASE
}
UNREACHABLE();
- return "";
}
const char* OperandScaleToString(OperandScale operand_scale) {
@@ -48,7 +46,6 @@ const char* OperandScaleToString(OperandScale operand_scale) {
#undef CASE
}
UNREACHABLE();
- return "";
}
const char* OperandSizeToString(OperandSize operand_size) {
@@ -63,7 +60,6 @@ const char* OperandSizeToString(OperandSize operand_size) {
return "Quad";
}
UNREACHABLE();
- return "";
}
} // namespace
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index f649d93a08..7ffda897a5 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -20,6 +20,7 @@ namespace interpreter {
#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
V(RegOut, OperandTypeInfo::kScalableSignedByte) \
+ V(RegOutList, OperandTypeInfo::kScalableSignedByte) \
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
@@ -129,7 +130,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandScale& operand_scale);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandSize& operand_size);
-std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const OperandType& operand_type);
class BytecodeOperands {
public:
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index 72e0133f43..dcd343fce3 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -73,13 +73,29 @@ class BytecodeRegisterAllocator final {
return reg;
}
+ // Releases the last register in |reg_list|, decreasing its count by one and
+ // returning the register released.
+ //
+ // Note: no other new registers may currently be allocated since the register
+ // list was originally allocated or grown.
+ Register ShrinkRegisterList(RegisterList* reg_list) {
+ // If the following CHECK fails then a register was allocated (and not
+ // freed) between the creation of the RegisterList and this call to release
+ // the Register.
+ Register last_reg = reg_list->last_register();
+ CHECK_EQ(last_reg.index(), next_register_index_ - 1);
+ reg_list->DecrementRegisterCount();
+ ReleaseRegisters(next_register_index_ - 1);
+ return last_reg;
+ }
+
// Release all registers above |register_index|.
void ReleaseRegisters(int register_index) {
+ int count = next_register_index_ - register_index;
+ next_register_index_ = register_index;
if (observer_) {
- observer_->RegisterListFreeEvent(
- RegisterList(register_index, next_register_index_ - register_index));
+ observer_->RegisterListFreeEvent(RegisterList(register_index, count));
}
- next_register_index_ = register_index;
}
// Returns true if the register |reg| is a live register.
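
For illustration only (not part of the patch): a minimal sketch of the ShrinkRegisterList contract introduced above, assuming the allocator's existing NewRegisterList/GrowRegisterList API and a hypothetical BytecodeRegisterAllocator instance named allocator.

// Hypothetical usage sketch, outside the V8 sources being patched.
// ShrinkRegisterList only works when the list's last register is also the
// allocator's most recently allocated register (no allocations in between).
RegisterList operands = allocator.NewRegisterList(0);   // empty list
Register r0 = allocator.GrowRegisterList(&operands);    // operands = {r0}
Register r1 = allocator.GrowRegisterList(&operands);    // operands = {r0, r1}
// ... emit bytecode that writes into r1 ...
Register released = allocator.ShrinkRegisterList(&operands);
// released is r1, operands.register_count() is back to 1, and r1 has been
// returned to the allocator.
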
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 859f0e1828..af41e92365 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -21,6 +21,7 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
equivalence_id_(equivalence_id),
materialized_(materialized),
allocated_(allocated),
+ needs_flush_(false),
next_(this),
prev_(this) {}
@@ -30,6 +31,11 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
bool IsInSameEquivalenceSet(RegisterInfo* info) const;
+ // Get a member of the register's equivalence set that is allocated.
+ // Returns itself if allocated, and nullptr if there is no unallocated
+ // equivalent register.
+ RegisterInfo* GetAllocatedEquivalent();
+
// Get a member of this register's equivalence set that is
// materialized. The materialized equivalent will be this register
// if it is materialized. Returns nullptr if no materialized
@@ -65,12 +71,16 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
equivalence_id_ = equivalence_id;
}
uint32_t equivalence_id() const { return equivalence_id_; }
+ // Indicates if a register should be processed when calling Flush().
+ bool needs_flush() const { return needs_flush_; }
+ void set_needs_flush(bool needs_flush) { needs_flush_ = needs_flush; }
private:
Register register_;
uint32_t equivalence_id_;
bool materialized_;
bool allocated_;
+ bool needs_flush_;
// Equivalence set pointers.
RegisterInfo* next_;
@@ -128,6 +138,19 @@ bool BytecodeRegisterOptimizer::RegisterInfo::IsInSameEquivalenceSet(
}
BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetAllocatedEquivalent() {
+ RegisterInfo* visitor = this;
+ do {
+ if (visitor->allocated()) {
+ return visitor;
+ }
+ visitor = visitor->next_;
+ } while (visitor != this);
+
+ return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
RegisterInfo* visitor = this;
do {
@@ -199,6 +222,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
temporary_base_(fixed_registers_count),
max_register_index_(fixed_registers_count - 1),
register_info_table_(zone),
+ registers_needing_flushed_(zone),
equivalence_id_(0),
bytecode_writer_(bytecode_writer),
flush_required_(false),
@@ -226,29 +250,62 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
DCHECK(accumulator_info_->register_value() == accumulator_);
}
+void BytecodeRegisterOptimizer::PushToRegistersNeedingFlush(RegisterInfo* reg) {
+ if (!reg->needs_flush()) {
+ reg->set_needs_flush(true);
+ registers_needing_flushed_.push_back(reg);
+ }
+}
+
+bool BytecodeRegisterOptimizer::EnsureAllRegistersAreFlushed() const {
+ for (RegisterInfo* reg_info : register_info_table_) {
+ if (reg_info->needs_flush()) {
+ return false;
+ } else if (!reg_info->IsOnlyMemberOfEquivalenceSet()) {
+ return false;
+ } else if (reg_info->allocated() && !reg_info->materialized()) {
+ return false;
+ }
+ }
+ return true;
+}
+
void BytecodeRegisterOptimizer::Flush() {
if (!flush_required_) {
return;
}
// Materialize all live registers and break equivalences.
- size_t count = register_info_table_.size();
- for (size_t i = 0; i < count; ++i) {
- RegisterInfo* reg_info = register_info_table_[i];
- if (reg_info->materialized()) {
+ for (RegisterInfo* reg_info : registers_needing_flushed_) {
+ if (!reg_info->needs_flush()) continue;
+ reg_info->set_needs_flush(false);
+
+ RegisterInfo* materialized = reg_info->materialized()
+ ? reg_info
+ : reg_info->GetMaterializedEquivalent();
+
+ if (materialized != nullptr) {
// Walk equivalents of materialized registers, materializing
// each equivalent register as necessary and placing in their
// own equivalence set.
RegisterInfo* equivalent;
- while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
+ while ((equivalent = materialized->GetEquivalent()) != materialized) {
if (equivalent->allocated() && !equivalent->materialized()) {
- OutputRegisterTransfer(reg_info, equivalent);
+ OutputRegisterTransfer(materialized, equivalent);
}
equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+ equivalent->set_needs_flush(false);
}
+ } else {
+ // Equivalence class containing only unallocated registers.
+ DCHECK(reg_info->GetAllocatedEquivalent() == nullptr);
+ reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), false);
}
}
+ registers_needing_flushed_.clear();
+ DCHECK(EnsureAllRegistersAreFlushed());
+
flush_required_ = false;
}
@@ -304,12 +361,15 @@ BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
if (!info->materialized()) {
RegisterInfo* materialized = info->GetMaterializedEquivalent();
+ DCHECK_NOT_NULL(materialized);
OutputRegisterTransfer(materialized, info);
}
}
void BytecodeRegisterOptimizer::AddToEquivalenceSet(
RegisterInfo* set_member, RegisterInfo* non_set_member) {
+ // Equivalence class is now of size >= 2, so we make sure it will be flushed.
+ PushToRegistersNeedingFlush(non_set_member);
non_set_member->AddToEquivalenceSetOf(set_member);
// Flushing is only required when two or more registers are placed
// in the same equivalence set.
@@ -410,13 +470,20 @@ void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
for (size_t i = old_size; i < new_size; ++i) {
register_info_table_[i] =
new (zone()) RegisterInfo(RegisterFromRegisterInfoTableIndex(i),
- NextEquivalenceId(), false, false);
+ NextEquivalenceId(), true, false);
}
}
}
+void BytecodeRegisterOptimizer::AllocateRegister(RegisterInfo* info) {
+ info->set_allocated(true);
+ if (!info->materialized()) {
+ info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+ }
+}
+
void BytecodeRegisterOptimizer::RegisterAllocateEvent(Register reg) {
- GetOrCreateRegisterInfo(reg)->set_allocated(true);
+ AllocateRegister(GetOrCreateRegisterInfo(reg));
}
void BytecodeRegisterOptimizer::RegisterListAllocateEvent(
@@ -425,7 +492,7 @@ void BytecodeRegisterOptimizer::RegisterListAllocateEvent(
int first_index = reg_list.first_register().index();
GrowRegisterMap(Register(first_index + reg_list.register_count() - 1));
for (int i = 0; i < reg_list.register_count(); i++) {
- GetRegisterInfo(Register(first_index + i))->set_allocated(true);
+ AllocateRegister(GetRegisterInfo(Register(first_index + i)));
}
}
}
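
The Flush() rework above replaces a full scan of register_info_table_ with a worklist of registers whose equivalence set has grown beyond a single member. A standalone sketch of that pattern in plain C++ (generic names, not the V8 classes):

#include <deque>
#include <vector>

struct Info {
  bool needs_flush = false;
  // ... equivalence-set links, materialized/allocated flags ...
};

struct Optimizer {
  std::vector<Info*> table;           // every register ever seen
  std::deque<Info*> needing_flush;    // only registers in sets of size >= 2

  // Analogous to PushToRegistersNeedingFlush(): enqueue each register once.
  void MarkForFlush(Info* r) {
    if (!r->needs_flush) {
      r->needs_flush = true;
      needing_flush.push_back(r);
    }
  }

  // Analogous to Flush(): walk the worklist, not the whole table.
  void Flush() {
    for (Info* r : needing_flush) {
      if (!r->needs_flush) continue;
      r->needs_flush = false;
      // materialize equivalents / split equivalence sets here
    }
    needing_flush.clear();
  }
};
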
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 494abb6c96..fababcf19e 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
bytecode == Bytecode::kDebugger ||
bytecode == Bytecode::kSuspendGenerator ||
- bytecode == Bytecode::kResumeGenerator) {
+ bytecode == Bytecode::kRestoreGeneratorRegisters) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
// aren't known)
@@ -75,7 +75,7 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// aren't known)
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
- // - a generator resume (as this involves restoring all registers).
+ // - a generator register restore.
Flush();
}
@@ -131,6 +131,9 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
void AddToEquivalenceSet(RegisterInfo* set_member,
RegisterInfo* non_set_member);
+ void PushToRegistersNeedingFlush(RegisterInfo* reg);
+ bool EnsureAllRegistersAreFlushed() const;
+
// Methods for finding and creating metadata for each register.
RegisterInfo* GetRegisterInfo(Register reg) {
size_t index = GetRegisterInfoTableIndex(reg);
@@ -178,6 +181,8 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
return equivalence_id_;
}
+ void AllocateRegister(RegisterInfo* info);
+
Zone* zone() { return zone_; }
const Register accumulator_;
@@ -189,6 +194,8 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
ZoneVector<RegisterInfo*> register_info_table_;
int register_info_table_offset_;
+ ZoneDeque<RegisterInfo*> registers_needing_flushed_;
+
// Counter for equivalence sets identifiers.
int equivalence_id_;
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index 554bc23a5b..5dc77ae4aa 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -89,13 +89,12 @@ class V8_EXPORT_PRIVATE Register final {
}
private:
+ DISALLOW_NEW_AND_DELETE();
+
static const int kInvalidIndex = kMaxInt;
static const int kRegisterFileStartOffset =
InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
- void* operator new(size_t size) = delete;
- void operator delete(void* p) = delete;
-
int index_;
};
@@ -104,9 +103,11 @@ class RegisterList {
RegisterList() : first_reg_index_(Register().index()), register_count_(0) {}
RegisterList(int first_reg_index, int register_count)
: first_reg_index_(first_reg_index), register_count_(register_count) {}
+ explicit RegisterList(Register r) : RegisterList(r.index(), 1) {}
- // Increases the size of the register list by one.
+ // Increases/decreases the size of the register list by one.
void IncrementRegisterCount() { register_count_++; }
+ void DecrementRegisterCount() { register_count_--; }
// Returns a new RegisterList which is a truncated version of this list, with
// |count| registers.
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index d0665b9ea9..9ee7c0a9e6 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -55,6 +55,18 @@ const OperandSize* const Bytecodes::kOperandSizes[][3] = {
BYTECODE_LIST(ENTRY)
#undef ENTRY
};
+
+const OperandSize Bytecodes::kOperandKindSizes[][3] = {
+#define ENTRY(Name, ...) \
+ { OperandScaler<OperandType::k##Name, \
+ OperandScale::kSingle>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kDouble>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kQuadruple>::kOperandSize },
+ OPERAND_TYPE_LIST(ENTRY)
+#undef ENTRY
+};
// clang-format on
// static
@@ -67,7 +79,6 @@ const char* Bytecodes::ToString(Bytecode bytecode) {
#undef CASE
}
UNREACHABLE();
- return "";
}
// static
@@ -101,7 +112,6 @@ Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
DEBUG_BREAK_PLAIN_BYTECODE_LIST(RETURN_IF_DEBUG_BREAK_SIZE_MATCHES)
#undef RETURN_IF_DEBUG_BREAK_SIZE_MATCHES
UNREACHABLE();
- return Bytecode::kIllegal;
}
// static
@@ -133,7 +143,6 @@ Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
break;
}
UNREACHABLE();
- return Bytecode::kIllegal;
}
// static
@@ -275,32 +284,6 @@ bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
#undef CASE
}
UNREACHABLE();
- return false;
-}
-
-// static
-OperandSize Bytecodes::SizeOfOperand(OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK_LE(operand_type, OperandType::kLast);
- DCHECK_GE(operand_scale, OperandScale::kSingle);
- DCHECK_LE(operand_scale, OperandScale::kLast);
- STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
- OperandScale::kLast == OperandScale::kQuadruple);
- int scale_index = static_cast<int>(operand_scale) >> 1;
- // clang-format off
- static const OperandSize kOperandSizes[][3] = {
-#define ENTRY(Name, ...) \
- { OperandScaler<OperandType::k##Name, \
- OperandScale::kSingle>::kOperandSize, \
- OperandScaler<OperandType::k##Name, \
- OperandScale::kDouble>::kOperandSize, \
- OperandScaler<OperandType::k##Name, \
- OperandScale::kQuadruple>::kOperandSize },
- OPERAND_TYPE_LIST(ENTRY)
-#undef ENTRY
- };
- // clang-format on
- return kOperandSizes[static_cast<size_t>(operand_type)][scale_index];
}
// static
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 83417fe879..bd97340877 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -70,8 +70,8 @@ namespace interpreter {
OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
V(LdaLookupGlobalSlotInsideTypeof, AccumulatorUse::kWrite, \
OperandType::kIdx, OperandType::kIdx, OperandType::kUImm) \
- V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(StaLookupSlot, AccumulatorUse::kReadWrite, OperandType::kIdx, \
+ OperandType::kFlag8) \
\
/* Register-accumulator transfers */ \
V(Ldar, AccumulatorUse::kWrite, OperandType::kReg) \
@@ -179,8 +179,6 @@ namespace interpreter {
OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
V(CallWithSpread, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount) \
- V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
OperandType::kRegList, OperandType::kRegCount) \
V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
@@ -224,6 +222,12 @@ namespace interpreter {
V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut, OperandType::kIdx) \
V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
\
+ /* String concatenation */ \
+ V(ToPrimitiveToString, AccumulatorUse::kRead, OperandType::kRegOut, \
+ OperandType::kIdx) \
+ V(StringConcat, AccumulatorUse::kWrite, OperandType::kRegList, \
+ OperandType::kRegCount) \
+ \
/* Literals */ \
V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8) \
@@ -266,7 +270,6 @@ namespace interpreter {
V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfJSReceiverConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
/* - [Start ToBoolean jumps] */ \
V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
@@ -282,7 +285,6 @@ namespace interpreter {
V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kUImm) \
V(JumpIfNotUndefined, AccumulatorUse::kRead, OperandType::kUImm) \
V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm) \
- V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kUImm) \
\
/* Smi-table lookup for switch statements */ \
V(SwitchOnSmiNoFeedback, AccumulatorUse::kRead, OperandType::kIdx, \
@@ -307,11 +309,16 @@ namespace interpreter {
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
V(Return, AccumulatorUse::kRead) \
+ V(ThrowReferenceErrorIfHole, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(ThrowSuperNotCalledIfHole, AccumulatorUse::kRead) \
+ V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \
\
/* Generators */ \
+ V(RestoreGeneratorState, AccumulatorUse::kWrite, OperandType::kReg) \
V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kFlag8) \
- V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg) \
+ OperandType::kRegList, OperandType::kRegCount) \
+ V(RestoreGeneratorRegisters, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
@@ -332,12 +339,11 @@ namespace interpreter {
V(DebugBreakWide, AccumulatorUse::kRead) \
V(DebugBreakExtraWide, AccumulatorUse::kRead) \
\
+ /* Block Coverage */ \
+ V(IncBlockCounter, AccumulatorUse::kNone, OperandType::kIdx) \
+ \
/* Illegal bytecode (terminates execution) */ \
V(Illegal, AccumulatorUse::kNone) \
- \
- /* No operation (used to maintain source positions for peephole */ \
- /* eliminated bytecodes). */ \
- V(Nop, AccumulatorUse::kNone)
// List of debug break bytecodes.
#define DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
@@ -382,7 +388,6 @@ namespace interpreter {
V(JumpIfUndefined) \
V(JumpIfNotUndefined) \
V(JumpIfJSReceiver) \
- V(JumpIfNotHole)
#define JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
@@ -393,7 +398,6 @@ namespace interpreter {
V(JumpIfTrueConstant) \
V(JumpIfFalseConstant) \
V(JumpIfJSReceiverConstant) \
- V(JumpIfNotHoleConstant)
#define JUMP_CONSTANT_BYTECODE_LIST(V) \
JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
@@ -470,7 +474,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
return Bytecode::kWide;
default:
UNREACHABLE();
- return Bytecode::kIllegal;
}
}
@@ -491,7 +494,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
return OperandScale::kDouble;
default:
UNREACHABLE();
- return OperandScale::kSingle;
}
}
@@ -546,7 +548,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// an immediate byte operand (OperandType::kImm).
static constexpr bool IsConditionalJumpImmediate(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfToBooleanTrue &&
- bytecode <= Bytecode::kJumpIfNotHole;
+ bytecode <= Bytecode::kJumpIfJSReceiver;
}
// Returns true if the bytecode is a conditional jump taking
@@ -560,7 +562,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// any kind of operand.
static constexpr bool IsConditionalJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpIfNullConstant &&
- bytecode <= Bytecode::kJumpIfNotHole;
+ bytecode <= Bytecode::kJumpIfJSReceiver;
}
// Returns true if the bytecode is an unconditional jump.
@@ -594,13 +596,14 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// any kind of operand.
static constexpr bool IsJump(Bytecode bytecode) {
return bytecode >= Bytecode::kJumpLoop &&
- bytecode <= Bytecode::kJumpIfNotHole;
+ bytecode <= Bytecode::kJumpIfJSReceiver;
}
// Returns true if the bytecode is a forward jump or conditional jump taking
// any kind of operand.
static constexpr bool IsForwardJump(Bytecode bytecode) {
- return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole;
+ return bytecode >= Bytecode::kJump &&
+ bytecode <= Bytecode::kJumpIfJSReceiver;
}
// Returns true if the bytecode is a conditional jump, a jump, or a return.
@@ -625,7 +628,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) ||
- IsCompareWithoutEffects(bytecode) || bytecode == Bytecode::kNop ||
+ IsCompareWithoutEffects(bytecode) ||
IsJumpWithoutEffects(bytecode) || IsSwitch(bytecode));
}
@@ -645,7 +648,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
bytecode == Bytecode::kCallUndefinedReceiver0 ||
bytecode == Bytecode::kCallUndefinedReceiver1 ||
bytecode == Bytecode::kCallUndefinedReceiver2 ||
- bytecode == Bytecode::kTailCall ||
bytecode == Bytecode::kConstruct ||
bytecode == Bytecode::kCallWithSpread ||
bytecode == Bytecode::kConstructWithSpread ||
@@ -774,7 +776,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
case Bytecode::kCallUndefinedReceiver2:
return ConvertReceiverMode::kNullOrUndefined;
case Bytecode::kCallAnyReceiver:
- case Bytecode::kTailCall:
case Bytecode::kConstruct:
case Bytecode::kCallWithSpread:
case Bytecode::kConstructWithSpread:
@@ -783,7 +784,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
return ConvertReceiverMode::kAny;
default:
UNREACHABLE();
- return ConvertReceiverMode::kAny;
}
}
@@ -818,17 +818,25 @@ class V8_EXPORT_PRIVATE Bytecodes final {
case OperandType::kRegOutTriple:
return 3;
case OperandType::kRegList:
+ case OperandType::kRegOutList:
UNREACHABLE();
- return 0;
default:
return 0;
}
UNREACHABLE();
- return 0;
}
- // Returns the size of |operand| for |operand_scale|.
- static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
+ // Returns the size of |operand_type| for |operand_scale|.
+ static OperandSize SizeOfOperand(OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK_LE(operand_type, OperandType::kLast);
+ DCHECK_GE(operand_scale, OperandScale::kSingle);
+ DCHECK_LE(operand_scale, OperandScale::kLast);
+ STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+ OperandScale::kLast == OperandScale::kQuadruple);
+ int scale_index = static_cast<int>(operand_scale) >> 1;
+ return kOperandKindSizes[static_cast<size_t>(operand_type)][scale_index];
+ }
// Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
static bool IsRuntimeIdOperandType(OperandType operand_type);
@@ -884,6 +892,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static const bool kIsScalable[];
static const int kBytecodeSizes[][3];
static const OperandSize* const kOperandSizes[][3];
+ static OperandSize const kOperandKindSizes[][3];
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
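
A side note on the SizeOfOperand() body inlined above: OperandScale values are powers of two (the STATIC_ASSERT pins kQuadruple at 4; kSingle and kDouble are 1 and 2), so the `>> 1` maps the scale directly to a column of kOperandKindSizes. A trivial standalone check of that arithmetic:

#include <cassert>

int main() {
  // scale_index = static_cast<int>(operand_scale) >> 1
  assert((1 >> 1) == 0);  // kSingle    -> column 0
  assert((2 >> 1) == 1);  // kDouble    -> column 1
  assert((4 >> 1) == 2);  // kQuadruple -> column 2
  return 0;
}
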
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index f7e68f876e..ca2351fcd6 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -130,7 +130,6 @@ ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
}
}
UNREACHABLE();
- return nullptr;
}
MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
@@ -151,7 +150,7 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
for (const ConstantArraySlice* slice : idx_slice_) {
DCHECK_EQ(slice->reserved(), 0);
DCHECK(array_index == 0 ||
- base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+ base::bits::IsPowerOfTwo(static_cast<uint32_t>(array_index)));
#if DEBUG
// Different slices might contain the same element due to reservations, but
// all elements within a slice should be unique. If this DCHECK fails, then
@@ -185,7 +184,7 @@ size_t ConstantArrayBuilder::Insert(Smi* smi) {
size_t ConstantArrayBuilder::Insert(const AstRawString* raw_string) {
return constants_map_
.LookupOrInsert(reinterpret_cast<intptr_t>(raw_string),
- raw_string->hash(),
+ raw_string->Hash(),
[&]() { return AllocateIndex(Entry(raw_string)); },
ZoneAllocationPolicy(zone_))
->value;
@@ -238,7 +237,6 @@ ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndexArray(
}
}
UNREACHABLE();
- return kMaxUInt32;
}
ConstantArrayBuilder::ConstantArraySlice*
@@ -292,7 +290,6 @@ OperandSize ConstantArrayBuilder::CreateReservedEntry() {
}
}
UNREACHABLE();
- return OperandSize::kNone;
}
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
@@ -332,7 +329,6 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
case Tag::kDeferred:
// We shouldn't have any deferred entries by now.
UNREACHABLE();
- return Handle<Object>::null();
case Tag::kHandle:
return handle_;
case Tag::kSmi:
@@ -355,7 +351,6 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
#undef ENTRY_LOOKUP
}
UNREACHABLE();
- return Handle<Object>::null();
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index e4281667c2..7dc671c225 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -41,8 +41,14 @@ void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
}
void BlockBuilder::EndBlock() {
- builder()->Bind(&block_end_);
- BindBreakTarget();
+ if (statement_->labels() != nullptr) {
+ builder()->Bind(&block_end_);
+ BindBreakTarget();
+ }
+ if (block_coverage_builder_ != nullptr && needs_continuation_counter_) {
+ block_coverage_builder_->IncrementBlockCounter(
+ statement_, SourceRangeKind::kContinuation);
+ }
}
LoopBuilder::~LoopBuilder() {
@@ -52,6 +58,11 @@ LoopBuilder::~LoopBuilder() {
if (generator_jump_table_location_ != nullptr) {
*generator_jump_table_location_ = parent_generator_jump_table_;
}
+ // Generate block coverage counter for the continuation.
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(
+ block_coverage_continuation_slot_);
+ }
}
void LoopBuilder::LoopHeader() {
@@ -83,6 +94,12 @@ void LoopBuilder::LoopHeaderInGenerator(
builder()->AllocateJumpTable(resume_count, first_resume_id);
}
+void LoopBuilder::LoopBody() {
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_builder_->IncrementBlockCounter(block_coverage_body_slot_);
+ }
+}
+
void LoopBuilder::JumpToHeader(int loop_depth) {
// Pass the proper loop nesting level to the backwards branch, to trigger
// on-stack replacement when armed for the given loop nesting depth.
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 8cff017e78..da32e510b9 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -7,6 +7,8 @@
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/ast/ast-source-ranges.h"
+#include "src/interpreter/block-coverage-builder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/zone/zone-containers.h"
@@ -55,6 +57,8 @@ class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
BytecodeLabels* break_labels() { return &break_labels_; }
+ void set_needs_continuation_counter() { needs_continuation_counter_ = true; }
+
protected:
void EmitJump(BytecodeLabels* labels);
void EmitJumpIfTrue(BytecodeArrayBuilder::ToBooleanMode mode,
@@ -66,6 +70,10 @@ class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
// Unbound labels that identify jumps for break statements in the code.
BytecodeLabels break_labels_;
+
+ // A continuation counter (for block coverage) is needed e.g. when
+ // encountering a break statement.
+ bool needs_continuation_counter_ = false;
};
@@ -73,13 +81,19 @@ class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
class V8_EXPORT_PRIVATE BlockBuilder final
: public BreakableControlFlowBuilder {
public:
- explicit BlockBuilder(BytecodeArrayBuilder* builder)
- : BreakableControlFlowBuilder(builder) {}
+ BlockBuilder(BytecodeArrayBuilder* builder,
+ BlockCoverageBuilder* block_coverage_builder,
+ BreakableStatement* statement)
+ : BreakableControlFlowBuilder(builder),
+ block_coverage_builder_(block_coverage_builder),
+ statement_(statement) {}
void EndBlock();
private:
BytecodeLabel block_end_;
+ BlockCoverageBuilder* block_coverage_builder_;
+ BreakableStatement* statement_;
};
@@ -87,16 +101,28 @@ class V8_EXPORT_PRIVATE BlockBuilder final
// their loop.
class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
public:
- explicit LoopBuilder(BytecodeArrayBuilder* builder)
+ LoopBuilder(BytecodeArrayBuilder* builder,
+ BlockCoverageBuilder* block_coverage_builder, AstNode* node)
: BreakableControlFlowBuilder(builder),
continue_labels_(builder->zone()),
generator_jump_table_location_(nullptr),
- parent_generator_jump_table_(nullptr) {}
+ parent_generator_jump_table_(nullptr),
+ block_coverage_builder_(block_coverage_builder) {
+ if (block_coverage_builder_ != nullptr) {
+ block_coverage_body_slot_ =
+ block_coverage_builder_->AllocateBlockCoverageSlot(
+ node, SourceRangeKind::kBody);
+ block_coverage_continuation_slot_ =
+ block_coverage_builder_->AllocateBlockCoverageSlot(
+ node, SourceRangeKind::kContinuation);
+ }
+ }
~LoopBuilder();
void LoopHeader();
void LoopHeaderInGenerator(BytecodeJumpTable** parent_generator_jump_table,
int first_resume_id, int resume_count);
+ void LoopBody();
void JumpToHeader(int loop_depth);
void BindContinueTarget();
@@ -120,6 +146,10 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
// field is ugly, figure out a better way to do this.
BytecodeJumpTable** generator_jump_table_location_;
BytecodeJumpTable* parent_generator_jump_table_;
+
+ int block_coverage_body_slot_;
+ int block_coverage_continuation_slot_;
+ BlockCoverageBuilder* block_coverage_builder_;
};
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 070c89549b..50489067b8 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -563,12 +563,12 @@ Node* InterpreterAssembler::CallJSWithFeedback(
compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
compiler::Node* slot_id, compiler::Node* feedback_vector,
- ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode) {
+ ConvertReceiverMode receiver_mode) {
// Static checks to assert it is safe to examine the type feedback element.
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
+ // AllocationSite::kTransitionInfoOrBoilerplateOffset - contains a Smi or
+ // pointer to FixedArray.
// WeakCell::kValueOffset - contains a JSFunction or Smi(0)
// Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
@@ -579,7 +579,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOrBoilerplateOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
@@ -587,6 +587,9 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
end(this);
+ // Increment the call count.
+ IncrementCallCount(feedback_vector, slot_id);
+
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
@@ -600,13 +603,9 @@ Node* InterpreterAssembler::CallJSWithFeedback(
BIND(&call_function);
{
- // Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
-
// Call using call function builtin.
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
- isolate(), receiver_mode, tail_call_mode,
- InterpreterPushArgsMode::kJSFunction);
+ isolate(), receiver_mode, InterpreterPushArgsMode::kJSFunction);
Node* code_target = HeapConstant(callable.code());
Node* ret_value = CallStub(callable.descriptor(), code_target, context,
arg_count, first_arg, function);
@@ -627,8 +626,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
GotoIf(is_megamorphic, &call);
Comment("check if it is an allocation site");
- GotoIfNot(IsAllocationSiteMap(LoadMap(feedback_element)),
- &check_initialized);
+ GotoIfNot(IsAllocationSite(feedback_element), &check_initialized);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// For undefined receivers (mostly global calls), do an additional check
@@ -641,9 +639,6 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Node* is_array_function = WordEqual(context_slot, function);
GotoIfNot(is_array_function, &mark_megamorphic);
- // It is a monomorphic Array function. Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
-
// Call ArrayConstructorStub.
Callable callable_call =
CodeFactory::InterpreterPushArgsThenConstructArray(isolate());
@@ -724,14 +719,10 @@ Node* InterpreterAssembler::CallJSWithFeedback(
BIND(&call);
{
- Comment("Increment call count and call using Call builtin");
- // Increment the call count.
- IncrementCallCount(feedback_vector, slot_id);
-
+ Comment("invoke using Call builtin");
// Call using call builtin.
Callable callable_call = CodeFactory::InterpreterPushArgsThenCall(
- isolate(), receiver_mode, tail_call_mode,
- InterpreterPushArgsMode::kOther);
+ isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target_call = HeapConstant(callable_call.code());
Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
context, arg_count, first_arg, function);
@@ -745,15 +736,13 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
- ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode receiver_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
- isolate(), receiver_mode, tail_call_mode,
- InterpreterPushArgsMode::kOther);
+ isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
@@ -765,7 +754,7 @@ Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
- isolate(), ConvertReceiverMode::kAny, TailCallMode::kDisallow,
+ isolate(), ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
@@ -783,10 +772,8 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Label call_construct_function(this, &allocation_feedback),
extra_checks(this, Label::kDeferred), call_construct(this), end(this);
- // Slot id of 0 is used to indicate no type feedback is available.
- STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
- Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
- GotoIf(is_feedback_unavailable, &call_construct);
+ // Increment the call count.
+ IncrementCallCount(feedback_vector, slot_id);
// Check that the constructor is not a smi.
Node* is_smi = TaggedIsSmi(constructor);
@@ -807,8 +794,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
BIND(&call_construct_function);
{
- Comment("call using ConstructFunction");
- IncrementCallCount(feedback_vector, slot_id);
+ Comment("construct using ConstructFunction");
Callable callable_function = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kJSFunction);
return_value.Bind(CallStub(callable_function.descriptor(),
@@ -945,8 +931,10 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* constructor,
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Node* first_arg, Node* arg_count,
int result_size) {
- DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
- DCHECK(Bytecodes::IsCallRuntime(bytecode_));
+ DCHECK_IMPLIES(Bytecodes::IsCallRuntime(bytecode_),
+ Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+ DCHECK(Bytecodes::IsCallRuntime(bytecode_) ||
+ bytecode_ == Bytecode::kStringConcat);
Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
Node* code_target = HeapConstant(callable.code());
@@ -1252,8 +1240,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
BIND(&if_valueisnotoddball);
{
// Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
+ var_value.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, value));
var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
Goto(&loop);
}
@@ -1379,29 +1367,32 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
- V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_PPC
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return true;
#else
#error "Unknown Architecture"
#endif
}
-Node* InterpreterAssembler::RegisterCount() {
- Node* bytecode_array = LoadRegister(Register::bytecode_array());
- Node* frame_size = LoadObjectField(
- bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
- return WordShr(ChangeUint32ToWord(frame_size),
- IntPtrConstant(kPointerSizeLog2));
+void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
+ Node* register_count) {
+ Node* array_size = LoadAndUntagFixedArrayBaseLength(register_file);
+
+ Label ok(this), abort(this, Label::kDeferred);
+ Branch(UintPtrLessThanOrEqual(register_count, array_size), &ok, &abort);
+
+ BIND(&abort);
+ Abort(kInvalidRegisterFileInGenerator);
+ Goto(&ok);
+
+ BIND(&ok);
}
-Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
- Node* register_count = RegisterCount();
+Node* InterpreterAssembler::ExportRegisterFile(Node* array,
+ Node* register_count) {
if (FLAG_debug_code) {
- Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
- AbortIfWordNotEqual(array_size, register_count,
- kInvalidRegisterFileInGenerator);
+ AbortIfRegisterCountInvalid(array, register_count);
}
Variable var_index(this, MachineType::PointerRepresentation());
@@ -1430,12 +1421,10 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
return array;
}
-Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
- Node* register_count = RegisterCount();
+Node* InterpreterAssembler::ImportRegisterFile(Node* array,
+ Node* register_count) {
if (FLAG_debug_code) {
- Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
- AbortIfWordNotEqual(array_size, register_count,
- kInvalidRegisterFileInGenerator);
+ AbortIfRegisterCountInvalid(array, register_count);
}
Variable var_index(this, MachineType::PointerRepresentation());
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index c2e0bb3bd7..7a4d836f05 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -81,12 +81,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
compiler::Node* depth, Label* target);
- // Number of registers.
- compiler::Node* RegisterCount();
-
// Backup/restore register file to/from a fixed array of the correct length.
- compiler::Node* ExportRegisterFile(compiler::Node* array);
- compiler::Node* ImportRegisterFile(compiler::Node* array);
+ compiler::Node* ExportRegisterFile(compiler::Node* array,
+ compiler::Node* register_count);
+ compiler::Node* ImportRegisterFile(compiler::Node* array,
+ compiler::Node* register_count);
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(Register reg);
@@ -125,19 +124,20 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// If the |receiver_mode| is kNullOrUndefined, then the receiver is implicitly
// undefined and |first_arg| is the first parameter. Otherwise, |first_arg| is
// the receiver and it is converted according to |receiver_mode|.
- compiler::Node* CallJSWithFeedback(
- compiler::Node* function, compiler::Node* context,
- compiler::Node* first_arg, compiler::Node* arg_count,
- compiler::Node* slot_id, compiler::Node* feedback_vector,
- ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode);
+ compiler::Node* CallJSWithFeedback(compiler::Node* function,
+ compiler::Node* context,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count,
+ compiler::Node* slot_id,
+ compiler::Node* feedback_vector,
+ ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|, possibly
// including the receiver depending on |receiver_mode|.
compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
- ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode);
+ ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
@@ -221,6 +221,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void Abort(BailoutReason bailout_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
BailoutReason bailout_reason);
+ // Abort if |register_count| is invalid for the given register file array.
+ void AbortIfRegisterCountInvalid(compiler::Node* register_file,
+ compiler::Node* register_count);
// Dispatch to frame dropper trampoline if necessary.
void MaybeDropFrames(compiler::Node* context);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index b02e024d65..226bc6edc4 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -9,7 +9,9 @@
#include "src/builtins/builtins-arguments-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-conversion-gen.h"
#include "src/builtins/builtins-forin-gen.h"
+#include "src/builtins/builtins-string-gen.h"
#include "src/code-events.h"
#include "src/code-factory.h"
#include "src/factory.h"
@@ -498,34 +500,62 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
LookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof);
}
-// StaLookupSlotSloppy <name_index>
+// StaLookupSlot <name_index> <flags>
//
// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in sloppy mode.
-IGNITION_HANDLER(StaLookupSlotSloppy, InterpreterAssembler) {
+// pool entry |name_index|.
+IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
+ Node* bytecode_flags = BytecodeOperandFlag(1);
Node* name = LoadConstantPoolEntry(index);
Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value);
- SetAccumulator(result);
- Dispatch();
-}
+ Variable var_result(this, MachineRepresentation::kTagged);
-// StaLookupSlotStrict <name_index>
-//
-// Store the object in accumulator to the object with the name in constant
-// pool entry |name_index| in strict mode.
-IGNITION_HANDLER(StaLookupSlotStrict, InterpreterAssembler) {
- Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(index);
- Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value);
- SetAccumulator(result);
- Dispatch();
+ Label sloppy(this), strict(this), end(this);
+ DCHECK_EQ(0, SLOPPY);
+ DCHECK_EQ(1, STRICT);
+ DCHECK_EQ(0, static_cast<int>(LookupHoistingMode::kNormal));
+ DCHECK_EQ(1, static_cast<int>(LookupHoistingMode::kLegacySloppy));
+ Branch(IsSetWord32<StoreLookupSlotFlags::LanguageModeBit>(bytecode_flags),
+ &strict, &sloppy);
+
+ BIND(&strict);
+ {
+ CSA_ASSERT(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
+ bytecode_flags));
+ var_result.Bind(
+ CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value));
+ Goto(&end);
+ }
+
+ BIND(&sloppy);
+ {
+ Label hoisting(this), ordinary(this);
+ Branch(IsSetWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
+ bytecode_flags),
+ &hoisting, &ordinary);
+
+ BIND(&hoisting);
+ {
+ var_result.Bind(CallRuntime(Runtime::kStoreLookupSlot_SloppyHoisting,
+ context, name, value));
+ Goto(&end);
+ }
+
+ BIND(&ordinary);
+ {
+ var_result.Bind(
+ CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value));
+ Goto(&end);
+ }
+ }
+
+ BIND(&end);
+ {
+ SetAccumulator(var_result.value());
+ Dispatch();
+ }
}
// LdaNamedProperty <object> <name_index> <slot>
@@ -569,7 +599,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
// in the accumulator.
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
Node* code_target = HeapConstant(ic.code());
Node* reg_index = BytecodeOperandReg(0);
Node* object = LoadRegister(reg_index);
@@ -828,10 +858,9 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
OperandScale operand_scale)
: InterpreterAssembler(state, bytecode, operand_scale) {}
- typedef Node* (BinaryOpAssembler::*BinaryOpGenerator)(Node* context,
- Node* left, Node* right,
- Node* slot,
- Node* vector);
+ typedef Node* (BinaryOpAssembler::*BinaryOpGenerator)(
+ Node* context, Node* left, Node* right, Node* slot, Node* vector,
+ Node* function, bool lhs_is_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
Node* reg_index = BytecodeOperandReg(0);
@@ -840,10 +869,26 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
+ Node* function = LoadRegister(Register::function_closure());
BinaryOpAssembler binop_asm(state());
- Node* result =
- (binop_asm.*generator)(context, lhs, rhs, slot_index, feedback_vector);
+ Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
+ feedback_vector, function, false);
+ SetAccumulator(result);
+ Dispatch();
+ }
+
+ void BinaryOpSmiWithFeedback(BinaryOpGenerator generator) {
+ Node* lhs = GetAccumulator();
+ Node* rhs = BytecodeOperandImmSmi(0);
+ Node* context = GetContext();
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* function = LoadRegister(Register::function_closure());
+
+ BinaryOpAssembler binop_asm(state());
+ Node* result = (binop_asm.*generator)(context, lhs, rhs, slot_index,
+ feedback_vector, function, true);
SetAccumulator(result);
Dispatch();
}
@@ -887,223 +932,36 @@ IGNITION_HANDLER(Mod, InterpreterBinaryOpAssembler) {
// AddSmi <imm>
//
// Adds an immediate value <imm> to the value in the accumulator.
-IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
- Variable var_result(this, MachineRepresentation::kTagged);
- Label fastpath(this), slowpath(this, Label::kDeferred), end(this);
-
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- // {right} is known to be a Smi.
- // Check if the {left} is a Smi take the fast path.
- Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- BIND(&fastpath);
- {
- // Try fast Smi addition first.
- Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(left),
- BitcastTaggedToWord(right));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi additon overflowed.
- Label if_notoverflow(this);
- Branch(overflow, &slowpath, &if_notoverflow);
- BIND(&if_notoverflow);
- {
- UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
- feedback_vector, slot_index);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
- }
- }
- BIND(&slowpath);
- {
- Node* context = GetContext();
- // TODO(ishell): pass slot as word-size value.
- var_result.Bind(CallBuiltin(Builtins::kAddWithFeedback, context, left,
- right, TruncateWordToWord32(slot_index),
- feedback_vector));
- Goto(&end);
- }
- BIND(&end);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
+IGNITION_HANDLER(AddSmi, InterpreterBinaryOpAssembler) {
+ BinaryOpSmiWithFeedback(&BinaryOpAssembler::Generate_AddWithFeedback);
}
// SubSmi <imm>
//
// Subtracts an immediate value <imm> from the value in the accumulator.
-IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
- Variable var_result(this, MachineRepresentation::kTagged);
- Label fastpath(this), slowpath(this, Label::kDeferred), end(this);
-
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- // {right} is known to be a Smi.
- // Check if the {left} is a Smi take the fast path.
- Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- BIND(&fastpath);
- {
- // Try fast Smi subtraction first.
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(left),
- BitcastTaggedToWord(right));
- Node* overflow = Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_notoverflow(this);
- Branch(overflow, &slowpath, &if_notoverflow);
- BIND(&if_notoverflow);
- {
- UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
- feedback_vector, slot_index);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
- Goto(&end);
- }
- }
- BIND(&slowpath);
- {
- Node* context = GetContext();
- // TODO(ishell): pass slot as word-size value.
- var_result.Bind(CallBuiltin(Builtins::kSubtractWithFeedback, context, left,
- right, TruncateWordToWord32(slot_index),
- feedback_vector));
- Goto(&end);
- }
- BIND(&end);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
+IGNITION_HANDLER(SubSmi, InterpreterBinaryOpAssembler) {
+ BinaryOpSmiWithFeedback(&BinaryOpAssembler::Generate_SubtractWithFeedback);
}
// MulSmi <imm>
//
// Multiplies an immediate value <imm> to the value in the accumulator.
-IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
- Variable var_result(this, MachineRepresentation::kTagged);
- Label fastpath(this), slowpath(this, Label::kDeferred), end(this);
-
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- // {right} is known to be a Smi.
- // Check if the {left} is a Smi take the fast path.
- Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- BIND(&fastpath);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
- // in case of overflow.
- var_result.Bind(SmiMul(left, right));
- Node* feedback = SelectSmiConstant(TaggedIsSmi(var_result.value()),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(feedback, feedback_vector, slot_index);
- Goto(&end);
- }
- BIND(&slowpath);
- {
- Node* context = GetContext();
- // TODO(ishell): pass slot as word-size value.
- var_result.Bind(CallBuiltin(Builtins::kMultiplyWithFeedback, context, left,
- right, TruncateWordToWord32(slot_index),
- feedback_vector));
- Goto(&end);
- }
-
- BIND(&end);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
+IGNITION_HANDLER(MulSmi, InterpreterBinaryOpAssembler) {
+ BinaryOpSmiWithFeedback(&BinaryOpAssembler::Generate_MultiplyWithFeedback);
}
// DivSmi <imm>
//
// Divides the value in the accumulator by immediate value <imm>.
-IGNITION_HANDLER(DivSmi, InterpreterAssembler) {
- Variable var_result(this, MachineRepresentation::kTagged);
- Label fastpath(this), slowpath(this, Label::kDeferred), end(this);
-
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- // {right} is known to be a Smi.
- // Check if the {left} is a Smi take the fast path.
- Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- BIND(&fastpath);
- {
- var_result.Bind(TrySmiDiv(left, right, &slowpath));
- UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
- feedback_vector, slot_index);
- Goto(&end);
- }
- BIND(&slowpath);
- {
- Node* context = GetContext();
- // TODO(ishell): pass slot as word-size value.
- var_result.Bind(CallBuiltin(Builtins::kDivideWithFeedback, context, left,
- right, TruncateWordToWord32(slot_index),
- feedback_vector));
- Goto(&end);
- }
-
- BIND(&end);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
+IGNITION_HANDLER(DivSmi, InterpreterBinaryOpAssembler) {
+ BinaryOpSmiWithFeedback(&BinaryOpAssembler::Generate_DivideWithFeedback);
}
// ModSmi <imm>
//
// Modulo accumulator by immediate value <imm>.
-IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
- Variable var_result(this, MachineRepresentation::kTagged);
- Label fastpath(this), slowpath(this, Label::kDeferred), end(this);
-
- Node* left = GetAccumulator();
- Node* right = BytecodeOperandImmSmi(0);
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- // {right} is known to be a Smi.
- // Check if the {left} is a Smi take the fast path.
- Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- BIND(&fastpath);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi.
- var_result.Bind(SmiMod(left, right));
- Node* feedback = SelectSmiConstant(TaggedIsSmi(var_result.value()),
- BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(feedback, feedback_vector, slot_index);
- Goto(&end);
- }
- BIND(&slowpath);
- {
- Node* context = GetContext();
- // TODO(ishell): pass slot as word-size value.
- var_result.Bind(CallBuiltin(Builtins::kModulusWithFeedback, context, left,
- right, TruncateWordToWord32(slot_index),
- feedback_vector));
- Goto(&end);
- }
-
- BIND(&end);
- {
- SetAccumulator(var_result.value());
- Dispatch();
- }
+IGNITION_HANDLER(ModSmi, InterpreterBinaryOpAssembler) {
+ BinaryOpSmiWithFeedback(&BinaryOpAssembler::Generate_ModulusWithFeedback);
}
class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
@@ -1177,8 +1035,9 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
Node* input_feedback =
SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, input_feedback), feedback_vector,
- slot_index);
+ slot_index, function);
SetAccumulator(result);
Dispatch();
}
@@ -1254,8 +1113,9 @@ IGNITION_HANDLER(BitwiseOrSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
+ feedback_vector, slot_index, function);
SetAccumulator(result);
Dispatch();
}
@@ -1279,8 +1139,9 @@ IGNITION_HANDLER(BitwiseXorSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
+ feedback_vector, slot_index, function);
SetAccumulator(result);
Dispatch();
}
@@ -1304,8 +1165,9 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
+ feedback_vector, slot_index, function);
SetAccumulator(result);
Dispatch();
}
@@ -1332,8 +1194,9 @@ IGNITION_HANDLER(ShiftLeftSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
+ feedback_vector, slot_index, function);
SetAccumulator(result);
Dispatch();
}
@@ -1360,8 +1223,9 @@ IGNITION_HANDLER(ShiftRightSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
+ feedback_vector, slot_index, function);
SetAccumulator(result);
Dispatch();
}
@@ -1388,13 +1252,14 @@ IGNITION_HANDLER(ShiftRightLogicalSmi, InterpreterAssembler) {
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
+ Node* function = LoadRegister(Register::function_closure());
UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
- feedback_vector, slot_index);
+ feedback_vector, slot_index, function);
SetAccumulator(result);
Dispatch();
}
-// ToName
+// ToName <dst>
//
// Convert the object referenced by the accumulator to a name.
IGNITION_HANDLER(ToName, InterpreterAssembler) {
@@ -1419,8 +1284,7 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
if_objectisother(this, Label::kDeferred);
GotoIf(TaggedIsSmi(object), &if_objectissmi);
- Node* object_map = LoadMap(object);
- Branch(IsHeapNumberMap(object_map), &if_objectisnumber, &if_objectisother);
+ Branch(IsHeapNumber(object), &if_objectisnumber, &if_objectisother);
BIND(&if_objectissmi);
{
@@ -1439,8 +1303,7 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
BIND(&if_objectisother);
{
// Convert the {object} to a Number.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_result.Bind(CallStub(callable, context, object));
+ var_result.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, object));
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
Goto(&if_done);
}
@@ -1451,16 +1314,18 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
// Record the type feedback collected for {object}.
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* function = LoadRegister(Register::function_closure());
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
+ function);
Dispatch();
}
-// ToObject
+// ToObject <dst>
//
// Convert the object referenced by the accumulator to a JSReceiver.
IGNITION_HANDLER(ToObject, InterpreterAssembler) {
- Callable callable(CodeFactory::ToObject(isolate()));
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
Node* target = HeapConstant(callable.code());
Node* accumulator = GetAccumulator();
Node* context = GetContext();
@@ -1469,14 +1334,57 @@ IGNITION_HANDLER(ToObject, InterpreterAssembler) {
Dispatch();
}
+// ToPrimitiveToString <dst>
+//
+// Convert the object referenced by the accumulator to a primitive, and then
+// convert that primitive to a string, in preparation for use by StringConcat.
+IGNITION_HANDLER(ToPrimitiveToString, InterpreterAssembler) {
+ VARIABLE(feedback, MachineRepresentation::kTagged);
+ ConversionBuiltinsAssembler conversions_assembler(state());
+ Node* result = conversions_assembler.ToPrimitiveToString(
+ GetContext(), GetAccumulator(), &feedback);
+
+ Node* function = LoadRegister(Register::function_closure());
+ UpdateFeedback(feedback.value(), LoadFeedbackVector(), BytecodeOperandIdx(1),
+ function);
+ StoreRegister(result, BytecodeOperandReg(0));
+ Dispatch();
+}
+
+// StringConcat <first_reg> <reg_count>
+//
+// Concatenates the string values in registers <first_reg> to
+// <first_reg> + <reg_count - 1> and saves the result in the accumulator.
+IGNITION_HANDLER(StringConcat, InterpreterAssembler) {
+ Label call_runtime(this, Label::kDeferred), done(this);
+
+ Node* first_reg_ptr = RegisterLocation(BytecodeOperandReg(0));
+ Node* reg_count = BytecodeOperandCount(1);
+ Node* context = GetContext();
+
+ VARIABLE(result, MachineRepresentation::kTagged);
+ StringBuiltinsAssembler string_assembler(state());
+ result.Bind(string_assembler.ConcatenateStrings(context, first_reg_ptr,
+ reg_count, &call_runtime));
+ Goto(&done);
+
+ BIND(&call_runtime);
+ {
+ Comment("Call runtime.");
+ Node* runtime_id = Int32Constant(Runtime::kStringConcat);
+ result.Bind(CallRuntimeN(runtime_id, context, first_reg_ptr, reg_count));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ SetAccumulator(result.value());
+ Dispatch();
+}
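The StringConcat handler above follows the usual fast-path-with-runtime-fallback shape: try the CSA implementation first, and branch to a deferred label that calls the runtime for anything it cannot handle. A rough stand-alone analogue in plain C++ (hypothetical names; the size cutoff is an arbitrary placeholder for whatever cases the real fast path supports):

#include <optional>
#include <string>

// Stand-in for the CSA fast path; bails out (returns nullopt) for inputs it
// does not support, the way ConcatenateStrings jumps to &call_runtime.
std::optional<std::string> TryFastConcat(const std::string& a,
                                         const std::string& b) {
  if (a.size() + b.size() > 16) return std::nullopt;  // arbitrary cutoff
  return a + b;
}

// Stand-in for Runtime::kStringConcat: always works, but is the slow path.
std::string SlowConcat(const std::string& a, const std::string& b) {
  return a + b;
}

std::string Concat(const std::string& a, const std::string& b) {
  if (auto fast = TryFastConcat(a, b)) return *fast;
  return SlowConcat(a, b);
}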
+
// Inc
//
// Increments value in the accumulator by one.
IGNITION_HANDLER(Inc, InterpreterAssembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
Node* value = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(0);
@@ -1505,7 +1413,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
BIND(&if_issmi);
{
// Try fast Smi addition first.
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
BitcastTaggedToWord(one));
Node* overflow = Projection(1, pair);
@@ -1568,9 +1476,9 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
BIND(&if_valuenotoddball);
{
// Convert to a Number first and try again.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- value_var.Bind(CallStub(callable, context, value));
+ value_var.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, value));
Goto(&start);
}
}
@@ -1590,7 +1498,9 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* function = LoadRegister(Register::function_closure());
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
+ function);
SetAccumulator(result_var.value());
Dispatch();
@@ -1600,10 +1510,6 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
//
// Decrements value in the accumulator by one.
IGNITION_HANDLER(Dec, InterpreterAssembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
Node* value = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(0);
@@ -1632,7 +1538,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
BIND(&if_issmi);
{
// Try fast Smi subtraction first.
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(value),
BitcastTaggedToWord(one));
Node* overflow = Projection(1, pair);
@@ -1695,9 +1601,9 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
BIND(&if_valuenotoddball);
{
// Convert to a Number first and try again.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
- value_var.Bind(CallStub(callable, context, value));
+ value_var.Bind(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, value));
Goto(&start);
}
}
@@ -1717,7 +1623,9 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* function = LoadRegister(Register::function_closure());
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
+ function);
SetAccumulator(result_var.value());
Dispatch();
@@ -1841,7 +1749,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
// Generates code to perform a JS call that collects type feedback.
- void JSCall(ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode) {
+ void JSCall(ConvertReceiverMode receiver_mode) {
Node* function_reg = BytecodeOperandReg(0);
Node* function = LoadRegister(function_reg);
Node* first_arg_reg = BytecodeOperandReg(1);
@@ -1859,9 +1767,8 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result =
- CallJSWithFeedback(function, context, first_arg, args_count, slot_id,
- feedback_vector, receiver_mode, tail_call_mode);
+ Node* result = CallJSWithFeedback(function, context, first_arg, args_count,
+ slot_id, feedback_vector, receiver_mode);
SetAccumulator(result);
Dispatch();
}
@@ -1924,11 +1831,11 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// |arg_count| arguments in subsequent registers. Collect type feedback
// into |feedback_slot_id|
IGNITION_HANDLER(CallAnyReceiver, InterpreterJSCallAssembler) {
- JSCall(ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+ JSCall(ConvertReceiverMode::kAny);
}
IGNITION_HANDLER(CallProperty, InterpreterJSCallAssembler) {
- JSCall(ConvertReceiverMode::kNotNullOrUndefined, TailCallMode::kDisallow);
+ JSCall(ConvertReceiverMode::kNotNullOrUndefined);
}
IGNITION_HANDLER(CallProperty0, InterpreterJSCallAssembler) {
@@ -1944,7 +1851,7 @@ IGNITION_HANDLER(CallProperty2, InterpreterJSCallAssembler) {
}
IGNITION_HANDLER(CallUndefinedReceiver, InterpreterJSCallAssembler) {
- JSCall(ConvertReceiverMode::kNullOrUndefined, TailCallMode::kDisallow);
+ JSCall(ConvertReceiverMode::kNullOrUndefined);
}
IGNITION_HANDLER(CallUndefinedReceiver0, InterpreterJSCallAssembler) {
@@ -1959,15 +1866,6 @@ IGNITION_HANDLER(CallUndefinedReceiver2, InterpreterJSCallAssembler) {
JSCallN(2, ConvertReceiverMode::kNullOrUndefined);
}
-// TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
-//
-// Tail call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers. Collect type feedback
-// into |feedback_slot_id|
-IGNITION_HANDLER(TailCall, InterpreterJSCallAssembler) {
- JSCall(ConvertReceiverMode::kAny, TailCallMode::kAllow);
-}
-
// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
@@ -2044,7 +1942,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
// Call the function.
Node* result = CallJS(function, context, first_arg, args_count,
- ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+ ConvertReceiverMode::kAny);
SetAccumulator(result);
Dispatch();
}
@@ -2156,7 +2054,9 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
Node* slot_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ Node* function = LoadRegister(Register::function_closure());
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index,
+ function);
SetAccumulator(result);
Dispatch();
}
@@ -2262,12 +2162,7 @@ IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
GotoIf(TaggedIsSmi(object), &end);
// If it is a HeapObject, load the map and check for undetectable bit.
- Node* map = LoadMap(object);
- Node* map_bitfield = LoadMapBitField(map);
- Node* map_undetectable =
- Word32And(map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
- Node* result =
- SelectBooleanConstant(Word32NotEqual(map_undetectable, Int32Constant(0)));
+ Node* result = SelectBooleanConstant(IsUndetectableMap(LoadMap(object)));
SetAccumulator(result);
Goto(&end);
@@ -2357,12 +2252,8 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Comment("IfUndefined");
GotoIf(TaggedIsSmi(object), &if_false);
// Check it is not null and the map has the undetectable bit set.
- GotoIf(WordEqual(object, NullConstant()), &if_false);
- Node* map_bitfield = LoadMapBitField(LoadMap(object));
- Node* undetectable_bit =
- Word32And(map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
- Branch(Word32Equal(undetectable_bit, Int32Constant(0)), &if_false,
- &if_true);
+ GotoIf(IsNull(object), &if_false);
+ Branch(IsUndetectableMap(LoadMap(object)), &if_true, &if_false);
}
BIND(&if_function);
{
@@ -2690,29 +2581,6 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Dispatch();
}
-// JumpIfNotHole <imm>
-//
-// Jump by the number of bytes represented by an immediate operand if the object
-// referenced by the accumulator is the hole.
-IGNITION_HANDLER(JumpIfNotHole, InterpreterAssembler) {
- Node* accumulator = GetAccumulator();
- Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
- Node* relative_jump = BytecodeOperandUImmWord(0);
- JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
-}
-
-// JumpIfNotHoleConstant <idx>
-//
-// Jump by the number of bytes in the Smi in the |idx| entry in the constant
-// pool if the object referenced by the accumulator is the hole constant.
-IGNITION_HANDLER(JumpIfNotHoleConstant, InterpreterAssembler) {
- Node* accumulator = GetAccumulator();
- Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
- JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
-}
-
// JumpLoop <imm> <loop_depth>
//
// Jump by the number of bytes represented by the immediate operand |imm|. Also
@@ -2802,9 +2670,9 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
Node* bytecode_flags = BytecodeOperandFlag(2);
Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
- Branch(
- IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(bytecode_flags),
- &fast_shallow_clone, &call_runtime);
+ Branch(IsSetWord32<CreateArrayLiteralFlags::FastCloneSupportedBit>(
+ bytecode_flags),
+ &fast_shallow_clone, &call_runtime);
BIND(&fast_shallow_clone);
{
@@ -2858,7 +2726,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
{
// If we can't do a fast clone, call into the runtime.
Node* index = BytecodeOperandIdx(0);
- Node* constant_elements = LoadConstantPoolEntry(index);
+ Node* boilerplate_description = LoadConstantPoolEntry(index);
Node* context = GetContext();
Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
@@ -2866,7 +2734,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
Node* flags = SmiTag(flags_raw);
Node* result = CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
- literal_index, constant_elements, flags);
+ literal_index, boilerplate_description, flags);
StoreRegister(result, BytecodeOperandReg(3));
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
@@ -3000,13 +2868,14 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
// duplicate parameters.
Node* shared_info =
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
- Node* compiler_hints = LoadObjectField(
- shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
- MachineType::Uint8());
- Node* duplicate_parameters_bit = Int32Constant(
- 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
- Node* compare = Word32And(compiler_hints, duplicate_parameters_bit);
- Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+ Node* compiler_hints =
+ LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* has_duplicate_parameters =
+ IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(
+ compiler_hints);
+ Branch(has_duplicate_parameters, &if_duplicate_parameters,
+ &if_not_duplicate_parameters);
BIND(&if_not_duplicate_parameters);
{
@@ -3117,6 +2986,65 @@ IGNITION_HANDLER(Return, InterpreterAssembler) {
Return(accumulator);
}
+// ThrowReferenceErrorIfHole <variable_name>
+//
+// Throws an exception if the value in the accumulator is TheHole.
+IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
+ Node* value = GetAccumulator();
+ Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
+
+ Label throw_error(this, Label::kDeferred);
+ GotoIf(WordEqual(value, the_hole_value), &throw_error);
+ Dispatch();
+
+ BIND(&throw_error);
+ {
+ Node* name = LoadConstantPoolEntry(BytecodeOperandIdx(0));
+ CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
+ // We shouldn't ever return from a throw.
+ Abort(kUnexpectedReturnFromThrow);
+ }
+}
+
+// ThrowSuperNotCalledIfHole
+//
+// Throws an exception if the value in the accumulator is TheHole.
+IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
+ Node* value = GetAccumulator();
+ Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
+
+ Label throw_error(this, Label::kDeferred);
+ GotoIf(WordEqual(value, the_hole_value), &throw_error);
+ Dispatch();
+
+ BIND(&throw_error);
+ {
+ CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
+ // We shouldn't ever return from a throw.
+ Abort(kUnexpectedReturnFromThrow);
+ }
+}
+
+// ThrowSuperAlreadyCalledIfNotHole
+//
+// Throws a SuperAlreadyCalled exception if the value in the accumulator is not
+// TheHole.
+IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
+ Node* value = GetAccumulator();
+ Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
+
+ Label throw_error(this, Label::kDeferred);
+ GotoIf(WordNotEqual(value, the_hole_value), &throw_error);
+ Dispatch();
+
+ BIND(&throw_error);
+ {
+ CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
+ // We shouldn't ever return from a throw.
+ Abort(kUnexpectedReturnFromThrow);
+ }
+}
+
// Debugger
//
// Call runtime to handle debugger statement.
@@ -3141,6 +3069,20 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
+// IncBlockCounter <slot>
+//
+// Increment the execution count for the given slot. Used for block code
+// coverage.
+IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
+ Node* closure = LoadRegister(Register::function_closure());
+ Node* coverage_array_slot = BytecodeOperandIdxSmi(0);
+ Node* context = GetContext();
+
+ CallRuntime(Runtime::kIncBlockCounter, context, closure, coverage_array_slot);
+
+ Dispatch();
+}
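IncBlockCounter is the bytecode-level hook for block coverage: it asks the runtime to bump one counter in the closure's coverage data. A conceptual sketch of what that bookkeeping amounts to, with hypothetical names (CoverageInfo, slots):

#include <cstdint>
#include <vector>

struct CoverageInfo {
  // One execution counter per instrumented source range (block).
  std::vector<uint32_t> slots;
};

void IncBlockCounter(CoverageInfo* info, size_t slot) {
  // In spirit, what Runtime::kIncBlockCounter does: increment the counter for
  // the block identified by the bytecode's slot operand.
  ++info->slots.at(slot);
}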
+
class InterpreterForInPrepareAssembler : public InterpreterAssembler {
public:
InterpreterForInPrepareAssembler(CodeAssemblerState* state, Bytecode bytecode,
@@ -3245,8 +3187,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
// Need to filter the {key} for the {receiver}.
Node* context = GetContext();
- Callable callable = CodeFactory::ForInFilter(isolate());
- Node* result = CallStub(callable, context, key, receiver);
+ Node* result = CallBuiltin(Builtins::kForInFilter, context, key, receiver);
SetAccumulator(result);
Dispatch();
}
@@ -3285,7 +3226,7 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
IGNITION_HANDLER(ForInStep, InterpreterAssembler) {
Node* index_reg = BytecodeOperandReg(0);
Node* index = LoadRegister(index_reg);
- Node* one = SmiConstant(Smi::FromInt(1));
+ Node* one = SmiConstant(1);
Node* result = SmiAdd(index, one);
SetAccumulator(result);
Dispatch();
@@ -3310,19 +3251,14 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
// An invalid bytecode aborting execution if dispatched.
IGNITION_HANDLER(Illegal, InterpreterAssembler) { Abort(kInvalidBytecode); }
-// Nop
-//
-// No operation.
-IGNITION_HANDLER(Nop, InterpreterAssembler) { Dispatch(); }
-
-// SuspendGenerator <generator>
+// SuspendGenerator <generator> <first input register> <register count>
//
// Exports the register file and stores it into the generator. Also stores the
// current context, the state given in the accumulator, and the current bytecode
// offset (for debugging purposes) into the generator.
IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* generator_reg = BytecodeOperandReg(0);
- Node* flags = BytecodeOperandFlag(1);
+
Node* generator = LoadRegister(generator_reg);
Label if_stepping(this, Label::kDeferred), ok(this);
@@ -3340,45 +3276,21 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* context = GetContext();
Node* state = GetAccumulator();
- ExportRegisterFile(array);
+  // Bytecode operand 1 should always be 0 (we always store registers starting
+  // from the beginning).
+ CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1),
+ IntPtrConstant(Register(0).ToOperand())));
+ // Bytecode operand 2 is the number of registers to store to the generator.
+ Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2));
+ ExportRegisterFile(array, register_count);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
- Label if_asyncgeneratorawait(this), if_notasyncgeneratorawait(this),
- merge(this);
-
- // Calculate bytecode offset to store in the [input_or_debug_pos] or
- // [await_input_or_debug_pos] fields, to be used by the inspector.
+ // Store the bytecode offset in the [input_or_debug_pos] field, to be used by
+ // the inspector.
Node* offset = SmiTag(BytecodeOffset());
-
- using AsyncGeneratorAwaitBits = SuspendGeneratorBytecodeFlags::FlagsBits;
- Branch(Word32Equal(DecodeWord32<AsyncGeneratorAwaitBits>(flags),
- Int32Constant(
- static_cast<int>(SuspendFlags::kAsyncGeneratorAwait))),
- &if_asyncgeneratorawait, &if_notasyncgeneratorawait);
-
- BIND(&if_notasyncgeneratorawait);
- {
- // For ordinary yields (and for AwaitExpressions in Async Functions, which
- // are implemented as ordinary yields), it is safe to write over the
- // [input_or_debug_pos] field.
- StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
- offset);
- Goto(&merge);
- }
-
- BIND(&if_asyncgeneratorawait);
- {
- // An AwaitExpression in an Async Generator requires writing to the
- // [await_input_or_debug_pos] field.
- CSA_ASSERT(this,
- HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
- StoreObjectField(
- generator, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset, offset);
- Goto(&merge);
- }
-
- BIND(&merge);
+ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
+ offset);
Dispatch();
BIND(&if_stepping);
@@ -3389,18 +3301,14 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
}
}
-// ResumeGenerator <generator>
+// RestoreGeneratorState <generator>
//
-// Imports the register file stored in the generator. Also loads the
-// generator's state and stores it in the accumulator, before overwriting it
-// with kGeneratorExecuting.
-IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
+// Loads the generator's state and stores it in the accumulator,
+// before overwriting it with kGeneratorExecuting.
+IGNITION_HANDLER(RestoreGeneratorState, InterpreterAssembler) {
Node* generator_reg = BytecodeOperandReg(0);
Node* generator = LoadRegister(generator_reg);
- ImportRegisterFile(
- LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset));
-
Node* old_state =
LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
Node* new_state = Int32Constant(JSGeneratorObject::kGeneratorExecuting);
@@ -3411,6 +3319,28 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Dispatch();
}
+// RestoreGeneratorRegisters <generator> <first output register> <register
+// count>
+//
+// Imports the register file stored in the generator.
+IGNITION_HANDLER(RestoreGeneratorRegisters, InterpreterAssembler) {
+ Node* generator_reg = BytecodeOperandReg(0);
+ // Bytecode operand 1 is the start register. It should always be 0, so let's
+ // ignore it.
+ CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1),
+ IntPtrConstant(Register(0).ToOperand())));
+  // Bytecode operand 2 is the number of registers to restore from the
+  // generator.
+ Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2));
+
+ Node* generator = LoadRegister(generator_reg);
+
+ ImportRegisterFile(
+ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset),
+ register_count);
+
+ Dispatch();
+}
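SuspendGenerator and RestoreGeneratorRegisters now pass an explicit register count instead of exporting the whole frame implicitly. A rough stand-alone model of that copy-out/copy-in pair (hypothetical types; the real code works on a FixedArray held by the JSGeneratorObject):

#include <cstddef>
#include <vector>

struct GeneratorState {
  std::vector<int> register_file;  // stands in for the FixedArray
};

// Suspension: copy the first |count| interpreter registers into the generator.
void ExportRegisterFile(GeneratorState* gen, const int* regs, size_t count) {
  gen->register_file.assign(regs, regs + count);
}

// Resumption: copy the saved registers back into the frame.
void ImportRegisterFile(const GeneratorState& gen, int* regs, size_t count) {
  for (size_t i = 0; i < count && i < gen.register_file.size(); ++i) {
    regs[i] = gen.register_file[i];
  }
}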
+
} // namespace
Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index e8572ba1d4..8c2c919a52 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -202,21 +202,11 @@ Node* IntrinsicsGenerator::IsJSMap(Node* input, Node* arg_count,
return IsInstanceType(input, JS_MAP_TYPE);
}
-Node* IntrinsicsGenerator::IsJSMapIterator(Node* input, Node* arg_count,
- Node* context) {
- return IsInstanceType(input, JS_MAP_ITERATOR_TYPE);
-}
-
Node* IntrinsicsGenerator::IsJSSet(Node* input, Node* arg_count,
Node* context) {
return IsInstanceType(input, JS_SET_TYPE);
}
-Node* IntrinsicsGenerator::IsJSSetIterator(Node* input, Node* arg_count,
- Node* context) {
- return IsInstanceType(input, JS_SET_ITERATOR_TYPE);
-}
-
Node* IntrinsicsGenerator::IsJSWeakMap(Node* input, Node* arg_count,
Node* context) {
return IsInstanceType(input, JS_WEAK_MAP_TYPE);
@@ -276,14 +266,15 @@ Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(Node* input, Node* context,
Node* IntrinsicsGenerator::CreateIterResultObject(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::CreateIterResultObject(isolate()));
+ return IntrinsicAsStubCall(
+ input, context,
+ Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
}
Node* IntrinsicsGenerator::HasProperty(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::HasProperty(isolate()));
+ return IntrinsicAsStubCall(
+ input, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
Node* IntrinsicsGenerator::SubString(Node* input, Node* arg_count,
@@ -293,27 +284,32 @@ Node* IntrinsicsGenerator::SubString(Node* input, Node* arg_count,
Node* IntrinsicsGenerator::ToString(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate()));
+ return IntrinsicAsStubCall(
+ input, context, Builtins::CallableFor(isolate(), Builtins::kToString));
}
Node* IntrinsicsGenerator::ToLength(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate()));
+ return IntrinsicAsStubCall(
+ input, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
Node* IntrinsicsGenerator::ToInteger(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::ToInteger(isolate()));
+ return IntrinsicAsStubCall(
+ input, context, Builtins::CallableFor(isolate(), Builtins::kToInteger));
}
Node* IntrinsicsGenerator::ToNumber(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::ToNumber(isolate()));
+ return IntrinsicAsStubCall(
+ input, context, Builtins::CallableFor(isolate(), Builtins::kToNumber));
}
Node* IntrinsicsGenerator::ToObject(Node* input, Node* arg_count,
Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::ToObject(isolate()));
+ return IntrinsicAsStubCall(
+ input, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
}
Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
@@ -339,7 +335,7 @@ Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
}
Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
- ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+ ConvertReceiverMode::kAny);
return result;
}
@@ -387,18 +383,6 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
return return_value.value();
}
-Node* IntrinsicsGenerator::AsyncGeneratorGetAwaitInputOrDebugPos(
- Node* args_reg, Node* arg_count, Node* context) {
- Node* generator = __ LoadRegister(args_reg);
- CSA_SLOW_ASSERT(assembler_, __ HasInstanceType(
- generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
-
- Node* const value = __ LoadObjectField(
- generator, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset);
-
- return value;
-}
-
Node* IntrinsicsGenerator::CreateJSGeneratorObject(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsBuiltinCall(input, context,
@@ -437,13 +421,10 @@ Node* IntrinsicsGenerator::GeneratorGetResumeMode(Node* args_reg,
Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count,
Node* context) {
Node* generator = __ LoadRegister(args_reg);
- Node* const value =
- __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
__ StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kContinuationOffset,
__ SmiConstant(JSGeneratorObject::kGeneratorClosed));
-
- return value;
+ return __ UndefinedConstant();
}
Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count,
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index 1682d59c27..e5c98e85bf 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -33,7 +33,6 @@ IntrinsicsHelper::IntrinsicId IntrinsicsHelper::FromRuntimeId(
#undef TO_RUNTIME_ID
default:
UNREACHABLE();
- return static_cast<IntrinsicsHelper::IntrinsicId>(-1);
}
}
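The dead "return static_cast<...>(-1);" statements after UNREACHABLE() are dropped throughout this patch, presumably because the failure path is now treated as noreturn, so no trailing return is needed to satisfy the compiler. A small sketch of the idea, using a hypothetical macro rather than V8's real one:

#include <cstdlib>

[[noreturn]] inline void FatalUnreachable() { std::abort(); }
#define MY_UNREACHABLE() FatalUnreachable()

int ToCode(int id) {
  switch (id) {
    case 0:
      return 10;
    case 1:
      return 20;
    default:
      // Because FatalUnreachable() is [[noreturn]], control cannot fall out of
      // this branch, and any return placed after the call would be dead code.
      MY_UNREACHABLE();
  }
}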
@@ -48,7 +47,6 @@ Runtime::FunctionId IntrinsicsHelper::ToRuntimeId(
#undef TO_INTRINSIC_ID
default:
UNREACHABLE();
- return static_cast<Runtime::FunctionId>(-1);
}
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 3a69069532..d7d868efb9 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -14,8 +14,6 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting argument count is variable).
#define INTRINSICS_LIST(V) \
- V(AsyncGeneratorGetAwaitInputOrDebugPos, \
- async_generator_get_await_input_or_debug_pos, 1) \
V(AsyncGeneratorReject, async_generator_reject, 2) \
V(AsyncGeneratorResolve, async_generator_resolve, 3) \
V(CreateJSGeneratorObject, create_js_generator_object, 2) \
@@ -30,11 +28,9 @@ namespace interpreter {
V(HasProperty, has_property, 2) \
V(IsArray, is_array, 1) \
V(IsJSMap, is_js_map, 1) \
- V(IsJSMapIterator, is_js_map_iterator, 1) \
V(IsJSProxy, is_js_proxy, 1) \
V(IsJSReceiver, is_js_receiver, 1) \
V(IsJSSet, is_js_set, 1) \
- V(IsJSSetIterator, is_js_set_iterator, 1) \
V(IsJSWeakMap, is_js_weak_map, 1) \
V(IsJSWeakSet, is_js_weak_set, 1) \
V(IsSmi, is_smi, 1) \
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index b793ae5310..cd0dfd9854 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -67,7 +67,6 @@ class InterpreterCompilationJob final : public CompilationJob {
BytecodeGenerator generator_;
RuntimeCallStats* runtime_call_stats_;
RuntimeCallCounter background_execute_counter_;
- bool print_bytecode_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
@@ -107,7 +106,6 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
return index + 2 * kEntriesPerOperandScale;
}
UNREACHABLE();
- return 0;
}
void Interpreter::IterateDispatchTable(RootVisitor* v) {
@@ -149,19 +147,10 @@ InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
: CompilationJob(info->isolate(), info, "Ignition"),
generator_(info),
runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
- background_execute_counter_("CompileBackgroundIgnition"),
- print_bytecode_(ShouldPrintBytecode(info->shared_info())) {}
+ background_execute_counter_("CompileBackgroundIgnition") {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
CodeGenerator::MakeCodePrologue(info(), "interpreter");
-
- if (print_bytecode_) {
- OFStream os(stdout);
- std::unique_ptr<char[]> name = info()->GetDebugName();
- os << "[generating bytecode for function: " << info()->GetDebugName().get()
- << "]" << std::endl;
- }
-
return SUCCEEDED;
}
@@ -196,8 +185,11 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
return FAILED;
}
- if (print_bytecode_) {
+ if (ShouldPrintBytecode(info()->shared_info())) {
OFStream os(stdout);
+ std::unique_ptr<char[]> name = info()->GetDebugName();
+ os << "[generating bytecode for function: " << info()->GetDebugName().get()
+ << "]" << std::endl;
bytecodes->Disassemble(os);
os << std::flush;
}
diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc
index 9adf70dffa..773b4d0cab 100644
--- a/deps/v8/src/interpreter/setup-interpreter-internal.cc
+++ b/deps/v8/src/interpreter/setup-interpreter-internal.cc
@@ -18,6 +18,9 @@ namespace interpreter {
void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) {
DCHECK(!interpreter->IsDispatchTableInitialized());
HandleScope scope(interpreter->isolate_);
+ // Canonicalize handles, so that we can share constant pool entries pointing
+ // to code targets without dereferencing their handles.
+ CanonicalHandleScope canonical(interpreter->isolate_);
Address* dispatch_table = interpreter->dispatch_table_;
// Generate bytecode handlers for all bytecodes and scales.
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
index 90683fe7f6..365097106f 100644
--- a/deps/v8/src/intl.h
+++ b/deps/v8/src/intl.h
@@ -11,6 +11,7 @@
#include "src/base/timezone-cache.h"
#include "src/objects.h"
+#include "src/objects/string.h"
#include "unicode/uversion.h"
namespace U_ICU_NAMESPACE {
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 02993cfa6b..cea33db360 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -128,9 +128,9 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
// done here. In place, there are mjsunit tests harmony/array-species* which
// ensure that behavior is correct in various invalid protector cases.
- Cell* species_cell = heap()->species_protector();
+ PropertyCell* species_cell = heap()->species_protector();
return species_cell->value()->IsSmi() &&
- Smi::cast(species_cell->value())->value() == kProtectorValid;
+ Smi::ToInt(species_cell->value()) == kProtectorValid;
}
bool Isolate::IsStringLengthOverflowIntact() {
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 366c14fb11..5b1e26e1d0 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -25,7 +25,6 @@
#include "src/compilation-statistics.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -52,6 +51,7 @@
#include "src/version.h"
#include "src/visitors.h"
#include "src/vm-state-inl.h"
+#include "src/wasm/compilation-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
@@ -62,7 +62,7 @@ namespace internal {
base::Atomic32 ThreadId::highest_thread_id_ = 0;
int ThreadId::AllocateThreadId() {
- int new_id = base::NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
+ int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
return new_id;
}
@@ -189,15 +189,14 @@ void Isolate::InitializeOncePerProcess() {
CHECK(thread_data_table_ == NULL);
isolate_key_ = base::Thread::CreateThreadLocalKey();
#if DEBUG
- base::NoBarrier_Store(&isolate_key_created_, 1);
+ base::Relaxed_Store(&isolate_key_created_, 1);
#endif
thread_id_key_ = base::Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
thread_data_table_ = new Isolate::ThreadDataTable();
}
-
-Address Isolate::get_address_from_id(Isolate::AddressId id) {
+Address Isolate::get_address_from_id(IsolateAddressId id) {
return isolate_addresses_[id];
}
@@ -428,7 +427,6 @@ class StackTraceHelper {
return !skip_next_frame_;
}
UNREACHABLE();
- return false;
}
bool IsNotHidden(JSFunction* fun) {
@@ -497,6 +495,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
switch (frame->type()) {
case StackFrame::JAVA_SCRIPT:
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
case StackFrame::BUILTIN: {
@@ -517,18 +516,18 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
Handle<AbstractCode> abstract_code = summ.abstract_code();
const int offset = frames[i].code_offset();
- bool force_constructor = false;
+ bool is_constructor = frames[i].is_constructor();
if (frame->type() == StackFrame::BUILTIN) {
// Help CallSite::IsConstructor correctly detect hand-written
// construct stubs.
if (Code::cast(*abstract_code)->is_construct_stub()) {
- force_constructor = true;
+ is_constructor = true;
}
}
int flags = 0;
if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
- if (force_constructor) flags |= FrameArray::kForceConstructor;
+ if (is_constructor) flags |= FrameArray::kIsConstructor;
elements = FrameArray::AppendJSFrame(
elements, TheHoleToUndefined(this, recv), fun, abstract_code,
@@ -550,7 +549,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
int flags = 0;
if (helper.IsStrictFrame(*fun)) flags |= FrameArray::kIsStrict;
- if (exit_frame->IsConstructor()) flags |= FrameArray::kForceConstructor;
+ if (exit_frame->IsConstructor()) flags |= FrameArray::kIsConstructor;
elements = FrameArray::AppendJSFrame(elements, recv, fun,
Handle<AbstractCode>::cast(code),
@@ -658,7 +657,6 @@ class CaptureStackTraceHelper {
if (summ.IsJavaScript()) return NewStackFrameObject(summ.AsJavaScript());
if (summ.IsWasm()) return NewStackFrameObject(summ.AsWasm());
UNREACHABLE();
- return factory()->NewStackFrameInfo();
}
Handle<StackFrameInfo> NewStackFrameObject(
@@ -708,8 +706,7 @@ class CaptureStackTraceHelper {
frame->set_is_constructor(summ.is_constructor());
frame->set_is_wasm(false);
if (!FLAG_optimize_for_size) {
- auto new_cache =
- UnseededNumberDictionary::AtNumberPut(cache, code_offset, frame);
+ auto new_cache = UnseededNumberDictionary::Set(cache, code_offset, frame);
if (*new_cache != *cache || !maybe_cache->IsUnseededNumberDictionary()) {
AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
}
@@ -770,12 +767,13 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+ FrameSummary& frame = frames[i];
+ if (!frame.is_subject_to_debugging()) continue;
// Filter frames from other security contexts.
if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
- !this->context()->HasSameSecurityTokenAs(*frames[i].native_context()))
+ !this->context()->HasSameSecurityTokenAs(*frame.native_context()))
continue;
- Handle<StackFrameInfo> new_frame_obj =
- helper.NewStackFrameObject(frames[i]);
+ Handle<StackFrameInfo> new_frame_obj = helper.NewStackFrameObject(frame);
stack_trace_elems->set(frames_seen, *new_frame_obj);
frames_seen++;
}
@@ -1215,7 +1213,7 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the handler.
Code* code = frame->LookupCode();
return FoundHandler(
- nullptr, code, Smi::cast(code->handler_table()->get(0))->value(),
+ nullptr, code, Smi::ToInt(code->handler_table()->get(0)),
handler->address() + StackHandlerConstants::kSize, 0);
}
@@ -1224,7 +1222,8 @@ Object* Isolate::UnwindAndFindHandler() {
trap_handler::ClearThreadInWasm();
}
- if (!FLAG_wasm_eh_prototype || !is_catchable_by_wasm(exception)) break;
+ if (!FLAG_experimental_wasm_eh || !is_catchable_by_wasm(exception))
+ break;
int stack_slots = 0; // Will contain stack slot count of frame.
WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
int offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
@@ -1375,21 +1374,13 @@ HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
// tables on the unoptimized code objects.
List<FrameSummary> summaries;
frame->Summarize(&summaries);
- for (const FrameSummary& summary : summaries) {
+ for (int i = summaries.length() - 1; i >= 0; i--) {
+ const FrameSummary& summary = summaries[i];
Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
if (code->IsCode() && code->kind() == AbstractCode::BUILTIN) {
- if (code->GetCode()->is_promise_rejection()) {
- return HandlerTable::PROMISE;
- }
-
- // This the exception throw in PromiseHandle which doesn't
- // cause a promise rejection.
- if (code->GetCode()->is_exception_caught()) {
- return HandlerTable::CAUGHT;
- }
-
- // The built-in must be marked with an exception prediction.
- UNREACHABLE();
+ prediction = code->GetCode()->GetBuiltinCatchPrediction();
+ if (prediction == HandlerTable::UNCAUGHT) continue;
+ return prediction;
}
if (code->kind() == AbstractCode::OPTIMIZED_FUNCTION) {
@@ -1413,6 +1404,23 @@ HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
}
return HandlerTable::UNCAUGHT;
}
+
+Isolate::CatchType ToCatchType(HandlerTable::CatchPrediction prediction) {
+ switch (prediction) {
+ case HandlerTable::UNCAUGHT:
+ return Isolate::NOT_CAUGHT;
+ case HandlerTable::CAUGHT:
+ return Isolate::CAUGHT_BY_JAVASCRIPT;
+ case HandlerTable::PROMISE:
+ return Isolate::CAUGHT_BY_PROMISE;
+ case HandlerTable::DESUGARING:
+ return Isolate::CAUGHT_BY_DESUGARING;
+ case HandlerTable::ASYNC_AWAIT:
+ return Isolate::CAUGHT_BY_ASYNC_AWAIT;
+ default:
+ UNREACHABLE();
+ }
+}
} // anonymous namespace
Isolate::CatchType Isolate::PredictExceptionCatcher() {
@@ -1442,38 +1450,20 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
case StackFrame::INTERPRETED:
case StackFrame::BUILTIN: {
JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
- HandlerTable::CatchPrediction prediction = PredictException(js_frame);
- switch (prediction) {
- case HandlerTable::UNCAUGHT:
- break;
- case HandlerTable::CAUGHT:
- return CAUGHT_BY_JAVASCRIPT;
- case HandlerTable::PROMISE:
- return CAUGHT_BY_PROMISE;
- case HandlerTable::DESUGARING:
- return CAUGHT_BY_DESUGARING;
- case HandlerTable::ASYNC_AWAIT:
- return CAUGHT_BY_ASYNC_AWAIT;
- }
+ Isolate::CatchType prediction = ToCatchType(PredictException(js_frame));
+ if (prediction == NOT_CAUGHT) break;
+ return prediction;
} break;
case StackFrame::STUB: {
Handle<Code> code(frame->LookupCode());
- if (code->kind() == Code::BUILTIN && code->is_turbofanned() &&
- code->handler_table()->length()) {
- if (code->is_promise_rejection()) {
- return CAUGHT_BY_PROMISE;
- }
-
- // This the exception throw in PromiseHandle which doesn't
- // cause a promise rejection.
- if (code->is_exception_caught()) {
- return CAUGHT_BY_JAVASCRIPT;
- }
-
- // The built-in must be marked with an exception prediction.
- UNREACHABLE();
+ if (!code->IsCode() || code->kind() != Code::BUILTIN ||
+ !code->handler_table()->length() || !code->is_turbofanned()) {
+ break;
}
+
+ CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
+ if (prediction != NOT_CAUGHT) return prediction;
} break;
default:
@@ -1756,6 +1746,9 @@ bool Isolate::IsExternalHandlerOnTop(Object* exception) {
void Isolate::ReportPendingMessages() {
DCHECK(AllowExceptions::IsAllowed(this));
+ // The embedder might run script in response to an exception.
+ AllowJavascriptExecutionDebugOnly allow_script(this);
+
Object* exception = pending_exception();
// Try to propagate the exception to an external v8::TryCatch handler. If
@@ -2039,23 +2032,40 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it(this);
- if (debug_->in_debug_scope()) {
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- if (context->native_context() == *debug_->debug_context()) {
- it.Advance();
- } else {
- break;
- }
- }
- }
+ it.AdvanceWhileDebugContext(debug_);
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
return Handle<Context>(context->native_context(), this);
}
+Handle<Context> Isolate::GetIncumbentContext() {
+ JavaScriptFrameIterator it(this);
+ it.AdvanceWhileDebugContext(debug_);
+
+ // 1st candidate: most-recently-entered author function's context
+ // if it's newer than the last Context::BackupIncumbentScope entry.
+ if (!it.done() &&
+ static_cast<const void*>(it.frame()) >
+ static_cast<const void*>(top_backup_incumbent_scope())) {
+ Context* context = Context::cast(it.frame()->context());
+ return Handle<Context>(context->native_context(), this);
+ }
+
+ // 2nd candidate: the last Context::Scope's incumbent context if any.
+ if (top_backup_incumbent_scope()) {
+ return Utils::OpenHandle(
+ *top_backup_incumbent_scope()->backup_incumbent_context_);
+ }
+
+ // Last candidate: the entered context.
+  // Given that no other author function is running, there must be no
+  // cross-context function running either, so the incumbent realm must match
+  // the entry realm.
+ v8::Local<v8::Context> entered_context =
+ reinterpret_cast<v8::Isolate*>(this)->GetEnteredContext();
+ return Utils::OpenHandle(*entered_context);
+}
char* Isolate::ArchiveThread(char* to) {
MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
@@ -2100,42 +2110,36 @@ void Isolate::ReleaseManagedObjects() {
while (current != nullptr) {
Isolate::ManagedObjectFinalizer* next = current->next_;
current->Dispose();
- delete current;
current = next;
}
// No new managed objects should pop up during finalization.
DCHECK_NULL(managed_object_finalizers_list_.next_);
}
-Isolate::ManagedObjectFinalizer* Isolate::RegisterForReleaseAtTeardown(
- void* value, Isolate::ManagedObjectFinalizer::Deleter deleter) {
- DCHECK_NOT_NULL(value);
- DCHECK_NOT_NULL(deleter);
+void Isolate::RegisterForReleaseAtTeardown(
+ Isolate::ManagedObjectFinalizer* finalizer) {
+ DCHECK_NOT_NULL(finalizer->value_);
+ DCHECK_NOT_NULL(finalizer->deleter_);
+ DCHECK_NULL(finalizer->prev_);
+ DCHECK_NULL(finalizer->next_);
- Isolate::ManagedObjectFinalizer* ret = new Isolate::ManagedObjectFinalizer();
- ret->value_ = value;
- ret->deleter_ = deleter;
// Insert at head. We keep the head alive for the lifetime of the Isolate
// because otherwise we can't reset the head, should we delete it before
// the isolate expires
Isolate::ManagedObjectFinalizer* next = managed_object_finalizers_list_.next_;
- managed_object_finalizers_list_.next_ = ret;
- ret->prev_ = &managed_object_finalizers_list_;
- ret->next_ = next;
- if (next != nullptr) next->prev_ = ret;
- return ret;
+ managed_object_finalizers_list_.next_ = finalizer;
+ finalizer->prev_ = &managed_object_finalizers_list_;
+ finalizer->next_ = next;
+ if (next != nullptr) next->prev_ = finalizer;
}
void Isolate::UnregisterFromReleaseAtTeardown(
- Isolate::ManagedObjectFinalizer** finalizer_ptr) {
- DCHECK_NOT_NULL(finalizer_ptr);
- Isolate::ManagedObjectFinalizer* finalizer = *finalizer_ptr;
+ Isolate::ManagedObjectFinalizer* finalizer) {
+ DCHECK_NOT_NULL(finalizer);
DCHECK_NOT_NULL(finalizer->prev_);
finalizer->prev_->next_ = finalizer->next_;
if (finalizer->next_ != nullptr) finalizer->next_->prev_ = finalizer->prev_;
- delete finalizer;
- *finalizer_ptr = nullptr;
}
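RegisterForReleaseAtTeardown and UnregisterFromReleaseAtTeardown now operate on caller-owned ManagedObjectFinalizer nodes linked into an intrusive list, instead of new/deleting nodes internally. A minimal stand-alone sketch of that ownership model (hypothetical names):

struct Finalizer {
  Finalizer* prev = nullptr;
  Finalizer* next = nullptr;
};

class FinalizerRegistry {
 public:
  // The caller keeps ownership of |f|; we only link it in behind the sentinel.
  void Register(Finalizer* f) {
    f->prev = &head_;
    f->next = head_.next;
    if (head_.next != nullptr) head_.next->prev = f;
    head_.next = f;
  }

  // Unlink only; nothing is deleted here, mirroring the new behaviour above.
  void Unregister(Finalizer* f) {
    f->prev->next = f->next;
    if (f->next != nullptr) f->next->prev = f->prev;
    f->prev = f->next = nullptr;
  }

 private:
  Finalizer head_;  // sentinel that lives as long as the registry
};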
Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
@@ -2299,9 +2303,7 @@ Isolate::Isolate(bool enable_serializer)
bootstrapper_(NULL),
runtime_profiler_(NULL),
compilation_cache_(NULL),
- counters_(NULL),
logger_(NULL),
- stats_table_(NULL),
load_stub_cache_(NULL),
store_stub_cache_(NULL),
code_aging_helper_(NULL),
@@ -2353,13 +2355,14 @@ Isolate::Isolate(bool enable_serializer)
use_counter_callback_(NULL),
basic_block_profiler_(NULL),
cancelable_task_manager_(new CancelableTaskManager()),
+ wasm_compilation_manager_(new wasm::CompilationManager()),
abort_on_uncaught_exception_callback_(NULL),
total_regexp_code_generated_(0) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
CHECK(thread_data_table_);
}
- id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1);
+ id_ = base::Relaxed_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@@ -2405,7 +2408,7 @@ void Isolate::TearDown() {
// direct pointer. We don't use Enter/Exit here to avoid
// initializing the thread data.
PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
+ DCHECK(base::Relaxed_Load(&isolate_key_created_) == 1);
Isolate* saved_isolate =
reinterpret_cast<Isolate*>(base::Thread::GetThreadLocal(isolate_key_));
SetIsolateThreadLocals(this, NULL);
@@ -2443,19 +2446,15 @@ void Isolate::Deinit() {
debug()->Unload();
- FreeThreadResources();
- // Release managed objects before shutting down the heap. The finalizer might
- // need to access heap objects.
- ReleaseManagedObjects();
-
if (concurrent_recompilation_enabled()) {
optimizing_compile_dispatcher_->Stop();
delete optimizing_compile_dispatcher_;
optimizing_compile_dispatcher_ = NULL;
}
+ wasm_compilation_manager_->TearDown();
+
heap_.mark_compact_collector()->EnsureSweepingCompleted();
- heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
DumpAndResetStats();
@@ -2471,6 +2470,11 @@ void Isolate::Deinit() {
sampler::Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
+ FreeThreadResources();
+ // Release managed objects before shutting down the heap. The finalizer might
+ // need to access heap objects.
+ ReleaseManagedObjects();
+
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
builtins_.TearDown();
@@ -2556,8 +2560,6 @@ Isolate::~Isolate() {
store_stub_cache_ = NULL;
delete code_aging_helper_;
code_aging_helper_ = NULL;
- delete stats_table_;
- stats_table_ = NULL;
delete materialized_object_store_;
materialized_object_store_ = NULL;
@@ -2565,9 +2567,6 @@ Isolate::~Isolate() {
delete logger_;
logger_ = NULL;
- delete counters_;
- counters_ = NULL;
-
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
@@ -2651,16 +2650,29 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
return true;
}
+bool Isolate::InitializeCounters() {
+ if (async_counters_) return false;
+ async_counters_ = std::make_shared<Counters>(this);
+ return true;
+}
void Isolate::InitializeLoggingAndCounters() {
if (logger_ == NULL) {
logger_ = new Logger(this);
}
- if (counters_ == NULL) {
- counters_ = new Counters(this);
- }
+ InitializeCounters();
}
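The split between InitializeLoggingAndCounters() and the new InitializeCounters() is easy to miss; a minimal sketch of the resulting contract at a hypothetical call site (only the two functions come from this patch):

    // Sketch only; the call site is hypothetical.
    isolate->InitializeLoggingAndCounters();       // creates Logger and the shared Counters
    bool created = isolate->InitializeCounters();  // no-op the second time
    DCHECK(!created);                              // returns false when Counters already exist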
+namespace {
+void PrintBuiltinSizes(Isolate* isolate) {
+ Builtins* builtins = isolate->builtins();
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ const char* name = builtins->name(i);
+ Code* code = builtins->builtin(static_cast<Builtins::Name>(i));
+ PrintF(stdout, "%s: %d\n", name, code->instruction_size());
+ }
+}
+} // namespace
bool Isolate::Init(Deserializer* des) {
TRACE_ISOLATE(init);
@@ -2684,7 +2696,7 @@ bool Isolate::Init(Deserializer* des) {
heap_.SetStackLimits();
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
- isolate_addresses_[Isolate::k##CamelName##Address] = \
+ isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
reinterpret_cast<Address>(hacker_name##_address());
FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
#undef ASSIGN_ELEMENT
@@ -2800,6 +2812,8 @@ bool Isolate::Init(Deserializer* des) {
delete setup_delegate_;
setup_delegate_ = nullptr;
+ if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
+
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
clear_pending_message();
@@ -2825,6 +2839,9 @@ bool Isolate::Init(Deserializer* des) {
Internals::kExternalMemoryOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_limit_)),
Internals::kExternalMemoryLimitOffset);
+ CHECK_EQ(static_cast<int>(
+ OFFSET_OF(Isolate, heap_.external_memory_at_last_mark_compact_)),
+ Internals::kExternalMemoryAtLastMarkCompactOffset);
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
@@ -2839,7 +2856,6 @@ bool Isolate::Init(Deserializer* des) {
HandleScope scope(this);
CodeStub::GenerateFPStubs(this);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
- StubFailureTrampolineStub::GenerateAheadOfTime(this);
}
initialized_from_snapshot_ = (des != NULL);
@@ -2850,16 +2866,6 @@ bool Isolate::Init(Deserializer* des) {
}
-// Initialized lazily to allow early
-// v8::V8::SetAddHistogramSampleFunction calls.
-StatsTable* Isolate::stats_table() {
- if (stats_table_ == NULL) {
- stats_table_ = new StatsTable;
- }
- return stats_table_;
-}
-
-
void Isolate::Enter() {
Isolate* current_isolate = NULL;
PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
@@ -2962,11 +2968,8 @@ void Isolate::DumpAndResetStats() {
os << ps << std::endl;
}
}
- if (hstatistics() != nullptr) hstatistics()->Print();
delete turbo_statistics_;
turbo_statistics_ = nullptr;
- delete hstatistics_;
- hstatistics_ = nullptr;
if (V8_UNLIKELY(FLAG_runtime_stats ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
OFStream os(stdout);
@@ -2976,12 +2979,6 @@ void Isolate::DumpAndResetStats() {
}
-HStatistics* Isolate::GetHStatistics() {
- if (hstatistics() == NULL) set_hstatistics(new HStatistics());
- return hstatistics();
-}
-
-
CompilationStatistics* Isolate::GetTurboStatistics() {
if (turbo_statistics() == NULL)
set_turbo_statistics(new CompilationStatistics());
@@ -2989,12 +2986,6 @@ CompilationStatistics* Isolate::GetTurboStatistics() {
}
-HTracer* Isolate::GetHTracer() {
- if (htracer() == NULL) set_htracer(new HTracer(id()));
- return htracer();
-}
-
-
CodeTracer* Isolate::GetCodeTracer() {
if (code_tracer() == NULL) set_code_tracer(new CodeTracer(id()));
return code_tracer();
@@ -3014,7 +3005,8 @@ Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
bool Isolate::use_optimizer() {
return FLAG_opt && !serializer_enabled_ &&
- CpuFeatures::SupportsCrankshaft() && !is_precise_count_code_coverage();
+ CpuFeatures::SupportsCrankshaft() &&
+ !is_precise_count_code_coverage() && !is_block_count_code_coverage();
}
bool Isolate::NeedsSourcePositionsForProfiling() const {
@@ -3041,26 +3033,6 @@ bool Isolate::IsArrayOrObjectPrototype(Object* object) {
return false;
}
-void Isolate::ClearOSROptimizedCode() {
- DisallowHeapAllocation no_gc;
- Object* context = heap()->native_contexts_list();
- while (!context->IsUndefined(this)) {
- Context* current_context = Context::cast(context);
- current_context->ClearOSROptimizedCodeCache();
- context = current_context->next_context_link();
- }
-}
-
-void Isolate::EvictOSROptimizedCode(Code* code, const char* reason) {
- DisallowHeapAllocation no_gc;
- Object* context = heap()->native_contexts_list();
- while (!context->IsUndefined(this)) {
- Context* current_context = Context::cast(context);
- current_context->EvictFromOSROptimizedCodeCache(code, reason);
- context = current_context->next_context_link();
- }
-}
-
bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
DisallowHeapAllocation no_gc;
Object* context = heap()->native_contexts_list();
@@ -3078,7 +3050,7 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
PropertyCell* no_elements_cell = heap()->array_protector();
bool cell_reports_intact =
no_elements_cell->value()->IsSmi() &&
- Smi::cast(no_elements_cell->value())->value() == kProtectorValid;
+ Smi::ToInt(no_elements_cell->value()) == kProtectorValid;
#ifdef DEBUG
Map* root_array_map =
@@ -3136,8 +3108,7 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
bool is_is_concat_spreadable_set =
- Smi::cast(is_concat_spreadable_cell->value())->value() ==
- kProtectorInvalid;
+ Smi::ToInt(is_concat_spreadable_cell->value()) == kProtectorInvalid;
#ifdef DEBUG
Map* root_array_map = get_initial_js_array_map(GetInitialFastElementsKind());
if (root_array_map == NULL) {
@@ -3355,22 +3326,54 @@ void Isolate::DebugStateUpdated() {
promise_hook_or_debug_is_active_ = promise_hook_ || debug()->is_active();
}
-void Isolate::RunHostImportModuleDynamicallyCallback(
- Handle<String> source_url, Handle<String> specifier,
- Handle<JSPromise> promise) {
- auto result = v8::Utils::PromiseToDynamicImportResult(promise);
+namespace {
+
+MaybeHandle<JSPromise> NewRejectedPromise(Isolate* isolate,
+ v8::Local<v8::Context> api_context,
+ Handle<Object> exception) {
+ v8::Local<v8::Promise::Resolver> resolver;
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ isolate, resolver, v8::Promise::Resolver::New(api_context),
+ MaybeHandle<JSPromise>());
+
+ RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ isolate, resolver->Reject(api_context, v8::Utils::ToLocal(exception)),
+ MaybeHandle<JSPromise>());
+
+ v8::Local<v8::Promise> promise = resolver->GetPromise();
+ return v8::Utils::OpenHandle(*promise);
+}
+
+} // namespace
+
+MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
+ Handle<String> source_url, Handle<Object> specifier) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(native_context());
+
if (host_import_module_dynamically_callback_ == nullptr) {
Handle<Object> exception =
factory()->NewError(error_function(), MessageTemplate::kUnsupported);
- CHECK(result->FinishDynamicImportFailure(
- v8::Utils::ToLocal(handle(context(), this)),
- v8::Utils::ToLocal(exception)));
- return;
+ return NewRejectedPromise(this, api_context, exception);
}
- host_import_module_dynamically_callback_(
- reinterpret_cast<v8::Isolate*>(this), v8::Utils::ToLocal(source_url),
- v8::Utils::ToLocal(specifier), result);
+ Handle<String> specifier_str;
+ MaybeHandle<String> maybe_specifier = Object::ToString(this, specifier);
+ if (!maybe_specifier.ToHandle(&specifier_str)) {
+ Handle<Object> exception(pending_exception(), this);
+ clear_pending_exception();
+
+ return NewRejectedPromise(this, api_context, exception);
+ }
+ DCHECK(!has_pending_exception());
+
+ v8::Local<v8::Promise> promise;
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, promise,
+ host_import_module_dynamically_callback_(
+ api_context, v8::Utils::ToLocal(source_url),
+ v8::Utils::ToLocal(specifier_str)),
+ MaybeHandle<JSPromise>());
+ return v8::Utils::OpenHandle(*promise);
}
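For context, a hedged sketch of the embedder half of this contract, using only the shape implied by the call above (an api_context, the referrer URL, a specifier string, and a MaybeLocal<Promise> result); the function name and body are illustrative, not part of the patch:

    v8::MaybeLocal<v8::Promise> ImportModuleDynamically(
        v8::Local<v8::Context> context, v8::Local<v8::String> referrer,
        v8::Local<v8::String> specifier) {
      v8::Local<v8::Promise::Resolver> resolver;
      if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
        return v8::MaybeLocal<v8::Promise>();
      }
      // A real embedder would start loading |specifier| relative to |referrer|
      // here and settle the promise once the module graph is instantiated.
      if (resolver->Resolve(context, v8::Undefined(context->GetIsolate()))
              .IsNothing()) {
        return v8::MaybeLocal<v8::Promise>();
      }
      return resolver->GetPromise();
    }

The embedder would hand such a function to the isolate through the SetHostImportModuleDynamicallyCallback hook visible in the next hunk.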
void Isolate::SetHostImportModuleDynamicallyCallback(
@@ -3640,15 +3643,6 @@ std::string Isolate::GetTurboCfgFileName() {
}
}
-void Isolate::SetTailCallEliminationEnabled(bool enabled) {
- if (is_tail_call_elimination_enabled_ == enabled) return;
- is_tail_call_elimination_enabled_ = enabled;
- // TODO(ishell): Introduce DependencyGroup::kTailCallChangedGroup to
- // deoptimize only those functions that are affected by the change of this
- // flag.
- internal::Deoptimizer::DeoptimizeAll(this);
-}
-
// Heap::detached_contexts tracks detached contexts as pairs
// (number of GC since the context was detached, the context).
void Isolate::AddDetachedContext(Handle<Context> context) {
@@ -3670,7 +3664,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
if (length == 0) return;
int new_length = 0;
for (int i = 0; i < length; i += 2) {
- int mark_sweeps = Smi::cast(detached_contexts->get(i))->value();
+ int mark_sweeps = Smi::ToInt(detached_contexts->get(i));
DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
if (!cell->cleared()) {
@@ -3684,7 +3678,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
PrintF("%d detached contexts are collected out of %d\n",
length - new_length, length);
for (int i = 0; i < new_length; i += 2) {
- int mark_sweeps = Smi::cast(detached_contexts->get(i))->value();
+ int mark_sweeps = Smi::ToInt(detached_contexts->get(i));
DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
if (mark_sweeps > 3) {
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index d65a1f373a..a22bddf6bd 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -5,6 +5,7 @@
#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_
+#include <cstddef>
#include <memory>
#include <queue>
@@ -70,8 +71,6 @@ class Factory;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
-class HStatistics;
-class HTracer;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class Logger;
@@ -107,6 +106,10 @@ namespace interpreter {
class Interpreter;
}
+namespace wasm {
+class CompilationManager;
+}
+
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
@@ -129,6 +132,26 @@ class Interpreter;
#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
+#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (!(call).ToLocal(&dst)) { \
+ DCHECK(__isolate__->has_scheduled_exception()); \
+ __isolate__->PromoteScheduledException(); \
+ return value; \
+ } \
+ } while (false)
+
+#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if ((call).IsNothing()) { \
+ DCHECK(__isolate__->has_scheduled_exception()); \
+ __isolate__->PromoteScheduledException(); \
+ return value; \
+ } \
+ } while (false)
+
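These two macros promote a scheduled exception to a pending one and bail out with the supplied value whenever a v8 API call fails; a minimal sketch mirroring the NewRejectedPromise helper added in isolate.cc (the function below is hypothetical):

    MaybeHandle<JSPromise> MakeResolvedPromise(Isolate* isolate,
                                               v8::Local<v8::Context> api_context,
                                               Handle<Object> value) {
      v8::Local<v8::Promise::Resolver> resolver;
      // Bails out with the empty MaybeHandle after promoting any scheduled
      // exception to a pending one.
      ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
          isolate, resolver, v8::Promise::Resolver::New(api_context),
          MaybeHandle<JSPromise>());
      RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
          isolate, resolver->Resolve(api_context, v8::Utils::ToLocal(value)),
          MaybeHandle<JSPromise>());
      v8::Local<v8::Promise> promise = resolver->GetPromise();
      return v8::Utils::OpenHandle(*promise);
    }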
#define RETURN_RESULT_OR_FAILURE(isolate, call) \
do { \
Handle<Object> __result__; \
@@ -189,20 +212,6 @@ class Interpreter;
RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
-#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
- C(Handler, handler) \
- C(CEntryFP, c_entry_fp) \
- C(CFunction, c_function) \
- C(Context, context) \
- C(PendingException, pending_exception) \
- C(PendingHandlerContext, pending_handler_context) \
- C(PendingHandlerCode, pending_handler_code) \
- C(PendingHandlerOffset, pending_handler_offset) \
- C(PendingHandlerFP, pending_handler_fp) \
- C(PendingHandlerSP, pending_handler_sp) \
- C(ExternalCaughtException, external_caught_exception) \
- C(JSEntrySP, js_entry_sp)
-
#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var, \
limit_check, increment, body) \
do { \
@@ -222,10 +231,10 @@ class Interpreter;
class ThreadId {
public:
// Creates an invalid ThreadId.
- ThreadId() { base::NoBarrier_Store(&id_, kInvalidId); }
+ ThreadId() { base::Relaxed_Store(&id_, kInvalidId); }
ThreadId& operator=(const ThreadId& other) {
- base::NoBarrier_Store(&id_, base::NoBarrier_Load(&other.id_));
+ base::Relaxed_Store(&id_, base::Relaxed_Load(&other.id_));
return *this;
}
@@ -237,17 +246,17 @@ class ThreadId {
// Compares ThreadIds for equality.
INLINE(bool Equals(const ThreadId& other) const) {
- return base::NoBarrier_Load(&id_) == base::NoBarrier_Load(&other.id_);
+ return base::Relaxed_Load(&id_) == base::Relaxed_Load(&other.id_);
}
// Checks whether this ThreadId refers to any thread.
INLINE(bool IsValid() const) {
- return base::NoBarrier_Load(&id_) != kInvalidId;
+ return base::Relaxed_Load(&id_) != kInvalidId;
}
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::GetCurrentThreadId).
- int ToInteger() const { return static_cast<int>(base::NoBarrier_Load(&id_)); }
+ int ToInteger() const { return static_cast<int>(base::Relaxed_Load(&id_)); }
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::TerminateExecution).
@@ -256,7 +265,7 @@ class ThreadId {
private:
static const int kInvalidId = -1;
- explicit ThreadId(int id) { base::NoBarrier_Store(&id_, id); }
+ explicit ThreadId(int id) { base::Relaxed_Store(&id_, id); }
static int AllocateThreadId();
@@ -405,8 +414,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
- V(ExtensionCallback, wasm_compile_callback, &NoExtension) \
- V(ExtensionCallback, wasm_instantiate_callback, &NoExtension) \
+ V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, \
nullptr) \
/* State for Relocatable. */ \
@@ -418,9 +426,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(AddressToIndexHashMap*, external_reference_map, nullptr) \
V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
V(int, pending_microtask_count, 0) \
- V(HStatistics*, hstatistics, nullptr) \
V(CompilationStatistics*, turbo_statistics, nullptr) \
- V(HTracer*, htracer, nullptr) \
V(CodeTracer*, code_tracer, nullptr) \
V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
V(PromiseRejectCallback, promise_reject_callback, nullptr) \
@@ -436,6 +442,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
/* Current code coverage mode */ \
V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
V(int, last_stack_frame_info_id, 0) \
+ V(int, last_console_context_id, 0) \
ISOLATE_INIT_SIMULATOR_LIST(V)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
@@ -504,14 +511,6 @@ class Isolate {
DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
};
-
- enum AddressId {
-#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
- FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
-#undef DECLARE_ENUM
- kIsolateAddressCount
- };
-
static void InitializeOncePerProcess();
// Returns the PerIsolateThreadData for the current thread (or NULL if one is
@@ -523,10 +522,10 @@ class Isolate {
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
- DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
+ DCHECK(base::Relaxed_Load(&isolate_key_created_) == 1);
Isolate* isolate = reinterpret_cast<Isolate*>(
base::Thread::GetExistingThreadLocal(isolate_key_));
- DCHECK(isolate != NULL);
+ DCHECK_NOT_NULL(isolate);
return isolate;
}
@@ -536,6 +535,7 @@ class Isolate {
//
// Safe to call more than once.
void InitializeLoggingAndCounters();
+ bool InitializeCounters(); // Returns false if already initialized.
bool Init(Deserializer* des);
@@ -582,7 +582,7 @@ class Isolate {
// Mutex for serializing access to break control structures.
base::RecursiveMutex* break_access() { return &break_access_; }
- Address get_address_from_id(AddressId id);
+ Address get_address_from_id(IsolateAddressId id);
// Access to top context (where the current function object was created).
Context* context() { return thread_local_top_.context_; }
@@ -829,6 +829,8 @@ class Isolate {
// is, the native context of the top-most JavaScript frame.
Handle<Context> GetCallingNativeContext();
+ Handle<Context> GetIncumbentContext();
+
void RegisterTryCatchHandler(v8::TryCatch* that);
void UnregisterTryCatchHandler(v8::TryCatch* that);
@@ -866,23 +868,24 @@ class Isolate {
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
Bootstrapper* bootstrapper() { return bootstrapper_; }
- Counters* counters() {
- // Call InitializeLoggingAndCounters() if logging is needed before
- // the isolate is fully initialized.
- DCHECK(counters_ != NULL);
- return counters_;
+ // Use for updating counters on a foreground thread.
+ Counters* counters() { return async_counters().get(); }
+ // Use for updating counters on a background thread.
+ const std::shared_ptr<Counters>& async_counters() {
+ // Make sure InitializeCounters() has been called.
+ DCHECK_NOT_NULL(async_counters_.get());
+ return async_counters_;
}
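A hedged sketch of how the two accessors are meant to be used, with a hypothetical background task; only counters() and async_counters() come from this patch:

    class BackgroundTask {
     public:
      explicit BackgroundTask(Isolate* isolate)
          // Grab the shared_ptr while still on the isolate's thread; the
          // Counters object then stays alive for as long as the task does.
          : counters_(isolate->async_counters()) {}

      void RunOnWorkerThread() {
        // Update counters through the shared_ptr rather than through the
        // Isolate*, e.g. counters_->some_counter()->Increment();
      }

     private:
      std::shared_ptr<Counters> counters_;
    };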
RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
Logger* logger() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
- DCHECK(logger_ != NULL);
+ DCHECK_NOT_NULL(logger_);
return logger_;
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
- StatsTable* stats_table();
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
@@ -1015,6 +1018,18 @@ class Isolate {
return code_coverage_mode() == debug::Coverage::kPreciseBinary;
}
+ bool is_block_count_code_coverage() const {
+ return code_coverage_mode() == debug::Coverage::kBlockCount;
+ }
+
+ bool is_block_binary_code_coverage() const {
+ return code_coverage_mode() == debug::Coverage::kBlockBinary;
+ }
+
+ bool is_block_code_coverage() const {
+ return is_block_count_code_coverage() || is_block_binary_code_coverage();
+ }
+
void SetCodeCoverageList(Object* value);
double time_millis_since_init() {
@@ -1099,9 +1114,7 @@ class Isolate {
int id() const { return static_cast<int>(id_); }
- HStatistics* GetHStatistics();
CompilationStatistics* GetTurboStatistics();
- HTracer* GetHTracer();
CodeTracer* GetCodeTracer();
void DumpAndResetStats();
@@ -1181,15 +1194,6 @@ class Isolate {
void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent);
- // Support for dynamically disabling tail call elimination.
- Address is_tail_call_elimination_enabled_address() {
- return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
- }
- bool is_tail_call_elimination_enabled() const {
- return is_tail_call_elimination_enabled_;
- }
- void SetTailCallEliminationEnabled(bool enabled);
-
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
@@ -1208,6 +1212,10 @@ class Isolate {
return cancelable_task_manager_;
}
+ wasm::CompilationManager* wasm_compilation_manager() {
+ return wasm_compilation_manager_.get();
+ }
+
const AstStringConstants* ast_string_constants() const {
return ast_string_constants_;
}
@@ -1220,19 +1228,12 @@ class Isolate {
return compiler_dispatcher_;
}
- // Clear all optimized code stored in native contexts.
- void ClearOSROptimizedCode();
-
- // Ensure that a particular optimized code is evicted.
- void EvictOSROptimizedCode(Code* code, const char* reason);
-
bool IsInAnyContext(Object* object, uint32_t index);
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
- void RunHostImportModuleDynamicallyCallback(Handle<String> referrer,
- Handle<String> specifier,
- Handle<JSPromise> promise);
+ MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
+ Handle<String> referrer, Handle<Object> specifier);
void SetRAILMode(RAILMode rail_mode);
@@ -1260,42 +1261,53 @@ class Isolate {
// List of native heap values allocated by the runtime as part of its
// implementation that must be freed at isolate deinit.
- class ManagedObjectFinalizer final {
+ class ManagedObjectFinalizer {
public:
- typedef void (*Deleter)(void*);
- void Dispose() { deleter_(value_); }
+ using Deleter = void (*)(ManagedObjectFinalizer*);
+
+ ManagedObjectFinalizer(void* value, Deleter deleter)
+ : value_(value), deleter_(deleter) {}
+
+ void Dispose() { deleter_(this); }
+
+ void* value() const { return value_; }
private:
friend class Isolate;
- ManagedObjectFinalizer() {
- DCHECK_EQ(reinterpret_cast<void*>(this),
- reinterpret_cast<void*>(&value_));
- }
+ ManagedObjectFinalizer() = default;
- // value_ must be the first member
void* value_ = nullptr;
Deleter deleter_ = nullptr;
ManagedObjectFinalizer* prev_ = nullptr;
ManagedObjectFinalizer* next_ = nullptr;
};
- // Register a native value for destruction at isolate teardown.
- ManagedObjectFinalizer* RegisterForReleaseAtTeardown(
- void* value, ManagedObjectFinalizer::Deleter deleter);
+ static_assert(offsetof(ManagedObjectFinalizer, value_) == 0,
+ "value_ must be the first member");
+
+ // Register a finalizer to be called at isolate teardown.
+ void RegisterForReleaseAtTeardown(ManagedObjectFinalizer*);
// Unregister a previously registered value from release at
- // isolate teardown, deleting the ManagedObjectFinalizer.
+ // isolate teardown.
// This transfers the responsibility of the previously managed value's
- // deletion to the caller. Pass by pointer, because *finalizer_ptr gets
- // reset to nullptr.
- void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer** finalizer_ptr);
+ // deletion to the caller.
+ void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer*);
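A minimal sketch of the new ownership model, with a hypothetical wrapper type; the finalizer is now embedded in (and kept alive by) the caller instead of being heap-allocated by the isolate:

    class NativeBufferOwner {
     public:
      NativeBufferOwner(Isolate* isolate, void* buffer)
          : finalizer_(buffer, &Delete) {
        // At teardown the isolate calls Delete(&finalizer_) unless we
        // unregister first.
        isolate->RegisterForReleaseAtTeardown(&finalizer_);
      }

      void TakeBack(Isolate* isolate) {
        // Transfers responsibility for freeing the buffer back to us.
        isolate->UnregisterFromReleaseAtTeardown(&finalizer_);
      }

     private:
      static void Delete(Isolate::ManagedObjectFinalizer* finalizer) {
        free(finalizer->value());  // whatever cleanup the resource needs
      }

      Isolate::ManagedObjectFinalizer finalizer_;
    };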
size_t elements_deletion_counter() { return elements_deletion_counter_; }
void set_elements_deletion_counter(size_t value) {
elements_deletion_counter_ = value;
}
+ const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
+ return top_backup_incumbent_scope_;
+ }
+ void set_top_backup_incumbent_scope(
+ const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
+ top_backup_incumbent_scope_ = top_backup_incumbent_scope;
+ }
+
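This field backs the new GetIncumbentContext() declared above; a hedged sketch of the embedder side, assuming the public v8::Context::BackupIncumbentScope API that the field names (the helper function itself is illustrative):

    void CallWithExplicitIncumbent(v8::Isolate* isolate,
                                   v8::Local<v8::Context> incumbent,
                                   v8::Local<v8::Context> target,
                                   v8::Local<v8::Function> callback) {
      // While |backup| is on the stack, Isolate::GetIncumbentContext() can
      // fall back to |incumbent| when no JavaScript frame determines one.
      v8::Context::BackupIncumbentScope backup(incumbent);
      v8::Context::Scope scope(target);
      v8::Local<v8::Value> result;
      if (!callback->Call(target, v8::Undefined(isolate), 0, nullptr)
               .ToLocal(&result)) {
        // An exception is pending; the caller decides how to report it.
      }
    }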
protected:
explicit Isolate(bool enable_serializer);
bool IsArrayOrObjectPrototype(Object* object);
@@ -1427,11 +1439,10 @@ class Isolate {
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
- Counters* counters_;
+ std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
Logger* logger_;
StackGuard stack_guard_;
- StatsTable* stats_table_;
StubCache* load_stub_cache_;
StubCache* store_stub_cache_;
CodeAgingHelper* code_aging_helper_;
@@ -1566,6 +1577,8 @@ class Isolate {
CancelableTaskManager* cancelable_task_manager_;
+ std::unique_ptr<wasm::CompilationManager> wasm_compilation_manager_;
+
debug::ConsoleDelegate* console_delegate_ = nullptr;
v8::Isolate::AbortOnUncaughtExceptionCallback
@@ -1584,6 +1597,10 @@ class Isolate {
size_t elements_deletion_counter_ = 0;
+ // The top entry of the v8::Context::BackupIncumbentScope stack.
+ const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
+ nullptr;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class HeapTester;
diff --git a/deps/v8/src/js/OWNERS b/deps/v8/src/js/OWNERS
index f7002c723b..0108c712e3 100644
--- a/deps/v8/src/js/OWNERS
+++ b/deps/v8/src/js/OWNERS
@@ -4,8 +4,11 @@ adamk@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
ishell@chromium.org
+jgruber@chromium.org
jkummerow@chromium.org
littledan@chromium.org
rossberg@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Language
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 2e22a521dc..e1834f758a 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -327,37 +327,42 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
// -------------------------------------------------------------------
-
-function ArrayToString() {
- var array;
- var func;
- if (IS_ARRAY(this)) {
- func = this.join;
- if (func === ArrayJoin) {
- return Join(this, this.length, ',', false);
+var ArrayJoin;
+DEFINE_METHOD(
+ GlobalArray.prototype,
+ toString() {
+ var array;
+ var func;
+ if (IS_ARRAY(this)) {
+ func = this.join;
+ if (func === ArrayJoin) {
+ return Join(this, this.length, ',', false);
+ }
+ array = this;
+ } else {
+ array = TO_OBJECT(this);
+ func = array.join;
}
- array = this;
- } else {
- array = TO_OBJECT(this);
- func = array.join;
- }
- if (!IS_CALLABLE(func)) {
- return %_Call(ObjectToString, array);
+ if (!IS_CALLABLE(func)) {
+ return %_Call(ObjectToString, array);
+ }
+ return %_Call(func, array);
}
- return %_Call(func, array);
-}
-
+);
function InnerArrayToLocaleString(array, length) {
return Join(array, TO_LENGTH(length), ',', true);
}
-function ArrayToLocaleString() {
- var array = TO_OBJECT(this);
- var arrayLen = array.length;
- return InnerArrayToLocaleString(array, arrayLen);
-}
+DEFINE_METHOD(
+ GlobalArray.prototype,
+ toLocaleString() {
+ var array = TO_OBJECT(this);
+ var arrayLen = array.length;
+ return InnerArrayToLocaleString(array, arrayLen);
+ }
+);
function InnerArrayJoin(separator, array, length) {
@@ -378,19 +383,22 @@ function InnerArrayJoin(separator, array, length) {
}
-function ArrayJoin(separator) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
+DEFINE_METHOD(
+ GlobalArray.prototype,
+ join(separator) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
- return InnerArrayJoin(separator, array, length);
-}
+ return InnerArrayJoin(separator, array, length);
+ }
+);
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
-function ArrayPop() {
+function ArrayPopFallback() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
var array = TO_OBJECT(this);
@@ -410,7 +418,7 @@ function ArrayPop() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
-function ArrayPush() {
+function ArrayPushFallback() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push");
var array = TO_OBJECT(this);
@@ -512,26 +520,29 @@ function GenericArrayReverse(array, len) {
}
-function ArrayReverse() {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
+DEFINE_METHOD(
+ GlobalArray.prototype,
+ reverse() {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
- var array = TO_OBJECT(this);
- var len = TO_LENGTH(array.length);
- var isArray = IS_ARRAY(array);
+ var array = TO_OBJECT(this);
+ var len = TO_LENGTH(array.length);
+ var isArray = IS_ARRAY(array);
- if (UseSparseVariant(array, len, isArray, len)) {
- %NormalizeElements(array);
- SparseReverse(array, len);
- return array;
- } else if (isArray && %_HasFastPackedElements(array)) {
- return PackedArrayReverse(array, len);
- } else {
- return GenericArrayReverse(array, len);
+ if (UseSparseVariant(array, len, isArray, len)) {
+ %NormalizeElements(array);
+ SparseReverse(array, len);
+ return array;
+ } else if (isArray && %_HasFastPackedElements(array)) {
+ return PackedArrayReverse(array, len);
+ } else {
+ return GenericArrayReverse(array, len);
+ }
}
-}
+);
-function ArrayShift() {
+function ArrayShiftFallback() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
var array = TO_OBJECT(this);
@@ -558,7 +569,7 @@ function ArrayShift() {
}
-function ArrayUnshift(arg1) { // length == 1
+function ArrayUnshiftFallback(arg1) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
var array = TO_OBJECT(this);
@@ -582,7 +593,7 @@ function ArrayUnshift(arg1) { // length == 1
}
-function ArraySlice(start, end) {
+function ArraySliceFallback(start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
var array = TO_OBJECT(this);
@@ -655,7 +666,7 @@ function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
}
-function ArraySplice(start, delete_count) {
+function ArraySpliceFallback(start, delete_count) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.splice");
var num_arguments = arguments.length;
@@ -992,121 +1003,133 @@ function InnerArraySort(array, length, comparefn) {
}
-function ArraySort(comparefn) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
+DEFINE_METHOD(
+ GlobalArray.prototype,
+ sort(comparefn) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
- return InnerArraySort(array, length, comparefn);
-}
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+ return InnerArraySort(array, length, comparefn);
+ }
+);
-function ArrayLastIndexOf(element, index) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
+DEFINE_METHOD_LEN(
+ GlobalArray.prototype,
+ lastIndexOf(element, index) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
- var array = this;
- var length = TO_LENGTH(this.length);
+ var array = this;
+ var length = TO_LENGTH(this.length);
- if (length == 0) return -1;
- if (arguments.length < 2) {
- index = length - 1;
- } else {
- index = INVERT_NEG_ZERO(TO_INTEGER(index));
- // If index is negative, index from end of the array.
- if (index < 0) index += length;
- // If index is still negative, do not search the array.
- if (index < 0) return -1;
- else if (index >= length) index = length - 1;
- }
- var min = 0;
- var max = index;
- if (UseSparseVariant(array, length, IS_ARRAY(array), index)) {
- %NormalizeElements(array);
- var indices = %GetArrayKeys(array, index + 1);
- if (IS_NUMBER(indices)) {
- // It's an interval.
- max = indices; // Capped by index already.
- // Fall through to loop below.
+ if (length == 0) return -1;
+ if (arguments.length < 2) {
+ index = length - 1;
} else {
- if (indices.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(array, indices);
- var i = sortedKeys.length - 1;
- while (i >= 0) {
- var key = sortedKeys[i];
- if (array[key] === element) return key;
- i--;
+ index = INVERT_NEG_ZERO(TO_INTEGER(index));
+ // If index is negative, index from end of the array.
+ if (index < 0) index += length;
+ // If index is still negative, do not search the array.
+ if (index < 0) return -1;
+ else if (index >= length) index = length - 1;
+ }
+ var min = 0;
+ var max = index;
+ if (UseSparseVariant(array, length, IS_ARRAY(array), index)) {
+ %NormalizeElements(array);
+ var indices = %GetArrayKeys(array, index + 1);
+ if (IS_NUMBER(indices)) {
+ // It's an interval.
+ max = indices; // Capped by index already.
+ // Fall through to loop below.
+ } else {
+ if (indices.length == 0) return -1;
+ // Get all the keys in sorted order.
+ var sortedKeys = GetSortedArrayKeys(array, indices);
+ var i = sortedKeys.length - 1;
+ while (i >= 0) {
+ var key = sortedKeys[i];
+ if (array[key] === element) return key;
+ i--;
+ }
+ return -1;
+ }
+ }
+ // Lookup through the array.
+ if (!IS_UNDEFINED(element)) {
+ for (var i = max; i >= min; i--) {
+ if (array[i] === element) return i;
}
return -1;
}
- }
- // Lookup through the array.
- if (!IS_UNDEFINED(element)) {
for (var i = max; i >= min; i--) {
- if (array[i] === element) return i;
+ if (IS_UNDEFINED(array[i]) && i in array) {
+ return i;
+ }
}
return -1;
- }
- for (var i = max; i >= min; i--) {
- if (IS_UNDEFINED(array[i]) && i in array) {
- return i;
- }
- }
- return -1;
-}
+ },
+ 1 /* Set function length */
+);
+
// ES#sec-array.prototype.copywithin
// (Array.prototype.copyWithin ( target, start [ , end ] )
-function ArrayCopyWithin(target, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- target = TO_INTEGER(target);
- var to;
- if (target < 0) {
- to = MaxSimple(length + target, 0);
- } else {
- to = MinSimple(target, length);
- }
+DEFINE_METHOD_LEN(
+ GlobalArray.prototype,
+ copyWithin(target, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+
+ target = TO_INTEGER(target);
+ var to;
+ if (target < 0) {
+ to = MaxSimple(length + target, 0);
+ } else {
+ to = MinSimple(target, length);
+ }
- start = TO_INTEGER(start);
- var from;
- if (start < 0) {
- from = MaxSimple(length + start, 0);
- } else {
- from = MinSimple(start, length);
- }
+ start = TO_INTEGER(start);
+ var from;
+ if (start < 0) {
+ from = MaxSimple(length + start, 0);
+ } else {
+ from = MinSimple(start, length);
+ }
- end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
- var final;
- if (end < 0) {
- final = MaxSimple(length + end, 0);
- } else {
- final = MinSimple(end, length);
- }
+ end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+ var final;
+ if (end < 0) {
+ final = MaxSimple(length + end, 0);
+ } else {
+ final = MinSimple(end, length);
+ }
- var count = MinSimple(final - from, length - to);
- var direction = 1;
- if (from < to && to < (from + count)) {
- direction = -1;
- from = from + count - 1;
- to = to + count - 1;
- }
+ var count = MinSimple(final - from, length - to);
+ var direction = 1;
+ if (from < to && to < (from + count)) {
+ direction = -1;
+ from = from + count - 1;
+ to = to + count - 1;
+ }
- while (count > 0) {
- if (from in array) {
- array[to] = array[from];
- } else {
- delete array[to];
+ while (count > 0) {
+ if (from in array) {
+ array[to] = array[from];
+ } else {
+ delete array[to];
+ }
+ from = from + direction;
+ to = to + direction;
+ count--;
}
- from = from + direction;
- to = to + direction;
- count--;
- }
- return array;
-}
+ return array;
+ },
+ 2 /* Set function length */
+);
function InnerArrayFind(predicate, thisArg, array, length) {
@@ -1126,14 +1149,18 @@ function InnerArrayFind(predicate, thisArg, array, length) {
// ES6 draft 07-15-13, section 15.4.3.23
-function ArrayFind(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
+DEFINE_METHOD_LEN(
+ GlobalArray.prototype,
+ find(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_INTEGER(array.length);
- return InnerArrayFind(predicate, thisArg, array, length);
-}
+ return InnerArrayFind(predicate, thisArg, array, length);
+ },
+ 1 /* Set function length */
+);
function InnerArrayFindIndex(predicate, thisArg, array, length) {
@@ -1153,123 +1180,132 @@ function InnerArrayFindIndex(predicate, thisArg, array, length) {
// ES6 draft 07-15-13, section 15.4.3.24
-function ArrayFindIndex(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
+DEFINE_METHOD_LEN(
+ GlobalArray.prototype,
+ findIndex(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_INTEGER(array.length);
- return InnerArrayFindIndex(predicate, thisArg, array, length);
-}
+ return InnerArrayFindIndex(predicate, thisArg, array, length);
+ },
+ 1 /* Set function length */
+);
// ES6, draft 04-05-14, section 22.1.3.6
-function ArrayFill(value, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
+DEFINE_METHOD_LEN(
+ GlobalArray.prototype,
+ fill(value, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
- var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
- var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+ var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
+ var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
- if (i < 0) {
- i += length;
- if (i < 0) i = 0;
- } else {
- if (i > length) i = length;
- }
+ if (i < 0) {
+ i += length;
+ if (i < 0) i = 0;
+ } else {
+ if (i > length) i = length;
+ }
- if (end < 0) {
- end += length;
- if (end < 0) end = 0;
- } else {
- if (end > length) end = length;
- }
+ if (end < 0) {
+ end += length;
+ if (end < 0) end = 0;
+ } else {
+ if (end > length) end = length;
+ }
- if ((end - i) > 0 && %object_is_frozen(array)) {
- throw %make_type_error(kArrayFunctionsOnFrozen);
- }
+ if ((end - i) > 0 && %object_is_frozen(array)) {
+ throw %make_type_error(kArrayFunctionsOnFrozen);
+ }
- for (; i < end; i++)
- array[i] = value;
- return array;
-}
+ for (; i < end; i++)
+ array[i] = value;
+ return array;
+ },
+ 1 /* Set function length */
+);
// ES6, draft 10-14-14, section 22.1.2.1
-function ArrayFrom(arrayLike, mapfn, receiver) {
- var items = TO_OBJECT(arrayLike);
- var mapping = !IS_UNDEFINED(mapfn);
-
- if (mapping) {
- if (!IS_CALLABLE(mapfn)) {
- throw %make_type_error(kCalledNonCallable, mapfn);
+DEFINE_METHOD_LEN(
+ GlobalArray,
+ 'from'(arrayLike, mapfn, receiver) {
+ var items = TO_OBJECT(arrayLike);
+ var mapping = !IS_UNDEFINED(mapfn);
+
+ if (mapping) {
+ if (!IS_CALLABLE(mapfn)) {
+ throw %make_type_error(kCalledNonCallable, mapfn);
+ }
}
- }
- var iterable = GetMethod(items, iteratorSymbol);
- var k;
- var result;
- var mappedValue;
- var nextValue;
+ var iterable = GetMethod(items, iteratorSymbol);
+ var k;
+ var result;
+ var mappedValue;
+ var nextValue;
- if (!IS_UNDEFINED(iterable)) {
- result = %IsConstructor(this) ? new this() : [];
- k = 0;
+ if (!IS_UNDEFINED(iterable)) {
+ result = %IsConstructor(this) ? new this() : [];
+ k = 0;
- for (nextValue of
- { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
+ for (nextValue of
+ { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
+ if (mapping) {
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
+ } else {
+ mappedValue = nextValue;
+ }
+ %CreateDataProperty(result, k, mappedValue);
+ k++;
}
- %CreateDataProperty(result, k, mappedValue);
- k++;
- }
- result.length = k;
- return result;
- } else {
- var len = TO_LENGTH(items.length);
- result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
+ result.length = k;
+ return result;
+ } else {
+ var len = TO_LENGTH(items.length);
+ result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
- for (k = 0; k < len; ++k) {
- nextValue = items[k];
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
+ for (k = 0; k < len; ++k) {
+ nextValue = items[k];
+ if (mapping) {
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
+ } else {
+ mappedValue = nextValue;
+ }
+ %CreateDataProperty(result, k, mappedValue);
}
- %CreateDataProperty(result, k, mappedValue);
- }
-
- result.length = k;
- return result;
- }
-}
+ result.length = k;
+ return result;
+ }
+ },
+ 1 /* Set function length. */
+);
// ES6, draft 05-22-14, section 22.1.2.3
-function ArrayOf(...args) {
- var length = args.length;
- var constructor = this;
- // TODO: Implement IsConstructor (ES6 section 7.2.5)
- var array = %IsConstructor(constructor) ? new constructor(length) : [];
- for (var i = 0; i < length; i++) {
- %CreateDataProperty(array, i, args[i]);
+DEFINE_METHOD(
+ GlobalArray,
+ of(...args) {
+ var length = args.length;
+ var constructor = this;
+ // TODO: Implement IsConstructor (ES6 section 7.2.5)
+ var array = %IsConstructor(constructor) ? new constructor(length) : [];
+ for (var i = 0; i < length; i++) {
+ %CreateDataProperty(array, i, args[i]);
+ }
+ array.length = length;
+ return array;
}
- array.length = length;
- return array;
-}
+);
// -------------------------------------------------------------------
-// Set up non-enumerable constructor property on the Array.prototype
-// object.
-%AddNamedProperty(GlobalArray.prototype, "constructor", GlobalArray,
- DONT_ENUM);
-
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
__proto__: null,
@@ -1287,107 +1323,64 @@ var unscopables = {
%AddNamedProperty(GlobalArray.prototype, unscopablesSymbol, unscopables,
DONT_ENUM | READ_ONLY);
-%FunctionSetLength(ArrayFrom, 1);
-
-// Set up non-enumerable functions on the Array object.
-utils.InstallFunctions(GlobalArray, DONT_ENUM, [
- "from", ArrayFrom,
- "of", ArrayOf
-]);
-
-var specialFunctions = %SpecialArrayFunctions();
-
-function getFunction(name, jsBuiltin, len) {
- var f = jsBuiltin;
- if (specialFunctions.hasOwnProperty(name)) {
- f = specialFunctions[name];
- }
- if (!IS_UNDEFINED(len)) {
- %FunctionSetLength(f, len);
- }
- return f;
-};
+var ArrayIndexOf = GlobalArray.prototype.indexOf;
+var ArrayJoin = GlobalArray.prototype.join;
+var ArrayPop = GlobalArray.prototype.pop;
+var ArrayPush = GlobalArray.prototype.push;
+var ArraySlice = GlobalArray.prototype.slice;
+var ArrayShift = GlobalArray.prototype.shift;
+var ArraySort = GlobalArray.prototype.sort;
+var ArraySplice = GlobalArray.prototype.splice;
+var ArrayToString = GlobalArray.prototype.toString;
+var ArrayUnshift = GlobalArray.prototype.unshift;
// Array prototype functions that return iterators. They are exposed to the
// public API via Template::SetIntrinsicDataProperty().
-var IteratorFunctions = {
- "entries": getFunction("entries", null, 0),
- "keys": getFunction("keys", null, 0),
- "values": getFunction("values", null, 0)
-}
-
-// Set up non-enumerable functions of the Array.prototype object and
-// set their names.
-// Manipulate the length of some of the functions to meet
-// expectations set by ECMA-262 or Mozilla.
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
- "toString", getFunction("toString", ArrayToString),
- "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush, 1),
- "reverse", getFunction("reverse", ArrayReverse),
- "shift", getFunction("shift", ArrayShift),
- "unshift", getFunction("unshift", ArrayUnshift, 1),
- "slice", getFunction("slice", ArraySlice, 2),
- "splice", getFunction("splice", ArraySplice, 2),
- "sort", getFunction("sort", ArraySort),
- "indexOf", getFunction("indexOf", null, 1),
- "lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
- "copyWithin", getFunction("copyWithin", ArrayCopyWithin, 2),
- "find", getFunction("find", ArrayFind, 1),
- "findIndex", getFunction("findIndex", ArrayFindIndex, 1),
- "fill", getFunction("fill", ArrayFill, 1),
- "includes", getFunction("includes", null, 1),
- "entries", IteratorFunctions.entries,
- "keys", IteratorFunctions.keys,
- iteratorSymbol, IteratorFunctions.values
-]);
+var ArrayEntries = GlobalArray.prototype.entries;
+var ArrayForEach = GlobalArray.prototype.forEach;
+var ArrayKeys = GlobalArray.prototype.keys;
+var ArrayValues = GlobalArray.prototype[iteratorSymbol];
-%FunctionSetName(IteratorFunctions.entries, "entries");
-%FunctionSetName(IteratorFunctions.keys, "keys");
-%FunctionSetName(IteratorFunctions.values, "values");
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code.
// Adding only the functions that are actually used.
utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
- "indexOf", getFunction("indexOf", null),
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush),
- "shift", getFunction("shift", ArrayShift),
- "sort", getFunction("sort", ArraySort),
- "splice", getFunction("splice", ArraySplice)
+ "indexOf", ArrayIndexOf,
+ "join", ArrayJoin,
+ "pop", ArrayPop,
+ "push", ArrayPush,
+ "shift", ArrayShift,
+ "sort", ArraySort,
+ "splice", ArraySplice
]);
utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
- "join", getFunction("join", ArrayJoin),
- "pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush),
- "shift", getFunction("shift", ArrayShift)
+ "join", ArrayJoin,
+ "pop", ArrayPop,
+ "push", ArrayPush,
+ "shift", ArrayShift
]);
// V8 extras get a separate copy of InternalPackedArray. We give them the basic
// manipulation methods.
utils.SetUpLockedPrototype(extrasUtils.InternalPackedArray, GlobalArray(), [
- "push", getFunction("push", ArrayPush),
- "pop", getFunction("pop", ArrayPop),
- "shift", getFunction("shift", ArrayShift),
- "unshift", getFunction("unshift", ArrayUnshift),
- "splice", getFunction("splice", ArraySplice),
- "slice", getFunction("slice", ArraySlice)
+ "push", ArrayPush,
+ "pop", ArrayPop,
+ "shift", ArrayShift,
+ "unshift", ArrayUnshift,
+ "splice", ArraySplice,
+ "slice", ArraySlice
]);
// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
- to.ArrayFrom = ArrayFrom;
to.ArrayJoin = ArrayJoin;
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
- to.ArrayValues = IteratorFunctions.values,
+ to.ArrayValues = ArrayValues;
to.InnerArrayFind = InnerArrayFind;
to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayJoin = InnerArrayJoin;
@@ -1396,15 +1389,17 @@ utils.Export(function(to) {
});
%InstallToContext([
- "array_entries_iterator", IteratorFunctions.entries,
- "array_keys_iterator", IteratorFunctions.keys,
- "array_pop", ArrayPop,
- "array_push", ArrayPush,
- "array_shift", ArrayShift,
- "array_splice", ArraySplice,
- "array_slice", ArraySlice,
- "array_unshift", ArrayUnshift,
- "array_values_iterator", IteratorFunctions.values,
+ "array_entries_iterator", ArrayEntries,
+ "array_for_each_iterator", ArrayForEach,
+ "array_keys_iterator", ArrayKeys,
+ "array_values_iterator", ArrayValues,
+ // Fallback implementations of Array builtins.
+ "array_pop", ArrayPopFallback,
+ "array_push", ArrayPushFallback,
+ "array_shift", ArrayShiftFallback,
+ "array_splice", ArraySpliceFallback,
+ "array_slice", ArraySliceFallback,
+ "array_unshift", ArrayUnshiftFallback,
]);
});
diff --git a/deps/v8/src/js/collection-iterator.js b/deps/v8/src/js/collection-iterator.js
deleted file mode 100644
index 173f273f9b..0000000000
--- a/deps/v8/src/js/collection-iterator.js
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalMap = global.Map;
-var GlobalSet = global.Set;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MapIterator = utils.ImportNow("MapIterator");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var SetIterator = utils.ImportNow("SetIterator");
-
-// -------------------------------------------------------------------
-
-function SetIteratorConstructor(set, kind) {
- %SetIteratorInitialize(this, set, kind);
-}
-
-
-function SetIteratorNextJS() {
- if (!IS_SET_ITERATOR(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set Iterator.prototype.next', this);
- }
-
- var value_array = [UNDEFINED, UNDEFINED];
- var result = %_CreateIterResultObject(value_array, false);
- switch (%SetIteratorNext(this, value_array)) {
- case 0:
- result.value = UNDEFINED;
- result.done = true;
- break;
- case ITERATOR_KIND_VALUES:
- result.value = value_array[0];
- break;
- case ITERATOR_KIND_ENTRIES:
- value_array[1] = value_array[0];
- break;
- }
-
- return result;
-}
-
-
-function SetEntries() {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set.prototype.entries', this);
- }
- return new SetIterator(this, ITERATOR_KIND_ENTRIES);
-}
-
-
-function SetValues() {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set.prototype.values', this);
- }
- return new SetIterator(this, ITERATOR_KIND_VALUES);
-}
-
-// -------------------------------------------------------------------
-
-%SetCode(SetIterator, SetIteratorConstructor);
-%FunctionSetInstanceClassName(SetIterator, 'Set Iterator');
-utils.InstallFunctions(SetIterator.prototype, DONT_ENUM, [
- 'next', SetIteratorNextJS
-]);
-
-%AddNamedProperty(SetIterator.prototype, toStringTagSymbol,
- "Set Iterator", READ_ONLY | DONT_ENUM);
-
-utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
- 'entries', SetEntries,
- 'keys', SetValues,
- 'values', SetValues
-]);
-
-%AddNamedProperty(GlobalSet.prototype, iteratorSymbol, SetValues, DONT_ENUM);
-
-// -------------------------------------------------------------------
-
-function MapIteratorConstructor(map, kind) {
- %MapIteratorInitialize(this, map, kind);
-}
-
-
-function MapIteratorNextJS() {
- if (!IS_MAP_ITERATOR(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map Iterator.prototype.next', this);
- }
-
- var value_array = [UNDEFINED, UNDEFINED];
- var result = %_CreateIterResultObject(value_array, false);
- switch (%MapIteratorNext(this, value_array)) {
- case 0:
- result.value = UNDEFINED;
- result.done = true;
- break;
- case ITERATOR_KIND_KEYS:
- result.value = value_array[0];
- break;
- case ITERATOR_KIND_VALUES:
- result.value = value_array[1];
- break;
- // ITERATOR_KIND_ENTRIES does not need any processing.
- }
-
- return result;
-}
-
-
-function MapEntries() {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.entries', this);
- }
- return new MapIterator(this, ITERATOR_KIND_ENTRIES);
-}
-
-
-function MapKeys() {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.keys', this);
- }
- return new MapIterator(this, ITERATOR_KIND_KEYS);
-}
-
-
-function MapValues() {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.values', this);
- }
- return new MapIterator(this, ITERATOR_KIND_VALUES);
-}
-
-// -------------------------------------------------------------------
-
-%SetCode(MapIterator, MapIteratorConstructor);
-%FunctionSetInstanceClassName(MapIterator, 'Map Iterator');
-utils.InstallFunctions(MapIterator.prototype, DONT_ENUM, [
- 'next', MapIteratorNextJS
-]);
-
-%AddNamedProperty(MapIterator.prototype, toStringTagSymbol,
- "Map Iterator", READ_ONLY | DONT_ENUM);
-
-
-utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
- 'entries', MapEntries,
- 'keys', MapKeys,
- 'values', MapValues
-]);
-
-%AddNamedProperty(GlobalMap.prototype, iteratorSymbol, MapEntries, DONT_ENUM);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.MapEntries = MapEntries;
- to.MapIteratorNext = MapIteratorNextJS;
- to.SetIteratorNext = SetIteratorNextJS;
- to.SetValues = SetValues;
-});
-
-})
diff --git a/deps/v8/src/js/collection.js b/deps/v8/src/js/collection.js
index adb2688618..e06dbb9e3f 100644
--- a/deps/v8/src/js/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
(function(global, utils) {
+
"use strict";
%CheckIsBootstrapping();
@@ -17,7 +18,6 @@ var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
var MathRandom = global.Math.random;
var MapIterator;
var SetIterator;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MapIterator = from.MapIterator;
@@ -119,350 +119,158 @@ function GetHash(key) {
// -------------------------------------------------------------------
// Harmony Set
-function SetConstructor(iterable) {
- if (IS_UNDEFINED(new.target)) {
- throw %make_type_error(kConstructorNotFunction, "Set");
- }
-
- %_SetInitialize(this);
-
- if (!IS_NULL_OR_UNDEFINED(iterable)) {
- var adder = this.add;
- if (!IS_CALLABLE(adder)) {
- throw %make_type_error(kPropertyNotFunction, adder, 'add', this);
+// Set up the non-enumerable functions on the Set prototype object.
+DEFINE_METHODS(
+ GlobalSet.prototype,
+ {
+ add(key) {
+ if (!IS_SET(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver, 'Set.prototype.add', this);
+ }
+ // Normalize -0 to +0 as required by the spec.
+ // Even though we use SameValueZero as the comparison for the keys we don't
+ // want to ever store -0 as the key since the key is directly exposed when
+ // doing iteration.
+ if (key === 0) {
+ key = 0;
+ }
+ var table = %_JSCollectionGetTable(this);
+ var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
+ var hash = GetHash(key);
+ if (SetFindEntry(table, numBuckets, key, hash) !== NOT_FOUND) return this;
+
+ var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
+ var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
+ var capacity = numBuckets << 1;
+ if ((nof + nod) >= capacity) {
+ // Need to grow, bail out to runtime.
+ %SetGrow(this);
+ // Re-load state from the grown backing store.
+ table = %_JSCollectionGetTable(this);
+ numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
+ nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
+ nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
+ }
+ var entry = nof + nod;
+ var index = ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets);
+ var bucket = ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets);
+ var chainEntry = ORDERED_HASH_TABLE_BUCKET_AT(table, bucket);
+ ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry);
+ ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof + 1);
+ FIXED_ARRAY_SET(table, index, key);
+ FIXED_ARRAY_SET_SMI(table, index + 1, chainEntry);
+ return this;
}
- for (var value of iterable) {
- %_Call(adder, this, value);
+ delete(key) {
+ if (!IS_SET(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'Set.prototype.delete', this);
+ }
+ var table = %_JSCollectionGetTable(this);
+ var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
+ var hash = GetExistingHash(key);
+ if (IS_UNDEFINED(hash)) return false;
+ var entry = SetFindEntry(table, numBuckets, key, hash);
+ if (entry === NOT_FOUND) return false;
+
+ var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table) - 1;
+ var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table) + 1;
+ var index = ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets);
+ FIXED_ARRAY_SET(table, index, %_TheHole());
+ ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof);
+ ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, nod);
+ if (nof < (numBuckets >>> 1)) %SetShrink(this);
+ return true;
}
}
-}
-
-
-function SetAdd(key) {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, 'Set.prototype.add', this);
- }
- // Normalize -0 to +0 as required by the spec.
- // Even though we use SameValueZero as the comparison for the keys we don't
- // want to ever store -0 as the key since the key is directly exposed when
- // doing iteration.
- if (key === 0) {
- key = 0;
- }
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
- if (SetFindEntry(table, numBuckets, key, hash) !== NOT_FOUND) return this;
-
- var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
- var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
- var capacity = numBuckets << 1;
- if ((nof + nod) >= capacity) {
- // Need to grow, bail out to runtime.
- %SetGrow(this);
- // Re-load state from the grown backing store.
- table = %_JSCollectionGetTable(this);
- numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
- nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
- }
- var entry = nof + nod;
- var index = ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets);
- var bucket = ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets);
- var chainEntry = ORDERED_HASH_TABLE_BUCKET_AT(table, bucket);
- ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry);
- ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof + 1);
- FIXED_ARRAY_SET(table, index, key);
- FIXED_ARRAY_SET_SMI(table, index + 1, chainEntry);
- return this;
-}
-
-
-function SetHas(key) {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, 'Set.prototype.has', this);
- }
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return false;
- return SetFindEntry(table, numBuckets, key, hash) !== NOT_FOUND;
-}
-
-
-function SetDelete(key) {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set.prototype.delete', this);
- }
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return false;
- var entry = SetFindEntry(table, numBuckets, key, hash);
- if (entry === NOT_FOUND) return false;
-
- var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table) - 1;
- var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table) + 1;
- var index = ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets);
- FIXED_ARRAY_SET(table, index, %_TheHole());
- ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof);
- ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, nod);
- if (nof < (numBuckets >>> 1)) %SetShrink(this);
- return true;
-}
-
-
-function SetGetSize() {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set.prototype.size', this);
- }
- var table = %_JSCollectionGetTable(this);
- return ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
-}
-
-
-function SetClearJS() {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set.prototype.clear', this);
- }
- %_SetClear(this);
-}
-
-
-function SetForEach(f, receiver) {
- if (!IS_SET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Set.prototype.forEach', this);
- }
-
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
-
- var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
- var key;
- var value_array = [UNDEFINED];
- while (%SetIteratorNext(iterator, value_array)) {
- key = value_array[0];
- %_Call(f, receiver, key, key, this);
- }
-}
-
-// -------------------------------------------------------------------
-
-%SetCode(GlobalSet, SetConstructor);
-%FunctionSetLength(GlobalSet, 0);
-%FunctionSetPrototype(GlobalSet, new GlobalObject());
-%AddNamedProperty(GlobalSet.prototype, "constructor", GlobalSet, DONT_ENUM);
-%AddNamedProperty(GlobalSet.prototype, toStringTagSymbol, "Set",
- DONT_ENUM | READ_ONLY);
-
-%FunctionSetLength(SetForEach, 1);
-
-// Set up the non-enumerable functions on the Set prototype object.
-utils.InstallGetter(GlobalSet.prototype, "size", SetGetSize);
-utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
- "add", SetAdd,
- "has", SetHas,
- "delete", SetDelete,
- "clear", SetClearJS,
- "forEach", SetForEach
-]);
+);
-
-// -------------------------------------------------------------------
// Harmony Map
-function MapConstructor(iterable) {
- if (IS_UNDEFINED(new.target)) {
- throw %make_type_error(kConstructorNotFunction, "Map");
- }
+// Set up the non-enumerable functions on the Map prototype object.
+DEFINE_METHODS(
+ GlobalMap.prototype,
+ {
+ set(key, value) {
+ if (!IS_MAP(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'Map.prototype.set', this);
+ }
+ // Normalize -0 to +0 as required by the spec.
+ // Even though we use SameValueZero as the comparison for the keys we don't
+ // want to ever store -0 as the key since the key is directly exposed when
+ // doing iteration.
+ if (key === 0) {
+ key = 0;
+ }
- %_MapInitialize(this);
+ var table = %_JSCollectionGetTable(this);
+ var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
+ var hash = GetHash(key);
+ var entry = MapFindEntry(table, numBuckets, key, hash);
+ if (entry !== NOT_FOUND) {
+ var existingIndex = ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets);
+ FIXED_ARRAY_SET(table, existingIndex + 1, value);
+ return this;
+ }
- if (!IS_NULL_OR_UNDEFINED(iterable)) {
- var adder = this.set;
- if (!IS_CALLABLE(adder)) {
- throw %make_type_error(kPropertyNotFunction, adder, 'set', this);
+ var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
+ var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
+ var capacity = numBuckets << 1;
+ if ((nof + nod) >= capacity) {
+ // Need to grow, bail out to runtime.
+ %MapGrow(this);
+ // Re-load state from the grown backing store.
+ table = %_JSCollectionGetTable(this);
+ numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
+ nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
+ nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
+ }
+ entry = nof + nod;
+ var index = ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets);
+ var bucket = ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets);
+ var chainEntry = ORDERED_HASH_TABLE_BUCKET_AT(table, bucket);
+ ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry);
+ ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof + 1);
+ FIXED_ARRAY_SET(table, index, key);
+ FIXED_ARRAY_SET(table, index + 1, value);
+ FIXED_ARRAY_SET(table, index + 2, chainEntry);
+ return this;
}
- for (var nextItem of iterable) {
- if (!IS_RECEIVER(nextItem)) {
- throw %make_type_error(kIteratorValueNotAnObject, nextItem);
+ delete(key) {
+ if (!IS_MAP(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'Map.prototype.delete', this);
}
- %_Call(adder, this, nextItem[0], nextItem[1]);
+ var table = %_JSCollectionGetTable(this);
+ var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
+ var hash = GetHash(key);
+ var entry = MapFindEntry(table, numBuckets, key, hash);
+ if (entry === NOT_FOUND) return false;
+
+ var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table) - 1;
+ var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table) + 1;
+ var index = ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets);
+ FIXED_ARRAY_SET(table, index, %_TheHole());
+ FIXED_ARRAY_SET(table, index + 1, %_TheHole());
+ ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof);
+ ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, nod);
+ if (nof < (numBuckets >>> 1)) %MapShrink(this);
+ return true;
}
}
-}
-
-
-function MapGet(key) {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.get', this);
- }
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return UNDEFINED;
- var entry = MapFindEntry(table, numBuckets, key, hash);
- if (entry === NOT_FOUND) return UNDEFINED;
- return ORDERED_HASH_MAP_VALUE_AT(table, entry, numBuckets);
-}
-
-
-function MapSet(key, value) {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.set', this);
- }
- // Normalize -0 to +0 as required by the spec.
- // Even though we use SameValueZero as the comparison for the keys we don't
- // want to ever store -0 as the key since the key is directly exposed when
- // doing iteration.
- if (key === 0) {
- key = 0;
- }
-
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
- var entry = MapFindEntry(table, numBuckets, key, hash);
- if (entry !== NOT_FOUND) {
- var existingIndex = ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets);
- FIXED_ARRAY_SET(table, existingIndex + 1, value);
- return this;
- }
-
- var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
- var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
- var capacity = numBuckets << 1;
- if ((nof + nod) >= capacity) {
- // Need to grow, bail out to runtime.
- %MapGrow(this);
- // Re-load state from the grown backing store.
- table = %_JSCollectionGetTable(this);
- numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
- nod = ORDERED_HASH_TABLE_DELETED_COUNT(table);
- }
- entry = nof + nod;
- var index = ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets);
- var bucket = ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets);
- var chainEntry = ORDERED_HASH_TABLE_BUCKET_AT(table, bucket);
- ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry);
- ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof + 1);
- FIXED_ARRAY_SET(table, index, key);
- FIXED_ARRAY_SET(table, index + 1, value);
- FIXED_ARRAY_SET(table, index + 2, chainEntry);
- return this;
-}
-
-
-function MapHas(key) {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.has', this);
- }
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
- return MapFindEntry(table, numBuckets, key, hash) !== NOT_FOUND;
-}
-
-
-function MapDelete(key) {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.delete', this);
- }
- var table = %_JSCollectionGetTable(this);
- var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
- var hash = GetHash(key);
- var entry = MapFindEntry(table, numBuckets, key, hash);
- if (entry === NOT_FOUND) return false;
-
- var nof = ORDERED_HASH_TABLE_ELEMENT_COUNT(table) - 1;
- var nod = ORDERED_HASH_TABLE_DELETED_COUNT(table) + 1;
- var index = ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets);
- FIXED_ARRAY_SET(table, index, %_TheHole());
- FIXED_ARRAY_SET(table, index + 1, %_TheHole());
- ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, nof);
- ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, nod);
- if (nof < (numBuckets >>> 1)) %MapShrink(this);
- return true;
-}
-
-
-function MapGetSize() {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.size', this);
- }
- var table = %_JSCollectionGetTable(this);
- return ORDERED_HASH_TABLE_ELEMENT_COUNT(table);
-}
-
-
-function MapClearJS() {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.clear', this);
- }
- %_MapClear(this);
-}
-
-
-function MapForEach(f, receiver) {
- if (!IS_MAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Map.prototype.forEach', this);
- }
-
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
-
- var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
- var value_array = [UNDEFINED, UNDEFINED];
- while (%MapIteratorNext(iterator, value_array)) {
- %_Call(f, receiver, value_array[1], value_array[0], this);
- }
-}
-
-// -------------------------------------------------------------------
-
-%SetCode(GlobalMap, MapConstructor);
-%FunctionSetLength(GlobalMap, 0);
-%FunctionSetPrototype(GlobalMap, new GlobalObject());
-%AddNamedProperty(GlobalMap.prototype, "constructor", GlobalMap, DONT_ENUM);
-%AddNamedProperty(
- GlobalMap.prototype, toStringTagSymbol, "Map", DONT_ENUM | READ_ONLY);
-
-%FunctionSetLength(MapForEach, 1);
-
-// Set up the non-enumerable functions on the Map prototype object.
-utils.InstallGetter(GlobalMap.prototype, "size", MapGetSize);
-utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
- "get", MapGet,
- "set", MapSet,
- "has", MapHas,
- "delete", MapDelete,
- "clear", MapClearJS,
- "forEach", MapForEach
-]);
+);
// -----------------------------------------------------------------------
// Exports
%InstallToContext([
- "map_get", MapGet,
- "map_set", MapSet,
- "map_has", MapHas,
- "map_delete", MapDelete,
- "set_add", SetAdd,
- "set_has", SetHas,
- "set_delete", SetDelete,
+ "map_set", GlobalMap.prototype.set,
+ "map_delete", GlobalMap.prototype.delete,
+ "set_add", GlobalSet.prototype.add,
+ "set_delete", GlobalSet.prototype.delete,
]);
utils.Export(function(to) {
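For readers not familiar with the backing store these macros manipulate, a rough plain-JS sketch of the ordered hash table behind Set follows (illustrative only: the field names and hash function are stand-ins rather than V8's actual layout, growth/shrink is omitted, and Map differs mainly in storing a value slot next to each key):

const HOLE = Symbol('hole');                         // stand-in for %_TheHole()

class OrderedHashSetSketch {
  constructor(bucketCount = 2) {
    this.buckets = new Array(bucketCount).fill(-1);  // newest entry index per bucket
    this.entries = [];                               // {key, chain} records, in insertion order
    this.deletedCount = 0;
  }
  _bucket(key) {
    // Stand-in for GetHash + ORDERED_HASH_TABLE_HASH_TO_BUCKET.
    const s = String(key);
    let h = 0;
    for (let i = 0; i < s.length; i++) h = (h * 31 + s.charCodeAt(i)) | 0;
    return (h >>> 0) % this.buckets.length;
  }
  _findEntry(key) {
    // Mirrors SetFindEntry: walk the chosen bucket's chain of entry indices.
    for (let e = this.buckets[this._bucket(key)]; e !== -1; e = this.entries[e].chain) {
      const k = this.entries[e].key;
      if (k !== HOLE && (k === key || (k !== k && key !== key))) return e;  // SameValueZero
    }
    return -1;
  }
  add(key) {
    if (key === 0) key = 0;                          // normalize -0 to +0, as the patch does
    if (this._findEntry(key) !== -1) return this;
    const b = this._bucket(key);
    this.entries.push({ key, chain: this.buckets[b] });  // new entry heads the bucket's chain
    this.buckets[b] = this.entries.length - 1;
    return this;
  }
  delete(key) {
    const e = this._findEntry(key);
    if (e === -1) return false;
    this.entries[e].key = HOLE;                      // leave a hole; other entries keep their order
    this.deletedCount++;
    return true;
  }
}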
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index cb83cfc1f5..1579337fc1 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -29,15 +29,11 @@ var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var IntlFallbackSymbol = utils.ImportNow("intl_fallback_symbol");
-var InstallFunctions = utils.InstallFunctions;
-var InstallGetter = utils.InstallGetter;
var InternalArray = utils.InternalArray;
var MaxSimple;
var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
-var OverrideFunction = utils.OverrideFunction;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
-var SetFunctionName = utils.SetFunctionName;
var StringSubstr = GlobalString.prototype.substr;
var StringSubstring = GlobalString.prototype.substring;
@@ -49,11 +45,6 @@ utils.Import(function(from) {
// Utilities for definitions
-function InstallFunction(object, name, func) {
- InstallFunctions(object, DONT_ENUM, [name, func]);
-}
-
-
/**
* Adds bound method to the prototype of the given object.
*/
@@ -61,41 +52,36 @@ function AddBoundMethod(obj, methodName, implementation, length, typename,
compat) {
%CheckIsBootstrapping();
var internalName = %CreatePrivateSymbol(methodName);
- // Making getter an anonymous function will cause
- // %DefineGetterPropertyUnchecked to properly set the "name"
- // property on each JSFunction instance created here, rather
- // than (as utils.InstallGetter would) on the SharedFunctionInfo
- // associated with all functions returned from AddBoundMethod.
- var getter = ANONYMOUS_FUNCTION(function() {
- var receiver = Unwrap(this, typename, obj, methodName, compat);
- if (IS_UNDEFINED(receiver[internalName])) {
- var boundMethod;
- if (IS_UNDEFINED(length) || length === 2) {
- boundMethod =
- ANONYMOUS_FUNCTION((fst, snd) => implementation(receiver, fst, snd));
- } else if (length === 1) {
- boundMethod = ANONYMOUS_FUNCTION(fst => implementation(receiver, fst));
- } else {
- boundMethod = ANONYMOUS_FUNCTION((...args) => {
- // DateTimeFormat.format needs to be 0 arg method, but can still
- // receive an optional dateValue param. If one was provided, pass it
- // along.
- if (args.length > 0) {
- return implementation(receiver, args[0]);
- } else {
- return implementation(receiver);
- }
- });
+
+ DEFINE_METHOD(
+ obj.prototype,
+ get [methodName]() {
+ var receiver = Unwrap(this, typename, obj, methodName, compat);
+ if (IS_UNDEFINED(receiver[internalName])) {
+ var boundMethod;
+ if (IS_UNDEFINED(length) || length === 2) {
+ boundMethod =
+ ANONYMOUS_FUNCTION((fst, snd) => implementation(receiver, fst, snd));
+ } else if (length === 1) {
+ boundMethod = ANONYMOUS_FUNCTION(fst => implementation(receiver, fst));
+ } else {
+ boundMethod = ANONYMOUS_FUNCTION((...args) => {
+ // DateTimeFormat.format needs to be a 0-arg method, but can still
+ // receive an optional dateValue param. If one was provided, pass it
+ // along.
+ if (args.length > 0) {
+ return implementation(receiver, args[0]);
+ } else {
+ return implementation(receiver);
+ }
+ });
+ }
+ %SetNativeFlag(boundMethod);
+ receiver[internalName] = boundMethod;
}
- %SetNativeFlag(boundMethod);
- receiver[internalName] = boundMethod;
+ return receiver[internalName];
}
- return receiver[internalName];
- });
-
- %FunctionRemovePrototype(getter);
- %DefineGetterPropertyUnchecked(obj.prototype, methodName, getter, DONT_ENUM);
- %SetNativeFlag(getter);
+ );
}
function IntlConstruct(receiver, constructor, create, newTarget, args,
@@ -914,7 +900,9 @@ function BuildLanguageTagREs() {
}
// ECMA 402 section 8.2.1
-InstallFunction(GlobalIntl, 'getCanonicalLocales', function(locales) {
+DEFINE_METHOD(
+ GlobalIntl,
+ getCanonicalLocales(locales) {
return makeArray(canonicalizeLocaleList(locales));
}
);
@@ -1035,7 +1023,9 @@ function CollatorConstructor() {
/**
* Collator resolvedOptions method.
*/
-InstallFunction(GlobalIntlCollator.prototype, 'resolvedOptions', function() {
+DEFINE_METHOD(
+ GlobalIntlCollator.prototype,
+ resolvedOptions() {
var coll = Unwrap(this, 'collator', GlobalIntlCollator, 'resolvedOptions',
false);
return {
@@ -1057,7 +1047,9 @@ InstallFunction(GlobalIntlCollator.prototype, 'resolvedOptions', function() {
* order in the returned list as in the input list.
* Options are an optional parameter.
*/
-InstallFunction(GlobalIntlCollator, 'supportedLocalesOf', function(locales) {
+DEFINE_METHOD(
+ GlobalIntlCollator,
+ supportedLocalesOf(locales) {
return supportedLocalesOf('collator', locales, arguments[1]);
}
);
@@ -1254,8 +1246,9 @@ function NumberFormatConstructor() {
/**
* NumberFormat resolvedOptions method.
*/
-InstallFunction(GlobalIntlNumberFormat.prototype, 'resolvedOptions',
- function() {
+DEFINE_METHOD(
+ GlobalIntlNumberFormat.prototype,
+ resolvedOptions() {
var format = Unwrap(this, 'numberformat', GlobalIntlNumberFormat,
'resolvedOptions', true);
var result = {
@@ -1295,8 +1288,9 @@ InstallFunction(GlobalIntlNumberFormat.prototype, 'resolvedOptions',
* order in the returned list as in the input list.
* Options are an optional parameter.
*/
-InstallFunction(GlobalIntlNumberFormat, 'supportedLocalesOf',
- function(locales) {
+DEFINE_METHOD(
+ GlobalIntlNumberFormat,
+ supportedLocalesOf(locales) {
return supportedLocalesOf('numberformat', locales, arguments[1]);
}
);
@@ -1614,8 +1608,9 @@ function DateTimeFormatConstructor() {
/**
* DateTimeFormat resolvedOptions method.
*/
-InstallFunction(GlobalIntlDateTimeFormat.prototype, 'resolvedOptions',
- function() {
+DEFINE_METHOD(
+ GlobalIntlDateTimeFormat.prototype,
+ resolvedOptions() {
var format = Unwrap(this, 'dateformat', GlobalIntlDateTimeFormat,
'resolvedOptions', true);
@@ -1666,8 +1661,9 @@ InstallFunction(GlobalIntlDateTimeFormat.prototype, 'resolvedOptions',
* order in the returned list as in the input list.
* Options are an optional parameter.
*/
-InstallFunction(GlobalIntlDateTimeFormat, 'supportedLocalesOf',
- function(locales) {
+DEFINE_METHOD(
+ GlobalIntlDateTimeFormat,
+ supportedLocalesOf(locales) {
return supportedLocalesOf('dateformat', locales, arguments[1]);
}
);
@@ -1691,8 +1687,9 @@ function formatDate(formatter, dateValue) {
return %InternalDateFormat(formatter, new GlobalDate(dateMs));
}
-InstallFunction(GlobalIntlDateTimeFormat.prototype, 'formatToParts',
- function(dateValue) {
+DEFINE_METHOD(
+ GlobalIntlDateTimeFormat.prototype,
+ formatToParts(dateValue) {
CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
if (!IS_OBJECT(this)) {
throw %make_type_error(kCalledOnNonObject, this);
@@ -1810,8 +1807,9 @@ function v8BreakIteratorConstructor() {
/**
* BreakIterator resolvedOptions method.
*/
-InstallFunction(GlobalIntlv8BreakIterator.prototype, 'resolvedOptions',
- function() {
+DEFINE_METHOD(
+ GlobalIntlv8BreakIterator.prototype,
+ resolvedOptions() {
if (!IS_UNDEFINED(new.target)) {
throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1833,8 +1831,9 @@ InstallFunction(GlobalIntlv8BreakIterator.prototype, 'resolvedOptions',
* order in the returned list as in the input list.
* Options are an optional parameter.
*/
-InstallFunction(GlobalIntlv8BreakIterator, 'supportedLocalesOf',
- function(locales) {
+DEFINE_METHOD(
+ GlobalIntlv8BreakIterator,
+ supportedLocalesOf(locales) {
if (!IS_UNDEFINED(new.target)) {
throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1976,7 +1975,9 @@ function LocaleConvertCase(s, locales, isToUpper) {
* Compares this and that, and returns less than 0, 0 or greater than 0 value.
* Overrides the built-in method.
*/
-OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
+DEFINE_METHOD(
+ GlobalString.prototype,
+ localeCompare(that) {
if (IS_NULL_OR_UNDEFINED(this)) {
throw %make_type_error(kMethodInvokedOnNullOrUndefined);
}
@@ -1988,26 +1989,31 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
}
);
-function ToLocaleLowerCaseIntl(locales) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
- return LocaleConvertCase(TO_STRING(this), locales, false);
-}
-
-%FunctionSetLength(ToLocaleLowerCaseIntl, 0);
-
-function ToLocaleUpperCaseIntl(locales) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
- return LocaleConvertCase(TO_STRING(this), locales, true);
-}
+var StringPrototypeMethods = {};
+DEFINE_METHODS_LEN(
+ GlobalString.prototype,
+ {
+ toLocaleLowerCase(locales) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
+ return LocaleConvertCase(TO_STRING(this), locales, false);
+ }
-%FunctionSetLength(ToLocaleUpperCaseIntl, 0);
+ toLocaleUpperCase(locales) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
+ return LocaleConvertCase(TO_STRING(this), locales, true);
+ }
+ },
+ 0 /* Set function length of both methods. */
+);
/**
* Formats a Number object (this) using locale and options values.
* If locale or options are omitted, defaults are used.
*/
-OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
+DEFINE_METHOD(
+ GlobalNumber.prototype,
+ toLocaleString() {
if (!(this instanceof GlobalNumber) && typeof(this) !== 'number') {
throw %make_type_error(kMethodInvokedOnWrongType, "Number");
}
@@ -2045,7 +2051,9 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* If locale or options are omitted, defaults are used - both date and time are
* present in the output.
*/
-OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
+DEFINE_METHOD(
+ GlobalDate.prototype,
+ toLocaleString() {
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
@@ -2059,7 +2067,9 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
* If locale or options are omitted, defaults are used - only date is present
* in the output.
*/
-OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
+DEFINE_METHOD(
+ GlobalDate.prototype,
+ toLocaleDateString() {
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
@@ -2073,7 +2083,9 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
* If locale or options are omitted, defaults are used - only time is present
* in the output.
*/
-OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
+DEFINE_METHOD(
+ GlobalDate.prototype,
+ toLocaleTimeString() {
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
@@ -2081,15 +2093,4 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
}
);
-%FunctionRemovePrototype(ToLocaleLowerCaseIntl);
-%FunctionRemovePrototype(ToLocaleUpperCaseIntl);
-
-utils.SetFunctionName(ToLocaleLowerCaseIntl, "toLocaleLowerCase");
-utils.SetFunctionName(ToLocaleUpperCaseIntl, "toLocaleUpperCase");
-
-utils.Export(function(to) {
- to.ToLocaleLowerCaseIntl = ToLocaleLowerCaseIntl;
- to.ToLocaleUpperCaseIntl = ToLocaleUpperCaseIntl;
-});
-
})
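The AddBoundMethod rewrite above keeps the same lazily-bound-method idea, only installed through DEFINE_METHOD instead of %DefineGetterPropertyUnchecked. A plain-JS approximation of the pattern for a single method (illustrative names; the real code also unwraps the receiver, uses one private symbol per method, and sets the native flag):

const boundMethodCache = Symbol('bound method cache');   // stands in for the private symbol

function addBoundMethod(Ctor, methodName, implementation) {
  Object.defineProperty(Ctor.prototype, methodName, {
    configurable: true,
    enumerable: false,
    get() {
      // The first read creates a closure bound to this instance and caches it,
      // so repeated reads of e.g. formatter.format yield the same function object.
      if (this[boundMethodCache] === undefined) {
        const receiver = this;
        this[boundMethodCache] = (...args) => implementation(receiver, ...args);
      }
      return this[boundMethodCache];
    },
  });
}

Defining a getter, rather than eagerly creating a bound function per instance, is what defers the allocation until the property is first read.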
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 08f25b1f26..329b851e4b 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -53,7 +53,7 @@ macro IS_FUNCTION(arg) = (%IsFunction(arg));
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_MAP(arg) = (%_IsJSMap(arg));
-macro IS_MAP_ITERATOR(arg) = (%_IsJSMapIterator(arg));
+macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
@@ -61,7 +61,7 @@ macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_PROXY(arg) = (%_IsJSProxy(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_SET(arg) = (%_IsJSSet(arg));
-macro IS_SET_ITERATOR(arg) = (%_IsJSSetIterator(arg));
+macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
@@ -85,8 +85,6 @@ macro NUMBER_IS_NAN(arg) = (%IS_VAR(arg) !== arg);
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
-macro TO_INT32(arg) = ((arg) | 0);
-macro TO_UINT32(arg) = ((arg) >>> 0);
macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
@@ -95,38 +93,23 @@ macro TO_OBJECT(arg) = (%_ToObject(arg));
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
# Private names.
-macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
-macro HAS_PRIVATE(obj, key) = HAS_OWN_PROPERTY(obj, key);
-macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
# To avoid ES2015 Function name inference.
macro ANONYMOUS_FUNCTION(fn) = (0, (fn));
+macro DEFINE_METHODS_LEN(obj, class_def, len) = %DefineMethodsInternal(obj, class class_def, len);
+macro DEFINE_METHOD_LEN(obj, method_def, len) = %DefineMethodsInternal(obj, class { method_def }, len);
+macro DEFINE_METHODS(obj, class_def) = DEFINE_METHODS_LEN(obj, class_def, -1);
+macro DEFINE_METHOD(obj, method_def) = DEFINE_METHOD_LEN(obj, method_def, -1);
+
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
-# Macros implemented in Python.
-python macro CHAR_CODE(str) = ord(str[1]);
-
-# For messages.js
-# Matches Script::Type from objects.h
-define TYPE_NATIVE = 0;
-define TYPE_EXTENSION = 1;
-define TYPE_NORMAL = 2;
-
-# Matches Script::CompilationType from objects.h
-define COMPILATION_TYPE_HOST = 0;
-define COMPILATION_TYPE_EVAL = 1;
-define COMPILATION_TYPE_JSON = 2;
-
# Must match PropertyFilter in property-details.h
define PROPERTY_FILTER_NONE = 0;
-define PROPERTY_FILTER_ONLY_ENUMERABLE = 2;
-define PROPERTY_FILTER_SKIP_STRINGS = 8;
-define PROPERTY_FILTER_SKIP_SYMBOLS = 16;
# Use for keys, values and entries iterators.
define ITERATOR_KIND_KEYS = 1;
@@ -162,35 +145,3 @@ define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
-
-# UseCounters from include/v8.h
-define kUseAsm = 0;
-define kBreakIterator = 1;
-define kLegacyConst = 2;
-define kMarkDequeOverflow = 3;
-define kStoreBufferOverflow = 4;
-define kSlotsBufferOverflow = 5;
-define kForcedGC = 7;
-define kSloppyMode = 8;
-define kStrictMode = 9;
-define kRegExpPrototypeStickyGetter = 11;
-define kRegExpPrototypeToString = 12;
-define kRegExpPrototypeUnicodeGetter = 13;
-define kIntlV8Parse = 14;
-define kIntlPattern = 15;
-define kIntlResolved = 16;
-define kPromiseChain = 17;
-define kPromiseAccept = 18;
-define kPromiseDefer = 19;
-define kHtmlCommentInExternalScript = 20;
-define kHtmlComment = 21;
-define kSloppyModeBlockScopedFunctionRedefinition = 22;
-define kForInInitializer = 23;
-define kArrayProtectorDirtied = 24;
-define kArraySpeciesModified = 25;
-define kArrayPrototypeConstructorModified = 26;
-define kArrayInstanceProtoModified = 27;
-define kArrayInstanceConstructorModified = 28;
-define kLegacyFunctionDeclaration = 29;
-define kRegExpPrototypeSourceGetter = 30;
-define kRegExpPrototypeOldFlagGetter = 31;
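To make the new macros concrete: DEFINE_METHOD and friends are textual macros, so a call such as the one added to proxy.js below expands, before compilation, roughly as follows (natives dialect, not standard JavaScript; the %-call is a runtime intrinsic, and the -1 passed by the non-_LEN variants presumably means "keep the length implied by the parameter list"):

// As written in a native script:
DEFINE_METHOD(
  GlobalProxy,
  revocable(target, handler) { /* ... */ }
);

// After macro expansion:
%DefineMethodsInternal(GlobalProxy, class { revocable(target, handler) { /* ... */ } }, -1);

// The _LEN variants thread an explicit length through instead; the padStart/padEnd
// block in string.js, for example, ends up passing 1 as the final argument.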
diff --git a/deps/v8/src/js/max-min.js b/deps/v8/src/js/max-min.js
index 4b7076ed22..e451c09d1d 100644
--- a/deps/v8/src/js/max-min.js
+++ b/deps/v8/src/js/max-min.js
@@ -4,6 +4,8 @@
(function(global, utils) {
+"use strict";
+
%CheckIsBootstrapping();
function MaxSimple(a, b) {
diff --git a/deps/v8/src/js/messages.js b/deps/v8/src/js/messages.js
index 3ea2bef5ad..aebd37a791 100644
--- a/deps/v8/src/js/messages.js
+++ b/deps/v8/src/js/messages.js
@@ -6,6 +6,8 @@
(function(global, utils) {
+"use strict";
+
%CheckIsBootstrapping();
// -------------------------------------------------------------------
@@ -17,14 +19,6 @@ var Script = utils.ImportNow("Script");
// Script
/**
- * Set up the Script function and constructor.
- */
-%FunctionSetInstanceClassName(Script, 'Script');
-%AddNamedProperty(Script.prototype, 'constructor', Script,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
-
-
-/**
* Get information on a specific source position.
* Returns an object with the following properties:
* script : script object for the source
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 91f36cb573..08ef3ba520 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -38,18 +38,6 @@ function ImportNow(name) {
}
-function SetFunctionName(f, name, prefix) {
- if (IS_SYMBOL(name)) {
- name = "[" + %SymbolDescription(name) + "]";
- }
- if (IS_UNDEFINED(prefix)) {
- %FunctionSetName(f, name);
- } else {
- %FunctionSetName(f, prefix + " " + name);
- }
-}
-
-
function InstallConstants(object, constants) {
%CheckIsBootstrapping();
%OptimizeObjectForAddingMultipleProperties(object, constants.length >> 1);
@@ -63,44 +51,6 @@ function InstallConstants(object, constants) {
}
-function InstallFunctions(object, attributes, functions) {
- %CheckIsBootstrapping();
- %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
- for (var i = 0; i < functions.length; i += 2) {
- var key = functions[i];
- var f = functions[i + 1];
- SetFunctionName(f, key);
- %FunctionRemovePrototype(f);
- %AddNamedProperty(object, key, f, attributes);
- %SetNativeFlag(f);
- }
- %ToFastProperties(object);
-}
-
-
-// Helper function to install a getter-only accessor property.
-function InstallGetter(object, name, getter, attributes, prefix) {
- %CheckIsBootstrapping();
- if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
- SetFunctionName(getter, name, IS_UNDEFINED(prefix) ? "get" : prefix);
- %FunctionRemovePrototype(getter);
- %DefineGetterPropertyUnchecked(object, name, getter, attributes);
- %SetNativeFlag(getter);
-}
-
-
-function OverrideFunction(object, name, f, afterInitialBootstrap) {
- %CheckIsBootstrapping();
- %object_define_property(object, name, { value: f,
- writeable: true,
- configurable: true,
- enumerable: false });
- SetFunctionName(f, name);
- if (!afterInitialBootstrap) %FunctionRemovePrototype(f);
- %SetNativeFlag(f);
-}
-
-
// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
@@ -156,11 +106,7 @@ function PostNatives(utils) {
utils.Import = Import;
utils.ImportNow = ImportNow;
utils.Export = Export;
-utils.SetFunctionName = SetFunctionName;
utils.InstallConstants = InstallConstants;
-utils.InstallFunctions = InstallFunctions;
-utils.InstallGetter = InstallGetter;
-utils.OverrideFunction = OverrideFunction;
utils.SetUpLockedPrototype = SetUpLockedPrototype;
utils.PostNatives = PostNatives;
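For context on what the DEFINE_METHOD* macros replace: the InstallFunctions helper deleted here took a flat [name, fn, name, fn, ...] array and installed every function as a non-enumerable property after naming it. A plain-JS approximation (the real helper additionally removed the functions' prototypes and set the native flag through runtime calls):

function installFunctions(object, functions) {
  for (let i = 0; i < functions.length; i += 2) {
    const key = functions[i];
    const fn = functions[i + 1];
    Object.defineProperty(fn, 'name', { value: key, configurable: true });
    Object.defineProperty(object, key, {
      value: fn,
      writable: true,
      enumerable: false,     // DONT_ENUM
      configurable: true,
    });
  }
}

// e.g. installFunctions(SomeGlobal.prototype, ["has", hasImpl, "get", getImpl]);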
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
deleted file mode 100644
index 27571daabb..0000000000
--- a/deps/v8/src/js/promise.js
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var InternalArray = utils.InternalArray;
-var promiseHandledBySymbol =
- utils.ImportNow("promise_handled_by_symbol");
-var promiseForwardingHandlerSymbol =
- utils.ImportNow("promise_forwarding_handler_symbol");
-var GlobalPromise = global.Promise;
-
-// -------------------------------------------------------------------
-// Define exported functions.
-
-// Combinators.
-
-// ES#sec-promise.all
-// Promise.all ( iterable )
-function PromiseAll(iterable) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kCalledOnNonObject, "Promise.all");
- }
-
- // false debugEvent so that forwarding the rejection through all does not
- // trigger redundant ExceptionEvents
- var deferred = %new_promise_capability(this, false);
- var resolutions = new InternalArray();
- var count;
-
- // For catch prediction, don't treat the .then calls as handling it;
- // instead, recurse outwards.
- var instrumenting = DEBUG_IS_ACTIVE;
- if (instrumenting) {
- SET_PRIVATE(deferred.reject, promiseForwardingHandlerSymbol, true);
- }
-
- function CreateResolveElementFunction(index, values, promiseCapability) {
- var alreadyCalled = false;
- return (x) => {
- if (alreadyCalled === true) return;
- alreadyCalled = true;
- values[index] = x;
- if (--count === 0) {
- var valuesArray = [];
- %MoveArrayContents(values, valuesArray);
- %_Call(promiseCapability.resolve, UNDEFINED, valuesArray);
- }
- };
- }
-
- try {
- var i = 0;
- count = 1;
- for (var value of iterable) {
- var nextPromise = this.resolve(value);
- ++count;
- var throwawayPromise = nextPromise.then(
- CreateResolveElementFunction(i, resolutions, deferred),
- deferred.reject);
- // For catch prediction, mark that rejections here are semantically
- // handled by the combined Promise.
- if (instrumenting && %is_promise(throwawayPromise)) {
- SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
- }
- ++i;
- }
-
- // 6.d
- if (--count === 0) {
- var valuesArray = [];
- %MoveArrayContents(resolutions, valuesArray);
- %_Call(deferred.resolve, UNDEFINED, valuesArray);
- }
-
- } catch (e) {
- %_Call(deferred.reject, UNDEFINED, e);
- }
- return deferred.promise;
-}
-
-// ES#sec-promise.race
-// Promise.race ( iterable )
-function PromiseRace(iterable) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kCalledOnNonObject, PromiseRace);
- }
-
- // false debugEvent so that forwarding the rejection through race does not
- // trigger redundant ExceptionEvents
- var deferred = %new_promise_capability(this, false);
-
- // For catch prediction, don't treat the .then calls as handling it;
- // instead, recurse outwards.
- var instrumenting = DEBUG_IS_ACTIVE;
- if (instrumenting) {
- SET_PRIVATE(deferred.reject, promiseForwardingHandlerSymbol, true);
- }
-
- try {
- for (var value of iterable) {
- var throwawayPromise = this.resolve(value).then(deferred.resolve,
- deferred.reject);
- // For catch prediction, mark that rejections here are semantically
- // handled by the combined Promise.
- if (instrumenting && %is_promise(throwawayPromise)) {
- SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
- }
- }
- } catch (e) {
- %_Call(deferred.reject, UNDEFINED, e);
- }
- return deferred.promise;
-}
-
-// -------------------------------------------------------------------
-// Install exported functions.
-
-utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
- "all", PromiseAll,
- "race", PromiseRace,
-]);
-
-})
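The whole file is deleted; Promise.all and Promise.race are presumably reimplemented as builtins elsewhere in this update. For reference, the heart of the removed PromiseAll is a shared counter plus a single-shot resolver per element; a simplified plain-JS sketch of that pattern (species lookup, InternalArray, and the catch-prediction instrumentation are omitted):

function promiseAllSketch(iterable) {
  return new Promise((resolve, reject) => {
    const values = [];
    let count = 1;                          // guards against an empty iterable
    let index = 0;
    const resolveElement = (i) => {
      let alreadyCalled = false;            // each element may settle only once
      return (x) => {
        if (alreadyCalled) return;
        alreadyCalled = true;
        values[i] = x;
        if (--count === 0) resolve(values);
      };
    };
    try {
      for (const value of iterable) {
        count++;
        Promise.resolve(value).then(resolveElement(index), reject);
        index++;
      }
      if (--count === 0) resolve(values);   // step 6.d in the removed comment
    } catch (e) {
      reject(e);
    }
  });
}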
diff --git a/deps/v8/src/js/proxy.js b/deps/v8/src/js/proxy.js
index a111c09427..4b6255a8ff 100644
--- a/deps/v8/src/js/proxy.js
+++ b/deps/v8/src/js/proxy.js
@@ -15,16 +15,13 @@ var GlobalProxy = global.Proxy;
//----------------------------------------------------------------------------
-function ProxyCreateRevocable(target, handler) {
- var p = new GlobalProxy(target, handler);
- return {proxy: p, revoke: () => %JSProxyRevoke(p)};
-}
-
-//-------------------------------------------------------------------
-
// Set up non-enumerable properties of the Proxy object.
-utils.InstallFunctions(GlobalProxy, DONT_ENUM, [
- "revocable", ProxyCreateRevocable
-]);
+DEFINE_METHOD(
+ GlobalProxy,
+ revocable(target, handler) {
+ var p = new GlobalProxy(target, handler);
+ return {proxy: p, revoke: () => %JSProxyRevoke(p)};
+ }
+);
})
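The behaviour of the revocable() method defined above, seen from user code (standard ES semantics, unchanged by moving it into DEFINE_METHOD):

const { proxy, revoke } = Proxy.revocable({ answer: 42 }, {});
console.log(proxy.answer);      // 42
revoke();
// Every further operation on the proxy now throws a TypeError:
try { proxy.answer; } catch (e) { console.log(e instanceof TypeError); }  // true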
diff --git a/deps/v8/src/js/spread.js b/deps/v8/src/js/spread.js
index 39b12e7a8e..0b56ca7edd 100644
--- a/deps/v8/src/js/spread.js
+++ b/deps/v8/src/js/spread.js
@@ -4,7 +4,7 @@
(function(global, utils) {
-'use strict';
+"use strict";
// -------------------------------------------------------------------
// Imports
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index a3a59d5fde..521afc2c42 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -4,6 +4,8 @@
(function(global, utils) {
+"use strict";
+
%CheckIsBootstrapping();
// -------------------------------------------------------------------
@@ -15,188 +17,179 @@ var searchSymbol = utils.ImportNow("search_symbol");
//-------------------------------------------------------------------
-// ES6 21.1.3.11.
-function StringMatchJS(pattern) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
-
- if (!IS_NULL_OR_UNDEFINED(pattern)) {
- var matcher = pattern[matchSymbol];
- if (!IS_UNDEFINED(matcher)) {
- return %_Call(matcher, pattern, this);
- }
- }
-
- var subject = TO_STRING(this);
-
- // Equivalent to RegExpCreate (ES#sec-regexpcreate)
- var regexp = %RegExpCreate(pattern);
- return regexp[matchSymbol](subject);
-}
-
-// ES6 21.1.3.15.
-function StringSearch(pattern) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
-
- if (!IS_NULL_OR_UNDEFINED(pattern)) {
- var searcher = pattern[searchSymbol];
- if (!IS_UNDEFINED(searcher)) {
- return %_Call(searcher, pattern, this);
- }
- }
-
- var subject = TO_STRING(this);
-
- // Equivalent to RegExpCreate (ES#sec-regexpcreate)
- var regexp = %RegExpCreate(pattern);
- return %_Call(regexp[searchSymbol], regexp, subject);
-}
-
-
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
+// ES#sec-createhtml
function HtmlEscape(str) {
return %RegExpInternalReplace(/"/g, TO_STRING(str), "&quot;");
}
+// Set up the non-enumerable functions on the String prototype object.
+DEFINE_METHODS(
+ GlobalString.prototype,
+ {
+ /* ES#sec-string.prototype.match */
+ match(pattern) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
+
+ if (!IS_NULL_OR_UNDEFINED(pattern)) {
+ var matcher = pattern[matchSymbol];
+ if (!IS_UNDEFINED(matcher)) {
+ return %_Call(matcher, pattern, this);
+ }
+ }
+
+ var subject = TO_STRING(this);
+
+ // Equivalent to RegExpCreate (ES#sec-regexpcreate)
+ var regexp = %RegExpCreate(pattern);
+ return regexp[matchSymbol](subject);
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.2
-function StringAnchor(name) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor");
- return "<a name=\"" + HtmlEscape(name) + "\">" + TO_STRING(this) +
- "</a>";
-}
-
-
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.3
-function StringBig() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.big");
- return "<big>" + TO_STRING(this) + "</big>";
-}
-
-
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.4
-function StringBlink() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink");
- return "<blink>" + TO_STRING(this) + "</blink>";
-}
-
-
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.5
-function StringBold() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold");
- return "<b>" + TO_STRING(this) + "</b>";
-}
-
-
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.6
-function StringFixed() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed");
- return "<tt>" + TO_STRING(this) + "</tt>";
-}
-
+ /* ES#sec-string.prototype.search */
+ search(pattern) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.7
-function StringFontcolor(color) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor");
- return "<font color=\"" + HtmlEscape(color) + "\">" + TO_STRING(this) +
- "</font>";
-}
+ if (!IS_NULL_OR_UNDEFINED(pattern)) {
+ var searcher = pattern[searchSymbol];
+ if (!IS_UNDEFINED(searcher)) {
+ return %_Call(searcher, pattern, this);
+ }
+ }
+ var subject = TO_STRING(this);
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.8
-function StringFontsize(size) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize");
- return "<font size=\"" + HtmlEscape(size) + "\">" + TO_STRING(this) +
- "</font>";
-}
-
+ // Equivalent to RegExpCreate (ES#sec-regexpcreate)
+ var regexp = %RegExpCreate(pattern);
+ return %_Call(regexp[searchSymbol], regexp, subject);
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.9
-function StringItalics() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics");
- return "<i>" + TO_STRING(this) + "</i>";
-}
+ /* ES#sec-string.prototype.anchor */
+ anchor(name) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor");
+ return "<a name=\"" + HtmlEscape(name) + "\">" + TO_STRING(this) +
+ "</a>";
+ }
+ /* ES#sec-string.prototype.big */
+ big() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.big");
+ return "<big>" + TO_STRING(this) + "</big>";
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.10
-function StringLink(s) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.link");
- return "<a href=\"" + HtmlEscape(s) + "\">" + TO_STRING(this) + "</a>";
-}
+ /* ES#sec-string.prototype.blink */
+ blink() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink");
+ return "<blink>" + TO_STRING(this) + "</blink>";
+ }
+ /* ES#sec-string.prototype.bold */
+ bold() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold");
+ return "<b>" + TO_STRING(this) + "</b>";
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.11
-function StringSmall() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.small");
- return "<small>" + TO_STRING(this) + "</small>";
-}
+ /* ES#sec-string.prototype.fixed */
+ fixed() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed");
+ return "<tt>" + TO_STRING(this) + "</tt>";
+ }
+ /* ES#sec-string.prototype.fontcolor */
+ fontcolor(color) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor");
+ return "<font color=\"" + HtmlEscape(color) + "\">" + TO_STRING(this) +
+ "</font>";
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.12
-function StringStrike() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike");
- return "<strike>" + TO_STRING(this) + "</strike>";
-}
+ /* ES#sec-string.prototype.fontsize */
+ fontsize(size) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize");
+ return "<font size=\"" + HtmlEscape(size) + "\">" + TO_STRING(this) +
+ "</font>";
+ }
+ /* ES#sec-string.prototype.italics */
+ italics() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics");
+ return "<i>" + TO_STRING(this) + "</i>";
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.13
-function StringSub() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub");
- return "<sub>" + TO_STRING(this) + "</sub>";
-}
+ /* ES#sec-string.prototype.link */
+ link(s) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.link");
+ return "<a href=\"" + HtmlEscape(s) + "\">" + TO_STRING(this) + "</a>";
+ }
+ /* ES#sec-string.prototype.small */
+ small() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.small");
+ return "<small>" + TO_STRING(this) + "</small>";
+ }
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.14
-function StringSup() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup");
- return "<sup>" + TO_STRING(this) + "</sup>";
-}
+ /* ES#sec-string.prototype.strike */
+ strike() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike");
+ return "<strike>" + TO_STRING(this) + "</strike>";
+ }
-// ES6, section 21.1.3.13
-function StringRepeat(count) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
+ /* ES#sec-string.prototype.sub */
+ sub() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub");
+ return "<sub>" + TO_STRING(this) + "</sub>";
+ }
- var s = TO_STRING(this);
- var n = TO_INTEGER(count);
+ /* ES#sec-string.prototype.sup */
+ sup() {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup");
+ return "<sup>" + TO_STRING(this) + "</sup>";
+ }
- if (n < 0 || n === INFINITY) throw %make_range_error(kInvalidCountValue);
+ /* ES#sec-string.prototype.repeat */
+ repeat(count) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
- // Early return to allow an arbitrarily-large repeat of the empty string.
- if (s.length === 0) return "";
+ var s = TO_STRING(this);
+ var n = TO_INTEGER(count);
- // The maximum string length is stored in a smi, so a longer repeat
- // must result in a range error.
- if (n > %_MaxSmi()) throw %make_range_error(kInvalidStringLength);
+ if (n < 0 || n === INFINITY) throw %make_range_error(kInvalidCountValue);
- var r = "";
- while (true) {
- if (n & 1) r += s;
- n >>= 1;
- if (n === 0) return r;
- s += s;
- }
-}
+ // Early return to allow an arbitrarily-large repeat of the empty string.
+ if (s.length === 0) return "";
+ // The maximum string length is stored in a smi, so a longer repeat
+ // must result in a range error.
+ if (n > %_MaxSmi()) throw %make_range_error(kInvalidStringLength);
-// ES6 Draft 05-22-2014, section 21.1.3.3
-function StringCodePointAt(pos) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
+ var r = "";
+ while (true) {
+ if (n & 1) r += s;
+ n >>= 1;
+ if (n === 0) return r;
+ s += s;
+ }
+ }
- var string = TO_STRING(this);
- var size = string.length;
- pos = TO_INTEGER(pos);
- if (pos < 0 || pos >= size) {
- return UNDEFINED;
- }
- var first = %_StringCharCodeAt(string, pos);
- if (first < 0xD800 || first > 0xDBFF || pos + 1 == size) {
- return first;
- }
- var second = %_StringCharCodeAt(string, pos + 1);
- if (second < 0xDC00 || second > 0xDFFF) {
- return first;
+ /* ES#sec-string.prototype.codepointat */
+ codePointAt(pos) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
+
+ var string = TO_STRING(this);
+ var size = string.length;
+ pos = TO_INTEGER(pos);
+ if (pos < 0 || pos >= size) {
+ return UNDEFINED;
+ }
+ var first = %_StringCharCodeAt(string, pos);
+ if (first < 0xD800 || first > 0xDBFF || pos + 1 == size) {
+ return first;
+ }
+ var second = %_StringCharCodeAt(string, pos + 1);
+ if (second < 0xDC00 || second > 0xDFFF) {
+ return first;
+ }
+ return (first - 0xD800) * 0x400 + second + 0x2400;
+ }
}
- return (first - 0xD800) * 0x400 + second + 0x2400;
-}
+);
function StringPad(thisString, maxLength, fillString) {
maxLength = TO_LENGTH(maxLength);
@@ -233,78 +226,58 @@ function StringPad(thisString, maxLength, fillString) {
return filler;
}
-// ES#sec-string.prototype.padstart
-// String.prototype.padStart(maxLength [, fillString])
-function StringPadStart(maxLength, fillString) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart");
- var thisString = TO_STRING(this);
+DEFINE_METHODS_LEN(
+ GlobalString.prototype,
+ {
+ /* ES#sec-string.prototype.padstart */
+ /* String.prototype.padStart(maxLength [, fillString]) */
+ padStart(maxLength, fillString) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart");
+ var thisString = TO_STRING(this);
- return StringPad(thisString, maxLength, fillString) + thisString;
-}
-%FunctionSetLength(StringPadStart, 1);
+ return StringPad(thisString, maxLength, fillString) + thisString;
+ }
-// ES#sec-string.prototype.padend
-// String.prototype.padEnd(maxLength [, fillString])
-function StringPadEnd(maxLength, fillString) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd");
- var thisString = TO_STRING(this);
+ /* ES#sec-string.prototype.padend */
+ /* String.prototype.padEnd(maxLength [, fillString]) */
+ padEnd(maxLength, fillString) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd");
+ var thisString = TO_STRING(this);
- return thisString + StringPad(thisString, maxLength, fillString);
-}
-%FunctionSetLength(StringPadEnd, 1);
+ return thisString + StringPad(thisString, maxLength, fillString);
+ }
+ },
+ 1 /* Set function length of both methods. */
+);
// -------------------------------------------------------------------
// String methods related to templates
-// ES6 Draft 03-17-2015, section 21.1.2.4
-function StringRaw(callSite) {
- "use strict";
- var numberOfSubstitutions = arguments.length;
- var cooked = TO_OBJECT(callSite);
- var raw = TO_OBJECT(cooked.raw);
- var literalSegments = TO_LENGTH(raw.length);
- if (literalSegments <= 0) return "";
-
- var result = TO_STRING(raw[0]);
-
- for (var i = 1; i < literalSegments; ++i) {
- if (i < numberOfSubstitutions) {
- result += TO_STRING(arguments[i]);
+// Set up the non-enumerable functions on the String object.
+DEFINE_METHOD(
+ GlobalString,
+
+ /* ES#sec-string.raw */
+ raw(callSite) {
+ var numberOfSubstitutions = arguments.length;
+ var cooked = TO_OBJECT(callSite);
+ var raw = TO_OBJECT(cooked.raw);
+ var literalSegments = TO_LENGTH(raw.length);
+ if (literalSegments <= 0) return "";
+
+ var result = TO_STRING(raw[0]);
+
+ for (var i = 1; i < literalSegments; ++i) {
+ if (i < numberOfSubstitutions) {
+ result += TO_STRING(arguments[i]);
+ }
+ result += TO_STRING(raw[i]);
}
- result += TO_STRING(raw[i]);
- }
- return result;
-}
+ return result;
+ }
+);
// -------------------------------------------------------------------
-// Set up the non-enumerable functions on the String object.
-utils.InstallFunctions(GlobalString, DONT_ENUM, [
- "raw", StringRaw
-]);
-
-// Set up the non-enumerable functions on the String prototype object.
-utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
- "codePointAt", StringCodePointAt,
- "match", StringMatchJS,
- "padEnd", StringPadEnd,
- "padStart", StringPadStart,
- "repeat", StringRepeat,
- "search", StringSearch,
- "link", StringLink,
- "anchor", StringAnchor,
- "fontcolor", StringFontcolor,
- "fontsize", StringFontsize,
- "big", StringBig,
- "blink", StringBlink,
- "bold", StringBold,
- "fixed", StringFixed,
- "italics", StringItalics,
- "small", StringSmall,
- "strike", StringStrike,
- "sub", StringSub,
- "sup", StringSup
-]);
-
})
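One non-obvious constant carried over into the new codePointAt(): for a surrogate pair the code point is (first - 0xD800) * 0x400 + (second - 0xDC00) + 0x10000, and since 0x10000 - 0xDC00 === 0x2400 the two constants fold into the single + 0x2400 above. A quick plain-JS check:

const s = '\u{1F600}';                                     // U+1F600
const first = s.charCodeAt(0), second = s.charCodeAt(1);   // 0xD83D, 0xDE00
console.log((first - 0xD800) * 0x400 + second + 0x2400);   // 128512
console.log(s.codePointAt(0));                             // 128512
console.log(0x1F600);                                      // 128512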
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 1c65c32dbd..caced99321 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -233,18 +233,21 @@ endmacro
TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
-function TypedArraySubArray(begin, end) {
- switch (%_ClassOf(this)) {
+DEFINE_METHOD(
+ GlobalTypedArray.prototype,
+ subarray(begin, end) {
+ switch (%_ClassOf(this)) {
macro TYPED_ARRAY_SUBARRAY_CASE(NAME, ELEMENT_SIZE)
- case "NAME":
- return %_Call(NAMESubArray, this, begin, end);
+ case "NAME":
+ return %_Call(NAMESubArray, this, begin, end);
endmacro
TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
+ }
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ "get %TypedArray%.prototype.subarray", this);
}
- throw %make_type_error(kIncompatibleMethodReceiver,
- "get %TypedArray%.prototype.subarray", this);
-}
-%SetForceInlineFlag(TypedArraySubArray);
+);
+%SetForceInlineFlag(GlobalTypedArray.prototype.subarray);
@@ -314,87 +317,64 @@ function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
}
}
-function TypedArraySet(obj, offset) {
- var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
- if (intOffset < 0) throw %make_type_error(kTypedArraySetNegativeOffset);
+DEFINE_METHOD_LEN(
+ GlobalTypedArray.prototype,
+ set(obj, offset) {
+ var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
+ if (intOffset < 0) throw %make_type_error(kTypedArraySetNegativeOffset);
- if (intOffset > %_MaxSmi()) {
- throw %make_range_error(kTypedArraySetSourceTooLarge);
- }
+ if (intOffset > %_MaxSmi()) {
+ throw %make_range_error(kTypedArraySetSourceTooLarge);
+ }
- switch (%TypedArraySetFastCases(this, obj, intOffset)) {
- // These numbers should be synchronized with runtime.cc.
- case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
- return;
- case 1: // TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING
- TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
- return;
- case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
- if (intOffset === 0) {
- %TypedArrayCopyElements(this, obj, %_TypedArrayGetLength(obj));
- } else {
- TypedArraySetFromArrayLike(
- this, obj, %_TypedArrayGetLength(obj), intOffset);
- }
- return;
- case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
- var l = obj.length;
- if (IS_UNDEFINED(l)) {
- if (IS_NUMBER(obj)) {
- // For number as a first argument, throw TypeError
- // instead of silently ignoring the call, so that
- // users know they did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- throw %make_type_error(kInvalidArgument);
+ switch (%TypedArraySetFastCases(this, obj, intOffset)) {
+ // These numbers should be synchronized with runtime.cc.
+ case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
+ return;
+ case 1: // TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING
+ TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
+ return;
+ case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
+ if (intOffset === 0) {
+ %TypedArrayCopyElements(this, obj, %_TypedArrayGetLength(obj));
+ } else {
+ TypedArraySetFromArrayLike(
+ this, obj, %_TypedArrayGetLength(obj), intOffset);
}
return;
- }
- l = TO_LENGTH(l);
- if (intOffset + l > %_TypedArrayGetLength(this)) {
- throw %make_range_error(kTypedArraySetSourceTooLarge);
- }
- TypedArraySetFromArrayLike(this, obj, l, intOffset);
- return;
- }
-}
-%FunctionSetLength(TypedArraySet, 1);
-
-function TypedArrayGetToStringTag() {
- if (!IS_TYPEDARRAY(this)) return;
- var name = %_ClassOf(this);
- if (IS_UNDEFINED(name)) return;
- return name;
-}
-
-function InnerTypedArrayForEach(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
-
- if (IS_UNDEFINED(receiver)) {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- f(element, i, array);
- }
- }
- } else {
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- %_Call(f, receiver, element, i, array);
- }
+ case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
+ var l = obj.length;
+ if (IS_UNDEFINED(l)) {
+ if (IS_NUMBER(obj)) {
+ // For number as a first argument, throw TypeError
+ // instead of silently ignoring the call, so that
+ // users know they did something wrong.
+ // (Consistent with Firefox and Blink/WebKit)
+ throw %make_type_error(kInvalidArgument);
+ }
+ return;
+ }
+ l = TO_LENGTH(l);
+ if (intOffset + l > %_TypedArrayGetLength(this)) {
+ throw %make_range_error(kTypedArraySetSourceTooLarge);
+ }
+ TypedArraySetFromArrayLike(this, obj, l, intOffset);
+ return;
}
+ },
+ 1 /* Set function length. */
+);
+
+
+DEFINE_METHOD(
+ GlobalTypedArray.prototype,
+ get [toStringTagSymbol]() {
+ if (!IS_TYPEDARRAY(this)) return;
+ var name = %_ClassOf(this);
+ if (IS_UNDEFINED(name)) return;
+ return name;
}
-}
-
-// ES6 draft 08-24-14, section 22.2.3.12
-function TypedArrayForEach(f, receiver) {
- ValidateTypedArray(this, "%TypedArray%.prototype.forEach");
-
- var length = %_TypedArrayGetLength(this);
-
- InnerTypedArrayForEach(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArrayForEach, 1);
+);
// The following functions cannot be made efficient on sparse arrays while
// preserving the semantics, since the calls to the receiver function can add
@@ -415,88 +395,109 @@ function InnerTypedArrayFilter(f, receiver, array, length, result) {
// ES6 draft 07-15-13, section 22.2.3.9
-function TypedArrayFilter(f, thisArg) {
- ValidateTypedArray(this, "%TypeArray%.prototype.filter");
-
- var length = %_TypedArrayGetLength(this);
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var result = new InternalArray();
- InnerTypedArrayFilter(f, thisArg, this, length, result);
- var captured = result.length;
- var output = TypedArraySpeciesCreate(this, captured);
- for (var i = 0; i < captured; i++) {
- output[i] = result[i];
- }
- return output;
-}
-%FunctionSetLength(TypedArrayFilter, 1);
+DEFINE_METHOD_LEN(
+ GlobalTypedArray.prototype,
+ filter(f, thisArg) {
+ ValidateTypedArray(this, "%TypedArray%.prototype.filter");
+
+ var length = %_TypedArrayGetLength(this);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
+ var result = new InternalArray();
+ InnerTypedArrayFilter(f, thisArg, this, length, result);
+ var captured = result.length;
+ var output = TypedArraySpeciesCreate(this, captured);
+ for (var i = 0; i < captured; i++) {
+ output[i] = result[i];
+ }
+ return output;
+ },
+ 1 /* Set function length. */
+);
// ES6 draft 07-15-13, section 22.2.3.10
-function TypedArrayFind(predicate, thisArg) {
- ValidateTypedArray(this, "%TypedArray%.prototype.find");
+DEFINE_METHOD_LEN(
+ GlobalTypedArray.prototype,
+ find(predicate, thisArg) {
+ ValidateTypedArray(this, "%TypedArray%.prototype.find");
- var length = %_TypedArrayGetLength(this);
+ var length = %_TypedArrayGetLength(this);
- return InnerArrayFind(predicate, thisArg, this, length);
-}
-%FunctionSetLength(TypedArrayFind, 1);
+ return InnerArrayFind(predicate, thisArg, this, length);
+ },
+ 1 /* Set function length. */
+);
// ES6 draft 07-15-13, section 22.2.3.11
-function TypedArrayFindIndex(predicate, thisArg) {
- ValidateTypedArray(this, "%TypedArray%.prototype.findIndex");
+DEFINE_METHOD_LEN(
+ GlobalTypedArray.prototype,
+ findIndex(predicate, thisArg) {
+ ValidateTypedArray(this, "%TypedArray%.prototype.findIndex");
- var length = %_TypedArrayGetLength(this);
+ var length = %_TypedArrayGetLength(this);
- return InnerArrayFindIndex(predicate, thisArg, this, length);
-}
-%FunctionSetLength(TypedArrayFindIndex, 1);
+ return InnerArrayFindIndex(predicate, thisArg, this, length);
+ },
+ 1 /* Set function length. */
+);
// ES6 draft 05-18-15, section 22.2.3.25
-function TypedArraySort(comparefn) {
- ValidateTypedArray(this, "%TypedArray%.prototype.sort");
+DEFINE_METHOD(
+ GlobalTypedArray.prototype,
+ sort(comparefn) {
+ ValidateTypedArray(this, "%TypedArray%.prototype.sort");
- var length = %_TypedArrayGetLength(this);
+ var length = %_TypedArrayGetLength(this);
- if (IS_UNDEFINED(comparefn)) {
- return %TypedArraySortFast(this);
- }
+ if (IS_UNDEFINED(comparefn)) {
+ return %TypedArraySortFast(this);
+ }
- return InnerArraySort(this, length, comparefn);
-}
+ return InnerArraySort(this, length, comparefn);
+ }
+);
// ES6 section 22.2.3.27
-function TypedArrayToLocaleString() {
- ValidateTypedArray(this, "%TypedArray%.prototype.toLocaleString");
+DEFINE_METHOD(
+ GlobalTypedArray.prototype,
+ toLocaleString() {
+ ValidateTypedArray(this, "%TypedArray%.prototype.toLocaleString");
- var length = %_TypedArrayGetLength(this);
+ var length = %_TypedArrayGetLength(this);
- return InnerArrayToLocaleString(this, length);
-}
+ return InnerArrayToLocaleString(this, length);
+ }
+);
// ES6 section 22.2.3.14
-function TypedArrayJoin(separator) {
- ValidateTypedArray(this, "%TypedArray%.prototype.join");
+DEFINE_METHOD(
+ GlobalTypedArray.prototype,
+ join(separator) {
+ ValidateTypedArray(this, "%TypedArray%.prototype.join");
- var length = %_TypedArrayGetLength(this);
+ var length = %_TypedArrayGetLength(this);
- return InnerArrayJoin(separator, this, length);
-}
+ return InnerArrayJoin(separator, this, length);
+ }
+);
// ES6 draft 08-24-14, section 22.2.2.2
-function TypedArrayOf() {
- var length = arguments.length;
- var array = TypedArrayCreate(this, length);
- for (var i = 0; i < length; i++) {
- array[i] = arguments[i];
+DEFINE_METHOD(
+ GlobalTypedArray,
+ of() {
+ var length = arguments.length;
+ var array = TypedArrayCreate(this, length);
+ for (var i = 0; i < length; i++) {
+ array[i] = arguments[i];
+ }
+ return array;
}
- return array;
-}
+);
// ES#sec-iterabletoarraylike Runtime Semantics: IterableToArrayLike( items )
@@ -520,31 +521,34 @@ function IterableToArrayLike(items) {
// ES#sec-%typedarray%.from
// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
-function TypedArrayFrom(source, mapfn, thisArg) {
- if (!%IsConstructor(this)) throw %make_type_error(kNotConstructor, this);
- var mapping;
- if (!IS_UNDEFINED(mapfn)) {
- if (!IS_CALLABLE(mapfn)) throw %make_type_error(kCalledNonCallable, this);
- mapping = true;
- } else {
- mapping = false;
- }
- var arrayLike = IterableToArrayLike(source);
- var length = TO_LENGTH(arrayLike.length);
- var targetObject = TypedArrayCreate(this, length);
- var value, mappedValue;
- for (var i = 0; i < length; i++) {
- value = arrayLike[i];
- if (mapping) {
- mappedValue = %_Call(mapfn, thisArg, value, i);
+DEFINE_METHOD_LEN(
+ GlobalTypedArray,
+ 'from'(source, mapfn, thisArg) {
+ if (!%IsConstructor(this)) throw %make_type_error(kNotConstructor, this);
+ var mapping;
+ if (!IS_UNDEFINED(mapfn)) {
+ if (!IS_CALLABLE(mapfn)) throw %make_type_error(kCalledNonCallable, this);
+ mapping = true;
} else {
- mappedValue = value;
+ mapping = false;
}
- targetObject[i] = mappedValue;
- }
- return targetObject;
-}
-%FunctionSetLength(TypedArrayFrom, 1);
+ var arrayLike = IterableToArrayLike(source);
+ var length = TO_LENGTH(arrayLike.length);
+ var targetObject = TypedArrayCreate(this, length);
+ var value, mappedValue;
+ for (var i = 0; i < length; i++) {
+ value = arrayLike[i];
+ if (mapping) {
+ mappedValue = %_Call(mapfn, thisArg, value, i);
+ } else {
+ mappedValue = value;
+ }
+ targetObject[i] = mappedValue;
+ }
+ return targetObject;
+ },
+ 1 /* Set function length. */
+);
// TODO(bmeurer): Migrate this to a proper builtin.
function TypedArrayConstructor() {
@@ -554,23 +558,7 @@ function TypedArrayConstructor() {
// -------------------------------------------------------------------
%SetCode(GlobalTypedArray, TypedArrayConstructor);
-utils.InstallFunctions(GlobalTypedArray, DONT_ENUM, [
- "from", TypedArrayFrom,
- "of", TypedArrayOf
-]);
-utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
- TypedArrayGetToStringTag);
-utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
- "subarray", TypedArraySubArray,
- "set", TypedArraySet,
- "filter", TypedArrayFilter,
- "find", TypedArrayFind,
- "findIndex", TypedArrayFindIndex,
- "join", TypedArrayJoin,
- "forEach", TypedArrayForEach,
- "sort", TypedArraySort,
- "toLocaleString", TypedArrayToLocaleString
-]);
+
%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
DONT_ENUM);
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 5ae75bae22..5de98a0a09 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -4,6 +4,8 @@
(function(global, utils) {
+"use strict";
+
%CheckIsBootstrapping();
// ----------------------------------------------------------------------------
@@ -15,20 +17,15 @@ var iteratorSymbol = utils.ImportNow("iterator_symbol");
// ----------------------------------------------------------------------------
// Object
-// ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
-function ObjectToLocaleString() {
- CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
- return this.toString();
-}
-
-
-// ES6 19.1.3.3 Object.prototype.isPrototypeOf(V)
-function ObjectIsPrototypeOf(V) {
- if (!IS_RECEIVER(V)) return false;
- var O = TO_OBJECT(this);
- return %HasInPrototypeChain(V, O);
-}
-
+// Set up non-enumerable functions on the Object.prototype object.
+DEFINE_METHOD(
+ GlobalObject.prototype,
+ // ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
+ toLocaleString() {
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
+ return this.toString();
+ }
+);
// ES6 7.3.9
function GetMethod(obj, p) {
@@ -54,22 +51,6 @@ function ObjectConstructor(x) {
%SetNativeFlag(GlobalObject);
%SetCode(GlobalObject, ObjectConstructor);
-%AddNamedProperty(GlobalObject.prototype, "constructor", GlobalObject,
- DONT_ENUM);
-
-// Set up non-enumerable functions on the Object.prototype object.
-utils.InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
- // toString is added in bootstrapper.cc
- "toLocaleString", ObjectToLocaleString,
- // valueOf is added in bootstrapper.cc.
- "isPrototypeOf", ObjectIsPrototypeOf,
- // propertyIsEnumerable is added in bootstrapper.cc.
- // __defineGetter__ is added in bootstrapper.cc.
- // __lookupGetter__ is added in bootstrapper.cc.
- // __defineSetter__ is added in bootstrapper.cc.
- // __lookupSetter__ is added in bootstrapper.cc.
-]);
-
// ----------------------------------------------------------------------------
// Iterator related spec functions.
diff --git a/deps/v8/src/js/weak-collection.js b/deps/v8/src/js/weak-collection.js
index f5092d29f5..30d654b806 100644
--- a/deps/v8/src/js/weak-collection.js
+++ b/deps/v8/src/js/weak-collection.js
@@ -13,10 +13,8 @@
var GetExistingHash;
var GetHash;
-var GlobalObject = global.Object;
var GlobalWeakMap = global.WeakMap;
var GlobalWeakSet = global.WeakSet;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
GetExistingHash = from.GetExistingHash;
@@ -48,69 +46,36 @@ function WeakMapConstructor(iterable) {
}
-function WeakMapGet(key) {
- if (!IS_WEAKMAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakMap.prototype.get', this);
- }
- if (!IS_RECEIVER(key)) return UNDEFINED;
- var hash = GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return UNDEFINED;
- return %WeakCollectionGet(this, key, hash);
-}
-
-
-function WeakMapSet(key, value) {
- if (!IS_WEAKMAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakMap.prototype.set', this);
- }
- if (!IS_RECEIVER(key)) throw %make_type_error(kInvalidWeakMapKey);
- return %WeakCollectionSet(this, key, value, GetHash(key));
-}
-
-
-function WeakMapHas(key) {
- if (!IS_WEAKMAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakMap.prototype.has', this);
- }
- if (!IS_RECEIVER(key)) return false;
- var hash = GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return false;
- return %WeakCollectionHas(this, key, hash);
-}
-
+// Set up the non-enumerable functions on the WeakMap prototype object.
+DEFINE_METHODS(
+ GlobalWeakMap.prototype,
+ {
+ set(key, value) {
+ if (!IS_WEAKMAP(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'WeakMap.prototype.set', this);
+ }
+ if (!IS_RECEIVER(key)) throw %make_type_error(kInvalidWeakMapKey);
+ return %WeakCollectionSet(this, key, value, GetHash(key));
+ }
-function WeakMapDelete(key) {
- if (!IS_WEAKMAP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakMap.prototype.delete', this);
+ delete(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'WeakMap.prototype.delete', this);
+ }
+ if (!IS_RECEIVER(key)) return false;
+ var hash = GetExistingHash(key);
+ if (IS_UNDEFINED(hash)) return false;
+ return %WeakCollectionDelete(this, key, hash);
+ }
}
- if (!IS_RECEIVER(key)) return false;
- var hash = GetExistingHash(key);
- if (IS_UNDEFINED(hash)) return false;
- return %WeakCollectionDelete(this, key, hash);
-}
-
+);
// -------------------------------------------------------------------
%SetCode(GlobalWeakMap, WeakMapConstructor);
%FunctionSetLength(GlobalWeakMap, 0);
-%FunctionSetPrototype(GlobalWeakMap, new GlobalObject());
-%AddNamedProperty(GlobalWeakMap.prototype, "constructor", GlobalWeakMap,
- DONT_ENUM);
-%AddNamedProperty(GlobalWeakMap.prototype, toStringTagSymbol, "WeakMap",
- DONT_ENUM | READ_ONLY);
-
-// Set up the non-enumerable functions on the WeakMap prototype object.
-utils.InstallFunctions(GlobalWeakMap.prototype, DONT_ENUM, [
- "get", WeakMapGet,
- "set", WeakMapSet,
- "has", WeakMapHas,
- "delete", WeakMapDelete
-]);
// -------------------------------------------------------------------
// Harmony WeakSet
@@ -134,55 +99,35 @@ function WeakSetConstructor(iterable) {
}
-function WeakSetAdd(value) {
- if (!IS_WEAKSET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakSet.prototype.add', this);
- }
- if (!IS_RECEIVER(value)) throw %make_type_error(kInvalidWeakSetValue);
- return %WeakCollectionSet(this, value, true, GetHash(value));
-}
-
-
-function WeakSetHas(value) {
- if (!IS_WEAKSET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakSet.prototype.has', this);
- }
- if (!IS_RECEIVER(value)) return false;
- var hash = GetExistingHash(value);
- if (IS_UNDEFINED(hash)) return false;
- return %WeakCollectionHas(this, value, hash);
-}
-
+// Set up the non-enumerable functions on the WeakSet prototype object.
+DEFINE_METHODS(
+ GlobalWeakSet.prototype,
+ {
+ add(value) {
+ if (!IS_WEAKSET(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'WeakSet.prototype.add', this);
+ }
+ if (!IS_RECEIVER(value)) throw %make_type_error(kInvalidWeakSetValue);
+ return %WeakCollectionSet(this, value, true, GetHash(value));
+ }
-function WeakSetDelete(value) {
- if (!IS_WEAKSET(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'WeakSet.prototype.delete', this);
+ delete(value) {
+ if (!IS_WEAKSET(this)) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'WeakSet.prototype.delete', this);
+ }
+ if (!IS_RECEIVER(value)) return false;
+ var hash = GetExistingHash(value);
+ if (IS_UNDEFINED(hash)) return false;
+ return %WeakCollectionDelete(this, value, hash);
+ }
}
- if (!IS_RECEIVER(value)) return false;
- var hash = GetExistingHash(value);
- if (IS_UNDEFINED(hash)) return false;
- return %WeakCollectionDelete(this, value, hash);
-}
-
+);
// -------------------------------------------------------------------
%SetCode(GlobalWeakSet, WeakSetConstructor);
%FunctionSetLength(GlobalWeakSet, 0);
-%FunctionSetPrototype(GlobalWeakSet, new GlobalObject());
-%AddNamedProperty(GlobalWeakSet.prototype, "constructor", GlobalWeakSet,
- DONT_ENUM);
-%AddNamedProperty(GlobalWeakSet.prototype, toStringTagSymbol, "WeakSet",
- DONT_ENUM | READ_ONLY);
-
-// Set up the non-enumerable functions on the WeakSet prototype object.
-utils.InstallFunctions(GlobalWeakSet.prototype, DONT_ENUM, [
- "add", WeakSetAdd,
- "has", WeakSetHas,
- "delete", WeakSetDelete
-]);
})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 93d305df7a..ab48034b17 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -80,6 +80,8 @@ MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
Handle<String> name) {
+ STACK_CHECK(isolate_, false);
+
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, result, InternalizeJsonProperty(holder, name), false);
@@ -513,14 +515,14 @@ class ElementKindLattice {
ElementsKind GetElementsKind() const {
switch (value_) {
case SMI_ELEMENTS:
- return FAST_SMI_ELEMENTS;
+ return PACKED_SMI_ELEMENTS;
case NUMBER_ELEMENTS:
- return FAST_DOUBLE_ELEMENTS;
+ return PACKED_DOUBLE_ELEMENTS;
case OBJECT_ELEMENTS:
- return FAST_ELEMENTS;
+ return PACKED_ELEMENTS;
default:
UNREACHABLE();
- return FAST_ELEMENTS;
+ return PACKED_ELEMENTS;
}
}
@@ -557,15 +559,15 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
const ElementsKind kind = lattice.GetElementsKind();
switch (kind) {
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
+ case PACKED_ELEMENTS:
+ case PACKED_SMI_ELEMENTS: {
Handle<FixedArray> elems =
factory()->NewFixedArray(elements.length(), pretenure_);
for (int i = 0; i < elements.length(); i++) elems->set(i, *elements[i]);
json_array = factory()->NewJSArrayWithElements(elems, kind, pretenure_);
break;
}
- case FAST_DOUBLE_ELEMENTS: {
+ case PACKED_DOUBLE_ELEMENTS: {
Handle<FixedDoubleArray> elems = Handle<FixedDoubleArray>::cast(
factory()->NewFixedDoubleArray(elements.length(), pretenure_));
for (int i = 0; i < elements.length(); i++) {
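
For context, the FAST_* elements-kind names used by the old code are renamed to PACKED_* throughout this update; a minimal sketch of the correspondence, using illustrative enum names rather than the real definitions in elements-kind.h:

    // Illustrative names only; actual values and ordering live in src/elements-kind.h.
    enum ElementsKindSketch {
      PACKED_SMI_ELEMENTS,     // previously FAST_SMI_ELEMENTS
      PACKED_DOUBLE_ELEMENTS,  // previously FAST_DOUBLE_ELEMENTS
      PACKED_ELEMENTS          // previously FAST_ELEMENTS
    };
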
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index f31aedd9a9..5cbc419150 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -216,7 +216,7 @@ MaybeHandle<Object> JsonStringifier::ApplyReplacerFunction(
Handle<JSReceiver> JsonStringifier::CurrentHolder(
Handle<Object> value, Handle<Object> initial_holder) {
- int length = Smi::cast(stack_->length())->value();
+ int length = Smi::ToInt(stack_->length());
if (length == 0) {
Handle<JSObject> holder =
factory()->NewJSObject(isolate_->object_function());
@@ -237,7 +237,7 @@ JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object) {
return EXCEPTION;
}
- int length = Smi::cast(stack_->length())->value();
+ int length = Smi::ToInt(stack_->length());
{
DisallowHeapAllocation no_allocation;
FixedArray* elements = FixedArray::cast(stack_->elements());
@@ -257,7 +257,7 @@ JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object) {
}
void JsonStringifier::StackPop() {
- int length = Smi::cast(stack_->length())->value();
+ int length = Smi::ToInt(stack_->length());
stack_->set_length(Smi::FromInt(length - 1));
}
@@ -334,7 +334,6 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
}
UNREACHABLE();
- return UNCHANGED;
}
JsonStringifier::Result JsonStringifier::SerializeJSValue(
@@ -395,7 +394,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
uint32_t i = 0;
if (replacer_function_.is_null()) {
switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(object->elements()),
isolate_);
StackLimitCheck interrupt_check(isolate_);
@@ -411,7 +410,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
}
break;
}
- case FAST_DOUBLE_ELEMENTS: {
+ case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
Handle<FixedDoubleArray> elements(
@@ -429,11 +428,11 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
}
break;
}
- case FAST_ELEMENTS: {
+ case PACKED_ELEMENTS: {
Handle<Object> old_length(object->length(), isolate_);
while (i < length) {
if (object->length() != *old_length ||
- object->GetElementsKind() != FAST_ELEMENTS) {
+ object->GetElementsKind() != PACKED_ELEMENTS) {
// Fall back to slow path.
break;
}
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index da26263fc4..534ea864b9 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -218,11 +218,7 @@ void TrySettingEmptyEnumCache(JSReceiver* object) {
DCHECK_EQ(kInvalidEnumCacheSentinel, map->EnumLength());
if (!map->OnlyHasSimpleProperties()) return;
if (map->IsJSProxyMap()) return;
- if (map->NumberOfOwnDescriptors() > 0) {
- int number_of_enumerable_own_properties =
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
- if (number_of_enumerable_own_properties > 0) return;
- }
+ if (map->NumberOfEnumerableProperties() > 0) return;
DCHECK(object->IsJSObject());
map->SetEnumLength(0);
}
@@ -286,12 +282,9 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
// first step to using the cache is to set the enum length of the map by
// counting the number of own descriptors that are ENUMERABLE_STRINGS.
if (own_property_count == kInvalidEnumCacheSentinel) {
- own_property_count =
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+ own_property_count = map->NumberOfEnumerableProperties();
} else {
- DCHECK(
- own_property_count ==
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
+ DCHECK_EQ(own_property_count, map->NumberOfEnumerableProperties());
}
if (descs->HasEnumCache()) {
@@ -574,7 +567,7 @@ Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
Handle<JSObject> object,
T* raw_dictionary) {
Handle<T> dictionary(raw_dictionary, isolate);
- int length = dictionary->NumberOfEnumElements();
+ int length = dictionary->NumberOfEnumerableProperties();
if (length == 0) {
return isolate->factory()->empty_fixed_array();
}
@@ -606,7 +599,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
}
} else if (object->IsJSGlobalObject()) {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
- isolate_, mode_, this, object, object->global_dictionary());
+ isolate_, mode_, this, object,
+ JSGlobalObject::cast(*object)->global_dictionary());
} else {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object, object->property_dictionary());
@@ -627,7 +621,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
}
} else if (object->IsJSGlobalObject()) {
GlobalDictionary::CollectKeysTo(
- handle(object->global_dictionary(), isolate_), this);
+ handle(JSGlobalObject::cast(*object)->global_dictionary(), isolate_),
+ this);
} else {
NameDictionary::CollectKeysTo(
handle(object->property_dictionary(), isolate_), this);
@@ -704,7 +699,7 @@ Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
} else if (object->IsJSGlobalObject()) {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
- object->global_dictionary());
+ JSGlobalObject::cast(*object)->global_dictionary());
} else {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index c5ac93c098..0db12d96ba 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -5,8 +5,8 @@
#ifndef V8_KEYS_H_
#define V8_KEYS_H_
-#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/hash-table.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index e77a2afcc1..680754cf20 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -44,7 +44,6 @@ class Label {
if (pos_ < 0) return -pos_ - 1;
if (pos_ > 0) return pos_ - 1;
UNREACHABLE();
- return 0;
}
int near_link_pos() const { return near_link_pos_ - 1; }
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 4f193b30e8..f49fd14a3f 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -20,9 +20,11 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
// The whole bit vector fits into a smi.
return handle(LayoutDescriptor::FromSmi(Smi::kZero), isolate);
}
- length = GetSlowModeBackingStoreLength(length);
- return Handle<LayoutDescriptor>::cast(isolate->factory()->NewFixedTypedArray(
- length, kExternalUint32Array, true));
+ int backing_store_length = GetSlowModeBackingStoreLength(length);
+ Handle<LayoutDescriptor> result = Handle<LayoutDescriptor>::cast(
+ isolate->factory()->NewByteArray(backing_store_length));
+ memset(result->GetDataStartAddress(), 0, result->DataSize());
+ return result;
}
@@ -47,11 +49,11 @@ bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
return false;
}
- *layout_word_index = field_index / kNumberOfBits;
+ *layout_word_index = field_index / kBitsPerLayoutWord;
CHECK((!IsSmi() && (*layout_word_index < length())) ||
(IsSmi() && (*layout_word_index < 1)));
- *layout_bit_index = field_index % kNumberOfBits;
+ *layout_bit_index = field_index % kBitsPerLayoutWord;
return true;
}
@@ -72,16 +74,16 @@ LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
if (IsSlowLayout()) {
- uint32_t value = get_scalar(layout_word_index);
+ uint32_t value = get_layout_word(layout_word_index);
if (tagged) {
value &= ~layout_mask;
} else {
value |= layout_mask;
}
- set(layout_word_index, value);
+ set_layout_word(layout_word_index, value);
return this;
} else {
- uint32_t value = static_cast<uint32_t>(Smi::cast(this)->value());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(this));
if (tagged) {
value &= ~layout_mask;
} else {
@@ -105,10 +107,10 @@ bool LayoutDescriptor::IsTagged(int field_index) {
uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
if (IsSlowLayout()) {
- uint32_t value = get_scalar(layout_word_index);
+ uint32_t value = get_layout_word(layout_word_index);
return (value & layout_mask) == 0;
} else {
- uint32_t value = static_cast<uint32_t>(Smi::cast(this)->value());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(this));
return (value & layout_mask) == 0;
}
}
@@ -128,39 +130,24 @@ bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
int LayoutDescriptor::capacity() {
- return IsSlowLayout() ? (length() * kNumberOfBits) : kSmiValueSize;
+ return IsSlowLayout() ? (length() * kBitsPerByte) : kSmiValueSize;
}
LayoutDescriptor* LayoutDescriptor::cast_gc_safe(Object* object) {
- if (object->IsSmi()) {
- // Fast mode layout descriptor.
- return reinterpret_cast<LayoutDescriptor*>(object);
- }
-
- // This is a mixed descriptor which is a fixed typed array.
- MapWord map_word = reinterpret_cast<HeapObject*>(object)->map_word();
- if (map_word.IsForwardingAddress()) {
- // Mark-compact has already moved layout descriptor.
- object = map_word.ToForwardingAddress();
- }
- return LayoutDescriptor::cast(object);
+ // The map word of the object can be a forwarding pointer during
+ // object evacuation phase of GC. Since the layout descriptor methods
+ // for checking whether a field is tagged or not do not depend on the
+ // object map, it should be safe.
+ return reinterpret_cast<LayoutDescriptor*>(object);
}
-
int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
- length = (length + kNumberOfBits - 1) / kNumberOfBits;
DCHECK_LT(0, length);
-
- if (SmiValuesAre32Bits() && (length & 1)) {
- // On 64-bit systems if the length is odd then the half-word space would be
- // lost anyway (due to alignment and the fact that we are allocating
- // uint32-typed array), so we increase the length of allocated array
- // to utilize that "lost" space which could also help to avoid layout
- // descriptor reallocations.
- ++length;
- }
- return length;
+ // We allocate kPointerSize rounded blocks of memory anyway so we increase
+ // the length of allocated array to utilize that "lost" space which could
+ // also help to avoid layout descriptor reallocations.
+ return RoundUp(length, kBitsPerByte * kPointerSize) / kBitsPerByte;
}
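
A hypothetical re-derivation of the new sizing rule above, assuming 64-bit pointers: the bit length is rounded up to a whole pointer-sized word and then converted to bytes (constant names here are illustrative, not the V8 ones):

    constexpr int kSketchBitsPerByte = 8;
    constexpr int kSketchPointerSize = 8;

    constexpr int BackingStoreBytes(int length_in_bits) {
      return ((length_in_bits + kSketchBitsPerByte * kSketchPointerSize - 1) /
              (kSketchBitsPerByte * kSketchPointerSize)) *
             kSketchPointerSize;
    }

    // Example: 40 layout bits round up to one 8-byte word; 65 bits need two.
    static_assert(BackingStoreBytes(40) == 8, "40 bits -> 8 bytes");
    static_assert(BackingStoreBytes(65) == 16, "65 bits -> 16 bytes");
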
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 001bfe0637..ed3f738735 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -107,14 +107,14 @@ Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
DCHECK(new_layout_descriptor->IsSlowLayout());
if (layout_descriptor->IsSlowLayout()) {
- memcpy(new_layout_descriptor->DataPtr(), layout_descriptor->DataPtr(),
+ memcpy(new_layout_descriptor->GetDataStartAddress(),
+ layout_descriptor->GetDataStartAddress(),
layout_descriptor->DataSize());
return new_layout_descriptor;
} else {
// Fast layout.
- uint32_t value =
- static_cast<uint32_t>(Smi::cast(*layout_descriptor)->value());
- new_layout_descriptor->set(0, value);
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(*layout_descriptor));
+ new_layout_descriptor->set_layout_word(0, value);
return new_layout_descriptor;
}
}
@@ -138,30 +138,29 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
}
uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
- uint32_t value = IsSlowLayout()
- ? get_scalar(layout_word_index)
- : static_cast<uint32_t>(Smi::cast(this)->value());
+ uint32_t value = IsSlowLayout() ? get_layout_word(layout_word_index)
+ : static_cast<uint32_t>(Smi::ToInt(this));
bool is_tagged = (value & layout_mask) == 0;
if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
value = value & ~(layout_mask - 1); // Clear bits we are not interested in.
int sequence_length = CountTrailingZeros32(value) - layout_bit_index;
- if (layout_bit_index + sequence_length == kNumberOfBits) {
+ if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
// This is a contiguous sequence till the end of current word, proceed
// counting in the subsequent words.
if (IsSlowLayout()) {
- int len = length();
++layout_word_index;
- for (; layout_word_index < len; layout_word_index++) {
- value = get_scalar(layout_word_index);
+ int num_words = number_of_layout_words();
+ for (; layout_word_index < num_words; layout_word_index++) {
+ value = get_layout_word(layout_word_index);
bool cur_is_tagged = (value & 1) == 0;
if (cur_is_tagged != is_tagged) break;
if (!is_tagged) value = ~value; // Count set bits instead.
int cur_sequence_length = CountTrailingZeros32(value);
sequence_length += cur_sequence_length;
if (sequence_length >= max_sequence_length) break;
- if (cur_sequence_length != kNumberOfBits) break;
+ if (cur_sequence_length != kBitsPerLayoutWord) break;
}
}
if (is_tagged && (field_index + sequence_length == capacity())) {
@@ -241,14 +240,15 @@ LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
DCHECK_LT(kSmiValueSize, layout_descriptor_length);
// Trim, clean and reinitialize this slow-mode layout descriptor.
- int array_length = GetSlowModeBackingStoreLength(layout_descriptor_length);
- int current_length = length();
- if (current_length != array_length) {
- DCHECK_LT(array_length, current_length);
- int delta = current_length - array_length;
+ int new_backing_store_length =
+ GetSlowModeBackingStoreLength(layout_descriptor_length);
+ int backing_store_length = length();
+ if (new_backing_store_length != backing_store_length) {
+ DCHECK_LT(new_backing_store_length, backing_store_length);
+ int delta = backing_store_length - new_backing_store_length;
heap->RightTrimFixedArray(this, delta);
}
- memset(DataPtr(), 0, DataSize());
+ memset(GetDataStartAddress(), 0, DataSize());
LayoutDescriptor* layout_descriptor =
Initialize(this, map, descriptors, num_descriptors);
DCHECK_EQ(this, layout_descriptor);
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index b75536a36f..7f8b311f3c 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -21,8 +21,10 @@ namespace internal {
// Otherwise the field is considered tagged. If the queried bit lays "outside"
// of the descriptor then the field is also considered tagged.
// Once a layout descriptor is created it is allowed only to append properties
-// to it.
-class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
+// to it. GC uses layout descriptors to iterate objects. Avoid heap pointers
+// in a layout descriptor because they can lead to data races in GC when
+// GC moves objects in parallel.
+class LayoutDescriptor : public ByteArray {
public:
V8_INLINE bool IsTagged(int field_index);
@@ -94,7 +96,10 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
LayoutDescriptor* SetTaggedForTesting(int field_index, bool tagged);
private:
- static const int kNumberOfBits = 32;
+ static const int kBitsPerLayoutWord = 32;
+ int number_of_layout_words() { return length() / kUInt32Size; }
+ uint32_t get_layout_word(int index) const { return get_uint32(index); }
+ void set_layout_word(int index, uint32_t value) { set_uint32(index, value); }
V8_INLINE static Handle<LayoutDescriptor> New(Isolate* isolate, int length);
V8_INLINE static LayoutDescriptor* FromSmi(Smi* smi);
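
Each layout word encodes one bit per field, and a cleared bit means the field is tagged; a standalone sketch of the per-word check that IsTagged/SetTagged perform (names are illustrative):

    #include <cstdint>

    // A field is tagged (holds a heap pointer) when its bit in the layout word is 0.
    bool IsTaggedFieldSketch(uint32_t layout_word, int bit_index) {
      const uint32_t mask = uint32_t{1} << bit_index;
      return (layout_word & mask) == 0;
    }
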
diff --git a/deps/v8/src/libplatform/OWNERS b/deps/v8/src/libplatform/OWNERS
index d691287b2d..8c766ab282 100644
--- a/deps/v8/src/libplatform/OWNERS
+++ b/deps/v8/src/libplatform/OWNERS
@@ -1 +1,3 @@
-jochen@chromium.org
+mlippautz@chromium.org
+
+# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 34cda33b43..6245814c74 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -13,8 +13,6 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
-#include "src/libplatform/tracing/trace-buffer.h"
-#include "src/libplatform/tracing/trace-writer.h"
#include "src/libplatform/worker-thread.h"
namespace v8 {
@@ -31,15 +29,15 @@ void PrintStackTrace() {
} // namespace
-v8::Platform* CreateDefaultPlatform(int thread_pool_size,
- IdleTaskSupport idle_task_support,
- InProcessStackDumping in_process_stack_dumping,
- v8::TracingController* tracing_controller) {
+v8::Platform* CreateDefaultPlatform(
+ int thread_pool_size, IdleTaskSupport idle_task_support,
+ InProcessStackDumping in_process_stack_dumping,
+ v8::TracingController* tracing_controller) {
if (in_process_stack_dumping == InProcessStackDumping::kEnabled) {
v8::base::debug::EnableInProcessStackDumping();
}
DefaultPlatform* platform =
- new DefaultPlatform(idle_task_support, tracing_controller);
+ new DefaultPlatform(idle_task_support, tracing_controller);
platform->SetThreadPoolSize(thread_pool_size);
platform->EnsureInitialized();
return platform;
@@ -65,7 +63,7 @@ void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller) {
- return static_cast<DefaultPlatform*>(platform)->SetTracingController(
+ static_cast<DefaultPlatform*>(platform)->SetTracingController(
tracing_controller);
}
@@ -79,11 +77,8 @@ DefaultPlatform::DefaultPlatform(IdleTaskSupport idle_task_support,
if (tracing_controller) {
tracing_controller_.reset(tracing_controller);
} else {
- tracing::TraceWriter* writer = new tracing::NullTraceWriter();
- tracing::TraceBuffer* ring_buffer =
- new tracing::TraceBufferRingBuffer(1, writer);
tracing::TracingController* controller = new tracing::TracingController();
- controller->Initialize(ring_buffer);
+ controller->Initialize(nullptr);
tracing_controller_.reset(controller);
}
}
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index 67559f91fe..43d7cb6a90 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -26,14 +26,6 @@ class JSONTraceWriter : public TraceWriter {
bool append_comma_ = false;
};
-class NullTraceWriter : public TraceWriter {
- public:
- NullTraceWriter() = default;
- ~NullTraceWriter() = default;
- void AppendTraceEvent(TraceObject*) override {}
- void Flush() override {}
-};
-
} // namespace tracing
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 4e71f432e8..2fb610ac72 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -7,6 +7,7 @@
#include "include/libplatform/v8-tracing.h"
+#include "src/base/atomicops.h"
#include "src/base/platform/mutex.h"
namespace v8 {
@@ -144,11 +145,13 @@ void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
enabled_flag |= ENABLED_FOR_RECORDING;
}
- g_category_group_enabled[category_index] = enabled_flag;
+ base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(
+ g_category_group_enabled + category_index),
+ enabled_flag);
}
void TracingController::UpdateCategoryGroupEnabledFlags() {
- size_t category_index = base::NoBarrier_Load(&g_category_index);
+ size_t category_index = base::Relaxed_Load(&g_category_index);
for (size_t i = 0; i < category_index; i++) UpdateCategoryGroupEnabledFlag(i);
}
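
The enabled-flag update above replaces a plain byte write with base::Relaxed_Store, so the write becomes an atomic relaxed store; in standard C++ terms this is roughly the following (names are illustrative, and V8 uses its own base::Atomic8 helpers rather than std::atomic):

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-in for one slot of g_category_group_enabled.
    std::atomic<uint8_t> category_enabled_sketch{0};

    void SetCategoryEnabledSketch(uint8_t flag) {
      // Relaxed ordering: atomicity without any cross-thread ordering guarantees.
      category_enabled_sketch.store(flag, std::memory_order_relaxed);
    }
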
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index f65498aa60..8b351613e7 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -13,7 +13,7 @@
#include <signal.h>
#include <sys/time.h>
-#if !V8_OS_QNX && !V8_OS_AIX
+#if !V8_OS_QNX && !V8_OS_FUCHSIA && !V8_OS_AIX
#include <sys/syscall.h> // NOLINT
#endif
@@ -415,7 +415,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
-#if V8_OS_LINUX
+#if V8_OS_LINUX || V8_OS_FUCHSIA
#if V8_HOST_ARCH_IA32
state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
@@ -602,7 +602,7 @@ void Sampler::Stop() {
void Sampler::IncreaseProfilingDepth() {
- base::NoBarrier_AtomicIncrement(&profiling_, 1);
+ base::Relaxed_AtomicIncrement(&profiling_, 1);
#if defined(USE_SIGNALS)
SignalHandler::IncreaseSamplerCount();
#endif
@@ -613,7 +613,7 @@ void Sampler::DecreaseProfilingDepth() {
#if defined(USE_SIGNALS)
SignalHandler::DecreaseSamplerCount();
#endif
- base::NoBarrier_AtomicIncrement(&profiling_, -1);
+ base::Relaxed_AtomicIncrement(&profiling_, -1);
}
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 311d577e1e..6ce6798a44 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -46,24 +46,24 @@ class Sampler {
// Whether the sampling thread should use this Sampler for CPU profiling?
bool IsProfiling() const {
- return base::NoBarrier_Load(&profiling_) > 0 &&
- !base::NoBarrier_Load(&has_processing_thread_);
+ return base::Relaxed_Load(&profiling_) > 0 &&
+ !base::Relaxed_Load(&has_processing_thread_);
}
void IncreaseProfilingDepth();
void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return base::NoBarrier_Load(&active_) != 0; }
+ bool IsActive() const { return base::Relaxed_Load(&active_) != 0; }
// CpuProfiler collects samples by calling DoSample directly
// without calling Start. To keep it working, we register the sampler
// with the CpuProfiler.
- bool IsRegistered() const { return base::NoBarrier_Load(&registered_) != 0; }
+ bool IsRegistered() const { return base::Relaxed_Load(&registered_) != 0; }
void DoSample();
void SetHasProcessingThread(bool value) {
- base::NoBarrier_Store(&has_processing_thread_, value);
+ base::Relaxed_Store(&has_processing_thread_, value);
}
// Used in tests to make sure that stack sampling is performed.
@@ -85,8 +85,8 @@ class Sampler {
unsigned external_sample_count_;
private:
- void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
- void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
+ void SetActive(bool value) { base::Relaxed_Store(&active_, value); }
+ void SetRegistered(bool value) { base::Relaxed_Store(&registered_, value); }
Isolate* isolate_;
base::Atomic32 profiling_;
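
The NoBarrier_* to Relaxed_* renames in this header do not change behavior; both spellings map to relaxed atomic accesses, roughly equivalent to the following in standard C++ (field names are illustrative):

    #include <atomic>

    struct SamplerSketch {
      std::atomic<int> profiling_{0};
      std::atomic<int> has_processing_thread_{0};

      // Mirrors IsProfiling(): two relaxed loads, no ordering guarantees implied.
      bool IsProfiling() const {
        return profiling_.load(std::memory_order_relaxed) > 0 &&
               !has_processing_thread_.load(std::memory_order_relaxed);
      }
    };
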
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 462f83f534..7c3de0c6ca 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -164,7 +164,7 @@ void Log::MessageBuilder::Append(String* str) {
}
void Log::MessageBuilder::AppendAddress(Address addr) {
- Append("%p", static_cast<void*>(addr));
+ Append("0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(addr));
}
void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 7f029d87b5..83ca726c59 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -370,8 +370,6 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
-#elif V8_TARGET_ARCH_X87
- const char arch[] = "x87";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390
@@ -559,7 +557,7 @@ class Profiler: public base::Thread {
if (paused_)
return;
- if (Succ(head_) == static_cast<int>(base::NoBarrier_Load(&tail_))) {
+ if (Succ(head_) == static_cast<int>(base::Relaxed_Load(&tail_))) {
overflow_ = true;
} else {
buffer_[head_] = *sample;
@@ -578,10 +576,10 @@ class Profiler: public base::Thread {
// Waits for a signal and removes profiling data.
bool Remove(v8::TickSample* sample) {
buffer_semaphore_.Wait(); // Wait for an element.
- *sample = buffer_[base::NoBarrier_Load(&tail_)];
+ *sample = buffer_[base::Relaxed_Load(&tail_)];
bool result = overflow_;
- base::NoBarrier_Store(&tail_, static_cast<base::Atomic32>(
- Succ(base::NoBarrier_Load(&tail_))));
+ base::Relaxed_Store(
+ &tail_, static_cast<base::Atomic32>(Succ(base::Relaxed_Load(&tail_))));
overflow_ = false;
return result;
}
@@ -667,8 +665,8 @@ Profiler::Profiler(Isolate* isolate)
buffer_semaphore_(0),
engaged_(false),
paused_(false) {
- base::NoBarrier_Store(&tail_, 0);
- base::NoBarrier_Store(&running_, 0);
+ base::Relaxed_Store(&tail_, 0);
+ base::Relaxed_Store(&running_, 0);
}
@@ -685,7 +683,7 @@ void Profiler::Engage() {
}
// Start thread processing the profiler buffer.
- base::NoBarrier_Store(&running_, 1);
+ base::Relaxed_Store(&running_, 1);
Start();
// Register to get ticks.
@@ -705,7 +703,7 @@ void Profiler::Disengage() {
// Terminate the worker thread by setting running_ to false,
// inserting a fake element in the queue and then wait for
// the thread to terminate.
- base::NoBarrier_Store(&running_, 0);
+ base::Relaxed_Store(&running_, 0);
v8::TickSample sample;
// Reset 'paused_' flag, otherwise semaphore may not be signalled.
resume();
@@ -719,7 +717,7 @@ void Profiler::Disengage() {
void Profiler::Run() {
v8::TickSample sample;
bool overflow = Remove(&sample);
- while (base::NoBarrier_Load(&running_)) {
+ while (base::Relaxed_Load(&running_)) {
LOG(isolate_, TickEvent(&sample, overflow));
overflow = Remove(&sample);
}
@@ -1349,7 +1347,7 @@ void Logger::ICEvent(const char* type, bool keyed, const Address pc, int line,
msg.AppendAddress(reinterpret_cast<Address>(map));
msg.Append(",");
if (key->IsSmi()) {
- msg.Append("%d", Smi::cast(key)->value());
+ msg.Append("%d", Smi::ToInt(key));
} else if (key->IsNumber()) {
msg.Append("%lf", key->Number());
} else if (key->IsString()) {
@@ -1380,34 +1378,6 @@ void Logger::CompareIC(const Address pc, int line, int column, Code* stub,
msg.WriteToLogFile();
}
-void Logger::BinaryOpIC(const Address pc, int line, int column, Code* stub,
- const char* old_state, const char* new_state,
- AllocationSite* allocation_site) {
- if (!log_->IsEnabled() || !FLAG_trace_ic) return;
- Log::MessageBuilder msg(log_);
- msg.Append("BinaryOpIC,");
- msg.AppendAddress(pc);
- msg.Append(",%d,%d,", line, column);
- msg.AppendAddress(reinterpret_cast<Address>(stub));
- msg.Append(",%s,%s,", old_state, new_state);
- if (allocation_site != nullptr) {
- msg.AppendAddress(reinterpret_cast<Address>(allocation_site));
- }
- msg.WriteToLogFile();
-}
-
-void Logger::ToBooleanIC(const Address pc, int line, int column, Code* stub,
- const char* old_state, const char* new_state) {
- if (!log_->IsEnabled() || !FLAG_trace_ic) return;
- Log::MessageBuilder msg(log_);
- msg.Append("ToBooleanIC,");
- msg.AppendAddress(pc);
- msg.Append(",%d,%d,", line, column);
- msg.AppendAddress(reinterpret_cast<Address>(stub));
- msg.Append(",%s,%s,", old_state, new_state);
- msg.WriteToLogFile();
-}
-
void Logger::PatchIC(const Address pc, const Address test, int delta) {
if (!log_->IsEnabled() || !FLAG_trace_ic) return;
Log::MessageBuilder msg(log_);
@@ -1526,9 +1496,7 @@ void Logger::LogCodeObject(Object* object) {
return; // We log this later using LogCompiledFunctions.
case AbstractCode::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
- case AbstractCode::BINARY_OP_IC: // fall through
case AbstractCode::COMPARE_IC: // fall through
- case AbstractCode::TO_BOOLEAN_IC: // fall through
case AbstractCode::STUB:
description =
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 0ed0580c37..46b5d3789c 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -196,11 +196,6 @@ class Logger : public CodeEventListener {
const char* op, const char* old_left, const char* old_right,
const char* old_state, const char* new_left,
const char* new_right, const char* new_state);
- void BinaryOpIC(const Address pc, int line, int column, Code* stub,
- const char* old_state, const char* new_state,
- AllocationSite* allocation_site);
- void ToBooleanIC(const Address pc, int line, int column, Code* stub,
- const char* old_state, const char* new_state);
void PatchIC(const Address pc, const Address test, int delta);
// ==== Events logged by --log-gc. ====
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index cbbdd05079..9d66987435 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -208,7 +208,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (IsElement()) {
ElementsKind kind = holder->GetElementsKind();
ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
+ if (IsHoleyOrDictionaryElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
if (kind != to) {
@@ -216,17 +216,16 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
}
// Copy the backing store if it is copy-on-write.
- if (IsFastSmiOrObjectElementsKind(to)) {
+ if (IsSmiOrObjectElementsKind(to)) {
JSObject::EnsureWritableFastElements(holder);
}
return;
}
if (holder->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dictionary(holder->global_dictionary());
- Handle<PropertyCell> cell(
- PropertyCell::cast(dictionary->ValueAt(dictionary_entry())));
- DCHECK(!cell->IsTheHole(isolate_));
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*holder)->global_dictionary());
+ Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()));
property_details_ = cell->property_details();
PropertyCell::PrepareForValue(dictionary, dictionary_entry(), value,
property_details_);
@@ -289,9 +288,10 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
}
if (!IsElement() && !holder->HasFastProperties()) {
- PropertyDetails details(kData, attributes, 0, PropertyCellType::kMutable);
+ PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
if (holder->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dictionary(holder->global_dictionary());
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*holder)->global_dictionary());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
dictionary, dictionary_entry(), value, details);
@@ -304,7 +304,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
int enumeration_index = original_details.dictionary_index();
DCHECK(enumeration_index > 0);
details = details.set_index(enumeration_index);
- dictionary->SetEntry(dictionary_entry(), name(), value, details);
+ dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
property_details_ = details;
}
state_ = DATA;
@@ -357,8 +357,8 @@ void LookupIterator::PrepareTransitionToDataProperty(
// SetNextEnumerationIndex.
int index = dictionary->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index + 1);
- property_details_ = PropertyDetails(kData, attributes, index,
- PropertyCellType::kUninitialized);
+ property_details_ = PropertyDetails(
+ kData, attributes, PropertyCellType::kUninitialized, index);
PropertyCellType new_type =
PropertyCell::UpdatedType(cell, value, property_details_);
property_details_ = property_details_.set_cell_type(new_type);
@@ -368,7 +368,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
} else {
// Don't set enumeration index (it will be set during value store).
property_details_ =
- PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
transition_ = map;
}
return;
@@ -382,7 +382,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
if (transition->is_dictionary_map()) {
// Don't set enumeration index (it will be set during value store).
property_details_ =
- PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
+ PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
} else {
property_details_ = transition->GetLastDescriptorDetails();
has_property_ = true;
@@ -414,7 +414,7 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
dictionary = NameDictionary::Add(dictionary, name(),
isolate_->factory()->uninitialized_value(),
property_details_, &entry);
- receiver->set_properties(*dictionary);
+ receiver->SetProperties(*dictionary);
// Reload details containing proper enumeration index value.
property_details_ = dictionary->DetailsAt(entry);
number_ = entry;
@@ -448,8 +448,7 @@ void LookupIterator::Delete() {
"DeletingProperty");
ReloadPropertyInformation<false>();
}
- // TODO(verwaest): Get rid of the name_ argument.
- JSReceiver::DeleteNormalizedProperty(holder, name_, number_);
+ JSReceiver::DeleteNormalizedProperty(holder, number_);
if (holder->IsJSObject()) {
JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
}
@@ -531,15 +530,15 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
Handle<JSObject> receiver = GetStoreTarget();
holder_ = receiver;
- PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kMutable);
+ PropertyDetails details(kAccessor, attributes, PropertyCellType::kMutable);
if (IsElement()) {
// TODO(verwaest): Move code into the element accessor.
Handle<SeededNumberDictionary> dictionary =
JSObject::NormalizeElements(receiver);
- dictionary = SeededNumberDictionary::Set(dictionary, index_, pair, details,
- receiver);
+ dictionary = SeededNumberDictionary::Set(dictionary, index_, pair, receiver,
+ details);
receiver->RequireSlowElements(*dictionary);
if (receiver->HasSlowArgumentsElements()) {
@@ -599,10 +598,8 @@ Handle<Object> LookupIterator::FetchValue() const {
ElementsAccessor* accessor = holder->GetElementsAccessor();
return accessor->Get(holder, number_);
} else if (holder_->IsJSGlobalObject()) {
- Handle<JSObject> holder = GetHolder<JSObject>();
+ Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
result = holder->global_dictionary()->ValueAt(number_);
- DCHECK(result->IsPropertyCell());
- result = PropertyCell::cast(result)->value();
} else if (!holder_->HasFastProperties()) {
result = holder_->property_dictionary()->ValueAt(number_);
} else if (property_details_.location() == kField) {
@@ -711,9 +708,8 @@ Handle<FieldType> LookupIterator::GetFieldType() const {
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- Object* value = holder->global_dictionary()->ValueAt(dictionary_entry());
- DCHECK(value->IsPropertyCell());
- return handle(PropertyCell::cast(value), isolate_);
+ return handle(holder->global_dictionary()->CellAt(dictionary_entry()),
+ isolate_);
}
@@ -751,10 +747,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_EQ(kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject()) {
- GlobalDictionary* dictionary = JSObject::cast(*holder)->global_dictionary();
- Object* cell = dictionary->ValueAt(dictionary_entry());
- DCHECK(cell->IsPropertyCell());
- PropertyCell::cast(cell)->set_value(*value);
+ GlobalDictionary* dictionary =
+ JSGlobalObject::cast(*holder)->global_dictionary();
+ dictionary->CellAt(dictionary_entry())->set_value(*value);
} else {
NameDictionary* dictionary = holder->property_dictionary();
dictionary->ValueAtPut(dictionary_entry(), *value);
@@ -831,12 +826,12 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
// Fall through.
case INTERCEPTOR:
if (!is_element && map->IsJSGlobalObjectMap()) {
- GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
+ GlobalDictionary* dict =
+ JSGlobalObject::cast(holder)->global_dictionary();
int number = dict->FindEntry(name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
- DCHECK(dict->ValueAt(number_)->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
+ PropertyCell* cell = dict->CellAt(number_);
if (cell->value()->IsTheHole(isolate_)) return NOT_FOUND;
property_details_ = cell->property_details();
has_property_ = true;
@@ -857,7 +852,6 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
UNREACHABLE();
}
UNREACHABLE();
- return NOT_FOUND;
}
template <bool is_element>
@@ -900,7 +894,6 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
}
UNREACHABLE();
- return state_;
}
Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
diff --git a/deps/v8/src/machine-type.cc b/deps/v8/src/machine-type.cc
index ba555dd36a..1761aa10cf 100644
--- a/deps/v8/src/machine-type.cc
+++ b/deps/v8/src/machine-type.cc
@@ -32,12 +32,6 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepFloat64";
case MachineRepresentation::kSimd128:
return "kRepSimd128";
- case MachineRepresentation::kSimd1x4:
- return "kRepSimd1x4";
- case MachineRepresentation::kSimd1x8:
- return "kRepSimd1x8";
- case MachineRepresentation::kSimd1x16:
- return "kRepSimd1x16";
case MachineRepresentation::kTaggedSigned:
return "kRepTaggedSigned";
case MachineRepresentation::kTaggedPointer:
@@ -46,7 +40,6 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepTagged";
}
UNREACHABLE();
- return nullptr;
}
std::ostream& operator<<(std::ostream& os, MachineSemantic type) {
@@ -69,7 +62,6 @@ std::ostream& operator<<(std::ostream& os, MachineSemantic type) {
return os << "kTypeAny";
}
UNREACHABLE();
- return os;
}
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 1f87cf297b..fddc11cb19 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -29,11 +29,8 @@ enum class MachineRepresentation {
kFloat32,
kFloat64,
kSimd128,
- kSimd1x4, // SIMD boolean vector types.
- kSimd1x8,
- kSimd1x16,
kFirstFPRepresentation = kFloat32,
- kLastRepresentation = kSimd1x16
+ kLastRepresentation = kSimd128
};
static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
@@ -130,16 +127,6 @@ class MachineType {
static MachineType Simd128() {
return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
}
- static MachineType Simd1x4() {
- return MachineType(MachineRepresentation::kSimd1x4, MachineSemantic::kNone);
- }
- static MachineType Simd1x8() {
- return MachineType(MachineRepresentation::kSimd1x8, MachineSemantic::kNone);
- }
- static MachineType Simd1x16() {
- return MachineType(MachineRepresentation::kSimd1x16,
- MachineSemantic::kNone);
- }
static MachineType Pointer() {
return MachineType(PointerRepresentation(), MachineSemantic::kNone);
}
@@ -186,16 +173,6 @@ class MachineType {
static MachineType RepSimd128() {
return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
}
- static MachineType RepSimd1x4() {
- return MachineType(MachineRepresentation::kSimd1x4, MachineSemantic::kNone);
- }
- static MachineType RepSimd1x8() {
- return MachineType(MachineRepresentation::kSimd1x8, MachineSemantic::kNone);
- }
- static MachineType RepSimd1x16() {
- return MachineType(MachineRepresentation::kSimd1x16,
- MachineSemantic::kNone);
- }
static MachineType RepTagged() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
}
@@ -224,12 +201,6 @@ class MachineType {
return MachineType::Float64();
case MachineRepresentation::kSimd128:
return MachineType::Simd128();
- case MachineRepresentation::kSimd1x4:
- return MachineType::Simd1x4();
- case MachineRepresentation::kSimd1x8:
- return MachineType::Simd1x8();
- case MachineRepresentation::kSimd1x16:
- return MachineType::Simd1x16();
case MachineRepresentation::kTagged:
return MachineType::AnyTagged();
case MachineRepresentation::kTaggedSigned:
@@ -238,7 +209,6 @@ class MachineType {
return MachineType::TaggedPointer();
default:
UNREACHABLE();
- return MachineType::None();
}
}
@@ -303,7 +273,6 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
break;
}
UNREACHABLE();
- return -1;
}
typedef Signature<MachineType> MachineSignature;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 77a402d5ca..c9600e03f6 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -28,10 +28,6 @@ enum AllocationFlags {
DOUBLE_ALIGNMENT = 1 << 2,
// Directly allocate in old space
PRETENURE = 1 << 3,
- // Allocation folding dominator
- ALLOCATION_FOLDING_DOMINATOR = 1 << 4,
- // Folded allocation
- ALLOCATION_FOLDED = 1 << 5
};
#if V8_TARGET_ARCH_IA32
@@ -56,8 +52,6 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
#include "src/s390/macro-assembler-s390.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -70,19 +64,19 @@ static constexpr int kMaxCParameters = 9;
class FrameScope {
public:
- explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
- : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
- masm->set_has_frame(true);
+ explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
+ : tasm_(tasm), type_(type), old_has_frame_(tasm->has_frame()) {
+ tasm->set_has_frame(true);
if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
- masm->EnterFrame(type);
+ tasm->EnterFrame(type);
}
}
~FrameScope() {
if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
- masm_->LeaveFrame(type_);
+ tasm_->LeaveFrame(type_);
}
- masm_->set_has_frame(old_has_frame_);
+ tasm_->set_has_frame(old_has_frame_);
}
// Normally we generate the leave-frame code when this object goes
@@ -92,11 +86,11 @@ class FrameScope {
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
- masm_->LeaveFrame(type_);
+ tasm_->LeaveFrame(type_);
}
private:
- MacroAssembler* masm_;
+ TurboAssembler* tasm_;
StackFrame::Type type_;
bool old_has_frame_;
};
diff --git a/deps/v8/src/managed.h b/deps/v8/src/managed.h
index d073cc558b..ebbfe33c16 100644
--- a/deps/v8/src/managed.h
+++ b/deps/v8/src/managed.h
@@ -20,11 +20,21 @@ namespace internal {
// address is typed as CppType**. The double indirection is due to the
// use, by Managed, of Isolate::ManagedObjectFinalizer, which has a CppType*
// first field.
+// Calling Foreign::set_foreign_address is not allowed on a Managed object.
template <class CppType>
class Managed : public Foreign {
+ class FinalizerWithHandle : public Isolate::ManagedObjectFinalizer {
+ public:
+ FinalizerWithHandle(void* value,
+ Isolate::ManagedObjectFinalizer::Deleter deleter)
+ : Isolate::ManagedObjectFinalizer(value, deleter) {}
+
+ Object** global_handle_location;
+ };
+
public:
V8_INLINE CppType* get() {
- return *(reinterpret_cast<CppType**>(foreign_address()));
+ return reinterpret_cast<CppType*>(GetFinalizer()->value());
}
static Managed<CppType>* cast(Object* obj) {
@@ -33,46 +43,42 @@ class Managed : public Foreign {
}
static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr) {
- Isolate::ManagedObjectFinalizer* node =
- isolate->RegisterForReleaseAtTeardown(ptr,
- Managed<CppType>::NativeDelete);
+ FinalizerWithHandle* finalizer =
+ new FinalizerWithHandle(ptr, &NativeDelete);
+ isolate->RegisterForReleaseAtTeardown(finalizer);
Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
- isolate->factory()->NewForeign(reinterpret_cast<Address>(node)));
- RegisterWeakCallbackForDelete(isolate, handle);
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(finalizer)));
+ Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+ finalizer->global_handle_location = global_handle.location();
+ GlobalHandles::MakeWeak(finalizer->global_handle_location,
+ handle->GetFinalizer(), &Managed<CppType>::GCDelete,
+ v8::WeakCallbackType::kParameter);
+
return handle;
}
private:
- static void RegisterWeakCallbackForDelete(Isolate* isolate,
- Handle<Managed<CppType>> handle) {
- Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- &Managed<CppType>::GCDelete,
- v8::WeakCallbackType::kFinalizer);
- }
-
static void GCDelete(const v8::WeakCallbackInfo<void>& data) {
- Managed<CppType>** p =
- reinterpret_cast<Managed<CppType>**>(data.GetParameter());
-
- Isolate::ManagedObjectFinalizer* finalizer = (*p)->GetFinalizer();
+ FinalizerWithHandle* finalizer =
+ reinterpret_cast<FinalizerWithHandle*>(data.GetParameter());
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- finalizer->Dispose();
- isolate->UnregisterFromReleaseAtTeardown(&finalizer);
+ isolate->UnregisterFromReleaseAtTeardown(finalizer);
- (*p)->set_foreign_address(static_cast<Address>(nullptr));
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ GlobalHandles::Destroy(finalizer->global_handle_location);
+ NativeDelete(finalizer);
}
- static void NativeDelete(void* value) {
- CppType* typed_value = reinterpret_cast<CppType*>(value);
+ static void NativeDelete(Isolate::ManagedObjectFinalizer* finalizer) {
+ CppType* typed_value = reinterpret_cast<CppType*>(finalizer->value());
delete typed_value;
+ FinalizerWithHandle* finalizer_with_handle =
+ static_cast<FinalizerWithHandle*>(finalizer);
+ delete finalizer_with_handle;
}
- Isolate::ManagedObjectFinalizer* GetFinalizer() {
- return reinterpret_cast<Isolate::ManagedObjectFinalizer*>(
- foreign_address());
+ FinalizerWithHandle* GetFinalizer() {
+ return reinterpret_cast<FinalizerWithHandle*>(foreign_address());
}
};
} // namespace internal
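
Note: the rewritten Managed<CppType> above keeps a single FinalizerWithHandle record reachable both from the weak global-handle callback (GC path) and from the isolate's teardown list, so the native object and its record are freed exactly once by whichever path runs first. A much-simplified sketch of that ownership shape, assuming a plain registry in place of the isolate and raw function pointers in place of ManagedObjectFinalizer:

#include <cstdio>
#include <unordered_set>

struct Finalizer {
  void* value;                    // the owned native object
  void (*deleter)(Finalizer*);    // how to free value and this record
  void** global_handle_location = nullptr;  // weak handle slot (unused here)
};

struct Registry {  // stands in for the isolate's teardown list
  std::unordered_set<Finalizer*> live;
  void Register(Finalizer* f) { live.insert(f); }
  void Unregister(Finalizer* f) { live.erase(f); }
  ~Registry() {  // isolate teardown path: free whatever the GC never reclaimed
    for (Finalizer* f : live) f->deleter(f);
  }
};

template <class T>
void NativeDelete(Finalizer* f) {
  delete static_cast<T*>(f->value);
  delete f;
}

template <class T>
void GCDelete(Registry* registry, Finalizer* f) {  // weak-callback path
  registry->Unregister(f);  // so teardown cannot free it a second time
  NativeDelete<T>(f);
}

int main() {
  Registry registry;
  auto* f = new Finalizer{new int(42), &NativeDelete<int>};
  registry.Register(f);
  GCDelete<int>(&registry, f);  // simulate the wrapper dying before teardown
  std::puts("native object freed exactly once");
  return 0;
}

Unregistering before deleting is what keeps the two cleanup paths from double-freeing, mirroring the UnregisterFromReleaseAtTeardown call in GCDelete above.
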
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index b71b9afce4..3f29295bd8 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -148,7 +148,7 @@ void MessageHandler::ReportMessageNoExceptions(
FixedArray* listener = FixedArray::cast(global_listeners->get(i));
Foreign* callback_obj = Foreign::cast(listener->get(0));
int32_t message_levels =
- static_cast<int32_t>(Smi::cast(listener->get(2))->value());
+ static_cast<int32_t>(Smi::ToInt(listener->get(2)));
if (!(message_levels & error_level)) {
continue;
}
@@ -302,7 +302,7 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
offset_ = array->Offset(frame_ix)->value();
const int flags = array->Flags(frame_ix)->value();
- force_constructor_ = (flags & FrameArray::kForceConstructor) != 0;
+ is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
is_strict_ = (flags & FrameArray::kIsStrict) != 0;
}
@@ -316,7 +316,7 @@ JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
function_(function),
code_(code),
offset_(offset),
- force_constructor_(false),
+ is_constructor_(false),
is_strict_(false) {}
Handle<Object> JSStackFrame::GetFunction() const {
@@ -383,19 +383,16 @@ Handle<Object> JSStackFrame::GetMethodName() {
}
Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
- Handle<Object> function_name(function_->shared()->name(), isolate_);
- if (function_name->IsString()) {
- Handle<String> name = Handle<String>::cast(function_name);
- // ES2015 gives getters and setters name prefixes which must
- // be stripped to find the property name.
- if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
- name->IsUtf8EqualTo(CStrVector("set "), true)) {
- name = isolate_->factory()->NewProperSubString(name, 4, name->length());
- }
- if (CheckMethodName(isolate_, obj, name, function_,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
- return name;
- }
+ Handle<String> name(function_->shared()->name(), isolate_);
+ // ES2015 gives getters and setters name prefixes which must
+ // be stripped to find the property name.
+ if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
+ name->IsUtf8EqualTo(CStrVector("set "), true)) {
+ name = isolate_->factory()->NewProperSubString(name, 4, name->length());
+ }
+ if (CheckMethodName(isolate_, obj, name, function_,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
+ return name;
}
HandleScope outer_scope(isolate_);
@@ -461,15 +458,6 @@ bool JSStackFrame::IsToplevel() {
return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
}
-bool JSStackFrame::IsConstructor() {
- if (force_constructor_) return true;
- if (!receiver_->IsJSObject()) return false;
- Handle<Object> constructor =
- JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
- isolate_->factory()->constructor_string());
- return constructor.is_identical_to(function_);
-}
-
namespace {
bool IsNonEmptyString(Handle<Object> object) {
@@ -656,16 +644,16 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
offset_ = array->Offset(frame_ix)->value();
}
+Handle<Object> WasmStackFrame::GetReceiver() const { return wasm_instance_; }
+
Handle<Object> WasmStackFrame::GetFunction() const {
- Handle<Object> obj(Smi::FromInt(wasm_func_index_), isolate_);
- return obj;
+ return handle(Smi::FromInt(wasm_func_index_), isolate_);
}
Handle<Object> WasmStackFrame::GetFunctionName() {
Handle<Object> name;
- Handle<WasmCompiledModule> compiled_module(
- Handle<WasmInstanceObject>::cast(wasm_instance_)->compiled_module(),
- isolate_);
+ Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
+ isolate_);
if (!WasmCompiledModule::GetFunctionNameOrNull(isolate_, compiled_module,
wasm_func_index_)
.ToHandle(&name)) {
@@ -677,25 +665,36 @@ Handle<Object> WasmStackFrame::GetFunctionName() {
MaybeHandle<String> WasmStackFrame::ToString() {
IncrementalStringBuilder builder(isolate_);
- Handle<Object> name = GetFunctionName();
- if (name->IsNull(isolate_)) {
- builder.AppendCString("<WASM UNNAMED>");
- } else {
- DCHECK(name->IsString());
- builder.AppendString(Handle<String>::cast(name));
+ Handle<WasmCompiledModule> compiled_module(wasm_instance_->compiled_module(),
+ isolate_);
+ MaybeHandle<String> module_name =
+ WasmCompiledModule::GetModuleNameOrNull(isolate_, compiled_module);
+ MaybeHandle<String> function_name = WasmCompiledModule::GetFunctionNameOrNull(
+ isolate_, compiled_module, wasm_func_index_);
+ bool has_name = !module_name.is_null() || !function_name.is_null();
+ if (has_name) {
+ if (module_name.is_null()) {
+ builder.AppendString(function_name.ToHandleChecked());
+ } else {
+ builder.AppendString(module_name.ToHandleChecked());
+ if (!function_name.is_null()) {
+ builder.AppendCString(".");
+ builder.AppendString(function_name.ToHandleChecked());
+ }
+ }
+ builder.AppendCString(" (");
}
- builder.AppendCString(" (<WASM>[");
+ builder.AppendCString("wasm-function[");
char buffer[16];
- SNPrintF(ArrayVector(buffer), "%u", wasm_func_index_);
+ SNPrintF(ArrayVector(buffer), "%u]", wasm_func_index_);
builder.AppendCString(buffer);
- builder.AppendCString("]+");
-
- SNPrintF(ArrayVector(buffer), "%d", GetPosition());
+ SNPrintF(ArrayVector(buffer), ":%d", GetPosition());
builder.AppendCString(buffer);
- builder.AppendCString(")");
+
+ if (has_name) builder.AppendCString(")");
return builder.Finish();
}
@@ -831,7 +830,6 @@ StackFrameBase* FrameArrayIterator::Frame() {
return &asm_wasm_frame_;
default:
UNREACHABLE();
- return nullptr;
}
}
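
Note: the reworked WasmStackFrame::ToString above switches the frame text to "module.function (wasm-function[index]:position)", dropping the name prefix and the surrounding parentheses when neither a module nor a function name is known. A standalone sketch of the same string construction, using std::string and std::optional in place of V8's IncrementalStringBuilder and MaybeHandle:

#include <cstdio>
#include <optional>
#include <string>

std::string WasmFrameText(std::optional<std::string> module_name,
                          std::optional<std::string> function_name,
                          unsigned func_index, int position) {
  std::string out;
  bool has_name = module_name.has_value() || function_name.has_value();
  if (has_name) {
    if (!module_name) {
      out += *function_name;
    } else {
      out += *module_name;
      if (function_name) out += "." + *function_name;
    }
    out += " (";
  }
  out += "wasm-function[" + std::to_string(func_index) +
         "]:" + std::to_string(position);
  if (has_name) out += ")";
  return out;
}

int main() {
  // Prints "mod.f (wasm-function[3]:17)" and "wasm-function[3]:17".
  std::printf("%s\n", WasmFrameText("mod", "f", 3, 17).c_str());
  std::printf("%s\n",
              WasmFrameText(std::nullopt, std::nullopt, 3, 17).c_str());
  return 0;
}
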
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 7df7288662..c3cadba355 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -25,6 +25,7 @@ class JSMessageObject;
class LookupIterator;
class SharedFunctionInfo;
class SourceInfo;
+class WasmInstanceObject;
class MessageLocation {
public:
@@ -105,7 +106,7 @@ class JSStackFrame : public StackFrameBase {
bool IsNative() override;
bool IsToplevel() override;
- bool IsConstructor() override;
+ bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
MaybeHandle<String> ToString() override;
@@ -122,7 +123,7 @@ class JSStackFrame : public StackFrameBase {
Handle<AbstractCode> code_;
int offset_;
- bool force_constructor_;
+ bool is_constructor_;
bool is_strict_;
friend class FrameArrayIterator;
@@ -132,7 +133,7 @@ class WasmStackFrame : public StackFrameBase {
public:
virtual ~WasmStackFrame() {}
- Handle<Object> GetReceiver() const override { return wasm_instance_; }
+ Handle<Object> GetReceiver() const override;
Handle<Object> GetFunction() const override;
Handle<Object> GetFileName() override { return Null(); }
@@ -159,8 +160,7 @@ class WasmStackFrame : public StackFrameBase {
bool HasScript() const override;
Handle<Script> GetScript() const override;
- // TODO(wasm): Use proper typing.
- Handle<Object> wasm_instance_;
+ Handle<WasmInstanceObject> wasm_instance_;
uint32_t wasm_func_index_;
Handle<AbstractCode> code_; // null handle for interpreted frames.
int offset_;
@@ -330,7 +330,9 @@ class ErrorUtils : public AllStatic {
T(NoAccess, "no access") \
T(NonCallableInInstanceOfCheck, \
"Right-hand side of 'instanceof' is not callable") \
- T(NonCoercible, "Cannot match against 'undefined' or 'null'.") \
+ T(NonCoercible, "Cannot destructure 'undefined' or 'null'.") \
+ T(NonCoercibleWithProperty, \
+ "Cannot destructure property `%` of 'undefined' or 'null'.") \
T(NonExtensibleProto, "% is not extensible") \
T(NonObjectInInstanceOfCheck, \
"Right-hand side of 'instanceof' is not an object") \
@@ -343,6 +345,7 @@ class ErrorUtils : public AllStatic {
T(NotDateObject, "this is not a Date object.") \
T(NotGeneric, "% requires that 'this' be a %") \
T(NotIterable, "% is not iterable") \
+ T(NotAsyncIterable, "% is not async iterable") \
T(NotPropertyName, "% is not a valid property name") \
T(NotTypedArray, "this is not a typed array.") \
T(NotSuperConstructor, "Super constructor % of % is not a constructor") \
@@ -461,9 +464,6 @@ class ErrorUtils : public AllStatic {
T(RegExpNonObject, "% getter called on non-object %") \
T(RegExpNonRegExp, "% getter called on non-RegExp object") \
T(ResolverNotAFunction, "Promise resolver % is not a function") \
- T(RestrictedFunctionProperties, \
- "'caller' and 'arguments' are restricted function properties and cannot " \
- "be accessed in this context.") \
T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
T(SharedArrayBufferTooShort, \
"Derived SharedArrayBuffer constructor created a buffer which was too " \
@@ -566,6 +566,11 @@ class ErrorUtils : public AllStatic {
T(IllegalLanguageModeDirective, \
"Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
+ T(InvalidRestBindingPattern, \
+ "`...` must be followed by an identifier in declaration contexts") \
+ T(InvalidRestAssignmentPattern, \
+ "`...` must be followed by an assignable reference in assignment " \
+ "contexts") \
T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
T(InvalidEscapedMetaProperty, "'%' must not contain escaped characters") \
T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
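
Note: the message and error template lists being edited above are X-macro tables — each T(Name, "text") entry is expanded several times to generate enum values, message text lookups, and so on. A small self-contained illustration of that expansion style; MESSAGE_LIST and MessageId are made-up names, not V8's actual macros.

#include <cstdio>

#define MESSAGE_LIST(T)                                          \
  T(NonCoercible, "Cannot destructure 'undefined' or 'null'.")   \
  T(NotIterable, "% is not iterable")

enum class MessageId {
#define DEFINE_ID(name, text) k##name,
  MESSAGE_LIST(DEFINE_ID)
#undef DEFINE_ID
  kCount
};

const char* MessageText(MessageId id) {
  switch (id) {
#define DEFINE_CASE(name, text) \
  case MessageId::k##name:      \
    return text;
    MESSAGE_LIST(DEFINE_CASE)
#undef DEFINE_CASE
    default:
      return "<unknown>";
  }
}

int main() {
  std::printf("%s\n", MessageText(MessageId::kNonCoercible));
  return 0;
}
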
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 9233913528..6c7dfd3c47 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -56,21 +56,21 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
- imm32_ = immediate;
+ value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
+ value_.immediate = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
+ value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
@@ -137,7 +137,6 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
- return NULL;
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 3a37c16e5a..4612ccb73f 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -38,6 +38,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/mips/assembler-mips-inl.h"
namespace v8 {
@@ -225,21 +226,27 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
-Operand::Operand(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // No relocation needed.
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
}
+Operand Operand::EmbeddedCode(CodeStub* stub) {
+ Operand result(0, RelocInfo::CODE_TARGET);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(stub);
+ return result;
+}
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
@@ -251,6 +258,24 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
offset_ = unit * multiplier + offset_addend;
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ set_target_value_at(isolate, pc,
+ reinterpret_cast<uint32_t>(object.location()));
+ }
+}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -290,8 +315,7 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()) {
+ : AssemblerBase(isolate_data, buffer, buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
@@ -307,14 +331,14 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
-
- ClearRecordedAstId();
}
-
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -328,7 +352,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
@@ -480,6 +504,29 @@ const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
+bool Assembler::IsMsaBranch(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ if (opcode == COP1) {
+ switch (rs_field) {
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+}
bool Assembler::IsBranch(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -493,7 +540,7 @@ bool Assembler::IsBranch(Instr instr) {
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
- (opcode == COP1 && rs_field == BC1NEZ);
+ (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
if (!isBranch && IsMipsArchVariant(kMips32r6)) {
// All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
// POP30 (BNVC, BNEC, BNEZALC) are branch ops.
@@ -1448,6 +1495,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1518,6 +1566,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1554,6 +1603,7 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1562,6 +1612,7 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1569,6 +1620,7 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1578,6 +1630,7 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1585,6 +1638,7 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1593,6 +1647,7 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1601,6 +1656,7 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1902,107 +1958,151 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-// Helper for base-reg + offset, when offset is larger than int16.
-void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- if (IsMipsArchVariant(kMips32r6)) {
- int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
- if (src.offset_ & kNegOffset) {
- hi += 1;
- }
- aui(at, src.rm(), hi);
- addiu(at, at, src.offset_ & kImm16Mask);
- } else {
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- addu(at, at, src.rm()); // Add base register.
+void Assembler::AdjustBaseAndOffset(MemOperand& src,
+ OffsetAccessType access_type,
+ int second_access_add_to_offset) {
+ // This method is used to adjust the base register and offset pair
+ // for a load/store when the offset doesn't fit into int16_t.
+ // It is assumed that 'base + offset' is sufficiently aligned for memory
+ // operands that are machine word in size or smaller. For doubleword-sized
+ // operands it's assumed that 'base' is a multiple of 8, while 'offset'
+ // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
+ // and spilled variables on the stack accessed relative to the stack
+ // pointer register).
+ // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
+
+ bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
+ DCHECK(second_access_add_to_offset <= 7); // Must be <= 7.
+
+ // is_int16 must be passed a signed value, hence the static cast below.
+ if (is_int16(src.offset()) &&
+ (!two_accesses || is_int16(static_cast<int32_t>(
+ src.offset() + second_access_add_to_offset)))) {
+ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
+ // value) fits into int16_t.
+ return;
}
-}
-// Helper for base-reg + upper part of offset, when offset is larger than int16.
-// Loads higher part of the offset to AT register.
-// Returns lower part of the offset to be used as offset
-// in Load/Store instructions
-int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
- // If the highest bit of the lower part of the offset is 1, this would make
- // the offset in the load/store instruction negative. We need to compensate
- // for this by adding 1 to the upper part of the offset.
- if (src.offset_ & kNegOffset) {
- hi += 1;
- }
+ DCHECK(!src.rm().is(
+ at)); // Must not overwrite the register 'base' while loading 'offset'.
- if (IsMipsArchVariant(kMips32r6)) {
- aui(at, src.rm(), hi);
+#ifdef DEBUG
+ // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
+ uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+#endif
+
+ // Do not load the whole 32-bit 'offset' if it can be represented as
+ // a sum of two 16-bit signed offsets. This can save an instruction or two.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -64KB to about +64KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForSimpleAdjustment =
+ 0x7ff8; // Max int16_t that's a multiple of 8.
+ constexpr int32_t kMaxOffsetForSimpleAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
+ addiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
+ src.offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
+ src.offset() < 0) {
+ addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
+ src.offset_ += kMinOffsetForSimpleAdjustment;
+ } else if (IsMipsArchVariant(kMips32r6)) {
+ // On r6 take advantage of the aui instruction, e.g.:
+ // aui at, base, offset_high
+ // lw reg_lo, offset_low(at)
+ // lw reg_hi, (offset_low+4)(at)
+ // or when offset_low+4 overflows int16_t:
+ // aui at, base, offset_high
+ // addiu at, at, 8
+ // lw reg_lo, (offset_low-8)(at)
+ // lw reg_hi, (offset_low-4)(at)
+ int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
+ int16_t offset_low = static_cast<uint16_t>(src.offset());
+ offset_high += (offset_low < 0)
+ ? 1
+ : 0; // Account for offset sign extension in load/store.
+ aui(at, src.rm(), static_cast<uint16_t>(offset_high));
+ if (two_accesses && !is_int16(static_cast<int32_t>(
+ offset_low + second_access_add_to_offset))) {
+ // Avoid overflow in the 16-bit offset of the load/store instruction when
+ // adding 4.
+ addiu(at, at, kDoubleSize);
+ offset_low -= kDoubleSize;
+ }
+ src.offset_ = offset_low;
} else {
- lui(at, hi);
- addu(at, at, src.rm());
+ // Do not load the whole 32-bit 'offset' if it can be represented as
+ // a sum of three 16-bit signed offsets. This can save an instruction.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -96KB to about +96KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForMediumAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ constexpr int32_t kMaxOffsetForMediumAdjustment =
+ 3 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
+ addiu(at, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ addiu(at, at, kMinOffsetForMediumAdjustment / 2);
+ src.offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
+ src.offset() < 0) {
+ addiu(at, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ addiu(at, at, -kMinOffsetForMediumAdjustment / 2);
+ src.offset_ += kMinOffsetForMediumAdjustment;
+ } else {
+ // Now that all shorter options have been exhausted, load the full 32-bit
+ // offset.
+ int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ lui(at, (loaded_offset >> kLuiShift) & kImm16Mask);
+ ori(at, at, loaded_offset & kImm16Mask); // Load 32-bit offset.
+ addu(at, at, src.rm());
+ src.offset_ -= loaded_offset;
+ }
}
- return (src.offset_ & kImm16Mask);
-}
+ src.rm_ = at;
-// Helper for loading base-reg + upper offset's part to AT reg when we are using
-// two 32-bit loads/stores instead of one 64-bit
-int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) {
- // Only if lower part of offset + kIntSize fits in 16bits
- return LoadRegPlusUpperOffsetPartToAt(src);
+ DCHECK(is_int16(src.offset()));
+ if (two_accesses) {
+ DCHECK(is_int16(
+ static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
}
- // In case offset's lower part + kIntSize doesn't fit in 16bits,
- // load reg + hole offset to AT
- LoadRegPlusOffsetToAt(src);
- return 0;
+ DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
}
void Assembler::lb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LB, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(LB, source.rm(), rd, source.offset());
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LBU, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}
void Assembler::lh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LH, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(LH, source.rm(), rd, source.offset());
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LHU, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}
void Assembler::lw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LW, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(LW, source.rm(), rd, source.offset());
}
@@ -2023,32 +2123,23 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SB, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(SB, source.rm(), rd, source.offset());
}
void Assembler::sh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SH, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(SH, source.rm(), rd, source.offset());
}
void Assembler::sw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SW, at, rd, off16);
- }
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ GenInstrImmediate(SW, source.rm(), rd, source.offset());
}
@@ -2333,22 +2424,16 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(LWC1, at, fd, off16);
- }
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp);
+ GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(SWC1, at, fd, off16);
- }
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp);
+ GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}
@@ -3024,14 +3109,17 @@ MSA_BRANCH_LIST(MSA_BRANCH)
V(st_w, ST_W) \
V(st_d, ST_D)
-#define MSA_LD_ST(name, opcode) \
- void Assembler::name(MSARegister wd, const MemOperand& rs) { \
- if (is_int10(rs.offset())) { \
- GenInstrMsaMI10(opcode, rs.offset(), rs.rm(), wd); \
- } else { \
- LoadRegPlusOffsetToAt(rs); \
- GenInstrMsaMI10(opcode, 0, at, wd); \
- } \
+#define MSA_LD_ST(name, opcode) \
+ void Assembler::name(MSARegister wd, const MemOperand& rs) { \
+ MemOperand source = rs; \
+ AdjustBaseAndOffset(source); \
+ if (is_int10(source.offset())) { \
+ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
+ } else { \
+ DCHECK(!rs.rm().is(at)); \
+ addiu(at, source.rm(), source.offset()); \
+ GenInstrMsaMI10(opcode, 0, at, wd); \
+ } \
}
MSA_LD_ST_LIST(MSA_LD_ST)
@@ -3539,7 +3627,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
return 2; // Number of instructions patched.
} else {
UNREACHABLE();
- return 0;
}
}
}
@@ -3558,9 +3645,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -3648,14 +3733,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
+ reloc_info_writer.Write(&rinfo);
}
}
@@ -3769,7 +3847,6 @@ Address Assembler::target_address_at(Address pc) {
// We should never get here, force a bad address if we do.
UNREACHABLE();
- return (Address)0x0;
}
@@ -3791,15 +3868,14 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode) {
+void Assembler::set_target_value_at(Isolate* isolate, Address pc,
+ uint32_t target,
+ ICacheFlushMode icache_flush_mode) {
DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
Instr instr2 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
- uint32_t itarget = reinterpret_cast<uint32_t>(target);
#ifdef DEBUG
// Check we have the result from a li macro-instruction, using instr pair.
@@ -3810,7 +3886,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
if (IsJicOrJialc(instr2)) {
// Must use 2 instructions to insure patchable code => use lui and jic
uint32_t lui_offset, jic_offset;
- Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);
+ Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
*p &= ~kImm16Mask;
*(p + 1) &= ~kImm16Mask;
@@ -3822,8 +3898,8 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
// Must use 2 instructions to insure patchable code => just use lui and ori.
// lui rt, upper-16.
// ori rt rt, lower-16.
- *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ *p = LUI | rt_code | ((target & kHiMask) >> kLuiShift);
+ *(p + 1) = ORI | rt_code | (rt_code << 5) | (target & kImm16Mask);
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
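
Note on the AdjustBaseAndOffset helper added above: it replaces the three LoadRegPlusOffset* helpers with a single routine that folds just enough of an out-of-range offset into the base register (one addiu, two addius, an r6 aui, or a full lui/ori/addu) so the remainder fits the 16-bit offset field, while preserving the offset's alignment modulo 8. A reduced sketch of that decision as plain arithmetic, ignoring the r6 aui path and the two-access case:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Adjustment {
  int32_t add_to_base;   // total amount folded into the base register
  int32_t final_offset;  // offset left for the 16-bit load/store field
};

Adjustment Split(int32_t offset) {
  constexpr int32_t kSimple = 0x7ff8;  // max int16_t that is a multiple of 8
  if (offset >= -0x8000 && offset <= 0x7fff) return {0, offset};
  if (offset >= 0 && offset <= 2 * kSimple) return {kSimple, offset - kSimple};
  if (offset < 0 && offset >= -2 * kSimple) return {-kSimple, offset + kSimple};
  if (offset >= 0 && offset <= 3 * kSimple)
    return {2 * kSimple, offset - 2 * kSimple};
  if (offset < 0 && offset >= -3 * kSimple)
    return {-2 * kSimple, offset + 2 * kSimple};
  // Load the full offset, rounded down to a multiple of 8 so the doubleword
  // alignment of the remainder is preserved.
  int32_t loaded = offset & ~int32_t{7};
  return {loaded, offset - loaded};
}

int main() {
  for (int32_t off : {12, 40000, -40000, 90000, 1 << 20}) {
    Adjustment a = Split(off);
    std::printf("%d -> base += %d, offset = %d\n", off, a.add_to_base,
                a.final_offset);
    if (a.final_offset < -0x8000 || a.final_offset > 0x7fff) std::abort();
  }
  return 0;
}

The 0x7ff8 constant is the largest int16_t that is a multiple of 8, which is what keeps doubleword-aligned offsets aligned after the adjustment.
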
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 7df318b9ab..1fc4d21132 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -423,9 +423,12 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
- explicit Operand(Handle<Object> handle);
+ explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedCode(CodeStub* stub);
+
// Register.
INLINE(explicit Operand(Register rm));
@@ -434,7 +437,23 @@ class Operand BASE_EMBEDDED {
inline int32_t immediate() const {
DCHECK(!is_reg());
- return imm32_;
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+ }
+
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
}
Register rm() const { return rm_; }
@@ -443,11 +462,16 @@ class Operand BASE_EMBEDDED {
private:
Register rm_;
- int32_t imm32_; // Valid if rm_ == no_reg.
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int32_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
- friend class MacroAssembler;
+ // friend class MacroAssembler;
};
@@ -500,7 +524,7 @@ class Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D).
//
@@ -567,9 +591,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
- static void set_target_address_at(
- Isolate* isolate, Address pc, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ INLINE(static void set_target_address_at)
+ (Isolate* isolate, Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_value_at(isolate, pc, reinterpret_cast<uint32_t>(target),
+ icache_flush_mode);
+ }
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
@@ -584,6 +611,10 @@ class Assembler : public AssemblerBase {
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ static void set_target_value_at(
+ Isolate* isolate, Address pc, uint32_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
@@ -1100,15 +1131,45 @@ class Assembler : public AssemblerBase {
// MSA instructions
void bz_v(MSARegister wt, int16_t offset);
+ inline void bz_v(MSARegister wt, Label* L) {
+ bz_v(wt, shifted_branch_offset(L));
+ }
void bz_b(MSARegister wt, int16_t offset);
+ inline void bz_b(MSARegister wt, Label* L) {
+ bz_b(wt, shifted_branch_offset(L));
+ }
void bz_h(MSARegister wt, int16_t offset);
+ inline void bz_h(MSARegister wt, Label* L) {
+ bz_h(wt, shifted_branch_offset(L));
+ }
void bz_w(MSARegister wt, int16_t offset);
+ inline void bz_w(MSARegister wt, Label* L) {
+ bz_w(wt, shifted_branch_offset(L));
+ }
void bz_d(MSARegister wt, int16_t offset);
+ inline void bz_d(MSARegister wt, Label* L) {
+ bz_d(wt, shifted_branch_offset(L));
+ }
void bnz_v(MSARegister wt, int16_t offset);
+ inline void bnz_v(MSARegister wt, Label* L) {
+ bnz_v(wt, shifted_branch_offset(L));
+ }
void bnz_b(MSARegister wt, int16_t offset);
+ inline void bnz_b(MSARegister wt, Label* L) {
+ bnz_b(wt, shifted_branch_offset(L));
+ }
void bnz_h(MSARegister wt, int16_t offset);
+ inline void bnz_h(MSARegister wt, Label* L) {
+ bnz_h(wt, shifted_branch_offset(L));
+ }
void bnz_w(MSARegister wt, int16_t offset);
+ inline void bnz_w(MSARegister wt, Label* L) {
+ bnz_w(wt, shifted_branch_offset(L));
+ }
void bnz_d(MSARegister wt, int16_t offset);
+ inline void bnz_d(MSARegister wt, Label* L) {
+ bnz_d(wt, shifted_branch_offset(L));
+ }
void ld_b(MSARegister wd, const MemOperand& rs);
void ld_h(MSARegister wd, const MemOperand& rs);
@@ -1705,20 +1766,6 @@ class Assembler : public AssemblerBase {
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- DCHECK(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- TypeFeedbackId RecordedAstId() {
- DCHECK(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1763,6 +1810,7 @@ class Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsMsaBranch(Instr instr);
static bool IsBc(Instr instr);
static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);
@@ -1849,15 +1897,18 @@ class Assembler : public AssemblerBase {
// Load Scaled Address instruction.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
- int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
- int32_t LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src);
+ // Readable constants for base and offset adjustment helper, these indicate if
+ // aside from offset, another value like offset + 4 should fit into int16.
+ enum class OffsetAccessType : bool {
+ SINGLE_ACCESS = false,
+ TWO_ACCESSES = true
+ };
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
+ // Helper function for memory load/store using base register and offset.
+ void AdjustBaseAndOffset(
+ MemOperand& src,
+ OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
+ int second_access_add_to_offset = 4);
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -2204,6 +2255,23 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ protected:
+ // TODO(neis): Make private if its use can be moved out of TurboAssembler.
+ void RequestHeapObject(HeapObjectRequest request);
+
+ private:
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class CodePatcher;
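
Note: the heap_object_requests_ machinery declared above (implemented in AllocateAndInstallRequestedHeapObjects earlier in this diff) lets the assembler emit a placeholder for an embedded heap number or code stub and patch in the real object only when GetCode(isolate, ...) runs. A much-simplified sketch of that record-then-patch pattern, with a byte buffer and a deque of doubles standing in for the generated code and the heap:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <deque>
#include <forward_list>
#include <vector>

struct Request {
  double value;      // what will eventually be "allocated"
  size_t pc_offset;  // where the placeholder slot was emitted
};

class MiniAssembler {
 public:
  void EmitNumber(double value) {
    requests_.push_front({value, buffer_.size()});
    buffer_.resize(buffer_.size() + sizeof(uintptr_t));  // placeholder slot
  }
  // Stand-in for GetCode(isolate, ...): allocate the deferred objects and
  // patch their addresses into the recorded offsets.
  std::vector<uint8_t> Finish(std::deque<double>* heap) {
    for (const Request& r : requests_) {
      heap->push_back(r.value);  // deque keeps earlier references valid
      uintptr_t addr = reinterpret_cast<uintptr_t>(&heap->back());
      std::memcpy(buffer_.data() + r.pc_offset, &addr, sizeof(addr));
    }
    return buffer_;
  }

 private:
  std::vector<uint8_t> buffer_;
  std::forward_list<Request> requests_;
};

int main() {
  MiniAssembler masm;
  masm.EmitNumber(3.5);
  masm.EmitNumber(-1.0);
  std::deque<double> heap;
  std::vector<uint8_t> code = masm.Finish(&heap);
  std::printf("emitted %zu bytes, %zu deferred objects\n", code.size(),
              heap.size());
  return 0;
}
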
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 0fcdafca21..0f1efbf736 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -46,32 +46,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- a0.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments, adjust sp.
- __ Subu(sp, sp, Operand(param_count * kPointerSize));
- for (int i = 0; i < param_count; ++i) {
- // Store argument to stack.
- __ sw(descriptor.GetRegisterParameter(i),
- MemOperand(sp, (param_count - 1 - i) * kPointerSize));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@@ -875,14 +849,11 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1031,7 +1002,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
__ lw(a2, MemOperand(a2));
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -1059,15 +1030,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set v0 to
// contain the current pending exception, don't clobber it.
@@ -1144,7 +1115,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
StackFrame::Type marker = type();
__ li(t2, Operand(StackFrame::TypeToMarker(marker)));
__ li(t1, Operand(StackFrame::TypeToMarker(marker)));
- __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ __ li(t0, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate)));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
@@ -1169,7 +1140,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate);
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ lw(t2, MemOperand(t1));
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -1192,8 +1163,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ __ li(t0, Operand(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
__ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
@@ -1253,7 +1224,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(t1);
- __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ __ li(t0, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate)));
__ sw(t1, MemOperand(t0));
@@ -1628,34 +1599,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : left
- // -- a0 : right
- // -- ra : return address
- // -----------------------------------
-
- // Load a2 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ li(a2, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ And(at, a2, Operand(kSmiTagMask));
- __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
- __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -2130,7 +2073,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Restore the properties.
__ lw(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask =
@@ -2138,7 +2081,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
a2.bit() | a1.bit() | a0.bit() | v0.bit());
__ MultiPush(spill_mask);
- __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ li(a1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2354,10 +2297,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -2374,6 +2318,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
+#endif
// Get the value from the slot.
__ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -2425,20 +2370,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ lw(a1, MemOperand(fp, parameter_count_offset));
- if (function_mode() == JS_FUNCTION_STUB_MODE) {
- __ Addu(a1, a1, Operand(1));
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ tasm->push(ra);
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ tasm->pop(ra);
}
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ sll(a1, a1, kPointerSizeLog2);
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, a1);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -2479,7 +2417,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(s5, sp);
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
__ Subu(sp, sp, kCArgsSlotsSize);
@@ -2521,8 +2459,8 @@ static void CreateArrayDispatch(MacroAssembler* masm,
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(masm->isolate(), kind);
@@ -2544,23 +2482,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// a0 - number of arguments
// a1 - constructor?
// sp[0] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ And(at, a3, Operand(1));
- __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
- }
-
- // look at the first argument
- __ lw(t1, MemOperand(sp, 0));
- __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2570,13 +2497,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ // is the low bit set? If so, we are holey and that is good.
+ Label normal_sequence;
+ __ And(at, a3, Operand(1));
+ __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
@@ -2591,14 +2517,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ lw(t0, FieldMemOperand(
+ a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
-
+ __ sw(t0, FieldMemOperand(
+ a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
@@ -2615,13 +2542,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2635,7 +2562,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2702,7 +2629,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ lw(a3, FieldMemOperand(
+ a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2780,19 +2708,18 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
- __ Assert(
- eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
- a3, Operand(FAST_HOLEY_ELEMENTS));
+ __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
+ Operand(HOLEY_ELEMENTS));
__ bind(&done);
}
Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
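
The one-argument dispatch above leans on the renamed elements-kind encoding: packed kinds are even, their holey counterparts are the same value plus one, so "is it holey?" is a single low-bit test and packed-to-holey is an increment. A minimal standalone sketch of that encoding, mirroring the STATIC_ASSERTs in the stub rather than V8's actual headers:

#include <cassert>

enum ElementsKind {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4,
  HOLEY_DOUBLE_ELEMENTS = 5
};

// The same test the generated code performs with And(at, a3, Operand(1)).
inline bool IsHoleyElementsKind(ElementsKind kind) { return (kind & 1) != 0; }

// The same transition the stub applies with Addu(a3, a3, Operand(1)) before
// writing the kind back into the AllocationSite transition info field.
inline ElementsKind GetHoleyElementsKind(ElementsKind kind) {
  return IsHoleyElementsKind(kind) ? kind
                                   : static_cast<ElementsKind>(kind + 1);
}

int main() {
  static_assert(HOLEY_ELEMENTS == PACKED_ELEMENTS + 1, "holey = packed + 1");
  assert(!IsHoleyElementsKind(PACKED_DOUBLE_ELEMENTS));
  assert(GetHoleyElementsKind(PACKED_DOUBLE_ELEMENTS) == HOLEY_DOUBLE_ELEMENTS);
  return 0;
}
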
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 2d4730e557..85f06714f3 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -543,7 +543,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ nop();
}
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
@@ -571,7 +571,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index df1d77136c..3fd2da47d7 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -161,6 +161,11 @@ const int kInvalidMSARegister = -1;
const int kInvalidMSAControlRegister = -1;
const int kMSAIRRegister = 0;
const int kMSACSRRegister = 1;
+const int kMSARegSize = 128;
+const int kMSALanesByte = kMSARegSize / 8;
+const int kMSALanesHalf = kMSARegSize / 16;
+const int kMSALanesWord = kMSARegSize / 32;
+const int kMSALanesDword = kMSARegSize / 64;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
@@ -370,6 +375,16 @@ const int kImm5Mask = ((1 << 5) - 1);
const int kImm8Mask = ((1 << 8) - 1);
const int kImm10Mask = ((1 << 10) - 1);
const int kMsaI5I10Mask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1));
+const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsaMI10Mask = (15U << 2);
+const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsaELMMask = (15U << 22);
+const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1));
+const int kMsaVECMask = (23U << 21);
+const int kMsa2RMask = (7U << 18);
+const int kMsa2RFMask = (15U << 17);
const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
@@ -1046,6 +1061,36 @@ inline Condition NegateFpuCondition(Condition cc) {
}
}
+enum MSABranchCondition {
+ all_not_zero = 0, // Branch If All Elements Are Not Zero
+ one_elem_not_zero, // Branch If At Least One Element of Any Format Is Not
+ // Zero
+ one_elem_zero, // Branch If At Least One Element Is Zero
+ all_zero // Branch If All Elements of Any Format Are Zero
+};
+
+inline MSABranchCondition NegateMSABranchCondition(MSABranchCondition cond) {
+ switch (cond) {
+ case all_not_zero:
+ return one_elem_zero;
+ case one_elem_not_zero:
+ return all_zero;
+ case one_elem_zero:
+ return all_not_zero;
+ case all_zero:
+ return one_elem_not_zero;
+ default:
+ return cond;
+ }
+}
+
+enum MSABranchDF {
+ MSA_BRANCH_B = 0,
+ MSA_BRANCH_H,
+ MSA_BRANCH_W,
+ MSA_BRANCH_D,
+ MSA_BRANCH_V
+};
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
@@ -1825,6 +1870,16 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
case BC1:
case BC1EQZ:
case BC1NEZ:
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
return true;
break;
default:
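
The new constants fall into two groups: lane counts, which are simply the 128-bit MSA register width divided by the lane width, and per-format masks, which keep only the opcode bits that are fixed for a given MSA instruction format (these replace per-class copies in the disassembler, see below). A small standalone sketch of how such values are used; the MatchesFormat() helper is an illustrative assumption, not V8's actual decoder:

#include <cstdint>
#include <cstdio>

const int kMSARegSize = 128;                  // MSA vector registers are 128 bits wide.
const int kMSALanesByte = kMSARegSize / 8;    // 16 x 8-bit lanes
const int kMSALanesHalf = kMSARegSize / 16;   // 8 x 16-bit lanes
const int kMSALanesWord = kMSARegSize / 32;   // 4 x 32-bit lanes
const int kMSALanesDword = kMSARegSize / 64;  // 2 x 64-bit lanes

// A format mask keeps the bits that are fixed for one instruction format; an
// instruction word belongs to that format when its masked bits equal the
// format's fixed pattern. The concrete patterns are not part of this hunk.
inline bool MatchesFormat(uint32_t instr, uint32_t mask, uint32_t fixed_bits) {
  return (instr & mask) == fixed_bits;
}

int main() {
  std::printf("lanes: %d byte, %d half, %d word, %d dword\n", kMSALanesByte,
              kMSALanesHalf, kMSALanesWord, kMSALanesDword);
  const uint32_t kMsaELMMask = 15U << 22;  // same value as in the diff above
  std::printf("ELM-format mask: 0x%08x\n", kMsaELMMask);
  (void)MatchesFormat;  // used with concrete opcode patterns in a real decoder
  return 0;
}
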
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 0c2d2c7544..f8ce065537 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -77,24 +77,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler());
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(a0.code(), params);
- output_frame->SetRegister(a1.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
@@ -140,7 +122,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ li(a2, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ sw(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
@@ -323,7 +306,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
+#ifdef _MIPS_ARCH_MIPS32R6
+const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+#else
const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
+#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
@@ -332,8 +319,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Note that registers are still live when jumping to an entry.
Label table_start, done, trampoline_jump;
__ bind(&table_start);
+
+#ifdef _MIPS_ARCH_MIPS32R6
+ int kMaxEntriesBranchReach =
+ (1 << (kImm26Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+#else
int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
(table_entry_size_ / Assembler::kInstrSize);
+#endif
if (count() <= kMaxEntriesBranchReach) {
// Common case.
@@ -341,10 +334,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
- __ nop();
-
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ li(at, i);
+ __ BranchShort(PROTECT, &done);
+ } else {
+ __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ __ nop();
+ }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
@@ -353,6 +350,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
__ Push(at);
} else {
+ DCHECK(!IsMipsArchVariant(kMips32r6));
// Uncommon case, the branch cannot reach.
// Create mini trampoline to reach the end of the table
for (int i = 0, j = 0; i < count(); i++, j++) {
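
On mips32r6 each deopt table entry shrinks from three instructions (branch, li in the delay slot, nop) to two (li plus a compact branch with no delay slot), and the reach check switches from the 16-bit to the 26-bit branch immediate. A standalone sketch of that arithmetic; kInstrSize = 4, kImm16Bits = 16 and kImm26Bits = 26 are the usual MIPS values and are assumed here rather than taken from this hunk:

#include <cstdio>

int main() {
  const int kInstrSize = 4;
  const int kImm16Bits = 16;
  const int kImm26Bits = 26;

  // Pre-r6: branch + li in the delay slot + nop = 3 instructions per entry.
  const int table_entry_size_pre_r6 = 3 * kInstrSize;
  // mips32r6: li + compact BranchShort (no delay slot) = 2 instructions.
  const int table_entry_size_r6 = 2 * kInstrSize;

  // Same formulas as kMaxEntriesBranchReach in GeneratePrologue above.
  int reach_pre_r6 =
      (1 << (kImm16Bits - 2)) / (table_entry_size_pre_r6 / kInstrSize);
  int reach_r6 =
      (1 << (kImm26Bits - 2)) / (table_entry_size_r6 / kInstrSize);

  std::printf("pre-r6 entries reachable by a short branch: %d\n", reach_pre_r6);
  std::printf("r6 entries reachable by a compact branch:   %d\n", reach_r6);
  return 0;
}
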
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 8d50be1c00..1a66555d8e 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -59,17 +59,6 @@ class Decoder {
int InstructionDecode(byte* instruction);
private:
- const uint32_t kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1));
- const uint32_t kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1));
- const uint32_t kMsaMI10Mask = (15U << 2);
- const uint32_t kMsaBITMask = ((7U << 23) | ((1 << 6) - 1));
- const uint32_t kMsaELMMask = (15U << 22);
- const uint32_t kMsa3RMask = ((7U << 23) | ((1 << 6) - 1));
- const uint32_t kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1));
- const uint32_t kMsaVECMask = (23U << 21);
- const uint32_t kMsa2RMask = (7U << 18);
- const uint32_t kMsa2RFMask = (15U << 17);
-
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
void Print(const char* str);
@@ -660,7 +649,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 2;
}
UNREACHABLE();
- return -1;
}
@@ -706,7 +694,6 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
}
}
UNREACHABLE();
- return -1;
}
// Handle all MSARegister based formatting in this function to reduce the
@@ -728,7 +715,6 @@ int Decoder::FormatMSARegister(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
// FormatOption takes a formatting string and interprets it based on
@@ -980,7 +966,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
UNREACHABLE();
- return -1;
}
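
Besides moving the MSA format masks into constants-mips.h, the hunks above drop the `return -1;` statements that used to follow UNREACHABLE(), presumably because the macro now expands to a call marked as never returning, which makes the trailing return dead code. A standalone sketch of the pattern; V8_Fatal_Stub and the macro below are stand-ins, not V8's real definitions:

[[noreturn]] void V8_Fatal_Stub() { __builtin_trap(); }  // stand-in fatal handler
#define UNREACHABLE() V8_Fatal_Stub()

// With a [[noreturn]] callee the compiler knows control cannot fall out of
// the switch, so no trailing `return -1;` is needed to silence warnings.
int FormatSelector(int selector) {
  switch (selector) {
    case 0:
      return 1;
    case 1:
      return 2;
  }
  UNREACHABLE();
}

int main() { return FormatSelector(0) == 1 ? 0 : 1; }
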
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index c962994079..952ae347a0 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -18,15 +18,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index c1e8229e22..3a9fd1d0e7 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -47,6 +47,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return t1; }
const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return a0; }
+
const Register ApiGetterDescriptor::HolderRegister() { return a0; }
const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
@@ -153,6 +155,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : arguments list (FixedArray)
+ // t0 : arguments list length (untagged)
+ Register registers[] = {a1, a0, a2, t0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
@@ -162,6 +174,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : the target to call
+ // a2 : the arguments list
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : arguments list (FixedArray)
+ // t0 : arguments list length (untagged)
+ Register registers[] = {a1, a3, a0, a2, t0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
@@ -172,6 +212,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ Register registers[] = {a1, a3, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@@ -368,8 +427,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
v0, // the value to pass to the generator
a1, // the JSGeneratorObject to resume
- a2, // the resume mode (tagged)
- a3 // SuspendFlags (tagged)
+ a2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 6dd611e1f6..cb2da0c44e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -20,16 +20,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- has_double_zero_reg_set_(false),
- isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
-}
+ : TurboAssembler(isolate, buffer, size, create_code_object) {}
void MacroAssembler::Load(Register dst,
const MemOperand& src,
@@ -67,16 +58,13 @@ void MacroAssembler::Store(Register src,
}
}
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond,
- Register src1, const Operand& src2) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond, Register src1,
+ const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -98,7 +86,7 @@ void MacroAssembler::StoreRoot(Register source,
sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
-void MacroAssembler::PushCommonFrame(Register marker_reg) {
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
Addu(fp, sp, Operand(kPointerSize));
@@ -116,7 +104,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
}
}
-void MacroAssembler::PushStandardFrame(Register function_reg) {
+void TurboAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg);
@@ -544,12 +532,12 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// ---------------------------------------------------------------------------
// Instruction macros.
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, rt.imm32_);
+ if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addiu(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -559,17 +547,17 @@ void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
- if (is_int16(-rt.imm32_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
- } else if (!(-rt.imm32_ & kHiMask) && !MustUseReg(rt.rmode_)) { // Use load
+ if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addiu(rd, rs, -rt.immediate()); // No subiu instr, use addiu(x, y, -imm).
+ } else if (!(-rt.immediate() & kHiMask) &&
+ !MustUseReg(rt.rmode())) { // Use load
// -imm and addu for cases where loading -imm generates one instruction.
DCHECK(!rs.is(at));
- li(at, -rt.imm32_);
+ li(at, -rt.immediate());
addu(rd, rs, at);
} else {
// li handles the relocation.
@@ -580,8 +568,7 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (IsMipsArchVariant(kLoongson)) {
mult(rs, rt.rm());
@@ -602,9 +589,8 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
- Register rs, const Operand& rt) {
+void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs,
+ const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
mult(rs, rt.rm());
@@ -645,7 +631,7 @@ void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
}
}
-void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
+void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
const Operand& rt) {
Register reg;
if (rt.is_reg()) {
@@ -674,7 +660,7 @@ void MacroAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
}
}
-void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
mult(rs, rt.rm());
@@ -695,8 +681,7 @@ void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mult(Register rs, const Operand& rt) {
+void TurboAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
@@ -707,8 +692,7 @@ void MacroAssembler::Mult(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
multu(rs, rt.rm());
@@ -729,8 +713,7 @@ void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Multu(Register rs, const Operand& rt) {
+void TurboAssembler::Multu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
multu(rs, rt.rm());
} else {
@@ -741,8 +724,7 @@ void MacroAssembler::Multu(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Div(Register rs, const Operand& rt) {
+void TurboAssembler::Div(Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rs, rt.rm());
} else {
@@ -753,9 +735,8 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Div(Register rem, Register res,
- Register rs, const Operand& rt) {
+void TurboAssembler::Div(Register rem, Register res, Register rs,
+ const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
div(rs, rt.rm());
@@ -780,8 +761,7 @@ void MacroAssembler::Div(Register rem, Register res,
}
}
-
-void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
div(rs, rt.rm());
@@ -802,8 +782,7 @@ void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
div(rs, rt.rm());
@@ -824,8 +803,7 @@ void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
divu(rs, rt.rm());
@@ -846,8 +824,7 @@ void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Divu(Register rs, const Operand& rt) {
+void TurboAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
@@ -858,8 +835,7 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (!IsMipsArchVariant(kMips32r6)) {
divu(rs, rt.rm());
@@ -880,13 +856,12 @@ void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- andi(rd, rs, rt.imm32_);
+ if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ andi(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -896,13 +871,12 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- ori(rd, rs, rt.imm32_);
+ if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ ori(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -912,13 +886,12 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- xori(rd, rs, rt.imm32_);
+ if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ xori(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -928,8 +901,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
nor(rd, rs, rt.rm());
} else {
@@ -940,8 +912,7 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Neg(Register rs, const Operand& rt) {
+void TurboAssembler::Neg(Register rs, const Operand& rt) {
DCHECK(rt.is_reg());
DCHECK(!at.is(rs));
DCHECK(!at.is(rt.rm()));
@@ -949,13 +920,12 @@ void MacroAssembler::Neg(Register rs, const Operand& rt) {
xor_(rs, rt.rm(), at);
}
-
-void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
- slti(rd, rs, rt.imm32_);
+ if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ slti(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -965,18 +935,18 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
const uint32_t int16_min = std::numeric_limits<int16_t>::min();
- if (is_uint15(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
// Imm range is: [0, 32767].
- sltiu(rd, rs, rt.imm32_);
- } else if (is_uint15(rt.imm32_ - int16_min) && !MustUseReg(rt.rmode_)) {
+ sltiu(rd, rs, rt.immediate());
+ } else if (is_uint15(rt.immediate() - int16_min) &&
+ !MustUseReg(rt.rmode())) {
// Imm range is: [max_unsigned-32767,max_unsigned].
- sltiu(rd, rs, static_cast<uint16_t>(rt.imm32_));
+ sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -986,13 +956,12 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
- rotr(rd, rs, rt.imm32_ & 0x1f);
+ rotr(rd, rs, rt.immediate() & 0x1f);
}
} else {
if (rt.is_reg()) {
@@ -1001,11 +970,11 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
srlv(rd, rs, rt.rm());
or_(rd, rd, at);
} else {
- if (rt.imm32_ == 0) {
+ if (rt.immediate() == 0) {
srl(rd, rs, 0);
} else {
- srl(at, rs, rt.imm32_ & 0x1f);
- sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
+ srl(at, rs, rt.immediate() & 0x1f);
+ sll(rd, rs, (0x20 - (rt.immediate() & 0x1f)) & 0x1f);
or_(rd, rd, at);
}
}
@@ -1021,8 +990,7 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
}
-
-void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
@@ -1035,7 +1003,7 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
-void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
+void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bnvc(rs, rt, &skip);
@@ -1046,7 +1014,7 @@ void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
}
}
-void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
+void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bovc(rs, rt, &skip);
@@ -1060,7 +1028,7 @@ void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
// ------------Pseudo-instructions-------------
// Word Swap Byte
-void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
@@ -1098,7 +1066,7 @@ void MacroAssembler::ByteSwapSigned(Register dest, Register src,
}
}
-void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2);
@@ -1126,7 +1094,7 @@ void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
}
}
-void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
@@ -1134,46 +1102,40 @@ void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
- if (is_int16(rs.offset() + kMipsLwrOffset) &&
- is_int16(rs.offset() + kMipsLwlOffset)) {
- if (!rd.is(rs.rm())) {
- lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
- lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
- } else {
- lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
- lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
- mov(rd, at);
- }
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- lwr(rd, MemOperand(at, kMipsLwrOffset));
- lwl(rd, MemOperand(at, kMipsLwlOffset));
+ DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ if (!rd.is(source.rm())) {
+ lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
+ lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
+ } else {
+ lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ mov(rd, at);
}
}
}
-
-void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
+ DCHECK(!rd.is(rs.rm()));
if (IsMipsArchVariant(kMips32r6)) {
sw(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
- if (is_int16(rs.offset() + kMipsSwrOffset) &&
- is_int16(rs.offset() + kMipsSwlOffset)) {
- swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
- swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
- } else {
- LoadRegPlusOffsetToAt(rs);
- swr(rd, MemOperand(at, kMipsSwrOffset));
- swl(rd, MemOperand(at, kMipsSwlOffset));
- }
+ DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
+ swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
}
-void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
@@ -1181,22 +1143,24 @@ void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
- if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ if (source.rm().is(at)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(at, rs);
- lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ lb(rd, MemOperand(source.rm(), source.offset() + 1));
+ lbu(at, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- lb(rd, rs);
+ lb(rd, source);
+ lbu(at, MemOperand(source.rm(), source.offset() + 1));
#endif
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
+ } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lb(rd, MemOperand(at, 1));
- lbu(at, MemOperand(at, 0));
+ lbu(at, source);
+ lb(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lb(rd, MemOperand(at, 0));
- lbu(at, MemOperand(at, 1));
+ lbu(at, MemOperand(source.rm(), source.offset() + 1));
+ lb(rd, source);
#endif
}
sll(rd, rd, 8);
@@ -1204,7 +1168,7 @@ void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
}
}
-void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
@@ -1212,22 +1176,24 @@ void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
- if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ if (source.rm().is(at)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(at, rs);
- lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ lbu(rd, MemOperand(source.rm(), source.offset() + 1));
+ lbu(at, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- lbu(rd, rs);
+ lbu(rd, source);
+ lbu(at, MemOperand(source.rm(), source.offset() + 1));
#endif
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
+ } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(rd, MemOperand(at, 1));
- lbu(at, MemOperand(at, 0));
+ lbu(at, source);
+ lbu(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(rd, MemOperand(at, 0));
- lbu(at, MemOperand(at, 1));
+ lbu(at, MemOperand(source.rm(), source.offset() + 1));
+ lbu(rd, source);
#endif
}
sll(rd, rd, 8);
@@ -1235,7 +1201,7 @@ void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
}
}
-void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
DCHECK(!rs.rm().is(scratch));
@@ -1246,11 +1212,8 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
- // If offset > 16 bits, load address to at with offset 0.
- if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
- LoadRegPlusOffsetToAt(rs);
- source = MemOperand(at, 0);
- }
+ // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
if (!scratch.is(rd)) {
mov(scratch, rd);
@@ -1268,7 +1231,7 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
}
}
-void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
lwc1(fd, rs);
@@ -1280,7 +1243,7 @@ void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
swc1(fd, rs);
@@ -1292,7 +1255,7 @@ void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
@@ -1307,7 +1270,7 @@ void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
@@ -1322,109 +1285,90 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
+void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
+ DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
- if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
- lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- lwc1(nextfpreg,
- MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
- lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- lwc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset));
- }
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ lwc1(nextfpreg,
+ MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
} else {
DCHECK(IsFp64Mode() || IsFpxxMode());
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
- lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
- lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
- mthc1(at, fd);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
- lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
- lw(at, MemOperand(at, off16 + Register::kExponentOffset));
- mthc1(at, fd);
- }
+ DCHECK(!src.rm().is(at));
+ lw(at, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ Mthc1(at, fd);
}
}
-void MacroAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
+void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- DCHECK(!src.rm().is(at));
- DCHECK(!src.rm().is(t8));
+ DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
if (IsFp32Mode()) { // fp32 mode.
- if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
- swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- swc1(nextfpreg,
- MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
- swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- swc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset));
- }
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ swc1(nextfpreg,
+ MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
} else {
DCHECK(IsFp64Mode() || IsFpxxMode());
// Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
- swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
- mfhc1(at, fd);
- sw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
- swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
- mfhc1(t8, fd);
- sw(t8, MemOperand(at, off16 + Register::kExponentOffset));
- }
+ DCHECK(!src.rm().is(t8));
+ Mfhc1(t8, fd);
+ sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
}
}
-void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
-
-void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
- if (is_int16(j.imm32_)) {
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kHiMask)) {
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kImm16Mask)) {
- lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
+ if (is_int16(j.immediate())) {
+ addiu(rd, zero_reg, j.immediate());
+ } else if (!(j.immediate() & kHiMask)) {
+ ori(rd, zero_reg, j.immediate());
} else {
- lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
+ lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask);
+ if (j.immediate() & kImm16Mask) {
+ ori(rd, rd, (j.immediate() & kImm16Mask));
+ }
}
} else {
- if (MustUseReg(j.rmode_)) {
- RecordRelocInfo(j.rmode_, j.imm32_);
+ int32_t immediate;
+ if (j.IsHeapObjectRequest()) {
+ RequestHeapObject(j.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = j.immediate();
+ }
+
+ if (MustUseReg(j.rmode())) {
+ RecordRelocInfo(j.rmode(), immediate);
}
// We always need the same number of instructions as we may need to patch
// this code to load another value which may need 2 instructions to load.
- lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
+
+ lui(rd, (immediate >> kLuiShift) & kImm16Mask);
+ ori(rd, rd, (immediate & kImm16Mask));
}
}
-
-void MacroAssembler::MultiPush(RegList regs) {
+void TurboAssembler::MultiPush(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
@@ -1451,8 +1395,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) {
}
}
-
-void MacroAssembler::MultiPop(RegList regs) {
+void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1477,8 +1420,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
addiu(sp, sp, stack_offset);
}
-
-void MacroAssembler::MultiPushFPU(RegList regs) {
+void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -1505,8 +1447,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
}
}
-
-void MacroAssembler::MultiPopFPU(RegList regs) {
+void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1531,160 +1472,127 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
addiu(sp, sp, stack_offset);
}
-void MacroAssembler::AddPair(Register dst_low, Register dst_high,
+void TurboAssembler::AddPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
- Label no_overflow;
Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- // Add lower word
- Addu(dst_low, left_low, right_low);
+ if (left_low.is(right_low)) {
+ // Special case for left = right and the sum potentially overwriting both
+ // left and right.
+ Slt(kScratchReg, left_low, zero_reg);
+ Addu(dst_low, left_low, right_low);
+ } else {
+ Addu(dst_low, left_low, right_low);
+ // If the sum overwrites right, left remains unchanged, otherwise right
+ // remains unchanged.
+ Sltu(kScratchReg, dst_low, (dst_low.is(right_low)) ? left_low : right_low);
+ }
Addu(dst_high, left_high, right_high);
- // Check for lower word unsigned overflow
- Sltu(kScratchReg, dst_low, left_low);
- Sltu(kScratchReg2, dst_low, right_low);
- Or(kScratchReg, kScratchReg2, kScratchReg);
- Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
- // Increment higher word if there was overflow
- Addu(dst_high, dst_high, 0x1);
- bind(&no_overflow);
+ Addu(dst_high, dst_high, kScratchReg);
}
-void MacroAssembler::SubPair(Register dst_low, Register dst_high,
+void TurboAssembler::SubPair(Register dst_low, Register dst_high,
Register left_low, Register left_high,
Register right_low, Register right_high) {
- Label no_overflow;
Register kScratchReg = s3;
- // Subtract lower word
+ Sltu(kScratchReg, left_low, right_low);
Subu(dst_low, left_low, right_low);
Subu(dst_high, left_high, right_high);
- // Check for lower word unsigned underflow
- Sltu(kScratchReg, left_low, right_low);
- Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
- // Decrement higher word if there was underflow
- Subu(dst_high, dst_high, 0x1);
- bind(&no_overflow);
+ Subu(dst_high, dst_high, kScratchReg);
}
-void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift) {
- Label less_than_32;
- Label zero_shift;
- Label word_shift;
Label done;
Register kScratchReg = s3;
+ Register kScratchReg2 = s4;
And(shift, shift, 0x3F);
- li(kScratchReg, 0x20);
- Branch(&less_than_32, lt, shift, Operand(kScratchReg));
-
- Branch(&word_shift, eq, shift, Operand(kScratchReg));
- // Shift more than 32
- Subu(kScratchReg, shift, kScratchReg);
- mov(dst_low, zero_reg);
- sllv(dst_high, src_low, kScratchReg);
- Branch(&done);
- // Word shift
- bind(&word_shift);
- mov(dst_low, zero_reg);
- mov(dst_high, src_low);
- Branch(&done);
-
- bind(&less_than_32);
- // Check if zero shift
- Branch(&zero_shift, eq, shift, Operand(zero_reg));
- // Shift less than 32
- Subu(kScratchReg, kScratchReg, shift);
- sllv(dst_high, src_high, shift);
sllv(dst_low, src_low, shift);
- srlv(kScratchReg, src_low, kScratchReg);
+ Nor(kScratchReg2, zero_reg, shift);
+ srl(kScratchReg, src_low, 1);
+ srlv(kScratchReg, kScratchReg, kScratchReg2);
+ sllv(dst_high, src_high, shift);
Or(dst_high, dst_high, kScratchReg);
- Branch(&done);
- // Zero shift
- bind(&zero_shift);
- mov(dst_low, src_low);
- mov(dst_high, src_high);
+ And(kScratchReg, shift, 32);
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
+ Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ mov(dst_high, dst_low);
+ mov(dst_low, zero_reg);
+ } else {
+ movn(dst_high, dst_low, kScratchReg);
+ movn(dst_low, zero_reg, kScratchReg);
+ }
bind(&done);
}
-void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
Register kScratchReg = s3;
shift = shift & 0x3F;
- if (shift < 32) {
- if (shift == 0) {
- mov(dst_low, src_low);
- mov(dst_high, src_high);
+ if (shift == 0) {
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ } else if (shift < 32) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ srl(dst_high, src_low, 32 - shift);
+ Ins(dst_high, src_high, shift, 32 - shift);
+ sll(dst_low, src_low, shift);
} else {
sll(dst_high, src_high, shift);
sll(dst_low, src_low, shift);
- shift = 32 - shift;
- srl(kScratchReg, src_low, shift);
+ srl(kScratchReg, src_low, 32 - shift);
Or(dst_high, dst_high, kScratchReg);
}
+ } else if (shift == 32) {
+ mov(dst_low, zero_reg);
+ mov(dst_high, src_low);
} else {
- if (shift == 32) {
- mov(dst_low, zero_reg);
- mov(dst_high, src_low);
- } else {
- shift = shift - 32;
- mov(dst_low, zero_reg);
- sll(dst_high, src_low, shift);
- }
+ shift = shift - 32;
+ mov(dst_low, zero_reg);
+ sll(dst_high, src_low, shift);
}
}
-void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift) {
- Label less_than_32;
- Label zero_shift;
- Label word_shift;
Label done;
Register kScratchReg = s3;
+ Register kScratchReg2 = s4;
And(shift, shift, 0x3F);
- li(kScratchReg, 0x20);
- Branch(&less_than_32, lt, shift, Operand(kScratchReg));
-
- Branch(&word_shift, eq, shift, Operand(kScratchReg));
- // Shift more than 32
- Subu(kScratchReg, shift, kScratchReg);
- mov(dst_high, zero_reg);
- srlv(dst_low, src_high, kScratchReg);
- Branch(&done);
- // Word shift
- bind(&word_shift);
- mov(dst_high, zero_reg);
- mov(dst_low, src_high);
- Branch(&done);
-
- bind(&less_than_32);
- // Check if zero shift
- Branch(&zero_shift, eq, shift, Operand(zero_reg));
- // Shift less than 32
- Subu(kScratchReg, kScratchReg, shift);
srlv(dst_high, src_high, shift);
+ Nor(kScratchReg2, zero_reg, shift);
+ sll(kScratchReg, src_high, 1);
+ sllv(kScratchReg, kScratchReg, kScratchReg2);
srlv(dst_low, src_low, shift);
- sllv(kScratchReg, src_high, kScratchReg);
Or(dst_low, dst_low, kScratchReg);
- Branch(&done);
- // Zero shift
- bind(&zero_shift);
- mov(dst_low, src_low);
- mov(dst_high, src_high);
+ And(kScratchReg, shift, 32);
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
+ Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ mov(dst_low, dst_high);
+ mov(dst_high, zero_reg);
+ } else {
+ movn(dst_low, dst_high, kScratchReg);
+ movn(dst_high, zero_reg, kScratchReg);
+ }
bind(&done);
}
-void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
Register kScratchReg = s3;
shift = shift & 0x3F;
- if (shift < 32) {
- if (shift == 0) {
- mov(dst_low, src_low);
- mov(dst_high, src_high);
+ if (shift == 0) {
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ } else if (shift < 32) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ srl(dst_low, src_low, shift);
+ Ins(dst_low, src_high, 32 - shift, shift);
+ srl(dst_high, src_high, shift);
} else {
srl(dst_high, src_high, shift);
srl(dst_low, src_low, shift);
@@ -1692,73 +1600,49 @@ void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
sll(kScratchReg, src_high, shift);
Or(dst_low, dst_low, kScratchReg);
}
+ } else if (shift == 32) {
+ mov(dst_high, zero_reg);
+ mov(dst_low, src_high);
} else {
- if (shift == 32) {
- mov(dst_high, zero_reg);
- mov(dst_low, src_high);
- } else {
- shift = shift - 32;
- mov(dst_high, zero_reg);
- srl(dst_low, src_high, shift);
- }
+ shift = shift - 32;
+ mov(dst_high, zero_reg);
+ srl(dst_low, src_high, shift);
}
}
-void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+void TurboAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register shift) {
- Label less_than_32;
- Label zero_shift;
- Label word_shift;
Label done;
Register kScratchReg = s3;
Register kScratchReg2 = s4;
And(shift, shift, 0x3F);
- li(kScratchReg, 0x20);
- Branch(&less_than_32, lt, shift, Operand(kScratchReg));
-
- Branch(&word_shift, eq, shift, Operand(kScratchReg));
-
- // Shift more than 32
- li(kScratchReg2, 0x1F);
- Subu(kScratchReg, shift, kScratchReg);
- srav(dst_high, src_high, kScratchReg2);
- srav(dst_low, src_high, kScratchReg);
- Branch(&done);
- // Word shift
- bind(&word_shift);
- li(kScratchReg2, 0x1F);
- srav(dst_high, src_high, kScratchReg2);
- mov(dst_low, src_high);
- Branch(&done);
-
- bind(&less_than_32);
- // Check if zero shift
- Branch(&zero_shift, eq, shift, Operand(zero_reg));
-
- // Shift less than 32
- Subu(kScratchReg, kScratchReg, shift);
srav(dst_high, src_high, shift);
+ Nor(kScratchReg2, zero_reg, shift);
+ sll(kScratchReg, src_high, 1);
+ sllv(kScratchReg, kScratchReg, kScratchReg2);
srlv(dst_low, src_low, shift);
- sllv(kScratchReg, src_high, kScratchReg);
Or(dst_low, dst_low, kScratchReg);
- Branch(&done);
- // Zero shift
- bind(&zero_shift);
- mov(dst_low, src_low);
- mov(dst_high, src_high);
+ And(kScratchReg, shift, 32);
+ Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ mov(dst_low, dst_high);
+ sra(dst_high, dst_high, 31);
bind(&done);
}
-void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+void TurboAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
Register kScratchReg = s3;
shift = shift & 0x3F;
- if (shift < 32) {
- if (shift == 0) {
- mov(dst_low, src_low);
- mov(dst_high, src_high);
+ if (shift == 0) {
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ } else if (shift < 32) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ srl(dst_low, src_low, shift);
+ Ins(dst_low, src_high, 32 - shift, shift);
+ sra(dst_high, src_high, shift);
} else {
sra(dst_high, src_high, shift);
srl(dst_low, src_low, shift);
@@ -1766,21 +1650,17 @@ void MacroAssembler::SarPair(Register dst_low, Register dst_high,
sll(kScratchReg, src_high, shift);
Or(dst_low, dst_low, kScratchReg);
}
+ } else if (shift == 32) {
+ sra(dst_high, src_high, 31);
+ mov(dst_low, src_high);
} else {
- if (shift == 32) {
- sra(dst_high, src_high, 31);
- mov(dst_low, src_high);
- } else {
- shift = shift - 32;
- sra(dst_high, src_high, 31);
- sra(dst_low, src_high, shift);
- }
+ shift = shift - 32;
+ sra(dst_high, src_high, 31);
+ sra(dst_low, src_high, shift);
}
}
-void MacroAssembler::Ext(Register rt,
- Register rs,
- uint16_t pos,
+void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 32);
DCHECK(pos + size < 33);
@@ -1800,10 +1680,7 @@ void MacroAssembler::Ext(Register rt,
}
}
-
-void MacroAssembler::Ins(Register rt,
- Register rs,
- uint16_t pos,
+void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 32);
DCHECK(pos + size <= 32);
@@ -1824,7 +1701,7 @@ void MacroAssembler::Ins(Register rt,
}
}
-void MacroAssembler::Seb(Register rd, Register rt) {
+void TurboAssembler::Seb(Register rd, Register rt) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
seb(rd, rt);
} else {
@@ -1834,7 +1711,7 @@ void MacroAssembler::Seb(Register rd, Register rt) {
}
}
-void MacroAssembler::Seh(Register rd, Register rt) {
+void TurboAssembler::Seh(Register rd, Register rt) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
seh(rd, rt);
} else {
@@ -1844,7 +1721,7 @@ void MacroAssembler::Seh(Register rd, Register rt) {
}
}
-void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kMips32r6)) {
// r6 neg_s changes the sign for NaN-like operands as well.
neg_s(fd, fs);
@@ -1861,16 +1738,14 @@ void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
neg_s(fd, fs); // In delay slot.
bind(&is_nan);
mfc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~kBinary32SignMask));
- And(scratch1, scratch1, Operand(kBinary32SignMask));
- Xor(scratch1, scratch1, Operand(kBinary32SignMask));
- Or(scratch2, scratch2, scratch1);
- mtc1(scratch2, fd);
+ li(scratch2, kBinary32SignMask);
+ Xor(scratch1, scratch1, scratch2);
+ mtc1(scratch1, fd);
bind(&done);
}
}
-void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kMips32r6)) {
// r6 neg_d changes the sign for NaN-like operands as well.
neg_d(fd, fs);
@@ -1887,16 +1762,14 @@ void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
neg_d(fd, fs); // In delay slot.
bind(&is_nan);
Mfhc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
- And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Or(scratch2, scratch2, scratch1);
- Mthc1(scratch2, fd);
+ li(scratch2, HeapNumber::kSignMask);
+ Xor(scratch1, scratch1, scratch2);
+ Mthc1(scratch1, fd);
bind(&done);
}
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
+void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs,
FPURegister scratch) {
// In FP64Mode we do the conversion from long.
if (IsFp64Mode()) {
@@ -1932,15 +1805,13 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
}
}
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd,
- FPURegister fs,
+void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
FPURegister scratch) {
Trunc_uw_d(fs, t8, scratch);
mtc1(t8, fd);
}
-void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
FPURegister scratch) {
Trunc_uw_s(fs, t8, scratch);
mtc1(t8, fd);
@@ -1989,9 +1860,7 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
}
}
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd,
- Register rs,
+void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
FPURegister scratch) {
DCHECK(!fd.is(scratch));
DCHECK(!rs.is(at));
@@ -2022,7 +1891,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
bind(&done);
}
-void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
FPURegister scratch) {
DCHECK(!fd.is(scratch));
DCHECK(!rs.is(at));
@@ -2052,7 +1921,7 @@ void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
bind(&done);
}
-void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
+void TurboAssembler::Mthc1(Register rt, FPURegister fs) {
if (IsFp32Mode()) {
mtc1(rt, fs.high());
} else {
@@ -2062,8 +1931,7 @@ void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
}
}
-
-void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
+void TurboAssembler::Mfhc1(Register rt, FPURegister fs) {
if (IsFp32Mode()) {
mfc1(rt, fs.high());
} else {
@@ -2073,7 +1941,7 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
}
}
-void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
madd_s(fd, fr, fs, ft);
@@ -2084,7 +1952,7 @@ void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
}
}
-void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
madd_d(fd, fr, fs, ft);
@@ -2095,7 +1963,7 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
}
-void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
msub_s(fd, fr, fs, ft);
@@ -2106,7 +1974,7 @@ void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
}
}
-void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (IsMipsArchVariant(kMips32r2)) {
msub_d(fd, fr, fs, ft);
@@ -2117,7 +1985,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
}
-void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
+void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
{
@@ -2187,7 +2055,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
}
}
-void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
+void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
if (!IsMipsArchVariant(kMips32r6)) {
@@ -2316,8 +2184,84 @@ void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
}
}
+void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
+ MSABranchCondition cond, MSARegister wt,
+ BranchDelaySlot bd) {
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ if (target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
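+ // If the target may be out of range for a short branch, branch on the
+ // negated condition over an unconditional long branch instead.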
+ if (long_branch) {
+ Label skip;
+ MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
+ BranchShortMSA(df, &skip, neg_cond, wt, bd);
+ BranchLong(target, bd);
+ bind(&skip);
+ } else {
+ BranchShortMSA(df, target, cond, wt, bd);
+ }
+ }
+ }
+}
+
+void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
+ MSABranchCondition cond, MSARegister wt,
+ BranchDelaySlot bd) {
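+ // MSA branch instructions exist only on MIPS32r6; on other variants this
+ // emits nothing except the optional delay-slot nop at the end.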
+ if (IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target) {
+ switch (cond) {
+ case all_not_zero:
+ switch (df) {
+ case MSA_BRANCH_D:
+ bnz_d(wt, target);
+ break;
+ case MSA_BRANCH_W:
+ bnz_w(wt, target);
+ break;
+ case MSA_BRANCH_H:
+ bnz_h(wt, target);
+ break;
+ case MSA_BRANCH_B:
+ default:
+ bnz_b(wt, target);
+ }
+ break;
+ case one_elem_not_zero:
+ bnz_v(wt, target);
+ break;
+ case one_elem_zero:
+ switch (df) {
+ case MSA_BRANCH_D:
+ bz_d(wt, target);
+ break;
+ case MSA_BRANCH_W:
+ bz_w(wt, target);
+ break;
+ case MSA_BRANCH_H:
+ bz_h(wt, target);
+ break;
+ case MSA_BRANCH_B:
+ default:
+ bz_b(wt, target);
+ }
+ break;
+ case all_zero:
+ bz_v(wt, target);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
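+ // Fill the branch delay slot with a nop when PROTECT was requested.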
+ if (bd == PROTECT) {
+ nop();
+ }
+}
-void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
+void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
if (IsFp32Mode()) {
mtc1(src_low, dst);
} else {
@@ -2330,14 +2274,12 @@ void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
}
}
-
-void MacroAssembler::Move(FPURegister dst, float imm) {
+void TurboAssembler::Move(FPURegister dst, float imm) {
li(at, Operand(bit_cast<int32_t>(imm)));
mtc1(at, dst);
}
-
-void MacroAssembler::Move(FPURegister dst, double imm) {
+void TurboAssembler::Move(FPURegister dst, double imm) {
int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
@@ -2367,8 +2309,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
}
}
-
-void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
@@ -2379,8 +2320,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
}
}
-
-void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
@@ -2391,8 +2331,7 @@ void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
}
}
-
-void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally moves rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
@@ -2417,8 +2356,7 @@ void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
}
}
-
-void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
if (IsMipsArchVariant(kLoongson)) {
// Tests an FP condition code and then conditionally moves rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
@@ -2443,7 +2381,7 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
}
}
-void MacroAssembler::Clz(Register rd, Register rs) {
+void TurboAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
Register mask = t8;
@@ -2528,8 +2466,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
bind(&done);
}
-
-void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister single_scratch = kLithiumScratchDouble.low();
@@ -2553,9 +2490,8 @@ void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
Branch(done, eq, scratch, Operand(zero_reg));
}
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DoubleRegister double_input) {
+void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -2565,8 +2501,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- DoubleToIStub stub(isolate(), sp, result, 0, true, true);
- CallStub(&stub);
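+ // Slow path: go through the DoubleToI stub. The stub is constructed with a
+ // null isolate and its code object is resolved later by CallStubDelayed.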
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
Addu(sp, sp, Operand(kDoubleSize));
pop(ra);
@@ -2636,22 +2571,19 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-
-void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
-
-void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchShort(L, bdslot);
@@ -2667,10 +2599,8 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
@@ -2700,18 +2630,13 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
}
}
-
-void MacroAssembler::Branch(Label* L,
- Condition cond,
- Register rs,
- Heap::RootListIndex index,
- BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+ Heap::RootListIndex index, BranchDelaySlot bdslot) {
LoadRoot(at, index);
Branch(L, cond, rs, Operand(at), bdslot);
}
-
-void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
@@ -2722,15 +2647,13 @@ void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
nop();
}
-
-void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
bc(offset);
}
-
-void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
DCHECK(is_int26(offset));
BranchShortHelperR6(offset, nullptr);
@@ -2740,8 +2663,7 @@ void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
BranchShortHelperR6(0, L);
} else {
@@ -2758,8 +2680,7 @@ static inline bool IsZero(const Operand& rt) {
}
}
-
-int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
} else {
@@ -2768,12 +2689,11 @@ int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
return offset;
}
-
-Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
Register scratch) {
Register r2 = no_reg;
if (rt.is_reg()) {
- r2 = rt.rm_;
+ r2 = rt.rm();
} else {
r2 = scratch;
li(r2, rt);
@@ -2782,8 +2702,7 @@ Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-
-bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -2803,7 +2722,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
bc(offset);
break;
case eq:
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
bits = OffsetSize::kOffset16;
@@ -2827,7 +2746,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
}
break;
case ne:
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
bits = OffsetSize::kOffset16;
@@ -2854,7 +2773,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset16;
@@ -2878,7 +2797,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -2905,7 +2824,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case less:
// rs < rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset16;
@@ -2929,7 +2848,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -2958,7 +2877,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Unsigned comparison.
case Ugreater:
// rs > rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset21;
@@ -2982,7 +2901,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case Ugreater_equal:
// rs >= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3009,7 +2928,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case Uless:
// rs < rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset21;
@@ -3030,7 +2949,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case Uless_equal:
// rs <= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3063,8 +2982,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
return true;
}
-
-bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
@@ -3199,8 +3117,7 @@ bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
return true;
}
-
-bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -3223,33 +3140,28 @@ bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return false;
}
-
-void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(0, L, cond, rs, rt, bdslot);
}
-
-void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-
-void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
-
-void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
@@ -3265,10 +3177,8 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
@@ -3290,8 +3200,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
}
}
-
-void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
@@ -3302,15 +3211,13 @@ void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
nop();
}
-
-void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
balc(offset);
}
-
-void MacroAssembler::BranchAndLinkShort(int32_t offset,
+void TurboAssembler::BranchAndLinkShort(int32_t offset,
BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
DCHECK(is_int26(offset));
@@ -3321,8 +3228,7 @@ void MacroAssembler::BranchAndLinkShort(int32_t offset,
}
}
-
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
BranchAndLinkShortHelperR6(0, L);
} else {
@@ -3330,8 +3236,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
}
}
-
-bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -3363,7 +3268,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
if (!is_near(L, bits)) return false;
@@ -3383,7 +3288,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3406,7 +3311,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less:
// rs < rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
if (!is_near(L, bits)) return false;
@@ -3426,7 +3331,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3488,7 +3393,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
// with the slt instructions. We could use sub or add instead but we would miss
// overflow cases, so we keep slt and add an intermediate third instruction.
-bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
@@ -3579,8 +3484,7 @@ bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
return true;
}
-
-bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
@@ -3605,7 +3509,7 @@ bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
-void MacroAssembler::Jump(Register target, int16_t offset, Condition cond,
+void TurboAssembler::Jump(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_int16(offset));
@@ -3633,7 +3537,7 @@ void MacroAssembler::Jump(Register target, int16_t offset, Condition cond,
}
}
-void MacroAssembler::Jump(Register target, Register base, int16_t offset,
+void TurboAssembler::Jump(Register target, Register base, int16_t offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(is_int16(offset));
@@ -3664,7 +3568,7 @@ void MacroAssembler::Jump(Register target, Register base, int16_t offset,
}
}
-void MacroAssembler::Jump(Register target, const Operand& offset,
+void TurboAssembler::Jump(Register target, const Operand& offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3698,12 +3602,8 @@ void MacroAssembler::Jump(Register target, const Operand& offset,
}
}
-
-void MacroAssembler::Jump(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label skip;
@@ -3726,30 +3626,20 @@ void MacroAssembler::Jump(intptr_t target,
bind(&skip);
}
-
-void MacroAssembler::Jump(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
-
-void MacroAssembler::Jump(Handle<Code> code,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+ Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
-int MacroAssembler::CallSize(Register target, int16_t offset, Condition cond,
+int TurboAssembler::CallSize(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = 0;
@@ -3771,7 +3661,7 @@ int MacroAssembler::CallSize(Register target, int16_t offset, Condition cond,
// Note: To call gcc-compiled C code on mips, you must call through t9.
-void MacroAssembler::Call(Register target, int16_t offset, Condition cond,
+void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(is_int16(offset));
#ifdef DEBUG
@@ -3811,7 +3701,7 @@ void MacroAssembler::Call(Register target, int16_t offset, Condition cond,
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
-void MacroAssembler::Call(Register target, Register base, int16_t offset,
+void TurboAssembler::Call(Register target, Register base, int16_t offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(is_uint16(offset));
@@ -3853,12 +3743,8 @@ void MacroAssembler::Call(Register target, Register base, int16_t offset,
#endif
}
-
-int MacroAssembler::CallSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
+int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, 0, cond, rs, rt, bd);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always)
@@ -3867,13 +3753,8 @@ int MacroAssembler::CallSize(Address target,
return size + 2 * kInstrSize;
}
-
-void MacroAssembler::Call(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
CheckBuffer();
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
@@ -3895,51 +3776,32 @@ void MacroAssembler::Call(Address target,
SizeOfCodeGeneratedSince(&start));
}
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- Register rs,
- const Operand& rt,
+int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
AllowDeferredHandleDereference using_raw_address;
- return CallSize(reinterpret_cast<Address>(code.location()),
- rmode, cond, rs, rt, bd);
+ return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
AllowDeferredHandleDereference embedding_raw_address;
- Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
- DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+ Call(code.address(), rmode, cond, rs, rt, bd);
+ DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
-
-void MacroAssembler::Ret(Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, 0, cond, rs, rt, bd);
}
-
-void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
@@ -3978,8 +3840,7 @@ void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
@@ -4018,16 +3879,13 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::DropAndRet(int drop) {
+void TurboAssembler::DropAndRet(int drop) {
DCHECK(is_int16(drop * kPointerSize));
Ret(USE_DELAY_SLOT);
addiu(sp, sp, drop * kPointerSize);
}
-void MacroAssembler::DropAndRet(int drop,
- Condition cond,
- Register r1,
+void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
@@ -4043,10 +3901,7 @@ void MacroAssembler::DropAndRet(int drop,
}
}
-
-void MacroAssembler::Drop(int count,
- Condition cond,
- Register reg,
+void TurboAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
@@ -4081,17 +3936,26 @@ void MacroAssembler::Swap(Register reg1,
}
}
+void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
-void MacroAssembler::Call(Label* target) {
- BranchAndLink(target);
+void TurboAssembler::Push(Handle<HeapObject> handle) {
+ li(at, Operand(handle));
+ push(at);
}
-
-void MacroAssembler::Push(Handle<Object> handle) {
- li(at, Operand(handle));
+void TurboAssembler::Push(Smi* smi) {
+ li(at, Operand(smi));
push(at);
}
+void MacroAssembler::PushObject(Handle<Object> handle) {
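+ // Push is now split by type, so dispatch on whether the handle holds a
+ // heap object or a Smi.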
+ if (handle->IsHeapObject()) {
+ Push(Handle<HeapObject>::cast(handle));
+ } else {
+ Push(Smi::cast(*handle));
+ }
+}
+
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -4111,7 +3975,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ li(t2,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
lw(t1, MemOperand(t2));
push(t1);
@@ -4124,7 +3989,8 @@ void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ li(at,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
sw(a1, MemOperand(at));
}
@@ -4136,7 +4002,6 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4210,10 +4075,7 @@ void MacroAssembler::Allocate(int object_size,
Addu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- sw(result_end, MemOperand(top_address));
- }
+ sw(result_end, MemOperand(top_address));
// Tag object.
Addu(result, result, Operand(kHeapObjectTag));
@@ -4223,7 +4085,6 @@ void MacroAssembler::Allocate(int object_size,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4305,101 +4166,9 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
}
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- sw(result_end, MemOperand(top_address));
- }
-
- // Tag object.
- Addu(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch1;
- // This code stores a temporary value in t9.
- Register result_end = scratch2;
- li(top_address, Operand(allocation_top));
- lw(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- Branch(&aligned, eq, result_end, Operand(zero_reg));
- li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(result_end, MemOperand(result));
- Addu(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
- }
-
- Addu(result_end, result, Operand(object_size));
-
- // The top pointer is not updated for allocation folding dominators.
- sw(result_end, MemOperand(top_address));
-
- Addu(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, Register scratch,
- AllocationFlags flags) {
- // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
- // is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, t9, at));
- DCHECK(!AreAliased(result_end, result, scratch, t9, at));
- DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address and allocation limit registers.
- Register top_address = scratch;
- // This code stores a temporary value in t9.
- li(top_address, Operand(allocation_top));
- lw(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- Branch(&aligned, eq, result_end, Operand(zero_reg));
- li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(result_end, MemOperand(result));
- Addu(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- Lsa(result_end, result, object_size, kPointerSizeLog2);
- } else {
- Addu(result_end, result, Operand(object_size));
- }
-
- // The top pointer is not updated for allocation folding dominators.
sw(result_end, MemOperand(top_address));
+ // Tag object.
Addu(result, result, Operand(kHeapObjectTag));
}
@@ -4465,7 +4234,7 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
sw(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -4530,7 +4299,7 @@ void MacroAssembler::CheckMap(Register obj,
Branch(fail, ne, scratch, Operand(at));
}
-void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
@@ -4547,8 +4316,7 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
JumpIfSmi(value, miss);
}
-
-void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
+void TurboAssembler::MovFromFloatResult(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, v0, v1);
@@ -4560,8 +4328,7 @@ void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
}
}
-
-void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
+void TurboAssembler::MovFromFloatParameter(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, a0, a1);
@@ -4573,8 +4340,7 @@ void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
}
}
-
-void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
+void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f12, src);
} else {
@@ -4586,8 +4352,7 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
}
}
-
-void MacroAssembler::MovToFloatResult(DoubleRegister src) {
+void TurboAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f0, src);
} else {
@@ -4599,8 +4364,7 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
}
}
-
-void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
if (src2.is(f12)) {
@@ -4626,7 +4390,7 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
@@ -4854,7 +4618,6 @@ void MacroAssembler::InvokeFunction(Register function,
lw(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- sra(expected_reg, expected_reg, kSmiTagSize);
ParameterCount expected(expected_reg);
InvokeFunctionCode(function, new_target, expected, actual, flag,
@@ -4930,16 +4693,24 @@ void MacroAssembler::GetObjectType(Register object,
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
- TypeFeedbackId ast_id,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
- cond, r1, r2, bd);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
+void TurboAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
+ Register r1, const Operand& r2,
+ BranchDelaySlot bd) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ li(at, Operand::EmbeddedCode(stub));
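+ // Operand::EmbeddedCode defers obtaining the stub's code object until the
+ // final code is assembled, so no isolate is required here.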
+ Call(at);
+}
void MacroAssembler::TailCallStub(CodeStub* stub,
Condition cond,
@@ -4949,9 +4720,8 @@ void MacroAssembler::TailCallStub(CodeStub* stub,
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
@@ -4999,22 +4769,20 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
cvt_d_w(value, value);
}
-
-static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+static inline void BranchOvfHelper(TurboAssembler* tasm, Register overflow_dst,
Label* overflow_label,
Label* no_overflow_label) {
DCHECK(overflow_label || no_overflow_label);
if (!overflow_label) {
DCHECK(no_overflow_label);
- masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ tasm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
} else {
- masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) masm->Branch(no_overflow_label);
+ tasm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) tasm->Branch(no_overflow_label);
}
}
-
-void MacroAssembler::AddBranchOvf(Register dst, Register left,
+void TurboAssembler::AddBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
if (right.is_reg()) {
@@ -5053,8 +4821,7 @@ void MacroAssembler::AddBranchOvf(Register dst, Register left,
}
}
-
-void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
+void TurboAssembler::AddBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
@@ -5111,8 +4878,7 @@ void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
}
}
-
-void MacroAssembler::SubBranchOvf(Register dst, Register left,
+void TurboAssembler::SubBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5146,8 +4912,7 @@ void MacroAssembler::SubBranchOvf(Register dst, Register left,
}
}
-
-void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
+void TurboAssembler::SubBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5190,21 +4955,21 @@ void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
-static inline void BranchOvfHelperMult(MacroAssembler* masm,
+static inline void BranchOvfHelperMult(TurboAssembler* tasm,
Register overflow_dst,
Label* overflow_label,
Label* no_overflow_label) {
DCHECK(overflow_label || no_overflow_label);
if (!overflow_label) {
DCHECK(no_overflow_label);
- masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
+ tasm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
} else {
- masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) masm->Branch(no_overflow_label);
+ tasm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) tasm->Branch(no_overflow_label);
}
}
-void MacroAssembler::MulBranchOvf(Register dst, Register left,
+void TurboAssembler::MulBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5227,7 +4992,7 @@ void MacroAssembler::MulBranchOvf(Register dst, Register left,
}
}
-void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
+void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5254,6 +5019,19 @@ void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles,
+ BranchDelaySlot bd) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(f->nargs);
+ PrepareCEntryFunction(ExternalReference(f, isolate()));
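+ // As in CallStubDelayed, the CEntry stub is created without an isolate and
+ // its code object is resolved when the code is finalized.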
+ CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
+}
+
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
@@ -5271,7 +5049,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(isolate(), 1, save_doubles);
- CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -5282,7 +5060,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryFunction(ext);
CEntryStub stub(isolate(), 1);
- CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -5346,16 +5124,14 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void MacroAssembler::Assert(Condition cc, BailoutReason reason,
- Register rs, Operand rt) {
+void TurboAssembler::Assert(Condition cc, BailoutReason reason, Register rs,
+ Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
-
-
-void MacroAssembler::Check(Condition cc, BailoutReason reason,
- Register rs, Operand rt) {
+void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
+ Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
Abort(reason);
@@ -5363,8 +5139,7 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
bind(&L);
}
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -5380,9 +5155,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
Move(a0, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -5447,13 +5219,12 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-void MacroAssembler::StubPrologue(StackFrame::Type type) {
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
li(at, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(at);
}
-
-void MacroAssembler::Prologue(bool code_pre_aging) {
+void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
@@ -5483,15 +5254,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
lw(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on mips.
- UNREACHABLE();
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
int stack_offset, fp_offset;
if (type == StackFrame::INTERNAL) {
stack_offset = -4 * kPointerSize;
@@ -5519,8 +5282,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Addu(fp, sp, Operand(fp_offset));
}
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
addiu(sp, fp, 2 * kPointerSize);
lw(ra, MemOperand(fp, 1 * kPointerSize));
lw(fp, MemOperand(fp, 0 * kPointerSize));
@@ -5577,9 +5339,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
sw(fp, MemOperand(t8));
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
sw(cp, MemOperand(t8));
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
@@ -5587,7 +5351,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
DCHECK(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
int space = FPURegister::kMaxNumRegisters * kDoubleSize;
@@ -5605,7 +5369,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(stack_space >= 0);
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
@@ -5630,16 +5394,19 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
// Clear top frame.
- li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ li(t8, Operand(ExternalReference(IsolateAddressId::kContextAddress,
+ isolate())));
lw(cp, MemOperand(t8));
}
#ifdef DEBUG
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
sw(a3, MemOperand(t8));
#endif
@@ -5663,7 +5430,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
addiu(sp, sp, 8);
}
-int MacroAssembler::ActivationFrameAlignment() {
+int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -5687,7 +5454,7 @@ void MacroAssembler::AssertStackIsAligned() {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
andi(at, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort, re-entering here.
@@ -5741,10 +5508,8 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
SmiUntag(dst, src);
}
-void MacroAssembler::JumpIfSmi(Register value,
- Label* smi_label,
- Register scratch,
- BranchDelaySlot bd) {
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
+ Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
@@ -5797,6 +5562,15 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAFixedArray, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAFixedArray, t8, Operand(FIXED_ARRAY_TYPE));
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -5819,8 +5593,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5828,21 +5601,15 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
GetObjectType(object, t8, t8);
- Label async, abort, done;
- And(t9, flags, Operand(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
- Branch(&async, equal, t9,
- Operand(static_cast<int>(SuspendFlags::kAsyncGenerator)));
+ Label done;
// Check if JSGeneratorObject
Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
- jmp(&abort);
- bind(&async);
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
- bind(&abort);
- Abort(kOperandIsASmiAndNotAGeneratorObject);
+ Abort(kOperandIsNotAGeneratorObject);
bind(&done);
}
@@ -5909,7 +5676,7 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
scratch2, failure);
}
-void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_s(dst, src1);
@@ -5947,12 +5714,12 @@ void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
}
}
-void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
-void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_s(dst, src1);
@@ -5990,12 +5757,12 @@ void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
}
}
-void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
-void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
+void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
DoubleRegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_d(dst, src1);
@@ -6033,13 +5800,13 @@ void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
}
}
-void MacroAssembler::Float64MaxOutOfLine(DoubleRegister dst,
+void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst,
DoubleRegister src1,
DoubleRegister src2) {
add_d(dst, src1, src2);
}
-void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
+void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
DoubleRegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_d(dst, src1);
@@ -6077,7 +5844,7 @@ void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
}
}
-void MacroAssembler::Float64MinOutOfLine(DoubleRegister dst,
+void TurboAssembler::Float64MinOutOfLine(DoubleRegister dst,
DoubleRegister src1,
DoubleRegister src2) {
add_d(dst, src1, src2);
@@ -6099,7 +5866,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 4;
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
num_reg_arguments += 2 * num_double_arguments;
@@ -6148,8 +5915,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiUntag(index, index);
}
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -6166,7 +5932,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp.
mov(scratch, sp);
Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -6174,14 +5940,12 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
if (IsMipsArchVariant(kMips32r6)) {
@@ -6200,26 +5964,21 @@ void MacroAssembler::CallCFunction(ExternalReference function,
}
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-void MacroAssembler::CallCFunctionHelper(Register function_base,
+void TurboAssembler::CallCFunctionHelper(Register function_base,
int16_t function_offset,
int num_reg_arguments,
int num_double_arguments) {
@@ -6236,7 +5995,7 @@ void MacroAssembler::CallCFunctionHelper(Register function_base,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
And(at, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -6272,13 +6031,8 @@ void MacroAssembler::CallCFunctionHelper(Register function_base,
#undef BRANCH_ARGS_CHECK
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+ Condition cc, Label* condition_met) {
And(scratch, object, Operand(~Page::kPageAlignmentMask));
lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
@@ -6544,7 +6298,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return candidate;
}
UNREACHABLE();
- return no_reg;
}
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 3b2539e408..7db4ad71b5 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -135,15 +135,69 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
+class TurboAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size),
+ isolate_(isolate),
+ has_double_zero_reg_set_(false) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
+
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
Isolate* isolate() const { return isolate_; }
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on mips.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue(bool code_pre_aging);
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ li(kRootRegister, Operand(roots_array_start));
+ }
+
+ // Jump unconditionally to given label.
+ // We NEED a nop in the branch delay slot, as it is used by v8, for example in
+ // CodeGenerator::ProcessDeferred().
+ // Currently the branch delay slot is filled by the MacroAssembler.
+ // Use rather b(Label) for code generation.
+ void jmp(Label* L) { Branch(L); }
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason msg);
+
+ inline bool AllowThisStubCall(CodeStub* stub);
+
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
@@ -175,6 +229,42 @@ class MacroAssembler: public Assembler {
#undef COND_TYPED_ARGS
#undef COND_ARGS
+ // Wrapper functions for the different cmp/branch types.
+ inline void BranchF32(Label* target, Label* nan, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT) {
+ BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ inline void BranchF64(Label* target, Label* nan, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT) {
+ BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+ inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2) {
+ BranchF64(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2) {
+ BranchF32(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
+ MSARegister wt, BranchDelaySlot bd = PROTECT);
+
+ void Branch(Label* L, Condition cond, Register rs, Heap::RootListIndex index,
+ BranchDelaySlot bdslot = PROTECT);
+
+ // Load int32 in the rd register.
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
@@ -193,51 +283,17 @@ class MacroAssembler: public Assembler {
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
+ void Call(Label* target);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
- bool IsNear(Label* L, Condition cond, int rs_reg);
-
- void Branch(Label* L,
- Condition cond,
- Register rs,
- Heap::RootListIndex index,
- BranchDelaySlot bdslot = PROTECT);
-
-// Number of instructions needed for calculation of switch table entry address
-#ifdef _MIPS_ARCH_MIPS32R6
- static constexpr int kSwitchTablePrologueSize = 5;
-#else
- static constexpr int kSwitchTablePrologueSize = 10;
-#endif
- // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
- // functor/function with 'Label *func(size_t index)' declaration.
- template <typename Func>
- void GenerateSwitchTable(Register index, size_t case_count,
- Func GetLabelFunction);
-#undef COND_ARGS
-
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
@@ -254,13 +310,361 @@ class MacroAssembler: public Assembler {
Register reg,
const Operand& op);
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+ void push(Register src) {
+ Addu(sp, sp, Operand(-kPointerSize));
+ sw(src, MemOperand(sp, 0));
+ }
- void Call(Label* target);
+ void Push(Register src) { push(src); }
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi* smi);
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Subu(sp, sp, Operand(2 * kPointerSize));
+ sw(src1, MemOperand(sp, 1 * kPointerSize));
+ sw(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Subu(sp, sp, Operand(3 * kPointerSize));
+ sw(src1, MemOperand(sp, 2 * kPointerSize));
+ sw(src2, MemOperand(sp, 1 * kPointerSize));
+ sw(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Subu(sp, sp, Operand(4 * kPointerSize));
+ sw(src1, MemOperand(sp, 3 * kPointerSize));
+ sw(src2, MemOperand(sp, 2 * kPointerSize));
+ sw(src3, MemOperand(sp, 1 * kPointerSize));
+ sw(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Subu(sp, sp, Operand(5 * kPointerSize));
+ sw(src1, MemOperand(sp, 4 * kPointerSize));
+ sw(src2, MemOperand(sp, 3 * kPointerSize));
+ sw(src3, MemOperand(sp, 2 * kPointerSize));
+ sw(src4, MemOperand(sp, 1 * kPointerSize));
+ sw(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ void Push(Register src, Condition cond, Register tst1, Register tst2) {
+ // Since we don't have conditional execution we use a Branch.
+ Branch(3, cond, tst1, Operand(tst2));
+ Subu(sp, sp, Operand(kPointerSize));
+ sw(src, MemOperand(sp, 0));
+ }
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPushFPU(RegList regs);
+
+ void pop(Register dst) {
+ lw(dst, MemOperand(sp, 0));
+ Addu(sp, sp, Operand(kPointerSize));
+ }
+
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ DCHECK(!src1.is(src2));
+ lw(src2, MemOperand(sp, 0 * kPointerSize));
+ lw(src1, MemOperand(sp, 1 * kPointerSize));
+ Addu(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ lw(src3, MemOperand(sp, 0 * kPointerSize));
+ lw(src2, MemOperand(sp, 1 * kPointerSize));
+ lw(src1, MemOperand(sp, 2 * kPointerSize));
+ Addu(sp, sp, 3 * kPointerSize);
+ }
+
+ void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); }
+
+ // Pops multiple values from the stack and load them in the
+ // registers specified in regs. Pop order is the opposite as in MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPopFPU(RegList regs);
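
Push writes the leftmost register to the highest address and the multi-register Pop reads back in the matching order, so the same argument list round-trips values unchanged. An illustrative fragment (register choices are arbitrary; __ is the usual masm shorthand):

  __ Push(a0, a1, a2);  // a0 lands at the highest address, a2 at the new sp.
  // ... code that may clobber a0..a2 ...
  __ Pop(a0, a1, a2);   // Restores each register from the slot Push gave it.
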
+
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+ // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
+ // may be clobbered.
+ void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
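
Lsa computes rd = rs + (rt << sa) in one macro (using the scratch register on pre-r6 cores), which is the usual way to form an element address from a base pointer and an index. A sketch with illustrative registers, assuming a1 holds an untagged index and a0 the address of element 0:

  __ Lsa(t0, a0, a1, kPointerSizeLog2);  // t0 = a0 + (a1 << 2) on 32-bit MIPS.
  __ lw(t1, MemOperand(t0, 0));          // Load the selected element.
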
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rs, const Operand& rt); \
+ void instr(Register rd, Register rs, Register rt) { \
+ instr(rd, rs, Operand(rt)); \
+ } \
+ void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
+ void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
+
+#define DEFINE_INSTRUCTION3(instr) \
+ void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
+ void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
+ instr(rd_hi, rd_lo, rs, Operand(rt)); \
+ } \
+ void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
+ instr(rd_hi, rd_lo, rs, Operand(j)); \
+ }
+
+ DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Subu);
+ DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Div);
+ DEFINE_INSTRUCTION(Divu);
+ DEFINE_INSTRUCTION(Mod);
+ DEFINE_INSTRUCTION(Modu);
+ DEFINE_INSTRUCTION(Mulh);
+ DEFINE_INSTRUCTION2(Mult);
+ DEFINE_INSTRUCTION(Mulhu);
+ DEFINE_INSTRUCTION2(Multu);
+ DEFINE_INSTRUCTION2(Div);
+ DEFINE_INSTRUCTION2(Divu);
+
+ DEFINE_INSTRUCTION3(Div);
+ DEFINE_INSTRUCTION3(Mul);
+ DEFINE_INSTRUCTION3(Mulu);
+
+ DEFINE_INSTRUCTION(And);
+ DEFINE_INSTRUCTION(Or);
+ DEFINE_INSTRUCTION(Xor);
+ DEFINE_INSTRUCTION(Nor);
+ DEFINE_INSTRUCTION2(Neg);
+
+ DEFINE_INSTRUCTION(Slt);
+ DEFINE_INSTRUCTION(Sltu);
+
+ // MIPS32 R2 instruction macro.
+ DEFINE_INSTRUCTION(Ror);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
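
Each DEFINE_INSTRUCTION expansion emits three overloads, so every macro instruction accepts an Operand, a plain register, or an int32 immediate. For example, the single DEFINE_INSTRUCTION(Addu) line above makes all of the following legal:

  __ Addu(t0, t1, Operand(t2));  // Operand form (the one actually implemented).
  __ Addu(t0, t1, t2);           // Register form, forwards to the Operand form.
  __ Addu(t0, t1, 16);           // Immediate form, wraps the int32 in an Operand.
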
+
+ void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }
+
+ void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
+
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack
+ // and add space for the four mips argument slots.
+ // After aligning the frame, non-register arguments must be stored on the
+ // stack, after the argument-slots using helper: CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // Arguments 1-4 are placed in registers a0 thru a3 respectively.
+ // Arguments 5..n are stored on the stack using the following:
+ // sw(t0, CFunctionArgumentOperand(5));
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
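
PrepareCallCFunction and CallCFunction together implement the convention described above: the first four word-sized arguments travel in a0..a3 and the stack is realigned around the call. A hedged sketch of a two-argument call (the helper ExternalReference name is made up for illustration; register choices are arbitrary):

  __ PrepareCallCFunction(2, t8);  // Align sp and reserve the argument slots.
  __ mov(a0, v0);                  // First C argument.
  __ li(a1, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(ExternalReference::some_helper_function(isolate()), 2);
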
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
+
+ // There are two ways of passing double arguments on MIPS, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
+ // See comments at the beginning of CEntryStub::Generate.
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(a1, Operand(ref));
+ }
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+
+ void CallStubDelayed(CodeStub* stub, COND_ARGS);
+#undef COND_ARGS
+
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds the answer or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input);
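
The two truncation helpers are meant to be paired: the inline attempt covers the common case and branches to done on success, while the delayed variant handles saturated results on the slow path. A sketch of that pattern (zone is whatever Zone* the surrounding code generator owns; registers are illustrative):

  Label done;
  __ TryInlineTruncateDoubleToI(v0, f12, &done);
  // The inline conversion saturated; take the out-of-line path.
  __ TruncateDoubleToIDelayed(zone, v0, f12);
  __ bind(&done);
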
+
+ // Conditional move.
+ void Movz(Register rd, Register rs, Register rt);
+ void Movn(Register rd, Register rs, Register rt);
+ void Movt(Register rd, Register rs, uint16_t cc = 0);
+ void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+ void Clz(Register rd, Register rs);
+
+ // Int64Lowering instructions
+ void AddPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
+ void SubPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
+ void ShlPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register shift);
- inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
+ void ShlPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
+ void ShrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register shift);
+
+ void ShrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
+ void SarPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register shift);
+
+ void SarPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
+ // MIPS32 R2 instruction macro.
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Seb(Register rd, Register rt);
+ void Seh(Register rd, Register rt);
+ void Neg_s(FPURegister fd, FPURegister fs);
+ void Neg_d(FPURegister fd, FPURegister fs);
+
+ // MIPS32 R6 instruction macros.
+ void Bovc(Register rt, Register rs, Label* L);
+ void Bnvc(Register rt, Register rs, Label* L);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
+ // FP32 mode: Move the general purpose register into
+ // the high part of the double-register pair.
+ // FP64 mode: Move the general-purpose register into
+ // the higher 32 bits of the 64-bit coprocessor register,
+ // while leaving the low bits unchanged.
+ void Mthc1(Register rt, FPURegister fs);
+
+ // FP32 mode: move the high part of the double-register pair into
+ // general purpose register.
+ // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
+ // general-purpose register.
+ void Mfhc1(Register rt, FPURegister fs);
+
+ void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
+ void Ulh(Register rd, const MemOperand& rs);
+ void Ulhu(Register rd, const MemOperand& rs);
+ void Ush(Register rd, const MemOperand& rs, Register scratch);
+
+ void Ulw(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+
+ void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+ void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+ void Ldc1(FPURegister fd, const MemOperand& src);
+ void Sdc1(FPURegister fs, const MemOperand& dst);
+
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
+ // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
+ // handled in out-of-line code. The specific behaviour depends on supported
+ // instructions.
+ //
+ // These functions assume (and assert) that !src1.is(src2). It is permitted
+ // for the result to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+ Label* out_of_line);
+ void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2);
+ void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2);
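
The inline Float*Max/Min macros handle the common case and branch to out_of_line for NaNs and +/-0.0, where the matching *OutOfLine helper computes the final result. A sketch of how a code generator might wire the two halves together (register choices are arbitrary):

  Label out_of_line, done;
  __ Float32Max(f0, f2, f4, &out_of_line);  // Fast path; falls through when done.
  __ Branch(&done);
  __ bind(&out_of_line);
  __ Float32MaxOutOfLine(f0, f2, f4);       // Handles the NaN / +-0.0 cases.
  __ bind(&done);
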
+
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
+ void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+
+ inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
inline void Move(Register dst, Register src) {
@@ -310,23 +714,195 @@ class MacroAssembler: public Assembler {
void Move(FPURegister dst, float imm);
void Move(FPURegister dst, double imm);
- // Conditional move.
- void Movz(Register rd, Register rs, Register rt);
- void Movn(Register rd, Register rs, Register rt);
- void Movt(Register rd, Register rs, uint16_t cc = 0);
- void Movf(Register rd, Register rs, uint16_t cc = 0);
+ // -------------------------------------------------------------------------
+ // Overflow handling functions.
+ // Usage: first call the appropriate arithmetic function, then call one of the
+ // jump functions with the overflow_dst register as the second parameter.
- void Clz(Register rd, Register rs);
+ inline void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
- // Jump unconditionally to given label.
- // We NEED a nop in the branch delay slot, as it used by v8, for example in
- // CodeGenerator::ProcessDeferred().
- // Currently the branch delay slot is filled by the MacroAssembler.
- // Use rather b(Label) for code generation.
- void jmp(Label* L) {
- Branch(L);
+ inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ inline void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ inline void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
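
Each *BranchOvf helper performs the arithmetic and the signed-overflow check in one step; the two-label forms branch on either outcome, and the single-label wrappers above branch only on one of them. An illustrative use that bails out on overflow:

  Label deopt;
  // v0 = a0 + a1, branching to deopt if the signed addition overflows.
  __ AddBranchOvf(v0, a0, Operand(a1), &deopt);
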
+
+// Number of instructions needed for calculation of switch table entry address
+#ifdef _MIPS_ARCH_MIPS32R6
+ static constexpr int kSwitchTablePrologueSize = 5;
+#else
+ static constexpr int kSwitchTablePrologueSize = 10;
+#endif
+ // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
+ // functor/function with 'Label *func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
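
GenerateSwitchTable accepts any callable that maps a case index to its Label*, so the jump-table emission stays independent of how the labels are stored. A sketch with two cases, using a lambda as the comment above suggests (the index register is chosen arbitrarily):

  Label case0, case1;
  __ GenerateSwitchTable(a0, 2,
                         [&](size_t i) { return i == 0 ? &case0 : &case1; });
  __ bind(&case0);
  // ... code for case 0 (normally ends with a branch past the other cases) ...
  __ bind(&case1);
  // ... code for case 1 ...
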
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
+ Register src1, const Operand& src2);
+
+ // If the value is a NaN, canonicalize it; otherwise, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+
+ // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
+ BranchDelaySlot bd = PROTECT);
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Alias functions for backward compatibility.
+ inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
+ FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
+ BranchF64(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2) {
+ BranchF64(bd, target, nan, cc, cmp1, cmp2);
+ }
+
+ protected:
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
+ private:
+ bool has_frame_ = false;
+ Isolate* const isolate_;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+ bool has_double_zero_reg_set_;
+
+ void CallCFunctionHelper(Register function_base, int16_t function_offset,
+ int num_reg_arguments, int num_double_arguments);
+
+ // Common implementation of BranchF functions for the different formats.
+ void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
+ MSARegister wt, BranchDelaySlot bd = PROTECT);
+
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public TurboAssembler {
+ public:
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
+
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
}
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
@@ -348,75 +924,44 @@ class MacroAssembler: public Assembler {
Branch(if_not_equal, ne, with, Operand(at));
}
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index);
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
-
// Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index);
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
+ void StoreRoot(Register source, Heap::RootListIndex index);
+ void StoreRoot(Register source, Heap::RootListIndex index, Condition cond,
+ Register src1, const Operand& src2);
// ---------------------------------------------------------------------------
// GC Support
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
+ void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
+ Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is white we jump to the
@@ -430,12 +975,8 @@ class MacroAssembler: public Assembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
+ Register object, int offset, Register value, Register scratch,
+ RAStatus ra_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -444,24 +985,14 @@ class MacroAssembler: public Assembler {
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- RAStatus ra_status,
- SaveFPRegsMode save_fp,
+ Register context, int offset, Register value, Register scratch,
+ RAStatus ra_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- ra_status,
- save_fp,
- remembered_set_action,
- smi_check,
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+ ra_status, save_fp, remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
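
RecordWriteField pairs a plain field store with the generational write barrier; the value and scratch registers are clobbered by the barrier itself. A sketch of the common store-then-notify pattern (register choices are illustrative):

  __ sw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ RecordWriteField(a1, JSObject::kElementsOffset, a2, t0,
                      kRAHasNotBeenSaved, kDontSaveFPRegs);
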
@@ -470,36 +1001,26 @@ class MacroAssembler: public Assembler {
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
- void RecordWriteForMap(
- Register object,
- Register map,
- Register dst,
- RAStatus ra_status,
- SaveFPRegsMode save_fp);
+ void RecordWriteForMap(Register object, Register map, Register dst,
+ RAStatus ra_status, SaveFPRegsMode save_fp);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
- Register object,
- Register address,
- Register value,
- RAStatus ra_status,
+ Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
-
// ---------------------------------------------------------------------------
// Inline caching support.
void GetNumberHash(Register reg0, Register scratch);
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
+ inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
// i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
@@ -510,7 +1031,6 @@ class MacroAssembler: public Assembler {
return IsNop(instr, type);
}
-
static inline int GetCodeMarker(Instr instr) {
uint32_t opcode = ((instr & kOpcodeMask));
uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
@@ -519,9 +1039,9 @@ class MacroAssembler: public Assembler {
// Return <n> if we have a sll zero_reg, zero_reg, n
// else return -1.
- bool sllzz = (opcode == SLL &&
- rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+ bool sllzz =
+ (opcode == SLL && rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)));
int type =
(sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
DCHECK((type == -1) ||
@@ -529,8 +1049,6 @@ class MacroAssembler: public Assembler {
return type;
}
-
-
// ---------------------------------------------------------------------------
// Allocation support.
@@ -541,38 +1059,20 @@ class MacroAssembler: public Assembler {
// tag_allocated_object is true the result is tagged as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
- void Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size, Register result, Register scratch1,
+ Register scratch2, Label* gc_required, AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_new,
Register scratch, Label* gc_required, AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register scratch1,
- Register scratch2, AllocationFlags flags);
-
- void FastAllocate(Register object_size, Register result, Register result_new,
- Register scratch, AllocationFlags flags);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required,
MutableMode mode = IMMUTABLE);
- void AllocateHeapNumberWithValue(Register result,
- FPURegister value,
- Register scratch1,
- Register scratch2,
+ void AllocateHeapNumberWithValue(Register result, FPURegister value,
+ Register scratch1, Register scratch2,
Label* gc_required);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
@@ -581,212 +1081,16 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // ---------------------------------------------------------------------------
- // Instruction macros.
-
-#define DEFINE_INSTRUCTION(instr) \
- void instr(Register rd, Register rs, const Operand& rt); \
- void instr(Register rd, Register rs, Register rt) { \
- instr(rd, rs, Operand(rt)); \
- } \
- void instr(Register rs, Register rt, int32_t j) { \
- instr(rs, rt, Operand(j)); \
- }
-
-#define DEFINE_INSTRUCTION2(instr) \
- void instr(Register rs, const Operand& rt); \
- void instr(Register rs, Register rt) { \
- instr(rs, Operand(rt)); \
- } \
- void instr(Register rs, int32_t j) { \
- instr(rs, Operand(j)); \
- }
-
-#define DEFINE_INSTRUCTION3(instr) \
- void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
- void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
- instr(rd_hi, rd_lo, rs, Operand(rt)); \
- } \
- void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
- instr(rd_hi, rd_lo, rs, Operand(j)); \
- }
-
- DEFINE_INSTRUCTION(Addu);
- DEFINE_INSTRUCTION(Subu);
- DEFINE_INSTRUCTION(Mul);
- DEFINE_INSTRUCTION(Div);
- DEFINE_INSTRUCTION(Divu);
- DEFINE_INSTRUCTION(Mod);
- DEFINE_INSTRUCTION(Modu);
- DEFINE_INSTRUCTION(Mulh);
- DEFINE_INSTRUCTION2(Mult);
- DEFINE_INSTRUCTION(Mulhu);
- DEFINE_INSTRUCTION2(Multu);
- DEFINE_INSTRUCTION2(Div);
- DEFINE_INSTRUCTION2(Divu);
-
- DEFINE_INSTRUCTION3(Div);
- DEFINE_INSTRUCTION3(Mul);
- DEFINE_INSTRUCTION3(Mulu);
-
- DEFINE_INSTRUCTION(And);
- DEFINE_INSTRUCTION(Or);
- DEFINE_INSTRUCTION(Xor);
- DEFINE_INSTRUCTION(Nor);
- DEFINE_INSTRUCTION2(Neg);
-
- DEFINE_INSTRUCTION(Slt);
- DEFINE_INSTRUCTION(Sltu);
-
- // MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror);
-
-#undef DEFINE_INSTRUCTION
-#undef DEFINE_INSTRUCTION2
-#undef DEFINE_INSTRUCTION3
-
- // Load Scaled Address instructions. Parameter sa (shift argument) must be
- // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
- // may be clobbered.
- void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = at);
-
void Pref(int32_t hint, const MemOperand& rs);
+ void PushObject(Handle<Object> handle);
- // ---------------------------------------------------------------------------
- // Pseudo-instructions.
-
- // Change endianness
- void ByteSwapSigned(Register dest, Register src, int operand_size);
- void ByteSwapUnsigned(Register dest, Register src, int operand_size);
-
- void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-
- void Ulh(Register rd, const MemOperand& rs);
- void Ulhu(Register rd, const MemOperand& rs);
- void Ush(Register rd, const MemOperand& rs, Register scratch);
-
- void Ulw(Register rd, const MemOperand& rs);
- void Usw(Register rd, const MemOperand& rs);
-
- void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
- void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
-
- void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
- void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
-
- void Ldc1(FPURegister fd, const MemOperand& src);
- void Sdc1(FPURegister fs, const MemOperand& dst);
-
- // Load int32 in the rd register.
- void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
- inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
- li(rd, Operand(j), mode);
- }
- void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
-
- // Push multiple registers on the stack.
- // Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses.
- void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
-
- void MultiPushFPU(RegList regs);
void MultiPushReversedFPU(RegList regs);
- void push(Register src) {
- Addu(sp, sp, Operand(-kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
- void Push(Register src) { push(src); }
-
- // Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2) {
- Subu(sp, sp, Operand(2 * kPointerSize));
- sw(src1, MemOperand(sp, 1 * kPointerSize));
- sw(src2, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3) {
- Subu(sp, sp, Operand(3 * kPointerSize));
- sw(src1, MemOperand(sp, 2 * kPointerSize));
- sw(src2, MemOperand(sp, 1 * kPointerSize));
- sw(src3, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4) {
- Subu(sp, sp, Operand(4 * kPointerSize));
- sw(src1, MemOperand(sp, 3 * kPointerSize));
- sw(src2, MemOperand(sp, 2 * kPointerSize));
- sw(src3, MemOperand(sp, 1 * kPointerSize));
- sw(src4, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push five registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4,
- Register src5) {
- Subu(sp, sp, Operand(5 * kPointerSize));
- sw(src1, MemOperand(sp, 4 * kPointerSize));
- sw(src2, MemOperand(sp, 3 * kPointerSize));
- sw(src3, MemOperand(sp, 2 * kPointerSize));
- sw(src4, MemOperand(sp, 1 * kPointerSize));
- sw(src5, MemOperand(sp, 0 * kPointerSize));
- }
-
- void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditional execution we use a Branch.
- Branch(3, cond, tst1, Operand(tst2));
- Subu(sp, sp, Operand(kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
- // Pops multiple values from the stack and load them in the
- // registers specified in regs. Pop order is the opposite as in MultiPush.
- void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
-
- void MultiPopFPU(RegList regs);
void MultiPopReversedFPU(RegList regs);
- void pop(Register dst) {
- lw(dst, MemOperand(sp, 0));
- Addu(sp, sp, Operand(kPointerSize));
- }
- void Pop(Register dst) { pop(dst); }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2) {
- DCHECK(!src1.is(src2));
- lw(src2, MemOperand(sp, 0 * kPointerSize));
- lw(src1, MemOperand(sp, 1 * kPointerSize));
- Addu(sp, sp, 2 * kPointerSize);
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3) {
- lw(src3, MemOperand(sp, 0 * kPointerSize));
- lw(src2, MemOperand(sp, 1 * kPointerSize));
- lw(src1, MemOperand(sp, 2 * kPointerSize));
- Addu(sp, sp, 3 * kPointerSize);
- }
-
- void Pop(uint32_t count = 1) {
- Addu(sp, sp, Operand(count * kPointerSize));
- }
-
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-
- // Push a standard frame, consisting of ra, fp, context and JS function.
- void PushStandardFrame(Register function_reg);
-
void PopCommonFrame(Register marker_reg = no_reg);
// Push and pop the registers that can hold pointers, as defined by the
@@ -800,147 +1104,21 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // MIPS32 R2 instruction macro.
- void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Seb(Register rd, Register rt);
- void Seh(Register rd, Register rt);
- void Neg_s(FPURegister fd, FPURegister fs);
- void Neg_d(FPURegister fd, FPURegister fs);
-
- // MIPS32 R6 instruction macros.
- void Bovc(Register rt, Register rs, Label* L);
- void Bnvc(Register rt, Register rs, Label* L);
-
- // Int64Lowering instructions
- void AddPair(Register dst_low, Register dst_high, Register left_low,
- Register left_high, Register right_low, Register right_high);
-
- void SubPair(Register dst_low, Register dst_high, Register left_low,
- Register left_high, Register right_low, Register right_high);
-
- void ShlPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register shift);
-
- void ShlPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
-
- void ShrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register shift);
-
- void ShrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
-
- void SarPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register shift);
-
- void SarPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
-
- // ---------------------------------------------------------------------------
- // FPU macros. These do not handle special cases like NaN or +- inf.
-
- // Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
-
- // Convert single to unsigned word.
- void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
-
- // Convert double to unsigned word.
- void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
-
void Trunc_w_d(FPURegister fd, FPURegister fs);
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
- // FP32 mode: Move the general purpose register into
- // the high part of the double-register pair.
- // FP64 mode: Move the general-purpose register into
- // the higher 32 bits of the 64-bit coprocessor register,
- // while leaving the low bits unchanged.
- void Mthc1(Register rt, FPURegister fs);
-
- // FP32 mode: move the high part of the double-register pair into
- // general purpose register.
- // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
- // general-purpose register.
- void Mfhc1(Register rt, FPURegister fs);
-
- void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
- FPURegister scratch);
- void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
- FPURegister scratch);
- void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
- FPURegister scratch);
- void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
- FPURegister scratch);
-
- // Wrapper functions for the different cmp/branch types.
- inline void BranchF32(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF64(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
- }
-
- // Alternate (inline) version for better readability with USE_DELAY_SLOT.
- inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF32(target, nan, cc, cmp1, cmp2, bd);
- }
-
- // Alias functions for backward compatibility.
- inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(bd, target, nan, cc, cmp1, cmp2);
- }
-
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
// If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
- void EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
- Register except_flag,
- CheckForInexactConversion check_inexact
- = kDontCheckForInexactConversion);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
- void TryInlineTruncateDoubleToI(Register result,
- DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Register result, DoubleRegister double_input);
+ void EmitFPUTruncate(
+ FPURoundingMode rounding_mode, Register result,
+ DoubleRegister double_input, Register scratch,
+ DoubleRegister double_scratch, Register except_flag,
+ CheckForInexactConversion check_inexact = kDontCheckForInexactConversion);
// Performs a truncating conversion of a heap number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
@@ -1005,9 +1183,6 @@ class MacroAssembler: public Assembler {
bool restore_context, bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
// Make sure the stack is aligned. Only emits code in debug mode.
void AssertStackIsAligned();
@@ -1031,24 +1206,9 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- li(kRootRegister, Operand(roots_array_start));
- }
-
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -1155,9 +1315,6 @@ class MacroAssembler: public Assembler {
Label* fail,
SmiCheckType smi_check_type);
- // If the value is a NaN, canonicalize the value else, do nothing.
- void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
-
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -1202,106 +1359,13 @@ class MacroAssembler: public Assembler {
Register scratch1);
// -------------------------------------------------------------------------
- // Overflow handling functions.
- // Usage: first call the appropriate arithmetic function, then call one of the
- // jump functions with the overflow_dst register as the second parameter.
-
- inline void AddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void AddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void AddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
-
- inline void SubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void SubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void SubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- // Perform a floating-point min or max operation with the
- // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
- // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
- // handled in out-of-line code. The specific behaviour depends on supported
- // instructions.
- //
- // These functions assume (and assert) that !src1.is(src2). It is permitted
- // for the result to alias either input register.
- void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
- Label* out_of_line);
- void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
- Label* out_of_line);
-
- // Generate out-of-line cases for the macros above.
- void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
- DoubleRegister src2);
- void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
- DoubleRegister src2);
-
- // -------------------------------------------------------------------------
// Runtime calls.
- // See comments at the beginning of CEntryStub::Generate.
- inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
-
- inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(a1, Operand(ref));
- }
-
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Call a code stub.
void CallStub(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
// Tail call a code stub (jump).
@@ -1344,52 +1408,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack
- // and add space for the four mips argument slots.
- // After aligning the frame, non-register arguments must be stored on the
- // stack, after the argument-slots using helper: CFunctionArgumentOperand().
- // The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
-
- // Arguments 1-4 are placed in registers a0 thru a3 respectively.
- // Arguments 5..n are stored to stack using following:
- // sw(t0, CFunctionArgumentOperand(5));
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
- void MovFromFloatResult(DoubleRegister dst);
- void MovFromFloatParameter(DoubleRegister dst);
-
- // There are two ways of passing double arguments on MIPS, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void MovToFloatParameter(DoubleRegister src);
- void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
- void MovToFloatResult(DoubleRegister src);
-
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT,
@@ -1401,11 +1419,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
const char* name;
};
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and at gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
@@ -1420,27 +1433,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
-
- // -------------------------------------------------------------------------
- // Debugging.
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
// ---------------------------------------------------------------------------
// Number utilities.
@@ -1491,14 +1483,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
mov(dst, at);
}
- void SmiUntag(Register reg) {
- sra(reg, reg, kSmiTagSize);
- }
-
- void SmiUntag(Register dst, Register src) {
- sra(dst, src, kSmiTagSize);
- }
-
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
@@ -1511,12 +1495,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
  // Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Jump the register contains a smi.
- void JumpIfSmi(Register value,
- Label* smi_label,
- Register scratch = at,
- BranchDelaySlot bd = PROTECT);
-
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value,
Label* not_smi_label,
@@ -1532,6 +1510,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void AssertNotSmi(Register object);
void AssertSmi(Register object);
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -1539,9 +1520,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1636,18 +1617,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
DecodeField<Field>(reg, reg);
}
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue(bool code_pre_aging);
-
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
- void LeaveFrame(StackFrame::Type type);
-
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -1664,48 +1636,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch_reg,
Label* no_memento_found);
- bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
-
private:
- void CallCFunctionHelper(Register function_base, int16_t function_offset,
- int num_reg_arguments, int num_double_arguments);
-
- inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
- inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
- void BranchShortHelperR6(int32_t offset, Label* L);
- void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
- bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
- bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
-
- void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
- void BranchAndLinkShortHelper(int16_t offset, Label* L,
- BranchDelaySlot bdslot);
- void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- void BranchLong(Label* L, BranchDelaySlot bdslot);
- void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-
- // Common implementation of BranchF functions for the different formats.
- void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
- void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -1731,19 +1662,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- bool generating_stub_;
- bool has_frame_;
- bool has_double_zero_reg_set_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
};
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1781,7 +1704,7 @@ class CodePatcher {
};
template <typename Func>
-void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
if (kArchVariant >= kMips32r6) {
BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 38816e9e0d..c2e20a5f77 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -907,7 +907,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[i] = 0;
}
for (int i = 0; i < kNumFPURegisters; i++) {
- FPUregisters_[i] = 0;
+ FPUregisters_[2 * i] = 0;
+ FPUregisters_[2 * i + 1] = 0; // upper part for MSA ASE
}
if (IsMipsArchVariant(kMips32r6)) {
FCSR_ = kFCSRNaN2008FlagMask;
@@ -1063,7 +1064,7 @@ void Simulator::set_dw_register(int reg, const int* dbl) {
void Simulator::set_fpu_register(int fpureg, int64_t value) {
DCHECK(IsFp64Mode());
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- FPUregisters_[fpureg] = value;
+ FPUregisters_[fpureg * 2] = value;
}
@@ -1071,7 +1072,7 @@ void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
// Set ONLY lower 32-bits, leaving upper bits untouched.
// TODO(plind): big endian issue.
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ int32_t* pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2]);
*pword = value;
}
@@ -1080,21 +1081,22 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
// Set ONLY upper 32-bits, leaving lower bits untouched.
// TODO(plind): big endian issue.
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ int32_t* phiword =
+ (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2])) + 1;
*phiword = value;
}
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<float*>(&FPUregisters_[fpureg * 2]) = value;
}
void Simulator::set_fpu_register_double(int fpureg, double value) {
if (IsFp64Mode()) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<double*>(&FPUregisters_[fpureg * 2]) = value;
} else {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
int64_t i64 = bit_cast<int64_t>(value);
@@ -1132,7 +1134,7 @@ double Simulator::get_double_from_register_pair(int reg) {
int64_t Simulator::get_fpu_register(int fpureg) const {
if (IsFp64Mode()) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return FPUregisters_[fpureg];
+ return FPUregisters_[fpureg * 2];
} else {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
uint64_t i64;
@@ -1145,32 +1147,32 @@ int64_t Simulator::get_fpu_register(int fpureg) const {
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
}
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
}
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xffffffff);
}
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg * 2]));
}
double Simulator::get_fpu_register_double(int fpureg) const {
if (IsFp64Mode()) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *bit_cast<double*>(&FPUregisters_[fpureg]);
+ return *bit_cast<double*>(&FPUregisters_[fpureg * 2]);
} else {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
int64_t i64;
@@ -1180,6 +1182,17 @@ double Simulator::get_fpu_register_double(int fpureg) const {
}
}
+template <typename T>
+void Simulator::get_msa_register(int wreg, T* value) {
+ DCHECK((wreg >= 0) && (wreg < kNumMSARegisters));
+ memcpy(value, FPUregisters_ + wreg * 2, kSimd128Size);
+}
+
+template <typename T>
+void Simulator::set_msa_register(int wreg, const T* value) {
+ DCHECK((wreg >= 0) && (wreg < kNumMSARegisters));
+ memcpy(FPUregisters_ + wreg * 2, value, kSimd128Size);
+}
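// Illustrative access pattern (the msa_reg_t union and the kMSALanes* lane
// counts are introduced elsewhere in this change; the per-lane body here is a
// placeholder, not a specific MSA opcode):
//
//   msa_reg_t ws, wd;
//   get_msa_register(instr_.WsValue(), ws.w);  // copy all 16 bytes out
//   for (int i = 0; i < kMSALanesWord; i++) {
//     wd.w[i] = ws.w[i];                       // per-lane computation
//   }
//   set_msa_register(instr_.WdValue(), wd.w);  // copy all 16 bytes back
//   TraceMSARegWr(wd.w);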
// Runtime FP routines take up to two double arguments and zero
// or one integer argument. All are constructed here,
@@ -1744,6 +1757,96 @@ void Simulator::TraceRegWr(int64_t value, TraceType t) {
}
}
+template <typename T>
+void Simulator::TraceMSARegWr(T* value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ uint8_t b[16];
+ uint16_t h[8];
+ uint32_t w[4];
+ uint64_t d[2];
+ float f[4];
+ double df[2];
+ } v;
+ memcpy(v.b, value, kSimd128Size);
+ switch (t) {
+ case BYTE:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ break;
+ case HALF:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ break;
+ case WORD:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32
+ " %" PRId32,
+ v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") flt[0..3]:%e %e %e %e",
+ v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") dbl[0..1]:%e %e",
+ v.d[0], v.d[1], icount_, v.df[0], v.df[1]);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMSARegWr(T* value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ uint8_t b[kMSALanesByte];
+ uint16_t h[kMSALanesHalf];
+ uint32_t w[kMSALanesWord];
+ uint64_t d[kMSALanesDword];
+ float f[kMSALanesWord];
+ double df[kMSALanesDword];
+ } v;
+ memcpy(v.b, value, kMSALanesByte);
+
+ if (std::is_same<T, int32_t>::value) {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32
+ " %" PRId32,
+ v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]);
+ } else if (std::is_same<T, float>::value) {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") flt[0..3]:%e %e %e %e",
+ v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]);
+ } else if (std::is_same<T, double>::value) {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") dbl[0..1]:%e %e",
+ v.d[0], v.d[1], icount_, v.df[0], v.df[1]);
+ } else {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ }
+ }
+}
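// With --trace_sim enabled, a word-format write of the lanes {1, 2, 3, 4}
// would be rendered roughly as follows on a little-endian host (the
// instruction count is illustrative):
//
//   LO: 0000000200000001 HI: 0000000400000003 (1234) int32[0..3]:1 2 3 4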
+
// TODO(plind): consider making icount_ printing a flag option.
void Simulator::TraceMemRd(int32_t addr, int32_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
@@ -3998,7 +4101,12 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Interpret sa field as 5-bit lsb of insert.
uint16_t lsb = sa();
uint16_t size = msb - lsb + 1;
- uint32_t mask = (1 << size) - 1;
+ uint32_t mask;
+ if (size < 32) {
+ mask = (1 << size) - 1;
+ } else {
+ mask = std::numeric_limits<uint32_t>::max();
+ }
alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
// Ins instr leaves result in Rt, rather than Rd.
SetResult(rt_reg(), alu_out);
@@ -4010,7 +4118,12 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
uint16_t size = msb + 1;
- uint32_t mask = (1 << size) - 1;
+ uint32_t mask;
+ if (size < 32) {
+ mask = (1 << size) - 1;
+ } else {
+ mask = std::numeric_limits<uint32_t>::max();
+ }
alu_out = (rs_u() & (mask << lsb)) >> lsb;
SetResult(rt_reg(), alu_out);
break;
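      // Why the size == 32 special case above matters: for a field spanning
      // all 32 bits, `1 << size` is undefined behaviour in C++ (the shift
      // count equals the operand width), so the all-ones mask must be spelled
      // out. A standalone sketch of the same guard (helper name illustrative):
      //
      //   uint32_t FieldMask(uint16_t size) {  // size in [1, 32]
      //     return size < 32 ? (1u << size) - 1
      //                      : std::numeric_limits<uint32_t>::max();
      //   }
      //   // FieldMask(5)  == 0x0000001F
      //   // FieldMask(32) == 0xFFFFFFFF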
@@ -4116,6 +4229,730 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
}
}
+int Simulator::DecodeMsaDataFormat() {
+ int df = -1;
+ if (instr_.IsMSABranchInstr()) {
+ switch (instr_.RsFieldRaw()) {
+ case BZ_V:
+ case BNZ_V:
+ df = MSA_VECT;
+ break;
+ case BZ_B:
+ case BNZ_B:
+ df = MSA_BYTE;
+ break;
+ case BZ_H:
+ case BNZ_H:
+ df = MSA_HALF;
+ break;
+ case BZ_W:
+ case BNZ_W:
+ df = MSA_WORD;
+ break;
+ case BZ_D:
+ case BNZ_D:
+ df = MSA_DWORD;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
+ switch (instr_.MSAMinorOpcodeField()) {
+ case kMsaMinorI5:
+ case kMsaMinorI10:
+ case kMsaMinor3R:
+ df = DF[instr_.Bits(22, 21)];
+ break;
+ case kMsaMinorMI10:
+ df = DF[instr_.Bits(1, 0)];
+ break;
+ case kMsaMinorBIT:
+ df = DF[instr_.MsaBitDf()];
+ break;
+ case kMsaMinorELM:
+ df = DF[instr_.MsaElmDf()];
+ break;
+ case kMsaMinor3RF: {
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ switch (opcode) {
+ case FEXDO:
+ case FTQ:
+ case MUL_Q:
+ case MADD_Q:
+ case MSUB_Q:
+ case MULR_Q:
+ case MADDR_Q:
+ case MSUBR_Q:
+ df = DF[1 + instr_.Bit(21)];
+ break;
+ default:
+ df = DF[2 + instr_.Bit(21)];
+ break;
+ }
+ } break;
+ case kMsaMinor2R:
+ df = DF[instr_.Bits(17, 16)];
+ break;
+ case kMsaMinor2RF:
+ df = DF[2 + instr_.Bit(16)];
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return df;
+}
+
+void Simulator::DecodeTypeMsaI8() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaI8Mask;
+ int8_t i8 = instr_.MsaImm8Value();
+ msa_reg_t ws, wd;
+
+ switch (opcode) {
+ case ANDI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ws.b[i] & i8;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case ORI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ws.b[i] | i8;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case NORI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ~(ws.b[i] | i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case XORI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ws.b[i] ^ i8;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case BMNZI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ get_msa_register(instr_.WdValue(), wd.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = (ws.b[i] & i8) | (wd.b[i] & ~i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case BMZI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ get_msa_register(instr_.WdValue(), wd.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = (ws.b[i] & ~i8) | (wd.b[i] & i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case BSELI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ get_msa_register(instr_.WdValue(), wd.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = (ws.b[i] & ~wd.b[i]) | (wd.b[i] & i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case SHF_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ int j = i % 4;
+ int k = (i8 >> (2 * j)) & 0x3;
+ wd.b[i] = ws.b[i - j + k];
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case SHF_H:
+ get_msa_register(instr_.WsValue(), ws.h);
+ for (int i = 0; i < kMSALanesHalf; i++) {
+ int j = i % 4;
+ int k = (i8 >> (2 * j)) & 0x3;
+ wd.h[i] = ws.h[i - j + k];
+ }
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ case SHF_W:
+ get_msa_register(instr_.WsValue(), ws.w);
+ for (int i = 0; i < kMSALanesWord; i++) {
+ int j = (i8 >> (2 * i)) & 0x3;
+ wd.w[i] = ws.w[j];
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
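// Worked example for the SHF_W case above (immediate values illustrative):
// each destination word i picks source lane (i8 >> (2 * i)) & 0x3, so
// i8 = 0x1B (0b00'01'10'11) yields wd.w[0..3] = {ws.w[3], ws.w[2], ws.w[1],
// ws.w[0]}, a full lane reversal, while i8 = 0xE4 (0b11'10'01'00) is the
// identity shuffle.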
+
+template <typename T>
+T Simulator::MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5) {
+ T res;
+ uint32_t ui5 = i5 & 0x1Fu;
+ uint64_t ws_u64 = static_cast<uint64_t>(ws);
+ uint64_t ui5_u64 = static_cast<uint64_t>(ui5);
+
+ switch (opcode) {
+ case ADDVI:
+ res = static_cast<T>(ws + ui5);
+ break;
+ case SUBVI:
+ res = static_cast<T>(ws - ui5);
+ break;
+ case MAXI_S:
+ res = static_cast<T>(Max(ws, static_cast<T>(i5)));
+ break;
+ case MINI_S:
+ res = static_cast<T>(Min(ws, static_cast<T>(i5)));
+ break;
+ case MAXI_U:
+ res = static_cast<T>(Max(ws_u64, ui5_u64));
+ break;
+ case MINI_U:
+ res = static_cast<T>(Min(ws_u64, ui5_u64));
+ break;
+ case CEQI:
+ res = static_cast<T>(!Compare(ws, static_cast<T>(i5)) ? -1ull : 0ull);
+ break;
+ case CLTI_S:
+ res = static_cast<T>((Compare(ws, static_cast<T>(i5)) == -1) ? -1ull
+ : 0ull);
+ break;
+ case CLTI_U:
+ res = static_cast<T>((Compare(ws_u64, ui5_u64) == -1) ? -1ull : 0ull);
+ break;
+ case CLEI_S:
+ res =
+ static_cast<T>((Compare(ws, static_cast<T>(i5)) != 1) ? -1ull : 0ull);
+ break;
+ case CLEI_U:
+ res = static_cast<T>((Compare(ws_u64, ui5_u64) != 1) ? -1ull : 0ull);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return res;
+}
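// Per-lane results of the helper above for T = int8_t (values illustrative):
//
//   opcode ADDVI,  ws =  120, i5 =  10  ->  -126  (wraps modulo 2^8)
//   opcode CEQI,   ws =    5, i5 =   5  ->    -1  (0xFF, all bits set)
//   opcode CLTI_S, ws =   -3, i5 =   2  ->    -1  (signed compare)
//   opcode CLTI_U, ws =   -3, i5 =   2  ->     0  (ws converts to a large
//                                                  unsigned value)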
+
+void Simulator::DecodeTypeMsaI5() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask;
+ msa_reg_t ws, wd;
+
+  // Sign-extend the raw 5-bit immediate to int32_t (a worked sketch of this
+  // idiom follows the function).
+ int32_t i5 = static_cast<int32_t>(instr_.MsaImm5Value() << 27) >> 27;
+
+#define MSA_I5_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ wd.elem[i] = MsaI5InstrHelper(opcode, ws.elem[i], i5); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ MSA_I5_DF(b, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ MSA_I5_DF(h, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ MSA_I5_DF(w, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ MSA_I5_DF(d, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef MSA_I5_DF
+}
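// A standalone sketch of the shift-pair sign extension used for i5 above
// (helper name illustrative):
//
//   int32_t SignExtend5(uint32_t imm5) {
//     return static_cast<int32_t>(imm5 << 27) >> 27;
//   }
//   // SignExtend5(0x0F) ==  15
//   // SignExtend5(0x10) == -16
//   // SignExtend5(0x1F) ==  -1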
+
+void Simulator::DecodeTypeMsaI10() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask;
+ if (opcode == LDI) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaELM() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaELMMask;
+ int32_t n = instr_.MsaElmNValue();
+ int32_t alu_out;
+ switch (opcode) {
+ case COPY_S:
+ case COPY_U: {
+ msa_reg_t ws;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ DCHECK(n < kMSALanesByte);
+ get_msa_register(instr_.WsValue(), ws.b);
+ alu_out = static_cast<int32_t>(ws.b[n]);
+ SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
+ break;
+ }
+ case MSA_HALF: {
+ DCHECK(n < kMSALanesHalf);
+ get_msa_register(instr_.WsValue(), ws.h);
+ alu_out = static_cast<int32_t>(ws.h[n]);
+ SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
+ break;
+ }
+ case MSA_WORD: {
+ DCHECK(n < kMSALanesWord);
+ get_msa_register(instr_.WsValue(), ws.w);
+ alu_out = static_cast<int32_t>(ws.w[n]);
+ SetResult(wd_reg(), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case INSERT: {
+ msa_reg_t wd;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ DCHECK(n < kMSALanesByte);
+ int32_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.b);
+ wd.b[n] = rs & 0xFFu;
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ DCHECK(n < kMSALanesHalf);
+ int32_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.h);
+ wd.h[n] = rs & 0xFFFFu;
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ DCHECK(n < kMSALanesWord);
+ int32_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.w);
+ wd.w[n] = rs;
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case SLDI:
+ case SPLATI:
+ case INSVE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
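// Sign vs. zero extension in the COPY cases above (lane values illustrative):
// if byte lane n of ws holds 0xFF, COPY_S writes -1 to the destination GPR
// (ws.b is int8_t, so the cast sign-extends), while COPY_U masks the result
// and writes 255. INSERT goes the other way: it keeps the remaining lanes of
// wd intact and only overwrites lane n with the low bits of the source GPR.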
+
+void Simulator::DecodeTypeMsaBIT() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaBITMask;
+
+ switch (opcode) {
+ case SLLI:
+ case SRAI:
+ case SRLI:
+ case BCLRI:
+ case BSETI:
+ case BNEGI:
+ case BINSLI:
+ case BINSRI:
+ case SAT_S:
+ case SAT_U:
+ case SRARI:
+ case SRLRI:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaMI10() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaMI10Mask;
+ if (opcode == MSA_LD) {
+ UNIMPLEMENTED();
+ } else if (opcode == MSA_ST) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsa3R() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+ switch (opcode) {
+ case SLL_MSA:
+ case SRA_MSA:
+ case SRL_MSA:
+ case BCLR:
+ case BSET:
+ case BNEG:
+ case BINSL:
+ case BINSR:
+ case ADDV:
+ case SUBV:
+ case MAX_S:
+ case MAX_U:
+ case MIN_S:
+ case MIN_U:
+ case MAX_A:
+ case MIN_A:
+ case CEQ:
+ case CLT_S:
+ case CLT_U:
+ case CLE_S:
+ case CLE_U:
+ case ADD_A:
+ case ADDS_A:
+ case ADDS_S:
+ case ADDS_U:
+ case AVE_S:
+ case AVE_U:
+ case AVER_S:
+ case AVER_U:
+ case SUBS_S:
+ case SUBS_U:
+ case SUBSUS_U:
+ case SUBSUU_S:
+ case ASUB_S:
+ case ASUB_U:
+ case MULV:
+ case MADDV:
+ case MSUBV:
+ case DIV_S_MSA:
+ case DIV_U:
+ case MOD_S:
+ case MOD_U:
+ case DOTP_S:
+ case DOTP_U:
+ case DPADD_S:
+ case DPADD_U:
+ case DPSUB_S:
+ case DPSUB_U:
+ case SLD:
+ case SPLAT:
+ case PCKEV:
+ case PCKOD:
+ case ILVL:
+ case ILVR:
+ case ILVEV:
+ case ILVOD:
+ case VSHF:
+ case SRAR:
+ case SRLR:
+ case HADD_S:
+ case HADD_U:
+ case HSUB_S:
+ case HSUB_U:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsa3RF() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ switch (opcode) {
+ case FCAF:
+ case FCUN:
+ case FCEQ:
+ case FCUEQ:
+ case FCLT:
+ case FCULT:
+ case FCLE:
+ case FCULE:
+ case FSAF:
+ case FSUN:
+ case FSEQ:
+ case FSUEQ:
+ case FSLT:
+ case FSULT:
+ case FSLE:
+ case FSULE:
+ case FADD:
+ case FSUB:
+ case FMUL:
+ case FDIV:
+ case FMADD:
+ case FMSUB:
+ case FEXP2:
+ case FEXDO:
+ case FTQ:
+ case FMIN:
+ case FMIN_A:
+ case FMAX:
+ case FMAX_A:
+ case FCOR:
+ case FCUNE:
+ case FCNE:
+ case MUL_Q:
+ case MADD_Q:
+ case MSUB_Q:
+ case FSOR:
+ case FSUNE:
+ case FSNE:
+ case MULR_Q:
+ case MADDR_Q:
+ case MSUBR_Q:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaVec() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
+ msa_reg_t wd, ws, wt;
+
+ get_msa_register(instr_.WsValue(), ws.w);
+ get_msa_register(instr_.WtValue(), wt.w);
+ if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) {
+ get_msa_register(instr_.WdValue(), wd.w);
+ }
+
+ for (int i = 0; i < kMSALanesWord; i++) {
+ switch (opcode) {
+ case AND_V:
+ wd.w[i] = ws.w[i] & wt.w[i];
+ break;
+ case OR_V:
+ wd.w[i] = ws.w[i] | wt.w[i];
+ break;
+ case NOR_V:
+ wd.w[i] = ~(ws.w[i] | wt.w[i]);
+ break;
+ case XOR_V:
+ wd.w[i] = ws.w[i] ^ wt.w[i];
+ break;
+ case BMNZ_V:
+ wd.w[i] = (wt.w[i] & ws.w[i]) | (~wt.w[i] & wd.w[i]);
+ break;
+ case BMZ_V:
+ wd.w[i] = (~wt.w[i] & ws.w[i]) | (wt.w[i] & wd.w[i]);
+ break;
+ case BSEL_V:
+ wd.w[i] = (~wd.w[i] & ws.w[i]) | (wd.w[i] & wt.w[i]);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.d);
+}
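// Bitwise-select sketch for the BSEL_V case above, one 32-bit lane (values
// illustrative): wd supplies the mask, so bits that are 0 in wd come from ws
// and bits that are 1 come from wt.
//
//   wd = 0xFFFF0000, ws = 0x11111111, wt = 0x22222222
//   (~wd & ws) | (wd & wt) == 0x00001111 | 0x22220000 == 0x22221111
//
// BMNZ_V and BMZ_V apply the same idea with wt, rather than wd, acting as
// the mask.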
+
+void Simulator::DecodeTypeMsa2R() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
+ msa_reg_t wd, ws;
+ switch (opcode) {
+ case FILL:
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = rs & 0xFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesHalf; i++) {
+ wd.h[i] = rs & 0xFFFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ int32_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesWord; i++) {
+ wd.w[i] = rs;
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PCNT:
+#define PCNT_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ uint64_t u64elem = static_cast<uint64_t>(ws.elem[i]); \
+ wd.elem[i] = base::bits::CountPopulation64(u64elem); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ PCNT_DF(ub, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ PCNT_DF(uh, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ PCNT_DF(uw, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ PCNT_DF(ud, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef PCNT_DF
+ break;
+ case NLOC:
+#define NLOC_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ const uint64_t mask = (num_of_lanes == kMSALanesDword) \
+ ? UINT64_MAX \
+ : (1ULL << (kMSARegSize / num_of_lanes)) - 1; \
+ uint64_t u64elem = static_cast<uint64_t>(~ws.elem[i]) & mask; \
+ wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \
+ (64 - kMSARegSize / num_of_lanes); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ NLOC_DF(ub, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ NLOC_DF(uh, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ NLOC_DF(uw, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ NLOC_DF(ud, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef NLOC_DF
+ break;
+ case NLZC:
+#define NLZC_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ uint64_t u64elem = static_cast<uint64_t>(ws.elem[i]); \
+ wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \
+ (64 - kMSARegSize / num_of_lanes); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ NLZC_DF(ub, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ NLZC_DF(uh, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ NLZC_DF(uw, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ NLZC_DF(ud, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef NLZC_DF
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
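// Per-lane sketch for the bit-count cases above, byte lanes (values
// illustrative):
//
//   lane = 0xF0:  PCNT -> 4 (set bits), NLZC -> 0 (leading zeros within the
//                 8-bit lane), NLOC -> 4 (leading ones)
//   lane = 0x00:  PCNT -> 0, NLZC -> 8, NLOC -> 0
//
// The "- (64 - kMSARegSize / num_of_lanes)" term subtracts the zero bits that
// the 64-bit helper counts above the lane's own width.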
+
+void Simulator::DecodeTypeMsa2RF() {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
+ switch (opcode) {
+ case FCLASS:
+ case FTRUNC_S:
+ case FTRUNC_U:
+ case FSQRT:
+ case FRSQRT:
+ case FRCP:
+ case FRINT:
+ case FLOG2:
+ case FEXUPL:
+ case FEXUPR:
+ case FFQL:
+ case FFQR:
+ case FTINT_S:
+ case FTINT_U:
+ case FFINT_S:
+ case FFINT_U:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
void Simulator::DecodeTypeRegister() {
// ---------- Execution.
switch (instr_.OpcodeFieldRaw()) {
@@ -4134,6 +4971,27 @@ void Simulator::DecodeTypeRegister() {
case SPECIAL3:
DecodeTypeRegisterSPECIAL3();
break;
+ case MSA:
+ switch (instr_.MSAMinorOpcodeField()) {
+ case kMsaMinor3R:
+ DecodeTypeMsa3R();
+ break;
+ case kMsaMinor3RF:
+ DecodeTypeMsa3RF();
+ break;
+ case kMsaMinorVEC:
+ DecodeTypeMsaVec();
+ break;
+ case kMsaMinor2R:
+ DecodeTypeMsa2R();
+ break;
+ case kMsaMinor2RF:
+ DecodeTypeMsa2RF();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
default:
UNREACHABLE();
}
@@ -4238,6 +5096,18 @@ void Simulator::DecodeTypeImmediate() {
case BC1NEZ:
BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
+ UNIMPLEMENTED();
+ break;
default:
UNREACHABLE();
}
@@ -4580,6 +5450,31 @@ void Simulator::DecodeTypeImmediate() {
SetResult(rs_reg, alu_out);
break;
}
+ case MSA:
+ switch (instr_.MSAMinorOpcodeField()) {
+ case kMsaMinorI8:
+ DecodeTypeMsaI8();
+ break;
+ case kMsaMinorI5:
+ DecodeTypeMsaI5();
+ break;
+ case kMsaMinorI10:
+ DecodeTypeMsaI10();
+ break;
+ case kMsaMinorELM:
+ DecodeTypeMsaELM();
+ break;
+ case kMsaMinorBIT:
+ DecodeTypeMsaBIT();
+ break;
+ case kMsaMinorMI10:
+ DecodeTypeMsaMI10();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 1ed96bd003..40aba780ba 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -181,6 +181,43 @@ class Simulator {
kNumFPURegisters
};
+ // MSA registers
+ enum MSARegister {
+ w0,
+ w1,
+ w2,
+ w3,
+ w4,
+ w5,
+ w6,
+ w7,
+ w8,
+ w9,
+ w10,
+ w11,
+ w12,
+ w13,
+ w14,
+ w15,
+ w16,
+ w17,
+ w18,
+ w19,
+ w20,
+ w21,
+ w22,
+ w23,
+ w24,
+ w25,
+ w26,
+ w27,
+ w28,
+ w29,
+ w30,
+ w31,
+ kNumMSARegisters
+ };
+
explicit Simulator(Isolate* isolate);
~Simulator();
@@ -213,6 +250,10 @@ class Simulator {
int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
+ template <typename T>
+ void get_msa_register(int wreg, T* value);
+ template <typename T>
+ void set_msa_register(int wreg, const T* value);
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
void set_fcsr_rounding_mode(FPURoundingMode mode);
@@ -293,6 +334,19 @@ class Simulator {
// Helpers for data value tracing.
enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE, FLOAT_DOUBLE };
+ // MSA Data Format
+ enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
+ typedef union {
+ int8_t b[kMSALanesByte];
+ uint8_t ub[kMSALanesByte];
+ int16_t h[kMSALanesHalf];
+ uint16_t uh[kMSALanesHalf];
+ int32_t w[kMSALanesWord];
+ uint32_t uw[kMSALanesWord];
+ int64_t d[kMSALanesDword];
+ uint64_t ud[kMSALanesDword];
+ } msa_reg_t;
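  // The union gives a single 128-bit MSA register several lane views of the
  // same 16 bytes, e.g. on a little-endian host (values illustrative):
  //
  //   msa_reg_t r;
  //   r.d[0] = 0x0000000200000001;  // visible as r.w[0] == 1 and r.w[1] == 2
  //   r.d[1] = 0x0000000400000003;  // visible as r.w[2] == 3 and r.w[3] == 4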
+
// Read and write memory.
inline uint32_t ReadBU(int32_t addr);
inline int32_t ReadB(int32_t addr);
@@ -313,6 +367,10 @@ class Simulator {
void TraceRegWr(int32_t value, TraceType t = WORD);
void TraceRegWr(int64_t value, TraceType t = DWORD);
+ template <typename T>
+ void TraceMSARegWr(T* value, TraceType t);
+ template <typename T>
+ void TraceMSARegWr(T* value);
void TraceMemWr(int32_t addr, int32_t value, TraceType t = WORD);
void TraceMemRd(int32_t addr, int32_t value, TraceType t = WORD);
void TraceMemWr(int32_t addr, int64_t value, TraceType t = DWORD);
@@ -352,6 +410,21 @@ class Simulator {
void DecodeTypeRegisterLRsType();
+ int DecodeMsaDataFormat();
+ void DecodeTypeMsaI8();
+ void DecodeTypeMsaI5();
+ void DecodeTypeMsaI10();
+ void DecodeTypeMsaELM();
+ void DecodeTypeMsaBIT();
+ void DecodeTypeMsaMI10();
+ void DecodeTypeMsa3R();
+ void DecodeTypeMsa3RF();
+ void DecodeTypeMsaVec();
+ void DecodeTypeMsa2R();
+ void DecodeTypeMsa2RF();
+ template <typename T>
+ T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
+
inline int32_t rs_reg() const { return instr_.RsValue(); }
inline int32_t rs() const { return get_register(rs_reg()); }
inline uint32_t rs_u() const {
@@ -369,6 +442,9 @@ class Simulator {
inline int32_t fd_reg() const { return instr_.FdValue(); }
inline int32_t sa() const { return instr_.SaValue(); }
inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
+ inline int32_t ws_reg() const { return instr_.WsValue(); }
+ inline int32_t wt_reg() const { return instr_.WtValue(); }
+ inline int32_t wd_reg() const { return instr_.WdValue(); }
inline void SetResult(int32_t rd_reg, int32_t alu_out) {
set_register(rd_reg, alu_out);
@@ -480,7 +556,9 @@ class Simulator {
// Coprocessor Registers.
// Note: FP32 mode uses only the lower 32-bit part of each element,
// the upper 32-bit is unpredictable.
- int64_t FPUregisters_[kNumFPURegisters];
+  // Note: the FPUregisters_[] array is doubled to 64 * 8B = 32 * 16B in
+  // order to support the 128-bit MSA registers.
+ int64_t FPUregisters_[kNumFPURegisters * 2];
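  // Layout sketch (register number illustrative): scalar FPU register f<i>
  // occupies FPUregisters_[2 * i], and the 128-bit MSA register w<i> aliases
  // the pair FPUregisters_[2 * i] / FPUregisters_[2 * i + 1]. In FP64 mode:
  //
  //   set_fpu_register_double(4, 1.0);  // writes FPUregisters_[8]
  //   int32_t lanes[4];
  //   get_msa_register(4, lanes);       // reads FPUregisters_[8..9] as w4;
  //                                     // lanes[0..1] hold the bits of 1.0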
// FPU control register.
uint32_t FCSR_;
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index e873e04e13..52947a60ec 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -56,21 +56,21 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
- imm64_ = immediate;
+ value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
- imm64_ = reinterpret_cast<int64_t>(f.address());
+ value_.immediate = reinterpret_cast<int64_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
- imm64_ = reinterpret_cast<intptr_t>(value);
+ value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
@@ -130,7 +130,6 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
- return NULL;
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 084d5db036..37c839ce95 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -37,6 +37,7 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/mips64/assembler-mips64-inl.h"
namespace v8 {
@@ -205,21 +206,27 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
-Operand::Operand(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- imm64_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // No relocation needed.
- imm64_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE64;
- }
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
}
+Operand Operand::EmbeddedCode(CodeStub* stub) {
+ Operand result(0, RelocInfo::CODE_TARGET);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(stub);
+ return result;
+}
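// Deferred-allocation sketch: EmbeddedNumber either folds the value into a
// Smi immediately or records a HeapObjectRequest that GetCode() later
// resolves through AllocateAndInstallRequestedHeapObjects (defined below),
// patching the allocated object's address into the code buffer (values
// illustrative):
//
//   Operand a = Operand::EmbeddedNumber(42.0);  // fits a Smi, no request
//   Operand b = Operand::EmbeddedNumber(0.5);   // records a kHeapNumber
//                                               // request carrying 0.5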
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
@@ -232,6 +239,24 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
offset_ = unit * multiplier + offset_addend;
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ set_target_value_at(isolate, pc,
+ reinterpret_cast<uint64_t>(object.location()));
+ }
+}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -270,8 +295,7 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()) {
+ : AssemblerBase(isolate_data, buffer, buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
@@ -287,14 +311,14 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
-
- ClearRecordedAstId();
}
-
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -309,7 +333,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
@@ -461,6 +485,29 @@ const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
+bool Assembler::IsMsaBranch(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ if (opcode == COP1) {
+ switch (rs_field) {
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+}
bool Assembler::IsBranch(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -474,7 +521,7 @@ bool Assembler::IsBranch(Instr instr) {
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
- (opcode == COP1 && rs_field == BC1NEZ);
+ (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
if (!isBranch && kArchVariant == kMips64r6) {
// All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
// POP30 (BNVC, BNEC, BNEZALC) are branch ops.
@@ -1434,6 +1481,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1504,6 +1552,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1540,6 +1589,7 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1548,6 +1598,7 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1555,6 +1606,7 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6);
DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1564,6 +1616,7 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1571,6 +1624,7 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1579,6 +1633,7 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1587,6 +1642,7 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -2062,31 +2118,130 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-// Helper for base-reg + offset, when offset is larger than int16.
-void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- DCHECK(is_int32(src.offset_));
+void Assembler::AdjustBaseAndOffset(MemOperand& src,
+ OffsetAccessType access_type,
+ int second_access_add_to_offset) {
+ // This method is used to adjust the base register and offset pair
+ // for a load/store when the offset doesn't fit into int16_t.
+ // It is assumed that 'base + offset' is sufficiently aligned for memory
+ // operands that are machine word in size or smaller. For doubleword-sized
+ // operands it's assumed that 'base' is a multiple of 8, while 'offset'
+ // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
+ // and spilled variables on the stack accessed relative to the stack
+ // pointer register).
+ // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
+
+ bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
+ DCHECK(second_access_add_to_offset <= 7); // Must be <= 7.
+
+ // is_int16 must be passed a signed value, hence the static cast below.
+ if (is_int16(src.offset()) &&
+ (!two_accesses || is_int16(static_cast<int32_t>(
+ src.offset() + second_access_add_to_offset)))) {
+ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
+ // value) fits into int16_t.
+ return;
+ }
+
+  // Must not overwrite the register 'base' while loading 'offset'.
+  DCHECK(!src.rm().is(at));
- if (kArchVariant == kMips64r6) {
- int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
- if (src.offset_ & kNegOffset) {
- if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
- return;
- }
+#ifdef DEBUG
+ // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
+ uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+#endif
+
+ // Do not load the whole 32-bit 'offset' if it can be represented as
+ // a sum of two 16-bit signed offsets. This can save an instruction or two.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -64KB to about +64KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForSimpleAdjustment =
+ 0x7ff8; // Max int16_t that's a multiple of 8.
+ constexpr int32_t kMaxOffsetForSimpleAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+
+ if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
+ daddiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
+ src.offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
+ src.offset() < 0) {
+ daddiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
+ src.offset_ += kMinOffsetForSimpleAdjustment;
+ } else if (kArchVariant == kMips64r6) {
+ // On r6 take advantage of the daui instruction, e.g.:
+ // daui at, base, offset_high
+ // [dahi at, 1] // When `offset` is close to +2GB.
+ // lw reg_lo, offset_low(at)
+ // [lw reg_hi, (offset_low+4)(at)] // If misaligned 64-bit load.
+ // or when offset_low+4 overflows int16_t:
+ // daui at, base, offset_high
+ // daddiu at, at, 8
+ // lw reg_lo, (offset_low-8)(at)
+ // lw reg_hi, (offset_low-4)(at)
+ int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int32_t offset_low32 = offset_low;
+ int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
+ bool increment_hi16 = offset_low < 0;
+ bool overflow_hi16 = false;
+
+ if (increment_hi16) {
+ offset_high++;
+ overflow_hi16 = (offset_high == -32768);
+ }
+ daui(at, src.rm(), static_cast<uint16_t>(offset_high));
+
+ if (overflow_hi16) {
+ dahi(at, 1);
+ }
- hi += 1;
+ if (two_accesses && !is_int16(static_cast<int32_t>(
+ offset_low32 + second_access_add_to_offset))) {
+ // Avoid overflow in the 16-bit offset of the load/store instruction when
+ // adding 4.
+ daddiu(at, at, kDoubleSize);
+ offset_low32 -= kDoubleSize;
}
- daui(at, src.rm(), hi);
- daddiu(at, at, src.offset_ & kImm16Mask);
+ src.offset_ = offset_low32;
} else {
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
+ // Do not load the whole 32-bit 'offset' if it can be represented as
+ // a sum of three 16-bit signed offsets. This can save an instruction.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -96KB to about +96KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForMediumAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ constexpr int32_t kMaxOffsetForMediumAdjustment =
+ 3 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
+ daddiu(at, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ daddiu(at, at, kMinOffsetForMediumAdjustment / 2);
+ src.offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
+ src.offset() < 0) {
+ daddiu(at, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ daddiu(at, at, -kMinOffsetForMediumAdjustment / 2);
+ src.offset_ += kMinOffsetForMediumAdjustment;
+ } else {
+ // Now that all shorter options have been exhausted, load the full 32-bit
+ // offset.
+ int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ lui(at, (loaded_offset >> kLuiShift) & kImm16Mask);
+ ori(at, at, loaded_offset & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm());
+ src.offset_ -= loaded_offset;
+ }
}
+ src.rm_ = at;
+
+ DCHECK(is_int16(src.offset()));
+ if (two_accesses) {
+ DCHECK(is_int16(
+ static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ }
+ DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
}
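
An illustrative standalone sketch (not part of this patch) of the "simple adjustment" arithmetic above, using a hypothetical SplitForSimpleAdjustment helper and example offset: an out-of-range non-negative offset is split into a one-instruction base adjustment plus a remainder that still fits in int16_t and keeps the offset's alignment modulo 8.

  #include <cassert>
  #include <cstdint>

  constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7ff8;  // Max int16_t multiple of 8.

  struct SplitOffset { int32_t base_adjustment; int32_t remaining; };

  // Splits a non-negative offset in the "simple adjustment" range into the
  // amount added to the base register and the offset left for the load/store.
  SplitOffset SplitForSimpleAdjustment(int32_t offset) {
    assert(0 <= offset && offset <= 2 * kMinOffsetForSimpleAdjustment);
    return {kMinOffsetForSimpleAdjustment, offset - kMinOffsetForSimpleAdjustment};
  }

  // Example: SplitForSimpleAdjustment(0x9000) yields {0x7ff8, 0x1008}; both parts
  // fit in int16_t and 0x1008 keeps the original offset's alignment modulo 8.
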
void Assembler::lb(Register rd, const MemOperand& rs) {
@@ -2849,26 +3004,30 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
+  // On Loongson 3A (MIPS64R2), MADD.S is actually a fused MADD.S, and this
+  // causes failures in some of the tests. Since this optimization is rarely
+  // used, and not used at all on MIPS64R6, this instruction is removed.
+ UNREACHABLE();
}
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+  // On Loongson 3A (MIPS64R2), MADD.D is actually a fused MADD.D, and this
+  // causes failures in some of the tests. Since this optimization is rarely
+  // used, and not used at all on MIPS64R6, this instruction is removed.
+ UNREACHABLE();
}
void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
+ // See explanation for instruction madd_s.
+ UNREACHABLE();
}
void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
+ // See explanation for instruction madd_d.
+ UNREACHABLE();
}
void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
@@ -3253,14 +3412,17 @@ MSA_BRANCH_LIST(MSA_BRANCH)
V(st_w, ST_W) \
V(st_d, ST_D)
-#define MSA_LD_ST(name, opcode) \
- void Assembler::name(MSARegister wd, const MemOperand& rs) { \
- if (is_int10(rs.offset())) { \
- GenInstrMsaMI10(opcode, rs.offset(), rs.rm(), wd); \
- } else { \
- LoadRegPlusOffsetToAt(rs); \
- GenInstrMsaMI10(opcode, 0, at, wd); \
- } \
+#define MSA_LD_ST(name, opcode) \
+ void Assembler::name(MSARegister wd, const MemOperand& rs) { \
+ MemOperand source = rs; \
+ AdjustBaseAndOffset(source); \
+ if (is_int10(source.offset())) { \
+ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
+ } else { \
+ DCHECK(!rs.rm().is(at)); \
+ daddiu(at, source.rm(), source.offset()); \
+ GenInstrMsaMI10(opcode, 0, at, wd); \
+ } \
}
MSA_LD_ST_LIST(MSA_LD_ST)
@@ -3815,9 +3977,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -3906,14 +4066,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
+ reloc_info_writer.Write(&rinfo);
}
}
@@ -4005,7 +4158,6 @@ Address Assembler::target_address_at(Address pc) {
}
// We should never get here, force a bad address if we do.
UNREACHABLE();
- return (Address)0x0;
}
@@ -4030,18 +4182,17 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode) {
-// There is an optimization where only 4 instructions are used to load address
-// in code on MIP64 because only 48-bits of address is effectively used.
-// It relies on fact the upper [63:48] bits are not used for virtual address
-// translation and they have to be set according to value of bit 47 in order
-// get canonical address.
+void Assembler::set_target_value_at(Isolate* isolate, Address pc,
+ uint64_t target,
+ ICacheFlushMode icache_flush_mode) {
+  // There is an optimization where only 4 instructions are used to load an
+  // address into code on MIPS64 because only 48 bits of the address are
+  // effectively used. It relies on the fact that the upper [63:48] bits are
+  // not used for virtual address translation and have to be set according to
+  // the value of bit 47 in order to get a canonical address.
Instr instr1 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRt(instr1);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
- uint64_t itarget = reinterpret_cast<uint64_t>(target);
#ifdef DEBUG
// Check we have the result from a li macro-instruction.
@@ -4056,11 +4207,11 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
// ori rt, rt, lower-16.
// dsll rt, rt, 16.
// ori rt rt, lower-16.
- *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
- *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
- | ((itarget >> 16) & kImm16Mask);
- *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
- | (itarget & kImm16Mask);
+ *p = LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
+ *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
+ ((target >> 16) & kImm16Mask);
+ *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
+ (target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
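
An illustrative sketch (not part of this patch) of how the patched lui/ori/dsll/ori sequence above decomposes a 48-bit target into the three 16-bit immediates it writes back, using a hypothetical example value:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t target = 0x0000123456789abcULL;  // Hypothetical 48-bit address.
    const unsigned hi  = (target >> 32) & 0xffff;   // 0x1234 -> lui immediate.
    const unsigned mid = (target >> 16) & 0xffff;   // 0x5678 -> first ori immediate.
    const unsigned lo  = target & 0xffff;           // 0x9abc -> second ori immediate.
    std::printf("lui %04x / ori %04x / ori %04x\n", hi, mid, lo);
    return 0;
  }
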
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index dc78b890ed..d077c6f84f 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -429,9 +429,12 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
- explicit Operand(Handle<Object> handle);
+ explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedCode(CodeStub* stub);
+
// Register.
INLINE(explicit Operand(Register rm));
@@ -440,14 +443,37 @@ class Operand BASE_EMBEDDED {
inline int64_t immediate() const {
DCHECK(!is_reg());
- return imm64_;
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+ }
+
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
}
Register rm() const { return rm_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
Register rm_;
- int64_t imm64_; // Valid if rm_ == no_reg.
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int64_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
@@ -504,7 +530,7 @@ class Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D).
//
@@ -572,9 +598,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
- static void set_target_address_at(
+ INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+ set_target_value_at(isolate, pc, reinterpret_cast<uint64_t>(target),
+ icache_flush_mode);
+ }
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
@@ -589,6 +618,10 @@ class Assembler : public AssemblerBase {
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ static void set_target_value_at(
+ Isolate* isolate, Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
@@ -1155,15 +1188,45 @@ class Assembler : public AssemblerBase {
// MSA instructions
void bz_v(MSARegister wt, int16_t offset);
+ inline void bz_v(MSARegister wt, Label* L) {
+ bz_v(wt, shifted_branch_offset(L));
+ }
void bz_b(MSARegister wt, int16_t offset);
+ inline void bz_b(MSARegister wt, Label* L) {
+ bz_b(wt, shifted_branch_offset(L));
+ }
void bz_h(MSARegister wt, int16_t offset);
+ inline void bz_h(MSARegister wt, Label* L) {
+ bz_h(wt, shifted_branch_offset(L));
+ }
void bz_w(MSARegister wt, int16_t offset);
+ inline void bz_w(MSARegister wt, Label* L) {
+ bz_w(wt, shifted_branch_offset(L));
+ }
void bz_d(MSARegister wt, int16_t offset);
+ inline void bz_d(MSARegister wt, Label* L) {
+ bz_d(wt, shifted_branch_offset(L));
+ }
void bnz_v(MSARegister wt, int16_t offset);
+ inline void bnz_v(MSARegister wt, Label* L) {
+ bnz_v(wt, shifted_branch_offset(L));
+ }
void bnz_b(MSARegister wt, int16_t offset);
+ inline void bnz_b(MSARegister wt, Label* L) {
+ bnz_b(wt, shifted_branch_offset(L));
+ }
void bnz_h(MSARegister wt, int16_t offset);
+ inline void bnz_h(MSARegister wt, Label* L) {
+ bnz_h(wt, shifted_branch_offset(L));
+ }
void bnz_w(MSARegister wt, int16_t offset);
+ inline void bnz_w(MSARegister wt, Label* L) {
+ bnz_w(wt, shifted_branch_offset(L));
+ }
void bnz_d(MSARegister wt, int16_t offset);
+ inline void bnz_d(MSARegister wt, Label* L) {
+ bnz_d(wt, shifted_branch_offset(L));
+ }
void ld_b(MSARegister wd, const MemOperand& rs);
void ld_h(MSARegister wd, const MemOperand& rs);
@@ -1763,20 +1826,6 @@ class Assembler : public AssemblerBase {
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- DCHECK(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- TypeFeedbackId RecordedAstId() {
- DCHECK(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1823,6 +1872,7 @@ class Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsMsaBranch(Instr instr);
static bool IsBc(Instr instr);
static bool IsBzc(Instr instr);
@@ -1901,13 +1951,18 @@ class Assembler : public AssemblerBase {
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
+  // Readable constants for the base and offset adjustment helper; these
+  // indicate whether, aside from the offset, another value such as offset + 4
+  // should also fit into int16_t.
+ enum class OffsetAccessType : bool {
+ SINGLE_ACCESS = false,
+ TWO_ACCESSES = true
+ };
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
+ // Helper function for memory load/store using base register and offset.
+ void AdjustBaseAndOffset(
+ MemOperand& src,
+ OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
+ int second_access_add_to_offset = 4);
inline static void set_target_internal_reference_encoded_at(Address pc,
Address target);
@@ -1990,7 +2045,7 @@ class Assembler : public AssemblerBase {
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
- static constexpr int kGap = 32;
+ static constexpr int kGap = 128;
// Repeated checking whether the trampoline pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
@@ -2253,6 +2308,23 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ protected:
+ // TODO(neis): Make private if its use can be moved out of TurboAssembler.
+ void RequestHeapObject(HeapObjectRequest request);
+
+ private:
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
friend class CodePatcher;
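
A rough self-contained sketch (not V8's actual HeapObjectRequest API; the names are invented for illustration) of the deferred-allocation pattern described in the RequestHeapObject / AllocateAndInstallRequestedHeapObjects comment above: requests are recorded with a pc offset during assembly, and the dummy handles are patched after assembly.

  #include <cstddef>
  #include <cstdint>
  #include <forward_list>
  #include <vector>

  // Invented stand-in types; only the control flow mirrors the comment above.
  struct PendingNumber { double value; size_t pc_offset; };

  class SketchAssembler {
   public:
    // During assembly: remember the request and emit placeholder handle bits.
    void EmitEmbeddedNumber(double value) {
      pending_.push_front({value, buffer_.size()});
      buffer_.resize(buffer_.size() + sizeof(void*));  // Dummy handle slot.
    }
    // After assembly: allocate for real and patch each recorded slot.
    void InstallPendingNumbers() {
      for (const PendingNumber& p : pending_) {
        void* handle = AllocateNumber(p.value);  // Stand-in for heap allocation.
        PatchHandleAt(p.pc_offset, handle);
      }
      pending_.clear();
    }

   private:
    void* AllocateNumber(double) { return nullptr; }  // Placeholder.
    void PatchHandleAt(size_t, void*) {}              // Placeholder.
    std::vector<uint8_t> buffer_;
    std::forward_list<PendingNumber> pending_;
  };
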
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 1b6b502522..dbb18b14d8 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -45,32 +45,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK((param_count == 0) ||
- a0.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments, adjust sp.
- __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
- for (int i = 0; i < param_count; ++i) {
- // Store argument to stack.
- __ Sd(descriptor.GetRegisterParameter(i),
- MemOperand(sp, (param_count - 1 - i) * kPointerSize));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@@ -872,14 +846,11 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -1028,7 +999,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
__ Ld(a2, MemOperand(a2));
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -1056,15 +1027,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set v0 to
// contain the current pending exception, don't clobber it.
@@ -1141,7 +1112,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
StackFrame::Type marker = type();
__ li(a6, Operand(StackFrame::TypeToMarker(marker)));
__ li(a5, Operand(StackFrame::TypeToMarker(marker)));
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
+ ExternalReference c_entry_fp(IsolateAddressId::kCEntryFPAddress, isolate);
__ li(a4, Operand(c_entry_fp));
__ Ld(a4, MemOperand(a4));
__ Push(a7, a6, a5, a4);
@@ -1166,7 +1137,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate);
__ li(a5, Operand(ExternalReference(js_entry_sp)));
__ Ld(a6, MemOperand(a5));
__ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
@@ -1189,8 +1160,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
+ __ li(a4, Operand(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate)));
__ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
__ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
@@ -1250,7 +1221,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(a5);
- __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ __ li(a4, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate)));
__ Sd(a5, MemOperand(a4));
@@ -1633,34 +1604,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : left
- // -- a0 : right
- // -- ra : return address
- // -----------------------------------
-
- // Load a2 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ li(a2, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ And(at, a2, Operand(kSmiTagMask));
- __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
- __ Ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -2134,7 +2077,8 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ bind(&good);
// Restore the properties.
- __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask =
@@ -2142,7 +2086,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
a2.bit() | a1.bit() | a0.bit() | v0.bit());
__ MultiPush(spill_mask);
- __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ li(a1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2357,10 +2301,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -2377,6 +2322,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
+#endif
// Get the value from the slot.
__ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -2428,20 +2374,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ Ld(a1, MemOperand(fp, parameter_count_offset));
- if (function_mode() == JS_FUNCTION_STUB_MODE) {
- __ Daddu(a1, a1, Operand(1));
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ tasm->push(ra);
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ tasm->pop(ra);
}
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ dsll(a1, a1, kPointerSizeLog2);
- __ Ret(USE_DELAY_SLOT);
- __ Daddu(sp, sp, a1);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -2482,7 +2421,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(s5, sp);
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ And(sp, sp, Operand(-frame_alignment));
}
@@ -2525,8 +2464,8 @@ static void CreateArrayDispatch(MacroAssembler* masm,
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(masm->isolate(), kind);
@@ -2548,22 +2487,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// a0 - number of arguments
// a1 - constructor?
// sp[0] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ And(at, a3, Operand(1));
- __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
- }
- // look at the first argument
- __ Ld(a5, MemOperand(sp, 0));
- __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2573,13 +2502,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ // is the low bit set? If so, we are holey and that is good.
+ Label normal_sequence;
+ __ And(at, a3, Operand(1));
+ __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ Daddu(a3, a3, Operand(1));
@@ -2594,13 +2522,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ Ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a4, FieldMemOperand(
+ a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ Sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Sd(a4, FieldMemOperand(
+ a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
@@ -2617,13 +2547,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2637,7 +2567,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2704,7 +2634,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ Ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a3, FieldMemOperand(
+ a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2782,19 +2713,18 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
- __ Assert(
- eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
- a3, Operand(FAST_HOLEY_ELEMENTS));
+ __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray, a3,
+ Operand(HOLEY_ELEMENTS));
__ bind(&done);
}
Label fast_elements_case;
- __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 6bd0b7a7d9..110a3e8f10 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -545,7 +545,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ nop();
}
CodeDesc desc;
- masm.GetCode(&desc);
+  masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
@@ -573,7 +573,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index eb9fe4573d..7215da54fb 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -122,6 +122,11 @@ const int kInvalidMSARegister = -1;
const int kInvalidMSAControlRegister = -1;
const int kMSAIRRegister = 0;
const int kMSACSRRegister = 1;
+const int kMSARegSize = 128;
+const int kMSALanesByte = kMSARegSize / 8;
+const int kMSALanesHalf = kMSARegSize / 16;
+const int kMSALanesWord = kMSARegSize / 32;
+const int kMSALanesDword = kMSARegSize / 64;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
@@ -273,19 +278,19 @@ const int kBp3Shift = 6;
const int kBp3Bits = 3;
const int kImm16Shift = 0;
-const int kImm16Bits = 16;
+const int kImm16Bits = 16;
const int kImm18Shift = 0;
const int kImm18Bits = 18;
const int kImm19Shift = 0;
const int kImm19Bits = 19;
const int kImm21Shift = 0;
-const int kImm21Bits = 21;
+const int kImm21Bits = 21;
const int kImm26Shift = 0;
-const int kImm26Bits = 26;
+const int kImm26Bits = 26;
const int kImm28Shift = 0;
-const int kImm28Bits = 28;
+const int kImm28Bits = 28;
const int kImm32Shift = 0;
-const int kImm32Bits = 32;
+const int kImm32Bits = 32;
const int kMsaImm8Shift = 16;
const int kMsaImm8Bits = 8;
const int kMsaImm5Shift = 16;
@@ -322,30 +327,40 @@ const int kWdShift = 6;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
-const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift;
const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift;
const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift;
-const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
-const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
const int kImm5Mask = ((1 << 5) - 1);
const int kImm8Mask = ((1 << 8) - 1);
const int kImm10Mask = ((1 << 10) - 1);
const int kMsaI5I10Mask = ((7U << 23) | ((1 << 6) - 1));
-const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
-const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
-const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
-const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
-const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
+const int kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1));
+const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsaMI10Mask = (15U << 2);
+const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsaELMMask = (15U << 22);
+const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1));
+const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1));
+const int kMsaVECMask = (23U << 21);
+const int kMsa2RMask = (7U << 18);
+const int kMsa2RFMask = (15U << 17);
+const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
+const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
+const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-const int kHiMask = 0xffff << 16;
-const int kLoMask = 0xffff;
-const int kSignMask = 0x80000000;
-const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
-const int64_t kHi16MaskOf64 = (int64_t)0xffff << 48;
-const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
-const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
+const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values
+const int kLoMaskOf32 = 0xffff;
+const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values
+const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48;
+const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32;
+const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16;
const int32_t kJalRawMark = 0x00000000;
const int32_t kJRawMark = 0xf0000000;
const int32_t kJumpRawMask = 0xf0000000;
@@ -1077,6 +1092,36 @@ inline Condition NegateFpuCondition(Condition cc) {
}
}
+enum MSABranchCondition {
+ all_not_zero = 0, // Branch If All Elements Are Not Zero
+ one_elem_not_zero, // Branch If At Least One Element of Any Format Is Not
+ // Zero
+ one_elem_zero, // Branch If At Least One Element Is Zero
+ all_zero // Branch If All Elements of Any Format Are Zero
+};
+
+inline MSABranchCondition NegateMSABranchCondition(MSABranchCondition cond) {
+ switch (cond) {
+ case all_not_zero:
+ return one_elem_zero;
+ case one_elem_not_zero:
+ return all_zero;
+ case one_elem_zero:
+ return all_not_zero;
+ case all_zero:
+ return one_elem_not_zero;
+ default:
+ return cond;
+ }
+}
+
+enum MSABranchDF {
+ MSA_BRANCH_B = 0,
+ MSA_BRANCH_H,
+ MSA_BRANCH_W,
+ MSA_BRANCH_D,
+ MSA_BRANCH_V
+};
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cc) {
@@ -1903,6 +1948,16 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
case BC1:
case BC1EQZ:
case BC1NEZ:
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
return true;
break;
default:
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 804a176bce..4885b5f050 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -77,24 +77,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler());
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(a0.code(), params);
- output_frame->SetRegister(a1.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
@@ -141,7 +123,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ li(a2, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ Sd(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
@@ -323,7 +306,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
+#ifdef _MIPS_ARCH_MIPS64R6
+const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+#else
const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
+#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
@@ -332,8 +319,13 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Note that registers are still live when jumping to an entry.
Label table_start, done, trampoline_jump;
__ bind(&table_start);
+#ifdef _MIPS_ARCH_MIPS64R6
+ int kMaxEntriesBranchReach =
+ (1 << (kImm26Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+#else
int kMaxEntriesBranchReach =
(1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+#endif
if (count() <= kMaxEntriesBranchReach) {
// Common case.
@@ -341,9 +333,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
- __ nop();
+ if (kArchVariant == kMips64r6) {
+ __ li(at, i);
+ __ BranchShort(PROTECT, &done);
+ } else {
+ __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ __ nop();
+ }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
@@ -353,6 +350,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
__ Push(at);
} else {
+ DCHECK(kArchVariant != kMips64r6);
// Uncommon case, the branch cannot reach.
// Create mini trampoline to reach the end of the table
for (int i = 0, j = 0; i < count(); i++, j++) {
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 2ebd0ead13..debdf63215 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -60,17 +60,6 @@ class Decoder {
int InstructionDecode(byte* instruction);
private:
- const uint32_t kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1));
- const uint32_t kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1));
- const uint32_t kMsaMI10Mask = (15U << 2);
- const uint32_t kMsaBITMask = ((7U << 23) | ((1 << 6) - 1));
- const uint32_t kMsaELMMask = (15U << 22);
- const uint32_t kMsa3RMask = ((7U << 23) | ((1 << 6) - 1));
- const uint32_t kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1));
- const uint32_t kMsaVECMask = (23U << 21);
- const uint32_t kMsa2RMask = (7U << 18);
- const uint32_t kMsa2RFMask = (15U << 17);
-
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
void Print(const char* str);
@@ -701,7 +690,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 2;
}
UNREACHABLE();
- return -1;
}
@@ -747,7 +735,6 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
}
}
UNREACHABLE();
- return -1;
}
// Handle all MSARegister based formatting in this function to reduce the
@@ -769,7 +756,6 @@ int Decoder::FormatMSARegister(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
// FormatOption takes a formatting string and interprets it based on
@@ -1034,7 +1020,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
UNREACHABLE();
- return -1;
}
diff --git a/deps/v8/src/mips64/frames-mips64.cc b/deps/v8/src/mips64/frames-mips64.cc
index f8ac6bf194..5cede8de5d 100644
--- a/deps/v8/src/mips64/frames-mips64.cc
+++ b/deps/v8/src/mips64/frames-mips64.cc
@@ -18,15 +18,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 73889d2d34..fba07a4483 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -47,6 +47,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return a5; }
const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return a0; }
+
const Register ApiGetterDescriptor::HolderRegister() { return a0; }
const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
@@ -153,6 +155,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : arguments list (FixedArray)
+ // a4 : arguments list length (untagged)
+ Register registers[] = {a1, a0, a2, a4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
@@ -162,6 +174,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : the target to call
+ // a2 : the arguments list
+ Register registers[] = {a1, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : arguments list (FixedArray)
+ // a4 : arguments list length (untagged)
+ Register registers[] = {a1, a3, a0, a2, a4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
@@ -172,6 +212,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ Register registers[] = {a1, a3, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: target
@@ -367,8 +426,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
v0, // the value to pass to the generator
a1, // the JSGeneratorObject to resume
- a2, // the resume mode (tagged)
- a3 // SuspendFlags (tagged)
+ a2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 84a55d46e6..2a60ddee28 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -31,16 +31,7 @@ const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- has_double_zero_reg_set_(false),
- isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
-}
+ : TurboAssembler(isolate, buffer, size, create_code_object) {}
void MacroAssembler::Load(Register dst,
const MemOperand& src,
@@ -82,17 +73,13 @@ void MacroAssembler::Store(Register src,
}
}
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond,
- Register src1, const Operand& src2) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond, Register src1,
+ const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -114,7 +101,7 @@ void MacroAssembler::StoreRoot(Register source,
Sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
-void MacroAssembler::PushCommonFrame(Register marker_reg) {
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
Daddu(fp, sp, Operand(kPointerSize));
@@ -132,7 +119,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
}
}
-void MacroAssembler::PushStandardFrame(Register function_reg) {
+void TurboAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg);
@@ -195,62 +182,6 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
-// Helper for base-reg + offset, when offset is larger than int16.
-void MacroAssembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- DCHECK(is_int32(src.offset()));
-
- if (kArchVariant == kMips64r6) {
- int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask;
- if (src.offset() & kNegOffset) {
- if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
- lui(at, (src.offset() >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
- return;
- }
-
- hi += 1;
- }
-
- daui(at, src.rm(), hi);
- daddiu(at, at, src.offset() & kImm16Mask);
- } else {
- lui(at, (src.offset() >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
- }
-}
-
-// Helper for base-reg + upper part of offset, when offset is larger than int16.
-// Loads higher part of the offset to AT register.
-// Returns lower part of the offset to be used as offset
-// in Load/Store instructions
-int32_t MacroAssembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- DCHECK(is_int32(src.offset()));
- int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask;
- // If the highest bit of the lower part of the offset is 1, this would make
- // the offset in the load/store instruction negative. We need to compensate
- // for this by adding 1 to the upper part of the offset.
- if (src.offset() & kNegOffset) {
- if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
- LoadRegPlusOffsetToAt(src);
- return 0;
- }
-
- hi += 1;
- }
-
- if (kArchVariant == kMips64r6) {
- daui(at, src.rm(), hi);
- } else {
- lui(at, hi);
- daddu(at, at, src.rm());
- }
- return (src.offset() & kImm16Mask);
-}
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
@@ -617,12 +548,12 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// ---------------------------------------------------------------------------
// Instruction macros.
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
+ if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addiu(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -632,13 +563,12 @@ void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
daddu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
+ if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ daddiu(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -648,43 +578,55 @@ void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- addiu(rd, rs, static_cast<int32_t>(
- -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
+ DCHECK(is_int32(rt.immediate()));
+ if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
+ addiu(rd, rs,
+ static_cast<int32_t>(
+ -rt.immediate())); // No subiu instr, use addiu(x, y, -imm).
} else {
- // li handles the relocation.
DCHECK(!rs.is(at));
- li(at, rt);
- subu(rd, rs, at);
+ if (-rt.immediate() >> 16 == 0 && !MustUseReg(rt.rmode())) {
+ // Use load -imm and addu when loading -imm generates one instruction.
+ li(at, -rt.immediate());
+ addu(rd, rs, at);
+ } else {
+ // li handles the relocation.
+ li(at, rt);
+ subu(rd, rs, at);
+ }
}
}
}
-
-void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
dsubu(rd, rs, rt.rm());
+ } else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
+ daddiu(rd, rs,
+ static_cast<int32_t>(
+ -rt.immediate())); // No dsubiu instr, use daddiu(x, y, -imm).
} else {
- if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- daddiu(rd, rs,
- static_cast<int32_t>(
- -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
+ DCHECK(!rs.is(at));
+ int li_count = InstrCountForLi64Bit(rt.immediate());
+ int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
+ if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
+ // Use load -imm and daddu when loading -imm generates one instruction.
+ DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
+ li(at, Operand(-rt.immediate()));
+ Daddu(rd, rs, at);
} else {
// li handles the relocation.
- DCHECK(!rs.is(at));
li(at, rt);
dsubu(rd, rs, at);
}
}
}
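
An illustrative sketch (not the actual V8 code path; the helper and instruction counts are hypothetical) of the immediate-selection idea behind Subu/Dsubu above: since MIPS has no subtract-immediate instruction, the cheaper of "add the negated immediate" and "materialize the immediate and subtract" is chosen.

  #include <cstdint>
  #include <cstdio>
  #include <limits>

  bool FitsInt16(int64_t v) {
    return v >= std::numeric_limits<int16_t>::min() &&
           v <= std::numeric_limits<int16_t>::max();
  }

  // li_count / li_neg_count stand in for the instruction counts needed to
  // materialize imm and -imm (InstrCountForLi64Bit in the real code).
  const char* SubImmediateStrategy(int64_t imm, int li_count, int li_neg_count) {
    if (FitsInt16(-imm)) return "daddiu rd, rs, -imm";  // Single instruction.
    if (li_neg_count < li_count) return "li at, -imm; daddu rd, rs, at";
    return "li at, imm; dsubu rd, rs, at";              // General case.
  }

  int main() {
    // 0x8000 does not fit in int16_t, but -0x8000 does, so the negated add wins.
    std::printf("%s\n", SubImmediateStrategy(0x8000, 2, 1));
    return 0;
  }
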
-
-void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
@@ -695,8 +637,7 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
mult(rs, rt.rm());
@@ -717,8 +658,7 @@ void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
multu(rs, rt.rm());
@@ -739,8 +679,7 @@ void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmul(rd, rs, rt.rm());
@@ -761,8 +700,7 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, rt.rm());
@@ -783,8 +721,7 @@ void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mult(Register rs, const Operand& rt) {
+void TurboAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
@@ -795,8 +732,7 @@ void MacroAssembler::Mult(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dmult(Register rs, const Operand& rt) {
+void TurboAssembler::Dmult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
dmult(rs, rt.rm());
} else {
@@ -807,8 +743,7 @@ void MacroAssembler::Dmult(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Multu(Register rs, const Operand& rt) {
+void TurboAssembler::Multu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
multu(rs, rt.rm());
} else {
@@ -819,8 +754,7 @@ void MacroAssembler::Multu(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
+void TurboAssembler::Dmultu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
dmultu(rs, rt.rm());
} else {
@@ -831,8 +765,7 @@ void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Div(Register rs, const Operand& rt) {
+void TurboAssembler::Div(Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rs, rt.rm());
} else {
@@ -843,8 +776,7 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
div(rs, rt.rm());
@@ -865,8 +797,7 @@ void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
div(rs, rt.rm());
@@ -887,8 +818,7 @@ void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
divu(rs, rt.rm());
@@ -909,8 +839,7 @@ void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
+void TurboAssembler::Ddiv(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
} else {
@@ -921,8 +850,7 @@ void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
@@ -946,8 +874,7 @@ void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Divu(Register rs, const Operand& rt) {
+void TurboAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
@@ -958,8 +885,7 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
divu(rs, rt.rm());
@@ -980,8 +906,7 @@ void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
+void TurboAssembler::Ddivu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
} else {
@@ -992,8 +917,7 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
+void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
ddivu(rs, rt.rm());
@@ -1014,8 +938,7 @@ void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
@@ -1039,8 +962,7 @@ void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
@@ -1064,13 +986,12 @@ void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- andi(rd, rs, static_cast<int32_t>(rt.imm64_));
+ if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ andi(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1080,13 +1001,12 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- ori(rd, rs, static_cast<int32_t>(rt.imm64_));
+ if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ ori(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1096,13 +1016,12 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
- if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- xori(rd, rs, static_cast<int32_t>(rt.imm64_));
+ if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ xori(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1112,8 +1031,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
nor(rd, rs, rt.rm());
} else {
@@ -1124,8 +1042,7 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Neg(Register rs, const Operand& rt) {
+void TurboAssembler::Neg(Register rs, const Operand& rt) {
DCHECK(rt.is_reg());
DCHECK(!at.is(rs));
DCHECK(!at.is(rt.rm()));
@@ -1133,13 +1050,12 @@ void MacroAssembler::Neg(Register rs, const Operand& rt) {
xor_(rs, rt.rm(), at);
}
-
-void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
- slti(rd, rs, static_cast<int32_t>(rt.imm64_));
+ if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
+ slti(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1149,18 +1065,18 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
const uint64_t int16_min = std::numeric_limits<int16_t>::min();
- if (is_uint15(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+ if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
// Imm range is: [0, 32767].
- sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
- } else if (is_uint15(rt.imm64_ - int16_min) && !MustUseReg(rt.rmode_)) {
+ sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if (is_uint15(rt.immediate() - int16_min) &&
+ !MustUseReg(rt.rmode())) {
// Imm range is: [max_unsigned-32767,max_unsigned].
- sltiu(rd, rs, static_cast<uint16_t>(rt.imm64_));
+ sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -1170,12 +1086,11 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
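The new Sltu immediate path uses both halves of sltiu's encoding: because the 16-bit immediate is sign-extended before the unsigned compare, values in [0, 32767] and values within 32767 of the unsigned maximum can both be emitted directly. A small self-contained check of that property, assuming the usual sltiu semantics (helper name is illustrative, not a V8 API):

    #include <cstdint>

    // Sketch: can an unsigned-compare immediate be encoded directly in sltiu?
    // sltiu sign-extends its 16-bit immediate to 64 bits, so two windows work:
    // [0, 0x7fff] and [0xffffffffffff8000, 0xffffffffffffffff].
    bool SltiuImmediateEncodable(uint64_t imm) {
      const uint64_t int16_min = static_cast<uint64_t>(INT16_MIN);  // sign-extended
      bool low_window = imm <= 0x7fff;
      bool high_window = (imm - int16_min) <= 0x7fff;  // wraps around for small imm
      return low_window || high_window;
    }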
-
-void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
- int64_t ror_value = rt.imm64_ % 32;
+ int64_t ror_value = rt.immediate() % 32;
if (ror_value < 0) {
ror_value += 32;
}
@@ -1183,12 +1098,11 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
-
-void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
drotrv(rd, rs, rt.rm());
} else {
- int64_t dror_value = rt.imm64_ % 64;
+ int64_t dror_value = rt.immediate() % 64;
if (dror_value < 0) dror_value += 64;
if (dror_value <= 31) {
drotr(rd, rs, dror_value);
@@ -1203,8 +1117,7 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
pref(hint, rs);
}
-
-void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
@@ -1217,8 +1130,7 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
-
-void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
+void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
@@ -1231,7 +1143,7 @@ void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
-void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
+void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bnvc(rs, rt, &skip);
@@ -1242,7 +1154,7 @@ void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
}
}
-void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
+void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bovc(rs, rt, &skip);
@@ -1256,7 +1168,7 @@ void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
// ------------Pseudo-instructions-------------
// Change endianness
-void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
operand_size == 8);
@@ -1281,7 +1193,7 @@ void MacroAssembler::ByteSwapSigned(Register dest, Register src,
}
}
-void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
if (operand_size == 1) {
@@ -1300,32 +1212,29 @@ void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
}
}
-void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
Lw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
- if (is_int16(rs.offset() + kMipsLwrOffset) &&
- is_int16(rs.offset() + kMipsLwlOffset)) {
- if (!rd.is(rs.rm())) {
- lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
- lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
- } else {
- lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
- lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
- mov(rd, at);
- }
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- lwr(rd, MemOperand(at, kMipsLwrOffset));
- lwl(rd, MemOperand(at, kMipsLwlOffset));
+ DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ if (!rd.is(source.rm())) {
+ lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
+ lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
+ } else {
+ lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ mov(rd, at);
}
}
}
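On pre-r6 cores an unaligned word is assembled from an lwr/lwl pair over a 4-byte window, so after AdjustBaseAndOffset both displacements (offset and offset + 3) must still fit the signed 16-bit field; the same shape applies to the swr/swl, ldr/ldl and sdr/sdl pairs below. A sketch of that range check under those assumptions (helper name is illustrative, not a V8 API):

    #include <cstdint>

    // Sketch: does a base+offset pair leave room for every byte touched by a
    // left/right unaligned access pair? Each displacement must fit a signed
    // 16-bit immediate; "span" is 3 for word accesses and 7 for doublewords.
    bool OffsetPairFitsInt16(int32_t offset, int32_t span) {
      auto fits = [](int64_t v) { return v >= -32768 && v <= 32767; };
      return fits(offset) && fits(static_cast<int64_t>(offset) + span);
    }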
-void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lwu(rd, rs);
} else {
@@ -1335,49 +1244,48 @@ void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
}
}
-
-void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
+ DCHECK(!rd.is(rs.rm()));
if (kArchVariant == kMips64r6) {
Sw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
- if (is_int16(rs.offset() + kMipsSwrOffset) &&
- is_int16(rs.offset() + kMipsSwlOffset)) {
- swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
- swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
- } else {
- LoadRegPlusOffsetToAt(rs);
- swr(rd, MemOperand(at, kMipsSwrOffset));
- swl(rd, MemOperand(at, kMipsSwlOffset));
- }
+ DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
+ swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
+ swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
}
-void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
Lh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
- if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ if (source.rm().is(at)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- Lbu(at, rs);
- Lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ Lb(rd, MemOperand(source.rm(), source.offset() + 1));
+ Lbu(at, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
- Lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- Lb(rd, rs);
+ Lb(rd, source);
+ Lbu(at, MemOperand(source.rm(), source.offset() + 1));
#endif
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
+ } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- Lb(rd, MemOperand(at, 1));
- Lbu(at, MemOperand(at, 0));
+ Lbu(at, source);
+ Lb(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- Lb(rd, MemOperand(at, 0));
- Lbu(at, MemOperand(at, 1));
+ Lbu(at, MemOperand(source.rm(), source.offset() + 1));
+ Lb(rd, source);
#endif
}
dsll(rd, rd, 8);
@@ -1385,29 +1293,31 @@ void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
}
}
-void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
Lhu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
- if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
+ if (source.rm().is(at)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- Lbu(at, rs);
- Lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
+ Lbu(at, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
- Lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- Lbu(rd, rs);
+ Lbu(rd, source);
+ Lbu(at, MemOperand(source.rm(), source.offset() + 1));
#endif
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
+ } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- Lbu(rd, MemOperand(at, 1));
- Lbu(at, MemOperand(at, 0));
+ Lbu(at, source);
+ Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- Lbu(rd, MemOperand(at, 0));
- Lbu(at, MemOperand(at, 1));
+ Lbu(at, MemOperand(source.rm(), source.offset() + 1));
+ Lbu(rd, source);
#endif
}
dsll(rd, rd, 8);
@@ -1415,7 +1325,7 @@ void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
}
}
-void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
DCHECK(!rs.rm().is(scratch));
@@ -1425,11 +1335,8 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
} else {
DCHECK(kArchVariant == kMips64r2);
MemOperand source = rs;
- // If offset > 16 bits, load address to at with offset 0.
- if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
- LoadRegPlusOffsetToAt(rs);
- source = MemOperand(at, 0);
- }
+ // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
if (!scratch.is(rd)) {
mov(scratch, rd);
@@ -1447,27 +1354,24 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
}
}
-void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
+void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
Ld(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
- if (is_int16(rs.offset() + kMipsLdrOffset) &&
- is_int16(rs.offset() + kMipsLdlOffset)) {
- if (!rd.is(rs.rm())) {
- ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
- ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
- } else {
- ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
- ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
- mov(rd, at);
- }
- } else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- ldr(rd, MemOperand(at, kMipsLdrOffset));
- ldl(rd, MemOperand(at, kMipsLdlOffset));
+ DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 7 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
+ if (!rd.is(source.rm())) {
+ ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
+ ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
+ } else {
+ ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+ ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+ mov(rd, at);
}
}
}
@@ -1484,22 +1388,19 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Daddu(rd, rd, scratch);
}
-void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
+void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
Sd(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
- if (is_int16(rs.offset() + kMipsSdrOffset) &&
- is_int16(rs.offset() + kMipsSdlOffset)) {
- sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
- sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
- } else {
- LoadRegPlusOffsetToAt(rs);
- sdr(rd, MemOperand(at, kMipsSdrOffset));
- sdl(rd, MemOperand(at, kMipsSdlOffset));
- }
+ DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
+ MemOperand source = rs;
+ // Adjust offset for two accesses and check if offset + 7 fits into int16_t.
+ AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
+ sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset));
+ sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset));
}
}
@@ -1512,7 +1413,7 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
-void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
Lwc1(fd, rs);
@@ -1523,7 +1424,7 @@ void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
Swc1(fd, rs);
@@ -1534,7 +1435,7 @@ void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
@@ -1546,7 +1447,7 @@ void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
@@ -1558,282 +1459,465 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
}
}
-void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- lb(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- lb(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ lb(rd, source);
}
-void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- lbu(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- lbu(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ lbu(rd, source);
}
-void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- sb(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- sb(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ sb(rd, source);
}
-void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- lh(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- lh(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ lh(rd, source);
}
-void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- lhu(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- lhu(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ lhu(rd, source);
}
-void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- sh(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- sh(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ sh(rd, source);
}
-void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- lw(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- lw(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ lw(rd, source);
}
-void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- lwu(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- lwu(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ lwu(rd, source);
}
-void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- sw(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- sw(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ sw(rd, source);
}
-void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- ld(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- ld(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ ld(rd, source);
}
-void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset())) {
- sd(rd, rs);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- sd(rd, MemOperand(at, off16));
- }
+void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
+ MemOperand source = rs;
+ AdjustBaseAndOffset(source);
+ sd(rd, source);
}
-void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset())) {
- lwc1(fd, src);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- lwc1(fd, MemOperand(at, off16));
- }
+void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp);
+ lwc1(fd, tmp);
}
-void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) {
- if (is_int16(src.offset())) {
- swc1(fs, src);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- swc1(fs, MemOperand(at, off16));
- }
+void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp);
+ swc1(fs, tmp);
}
-void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset())) {
- ldc1(fd, src);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- ldc1(fd, MemOperand(at, off16));
- }
+void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp);
+ ldc1(fd, tmp);
}
-void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- if (is_int16(src.offset())) {
- sdc1(fs, src);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- sdc1(fs, MemOperand(at, off16));
- }
+void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp);
+ sdc1(fs, tmp);
}
-void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
-static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
- if ((imm >> (bitnum - 1)) & 0x1) {
- imm = (imm >> bitnum) + 1;
+static inline int InstrCountForLiLower32Bit(int64_t value) {
+ if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
+ (value & kImm16Mask)) {
+ return 2;
} else {
- imm = imm >> bitnum;
+ return 1;
}
- return imm;
}
-bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
- bool higher_bits_sign_extended = false;
- if (is_int16(j.imm64_)) {
- daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
- } else if (!(j.imm64_ & kHiMask)) {
- ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
- } else if (!(j.imm64_ & kImm16Mask)) {
- lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
- if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
- higher_bits_sign_extended = true;
+void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
+ if (is_int16(static_cast<int32_t>(j.immediate()))) {
+ daddiu(rd, zero_reg, (j.immediate() & kImm16Mask));
+ } else if (!(j.immediate() & kUpper16MaskOf64)) {
+ ori(rd, zero_reg, j.immediate() & kImm16Mask);
+ } else {
+ lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
+ if (j.immediate() & kImm16Mask) {
+ ori(rd, rd, j.immediate() & kImm16Mask);
}
+ }
+}
+
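LiLower32BitHelper picks the shortest of three encodings for a 32-bit value: daddiu for sign-extendable 16-bit values, ori when only the low 16 bits are set, and lui (optionally followed by ori) otherwise. A standalone classification of those cases, mirroring the checks above (illustrative names, not V8 code):

    #include <cstdint>

    // Sketch of the lower-32-bit materialization choices: daddiu sign-extends
    // a 16-bit immediate, ori zero-extends one, and lui fills bits 16..31.
    enum class Lower32Form { kDaddiu, kOri, kLui, kLuiOri };

    Lower32Form ClassifyLower32(int32_t value) {
      if (value >= -0x8000 && value < 0x8000) return Lower32Form::kDaddiu;
      if ((static_cast<uint32_t>(value) & 0xffff0000u) == 0) return Lower32Form::kOri;
      if ((value & 0xffff) == 0) return Lower32Form::kLui;
      return Lower32Form::kLuiOri;  // lui for bits 16..31, then ori for bits 0..15
    }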
+static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
+ uint32_t x = static_cast<uint32_t>(value);
+ uint32_t y = static_cast<uint32_t>(value >> 32);
+
+ if (x == y) {
+ return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
+ }
+
+ return INT_MAX;
+}
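InstrCountForLoadReplicatedConst32 spots 64-bit constants whose two 32-bit halves are identical: the low half is loaded once and duplicated into the high half with a single dins, which the rep32_count checks further down rely on. A self-contained model of that count, assuming the 1-versus-2 instruction rule for the low half (illustrative, not V8 code):

    #include <climits>
    #include <cstdint>

    // Sketch of the replicated-constant heuristic: if both 32-bit halves are
    // equal, load the low half (1 or 2 instructions) and copy it into the high
    // half with one dins, for 2 or 3 instructions total; otherwise give up.
    int ReplicatedConst32InstrCount(int64_t value) {
      uint32_t lo = static_cast<uint32_t>(value);
      uint32_t hi = static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
      if (lo != hi) return INT_MAX;  // halves differ; caller falls back
      bool one_instr_low = (lo <= 0xffff) || ((lo & 0xffff) == 0);
      return one_instr_low ? 2 : 3;
    }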
+
+int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+ if (is_int32(value)) {
+ return InstrCountForLiLower32Bit(value);
} else {
- lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm64_ & kImm16Mask));
- if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
- higher_bits_sign_extended = true;
+ int bit31 = value >> 31 & 0x1;
+ if ((value & kUpper16MaskOf64) == 0 && is_int16(value >> 32) &&
+ kArchVariant == kMips64r6) {
+ return 2;
+ } else if ((value & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
+ kArchVariant == kMips64r6) {
+ return 2;
+ } else if ((value & kImm16Mask) == 0 && is_int16((value >> 32) + bit31) &&
+ kArchVariant == kMips64r6) {
+ return 2;
+ } else if ((value & kImm16Mask) == 0 &&
+ ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+ kArchVariant == kMips64r6) {
+ return 2;
+ } else if (is_int16(static_cast<int32_t>(value)) &&
+ is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
+ return 2;
+ } else if (is_int16(static_cast<int32_t>(value)) &&
+ ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+ kArchVariant == kMips64r6) {
+ return 2;
+ } else if (base::bits::IsPowerOfTwo(value + 1)) {
+ return 2;
+ } else {
+ int shift_cnt = base::bits::CountTrailingZeros64(value);
+ int rep32_count = InstrCountForLoadReplicatedConst32(value);
+ int64_t tmp = value >> shift_cnt;
+ if (is_uint16(tmp)) {
+ return 2;
+ } else if (is_int16(tmp)) {
+ return 2;
+ } else if (rep32_count < 3) {
+ return 2;
+ } else if (is_int32(tmp)) {
+ return 3;
+ } else {
+ shift_cnt = 16 + base::bits::CountTrailingZeros64(value >> 16);
+ tmp = value >> shift_cnt;
+ if (is_uint16(tmp)) {
+ return 3;
+ } else if (is_int16(tmp)) {
+ return 3;
+ } else if (rep32_count < 4) {
+ return 3;
+ } else if (kArchVariant == kMips64r6) {
+ int64_t imm = value;
+ int count = InstrCountForLiLower32Bit(imm);
+ imm = (imm >> 32) + bit31;
+ if (imm & kImm16Mask) {
+ count++;
+ }
+ imm = (imm >> 16) + (imm >> 15 & 0x1);
+ if (imm & kImm16Mask) {
+ count++;
+ }
+ return count;
+ } else {
+ if (is_int48(value)) {
+ int64_t k = value >> 16;
+ int count = InstrCountForLiLower32Bit(k) + 1;
+ if (value & kImm16Mask) {
+ count++;
+ }
+ return count;
+ } else {
+ int64_t k = value >> 32;
+ int count = InstrCountForLiLower32Bit(k);
+ if ((value >> 16) & kImm16Mask) {
+ count += 3;
+ if (value & kImm16Mask) {
+ count++;
+ }
+ } else {
+ count++;
+ if (value & kImm16Mask) {
+ count++;
+ }
+ }
+ return count;
+ }
+ }
+ }
}
}
- return higher_bits_sign_extended;
+ UNREACHABLE();
+ return INT_MAX;
}
-void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
+void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
+ DCHECK(!MustUseReg(j.rmode()));
+ DCHECK(mode == OPTIMIZE_SIZE);
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
- // Normal load of an immediate value which does not need Relocation Info.
- if (is_int32(j.imm64_)) {
- LiLower32BitHelper(rd, j);
+ // Normal load of an immediate value which does not need Relocation Info.
+ if (is_int32(j.immediate())) {
+ LiLower32BitHelper(rd, j);
+ } else {
+ int bit31 = j.immediate() >> 31 & 0x1;
+ if ((j.immediate() & kUpper16MaskOf64) == 0 &&
+ is_int16(j.immediate() >> 32) && kArchVariant == kMips64r6) {
+ // 64-bit value which consists of an unsigned 16-bit value in its
+ // least significant 32-bits, and a signed 16-bit value in its
+ // most significant 32-bits.
+ ori(rd, zero_reg, j.immediate() & kImm16Mask);
+ dahi(rd, j.immediate() >> 32 & kImm16Mask);
+ } else if ((j.immediate() & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
+ kArchVariant == kMips64r6) {
+ // 64-bit value which consists of an unsigned 16-bit value in its
+ // least significant 48-bits, and a signed 16-bit value in its
+ // most significant 16-bits.
+ ori(rd, zero_reg, j.immediate() & kImm16Mask);
+ dati(rd, j.immediate() >> 48 & kImm16Mask);
+ } else if ((j.immediate() & kImm16Mask) == 0 &&
+ is_int16((j.immediate() >> 32) + bit31) &&
+ kArchVariant == kMips64r6) {
+ // 16 LSBs (Least Significant Bits) all set to zero.
+ // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
+ lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
+ dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
+ } else if ((j.immediate() & kImm16Mask) == 0 &&
+ ((j.immediate() >> 31) & 0x1ffff) ==
+ ((0x20000 - bit31) & 0x1ffff) &&
+ kArchVariant == kMips64r6) {
+ // 16 LSBs all set to zero.
+ // 48 MSBs hold a signed value which can't be represented by signed
+ // 32-bit number, and the middle 16 bits are all zero, or all one.
+ lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
+ dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
+ } else if (is_int16(static_cast<int32_t>(j.immediate())) &&
+ is_int16((j.immediate() >> 32) + bit31) &&
+ kArchVariant == kMips64r6) {
+ // 32 LSBs contain a signed 16-bit number.
+ // 32 MSBs contain a signed 16-bit number.
+ daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
+ dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
+ } else if (is_int16(static_cast<int32_t>(j.immediate())) &&
+ ((j.immediate() >> 31) & 0x1ffff) ==
+ ((0x20000 - bit31) & 0x1ffff) &&
+ kArchVariant == kMips64r6) {
+ // 48 LSBs contain an unsigned 16-bit number.
+ // 16 MSBs contain a signed 16-bit number.
+ daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
+ dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
+ } else if (base::bits::IsPowerOfTwo(j.immediate() + 1)) {
+ // 64-bit values which have their "n" LSBs set to one, and their
+ // "64-n" MSBs set to zero. "n" must meet the restrictions 0 < n < 64.
+ int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.immediate() + 1);
+ daddiu(rd, zero_reg, -1);
+ if (shift_cnt < 32) {
+ dsrl(rd, rd, shift_cnt);
+ } else {
+ dsrl32(rd, rd, shift_cnt & 31);
+ }
} else {
- if (kArchVariant == kMips64r6) {
- int64_t imm = j.imm64_;
- bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
- imm = ShiftAndFixSignExtension(imm, 32);
- // If LUI writes 1s to higher bits, we need both DAHI/DATI.
- if ((imm & kImm16Mask) ||
- (higher_bits_sign_extended && (j.imm64_ > 0))) {
- dahi(rd, imm & kImm16Mask);
+ int shift_cnt = base::bits::CountTrailingZeros64(j.immediate());
+ int rep32_count = InstrCountForLoadReplicatedConst32(j.immediate());
+ int64_t tmp = j.immediate() >> shift_cnt;
+ if (is_uint16(tmp)) {
+ // Value can be computed by loading a 16-bit unsigned value, and
+ // then shifting left.
+ ori(rd, zero_reg, tmp & kImm16Mask);
+ if (shift_cnt < 32) {
+ dsll(rd, rd, shift_cnt);
+ } else {
+ dsll32(rd, rd, shift_cnt & 31);
+ }
+ } else if (is_int16(tmp)) {
+ // Value can be computed by loading a 16-bit signed value, and
+ // then shifting left.
+ daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
+ if (shift_cnt < 32) {
+ dsll(rd, rd, shift_cnt);
+ } else {
+ dsll32(rd, rd, shift_cnt & 31);
}
- imm = ShiftAndFixSignExtension(imm, 16);
- if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
- (higher_bits_sign_extended && (j.imm64_ > 0))) {
- dati(rd, imm & kImm16Mask);
+ } else if (rep32_count < 3) {
+ // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+ // value loaded into the 32 LSBs can be loaded with a single
+ // MIPS instruction.
+ LiLower32BitHelper(rd, j);
+ Dins(rd, rd, 32, 32);
+ } else if (is_int32(tmp)) {
+ // Loads with 3 instructions.
+ // Value can be computed by loading a 32-bit signed value, and
+ // then shifting left.
+ lui(rd, tmp >> kLuiShift & kImm16Mask);
+ ori(rd, rd, tmp & kImm16Mask);
+ if (shift_cnt < 32) {
+ dsll(rd, rd, shift_cnt);
+ } else {
+ dsll32(rd, rd, shift_cnt & 31);
}
} else {
- if (is_int48(j.imm64_)) {
- if ((j.imm64_ >> 32) & kImm16Mask) {
- lui(rd, (j.imm64_ >> 32) & kImm16Mask);
- if ((j.imm64_ >> 16) & kImm16Mask) {
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
- }
+ shift_cnt = 16 + base::bits::CountTrailingZeros64(j.immediate() >> 16);
+ tmp = j.immediate() >> shift_cnt;
+ if (is_uint16(tmp)) {
+ // Value can be computed by loading a 16-bit unsigned value,
+ // shifting left, and "or"ing in another 16-bit unsigned value.
+ ori(rd, zero_reg, tmp & kImm16Mask);
+ if (shift_cnt < 32) {
+ dsll(rd, rd, shift_cnt);
} else {
- ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+ dsll32(rd, rd, shift_cnt & 31);
}
- dsll(rd, rd, 16);
- if (j.imm64_ & kImm16Mask) {
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ ori(rd, rd, j.immediate() & kImm16Mask);
+ } else if (is_int16(tmp)) {
+ // Value can be computed by loading a 16-bit signed value,
+ // shifting left, and "or"ing in a 16-bit unsigned value.
+ daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
+ if (shift_cnt < 32) {
+ dsll(rd, rd, shift_cnt);
+ } else {
+ dsll32(rd, rd, shift_cnt & 31);
}
- } else {
- lui(rd, (j.imm64_ >> 48) & kImm16Mask);
- if ((j.imm64_ >> 32) & kImm16Mask) {
- ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ ori(rd, rd, j.immediate() & kImm16Mask);
+ } else if (rep32_count < 4) {
+ // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+ // value in the 32 LSBs requires 2 MIPS instructions to load.
+ LiLower32BitHelper(rd, j);
+ Dins(rd, rd, 32, 32);
+ } else if (kArchVariant == kMips64r6) {
+ // Loads with 3-4 instructions.
+ // Catch-all case to get any other 64-bit values which aren't
+ // handled by special cases above.
+ int64_t imm = j.immediate();
+ LiLower32BitHelper(rd, j);
+ imm = (imm >> 32) + bit31;
+ if (imm & kImm16Mask) {
+ dahi(rd, imm & kImm16Mask);
}
- if ((j.imm64_ >> 16) & kImm16Mask) {
+ imm = (imm >> 16) + (imm >> 15 & 0x1);
+ if (imm & kImm16Mask) {
+ dati(rd, imm & kImm16Mask);
+ }
+ } else {
+ if (is_int48(j.immediate())) {
+ Operand k = Operand(j.immediate() >> 16);
+ LiLower32BitHelper(rd, k);
dsll(rd, rd, 16);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
- if (j.imm64_ & kImm16Mask) {
- dsll(rd, rd, 16);
- ori(rd, rd, j.imm64_ & kImm16Mask);
- } else {
- dsll(rd, rd, 16);
+ if (j.immediate() & kImm16Mask) {
+ ori(rd, rd, j.immediate() & kImm16Mask);
}
} else {
- if (j.imm64_ & kImm16Mask) {
- dsll32(rd, rd, 0);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ Operand k = Operand(j.immediate() >> 32);
+ LiLower32BitHelper(rd, k);
+ if ((j.immediate() >> 16) & kImm16Mask) {
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ if (j.immediate() & kImm16Mask) {
+ ori(rd, rd, j.immediate() & kImm16Mask);
+ }
} else {
dsll32(rd, rd, 0);
+ if (j.immediate() & kImm16Mask) {
+ ori(rd, rd, j.immediate() & kImm16Mask);
+ }
}
}
}
}
}
- } else if (MustUseReg(j.rmode_)) {
- RecordRelocInfo(j.rmode_, j.imm64_);
- lui(rd, (j.imm64_ >> 32) & kImm16Mask);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ }
+}
+
+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
+ int li_count = InstrCountForLi64Bit(j.immediate());
+ int li_neg_count = InstrCountForLi64Bit(-j.immediate());
+ int li_not_count = InstrCountForLi64Bit(~j.immediate());
+ // Loading -MIN_INT64 could cause problems, but loading MIN_INT64 takes only
+ // two instructions so no need to check for this.
+ if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) {
+ DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
+ li_optimized(rd, Operand(-j.immediate()), mode);
+ Dsubu(rd, zero_reg, rd);
+ } else if (li_neg_count > li_not_count && li_not_count < li_count - 1) {
+ DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
+ li_optimized(rd, Operand(~j.immediate()), mode);
+ nor(rd, rd, rd);
+ } else {
+ li_optimized(rd, j, mode);
+ }
+ } else if (MustUseReg(j.rmode())) {
+ int64_t immediate;
+ if (j.IsHeapObjectRequest()) {
+ RequestHeapObject(j.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = j.immediate();
+ }
+
+ RecordRelocInfo(j.rmode(), immediate);
+ lui(rd, (immediate >> 32) & kImm16Mask);
+ ori(rd, rd, (immediate >> 16) & kImm16Mask);
dsll(rd, rd, 16);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ ori(rd, rd, immediate & kImm16Mask);
} else if (mode == ADDRESS_LOAD) {
// We always need the same number of instructions as we may need to patch
// this code to load another value which may need all 4 instructions.
- lui(rd, (j.imm64_ >> 32) & kImm16Mask);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ lui(rd, (j.immediate() >> 32) & kImm16Mask);
+ ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
dsll(rd, rd, 16);
- ori(rd, rd, j.imm64_ & kImm16Mask);
- } else {
+ ori(rd, rd, j.immediate() & kImm16Mask);
+ } else { // mode == CONSTANT_SIZE - always emit the same instruction
+ // sequence.
if (kArchVariant == kMips64r6) {
- int64_t imm = j.imm64_;
- lui(rd, (imm >> kLuiShift) & kImm16Mask);
- if (imm & kImm16Mask) {
- ori(rd, rd, (imm & kImm16Mask));
- }
- if ((imm >> 31) & 0x1) {
- imm = (imm >> 32) + 1;
- } else {
- imm = imm >> 32;
- }
- dahi(rd, imm & kImm16Mask);
- if ((imm >> 15) & 0x1) {
- imm = (imm >> 16) + 1;
- } else {
- imm = imm >> 16;
- }
- dati(rd, imm & kImm16Mask);
+ int64_t imm = j.immediate();
+ lui(rd, imm >> kLuiShift & kImm16Mask);
+ ori(rd, rd, (imm & kImm16Mask));
+ imm = (imm >> 32) + ((imm >> 31) & 0x1);
+ dahi(rd, imm & kImm16Mask);
+ imm = (imm >> 16) + ((imm >> 15) & 0x1);
+ dati(rd, imm & kImm16Mask);
} else {
- lui(rd, (j.imm64_ >> 48) & kImm16Mask);
- ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ lui(rd, (j.immediate() >> 48) & kImm16Mask);
+ ori(rd, rd, (j.immediate() >> 32) & kImm16Mask);
dsll(rd, rd, 16);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
dsll(rd, rd, 16);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ ori(rd, rd, j.immediate() & kImm16Mask);
}
}
}
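For OPTIMIZE_SIZE loads the rewritten li first prices three candidates - the value itself, its negation, and its bitwise complement - and only takes the negate/Dsubu or invert/nor detour when the alternate constant is at least two instructions shorter, since the fix-up itself costs one. A sketch of that selection, where InstrCount stands in for InstrCountForLi64Bit (illustrative, not the patch's code):

    #include <cstdint>
    #include <limits>

    enum class LiStrategy { kDirect, kNegateThenDsubu, kInvertThenNor };

    // Sketch: choose the cheapest way to materialize a 64-bit immediate.
    LiStrategy ChooseLiStrategy(int64_t imm, int (*InstrCount)(int64_t)) {
      int direct = InstrCount(imm);
      // INT64_MIN negates to itself and already loads in two instructions, so
      // skip the tricks for it instead of computing -imm.
      if (imm == std::numeric_limits<int64_t>::min()) return LiStrategy::kDirect;
      int negated = InstrCount(-imm);
      int inverted = InstrCount(~imm);
      // A trick pays off only if the alternate constant saves at least two
      // instructions, because the trailing dsubu or nor adds one back.
      if (negated <= inverted && negated < direct - 1) return LiStrategy::kNegateThenDsubu;
      if (negated > inverted && inverted < direct - 1) return LiStrategy::kInvertThenNor;
      return LiStrategy::kDirect;
    }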
-
-void MacroAssembler::MultiPush(RegList regs) {
+void TurboAssembler::MultiPush(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
@@ -1860,8 +1944,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) {
}
}
-
-void MacroAssembler::MultiPop(RegList regs) {
+void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1886,8 +1969,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-
-void MacroAssembler::MultiPushFPU(RegList regs) {
+void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -1914,8 +1996,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
}
}
-
-void MacroAssembler::MultiPopFPU(RegList regs) {
+void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -1940,18 +2021,14 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-
-void MacroAssembler::Ext(Register rt,
- Register rs,
- uint16_t pos,
+void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 32);
DCHECK(pos + size < 33);
ext_(rt, rs, pos, size);
}
-
-void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
+void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
pos + size <= 64);
@@ -1964,10 +2041,7 @@ void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
}
}
-
-void MacroAssembler::Ins(Register rt,
- Register rs,
- uint16_t pos,
+void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 32);
DCHECK(pos + size <= 32);
@@ -1975,7 +2049,7 @@ void MacroAssembler::Ins(Register rt,
ins_(rt, rs, pos, size);
}
-void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
+void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
pos + size <= 64);
@@ -1988,7 +2062,7 @@ void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
}
}
-void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_s changes the sign for NaN-like operands as well.
neg_s(fd, fs);
@@ -2004,16 +2078,14 @@ void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
neg_s(fd, fs); // In delay slot.
bind(&is_nan);
mfc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~kBinary32SignMask));
- And(scratch1, scratch1, Operand(kBinary32SignMask));
- Xor(scratch1, scratch1, Operand(kBinary32SignMask));
- Or(scratch2, scratch2, scratch1);
- mtc1(scratch2, fd);
+ li(scratch2, kBinary32SignMask);
+ Xor(scratch1, scratch1, scratch2);
+ mtc1(scratch1, fd);
bind(&done);
}
}
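The Neg_s/Neg_d NaN path is reduced to a single Xor with the sign mask: in IEEE-754 the sign lives in one bit, so XORing it flips the sign while leaving the exponent and the NaN payload untouched, which makes the old And/Or masking unnecessary. A tiny bit-level sketch of that identity (plain helpers, not V8 code):

    #include <cstdint>

    // Sketch: flipping an IEEE-754 sign is one XOR with the sign-bit mask; the
    // exponent and mantissa (including any NaN payload) pass through unchanged.
    uint32_t FlipFloatSignBits(uint32_t bits) { return bits ^ 0x80000000u; }
    uint64_t FlipDoubleSignBits(uint64_t bits) { return bits ^ 0x8000000000000000ull; }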
-void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_d changes the sign for NaN-like operands as well.
neg_d(fd, fs);
@@ -2029,23 +2101,20 @@ void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
neg_d(fd, fs); // In delay slot.
bind(&is_nan);
dmfc1(scratch1, fs);
- And(scratch2, scratch1, Operand(~Double::kSignMask));
- And(scratch1, scratch1, Operand(Double::kSignMask));
- Xor(scratch1, scratch1, Operand(Double::kSignMask));
- Or(scratch2, scratch2, scratch1);
- dmtc1(scratch2, fd);
+ li(scratch2, Double::kSignMask);
+ Xor(scratch1, scratch1, scratch2);
+ dmtc1(scratch1, fd);
bind(&done);
}
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_d_uw(fd, t8);
}
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(!rs.is(t9));
DCHECK(!rs.is(at));
@@ -2056,15 +2125,13 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
cvt_d_l(fd, fd);
}
-
-void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_d_ul(fd, t8);
}
-
-void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(!rs.is(t9));
@@ -2091,13 +2158,13 @@ void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
bind(&conversion_done);
}
-void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_s_uw(fd, t8);
}
-void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(!rs.is(t9));
DCHECK(!rs.is(at));
@@ -2108,14 +2175,13 @@ void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
cvt_s_l(fd, fd);
}
-void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_s_ul(fd, t8);
}
-
-void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(!rs.is(t9));
@@ -2175,28 +2241,25 @@ void MacroAssembler::Trunc_l_ud(FPURegister fd,
trunc_l_d(fd, fs);
}
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd,
- FPURegister fs,
+void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
FPURegister scratch) {
Trunc_uw_d(fs, t8, scratch);
mtc1(t8, fd);
}
-void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
FPURegister scratch) {
Trunc_uw_s(fs, t8, scratch);
mtc1(t8, fd);
}
-void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
+void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
Trunc_ul_d(fs, t8, scratch, result);
dmtc1(t8, fd);
}
-
-void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
+void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
Trunc_ul_s(fs, t8, scratch, result);
dmtc1(t8, fd);
@@ -2222,9 +2285,7 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
ceil_w_d(fd, fs);
}
-
-void MacroAssembler::Trunc_uw_d(FPURegister fd,
- Register rs,
+void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
FPURegister scratch) {
DCHECK(!fd.is(scratch));
DCHECK(!rs.is(at));
@@ -2255,7 +2316,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
bind(&done);
}
-void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
FPURegister scratch) {
DCHECK(!fd.is(scratch));
DCHECK(!rs.is(at));
@@ -2285,7 +2346,7 @@ void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
bind(&done);
}
-void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
+void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
DCHECK(!fd.is(scratch));
DCHECK(!AreAliased(rs, result, at));
@@ -2332,8 +2393,7 @@ void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
bind(&fail);
}
-
-void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
+void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
DCHECK(!fd.is(scratch));
DCHECK(!AreAliased(rs, result, at));
@@ -2382,49 +2442,33 @@ void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- if (kArchVariant == kMips64r2) {
- madd_s(fd, fr, fs, ft);
- } else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
- mul_s(scratch, fs, ft);
- add_s(fd, fr, scratch);
- }
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ add_s(fd, fr, scratch);
}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- if (kArchVariant == kMips64r2) {
- madd_d(fd, fr, fs, ft);
- } else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
- mul_d(scratch, fs, ft);
- add_d(fd, fr, scratch);
- }
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ add_d(fd, fr, scratch);
}
void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- if (kArchVariant == kMips64r2) {
- msub_s(fd, fr, fs, ft);
- } else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
- mul_s(scratch, fs, ft);
- sub_s(fd, scratch, fr);
- }
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ sub_s(fd, scratch, fr);
}
void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- if (kArchVariant == kMips64r2) {
- msub_d(fd, fr, fs, ft);
- } else {
- DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
- mul_d(scratch, fs, ft);
- sub_d(fd, scratch, fr);
- }
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ sub_d(fd, scratch, fr);
}
-void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
+void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -2494,8 +2538,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
}
}
-
-void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
+void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
if (kArchVariant != kMips64r6) {
@@ -2625,22 +2668,96 @@ void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
}
}
+void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
+ MSABranchCondition cond, MSARegister wt,
+ BranchDelaySlot bd) {
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ if (target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
+ BranchShortMSA(df, &skip, neg_cond, wt, bd);
+ BranchLong(target, bd);
+ bind(&skip);
+ } else {
+ BranchShortMSA(df, target, cond, wt, bd);
+ }
+ }
+ }
+}
-void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
+void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
+ MSABranchCondition cond, MSARegister wt,
+ BranchDelaySlot bd) {
+ if (kArchVariant == kMips64r6) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target) {
+ switch (cond) {
+ case all_not_zero:
+ switch (df) {
+ case MSA_BRANCH_D:
+ bnz_d(wt, target);
+ break;
+ case MSA_BRANCH_W:
+ bnz_w(wt, target);
+ break;
+ case MSA_BRANCH_H:
+ bnz_h(wt, target);
+ break;
+ case MSA_BRANCH_B:
+ default:
+ bnz_b(wt, target);
+ }
+ break;
+ case one_elem_not_zero:
+ bnz_v(wt, target);
+ break;
+ case one_elem_zero:
+ switch (df) {
+ case MSA_BRANCH_D:
+ bz_d(wt, target);
+ break;
+ case MSA_BRANCH_W:
+ bz_w(wt, target);
+ break;
+ case MSA_BRANCH_H:
+ bz_h(wt, target);
+ break;
+ case MSA_BRANCH_B:
+ default:
+ bz_b(wt, target);
+ }
+ break;
+ case all_zero:
+ bz_v(wt, target);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ if (bd == PROTECT) {
+ nop();
+ }
+}
+
+void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
DCHECK(!src_low.is(at));
mfhc1(at, dst);
mtc1(src_low, dst);
mthc1(at, dst);
}
-
-void MacroAssembler::Move(FPURegister dst, float imm) {
+void TurboAssembler::Move(FPURegister dst, float imm) {
li(at, Operand(bit_cast<int32_t>(imm)));
mtc1(at, dst);
}
-
-void MacroAssembler::Move(FPURegister dst, double imm) {
+void TurboAssembler::Move(FPURegister dst, double imm) {
int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
@@ -2653,34 +2770,16 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
// Move the low part of the double into the lower bits of the corresponding
// FPU register.
if (lo != 0) {
- if (!(lo & kImm16Mask)) {
- lui(at, (lo >> kLuiShift) & kImm16Mask);
- mtc1(at, dst);
- } else if (!(lo & kHiMask)) {
- ori(at, zero_reg, lo & kImm16Mask);
- mtc1(at, dst);
- } else {
- lui(at, (lo >> kLuiShift) & kImm16Mask);
- ori(at, at, lo & kImm16Mask);
- mtc1(at, dst);
- }
+ li(at, lo);
+ mtc1(at, dst);
} else {
mtc1(zero_reg, dst);
}
// Move the high part of the double into the high bits of the corresponding
// FPU register.
if (hi != 0) {
- if (!(hi & kImm16Mask)) {
- lui(at, (hi >> kLuiShift) & kImm16Mask);
- mthc1(at, dst);
- } else if (!(hi & kHiMask)) {
- ori(at, zero_reg, hi & kImm16Mask);
- mthc1(at, dst);
- } else {
- lui(at, (hi >> kLuiShift) & kImm16Mask);
- ori(at, at, hi & kImm16Mask);
- mthc1(at, dst);
- }
+ li(at, hi);
+ mthc1(at, dst);
} else {
mthc1(zero_reg, dst);
}
@@ -2688,8 +2787,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
}
}
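Move(FPURegister, double) now just splits the constant's bit pattern and lets li pick the best load for each 32-bit half before mtc1/mthc1. A minimal sketch of that split, using memcpy in place of bit_cast (illustrative only):

    #include <cstdint>
    #include <cstring>

    // Sketch: split a double constant into the two 32-bit halves that feed
    // mtc1 (low bits) and mthc1 (high bits).
    void SplitDoubleBits(double imm, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));   // stands in for bit_cast
      *lo = static_cast<uint32_t>(bits);        // low 32 bits -> mtc1
      *hi = static_cast<uint32_t>(bits >> 32);  // high 32 bits -> mthc1
    }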
-
-void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
@@ -2700,8 +2798,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
}
}
-
-void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
@@ -2712,21 +2809,15 @@ void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
}
}
-
-void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
movt(rd, rs, cc);
}
-
-void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
movf(rd, rs, cc);
}
-
-void MacroAssembler::Clz(Register rd, Register rs) {
- clz(rd, rs);
-}
-
+void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
@@ -2791,8 +2882,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
bind(&done);
}
-
-void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister single_scratch = kLithiumScratchDouble.low();
@@ -2816,9 +2906,8 @@ void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
Branch(done, eq, scratch, Operand(zero_reg));
}
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DoubleRegister double_input) {
+void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -2828,8 +2917,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
- DoubleToIStub stub(isolate(), sp, result, 0, true, true);
- CallStub(&stub);
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
Daddu(sp, sp, Operand(kDoubleSize));
pop(ra);
@@ -2902,22 +2990,19 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-
-void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
-
-void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchShort(L, bdslot);
@@ -2933,10 +3018,8 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
@@ -2966,18 +3049,13 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
}
}
-
-void MacroAssembler::Branch(Label* L,
- Condition cond,
- Register rs,
- Heap::RootListIndex index,
- BranchDelaySlot bdslot) {
+void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
+ Heap::RootListIndex index, BranchDelaySlot bdslot) {
LoadRoot(at, index);
Branch(L, cond, rs, Operand(at), bdslot);
}
-
-void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
@@ -2988,15 +3066,13 @@ void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
nop();
}
-
-void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
bc(offset);
}
-
-void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
BranchShortHelperR6(offset, nullptr);
@@ -3006,8 +3082,7 @@ void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
BranchShortHelperR6(0, L);
} else {
@@ -3024,8 +3099,7 @@ static inline bool IsZero(const Operand& rt) {
}
}
-
-int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
} else {
@@ -3034,12 +3108,11 @@ int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
return offset;
}
-
-Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
Register scratch) {
Register r2 = no_reg;
if (rt.is_reg()) {
- r2 = rt.rm_;
+ r2 = rt.rm();
} else {
r2 = scratch;
li(r2, rt);
@@ -3048,8 +3121,7 @@ Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
return r2;
}
-
-bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -3069,7 +3141,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
bc(offset);
break;
case eq:
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
// Pre R6 beq is used here to make the code patchable. Otherwise bc
// should be used which has no condition field so is not patchable.
bits = OffsetSize::kOffset16;
@@ -3093,7 +3165,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
}
break;
case ne:
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
// Pre R6 bne is used here to make the code patchable. Otherwise we
// should not generate any instruction.
bits = OffsetSize::kOffset16;
@@ -3120,7 +3192,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset16;
@@ -3144,7 +3216,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3171,7 +3243,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case less:
// rs < rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset16;
@@ -3195,7 +3267,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case less_equal:
// rs <= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3224,7 +3296,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
// Unsigned comparison.
case Ugreater:
// rs > rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset21;
@@ -3248,7 +3320,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case Ugreater_equal:
// rs >= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3275,7 +3347,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case Uless:
// rs < rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
bits = OffsetSize::kOffset21;
@@ -3296,7 +3368,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
break;
case Uless_equal:
// rs <= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3329,8 +3401,7 @@ bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
return true;
}
-
-bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
@@ -3466,8 +3537,7 @@ bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
return true;
}
-
-bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
@@ -3491,33 +3561,28 @@ bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return false;
}
-
-void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(0, L, cond, rs, rt, bdslot);
}
-
-void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-
-void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
-
-void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
@@ -3533,10 +3598,8 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
@@ -3558,8 +3621,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
}
}
-
-void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
@@ -3570,15 +3632,13 @@ void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
nop();
}
-
-void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
balc(offset);
}
-
-void MacroAssembler::BranchAndLinkShort(int32_t offset,
+void TurboAssembler::BranchAndLinkShort(int32_t offset,
BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
@@ -3589,8 +3649,7 @@ void MacroAssembler::BranchAndLinkShort(int32_t offset,
}
}
-
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
BranchAndLinkShortHelperR6(0, L);
} else {
@@ -3598,8 +3657,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
}
}
-
-bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
@@ -3631,7 +3689,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Signed comparison.
case greater:
// rs > rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
if (!is_near(L, bits)) return false;
@@ -3651,7 +3709,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case greater_equal:
// rs >= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3674,7 +3732,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less:
// rs < rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
break; // No code needs to be emitted.
} else if (rs.is(zero_reg)) {
if (!is_near(L, bits)) return false;
@@ -3694,7 +3752,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
break;
case less_equal:
      // rs <= rt
- if (rs.code() == rt.rm_.reg_code) {
+ if (rs.code() == rt.rm().reg_code) {
bits = OffsetSize::kOffset26;
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
@@ -3756,7 +3814,7 @@ bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
// with the slt instructions. We could use sub or add instead but we would miss
// overflow cases, so we keep slt and add an intermediate third instruction.
-bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
@@ -3847,8 +3905,7 @@ bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
return true;
}
-
-bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
@@ -3873,12 +3930,8 @@ bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
-
-void MacroAssembler::Jump(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+void TurboAssembler::Jump(Register target, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
if (cond == cc_always) {
@@ -3901,12 +3954,8 @@ void MacroAssembler::Jump(Register target,
}
}
-
-void MacroAssembler::Jump(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Label skip;
if (cond != cc_always) {
@@ -3919,35 +3968,21 @@ void MacroAssembler::Jump(intptr_t target,
bind(&skip);
}
-
-void MacroAssembler::Jump(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
-
-void MacroAssembler::Jump(Handle<Code> code,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+ Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
-
-int MacroAssembler::CallSize(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+int TurboAssembler::CallSize(Register target, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bd) {
int size = 0;
if (cond == cc_always) {
@@ -3963,11 +3998,8 @@ int MacroAssembler::CallSize(Register target,
// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+void TurboAssembler::Call(Register target, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bd) {
#ifdef DEBUG
int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
#endif
@@ -4001,24 +4033,15 @@ void MacroAssembler::Call(Register target,
#endif
}
-
-int MacroAssembler::CallSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
+int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, cond, rs, rt, bd);
return size + 4 * kInstrSize;
}
-
-void MacroAssembler::Call(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- Register rs,
- const Operand& rt,
- BranchDelaySlot bd) {
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -4029,51 +4052,30 @@ void MacroAssembler::Call(Address target,
SizeOfCodeGeneratedSince(&start));
}
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- Register rs,
- const Operand& rt,
+int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
- AllowDeferredHandleDereference using_raw_address;
- return CallSize(reinterpret_cast<Address>(code.location()),
- rmode, cond, rs, rt, bd);
+ return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
- AllowDeferredHandleDereference embedding_raw_address;
- Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
- DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+ Call(code.address(), rmode, cond, rs, rt, bd);
+ DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
-
-void MacroAssembler::Ret(Condition cond,
- Register rs,
- const Operand& rt,
+void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
}
-
-void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
@@ -4092,8 +4094,7 @@ void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
@@ -4112,16 +4113,13 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
}
-
-void MacroAssembler::DropAndRet(int drop) {
+void TurboAssembler::DropAndRet(int drop) {
DCHECK(is_int16(drop * kPointerSize));
Ret(USE_DELAY_SLOT);
daddiu(sp, sp, drop * kPointerSize);
}
-void MacroAssembler::DropAndRet(int drop,
- Condition cond,
- Register r1,
+void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
@@ -4137,10 +4135,7 @@ void MacroAssembler::DropAndRet(int drop,
}
}
-
-void MacroAssembler::Drop(int count,
- Condition cond,
- Register reg,
+void TurboAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
@@ -4175,17 +4170,26 @@ void MacroAssembler::Swap(Register reg1,
}
}
+void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
-void MacroAssembler::Call(Label* target) {
- BranchAndLink(target);
+void TurboAssembler::Push(Smi* smi) {
+ li(at, Operand(smi));
+ push(at);
}
-
-void MacroAssembler::Push(Handle<Object> handle) {
+void TurboAssembler::Push(Handle<HeapObject> handle) {
li(at, Operand(handle));
push(at);
}
+void MacroAssembler::PushObject(Handle<Object> handle) {
+ if (handle->IsHeapObject()) {
+ li(at, Operand(Handle<HeapObject>::cast(handle)));
+ } else {
+ li(at, Operand(Smi::cast(*handle)));
+ }
+ push(at);
+}
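
A hedged sketch of how the new push helpers divide the work (illustrative only; the handle value is arbitrary): PushObject dispatches on the handle's type at assembly time, while Push(Handle<HeapObject>) and Push(Smi*) each handle exactly one case.

  __ PushObject(isolate->factory()->undefined_value());  // heap-object path
  __ Push(Smi::FromInt(7));                               // smi overload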
void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
DCHECK(!src.is(scratch));
@@ -4227,7 +4231,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
- li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ li(a6,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
Ld(a5, MemOperand(a6));
push(a5);
@@ -4241,7 +4246,8 @@ void MacroAssembler::PopStackHandler() {
pop(a1);
Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
kPointerSize)));
- li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ li(at,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
Sd(a1, MemOperand(at));
}
@@ -4319,10 +4325,7 @@ void MacroAssembler::Allocate(int object_size,
Daddu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- Sd(result_end, MemOperand(top_address));
- }
+ Sd(result_end, MemOperand(top_address));
// Tag object.
Daddu(result, result, Operand(kHeapObjectTag));
@@ -4404,88 +4407,9 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- Sd(result_end, MemOperand(top_address));
- }
-
- // Tag object if.
- Daddu(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK(!AreAliased(result, scratch1, scratch2, at));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK(0 == (object_size & kObjectAlignmentMask));
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- Register top_address = scratch1;
- Register result_end = scratch2;
- li(top_address, Operand(allocation_top));
- Ld(result, MemOperand(top_address));
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on MIPS64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- if (emit_debug_code()) {
- And(at, result, Operand(kDoubleAlignmentMask));
- Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
- }
-
- // Calculate new top and write it back.
- Daddu(result_end, result, Operand(object_size));
Sd(result_end, MemOperand(top_address));
- Daddu(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, Register scratch,
- AllocationFlags flags) {
- // |object_size| and |result_end| may overlap, other registers must not.
- DCHECK(!AreAliased(object_size, result, scratch, at));
- DCHECK(!AreAliased(result_end, result, scratch, at));
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address and object size registers.
- Register top_address = scratch;
- li(top_address, Operand(allocation_top));
- Ld(result, MemOperand(top_address));
-
- // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
- // the same alignment on MIPS64.
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-
- if (emit_debug_code()) {
- And(at, result, Operand(kDoubleAlignmentMask));
- Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
- }
-
- // Calculate new top and write it back
- if ((flags & SIZE_IN_WORDS) != 0) {
- Dlsa(result_end, result, object_size, kPointerSizeLog2);
- } else {
- Daddu(result_end, result, Operand(object_size));
- }
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- And(at, result_end, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
- }
-
+  // Tag object.
Daddu(result, result, Operand(kHeapObjectTag));
}
@@ -4551,7 +4475,7 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
Sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- Sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ Sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset));
Sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
Sd(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -4689,7 +4613,7 @@ void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
-void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
@@ -4700,8 +4624,7 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
JumpIfSmi(value, miss);
}
-
-void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, v0, v1);
@@ -4713,8 +4636,7 @@ void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
}
}
-
-void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, a0, a1);
@@ -4726,8 +4648,7 @@ void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
}
}
-
-void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
+void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f12, src);
} else {
@@ -4739,8 +4660,7 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
}
}
-
-void MacroAssembler::MovToFloatResult(DoubleRegister src) {
+void TurboAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f0, src);
} else {
@@ -4752,8 +4672,7 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
}
}
-
-void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
const DoubleRegister fparg2 = f13;
@@ -4780,7 +4699,7 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
// -----------------------------------------------------------------------------
// JavaScript invokes.
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
@@ -5082,16 +5001,24 @@ void MacroAssembler::GetObjectType(Register object,
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
- TypeFeedbackId ast_id,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
- cond, r1, r2, bd);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
+void TurboAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
+ Register r1, const Operand& r2,
+ BranchDelaySlot bd) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ li(at, Operand::EmbeddedCode(stub));
+ Call(at);
+}
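
CallStubDelayed exists for contexts where the stub cannot be resolved to a code object yet, so the call site is emitted against Operand::EmbeddedCode(stub) instead of stub->GetCode(). A sketch mirroring the CEntryStub use further down (illustrative only; note the null isolate argument):

  __ CallStubDelayed(new (zone) CEntryStub(nullptr, 1, kDontSaveFPRegs));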
void MacroAssembler::TailCallStub(CodeStub* stub,
Condition cond,
@@ -5101,9 +5028,8 @@ void MacroAssembler::TailCallStub(CodeStub* stub,
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
@@ -5152,16 +5078,16 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
cvt_d_w(value, value);
}
-static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+static inline void BranchOvfHelper(TurboAssembler* tasm, Register overflow_dst,
Label* overflow_label,
Label* no_overflow_label) {
DCHECK(overflow_label || no_overflow_label);
if (!overflow_label) {
DCHECK(no_overflow_label);
- masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ tasm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
} else {
- masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) masm->Branch(no_overflow_label);
+ tasm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) tasm->Branch(no_overflow_label);
}
}
@@ -5340,7 +5266,7 @@ void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
-void MacroAssembler::DaddBranchOvf(Register dst, Register left,
+void TurboAssembler::DaddBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
if (right.is_reg()) {
@@ -5369,8 +5295,7 @@ void MacroAssembler::DaddBranchOvf(Register dst, Register left,
}
}
-
-void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
+void TurboAssembler::DaddBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
Register overflow_dst = t9;
@@ -5408,8 +5333,7 @@ void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
-
-void MacroAssembler::DsubBranchOvf(Register dst, Register left,
+void TurboAssembler::DsubBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5440,8 +5364,7 @@ void MacroAssembler::DsubBranchOvf(Register dst, Register left,
}
}
-
-void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
+void TurboAssembler::DsubBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5484,21 +5407,21 @@ void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
-static inline void BranchOvfHelperMult(MacroAssembler* masm,
+static inline void BranchOvfHelperMult(TurboAssembler* tasm,
Register overflow_dst,
Label* overflow_label,
Label* no_overflow_label) {
DCHECK(overflow_label || no_overflow_label);
if (!overflow_label) {
DCHECK(no_overflow_label);
- masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
+ tasm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
} else {
- masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) masm->Branch(no_overflow_label);
+ tasm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) tasm->Branch(no_overflow_label);
}
}
-void MacroAssembler::MulBranchOvf(Register dst, Register left,
+void TurboAssembler::MulBranchOvf(Register dst, Register left,
const Operand& right, Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5529,7 +5452,7 @@ void MacroAssembler::MulBranchOvf(Register dst, Register left,
}
}
-void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
+void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
Label* overflow_label,
Label* no_overflow_label, Register scratch) {
DCHECK(overflow_label || no_overflow_label);
@@ -5557,6 +5480,19 @@ void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles,
+ BranchDelaySlot bd) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(f->nargs);
+ PrepareCEntryFunction(ExternalReference(f, isolate()));
+ CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
+}
+
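Usage sketch for the delayed runtime call (illustrative; it assumes the header declaration supplies defaults for the SaveFPRegsMode and BranchDelaySlot parameters, which this hunk does not show):

  __ CallRuntimeDelayed(zone, Runtime::kStackGuard);
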
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
@@ -5574,7 +5510,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(isolate(), 1, save_doubles);
- CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -5585,7 +5521,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryFunction(ext);
CEntryStub stub(isolate(), 1);
- CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -5649,16 +5585,14 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void MacroAssembler::Assert(Condition cc, BailoutReason reason,
- Register rs, Operand rt) {
+void TurboAssembler::Assert(Condition cc, BailoutReason reason, Register rs,
+ Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
-
-
-void MacroAssembler::Check(Condition cc, BailoutReason reason,
- Register rs, Operand rt) {
+void TurboAssembler::Check(Condition cc, BailoutReason reason, Register rs,
+ Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
Abort(reason);
@@ -5666,8 +5600,7 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
bind(&L);
}
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -5683,13 +5616,10 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
Move(a0, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
+ if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
@@ -5750,13 +5680,12 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-void MacroAssembler::StubPrologue(StackFrame::Type type) {
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
li(at, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(at);
}
-
-void MacroAssembler::Prologue(bool code_pre_aging) {
+void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
// The following three instructions must remain together and unmodified
@@ -5788,15 +5717,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
Ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on mips64.
- UNREACHABLE();
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
int stack_offset, fp_offset;
if (type == StackFrame::INTERNAL) {
stack_offset = -4 * kPointerSize;
@@ -5824,8 +5745,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Daddu(fp, sp, Operand(fp_offset));
}
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
daddiu(sp, fp, 2 * kPointerSize);
Ld(ra, MemOperand(fp, 1 * kPointerSize));
Ld(fp, MemOperand(fp, 0 * kPointerSize));
@@ -5882,9 +5802,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
Sd(fp, MemOperand(t8));
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Sd(cp, MemOperand(t8));
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
@@ -5906,7 +5828,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK(stack_space >= 0);
Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
@@ -5933,16 +5855,19 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
// Clear top frame.
- li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
Sd(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ li(t8, Operand(ExternalReference(IsolateAddressId::kContextAddress,
+ isolate())));
Ld(cp, MemOperand(t8));
}
#ifdef DEBUG
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ li(t8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Sd(a3, MemOperand(t8));
#endif
@@ -5966,7 +5891,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
daddiu(sp, sp, 2 * kPointerSize);
}
-int MacroAssembler::ActivationFrameAlignment() {
+int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -5990,7 +5915,7 @@ void MacroAssembler::AssertStackIsAligned() {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
andi(at, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -6101,10 +6026,8 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
SmiUntag(dst, src);
}
-void MacroAssembler::JumpIfSmi(Register value,
- Label* smi_label,
- Register scratch,
- BranchDelaySlot bd) {
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
+ Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
@@ -6167,6 +6090,15 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAFixedArray, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAFixedArray, t8, Operand(FIXED_ARRAY_TYPE));
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -6189,8 +6121,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -6198,21 +6129,15 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
GetObjectType(object, t8, t8);
- Label async, abort, done;
- And(t9, flags, Operand(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
- Branch(&async, equal, t9,
- Operand(static_cast<int>(SuspendFlags::kAsyncGenerator)));
+ Label done;
// Check if JSGeneratorObject
Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
- jmp(&abort);
- bind(&async);
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
- bind(&abort);
- Abort(kOperandIsASmiAndNotAGeneratorObject);
+ Abort(kOperandIsNotAGeneratorObject);
bind(&done);
}
@@ -6279,7 +6204,7 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
scratch2, failure);
}
-void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_s(dst, src1);
@@ -6318,12 +6243,12 @@ void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
}
}
-void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
-void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_s(dst, src1);
@@ -6362,12 +6287,12 @@ void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
}
}
-void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
-void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_d(dst, src1);
@@ -6405,12 +6330,12 @@ void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
}
}
-void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_d(dst, src1, src2);
}
-void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1.is(src2)) {
Move_d(dst, src1);
@@ -6448,7 +6373,7 @@ void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
}
}
-void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_d(dst, src1, src2);
}
@@ -6469,7 +6394,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 8;
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
num_reg_arguments += 2 * num_double_arguments;
@@ -6509,8 +6434,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
Check(ge, kIndexIsNegative, index, Operand(zero_reg));
}
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -6529,7 +6453,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp.
mov(scratch, sp);
Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -6537,41 +6461,33 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
}
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
li(t8, Operand(function));
CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunctionHelper(Register function,
+void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
@@ -6587,7 +6503,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
And(at, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -6623,13 +6539,8 @@ void MacroAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+ Condition cc, Label* condition_met) {
And(scratch, object, Operand(~Page::kPageAlignmentMask));
Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
@@ -6900,7 +6811,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return candidate;
}
UNREACHABLE();
- return no_reg;
}
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index ef13a2f57f..66406a6663 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -78,19 +78,20 @@ enum BranchDelaySlot {
enum LiFlags {
// If the constant value can be represented in just 16 bits, then
// optimize the li to use a single instruction, rather than lui/ori/dsll
- // sequence.
+    // sequence. A number of other optimizations that emit fewer than the
+    // maximum number of instructions also exist.
OPTIMIZE_SIZE = 0,
- // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
+ // Always use 6 instructions (lui/ori/dsll sequence) for release 2 or 4
+ // instructions for release 6 (lui/ori/dahi/dati), even if the constant
// could be loaded with just one, so that this value is patchable later.
CONSTANT_SIZE = 1,
  // For address loads only 4 instructions are required. Used to mark
// constant load that will be used as address without relocation
// information. It ensures predictable code size, so specific sites
// in code are patchable.
- ADDRESS_LOAD = 2
+ ADDRESS_LOAD = 2
};
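
To make the size/patchability trade-off concrete, a small sketch (registers and the constant are arbitrary; `__` is the usual ACCESS_MASM shorthand):

  // Patchable load: always the full lui/ori/dsll (r2) or lui/ori/dahi/dati
  // (r6) sequence, so the site can be rewritten later.
  __ li(t9, 0x123456789ABCDEF0, CONSTANT_SIZE);
  // Default OPTIMIZE_SIZE: the assembler emits the shortest sequence it can,
  // a single instruction here.
  __ li(a0, 42);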
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
@@ -163,15 +164,69 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
+class TurboAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size),
+ isolate_(isolate),
+ has_double_zero_reg_set_(false) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
+
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
Isolate* isolate() const { return isolate_; }
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on mips.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue(bool code_pre_aging);
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ li(kRootRegister, Operand(roots_array_start));
+ }
+
+ // Jump unconditionally to given label.
+  // We NEED a nop in the branch delay slot, as it is used by v8, for example
+  // in CodeGenerator::ProcessDeferred().
+ // Currently the branch delay slot is filled by the MacroAssembler.
+  // Rather, use b(Label) for code generation.
+ void jmp(Label* L) { Branch(L); }
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason msg);
+
+ inline bool AllowThisStubCall(CodeStub* stub);
+
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
@@ -203,6 +258,56 @@ class MacroAssembler: public Assembler {
#undef COND_TYPED_ARGS
#undef COND_ARGS
+ // Wrapper functions for the different cmp/branch types.
+ inline void BranchF32(Label* target, Label* nan, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT) {
+ BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ inline void BranchF64(Label* target, Label* nan, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT) {
+ BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+ inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2) {
+ BranchF64(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2) {
+ BranchF32(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ // Alias functions for backward compatibility.
+ inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
+ FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
+ BranchF64(target, nan, cc, cmp1, cmp2, bd);
+ }
+
+ inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2) {
+ BranchF64(bd, target, nan, cc, cmp1, cmp2);
+ }
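
A short sketch of the delay-slot-first overload (illustrative; labels and registers are arbitrary): with USE_DELAY_SLOT the next emitted instruction lands in the branch delay slot instead of a nop.

  Label on_less, is_nan;
  __ BranchF64(USE_DELAY_SLOT, &on_less, &is_nan, lt, f0, f2);
  __ mov(v0, a0);  // Executed from the delay slot of the branch.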
+
+ void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
+ MSARegister wt, BranchDelaySlot bd = PROTECT);
+
+ void Branch(Label* L, Condition cond, Register rs, Heap::RootListIndex index,
+ BranchDelaySlot bdslot = PROTECT);
+
+ static int InstrCountForLi64Bit(int64_t value);
+ inline void LiLower32BitHelper(Register rd, Operand j);
+ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ // Load int32 in the rd register.
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
@@ -218,52 +323,17 @@ class MacroAssembler: public Assembler {
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
COND_ARGS);
+ void Call(Label* target);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
- bool IsNear(Label* L, Condition cond, int rs_reg);
-
- void Branch(Label* L,
- Condition cond,
- Register rs,
- Heap::RootListIndex index,
- BranchDelaySlot bdslot = PROTECT);
-
-// Number of instructions needed for calculation of switch table entry address
-#ifdef _MIPS_ARCH_MIPS64R6
- static const int kSwitchTablePrologueSize = 6;
-#else
- static const int kSwitchTablePrologueSize = 11;
-#endif
-
- // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
- // functor/function with 'Label *func(size_t index)' declaration.
- template <typename Func>
- void GenerateSwitchTable(Register index, size_t case_count,
- Func GetLabelFunction);
-#undef COND_ARGS
-
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
-
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
@@ -280,30 +350,338 @@ class MacroAssembler: public Assembler {
Register reg,
const Operand& op);
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+ void Ld(Register rd, const MemOperand& rs);
+ void Sd(Register rd, const MemOperand& rs);
- void Call(Label* target);
+ void push(Register src) {
+ Daddu(sp, sp, Operand(-kPointerSize));
+ Sd(src, MemOperand(sp, 0));
+ }
+ void Push(Register src) { push(src); }
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi* smi);
- inline void Move(Register dst, Handle<Object> handle) { li(dst, handle); }
- inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Dsubu(sp, sp, Operand(2 * kPointerSize));
+ Sd(src1, MemOperand(sp, 1 * kPointerSize));
+ Sd(src2, MemOperand(sp, 0 * kPointerSize));
+ }
- inline void Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ Sd(src1, MemOperand(sp, 2 * kPointerSize));
+ Sd(src2, MemOperand(sp, 1 * kPointerSize));
+ Sd(src3, MemOperand(sp, 0 * kPointerSize));
}
- inline void Move_d(FPURegister dst, FPURegister src) {
- if (!dst.is(src)) {
- mov_d(dst, src);
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Dsubu(sp, sp, Operand(4 * kPointerSize));
+ Sd(src1, MemOperand(sp, 3 * kPointerSize));
+ Sd(src2, MemOperand(sp, 2 * kPointerSize));
+ Sd(src3, MemOperand(sp, 1 * kPointerSize));
+ Sd(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Dsubu(sp, sp, Operand(5 * kPointerSize));
+ Sd(src1, MemOperand(sp, 4 * kPointerSize));
+ Sd(src2, MemOperand(sp, 3 * kPointerSize));
+ Sd(src3, MemOperand(sp, 2 * kPointerSize));
+ Sd(src4, MemOperand(sp, 1 * kPointerSize));
+ Sd(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ void Push(Register src, Condition cond, Register tst1, Register tst2) {
+ // Since we don't have conditional execution we use a Branch.
+ Branch(3, cond, tst1, Operand(tst2));
+ Dsubu(sp, sp, Operand(kPointerSize));
+ Sd(src, MemOperand(sp, 0));
+ }
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPushFPU(RegList regs);
+
+ void pop(Register dst) {
+ Ld(dst, MemOperand(sp, 0));
+ Daddu(sp, sp, Operand(kPointerSize));
+ }
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ DCHECK(!src1.is(src2));
+ Ld(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld(src1, MemOperand(sp, 1 * kPointerSize));
+ Daddu(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ Ld(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld(src1, MemOperand(sp, 2 * kPointerSize));
+ Daddu(sp, sp, 3 * kPointerSize);
+ }
+
+ void Pop(uint32_t count = 1) { Daddu(sp, sp, Operand(count * kPointerSize)); }
+
+ // Pops multiple values from the stack and loads them into the
+ // registers specified in regs. The pop order is the reverse of MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPopFPU(RegList regs);
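The multi-register Push overloads store the leftmost operand at the highest address, and the matching Pop overloads read in the reverse order, so the same operand list round-trips the values. A small sketch, assuming a TurboAssembler* named tasm:

    tasm->Push(a0, a1, a2);  // a0 -> sp+16, a1 -> sp+8, a2 -> sp+0 (kPointerSize == 8)
    // ... code that clobbers a0..a2 ...
    tasm->Pop(a0, a1, a2);   // restores all three and pops the 24 bytes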
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rs, const Operand& rt); \
+ void instr(Register rd, Register rs, Register rt) { \
+ instr(rd, rs, Operand(rt)); \
+ } \
+ void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rs, const Operand& rt); \
+ void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
+ void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
+
+ DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Daddu);
+ DEFINE_INSTRUCTION(Div);
+ DEFINE_INSTRUCTION(Divu);
+ DEFINE_INSTRUCTION(Ddivu);
+ DEFINE_INSTRUCTION(Mod);
+ DEFINE_INSTRUCTION(Modu);
+ DEFINE_INSTRUCTION(Ddiv);
+ DEFINE_INSTRUCTION(Subu);
+ DEFINE_INSTRUCTION(Dsubu);
+ DEFINE_INSTRUCTION(Dmod);
+ DEFINE_INSTRUCTION(Dmodu);
+ DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Mulh);
+ DEFINE_INSTRUCTION(Mulhu);
+ DEFINE_INSTRUCTION(Dmul);
+ DEFINE_INSTRUCTION(Dmulh);
+ DEFINE_INSTRUCTION2(Mult);
+ DEFINE_INSTRUCTION2(Dmult);
+ DEFINE_INSTRUCTION2(Multu);
+ DEFINE_INSTRUCTION2(Dmultu);
+ DEFINE_INSTRUCTION2(Div);
+ DEFINE_INSTRUCTION2(Ddiv);
+ DEFINE_INSTRUCTION2(Divu);
+ DEFINE_INSTRUCTION2(Ddivu);
+
+ DEFINE_INSTRUCTION(And);
+ DEFINE_INSTRUCTION(Or);
+ DEFINE_INSTRUCTION(Xor);
+ DEFINE_INSTRUCTION(Nor);
+ DEFINE_INSTRUCTION2(Neg);
+
+ DEFINE_INSTRUCTION(Slt);
+ DEFINE_INSTRUCTION(Sltu);
+
+ // MIPS32 R2 instruction macro.
+ DEFINE_INSTRUCTION(Ror);
+ DEFINE_INSTRUCTION(Dror);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
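Each DEFINE_INSTRUCTION entry declares the Operand form (implemented in the .cc file) plus two inline forwarding overloads, so DEFINE_INSTRUCTION(Daddu) expands to roughly:

    void Daddu(Register rd, Register rs, const Operand& rt);  // defined out of line
    void Daddu(Register rd, Register rs, Register rt) {
      Daddu(rd, rs, Operand(rt));                             // register operand
    }
    void Daddu(Register rs, Register rt, int32_t j) {
      Daddu(rs, rt, Operand(j));                              // immediate operand
    }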
+
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ STATIC_ASSERT(kSmiShift == 32);
+ dsra32(dst, src, 0);
+ } else {
+ sra(dst, src, kSmiTagSize);
}
}
- inline void Move_s(FPURegister dst, FPURegister src) {
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
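When SmiValuesAre32Bits() holds, the smi payload sits in the upper 32 bits of the word, so untagging is a 32-bit arithmetic right shift. A rough C++ model of what the dsra32 path computes (values are illustrative):

    int64_t tagged = int64_t{5} << 32;  // Smi(5) with kSmiShift == 32
    int64_t untagged = tagged >> 32;    // what dsra32(dst, src, 0) leaves in dst: 5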
+
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C function from generated code, align the arguments on the
+ // stack and add space for the four MIPS argument slots.
+ // After aligning the frame, non-register arguments must be stored on the
+ // stack, after the argument slots, using the helper CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // Arguments 1-4 are placed in registers a0 thru a3 respectively.
+ // Arguments 5..n are stored on the stack using the following:
+ // Sw(a4, CFunctionArgumentOperand(5));
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+ void MovFromFloatResult(DoubleRegister dst);
+ void MovFromFloatParameter(DoubleRegister dst);
+
+ // There are two ways of passing double arguments on MIPS, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
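A typical sequence for calling a C helper that takes one double argument and returns a double, assuming a TurboAssembler* named tasm, an Isolate* named isolate, and an illustrative external reference:

    tasm->PrepareCallCFunction(0, 1, t0);  // 0 integer args, 1 double arg, t0 as scratch
    tasm->MovToFloatParameter(f12);        // pass the argument per the FP ABI in use
    tasm->CallCFunction(ExternalReference::ieee754_sin_function(isolate), 0, 1);
    tasm->MovFromFloatResult(f0);          // double result now in f0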
+ // See comments at the beginning of CEntryStub::Generate.
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(a1, Operand(ref));
+ }
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+
+ void CallStubDelayed(CodeStub* stub, COND_ARGS);
+#undef COND_ARGS
+
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds the answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-mips64.cc.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input);
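The intended pairing is to try the inline conversion first and fall through to the stub-based path only when the result saturates; a sketch, assuming a TurboAssembler* named tasm and a Zone* named zone:

    Label done;
    tasm->TryInlineTruncateDoubleToI(v0, f12, &done);  // fast path jumps to done
    tasm->TruncateDoubleToIDelayed(zone, v0, f12);     // slow path, reached on fall-through
    tasm->bind(&done);                                 // v0 holds ToInt32 of f12 either way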
+
+ // Conditional move.
+ void Movz(Register rd, Register rs, Register rt);
+ void Movn(Register rd, Register rs, Register rt);
+ void Movt(Register rd, Register rs, uint16_t cc = 0);
+ void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+ void Clz(Register rd, Register rs);
+
+ // MIPS64 R2 instruction macro.
+ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Neg_s(FPURegister fd, FPURegister fs);
+ void Neg_d(FPURegister fd, FPURegister fs);
+
+ // MIPS64 R6 instruction macros.
+ void Bovc(Register rt, Register rs, Label* L);
+ void Bnvc(Register rt, Register rs, Label* L);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
+ void Ulh(Register rd, const MemOperand& rs);
+ void Ulhu(Register rd, const MemOperand& rs);
+ void Ush(Register rd, const MemOperand& rs, Register scratch);
+
+ void Ulw(Register rd, const MemOperand& rs);
+ void Ulwu(Register rd, const MemOperand& rs);
+ void Usw(Register rd, const MemOperand& rs);
+
+ void Uld(Register rd, const MemOperand& rs);
+ void Usd(Register rd, const MemOperand& rs);
+
+ void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+ void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+ void Lb(Register rd, const MemOperand& rs);
+ void Lbu(Register rd, const MemOperand& rs);
+ void Sb(Register rd, const MemOperand& rs);
+
+ void Lh(Register rd, const MemOperand& rs);
+ void Lhu(Register rd, const MemOperand& rs);
+ void Sh(Register rd, const MemOperand& rs);
+
+ void Lw(Register rd, const MemOperand& rs);
+ void Lwu(Register rd, const MemOperand& rs);
+ void Sw(Register rd, const MemOperand& rs);
+
+ void Lwc1(FPURegister fd, const MemOperand& src);
+ void Swc1(FPURegister fs, const MemOperand& dst);
+
+ void Ldc1(FPURegister fd, const MemOperand& src);
+ void Sdc1(FPURegister fs, const MemOperand& dst);
+
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
+ // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
+ // handled in out-of-line code. The specific behaviour depends on supported
+ // instructions.
+ //
+ // These functions assume (and assert) that !src1.is(src2). It is permitted
+ // for the result to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+
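The fast-path macro handles the ordinary case and branches to out_of_line for NaNs and +/-0.0, which the matching *OutOfLine helper then resolves; a typical emission pattern, assuming a TurboAssembler* named tasm (registers are illustrative):

    Label out_of_line, done;
    tasm->Float64Max(f0, f2, f4, &out_of_line);  // fast path: f0 = max(f2, f4)
    tasm->Branch(&done);
    tasm->bind(&out_of_line);
    tasm->Float64MaxOutOfLine(f0, f2, f4);       // NaN / signed-zero handling
    tasm->bind(&done);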
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
+ void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+
+ inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
+ inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+
+ inline void Move(Register dst, Register src) {
if (!dst.is(src)) {
- mov_s(dst, src);
+ mov(dst, src);
}
}
@@ -337,26 +715,228 @@ class MacroAssembler: public Assembler {
mthc1(src_high, dst);
}
+ inline void Move_d(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_d(dst, src);
+ }
+ }
+
+ inline void Move_s(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_s(dst, src);
+ }
+ }
+
void Move(FPURegister dst, float imm);
void Move(FPURegister dst, double imm);
- // Conditional move.
- void Movz(Register rd, Register rs, Register rt);
- void Movn(Register rd, Register rs, Register rt);
- void Movt(Register rd, Register rs, uint16_t cc = 0);
- void Movf(Register rd, Register rs, uint16_t cc = 0);
+ inline void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
- void Clz(Register rd, Register rs);
+ inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
- // Jump unconditionally to given label.
- // We NEED a nop in the branch delay slot, as it used by v8, for example in
- // CodeGenerator::ProcessDeferred().
- // Currently the branch delay slot is filled by the MacroAssembler.
- // Use rather b(Label) for code generation.
- void jmp(Label* L) {
- Branch(L);
+ void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void DaddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void DaddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void DsubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void DsubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
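The *BranchOvf helpers compute the result and branch on (or away from) signed overflow in one step; a small sketch of an overflow-checked 64-bit add, assuming a TurboAssembler* named tasm:

    Label overflow;
    tasm->DaddBranchOvf(v0, a0, Operand(a1), &overflow);  // v0 = a0 + a1, branch if it wraps
    // ... non-overflow path continues here ...
    tasm->bind(&overflow);                                // handle the overflow case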
+// Number of instructions needed for calculation of switch table entry address
+#ifdef _MIPS_ARCH_MIPS64R6
+ static const int kSwitchTablePrologueSize = 6;
+#else
+ static const int kSwitchTablePrologueSize = 11;
+#endif
+
+ // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
+ // functor/function with 'Label *func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
+ Register src1, const Operand& src2);
+
+ // If the value is a NaN, canonicalize the value; otherwise, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, FPURegister fs);
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert unsigned long to double.
+ void Cvt_d_ul(FPURegister fd, FPURegister fs);
+ void Cvt_d_ul(FPURegister fd, Register rs);
+
+ // Convert unsigned word to float.
+ void Cvt_s_uw(FPURegister fd, FPURegister fs);
+ void Cvt_s_uw(FPURegister fd, Register rs);
+
+ // Convert unsigned long to float.
+ void Cvt_s_ul(FPURegister fd, FPURegister fs);
+ void Cvt_s_ul(FPURegister fd, Register rs);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+
+ // Convert double to unsigned long.
+ void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
+ Register result = no_reg);
+ void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
+ Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
+ Register result = no_reg);
+ void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
+ Register result = no_reg);
+
+ // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
+ BranchDelaySlot bd = PROTECT);
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+ // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
+ // may be clobbered.
+ void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
+ void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
+
+ protected:
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
+ private:
+ bool has_frame_ = false;
+ Isolate* const isolate_;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+ bool has_double_zero_reg_set_;
+
+ void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
+ MSARegister wt, BranchDelaySlot bd = PROTECT);
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ // Common implementation of BranchF functions for the different formats.
+ void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
+ Condition cc, FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
+ FPURegister cmp1, FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public TurboAssembler {
+ public:
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
+
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
}
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
@@ -378,13 +958,6 @@ class MacroAssembler: public Assembler {
Branch(if_not_equal, ne, with, Operand(at));
}
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index);
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond, Register src1, const Operand& src2);
-
// Store an object to the root table.
void StoreRoot(Register source,
Heap::RootListIndex index);
@@ -414,12 +987,6 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -511,25 +1078,19 @@ class MacroAssembler: public Assembler {
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
- Register object,
- Register address,
- Register value,
- RAStatus ra_status,
+ Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
-
// ---------------------------------------------------------------------------
// Inline caching support.
void GetNumberHash(Register reg0, Register scratch);
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
+ inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
// i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
@@ -540,7 +1101,6 @@ class MacroAssembler: public Assembler {
return IsNop(instr, type);
}
-
static inline int GetCodeMarker(Instr instr) {
uint32_t opcode = ((instr & kOpcodeMask));
uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
@@ -549,9 +1109,9 @@ class MacroAssembler: public Assembler {
// Return <n> if we have a sll zero_reg, zero_reg, n
// else return -1.
- bool sllzz = (opcode == SLL &&
- rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+ bool sllzz =
+ (opcode == SLL && rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)));
int type =
(sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
DCHECK((type == -1) ||
@@ -559,8 +1119,6 @@ class MacroAssembler: public Assembler {
return type;
}
-
-
// ---------------------------------------------------------------------------
// Allocation support.
@@ -571,39 +1129,21 @@ class MacroAssembler: public Assembler {
// tag_allocated_object is true the result is tagged as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
- void Allocate(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size, Register result, Register scratch1,
+ Register scratch2, Label* gc_required, AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register scratch1,
- Register scratch2, AllocationFlags flags);
-
- void FastAllocate(Register object_size, Register result, Register result_new,
- Register scratch, AllocationFlags flags);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required,
MutableMode mode = IMMUTABLE);
- void AllocateHeapNumberWithValue(Register result,
- FPURegister value,
- Register scratch1,
- Register scratch2,
+ void AllocateHeapNumberWithValue(Register result, FPURegister value,
+ Register scratch1, Register scratch2,
Label* gc_required);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
@@ -612,243 +1152,25 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // ---------------------------------------------------------------------------
- // Instruction macros.
-
-#define DEFINE_INSTRUCTION(instr) \
- void instr(Register rd, Register rs, const Operand& rt); \
- void instr(Register rd, Register rs, Register rt) { \
- instr(rd, rs, Operand(rt)); \
- } \
- void instr(Register rs, Register rt, int32_t j) { \
- instr(rs, rt, Operand(j)); \
- }
-
-#define DEFINE_INSTRUCTION2(instr) \
- void instr(Register rs, const Operand& rt); \
- void instr(Register rs, Register rt) { \
- instr(rs, Operand(rt)); \
- } \
- void instr(Register rs, int32_t j) { \
- instr(rs, Operand(j)); \
- }
-
- DEFINE_INSTRUCTION(Addu);
- DEFINE_INSTRUCTION(Daddu);
- DEFINE_INSTRUCTION(Div);
- DEFINE_INSTRUCTION(Divu);
- DEFINE_INSTRUCTION(Ddivu);
- DEFINE_INSTRUCTION(Mod);
- DEFINE_INSTRUCTION(Modu);
- DEFINE_INSTRUCTION(Ddiv);
- DEFINE_INSTRUCTION(Subu);
- DEFINE_INSTRUCTION(Dsubu);
- DEFINE_INSTRUCTION(Dmod);
- DEFINE_INSTRUCTION(Dmodu);
- DEFINE_INSTRUCTION(Mul);
- DEFINE_INSTRUCTION(Mulh);
- DEFINE_INSTRUCTION(Mulhu);
- DEFINE_INSTRUCTION(Dmul);
- DEFINE_INSTRUCTION(Dmulh);
- DEFINE_INSTRUCTION2(Mult);
- DEFINE_INSTRUCTION2(Dmult);
- DEFINE_INSTRUCTION2(Multu);
- DEFINE_INSTRUCTION2(Dmultu);
- DEFINE_INSTRUCTION2(Div);
- DEFINE_INSTRUCTION2(Ddiv);
- DEFINE_INSTRUCTION2(Divu);
- DEFINE_INSTRUCTION2(Ddivu);
-
- DEFINE_INSTRUCTION(And);
- DEFINE_INSTRUCTION(Or);
- DEFINE_INSTRUCTION(Xor);
- DEFINE_INSTRUCTION(Nor);
- DEFINE_INSTRUCTION2(Neg);
-
- DEFINE_INSTRUCTION(Slt);
- DEFINE_INSTRUCTION(Sltu);
-
- // MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror);
- DEFINE_INSTRUCTION(Dror);
-
-#undef DEFINE_INSTRUCTION
-#undef DEFINE_INSTRUCTION2
-#undef DEFINE_INSTRUCTION3
-
- // Load Scaled Address instructions. Parameter sa (shift argument) must be
- // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
- // may be clobbered.
- void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = at);
- void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = at);
-
void Pref(int32_t hint, const MemOperand& rs);
-
// ---------------------------------------------------------------------------
// Pseudo-instructions.
- // Change endianness
- void ByteSwapSigned(Register dest, Register src, int operand_size);
- void ByteSwapUnsigned(Register dest, Register src, int operand_size);
-
- void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-
- void Ulh(Register rd, const MemOperand& rs);
- void Ulhu(Register rd, const MemOperand& rs);
- void Ush(Register rd, const MemOperand& rs, Register scratch);
-
- void Ulw(Register rd, const MemOperand& rs);
- void Ulwu(Register rd, const MemOperand& rs);
- void Usw(Register rd, const MemOperand& rs);
-
- void Uld(Register rd, const MemOperand& rs);
- void Usd(Register rd, const MemOperand& rs);
-
- void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
- void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
-
- void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
- void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
-
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
- void Lb(Register rd, const MemOperand& rs);
- void Lbu(Register rd, const MemOperand& rs);
- void Sb(Register rd, const MemOperand& rs);
-
- void Lh(Register rd, const MemOperand& rs);
- void Lhu(Register rd, const MemOperand& rs);
- void Sh(Register rd, const MemOperand& rs);
-
- void Lw(Register rd, const MemOperand& rs);
- void Lwu(Register rd, const MemOperand& rs);
- void Sw(Register rd, const MemOperand& rs);
-
- void Ld(Register rd, const MemOperand& rs);
- void Sd(Register rd, const MemOperand& rs);
-
- void Lwc1(FPURegister fd, const MemOperand& src);
- void Swc1(FPURegister fs, const MemOperand& dst);
-
- void Ldc1(FPURegister fd, const MemOperand& src);
- void Sdc1(FPURegister fs, const MemOperand& dst);
-
- // Load int32 in the rd register.
- void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
- inline bool LiLower32BitHelper(Register rd, Operand j);
- inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
- li(rd, Operand(j), mode);
- }
- void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
-
- // Push multiple registers on the stack.
- // Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses.
- void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
-
- void MultiPushFPU(RegList regs);
void MultiPushReversedFPU(RegList regs);
- void push(Register src) {
- Daddu(sp, sp, Operand(-kPointerSize));
- Sd(src, MemOperand(sp, 0));
- }
- void Push(Register src) { push(src); }
-
- // Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2) {
- Dsubu(sp, sp, Operand(2 * kPointerSize));
- Sd(src1, MemOperand(sp, 1 * kPointerSize));
- Sd(src2, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3) {
- Dsubu(sp, sp, Operand(3 * kPointerSize));
- Sd(src1, MemOperand(sp, 2 * kPointerSize));
- Sd(src2, MemOperand(sp, 1 * kPointerSize));
- Sd(src3, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4) {
- Dsubu(sp, sp, Operand(4 * kPointerSize));
- Sd(src1, MemOperand(sp, 3 * kPointerSize));
- Sd(src2, MemOperand(sp, 2 * kPointerSize));
- Sd(src3, MemOperand(sp, 1 * kPointerSize));
- Sd(src4, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push five registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4,
- Register src5) {
- Dsubu(sp, sp, Operand(5 * kPointerSize));
- Sd(src1, MemOperand(sp, 4 * kPointerSize));
- Sd(src2, MemOperand(sp, 3 * kPointerSize));
- Sd(src3, MemOperand(sp, 2 * kPointerSize));
- Sd(src4, MemOperand(sp, 1 * kPointerSize));
- Sd(src5, MemOperand(sp, 0 * kPointerSize));
- }
-
- void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditional execution we use a Branch.
- Branch(3, cond, tst1, Operand(tst2));
- Dsubu(sp, sp, Operand(kPointerSize));
- Sd(src, MemOperand(sp, 0));
- }
+ void PushObject(Handle<Object> handle);
void PushRegisterAsTwoSmis(Register src, Register scratch = at);
void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
- // Pops multiple values from the stack and load them in the
- // registers specified in regs. Pop order is the opposite as in MultiPush.
- void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
-
- void MultiPopFPU(RegList regs);
void MultiPopReversedFPU(RegList regs);
- void pop(Register dst) {
- Ld(dst, MemOperand(sp, 0));
- Daddu(sp, sp, Operand(kPointerSize));
- }
- void Pop(Register dst) { pop(dst); }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2) {
- DCHECK(!src1.is(src2));
- Ld(src2, MemOperand(sp, 0 * kPointerSize));
- Ld(src1, MemOperand(sp, 1 * kPointerSize));
- Daddu(sp, sp, 2 * kPointerSize);
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3) {
- Ld(src3, MemOperand(sp, 0 * kPointerSize));
- Ld(src2, MemOperand(sp, 1 * kPointerSize));
- Ld(src1, MemOperand(sp, 2 * kPointerSize));
- Daddu(sp, sp, 3 * kPointerSize);
- }
-
- void Pop(uint32_t count = 1) {
- Daddu(sp, sp, Operand(count * kPointerSize));
- }
-
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-
- // Push a standard frame, consisting of ra, fp, context and JS function.
- void PushStandardFrame(Register function_reg);
-
void PopCommonFrame(Register marker_reg = no_reg);
// Push and pop the registers that can hold pointers, as defined by the
@@ -862,37 +1184,6 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // MIPS64 R2 instruction macro.
- void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Neg_s(FPURegister fd, FPURegister fs);
- void Neg_d(FPURegister fd, FPURegister fs);
-
- // MIPS64 R6 instruction macros.
- void Bovc(Register rt, Register rs, Label* L);
- void Bnvc(Register rt, Register rs, Label* L);
-
- // ---------------------------------------------------------------------------
- // FPU macros. These do not handle special cases like NaN or +- inf.
-
- // Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs);
- void Cvt_d_uw(FPURegister fd, Register rs);
-
- // Convert unsigned long to double.
- void Cvt_d_ul(FPURegister fd, FPURegister fs);
- void Cvt_d_ul(FPURegister fd, Register rs);
-
- // Convert unsigned word to float.
- void Cvt_s_uw(FPURegister fd, FPURegister fs);
- void Cvt_s_uw(FPURegister fd, Register rs);
-
- // Convert unsigned long to float.
- void Cvt_s_ul(FPURegister fd, FPURegister fs);
- void Cvt_s_ul(FPURegister fd, Register rs);
-
// Convert double to unsigned long.
void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
@@ -901,26 +1192,6 @@ class MacroAssembler: public Assembler {
void Floor_l_d(FPURegister fd, FPURegister fs);
void Ceil_l_d(FPURegister fd, FPURegister fs);
- // Convert double to unsigned word.
- void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
-
- // Convert single to unsigned word.
- void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
-
- // Convert double to unsigned long.
- void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
- Register result = no_reg);
- void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
- Register result = no_reg);
-
- // Convert single to unsigned long.
- void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
- Register result = no_reg);
- void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
- Register result = no_reg);
-
void Trunc_w_d(FPURegister fd, FPURegister fs);
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
@@ -941,69 +1212,19 @@ class MacroAssembler: public Assembler {
void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
- // Wrapper functions for the different cmp/branch types.
- inline void BranchF32(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF64(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
- }
-
- // Alternate (inline) version for better readability with USE_DELAY_SLOT.
- inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF32(target, nan, cc, cmp1, cmp2, bd);
- }
-
- // Alias functions for backward compatibility.
- inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(bd, target, nan, cc, cmp1, cmp2);
- }
+ void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
+ MSARegister wt, BranchDelaySlot bd = PROTECT);
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
// If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
- void EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
- Register except_flag,
- CheckForInexactConversion check_inexact
- = kDontCheckForInexactConversion);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- //
- // Only public for the test code in test-code-stubs-arm.cc.
- void TryInlineTruncateDoubleToI(Register result,
- DoubleRegister input,
- Label* done);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Register result, DoubleRegister double_input);
+ void EmitFPUTruncate(
+ FPURoundingMode rounding_mode, Register result,
+ DoubleRegister double_input, Register scratch,
+ DoubleRegister double_scratch, Register except_flag,
+ CheckForInexactConversion check_inexact = kDontCheckForInexactConversion);
// Performs a truncating conversion of a heap number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
@@ -1068,9 +1289,6 @@ class MacroAssembler: public Assembler {
bool restore_context, bool do_return = NO_EMIT_RETURN,
bool argument_count_is_length = false);
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
// Make sure the stack is aligned. Only emits code in debug mode.
void AssertStackIsAligned();
@@ -1094,24 +1312,9 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- li(kRootRegister, Operand(roots_array_start));
- }
-
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -1219,10 +1422,6 @@ class MacroAssembler: public Assembler {
Label* fail,
SmiCheckType smi_check_type);
- // If the value is a NaN, canonicalize the value else, do nothing.
- void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
-
-
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -1307,60 +1506,6 @@ class MacroAssembler: public Assembler {
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
- inline void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void DaddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void DaddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void DsubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void DsubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
@@ -1381,46 +1526,14 @@ class MacroAssembler: public Assembler {
Ret(ge, overflow_check, Operand(zero_reg), bd);
}
- // Perform a floating-point min or max operation with the
- // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
- // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
- // handled in out-of-line code. The specific behaviour depends on supported
- // instructions.
- //
- // These functions assume (and assert) that !src1.is(src2). It is permitted
- // for the result to alias either input register.
- void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
-
- // Generate out-of-line cases for the macros above.
- void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
-
// -------------------------------------------------------------------------
// Runtime calls.
- // See comments at the beginning of CEntryStub::Generate.
- inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
-
- inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(a1, Operand(ref));
- }
-
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Call a code stub.
- void CallStub(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- COND_ARGS);
+ void CallStub(CodeStub* stub, COND_ARGS);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub, COND_ARGS);
@@ -1461,52 +1574,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack
- // and add space for the four mips argument slots.
- // After aligning the frame, non-register arguments must be stored on the
- // stack, after the argument-slots using helper: CFunctionArgumentOperand().
- // The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
-
- // Arguments 1-4 are placed in registers a0 thru a3 respectively.
- // Arguments 5..n are stored to stack using following:
- // Sw(a4, CFunctionArgumentOperand(5));
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
- void MovFromFloatResult(DoubleRegister dst);
- void MovFromFloatParameter(DoubleRegister dst);
-
- // There are two ways of passing double arguments on MIPS, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void MovToFloatParameter(DoubleRegister src);
- void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
- void MovToFloatResult(DoubleRegister src);
-
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT,
@@ -1518,11 +1585,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
const char* name;
};
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and at gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
@@ -1537,27 +1599,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
-
- // -------------------------------------------------------------------------
- // Debugging.
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
// ---------------------------------------------------------------------------
// Number utilities.
@@ -1610,19 +1651,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
}
}
- void SmiUntag(Register dst, Register src) {
- if (SmiValuesAre32Bits()) {
- STATIC_ASSERT(kSmiShift == 32);
- dsra32(dst, src, 0);
- } else {
- sra(dst, src, kSmiTagSize);
- }
- }
-
- void SmiUntag(Register reg) {
- SmiUntag(reg, reg);
- }
-
// Left-shifted from int32 equivalent of Smi.
void SmiScale(Register dst, Register src, int scale) {
if (SmiValuesAre32Bits()) {
@@ -1664,12 +1692,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Jump the register contains a smi.
- void JumpIfSmi(Register value,
- Label* smi_label,
- Register scratch = at,
- BranchDelaySlot bd = PROTECT);
-
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value,
Label* not_smi_label,
@@ -1685,6 +1707,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void AssertNotSmi(Register object);
void AssertSmi(Register object);
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -1692,9 +1717,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1780,18 +1805,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void DecodeFieldToSmi(Register reg) {
DecodeField<Field>(reg, reg);
}
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue(bool code_pre_aging);
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
- void LeaveFrame(StackFrame::Type type);
-
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -1808,50 +1825,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch_reg,
Label* no_memento_found);
- bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
-
private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
- inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
- void BranchShortHelperR6(int32_t offset, Label* L);
- void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
- bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
- bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
-
- void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
- void BranchAndLinkShortHelper(int16_t offset, Label* L,
- BranchDelaySlot bdslot);
- void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- void BranchLong(Label* L, BranchDelaySlot bdslot);
- void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-
- // Common implementation of BranchF functions for the different formats.
- void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
- void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
-
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -1877,17 +1851,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
- int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
-
- bool generating_stub_;
- bool has_frame_;
- bool has_double_zero_reg_set_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
@@ -1931,7 +1894,7 @@ class CodePatcher {
};
template <typename Func>
-void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 320b97296a..9d10ca6a69 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -839,7 +839,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[i] = 0;
}
for (int i = 0; i < kNumFPURegisters; i++) {
- FPUregisters_[i] = 0;
+ FPUregisters_[2 * i] = 0;
+ FPUregisters_[2 * i + 1] = 0; // upper part for MSA ASE
}
if (kArchVariant == kMips64r6) {
@@ -996,7 +997,7 @@ void Simulator::set_dw_register(int reg, const int* dbl) {
void Simulator::set_fpu_register(int fpureg, int64_t value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- FPUregisters_[fpureg] = value;
+ FPUregisters_[fpureg * 2] = value;
}
@@ -1005,9 +1006,9 @@ void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
int32_t* pword;
if (kArchEndian == kLittle) {
- pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2]);
} else {
- pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]) + 1;
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2]) + 1;
}
*pword = value;
}
@@ -1018,9 +1019,9 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
int32_t* phiword;
if (kArchEndian == kLittle) {
- phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2])) + 1;
} else {
- phiword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ phiword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2]);
}
*phiword = value;
}
@@ -1028,13 +1029,13 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<float*>(&FPUregisters_[fpureg * 2]) = value;
}
void Simulator::set_fpu_register_double(int fpureg, double value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+ *bit_cast<double*>(&FPUregisters_[fpureg * 2]) = value;
}
@@ -1065,39 +1066,50 @@ double Simulator::get_double_from_register_pair(int reg) {
int64_t Simulator::get_fpu_register(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return FPUregisters_[fpureg];
+ return FPUregisters_[fpureg * 2];
}
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
}
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+ return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xffffffff);
}
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
+ return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xffffffff);
}
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg * 2]));
}
double Simulator::get_fpu_register_double(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- return *bit_cast<double*>(&FPUregisters_[fpureg]);
+ return *bit_cast<double*>(&FPUregisters_[fpureg * 2]);
}
+template <typename T>
+void Simulator::get_msa_register(int wreg, T* value) {
+ DCHECK((wreg >= 0) && (wreg < kNumMSARegisters));
+ memcpy(value, FPUregisters_ + wreg * 2, kSimd128Size);
+}
+
+template <typename T>
+void Simulator::set_msa_register(int wreg, const T* value) {
+ DCHECK((wreg >= 0) && (wreg < kNumMSARegisters));
+ memcpy(FPUregisters_ + wreg * 2, value, kSimd128Size);
+}
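The two accessors added above treat each 128-bit MSA register w<n> as the pair of 64-bit slots FPUregisters_[2n] and FPUregisters_[2n + 1], moving kSimd128Size (16) bytes with a single memcpy. A minimal standalone sketch of that layout, assuming the constant values used by the simulator; the demo main() is purely illustrative:

#include <cstdint>
#include <cstdio>
#include <cstring>

namespace {

constexpr int kNumFPURegisters = 32;  // as in the simulator
constexpr int kSimd128Size = 16;      // bytes per MSA register

// Two 64-bit slots per FPU register, so w<n> overlays f<n>.
int64_t FPUregisters_[kNumFPURegisters * 2];

template <typename T>
void get_msa_register(int wreg, T* value) {
  std::memcpy(value, FPUregisters_ + wreg * 2, kSimd128Size);
}

template <typename T>
void set_msa_register(int wreg, const T* value) {
  std::memcpy(FPUregisters_ + wreg * 2, value, kSimd128Size);
}

}  // namespace

int main() {
  // Write four 32-bit lanes into w3 and read them back.
  uint32_t in[4] = {1, 2, 3, 4};
  uint32_t out[4] = {0, 0, 0, 0};
  set_msa_register(3, in);
  get_msa_register(3, out);
  std::printf("w3 lanes: %u %u %u %u\n", out[0], out[1], out[2], out[3]);
  // The low 64 bits of w3 are what the scalar accessors see as f3.
  std::printf("f3 = %016llx\n",
              static_cast<unsigned long long>(FPUregisters_[3 * 2]));
  return 0;
}

Keeping the low 64 bits of w<n> at index 2n is why the scalar FPU accessors earlier in this hunk were rewritten to index fpureg * 2: the same storage backs both register views.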
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here,
@@ -1672,6 +1684,96 @@ void Simulator::TraceRegWr(int64_t value, TraceType t) {
}
}
+template <typename T>
+void Simulator::TraceMSARegWr(T* value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ uint8_t b[16];
+ uint16_t h[8];
+ uint32_t w[4];
+ uint64_t d[2];
+ float f[4];
+ double df[2];
+ } v;
+ memcpy(v.b, value, kSimd128Size);
+ switch (t) {
+ case BYTE:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ break;
+ case HALF:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ break;
+ case WORD:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32
+ " %" PRId32,
+ v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]);
+ break;
+ case DWORD:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ break;
+ case FLOAT:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") flt[0..3]:%e %e %e %e",
+ v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]);
+ break;
+ case DOUBLE:
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") dbl[0..1]:%e %e",
+ v.d[0], v.d[1], icount_, v.df[0], v.df[1]);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMSARegWr(T* value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ uint8_t b[kMSALanesByte];
+ uint16_t h[kMSALanesHalf];
+ uint32_t w[kMSALanesWord];
+ uint64_t d[kMSALanesDword];
+ float f[kMSALanesWord];
+ double df[kMSALanesDword];
+ } v;
+ memcpy(v.b, value, kMSALanesByte);
+
+ if (std::is_same<T, int32_t>::value) {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32
+ " %" PRId32,
+ v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]);
+ } else if (std::is_same<T, float>::value) {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") flt[0..3]:%e %e %e %e",
+ v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]);
+ } else if (std::is_same<T, double>::value) {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
+ ") dbl[0..1]:%e %e",
+ v.d[0], v.d[1], icount_, v.df[0], v.df[1]);
+ } else {
+ SNPrintF(trace_buf_,
+ "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
+ v.d[0], v.d[1], icount_);
+ }
+ }
+}
+
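Both TraceMSARegWr overloads above view the same 16 bytes through a union so one trace line can print the value as two 64-bit halves plus lane-typed fields. A hedged standalone sketch of that viewing trick (reading an inactive union member is the same non-strict-aliasing convenience the simulator itself relies on):

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

// View the same 128 bits at several lane widths, as TraceMSARegWr does.
union Simd128 {
  uint8_t b[16];
  uint16_t h[8];
  uint32_t w[4];
  uint64_t d[2];
  float f[4];
  double df[2];
};

int main() {
  uint32_t lanes[4] = {0x11111111u, 0x22222222u, 0x33333333u, 0x44444444u};
  Simd128 v;
  std::memcpy(v.b, lanes, sizeof(lanes));
  // Same "LO/HI plus typed lanes" shape as the simulator's trace line.
  std::printf("LO: %016" PRIx64 " HI: %016" PRIx64 "\n", v.d[0], v.d[1]);
  std::printf("w[0..3]: %08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32
              "\n",
              v.w[0], v.w[1], v.w[2], v.w[3]);
  return 0;
}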
// TODO(plind): consider making icount_ printing a flag option.
void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
@@ -4339,6 +4441,750 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
}
}
+int Simulator::DecodeMsaDataFormat() {
+ int df = -1;
+ if (instr_.IsMSABranchInstr()) {
+ switch (instr_.RsFieldRaw()) {
+ case BZ_V:
+ case BNZ_V:
+ df = MSA_VECT;
+ break;
+ case BZ_B:
+ case BNZ_B:
+ df = MSA_BYTE;
+ break;
+ case BZ_H:
+ case BNZ_H:
+ df = MSA_HALF;
+ break;
+ case BZ_W:
+ case BNZ_W:
+ df = MSA_WORD;
+ break;
+ case BZ_D:
+ case BNZ_D:
+ df = MSA_DWORD;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
+ switch (instr_.MSAMinorOpcodeField()) {
+ case kMsaMinorI5:
+ case kMsaMinorI10:
+ case kMsaMinor3R:
+ df = DF[instr_.Bits(22, 21)];
+ break;
+ case kMsaMinorMI10:
+ df = DF[instr_.Bits(1, 0)];
+ break;
+ case kMsaMinorBIT:
+ df = DF[instr_.MsaBitDf()];
+ break;
+ case kMsaMinorELM:
+ df = DF[instr_.MsaElmDf()];
+ break;
+ case kMsaMinor3RF: {
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ switch (opcode) {
+ case FEXDO:
+ case FTQ:
+ case MUL_Q:
+ case MADD_Q:
+ case MSUB_Q:
+ case MULR_Q:
+ case MADDR_Q:
+ case MSUBR_Q:
+ df = DF[1 + instr_.Bit(21)];
+ break;
+ default:
+ df = DF[2 + instr_.Bit(21)];
+ break;
+ }
+ } break;
+ case kMsaMinor2R:
+ df = DF[instr_.Bits(17, 16)];
+ break;
+ case kMsaMinor2RF:
+ df = DF[2 + instr_.Bit(16)];
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return df;
+}
+
+void Simulator::DecodeTypeMsaI8() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaI8Mask;
+ int8_t i8 = instr_.MsaImm8Value();
+ msa_reg_t ws, wd;
+
+ switch (opcode) {
+ case ANDI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ws.b[i] & i8;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case ORI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ws.b[i] | i8;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case NORI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ~(ws.b[i] | i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case XORI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = ws.b[i] ^ i8;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case BMNZI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ get_msa_register(instr_.WdValue(), wd.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = (ws.b[i] & i8) | (wd.b[i] & ~i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case BMZI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ get_msa_register(instr_.WdValue(), wd.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = (ws.b[i] & ~i8) | (wd.b[i] & i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case BSELI_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ get_msa_register(instr_.WdValue(), wd.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = (ws.b[i] & ~wd.b[i]) | (wd.b[i] & i8);
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case SHF_B:
+ get_msa_register(instr_.WsValue(), ws.b);
+ for (int i = 0; i < kMSALanesByte; i++) {
+ int j = i % 4;
+ int k = (i8 >> (2 * j)) & 0x3;
+ wd.b[i] = ws.b[i - j + k];
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ case SHF_H:
+ get_msa_register(instr_.WsValue(), ws.h);
+ for (int i = 0; i < kMSALanesHalf; i++) {
+ int j = i % 4;
+ int k = (i8 >> (2 * j)) & 0x3;
+ wd.h[i] = ws.h[i - j + k];
+ }
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ case SHF_W:
+ get_msa_register(instr_.WsValue(), ws.w);
+ for (int i = 0; i < kMSALanesWord; i++) {
+ int j = (i8 >> (2 * i)) & 0x3;
+ wd.w[i] = ws.w[j];
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
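In the SHF_* cases above, the 8-bit immediate packs four 2-bit selectors, and within every group of four lanes, lane i copies the lane chosen by selector i % 4. A small standalone illustration of that indexing for byte lanes; the helper name shf_shuffle is invented for the example:

#include <cstdint>
#include <cstdio>

// Apply the MSA SHF shuffle to byte lanes, mirroring the SHF_B loop in
// DecodeTypeMsaI8: selector j = i % 4 picks which lane of lane i's
// four-lane group gets copied.
void shf_shuffle(uint8_t* dst, const uint8_t* src, int lanes, uint8_t i8) {
  for (int i = 0; i < lanes; i++) {
    int j = i % 4;
    int k = (i8 >> (2 * j)) & 0x3;
    dst[i] = src[i - j + k];
  }
}

int main() {
  uint8_t src[16], dst[16];
  for (int i = 0; i < 16; i++) src[i] = static_cast<uint8_t>(i);
  // Selectors 3,2,1,0 (immediate 0x1B) reverse every group of four lanes.
  shf_shuffle(dst, src, 16, 0x1B);
  for (int i = 0; i < 16; i++) std::printf("%d ", dst[i]);
  std::printf("\n");  // prints: 3 2 1 0 7 6 5 4 11 10 9 8 15 14 13 12
  return 0;
}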
+template <typename T>
+T Simulator::MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5) {
+ T res;
+ uint32_t ui5 = i5 & 0x1Fu;
+ uint64_t ws_u64 = static_cast<uint64_t>(ws);
+ uint64_t ui5_u64 = static_cast<uint64_t>(ui5);
+
+ switch (opcode) {
+ case ADDVI:
+ res = static_cast<T>(ws + ui5);
+ break;
+ case SUBVI:
+ res = static_cast<T>(ws - ui5);
+ break;
+ case MAXI_S:
+ res = static_cast<T>(Max(ws, static_cast<T>(i5)));
+ break;
+ case MINI_S:
+ res = static_cast<T>(Min(ws, static_cast<T>(i5)));
+ break;
+ case MAXI_U:
+ res = static_cast<T>(Max(ws_u64, ui5_u64));
+ break;
+ case MINI_U:
+ res = static_cast<T>(Min(ws_u64, ui5_u64));
+ break;
+ case CEQI:
+ res = static_cast<T>(!Compare(ws, static_cast<T>(i5)) ? -1ull : 0ull);
+ break;
+ case CLTI_S:
+ res = static_cast<T>((Compare(ws, static_cast<T>(i5)) == -1) ? -1ull
+ : 0ull);
+ break;
+ case CLTI_U:
+ res = static_cast<T>((Compare(ws_u64, ui5_u64) == -1) ? -1ull : 0ull);
+ break;
+ case CLEI_S:
+ res =
+ static_cast<T>((Compare(ws, static_cast<T>(i5)) != 1) ? -1ull : 0ull);
+ break;
+ case CLEI_U:
+ res = static_cast<T>((Compare(ws_u64, ui5_u64) != 1) ? -1ull : 0ull);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return res;
+}
+
+void Simulator::DecodeTypeMsaI5() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask;
+ msa_reg_t ws, wd;
+
+  // Sign-extend the 5-bit immediate value to int32_t.
+ int32_t i5 = static_cast<int32_t>(instr_.MsaImm5Value() << 27) >> 27;
+
+#define MSA_I5_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ wd.elem[i] = MsaI5InstrHelper(opcode, ws.elem[i], i5); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ MSA_I5_DF(b, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ MSA_I5_DF(h, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ MSA_I5_DF(w, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ MSA_I5_DF(d, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef MSA_I5_DF
+}
+
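DecodeTypeMsaI5 recovers a signed value from the 5-bit immediate field by shifting it to the top of a 32-bit word and arithmetically shifting back down. A hedged standalone demo of that sign extension; like the simulator, it assumes the usual two's-complement conversion and arithmetic right shift:

#include <cstdint>
#include <cstdio>

// Sign-extend a 5-bit immediate the way DecodeTypeMsaI5 does: place the
// 5 bits at the top of a 32-bit word, then shift them back down with an
// arithmetic right shift.
int32_t SignExtend5(uint32_t imm5) {
  return static_cast<int32_t>(imm5 << 27) >> 27;
}

int main() {
  const uint32_t samples[] = {0, 5, 15, 16, 31};
  for (uint32_t raw : samples) {
    std::printf("raw %2u -> %3d\n", raw, SignExtend5(raw));
  }
  // Raw values 16..31 come out negative: raw 16 -> -16, raw 31 -> -1.
  return 0;
}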
+void Simulator::DecodeTypeMsaI10() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask;
+ if (opcode == LDI) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaELM() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaELMMask;
+ int32_t n = instr_.MsaElmNValue();
+ int64_t alu_out;
+ switch (opcode) {
+ case COPY_S:
+ case COPY_U: {
+ msa_reg_t ws;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ DCHECK(n < kMSALanesByte);
+ get_msa_register(instr_.WsValue(), ws.b);
+ alu_out = static_cast<int32_t>(ws.b[n]);
+ SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFu : alu_out);
+ break;
+ case MSA_HALF:
+ DCHECK(n < kMSALanesHalf);
+ get_msa_register(instr_.WsValue(), ws.h);
+ alu_out = static_cast<int32_t>(ws.h[n]);
+ SetResult(wd_reg(), (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out);
+ break;
+ case MSA_WORD:
+ DCHECK(n < kMSALanesWord);
+ get_msa_register(instr_.WsValue(), ws.w);
+ alu_out = static_cast<int32_t>(ws.w[n]);
+ SetResult(wd_reg(),
+ (opcode == COPY_U) ? alu_out & 0xFFFFFFFFu : alu_out);
+ break;
+ case MSA_DWORD:
+ DCHECK(n < kMSALanesDword);
+ get_msa_register(instr_.WsValue(), ws.d);
+ alu_out = static_cast<int64_t>(ws.d[n]);
+ SetResult(wd_reg(), alu_out);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case INSERT: {
+ msa_reg_t wd;
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ DCHECK(n < kMSALanesByte);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.b);
+ wd.b[n] = rs & 0xFFu;
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ DCHECK(n < kMSALanesHalf);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.h);
+ wd.h[n] = rs & 0xFFFFu;
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ DCHECK(n < kMSALanesWord);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.w);
+ wd.w[n] = rs & 0xFFFFFFFFu;
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ case MSA_DWORD: {
+ DCHECK(n < kMSALanesDword);
+ int64_t rs = get_register(instr_.WsValue());
+ get_msa_register(instr_.WdValue(), wd.d);
+ wd.d[n] = rs;
+ set_msa_register(instr_.WdValue(), wd.d);
+ TraceMSARegWr(wd.d);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case SLDI:
+ case SPLATI:
+ case INSVE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaBIT() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaBITMask;
+
+ switch (opcode) {
+ case SLLI:
+ case SRAI:
+ case SRLI:
+ case BCLRI:
+ case BSETI:
+ case BNEGI:
+ case BINSLI:
+ case BINSRI:
+ case SAT_S:
+ case SAT_U:
+ case SRARI:
+ case SRLRI:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaMI10() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaMI10Mask;
+ if (opcode == MSA_LD) {
+ UNIMPLEMENTED();
+ } else if (opcode == MSA_ST) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsa3R() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RMask;
+ switch (opcode) {
+ case SLL_MSA:
+ case SRA_MSA:
+ case SRL_MSA:
+ case BCLR:
+ case BSET:
+ case BNEG:
+ case BINSL:
+ case BINSR:
+ case ADDV:
+ case SUBV:
+ case MAX_S:
+ case MAX_U:
+ case MIN_S:
+ case MIN_U:
+ case MAX_A:
+ case MIN_A:
+ case CEQ:
+ case CLT_S:
+ case CLT_U:
+ case CLE_S:
+ case CLE_U:
+ case ADD_A:
+ case ADDS_A:
+ case ADDS_S:
+ case ADDS_U:
+ case AVE_S:
+ case AVE_U:
+ case AVER_S:
+ case AVER_U:
+ case SUBS_S:
+ case SUBS_U:
+ case SUBSUS_U:
+ case SUBSUU_S:
+ case ASUB_S:
+ case ASUB_U:
+ case MULV:
+ case MADDV:
+ case MSUBV:
+ case DIV_S_MSA:
+ case DIV_U:
+ case MOD_S:
+ case MOD_U:
+ case DOTP_S:
+ case DOTP_U:
+ case DPADD_S:
+ case DPADD_U:
+ case DPSUB_S:
+ case DPSUB_U:
+ case SLD:
+ case SPLAT:
+ case PCKEV:
+ case PCKOD:
+ case ILVL:
+ case ILVR:
+ case ILVEV:
+ case ILVOD:
+ case VSHF:
+ case SRAR:
+ case SRLR:
+ case HADD_S:
+ case HADD_U:
+ case HSUB_S:
+ case HSUB_U:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsa3RF() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask;
+ switch (opcode) {
+ case FCAF:
+ case FCUN:
+ case FCEQ:
+ case FCUEQ:
+ case FCLT:
+ case FCULT:
+ case FCLE:
+ case FCULE:
+ case FSAF:
+ case FSUN:
+ case FSEQ:
+ case FSUEQ:
+ case FSLT:
+ case FSULT:
+ case FSLE:
+ case FSULE:
+ case FADD:
+ case FSUB:
+ case FMUL:
+ case FDIV:
+ case FMADD:
+ case FMSUB:
+ case FEXP2:
+ case FEXDO:
+ case FTQ:
+ case FMIN:
+ case FMIN_A:
+ case FMAX:
+ case FMAX_A:
+ case FCOR:
+ case FCUNE:
+ case FCNE:
+ case MUL_Q:
+ case MADD_Q:
+ case MSUB_Q:
+ case FSOR:
+ case FSUNE:
+ case FSNE:
+ case MULR_Q:
+ case MADDR_Q:
+ case MSUBR_Q:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeMsaVec() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsaVECMask;
+ msa_reg_t wd, ws, wt;
+
+ get_msa_register(instr_.WsValue(), ws.d);
+ get_msa_register(instr_.WtValue(), wt.d);
+ if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) {
+ get_msa_register(instr_.WdValue(), wd.d);
+ }
+
+ for (int i = 0; i < kMSALanesDword; i++) {
+ switch (opcode) {
+ case AND_V:
+ wd.d[i] = ws.d[i] & wt.d[i];
+ break;
+ case OR_V:
+ wd.d[i] = ws.d[i] | wt.d[i];
+ break;
+ case NOR_V:
+ wd.d[i] = ~(ws.d[i] | wt.d[i]);
+ break;
+ case XOR_V:
+ wd.d[i] = ws.d[i] ^ wt.d[i];
+ break;
+ case BMNZ_V:
+ wd.d[i] = (wt.d[i] & ws.d[i]) | (~wt.d[i] & wd.d[i]);
+ break;
+ case BMZ_V:
+ wd.d[i] = (~wt.d[i] & ws.d[i]) | (wt.d[i] & wd.d[i]);
+ break;
+ case BSEL_V:
+ wd.d[i] = (~wd.d[i] & ws.d[i]) | (wd.d[i] & wt.d[i]);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ set_msa_register(instr_.WdValue(), wd.d);
+ TraceMSARegWr(wd.d);
+}
+
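The three read-modify-write cases in DecodeTypeMsaVec are plain bit selects: BMNZ takes ws where the wt mask has 1-bits and keeps wd elsewhere, BMZ does the opposite, and BSEL uses wd itself as the selector between ws and wt. A small standalone sketch of those identities; the helper names are made up for the example:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Bitwise selects as in DecodeTypeMsaVec, one 64-bit lane at a time.
uint64_t Bmnz(uint64_t wd, uint64_t ws, uint64_t wt) {
  return (wt & ws) | (~wt & wd);  // where wt has 1-bits, take ws
}
uint64_t Bmz(uint64_t wd, uint64_t ws, uint64_t wt) {
  return (~wt & ws) | (wt & wd);  // where wt has 1-bits, keep wd
}
uint64_t Bsel(uint64_t wd, uint64_t ws, uint64_t wt) {
  return (~wd & ws) | (wd & wt);  // wd itself is the selector mask
}

int main() {
  uint64_t wd = 0x00000000FFFFFFFFull;
  uint64_t ws = 0xAAAAAAAAAAAAAAAAull;
  uint64_t wt = 0x5555555555555555ull;
  std::printf("BMNZ %016" PRIx64 "\n", Bmnz(wd, ws, wt));
  std::printf("BMZ  %016" PRIx64 "\n", Bmz(wd, ws, wt));
  std::printf("BSEL %016" PRIx64 "\n", Bsel(wd, ws, wt));
  // BMZ is BMNZ with the two value operands swapped.
  std::printf("%s\n",
              Bmz(wd, ws, wt) == Bmnz(ws, wd, wt) ? "swap identity holds"
                                                  : "mismatch");
  return 0;
}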
+void Simulator::DecodeTypeMsa2R() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RMask;
+ msa_reg_t wd, ws;
+ switch (opcode) {
+ case FILL:
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE: {
+ int64_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesByte; i++) {
+ wd.b[i] = rs & 0xFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.b);
+ TraceMSARegWr(wd.b);
+ break;
+ }
+ case MSA_HALF: {
+ int64_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesHalf; i++) {
+ wd.h[i] = rs & 0xFFFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.h);
+ TraceMSARegWr(wd.h);
+ break;
+ }
+ case MSA_WORD: {
+ int64_t rs = get_register(instr_.WsValue());
+ for (int i = 0; i < kMSALanesWord; i++) {
+ wd.w[i] = rs & 0xFFFFFFFFu;
+ }
+ set_msa_register(instr_.WdValue(), wd.w);
+ TraceMSARegWr(wd.w);
+ break;
+ }
+ case MSA_DWORD: {
+ int64_t rs = get_register(instr_.WsValue());
+ wd.d[0] = wd.d[1] = rs;
+ set_msa_register(instr_.WdValue(), wd.d);
+ TraceMSARegWr(wd.d);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case PCNT:
+#define PCNT_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ uint64_t u64elem = static_cast<uint64_t>(ws.elem[i]); \
+ wd.elem[i] = base::bits::CountPopulation64(u64elem); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ PCNT_DF(ub, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ PCNT_DF(uh, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ PCNT_DF(uw, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ PCNT_DF(ud, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef PCNT_DF
+ break;
+ case NLOC:
+#define NLOC_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ const uint64_t mask = (num_of_lanes == kMSALanesDword) \
+ ? UINT64_MAX \
+ : (1ULL << (kMSARegSize / num_of_lanes)) - 1; \
+ uint64_t u64elem = static_cast<uint64_t>(~ws.elem[i]) & mask; \
+ wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \
+ (64 - kMSARegSize / num_of_lanes); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ NLOC_DF(ub, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ NLOC_DF(uh, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ NLOC_DF(uw, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ NLOC_DF(ud, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef NLOC_DF
+ break;
+ case NLZC:
+#define NLZC_DF(elem, num_of_lanes) \
+ get_msa_register(instr_.WsValue(), ws.elem); \
+ for (int i = 0; i < num_of_lanes; i++) { \
+ uint64_t u64elem = static_cast<uint64_t>(ws.elem[i]); \
+ wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \
+ (64 - kMSARegSize / num_of_lanes); \
+ } \
+ set_msa_register(instr_.WdValue(), wd.elem); \
+ TraceMSARegWr(wd.elem)
+
+ switch (DecodeMsaDataFormat()) {
+ case MSA_BYTE:
+ NLZC_DF(ub, kMSALanesByte);
+ break;
+ case MSA_HALF:
+ NLZC_DF(uh, kMSALanesHalf);
+ break;
+ case MSA_WORD:
+ NLZC_DF(uw, kMSALanesWord);
+ break;
+ case MSA_DWORD:
+ NLZC_DF(ud, kMSALanesDword);
+ break;
+ default:
+ UNREACHABLE();
+ }
+#undef NLZC_DF
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
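The NLOC and NLZC macros above reuse a 64-bit count-leading-zeros: each lane is widened to 64 bits (complemented and masked first for NLOC), counted, and then 64 minus the lane width is subtracted to discard the zeros introduced by the widening. A hedged standalone check of that adjustment for 8-bit lanes, using a portable stand-in for base::bits::CountLeadingZeros64:

#include <cstdint>
#include <cstdio>

// Portable stand-in for base::bits::CountLeadingZeros64 (returns 64 for 0).
int CountLeadingZeros64(uint64_t x) {
  if (x == 0) return 64;
  int n = 0;
  while (!(x & (1ull << 63))) {
    x <<= 1;
    n++;
  }
  return n;
}

// Leading-ones count of an 8-bit lane, following the NLOC_DF macro:
// complement, mask to the lane width, count zeros in 64 bits, then drop
// the 64 - 8 zeros contributed by the widening.
int LeadingOnes8(uint8_t lane) {
  const uint64_t mask = (1ull << 8) - 1;
  uint64_t widened = static_cast<uint64_t>(static_cast<uint8_t>(~lane)) & mask;
  return CountLeadingZeros64(widened) - (64 - 8);
}

int main() {
  std::printf("%d %d %d %d\n", LeadingOnes8(0xFF), LeadingOnes8(0xF0),
              LeadingOnes8(0x80), LeadingOnes8(0x00));  // prints: 8 4 1 0
  return 0;
}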
+void Simulator::DecodeTypeMsa2RF() {
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(CpuFeatures::IsSupported(MIPS_SIMD));
+ uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask;
+ switch (opcode) {
+ case FCLASS:
+ case FTRUNC_S:
+ case FTRUNC_U:
+ case FSQRT:
+ case FRSQRT:
+ case FRCP:
+ case FRINT:
+ case FLOG2:
+ case FEXUPL:
+ case FEXUPR:
+ case FFQL:
+ case FFQR:
+ case FTINT_S:
+ case FTINT_U:
+ case FFINT_S:
+ case FFINT_U:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
void Simulator::DecodeTypeRegister() {
// ---------- Execution.
switch (instr_.OpcodeFieldRaw()) {
@@ -4357,6 +5203,27 @@ void Simulator::DecodeTypeRegister() {
case SPECIAL3:
DecodeTypeRegisterSPECIAL3();
break;
+ case MSA:
+ switch (instr_.MSAMinorOpcodeField()) {
+ case kMsaMinor3R:
+ DecodeTypeMsa3R();
+ break;
+ case kMsaMinor3RF:
+ DecodeTypeMsa3RF();
+ break;
+ case kMsaMinorVEC:
+ DecodeTypeMsaVec();
+ break;
+ case kMsaMinor2R:
+ DecodeTypeMsa2R();
+ break;
+ case kMsaMinor2RF:
+ DecodeTypeMsa2RF();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
// cases.
@@ -4469,6 +5336,18 @@ void Simulator::DecodeTypeImmediate() {
case BC1NEZ:
BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
+ UNIMPLEMENTED();
+ break;
default:
UNREACHABLE();
}
@@ -4891,6 +5770,31 @@ void Simulator::DecodeTypeImmediate() {
SetResult(rs_reg, alu_out);
break;
}
+ case MSA:
+ switch (instr_.MSAMinorOpcodeField()) {
+ case kMsaMinorI8:
+ DecodeTypeMsaI8();
+ break;
+ case kMsaMinorI5:
+ DecodeTypeMsaI5();
+ break;
+ case kMsaMinorI10:
+ DecodeTypeMsaI10();
+ break;
+ case kMsaMinorELM:
+ DecodeTypeMsaELM();
+ break;
+ case kMsaMinorBIT:
+ DecodeTypeMsaBIT();
+ break;
+ case kMsaMinorMI10:
+ DecodeTypeMsaMI10();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index a9e0d3d118..cddc602791 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -189,6 +189,43 @@ class Simulator {
kNumFPURegisters
};
+ // MSA registers
+ enum MSARegister {
+ w0,
+ w1,
+ w2,
+ w3,
+ w4,
+ w5,
+ w6,
+ w7,
+ w8,
+ w9,
+ w10,
+ w11,
+ w12,
+ w13,
+ w14,
+ w15,
+ w16,
+ w17,
+ w18,
+ w19,
+ w20,
+ w21,
+ w22,
+ w23,
+ w24,
+ w25,
+ w26,
+ w27,
+ w28,
+ w29,
+ w30,
+ w31,
+ kNumMSARegisters
+ };
+
explicit Simulator(Isolate* isolate);
~Simulator();
@@ -222,6 +259,10 @@ class Simulator {
int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
+ template <typename T>
+ void get_msa_register(int wreg, T* value);
+ template <typename T>
+ void set_msa_register(int wreg, const T* value);
void set_fcsr_bit(uint32_t cc, bool value);
bool test_fcsr_bit(uint32_t cc);
bool set_fcsr_round_error(double original, double rounded);
@@ -311,6 +352,19 @@ class Simulator {
WORD_DWORD
};
+ // MSA Data Format
+ enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
+ typedef union {
+ int8_t b[kMSALanesByte];
+ uint8_t ub[kMSALanesByte];
+ int16_t h[kMSALanesHalf];
+ uint16_t uh[kMSALanesHalf];
+ int32_t w[kMSALanesWord];
+ uint32_t uw[kMSALanesWord];
+ int64_t d[kMSALanesDword];
+ uint64_t ud[kMSALanesDword];
+ } msa_reg_t;
+
// Read and write memory.
inline uint32_t ReadBU(int64_t addr);
inline int32_t ReadB(int64_t addr);
@@ -336,6 +390,10 @@ class Simulator {
inline void DieOrDebug();
void TraceRegWr(int64_t value, TraceType t = DWORD);
+ template <typename T>
+ void TraceMSARegWr(T* value, TraceType t);
+ template <typename T>
+ void TraceMSARegWr(T* value);
void TraceMemWr(int64_t addr, int64_t value, TraceType t);
void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
@@ -369,6 +427,21 @@ class Simulator {
void DecodeTypeRegisterLRsType();
+ int DecodeMsaDataFormat();
+ void DecodeTypeMsaI8();
+ void DecodeTypeMsaI5();
+ void DecodeTypeMsaI10();
+ void DecodeTypeMsaELM();
+ void DecodeTypeMsaBIT();
+ void DecodeTypeMsaMI10();
+ void DecodeTypeMsa3R();
+ void DecodeTypeMsa3RF();
+ void DecodeTypeMsaVec();
+ void DecodeTypeMsa2R();
+ void DecodeTypeMsa2RF();
+ template <typename T>
+ T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5);
+
// Executing is handled based on the instruction type.
void DecodeTypeRegister();
@@ -389,6 +462,9 @@ class Simulator {
inline int32_t fd_reg() const { return instr_.FdValue(); }
inline int32_t sa() const { return instr_.SaValue(); }
inline int32_t lsa_sa() const { return instr_.LsaSaValue(); }
+ inline int32_t ws_reg() const { return instr_.WsValue(); }
+ inline int32_t wt_reg() const { return instr_.WtValue(); }
+ inline int32_t wd_reg() const { return instr_.WdValue(); }
inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
set_register(rd_reg, alu_out);
@@ -508,7 +584,9 @@ class Simulator {
// Registers.
int64_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
- int64_t FPUregisters_[kNumFPURegisters];
+  // Note: the FPUregisters_[] array is doubled to 64 * 8B = 32 * 16B so that
+  // each pair of 64-bit slots can also hold one 128-bit MSA register.
+ int64_t FPUregisters_[kNumFPURegisters * 2];
// FPU control register.
uint32_t FCSR_;
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index 7d83d51a17..e96d6e9f3e 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -7,6 +7,7 @@
#include "src/assembler-inl.h"
#include "src/objects-body-descriptors.h"
+#include "src/objects/hash-table.h"
#include "src/transitions.h"
namespace v8 {
@@ -107,7 +108,7 @@ void BodyDescriptorBase::IteratePointer(Heap* heap, HeapObject* obj,
class JSObject::BodyDescriptor final : public BodyDescriptorBase {
public:
- static const int kStartOffset = JSReceiver::kPropertiesOffset;
+ static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
static bool IsValidSlot(HeapObject* obj, int offset) {
if (offset < kStartOffset) return false;
@@ -133,7 +134,7 @@ class JSObject::BodyDescriptor final : public BodyDescriptorBase {
class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
public:
- static const int kStartOffset = JSReceiver::kPropertiesOffset;
+ static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
static bool IsValidSlot(HeapObject* obj, int offset) {
return offset >= kStartOffset;
@@ -172,14 +173,9 @@ class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, kPropertiesOffset, kNonWeakFieldsEndOffset, v);
-
- if (body_visiting_policy & kVisitCodeEntry) {
- v->VisitCodeEntry(JSFunction::cast(obj),
- obj->address() + kCodeEntryOffset);
- }
-
- if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers(obj, kPropertiesOrHashOffset, kNonWeakFieldsEndOffset, v);
+ v->VisitCodeEntry(JSFunction::cast(obj), obj->address() + kCodeEntryOffset);
+ if (body_visiting_policy == kIgnoreWeakness) {
IteratePointers(obj, kNextFunctionLinkOffset, kSize, v);
}
IterateBodyImpl(obj, kSize, object_size, v);
@@ -188,15 +184,12 @@ class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
Heap* heap = obj->GetHeap();
- IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOrHashOffset,
kNonWeakFieldsEndOffset);
- if (body_visiting_policy & kVisitCodeEntry) {
- StaticVisitor::VisitCodeEntry(heap, obj,
- obj->address() + kCodeEntryOffset);
- }
+ StaticVisitor::VisitCodeEntry(heap, obj, obj->address() + kCodeEntryOffset);
- if (body_visiting_policy & kVisitNextFunction) {
+ if (body_visiting_policy == kIgnoreWeakness) {
IteratePointers<StaticVisitor>(heap, obj, kNextFunctionLinkOffset, kSize);
}
IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
@@ -225,7 +218,7 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
// Array buffers contain raw pointers that the GC does not know about. These
// are stored at kBackStoreOffset and later, so we do not iterate over
// those.
- IteratePointers(obj, kPropertiesOffset, kBackingStoreOffset, v);
+ IteratePointers(obj, kPropertiesOrHashOffset, kBackingStoreOffset, v);
IterateBodyImpl(obj, kSize, object_size, v);
}
@@ -235,7 +228,7 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
// Array buffers contain raw pointers that the GC does not know about. These
// are stored at kBackStoreOffset and later, so we do not iterate over
// those.
- IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOrHashOffset,
kBackingStoreOffset);
IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
}
@@ -245,6 +238,42 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
}
};
+template <typename Derived>
+class SmallOrderedHashTable<Derived>::BodyDescriptor final
+ : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ Derived* table = reinterpret_cast<Derived*>(obj);
+ if (offset < table->GetDataTableStartOffset()) return false;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ Derived* table = reinterpret_cast<Derived*>(obj);
+ int start = table->GetDataTableStartOffset();
+ for (int i = 0; i < table->Capacity(); i++) {
+ IteratePointer(obj, start + (i * kPointerSize), v);
+ }
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ Derived* table = reinterpret_cast<Derived*>(obj);
+ int start = table->GetDataTableStartOffset();
+ for (int i = 0; i < table->Capacity(); i++) {
+ IteratePointer<StaticVisitor>(heap, obj, start + (i * kPointerSize));
+ }
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ Derived* table = reinterpret_cast<Derived*>(obj);
+ return table->Size();
+ }
+};
+
class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
@@ -257,7 +286,7 @@ class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(HeapObject* obj, int object_size) {}
static inline int SizeOf(Map* map, HeapObject* obj) {
- return reinterpret_cast<ByteArray*>(obj)->ByteArraySize();
+ return ByteArray::SizeFor(ByteArray::cast(obj)->synchronized_length());
}
};
@@ -285,7 +314,8 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map* map, HeapObject* obj) {
- return reinterpret_cast<BytecodeArray*>(obj)->BytecodeArraySize();
+ return BytecodeArray::SizeFor(
+ BytecodeArray::cast(obj)->synchronized_length());
}
};
@@ -302,7 +332,7 @@ class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map* map, HeapObject* obj) {
return FixedDoubleArray::SizeFor(
- reinterpret_cast<FixedDoubleArray*>(obj)->length());
+ FixedDoubleArray::cast(obj)->synchronized_length());
}
};
@@ -325,7 +355,7 @@ class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map* map, HeapObject* object) {
- return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ return FixedTypedArrayBase::cast(object)->size();
}
};
@@ -342,10 +372,10 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
- if (body_visiting_policy == kVisitStrong) {
- IterateBodyImpl(obj, kPropertiesOffset, object_size, v);
+ if (body_visiting_policy == kIgnoreWeakness) {
+ IterateBodyImpl(obj, kPropertiesOrHashOffset, object_size, v);
} else {
- IteratePointers(obj, kPropertiesOffset, kTableOffset, v);
+ IteratePointers(obj, kPropertiesOrHashOffset, kTableOffset, v);
IterateBodyImpl(obj, kSize, object_size, v);
}
}
@@ -353,10 +383,11 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
Heap* heap = obj->GetHeap();
- if (body_visiting_policy == kVisitStrong) {
- IterateBodyImpl<StaticVisitor>(heap, obj, kPropertiesOffset, object_size);
+ if (body_visiting_policy == kIgnoreWeakness) {
+ IterateBodyImpl<StaticVisitor>(heap, obj, kPropertiesOrHashOffset,
+ object_size);
} else {
- IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOrHashOffset,
kTableOffset);
IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
}
@@ -511,7 +542,7 @@ class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map* map, HeapObject* obj) {
SeqOneByteString* string = SeqOneByteString::cast(obj);
- return string->SizeFor(string->length());
+ return string->SizeFor(string->synchronized_length());
}
};
@@ -528,7 +559,7 @@ class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map* map, HeapObject* obj) {
SeqTwoByteString* string = SeqTwoByteString::cast(obj);
- return string->SizeFor(string->length());
+ return string->SizeFor(string->synchronized_length());
}
};
@@ -555,7 +586,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
}
}
UNREACHABLE();
- return ReturnType();
}
switch (type) {
@@ -563,6 +593,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3);
case FIXED_DOUBLE_ARRAY_TYPE:
return ReturnType();
+ case PROPERTY_ARRAY_TYPE:
+ return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3);
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
case JS_OBJECT_TYPE:
@@ -582,8 +614,11 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
@@ -629,6 +664,10 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3);
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
@@ -657,7 +696,14 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3);
case BYTECODE_ARRAY_TYPE:
return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3);
-
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return Op::template apply<
+ SmallOrderedHashTable<SmallOrderedHashSet>::BodyDescriptor>(p1, p2,
+ p3);
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ return Op::template apply<
+ SmallOrderedHashTable<SmallOrderedHashMap>::BodyDescriptor>(p1, p2,
+ p3);
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FILLER_TYPE:
@@ -686,7 +732,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
- return ReturnType();
}
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 273bfa22e4..a0516d1c70 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -13,6 +13,7 @@
#include "src/layout-descriptor.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-info.h"
#include "src/ostreams.h"
@@ -86,6 +87,9 @@ void HeapObject::HeapObjectVerify() {
case TRANSITION_ARRAY_TYPE:
TransitionArray::cast(this)->TransitionArrayVerify();
break;
+ case PROPERTY_ARRAY_TYPE:
+ PropertyArray::cast(this)->PropertyArrayVerify();
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
@@ -109,6 +113,10 @@ void HeapObject::HeapObjectVerify() {
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
case JS_ARGUMENTS_TYPE:
@@ -159,10 +167,13 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_TYPE:
JSMap::cast(this)->JSMapVerify();
break;
- case JS_SET_ITERATOR_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
JSSetIterator::cast(this)->JSSetIteratorVerify();
break;
- case JS_MAP_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
@@ -248,6 +259,12 @@ void HeapObject::HeapObjectVerify() {
case JS_DATA_VIEW_TYPE:
JSDataView::cast(this)->JSDataViewVerify();
break;
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ SmallOrderedHashSet::cast(this)->SmallOrderedHashTableVerify();
+ break;
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ SmallOrderedHashMap::cast(this)->SmallOrderedHashTableVerify();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -327,13 +344,13 @@ bool JSObject::ElementsAreSafeToExamine() {
void JSObject::JSObjectVerify() {
- VerifyHeapPointer(properties());
+ VerifyPointer(raw_properties_or_hash());
VerifyHeapPointer(elements());
CHECK_IMPLIES(HasSloppyArgumentsElements(), IsJSArgumentsObject());
if (HasFastProperties()) {
int actual_unused_property_fields = map()->GetInObjectProperties() +
- properties()->length() -
+ property_array()->length() -
map()->NextFreePropertyIndex();
if (map()->unused_property_fields() != actual_unused_property_fields) {
// There are two reasons why this can happen:
@@ -394,7 +411,7 @@ void JSObject::JSObjectVerify() {
HasFastStringWrapperElements()),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
- CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
+ CHECK(map()->has_fast_object_elements() == HasObjectElements());
}
}
@@ -424,7 +441,7 @@ void Map::DictionaryMapVerify() {
CHECK(is_dictionary_map());
CHECK(instance_descriptors()->IsEmpty());
CHECK_EQ(0, unused_property_fields());
- CHECK_EQ(Heap::GetStaticVisitorIdForMap(this), visitor_id());
+ CHECK_EQ(Map::GetVisitorId(this), visitor_id());
}
@@ -450,6 +467,12 @@ void FixedArray::FixedArrayVerify() {
}
}
+void PropertyArray::PropertyArrayVerify() {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ VerifyPointer(e);
+ }
+}
void FixedDoubleArray::FixedDoubleArrayVerify() {
for (int i = 0; i < length(); i++) {
@@ -486,7 +509,6 @@ void JSArgumentsObject::JSArgumentsObjectVerify() {
void JSSloppyArgumentsObject::JSSloppyArgumentsObjectVerify() {
Isolate* isolate = GetIsolate();
- if (!map()->is_dictionary_map()) VerifyObjectField(kCalleeOffset);
if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
isolate->IsInAnyContext(map(),
Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX) ||
@@ -520,16 +542,14 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(
CHECK(arg_elements == isolate->heap()->empty_fixed_array());
return;
}
- int nofMappedParameters =
- length() - SloppyArgumentsElements::kParameterMapStart;
- CHECK_LE(nofMappedParameters, context_object->length());
- CHECK_LE(nofMappedParameters, arg_elements->length());
ElementsAccessor* accessor;
if (is_fast) {
- accessor = ElementsAccessor::ForKind(FAST_HOLEY_ELEMENTS);
+ accessor = ElementsAccessor::ForKind(HOLEY_ELEMENTS);
} else {
accessor = ElementsAccessor::ForKind(DICTIONARY_ELEMENTS);
}
+ int nofMappedParameters = 0;
+ int maxMappedIndex = 0;
for (int i = 0; i < nofMappedParameters; i++) {
// Verify that each context-mapped argument is either the hole or a valid
// Smi within context length range.
@@ -542,12 +562,20 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(
CHECK(accessor->HasElement(holder, i, arg_elements));
continue;
}
- Object* value = context_object->get(Smi::cast(mapped)->value());
+ int mappedIndex = Smi::ToInt(mapped);
+ nofMappedParameters++;
+ CHECK_LE(maxMappedIndex, mappedIndex);
+ maxMappedIndex = mappedIndex;
+ Object* value = context_object->get(mappedIndex);
CHECK(value->IsObject());
// None of the context-mapped entries should exist in the arguments
// elements.
CHECK(!accessor->HasElement(holder, i, arg_elements));
}
+ CHECK_LE(nofMappedParameters, context_object->length());
+ CHECK_LE(nofMappedParameters, arg_elements->length());
+ CHECK_LE(maxMappedIndex, context_object->length());
+ CHECK_LE(maxMappedIndex, arg_elements->length());
}
void JSGeneratorObject::JSGeneratorObjectVerify() {
@@ -595,32 +623,32 @@ void JSDate::JSDateVerify() {
cache_stamp()->IsNaN());
if (month()->IsSmi()) {
- int month = Smi::cast(this->month())->value();
+ int month = Smi::ToInt(this->month());
CHECK(0 <= month && month <= 11);
}
if (day()->IsSmi()) {
- int day = Smi::cast(this->day())->value();
+ int day = Smi::ToInt(this->day());
CHECK(1 <= day && day <= 31);
}
if (hour()->IsSmi()) {
- int hour = Smi::cast(this->hour())->value();
+ int hour = Smi::ToInt(this->hour());
CHECK(0 <= hour && hour <= 23);
}
if (min()->IsSmi()) {
- int min = Smi::cast(this->min())->value();
+ int min = Smi::ToInt(this->min());
CHECK(0 <= min && min <= 59);
}
if (sec()->IsSmi()) {
- int sec = Smi::cast(this->sec())->value();
+ int sec = Smi::ToInt(this->sec());
CHECK(0 <= sec && sec <= 59);
}
if (weekday()->IsSmi()) {
- int weekday = Smi::cast(this->weekday())->value();
+ int weekday = Smi::ToInt(this->weekday());
CHECK(0 <= weekday && weekday <= 6);
}
if (cache_stamp()->IsSmi()) {
- CHECK(Smi::cast(cache_stamp())->value() <=
- Smi::cast(isolate->date_cache()->stamp())->value());
+ CHECK(Smi::ToInt(cache_stamp()) <=
+ Smi::ToInt(isolate->date_cache()->stamp()));
}
}
@@ -653,9 +681,7 @@ void String::StringVerify() {
void ConsString::ConsStringVerify() {
- CHECK(this->first()->IsString());
- CHECK(this->second() == GetHeap()->empty_string() ||
- this->second()->IsString());
+ CHECK(this->first()->IsString() && this->second()->IsString());
CHECK(this->length() >= ConsString::kMinLength);
CHECK(this->length() == this->first()->length() + this->second()->length());
if (this->IsFlat()) {
@@ -684,11 +710,14 @@ void JSBoundFunction::JSBoundFunctionVerify() {
VerifyObjectField(kBoundThisOffset);
VerifyObjectField(kBoundTargetFunctionOffset);
VerifyObjectField(kBoundArgumentsOffset);
- CHECK(bound_target_function()->IsCallable());
CHECK(IsCallable());
- CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
-}
+ Isolate* const isolate = GetIsolate();
+ if (!raw_bound_target_function()->IsUndefined(isolate)) {
+ CHECK(bound_target_function()->IsCallable());
+ CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
+ }
+}
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
@@ -716,16 +745,27 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kScriptOffset);
- CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
+ CHECK(raw_name() == kNoSharedNameSentinel || raw_name()->IsString());
+
+ Isolate* isolate = GetIsolate();
+ CHECK(function_data()->IsUndefined(isolate) || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData());
- CHECK(function_identifier()->IsUndefined(GetIsolate()) ||
- HasBuiltinFunctionId() || HasInferredName());
+ CHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId() ||
+ HasInferredName());
+
+ int expected_map_index = Context::FunctionMapIndex(
+ language_mode(), kind(), has_shared_name(), needs_home_object());
+ CHECK_EQ(expected_map_index, function_map_index());
if (scope_info()->length() > 0) {
CHECK(kind() == scope_info()->function_kind());
CHECK_EQ(kind() == kModule, scope_info()->scope_type() == MODULE_SCOPE);
}
+
+ CHECK(preparsed_scope_data()->IsNull(isolate) ||
+ preparsed_scope_data()->IsPreParsedScopeData());
+ VerifyObjectField(kPreParsedScopeDataOffset);
}
@@ -734,7 +774,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
JSObjectVerify();
VerifyObjectField(JSGlobalProxy::kNativeContextOffset);
// Make sure that this object has no properties, elements.
- CHECK_EQ(0, properties()->length());
+ CHECK_EQ(GetHeap()->empty_fixed_array(), raw_properties_or_hash());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -742,7 +782,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
void JSGlobalObject::JSGlobalObjectVerify() {
CHECK(IsJSGlobalObject());
// Do not check the dummy global object for the builtins.
- if (GlobalDictionary::cast(properties())->NumberOfElements() == 0 &&
+ if (global_dictionary()->NumberOfElements() == 0 &&
elements()->length() == 0) {
return;
}
@@ -760,7 +800,7 @@ void Oddball::OddballVerify() {
number == heap->hole_nan_value());
} else {
CHECK(number->IsSmi());
- int value = Smi::cast(number)->value();
+ int value = Smi::ToInt(number);
// Hidden oddballs have negative smis.
const int kLeastHiddenOddballNumber = -7;
CHECK_LE(value, 1);
@@ -883,7 +923,7 @@ void JSArray::JSArrayVerify() {
if (!length()->IsNumber()) return;
// Verify that the length and the elements backing store are in sync.
if (length()->IsSmi() && HasFastElements()) {
- int size = Smi::cast(length())->value();
+ int size = Smi::ToInt(length());
// Holey / Packed backing stores might have slack or might have not been
// properly initialized yet.
CHECK(size <= elements()->length() ||
@@ -930,10 +970,8 @@ void JSSetIterator::JSSetIteratorVerify() {
CHECK(IsJSSetIterator());
JSObjectVerify();
VerifyHeapPointer(table());
- Isolate* isolate = GetIsolate();
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(isolate));
- CHECK(index()->IsSmi() || index()->IsUndefined(isolate));
- CHECK(kind()->IsSmi() || kind()->IsUndefined(isolate));
+ CHECK(table()->IsOrderedHashTable());
+ CHECK(index()->IsSmi());
}
@@ -941,10 +979,8 @@ void JSMapIterator::JSMapIteratorVerify() {
CHECK(IsJSMapIterator());
JSObjectVerify();
VerifyHeapPointer(table());
- Isolate* isolate = GetIsolate();
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(isolate));
- CHECK(index()->IsSmi() || index()->IsUndefined(isolate));
- CHECK(kind()->IsSmi() || kind()->IsUndefined(isolate));
+ CHECK(table()->IsOrderedHashTable());
+ CHECK(index()->IsSmi());
}
@@ -1018,6 +1054,51 @@ void JSPromise::JSPromiseVerify() {
reject_reactions()->IsFixedArray());
}
+template <typename Derived>
+void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify() {
+ CHECK(IsSmallOrderedHashTable());
+ Isolate* isolate = GetIsolate();
+
+ for (int entry = 0; entry < NumberOfBuckets(); entry++) {
+ int bucket = GetFirstEntry(entry);
+ if (bucket == kNotFound) continue;
+
+ for (int offset = 0; offset < Derived::kEntrySize; offset++) {
+ Object* val = GetDataEntry(bucket, offset);
+ CHECK(!val->IsTheHole(isolate));
+ }
+ }
+
+ for (int entry = 0; entry < NumberOfElements(); entry++) {
+ int chain = GetNextEntry(entry);
+ if (chain == kNotFound) continue;
+
+ for (int offset = 0; offset < Derived::kEntrySize; offset++) {
+ Object* val = GetDataEntry(chain, offset);
+ CHECK(!val->IsTheHole(isolate));
+ }
+ }
+
+ for (int entry = 0; entry < NumberOfElements(); entry++) {
+ for (int offset = 0; offset < Derived::kEntrySize; offset++) {
+ Object* val = GetDataEntry(entry, offset);
+ VerifyPointer(val);
+ }
+ }
+
+ for (int entry = NumberOfElements(); entry < Capacity(); entry++) {
+ for (int offset = 0; offset < Derived::kEntrySize; offset++) {
+ Object* val = GetDataEntry(entry, offset);
+ CHECK(val->IsTheHole(isolate));
+ }
+ }
+}
+
+template void
+SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify();
+template void
+SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify();
+
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
Isolate* isolate = GetIsolate();
@@ -1033,23 +1114,16 @@ void JSRegExp::JSRegExpVerify() {
FixedArray* arr = FixedArray::cast(data());
Object* one_byte_data = arr->get(JSRegExp::kIrregexpLatin1CodeIndex);
- // Smi : Not compiled yet (-1) or code prepared for flushing.
- // JSObject: Compilation error.
+ // Smi : Not compiled yet (-1).
// Code/ByteArray: Compiled code.
CHECK(
- one_byte_data->IsSmi() ||
+ (one_byte_data->IsSmi() &&
+ Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
(is_native ? one_byte_data->IsCode() : one_byte_data->IsByteArray()));
Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- CHECK(uc16_data->IsSmi() ||
- (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
-
- Object* one_byte_saved =
- arr->get(JSRegExp::kIrregexpLatin1CodeSavedIndex);
- CHECK(one_byte_saved->IsSmi() || one_byte_saved->IsString() ||
- one_byte_saved->IsCode());
- Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
- CHECK(uc16_saved->IsSmi() || uc16_saved->IsString() ||
- uc16_saved->IsCode());
+ CHECK((uc16_data->IsSmi() &&
+ Smi::ToInt(uc16_data) == JSRegExp::kUninitializedValue) ||
+ (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
CHECK(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
CHECK(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
@@ -1191,27 +1265,27 @@ void Module::ModuleVerify() {
VerifyPointer(exports());
VerifyPointer(module_namespace());
VerifyPointer(requested_modules());
+ VerifyPointer(script());
+ VerifyPointer(exception());
VerifySmiField(kHashOffset);
VerifySmiField(kStatusOffset);
- CHECK((!instantiated() && code()->IsSharedFunctionInfo()) ||
- (instantiated() && !evaluated() && code()->IsJSFunction()) ||
- (instantiated() && evaluated() && code()->IsModuleInfo()));
+ CHECK((status() < kInstantiating && code()->IsSharedFunctionInfo()) ||
+ (status() < kEvaluating && code()->IsJSFunction()) ||
+ code()->IsModuleInfo());
+
+ CHECK_EQ(status() == kErrored, !exception()->IsTheHole(GetIsolate()));
CHECK(module_namespace()->IsUndefined(GetIsolate()) ||
module_namespace()->IsJSModuleNamespace());
if (module_namespace()->IsJSModuleNamespace()) {
- CHECK(instantiated());
+ CHECK_LE(kInstantiating, status());
CHECK_EQ(JSModuleNamespace::cast(module_namespace())->module(), this);
}
CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
CHECK_NE(hash(), 0);
-
- CHECK_LE(kUnprepared, status());
- CHECK_LE(status(), kPrepared);
- CHECK_IMPLIES(instantiated(), status() == kPrepared);
}
void PrototypeInfo::PrototypeInfoVerify() {
@@ -1365,6 +1439,13 @@ void StackFrameInfo::StackFrameInfoVerify() {
VerifyPointer(script_name_or_source_url());
VerifyPointer(function_name());
}
+
+void PreParsedScopeData::PreParsedScopeDataVerify() {
+ CHECK(IsPreParsedScopeData());
+ CHECK(scope_data()->IsByteArray());
+ CHECK(child_data()->IsFixedArray());
+}
+
#endif // VERIFY_HEAP
#ifdef DEBUG
@@ -1377,7 +1458,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
} else if (IsJSGlobalObject()) {
- GlobalDictionary* dict = global_dictionary();
+ GlobalDictionary* dict = JSGlobalObject::cast(this)->global_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -1389,12 +1470,12 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
// Indexed properties
switch (GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
int holes = 0;
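The switch above reflects this patch's rename of the element-kind constants from FAST_* to PACKED_*/HOLEY_*. As a minimal standalone sketch of the packed/holey distinction (simplified stand-in names, not V8's real enum or helpers):

// Illustration only: a packed kind guarantees no holes in the backing store,
// a holey kind may contain the-hole markers and forces hole checks on reads.
enum class ElementsKindSketch {
  kPackedSmi, kHoleySmi,
  kPackedDouble, kHoleyDouble,
  kPacked, kHoley,
};

inline bool IsHoleyKind(ElementsKindSketch kind) {
  return kind == ElementsKindSketch::kHoleySmi ||
         kind == ElementsKindSketch::kHoleyDouble ||
         kind == ElementsKindSketch::kHoley;
}

// Introducing a hole (e.g. sparse assignment or `delete arr[i]` in JS) can
// only transition a kind from packed to its holey counterpart, never back.
inline ElementsKindSketch ToHoley(ElementsKindSketch kind) {
  switch (kind) {
    case ElementsKindSketch::kPackedSmi:    return ElementsKindSketch::kHoleySmi;
    case ElementsKindSketch::kPackedDouble: return ElementsKindSketch::kHoleyDouble;
    case ElementsKindSketch::kPacked:       return ElementsKindSketch::kHoley;
    default:                                return kind;  // already holey
  }
}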
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 9afd19c8ca..6bb5ac4247 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -31,14 +31,15 @@
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/hash-table.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-info.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/property.h"
#include "src/prototype.h"
-#include "src/string-hasher-inl.h"
#include "src/transitions-inl.h"
#include "src/v8memory.h"
@@ -68,79 +69,6 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() const { return READ_INT_FIELD(this, offset); } \
- void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
-
-#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
- set_condition) \
- type* holder::name() const { \
- DCHECK(get_condition); \
- return type::cast(READ_FIELD(this, offset)); \
- } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
- }
-#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
- ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
-
-#define ACCESSORS(holder, name, type, offset) \
- ACCESSORS_CHECKED(holder, name, type, offset, true)
-
-// Getter that returns a Smi as an int and writes an int as a Smi.
-#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
- int holder::name() const { \
- DCHECK(condition); \
- Object* value = READ_FIELD(this, offset); \
- return Smi::cast(value)->value(); \
- } \
- void holder::set_##name(int value) { \
- DCHECK(condition); \
- WRITE_FIELD(this, offset, Smi::FromInt(value)); \
- }
-
-#define SMI_ACCESSORS(holder, name, offset) \
- SMI_ACCESSORS_CHECKED(holder, name, offset, true)
-
-#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
- int holder::synchronized_##name() const { \
- Object* value = ACQUIRE_READ_FIELD(this, offset); \
- return Smi::cast(value)->value(); \
- } \
- void holder::synchronized_set_##name(int value) { \
- RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
- }
-
-#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
- int holder::nobarrier_##name() const { \
- Object* value = NOBARRIER_READ_FIELD(this, offset); \
- return Smi::cast(value)->value(); \
- } \
- void holder::nobarrier_set_##name(int value) { \
- NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
- }
-
-#define BOOL_GETTER(holder, field, name, offset) \
- bool holder::name() const { \
- return BooleanBit::get(field(), offset); \
- } \
-
-
-#define BOOL_ACCESSORS(holder, field, name, offset) \
- bool holder::name() const { \
- return BooleanBit::get(field(), offset); \
- } \
- void holder::set_##name(bool value) { \
- set_##field(BooleanBit::set(field(), offset, value)); \
- }
-
-#define TYPE_CHECKER(type, instancetype) \
- bool HeapObject::Is##type() const { \
- return map()->instance_type() == instancetype; \
- }
-
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
@@ -148,11 +76,11 @@ TYPE_CHECKER(CallHandlerInfo, TUPLE2_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
+TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE)
@@ -164,38 +92,40 @@ TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
TYPE_CHECKER(JSPromiseCapability, JS_PROMISE_CAPABILITY_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
+TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
+TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
+TYPE_CHECKER(WasmModuleObject, WASM_MODULE_TYPE)
+TYPE_CHECKER(WasmTableObject, WASM_TABLE_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
-TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(TypeFeedbackInfo, TUPLE3_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
+TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
+TYPE_CHECKER(PropertyArray, PROPERTY_ARRAY_TYPE)
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER
-#undef TYPE_CHECKER
bool HeapObject::IsFixedArrayBase() const {
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
@@ -220,9 +150,8 @@ bool HeapObject::IsJSGeneratorObject() const {
bool HeapObject::IsBoilerplateDescription() const { return IsFixedArray(); }
-// External objects are not extensible, so the map check is enough.
bool HeapObject::IsExternal() const {
- return map() == GetHeap()->external_map();
+ return map()->FindRootMap() == GetHeap()->external_map();
}
#define IS_TYPE_FUNCTION_DEF(type_) \
@@ -264,12 +193,6 @@ bool HeapObject::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-bool Name::IsUniqueName() const {
- uint32_t type = map()->instance_type();
- return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
- (kStringTag | kNotInternalizedTag);
-}
-
bool HeapObject::IsFunction() const {
STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
return map()->instance_type() >= FIRST_FUNCTION_TYPE;
@@ -364,6 +287,18 @@ bool HeapObject::IsJSObject() const {
bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
+bool HeapObject::IsJSMapIterator() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type >= JS_MAP_KEY_ITERATOR_TYPE &&
+ instance_type <= JS_MAP_VALUE_ITERATOR_TYPE);
+}
+
+bool HeapObject::IsJSSetIterator() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type == JS_SET_VALUE_ITERATOR_TYPE ||
+ instance_type == JS_SET_KEY_VALUE_ITERATOR_TYPE);
+}
+
bool HeapObject::IsJSArrayIterator() const {
InstanceType instance_type = map()->instance_type();
return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
@@ -384,9 +319,7 @@ bool HeapObject::IsArrayList() const { return IsFixedArray(); }
bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
-bool Object::IsLayoutDescriptor() const {
- return IsSmi() || IsFixedTypedArrayBase();
-}
+bool Object::IsLayoutDescriptor() const { return IsSmi() || IsByteArray(); }
bool HeapObject::IsFeedbackVector() const {
return map() == GetHeap()->feedback_vector_map();
@@ -409,15 +342,6 @@ bool HeapObject::IsDeoptimizationInputData() const {
return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
}
-bool HeapObject::IsDeoptimizationOutputData() const {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can check
- // that the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
bool HeapObject::IsHandlerTable() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
@@ -540,10 +464,23 @@ bool Object::IsOrderedHashSet() const { return IsOrderedHashTable(); }
bool Object::IsOrderedHashMap() const { return IsOrderedHashTable(); }
+bool Object::IsSmallOrderedHashTable() const {
+ return IsSmallOrderedHashSet() || IsSmallOrderedHashMap();
+}
+
bool Object::IsPrimitive() const {
return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
}
+// static
+Maybe<bool> Object::IsArray(Handle<Object> object) {
+ if (object->IsSmi()) return Just(false);
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->IsJSArray()) return Just(true);
+ if (!heap_object->IsJSProxy()) return Just(false);
+ return JSProxy::IsArray(Handle<JSProxy>::cast(object));
+}
+
bool HeapObject::IsJSGlobalProxy() const {
bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
DCHECK(!result || map()->is_access_check_needed());
@@ -603,41 +540,46 @@ bool Object::IsMinusZero() const {
// Cast operations
CAST_ACCESSOR(AbstractCode)
+CAST_ACCESSOR(AccessCheckInfo)
+CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(AccessorPair)
+CAST_ACCESSOR(AllocationMemento)
+CAST_ACCESSOR(AllocationSite)
CAST_ACCESSOR(ArrayList)
+CAST_ACCESSOR(AsyncGeneratorRequest)
CAST_ACCESSOR(BoilerplateDescription)
-CAST_ACCESSOR(BreakPointInfo)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(CallHandlerInfo)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(Code)
-CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ConstantElementsPair)
+CAST_ACCESSOR(ContextExtension)
CAST_ACCESSOR(DeoptimizationInputData)
-CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(ExternalOneByteString)
-CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalTwoByteString)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(PropertyArray)
CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FunctionTemplateInfo)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(JSArgumentsObject);
+CAST_ACCESSOR(InterceptorInfo)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSArrayIterator)
+CAST_ACCESSOR(JSAsyncFromSyncIterator)
+CAST_ACCESSOR(JSAsyncGeneratorObject)
CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGeneratorObject)
-CAST_ACCESSOR(JSAsyncGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMap)
@@ -645,65 +587,57 @@ CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSModuleNamespace)
CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(JSPromise)
+CAST_ACCESSOR(JSPromiseCapability)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(JSPromiseCapability)
-CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
-CAST_ACCESSOR(JSSloppyArgumentsObject)
-CAST_ACCESSOR(JSAsyncFromSyncIterator)
CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSArrayIterator)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
+CAST_ACCESSOR(Module)
CAST_ACCESSOR(ModuleInfo)
-CAST_ACCESSOR(Name)
+CAST_ACCESSOR(ModuleInfoEntry)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(Object)
-CAST_ACCESSOR(ObjectHashTable)
CAST_ACCESSOR(ObjectHashSet)
+CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(ObjectTemplateInfo)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(PromiseReactionJobInfo)
+CAST_ACCESSOR(PromiseResolveThenableJobInfo)
CAST_ACCESSOR(PropertyCell)
-CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(PrototypeInfo)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(SeededNumberDictionary)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(SourcePositionTableWithFrameCache)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(SloppyArgumentsElements)
CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(String)
+CAST_ACCESSOR(SourcePositionTableWithFrameCache)
+CAST_ACCESSOR(StackFrameInfo)
CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(ThinString)
+CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(Tuple2)
+CAST_ACCESSOR(Tuple3)
CAST_ACCESSOR(TypeFeedbackInfo)
CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
+CAST_ACCESSOR(SmallOrderedHashMap)
+CAST_ACCESSOR(SmallOrderedHashSet)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
-#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
-STRUCT_LIST(MAKE_STRUCT_CAST)
-#undef MAKE_STRUCT_CAST
-
-#undef CAST_ACCESSOR
-
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
@@ -727,6 +661,7 @@ bool Object::KeyEquals(Object* second) {
}
bool Object::FilterKey(PropertyFilter filter) {
+ DCHECK(!IsPropertyCell());
if (IsSymbol()) {
if (filter & SKIP_SYMBOLS) return true;
if (Symbol::cast(this)->is_private()) return true;
@@ -761,295 +696,6 @@ Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
}
-StringShape::StringShape(const String* str)
- : type_(str->map()->instance_type()) {
- set_valid();
- DCHECK((type_ & kIsNotStringMask) == kStringTag);
-}
-
-StringShape::StringShape(Map* map) : type_(map->instance_type()) {
- set_valid();
- DCHECK((type_ & kIsNotStringMask) == kStringTag);
-}
-
-StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
- set_valid();
- DCHECK((type_ & kIsNotStringMask) == kStringTag);
-}
-
-bool StringShape::IsInternalized() {
- DCHECK(valid());
- STATIC_ASSERT(kNotInternalizedTag != 0);
- return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
- (kStringTag | kInternalizedTag);
-}
-
-bool String::IsOneByteRepresentation() const {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kOneByteStringTag;
-}
-
-bool String::IsTwoByteRepresentation() const {
- uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kTwoByteStringTag;
-}
-
-bool String::IsOneByteRepresentationUnderneath() {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kIsIndirectStringTag != 0);
- STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- DCHECK(IsFlat());
- switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
- return true;
- case kTwoByteStringTag:
- return false;
- default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsOneByteRepresentation();
- }
-}
-
-bool String::IsTwoByteRepresentationUnderneath() {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kIsIndirectStringTag != 0);
- STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- DCHECK(IsFlat());
- switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
- return false;
- case kTwoByteStringTag:
- return true;
- default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsTwoByteRepresentation();
- }
-}
-
-bool String::HasOnlyOneByteChars() {
- uint32_t type = map()->instance_type();
- return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
- IsOneByteRepresentation();
-}
-
-bool StringShape::HasOnlyOneByteChars() {
- return (type_ & kStringEncodingMask) == kOneByteStringTag ||
- (type_ & kOneByteDataHintMask) == kOneByteDataHintTag;
-}
-
-bool StringShape::IsCons() {
- return (type_ & kStringRepresentationMask) == kConsStringTag;
-}
-
-bool StringShape::IsThin() {
- return (type_ & kStringRepresentationMask) == kThinStringTag;
-}
-
-bool StringShape::IsSliced() {
- return (type_ & kStringRepresentationMask) == kSlicedStringTag;
-}
-
-bool StringShape::IsIndirect() {
- return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
-}
-
-bool StringShape::IsExternal() {
- return (type_ & kStringRepresentationMask) == kExternalStringTag;
-}
-
-bool StringShape::IsSequential() {
- return (type_ & kStringRepresentationMask) == kSeqStringTag;
-}
-
-StringRepresentationTag StringShape::representation_tag() {
- uint32_t tag = (type_ & kStringRepresentationMask);
- return static_cast<StringRepresentationTag>(tag);
-}
-
-uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }
-
-uint32_t StringShape::full_representation_tag() {
- return (type_ & (kStringRepresentationMask | kStringEncodingMask));
-}
-
-STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
- Internals::kFullStringRepresentationMask);
-
-STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
- Internals::kStringEncodingMask);
-
-bool StringShape::IsSequentialOneByte() {
- return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
-}
-
-bool StringShape::IsSequentialTwoByte() {
- return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
-}
-
-bool StringShape::IsExternalOneByte() {
- return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
-}
-
-STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
- Internals::kExternalOneByteRepresentationTag);
-
-STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
-
-bool StringShape::IsExternalTwoByte() {
- return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
-}
-
-STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
- Internals::kExternalTwoByteRepresentationTag);
-
-STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
-
-uc32 FlatStringReader::Get(int index) {
- if (is_one_byte_) {
- return Get<uint8_t>(index);
- } else {
- return Get<uc16>(index);
- }
-}
-
-template <typename Char>
-Char FlatStringReader::Get(int index) {
- DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
- DCHECK(0 <= index && index <= length_);
- if (sizeof(Char) == 1) {
- return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
- } else {
- return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
- }
-}
-
-Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
- return key->AsHandle(isolate);
-}
-
-template <typename Char>
-class SequentialStringKey : public HashTableKey {
- public:
- explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) {}
-
- uint32_t Hash() override {
- hash_field_ = StringHasher::HashSequentialString<Char>(
- string_.start(), string_.length(), seed_);
-
- uint32_t result = hash_field_ >> String::kHashShift;
- DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
- uint32_t HashForObject(Object* other) override {
- return String::cast(other)->Hash();
- }
-
- Vector<const Char> string_;
- uint32_t hash_field_;
- uint32_t seed_;
-};
-
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
- OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
- : SequentialStringKey<uint8_t>(str, seed) {}
-
- bool IsMatch(Object* string) override {
- return String::cast(string)->IsOneByteEqualTo(string_);
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override;
-};
-
-class SeqOneByteSubStringKey : public HashTableKey {
- public:
- SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
- : string_(string), from_(from), length_(length) {
- DCHECK(string_->IsSeqOneByteString());
- }
-
-// VS 2017 on official builds gives this spurious warning:
-// warning C4789: buffer 'key' of size 16 bytes will be overrun; 4 bytes will
-// be written starting at offset 16
-// https://bugs.chromium.org/p/v8/issues/detail?id=6068
-#if defined(V8_CC_MSVC)
-#pragma warning(push)
-#pragma warning(disable : 4789)
-#endif
- uint32_t Hash() override {
- DCHECK(length_ >= 0);
- DCHECK(from_ + length_ <= string_->length());
- const uint8_t* chars = string_->GetChars() + from_;
- hash_field_ = StringHasher::HashSequentialString(
- chars, length_, string_->GetHeap()->HashSeed());
- uint32_t result = hash_field_ >> String::kHashShift;
- DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-#if defined(V8_CC_MSVC)
-#pragma warning(pop)
-#endif
-
- uint32_t HashForObject(Object* other) override {
- return String::cast(other)->Hash();
- }
-
- bool IsMatch(Object* string) override;
- Handle<Object> AsHandle(Isolate* isolate) override;
-
- private:
- Handle<SeqOneByteString> string_;
- int from_;
- int length_;
- uint32_t hash_field_;
-};
-
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
- : SequentialStringKey<uc16>(str, seed) {}
-
- bool IsMatch(Object* string) override {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override;
-};
-
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public HashTableKey {
- public:
- explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) {}
-
- bool IsMatch(Object* string) override {
- return String::cast(string)->IsUtf8EqualTo(string_);
- }
-
- uint32_t Hash() override {
- if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
- uint32_t result = hash_field_ >> String::kHashShift;
- DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
- return result;
- }
-
- uint32_t HashForObject(Object* other) override {
- return String::cast(other)->Hash();
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override {
- if (hash_field_ == 0) Hash();
- return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
- hash_field_);
- }
-
- Vector<const char> string_;
- uint32_t hash_field_;
- int chars_; // Caches the number of characters when computing the hash code.
- uint32_t seed_;
-};
-
Representation Object::OptimalRepresentation() {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
@@ -1069,9 +715,9 @@ Representation Object::OptimalRepresentation() {
ElementsKind Object::OptimalElementsKind() {
- if (IsSmi()) return FAST_SMI_ELEMENTS;
- if (IsNumber()) return FAST_DOUBLE_ELEMENTS;
- return FAST_ELEMENTS;
+ if (IsSmi()) return PACKED_SMI_ELEMENTS;
+ if (IsNumber()) return PACKED_DOUBLE_ELEMENTS;
+ return PACKED_ELEMENTS;
}
@@ -1088,9 +734,9 @@ bool Object::FitsRepresentation(Representation representation) {
return true;
}
-bool Object::ToUint32(uint32_t* value) {
+bool Object::ToUint32(uint32_t* value) const {
if (IsSmi()) {
- int num = Smi::cast(this)->value();
+ int num = Smi::ToInt(this);
if (num < 0) return false;
*value = static_cast<uint32_t>(num);
return true;
@@ -1164,7 +810,7 @@ MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
// static
MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
if (input->IsSmi()) {
- int value = std::max(Smi::cast(*input)->value(), 0);
+ int value = std::max(Smi::ToInt(*input), 0);
return handle(Smi::FromInt(value), isolate);
}
return ConvertToLength(isolate, input);
@@ -1173,7 +819,7 @@ MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
// static
MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
MessageTemplate::Template error_index) {
- if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
+ if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
return ConvertToIndex(isolate, input, error_index);
}
@@ -1268,163 +914,17 @@ bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
return true;
}
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-#define FIELD_ADDR_CONST(p, offset) \
- (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
-
-#define READ_FIELD(p, offset) \
- (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))
-
-#define ACQUIRE_READ_FIELD(p, offset) \
- reinterpret_cast<Object*>(base::Acquire_Load( \
- reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
-
-#define NOBARRIER_READ_FIELD(p, offset) \
- reinterpret_cast<Object*>(base::NoBarrier_Load( \
- reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
-
-#ifdef V8_CONCURRENT_MARKING
-#define WRITE_FIELD(p, offset, value) \
- base::NoBarrier_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
-#else
-#define WRITE_FIELD(p, offset, value) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-#endif
-
-#define RELEASE_WRITE_FIELD(p, offset, value) \
- base::Release_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
-
-#define NOBARRIER_WRITE_FIELD(p, offset, value) \
- base::NoBarrier_Store( \
- reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<base::AtomicWord>(value));
-
-#define WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- heap->RecordWrite(object, offset, value);
-
-#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
- do { \
- heap->RecordFixedArrayElements(array, start, length); \
- heap->incremental_marking()->IterateBlackObject(array); \
- } while (false)
-
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- } \
- heap->RecordWrite(object, offset, value); \
- }
-
-#define READ_DOUBLE_FIELD(p, offset) \
- ReadDoubleValue(FIELD_ADDR_CONST(p, offset))
-
-#define WRITE_DOUBLE_FIELD(p, offset, value) \
- WriteDoubleValue(FIELD_ADDR(p, offset), value)
-
-#define READ_INT_FIELD(p, offset) \
- (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_INT_FIELD(p, offset, value) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INTPTR_FIELD(p, offset) \
- (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT8_FIELD(p, offset) \
- (*reinterpret_cast<const uint8_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_UINT8_FIELD(p, offset, value) \
- (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT8_FIELD(p, offset) \
- (*reinterpret_cast<const int8_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_INT8_FIELD(p, offset, value) \
- (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT16_FIELD(p, offset) \
- (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_UINT16_FIELD(p, offset, value) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT16_FIELD(p, offset) \
- (*reinterpret_cast<const int16_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_INT16_FIELD(p, offset, value) \
- (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT32_FIELD(p, offset) \
- (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_UINT32_FIELD(p, offset, value) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT32_FIELD(p, offset) \
- (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_INT32_FIELD(p, offset, value) \
- (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_FLOAT_FIELD(p, offset) \
- (*reinterpret_cast<const float*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_FLOAT_FIELD(p, offset, value) \
- (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT64_FIELD(p, offset) \
- (*reinterpret_cast<const uint64_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_UINT64_FIELD(p, offset, value) \
- (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT64_FIELD(p, offset) \
- (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_INT64_FIELD(p, offset, value) \
- (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_BYTE_FIELD(p, offset) \
- (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
-
-#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
- static_cast<byte>(base::NoBarrier_Load( \
- reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
-
-#define WRITE_BYTE_FIELD(p, offset, value) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-
-#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
- base::NoBarrier_Store( \
- reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
- static_cast<base::Atomic8>(value));
-
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
}
+int Smi::ToInt(const Object* object) { return Smi::cast(object)->value(); }
MapWord MapWord::FromMap(const Map* map) {
return MapWord(reinterpret_cast<uintptr_t>(map));
}
-
-Map* MapWord::ToMap() {
- return reinterpret_cast<Map*>(value_);
-}
+Map* MapWord::ToMap() const { return reinterpret_cast<Map*>(value_); }
bool MapWord::IsForwardingAddress() const {
return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
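Smi::ToInt(obj), added just above as shorthand for Smi::cast(obj)->value(), operates on small integers stored as tagged words, the same tagging that HAS_SMI_TAG tests here. A self-contained sketch of the general idea, with a 32-bit payload and the low bit as the tag; this is illustrative and not V8's exact 64-bit layout:

#include <cassert>
#include <cstdint>

// Illustration only: low bit 0 marks a small integer ("smi"); a heap pointer
// would carry a nonzero tag in that bit.
constexpr intptr_t kSmiTagMask = 1;

inline bool IsSmiWord(intptr_t word) { return (word & kSmiTagMask) == 0; }

inline intptr_t ToSmiWord(int32_t value) {
  return static_cast<intptr_t>(value) * 2;  // shift the payload up past the tag bit
}

inline int32_t SmiWordToInt(intptr_t word) {
  assert(IsSmiWord(word));
  return static_cast<int32_t>(word / 2);  // exact, since valid smi words are even
}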
@@ -1486,8 +986,7 @@ void HeapObject::set_map(Map* value) {
}
}
-
-Map* HeapObject::synchronized_map() {
+Map* HeapObject::synchronized_map() const {
return synchronized_map_word().ToMap();
}
@@ -1533,13 +1032,13 @@ HeapObject** HeapObject::map_slot() {
MapWord HeapObject::map_word() const {
return MapWord(
- reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
+ reinterpret_cast<uintptr_t>(RELAXED_READ_FIELD(this, kMapOffset)));
}
void HeapObject::set_map_word(MapWord map_word) {
- NOBARRIER_WRITE_FIELD(
- this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
+ RELAXED_WRITE_FIELD(this, kMapOffset,
+ reinterpret_cast<Object*>(map_word.value_));
}
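RELAXED_READ_FIELD/RELAXED_WRITE_FIELD, used for the map word here, replace the old NOBARRIER_* names; per the macro definitions removed earlier in this file, both are plain relaxed atomic accesses. A minimal sketch of the same idea with std::atomic, not the actual V8 macros:

#include <atomic>

struct SketchObject {
  std::atomic<void*> map_word{nullptr};
};

// Relaxed accesses guarantee atomicity (no torn reads or writes) but impose
// no ordering with respect to other memory operations; any required ordering
// or GC write barriers must be added separately by the caller.
inline void* RelaxedReadMapWord(const SketchObject* obj) {
  return obj->map_word.load(std::memory_order_relaxed);
}

inline void RelaxedWriteMapWord(SketchObject* obj, void* value) {
  obj->map_word.store(value, std::memory_order_relaxed);
}

The rename makes the memory-ordering intent explicit at the call site without changing behavior.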
@@ -1554,11 +1053,7 @@ void HeapObject::synchronized_set_map_word(MapWord map_word) {
this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
-
-int HeapObject::Size() {
- return SizeFromMap(map());
-}
-
+int HeapObject::Size() const { return SizeFromMap(map()); }
double HeapNumber::value() const {
return READ_DOUBLE_FIELD(this, kValueOffset);
@@ -1587,8 +1082,12 @@ int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
-ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)
+inline Object* OrderedHashMap::ValueAt(int entry) {
+ DCHECK_LT(entry, this->UsedCapacity());
+ return get(EntryToIndex(entry) + kValueOffset);
+}
+ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
Object** FixedArray::GetFirstElementAddress() {
return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
@@ -1611,32 +1110,8 @@ FixedArrayBase* JSObject::elements() const {
return static_cast<FixedArrayBase*>(array);
}
-Context* SloppyArgumentsElements::context() {
- return Context::cast(get(kContextIndex));
-}
-
-FixedArray* SloppyArgumentsElements::arguments() {
- return FixedArray::cast(get(kArgumentsIndex));
-}
-
-void SloppyArgumentsElements::set_arguments(FixedArray* arguments) {
- set(kArgumentsIndex, arguments);
-}
-
-uint32_t SloppyArgumentsElements::parameter_map_length() {
- return length() - kParameterMapStart;
-}
-
-Object* SloppyArgumentsElements::get_mapped_entry(uint32_t entry) {
- return get(entry + kParameterMapStart);
-}
-
-void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object* object) {
- set(entry + kParameterMapStart, object);
-}
-
void AllocationSite::Initialize() {
- set_transition_info(Smi::kZero);
+ set_transition_info_or_boilerplate(Smi::kZero);
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::kZero);
set_pretenure_data(0);
@@ -1645,16 +1120,15 @@ void AllocationSite::Initialize() {
SKIP_WRITE_BARRIER);
}
+bool AllocationSite::IsZombie() const {
+ return pretenure_decision() == kZombie;
+}
-bool AllocationSite::IsZombie() { return pretenure_decision() == kZombie; }
-
-
-bool AllocationSite::IsMaybeTenure() {
+bool AllocationSite::IsMaybeTenure() const {
return pretenure_decision() == kMaybeTenure;
}
-
-bool AllocationSite::PretenuringDecisionMade() {
+bool AllocationSite::PretenuringDecisionMade() const {
return pretenure_decision() != kUndecided;
}
@@ -1665,98 +1139,69 @@ void AllocationSite::MarkZombie() {
set_pretenure_decision(kZombie);
}
-
-ElementsKind AllocationSite::GetElementsKind() {
- DCHECK(!SitePointsToLiteral());
- int value = Smi::cast(transition_info())->value();
- return ElementsKindBits::decode(value);
+ElementsKind AllocationSite::GetElementsKind() const {
+ return ElementsKindBits::decode(transition_info());
}
void AllocationSite::SetElementsKind(ElementsKind kind) {
- int value = Smi::cast(transition_info())->value();
- set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
- SKIP_WRITE_BARRIER);
+ set_transition_info(ElementsKindBits::update(transition_info(), kind));
}
-
-bool AllocationSite::CanInlineCall() {
- int value = Smi::cast(transition_info())->value();
- return DoNotInlineBit::decode(value) == 0;
+bool AllocationSite::CanInlineCall() const {
+ return DoNotInlineBit::decode(transition_info()) == 0;
}
void AllocationSite::SetDoNotInlineCall() {
- int value = Smi::cast(transition_info())->value();
- set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
- SKIP_WRITE_BARRIER);
+ set_transition_info(DoNotInlineBit::update(transition_info(), true));
}
-
-bool AllocationSite::SitePointsToLiteral() {
- // If transition_info is a smi, then it represents an ElementsKind
- // for a constructed array. Otherwise, it must be a boilerplate
- // for an object or array literal.
- return transition_info()->IsJSArray() || transition_info()->IsJSObject();
+bool AllocationSite::PointsToLiteral() const {
+ Object* raw_value = transition_info_or_boilerplate();
+ DCHECK_EQ(!raw_value->IsSmi(),
+ raw_value->IsJSArray() || raw_value->IsJSObject());
+ return !raw_value->IsSmi();
}
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
-AllocationSiteMode AllocationSite::GetMode(
- ElementsKind boilerplate_elements_kind) {
- if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
- return TRACK_ALLOCATION_SITE;
- }
-
- return DONT_TRACK_ALLOCATION_SITE;
+bool AllocationSite::ShouldTrack(ElementsKind boilerplate_elements_kind) {
+ return IsSmiElementsKind(boilerplate_elements_kind);
}
inline bool AllocationSite::CanTrack(InstanceType type) {
- if (FLAG_turbo) {
+ if (FLAG_allocation_site_pretenuring) {
// TurboFan doesn't care at all about String pretenuring feedback,
// so don't bother even trying to track that.
return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
}
- if (FLAG_allocation_site_pretenuring) {
- return type == JS_ARRAY_TYPE ||
- type == JS_OBJECT_TYPE ||
- type < FIRST_NONSTRING_TYPE;
- }
return type == JS_ARRAY_TYPE;
}
-
-AllocationSite::PretenureDecision AllocationSite::pretenure_decision() {
- int value = pretenure_data();
- return PretenureDecisionBits::decode(value);
+AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
+ return PretenureDecisionBits::decode(pretenure_data());
}
-
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
int value = pretenure_data();
set_pretenure_data(PretenureDecisionBits::update(value, decision));
}
-
-bool AllocationSite::deopt_dependent_code() {
- int value = pretenure_data();
- return DeoptDependentCodeBit::decode(value);
+bool AllocationSite::deopt_dependent_code() const {
+ return DeoptDependentCodeBit::decode(pretenure_data());
}
-
void AllocationSite::set_deopt_dependent_code(bool deopt) {
int value = pretenure_data();
set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
}
-
-int AllocationSite::memento_found_count() {
- int value = pretenure_data();
- return MementoFoundCountBits::decode(value);
+int AllocationSite::memento_found_count() const {
+ return MementoFoundCountBits::decode(pretenure_data());
}
-
inline void AllocationSite::set_memento_found_count(int count) {
int value = pretenure_data();
// Verify that we can count more mementos than we can possibly find in one
@@ -1768,15 +1213,14 @@ inline void AllocationSite::set_memento_found_count(int count) {
set_pretenure_data(MementoFoundCountBits::update(value, count));
}
-
-int AllocationSite::memento_create_count() { return pretenure_create_count(); }
-
+int AllocationSite::memento_create_count() const {
+ return pretenure_create_count();
+}
void AllocationSite::set_memento_create_count(int count) {
set_pretenure_create_count(count);
}
-
bool AllocationSite::IncrementMementoFoundCount(int increment) {
if (IsZombie()) return false;
@@ -1792,88 +1236,28 @@ inline void AllocationSite::IncrementMementoCreateCount() {
set_memento_create_count(value + 1);
}
-
-inline bool AllocationSite::MakePretenureDecision(
- PretenureDecision current_decision,
- double ratio,
- bool maximum_size_scavenge) {
- // Here we just allow state transitions from undecided or maybe tenure
- // to don't tenure, maybe tenure, or tenure.
- if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
- if (ratio >= kPretenureRatio) {
- // We just transition into tenure state when the semi-space was at
- // maximum capacity.
- if (maximum_size_scavenge) {
- set_deopt_dependent_code(true);
- set_pretenure_decision(kTenure);
- // Currently we just need to deopt when we make a state transition to
- // tenure.
- return true;
- }
- set_pretenure_decision(kMaybeTenure);
- } else {
- set_pretenure_decision(kDontTenure);
- }
- }
- return false;
-}
-
-
-inline bool AllocationSite::DigestPretenuringFeedback(
- bool maximum_size_scavenge) {
- bool deopt = false;
- int create_count = memento_create_count();
- int found_count = memento_found_count();
- bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
- double ratio =
- minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
- static_cast<double>(found_count) / create_count : 0.0;
- PretenureDecision current_decision = pretenure_decision();
-
- if (minimum_mementos_created) {
- deopt = MakePretenureDecision(
- current_decision, ratio, maximum_size_scavenge);
- }
-
- if (FLAG_trace_pretenuring_statistics) {
- PrintIsolate(GetIsolate(),
- "pretenuring: AllocationSite(%p): (created, found, ratio) "
- "(%d, %d, %f) %s => %s\n",
- static_cast<void*>(this), create_count, found_count, ratio,
- PretenureDecisionName(current_decision),
- PretenureDecisionName(pretenure_decision()));
- }
-
- // Clear feedback calculation fields until the next gc.
- set_memento_found_count(0);
- set_memento_create_count(0);
- return deopt;
-}
-
-
-bool AllocationMemento::IsValid() {
+bool AllocationMemento::IsValid() const {
return allocation_site()->IsAllocationSite() &&
!AllocationSite::cast(allocation_site())->IsZombie();
}
-
-AllocationSite* AllocationMemento::GetAllocationSite() {
+AllocationSite* AllocationMemento::GetAllocationSite() const {
DCHECK(IsValid());
return AllocationSite::cast(allocation_site());
}
-Address AllocationMemento::GetAllocationSiteUnchecked() {
+Address AllocationMemento::GetAllocationSiteUnchecked() const {
return reinterpret_cast<Address>(allocation_site());
}
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
- JSObject::ValidateElements(object);
+ JSObject::ValidateElements(*object);
ElementsKind elements_kind = object->map()->elements_kind();
- if (!IsFastObjectElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
+ if (!IsObjectElementsKind(elements_kind)) {
+ if (IsHoleyElementsKind(elements_kind)) {
+ TransitionElementsKind(object, HOLEY_ELEMENTS);
} else {
- TransitionElementsKind(object, FAST_ELEMENTS);
+ TransitionElementsKind(object, PACKED_ELEMENTS);
}
}
}
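The MakePretenureDecision and DigestPretenuringFeedback bodies deleted from this inline header above boil down to a ratio test over memento counts. A simplified standalone restatement of that decision logic, with the threshold as a parameter and the deopt side effect omitted:

enum class PretenureDecision { kUndecided, kDontTenure, kMaybeTenure, kTenure, kZombie };

// Mirrors the removed helper: only undecided or maybe-tenure sites may change
// state; the ratio of mementos found to mementos created drives the decision,
// and a jump straight to tenure is taken only during a maximum-size scavenge.
inline PretenureDecision DecidePretenure(PretenureDecision current,
                                         int found_count, int create_count,
                                         double pretenure_ratio,  // e.g. kPretenureRatio
                                         bool maximum_size_scavenge) {
  if (current != PretenureDecision::kUndecided &&
      current != PretenureDecision::kMaybeTenure) {
    return current;
  }
  double ratio = create_count > 0
                     ? static_cast<double>(found_count) / create_count
                     : 0.0;
  if (ratio >= pretenure_ratio) {
    return maximum_size_scavenge ? PretenureDecision::kTenure
                                 : PretenureDecision::kMaybeTenure;
  }
  return PretenureDecision::kDontTenure;
}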
@@ -1888,8 +1272,8 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
{
DisallowHeapAllocation no_allocation;
DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsFastHoleyElementsKind(current_kind);
- if (current_kind == FAST_HOLEY_ELEMENTS) return;
+ bool is_holey = IsHoleyElementsKind(current_kind);
+ if (current_kind == HOLEY_ELEMENTS) return;
Object* the_hole = object->GetHeap()->the_hole_value();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
@@ -1898,18 +1282,18 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
target_kind = GetHoleyElementsKind(target_kind);
} else if (!current->IsSmi()) {
if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
- if (IsFastSmiElementsKind(target_kind)) {
+ if (IsSmiElementsKind(target_kind)) {
if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ target_kind = HOLEY_DOUBLE_ELEMENTS;
} else {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ target_kind = PACKED_DOUBLE_ELEMENTS;
}
}
} else if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
+ target_kind = HOLEY_ELEMENTS;
break;
} else {
- target_kind = FAST_ELEMENTS;
+ target_kind = PACKED_ELEMENTS;
}
}
}
@@ -1938,18 +1322,18 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
}
DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
- TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
- } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
+ if (object->GetElementsKind() == HOLEY_SMI_ELEMENTS) {
+ TransitionElementsKind(object, HOLEY_DOUBLE_ELEMENTS);
+ } else if (object->GetElementsKind() == PACKED_SMI_ELEMENTS) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
- TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, HOLEY_DOUBLE_ELEMENTS);
return;
}
}
- TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, PACKED_DOUBLE_ELEMENTS);
}
}
@@ -2006,11 +1390,7 @@ ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
-
-byte Oddball::kind() const {
- return Smi::cast(READ_FIELD(this, kKindOffset))->value();
-}
-
+byte Oddball::kind() const { return Smi::ToInt(READ_FIELD(this, kKindOffset)); }
void Oddball::set_kind(byte value) {
WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
@@ -2025,9 +1405,9 @@ Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
ACCESSORS(Cell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
+ACCESSORS(PropertyCell, name, Name, kNameOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
-
+ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
PropertyDetails PropertyCell::property_details() {
return PropertyDetails(Smi::cast(property_details_raw()));
@@ -2057,7 +1437,8 @@ void WeakCell::initialize(HeapObject* val) {
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
WriteBarrierMode mode =
- ObjectMarking::IsBlack(this, MarkingState::Internal(this))
+ ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ this, MarkingState::Internal(this))
? UPDATE_WRITE_BARRIER
: UPDATE_WEAK_WRITE_BARRIER;
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
@@ -2083,87 +1464,20 @@ void WeakCell::clear_next(Object* the_hole_value) {
bool WeakCell::next_cleared() { return next()->IsTheHole(GetIsolate()); }
-int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }
-
-
-int JSObject::GetHeaderSize(InstanceType type) {
+int JSObject::GetHeaderSize() {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
- if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
- switch (type) {
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- return JSObject::kHeaderSize;
- case JS_GENERATOR_OBJECT_TYPE:
- return JSGeneratorObject::kSize;
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- return JSAsyncGeneratorObject::kSize;
- case JS_GLOBAL_PROXY_TYPE:
- return JSGlobalProxy::kSize;
- case JS_GLOBAL_OBJECT_TYPE:
- return JSGlobalObject::kSize;
- case JS_BOUND_FUNCTION_TYPE:
- return JSBoundFunction::kSize;
- case JS_FUNCTION_TYPE:
- return JSFunction::kSize;
- case JS_VALUE_TYPE:
- return JSValue::kSize;
- case JS_DATE_TYPE:
- return JSDate::kSize;
- case JS_ARRAY_TYPE:
- return JSArray::kSize;
- case JS_ARRAY_BUFFER_TYPE:
- return JSArrayBuffer::kSize;
- case JS_TYPED_ARRAY_TYPE:
- return JSTypedArray::kSize;
- case JS_DATA_VIEW_TYPE:
- return JSDataView::kSize;
- case JS_SET_TYPE:
- return JSSet::kSize;
- case JS_MAP_TYPE:
- return JSMap::kSize;
- case JS_SET_ITERATOR_TYPE:
- return JSSetIterator::kSize;
- case JS_MAP_ITERATOR_TYPE:
- return JSMapIterator::kSize;
- case JS_WEAK_MAP_TYPE:
- return JSWeakMap::kSize;
- case JS_WEAK_SET_TYPE:
- return JSWeakSet::kSize;
- case JS_PROMISE_CAPABILITY_TYPE:
- return JSPromiseCapability::kSize;
- case JS_PROMISE_TYPE:
- return JSPromise::kSize;
- case JS_REGEXP_TYPE:
- return JSRegExp::kSize;
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- return JSObject::kHeaderSize;
- case JS_MESSAGE_OBJECT_TYPE:
- return JSMessageObject::kSize;
- case JS_ARGUMENTS_TYPE:
- return JSArgumentsObject::kHeaderSize;
- case JS_ERROR_TYPE:
- return JSObject::kHeaderSize;
- case JS_STRING_ITERATOR_TYPE:
- return JSStringIterator::kSize;
- case JS_MODULE_NAMESPACE_TYPE:
- return JSModuleNamespace::kHeaderSize;
- default:
- if (type >= FIRST_ARRAY_ITERATOR_TYPE &&
- type <= LAST_ARRAY_ITERATOR_TYPE) {
- return JSArrayIterator::kSize;
- }
- UNREACHABLE();
- return 0;
- }
+ InstanceType type = map()->instance_type();
+ return type == JS_OBJECT_TYPE ? JSObject::kHeaderSize : GetHeaderSize(type);
}
inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
}
-int JSObject::GetEmbedderFieldCount(Map* map) {
+// static
+int JSObject::GetEmbedderFieldCount(const Map* map) {
int instance_size = map->instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
InstanceType instance_type = map->instance_type();
@@ -2171,7 +1485,9 @@ int JSObject::GetEmbedderFieldCount(Map* map) {
map->GetInObjectProperties();
}
-int JSObject::GetEmbedderFieldCount() { return GetEmbedderFieldCount(map()); }
+int JSObject::GetEmbedderFieldCount() const {
+ return GetEmbedderFieldCount(map());
+}
int JSObject::GetEmbedderFieldOffset(int index) {
DCHECK(index < GetEmbedderFieldCount() && index >= 0);
@@ -2211,8 +1527,7 @@ bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
return map()->IsUnboxedDoubleField(index);
}
-
-bool Map::IsUnboxedDoubleField(FieldIndex index) {
+bool Map::IsUnboxedDoubleField(FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
if (index.is_hidden_field() || !index.is_inobject()) return false;
return !layout_descriptor()->IsTagged(index.property_index());
@@ -2227,7 +1542,7 @@ Object* JSObject::RawFastPropertyAt(FieldIndex index) {
if (index.is_inobject()) {
return READ_FIELD(this, index.offset());
} else {
- return properties()->get(index.outobject_array_index());
+ return property_array()->get(index.outobject_array_index());
}
}
@@ -2248,13 +1563,17 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
} else {
- properties()->set(index.outobject_array_index(), value);
+ property_array()->set(index.outobject_array_index(), value);
}
}
void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
uint64_t bits) {
- WRITE_UINT64_FIELD(this, index.offset(), bits);
+ // Double unboxing is enabled only on 64-bit platforms.
+ DCHECK_EQ(kDoubleSize, kPointerSize);
+ Address field_addr = FIELD_ADDR(this, index.offset());
+ base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
+ static_cast<base::AtomicWord>(bits));
}
void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
@@ -2285,7 +1604,7 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
// and stores to the stack silently clear the signalling bit).
uint64_t bits;
if (value->IsSmi()) {
- bits = bit_cast<uint64_t>(static_cast<double>(Smi::cast(value)->value()));
+ bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
} else {
DCHECK(value->IsHeapNumber());
bits = HeapNumber::cast(value)->value_as_bits();
@@ -2348,8 +1667,7 @@ void JSObject::InitializeBody(Map* map, int start_offset,
}
}
-
-bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
+bool Map::TooManyFastProperties(StoreFromKeyed store_mode) const {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
@@ -2366,10 +1684,11 @@ void Struct::InitializeBody(int object_size) {
}
}
-bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }
-
+bool Object::ToArrayLength(uint32_t* index) const {
+ return Object::ToUint32(index);
+}
-bool Object::ToArrayIndex(uint32_t* index) {
+bool Object::ToArrayIndex(uint32_t* index) const {
return Object::ToUint32(index) && *index != kMaxUInt32;
}
@@ -2390,7 +1709,13 @@ void Object::VerifyApiCallResultType() {
Object* FixedArray::get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length());
- return NOBARRIER_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+ return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
+}
+
+Object* PropertyArray::get(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LE(index, this->length());
+ return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
@@ -2415,24 +1740,31 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
}
void FixedArray::set(int index, Smi* value) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < this->length());
+ DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_LT(index, this->length());
DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
- NOBARRIER_WRITE_FIELD(this, offset, value);
+ RELAXED_WRITE_FIELD(this, offset, value);
}
-
void FixedArray::set(int index, Object* value) {
DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
- NOBARRIER_WRITE_FIELD(this, offset, value);
+ RELAXED_WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
+void PropertyArray::set(int index, Object* value) {
+ DCHECK(IsPropertyArray());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
+}
double FixedDoubleArray::get_scalar(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
@@ -2504,7 +1836,6 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
}
-
Object* WeakFixedArray::Get(int index) const {
Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
if (raw->IsSmi()) return raw;
@@ -2530,7 +1861,7 @@ int WeakFixedArray::Length() const {
int WeakFixedArray::last_used_index() const {
- return Smi::cast(FixedArray::cast(this)->get(kLastUsedIndexIndex))->value();
+ return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
}
@@ -2555,7 +1886,7 @@ T* WeakFixedArray::Iterator::Next() {
int ArrayList::Length() const {
if (FixedArray::cast(this)->length() == 0) return 0;
- return Smi::cast(FixedArray::cast(this)->get(kLengthIndex))->value();
+ return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
}
@@ -2586,7 +1917,7 @@ void ArrayList::Clear(int index, Object* undefined) {
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
Object* obj = get(kNumberOfCapturesIndex);
- return Smi::cast(obj)->value();
+ return Smi::ToInt(obj);
}
void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
@@ -2618,7 +1949,7 @@ void RegExpMatchInfo::SetLastInput(Object* value) {
int RegExpMatchInfo::Capture(int i) {
DCHECK_LT(i, NumberOfCaptureRegisters());
Object* obj = get(kFirstCaptureIndex + i);
- return Smi::cast(obj)->value();
+ return Smi::ToInt(obj);
}
void RegExpMatchInfo::SetCapture(int i, int value) {
@@ -2634,8 +1965,7 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
return UPDATE_WRITE_BARRIER;
}
-
-AllocationAlignment HeapObject::RequiredAlignment() {
+AllocationAlignment HeapObject::RequiredAlignment() const {
#ifdef V8_HOST_ARCH_32_BIT
if ((IsFixedFloat64Array() || IsFixedDoubleArray()) &&
FixedArrayBase::cast(this)->length() != 0) {
@@ -2654,10 +1984,17 @@ void FixedArray::set(int index,
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
- NOBARRIER_WRITE_FIELD(this, offset, value);
+ RELAXED_WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
+void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ RELAXED_WRITE_FIELD(this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+}
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
@@ -2666,7 +2003,7 @@ void FixedArray::NoWriteBarrierSet(FixedArray* array,
DCHECK_GE(index, 0);
DCHECK_LT(index, array->length());
DCHECK(!array->GetHeap()->InNewSpace(value));
- NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+ RELAXED_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
@@ -2702,6 +2039,9 @@ Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
+Object** PropertyArray::data_start() {
+ return HeapObject::RawField(this, kHeaderSize);
+}
Object** FixedArray::RawFieldOfElementAt(int index) {
return HeapObject::RawField(this, OffsetOfElementAt(index));
@@ -2717,7 +2057,7 @@ bool DescriptorArray::IsEmpty() {
int DescriptorArray::number_of_descriptors() {
DCHECK(length() >= kFirstIndex || IsEmpty());
int len = length();
- return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
+ return len == 0 ? 0 : Smi::ToInt(get(kDescriptorLengthIndex));
}
@@ -2901,19 +2241,17 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
return number;
}
-PropertyDetails Map::GetLastDescriptorDetails() {
+PropertyDetails Map::GetLastDescriptorDetails() const {
return instance_descriptors()->GetDetails(LastAdded());
}
-
-int Map::LastAdded() {
+int Map::LastAdded() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(number_of_own_descriptors > 0);
return number_of_own_descriptors - 1;
}
-
-int Map::NumberOfOwnDescriptors() {
+int Map::NumberOfOwnDescriptors() const {
return NumberOfOwnDescriptorsBits::decode(bit_field3());
}
@@ -2923,9 +2261,7 @@ void Map::SetNumberOfOwnDescriptors(int number) {
set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
}
-
-int Map::EnumLength() { return EnumLengthBits::decode(bit_field3()); }
-
+int Map::EnumLength() const { return EnumLengthBits::decode(bit_field3()); }
void Map::SetEnumLength(int length) {
if (length != kInvalidEnumCacheSentinel) {
@@ -2936,8 +2272,7 @@ void Map::SetEnumLength(int length) {
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
}
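// --- Illustrative aside (not part of the diff): bit_field3() decoding ---
// Minimal stand-in for the BitField helpers (NumberOfOwnDescriptorsBits,
// EnumLengthBits, ...) that these Map accessors decode from bit_field3().
// Only the generic encode/decode/update pattern is shown; the field positions
// and widths below are hypothetical, not V8's real layout.
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
class BitField {
 public:
  static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
  static uint32_t update(uint32_t packed, T value) {
    return (packed & ~kMask) | encode(value);
  }
};

// Hypothetical layout: 10 bits of descriptor count, then 10 bits of enum length.
using NumberOfOwnDescriptorsBits = BitField<int, 0, 10>;
using EnumLengthBits = BitField<int, 10, 10>;

int main() {
  uint32_t bit_field3 = 0;
  bit_field3 = NumberOfOwnDescriptorsBits::update(bit_field3, 7);
  bit_field3 = EnumLengthBits::update(bit_field3, 3);
  assert(NumberOfOwnDescriptorsBits::decode(bit_field3) == 7);
  assert(EnumLengthBits::decode(bit_field3) == 3);
  return 0;
}
// --- end aside ---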
-
-FixedArrayBase* Map::GetInitialElements() {
+FixedArrayBase* Map::GetInitialElements() const {
FixedArrayBase* result = nullptr;
if (has_fast_elements() || has_fast_string_wrapper_elements()) {
result = GetHeap()->empty_fixed_array();
@@ -2945,6 +2280,8 @@ FixedArrayBase* Map::GetInitialElements() {
result = GetHeap()->empty_sloppy_arguments_elements();
} else if (has_fixed_typed_array_elements()) {
result = GetHeap()->EmptyFixedTypedArrayForMap(this);
+ } else if (has_dictionary_elements()) {
+ result = GetHeap()->empty_slow_element_dictionary();
} else {
UNREACHABLE();
}
@@ -3077,21 +2414,15 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
-
-int HashTableBase::NumberOfElements() {
- return Smi::cast(get(kNumberOfElementsIndex))->value();
-}
-
-
-int HashTableBase::NumberOfDeletedElements() {
- return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
+int HashTableBase::NumberOfElements() const {
+ return Smi::ToInt(get(kNumberOfElementsIndex));
}
-
-int HashTableBase::Capacity() {
- return Smi::cast(get(kCapacityIndex))->value();
+int HashTableBase::NumberOfDeletedElements() const {
+ return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
}
+int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
void HashTableBase::ElementAdded() {
SetNumberOfElements(NumberOfElements() + 1);
@@ -3120,11 +2451,6 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
return Max(capacity, kMinCapacity);
}
-bool HashTableBase::IsKey(Isolate* isolate, Object* k) {
- Heap* heap = isolate->heap();
- return k != heap->the_hole_value() && k != heap->undefined_value();
-}
-
void HashTableBase::SetNumberOfElements(int nof) {
set(kNumberOfElementsIndex, Smi::FromInt(nof));
}
@@ -3139,48 +2465,40 @@ Map* BaseShape<Key>::GetMap(Isolate* isolate) {
return isolate->heap()->hash_table_map();
}
-template <typename Derived, typename Shape, typename Key>
-int HashTable<Derived, Shape, Key>::FindEntry(Key key) {
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Key key) {
return FindEntry(GetIsolate(), key);
}
-
-template<typename Derived, typename Shape, typename Key>
-int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) {
- return FindEntry(isolate, key, HashTable::Hash(key));
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key) {
+ return FindEntry(isolate, key, Shape::Hash(isolate, key));
}
// Find entry for key; otherwise return kNotFound.
-template <typename Derived, typename Shape, typename Key>
-int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
- int32_t hash) {
+template <typename Derived, typename Shape>
+int HashTable<Derived, Shape>::FindEntry(Isolate* isolate, Key key,
+ int32_t hash) {
uint32_t capacity = Capacity();
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
Object* undefined = isolate->heap()->undefined_value();
Object* the_hole = isolate->heap()->the_hole_value();
+ USE(the_hole);
while (true) {
Object* element = KeyAt(entry);
// Empty entry. Uses raw unchecked accessors because it is called by the
// string table during bootstrapping.
if (element == undefined) break;
- if (element != the_hole && Shape::IsMatch(key, element)) return entry;
+ if (!(Shape::kNeedsHoleCheck && the_hole == element)) {
+ if (Shape::IsMatch(key, element)) return entry;
+ }
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
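// --- Illustrative aside (not part of the diff): the probe loop above ---
// A hedged, standalone sketch of the hash-table probing logic, outside of
// V8's types. The point of the Shape::kNeedsHoleCheck flag is that shapes
// whose tables never contain "the hole" let the compiler drop that extra
// comparison. SimpleShape, kEmpty and kHole are invented for the example and
// are not V8 API.
#include <cassert>
#include <cstdint>
#include <vector>

struct SimpleShape {
  static constexpr bool kNeedsHoleCheck = false;  // assumed trait
  static bool IsMatch(int key, int element) { return key == element; }
  static uint32_t Hash(int key) {
    return static_cast<uint32_t>(key) * 2654435761u;  // Knuth-style mix
  }
};

template <typename Shape>
int FindEntry(const std::vector<int>& table, int key, int kEmpty, int kHole) {
  uint32_t capacity = static_cast<uint32_t>(table.size());  // power of two
  uint32_t entry = Shape::Hash(key) & (capacity - 1);
  uint32_t count = 1;
  while (true) {
    int element = table[entry];
    if (element == kEmpty) return -1;  // table is never full, so this ends
    if (!(Shape::kNeedsHoleCheck && element == kHole)) {
      if (Shape::IsMatch(key, element)) return static_cast<int>(entry);
    }
    entry = (entry + count++) & (capacity - 1);  // triangular-number probing
  }
}

int main() {
  std::vector<int> table(8, /*kEmpty=*/-1);
  table[SimpleShape::Hash(7) & 7] = 7;
  assert(FindEntry<SimpleShape>(table, 7, -1, -2) != -1);
  return 0;
}
// --- end aside ---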
-template <typename Derived, typename Shape, typename Key>
-bool HashTable<Derived, Shape, Key>::Has(Key key) {
- return FindEntry(key) != kNotFound;
-}
-
-template <typename Derived, typename Shape, typename Key>
-bool HashTable<Derived, Shape, Key>::Has(Isolate* isolate, Key key) {
- return FindEntry(isolate, key) != kNotFound;
-}
-
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
return FindEntry(isolate, key, hash) != kNotFound;
}
@@ -3188,24 +2506,43 @@ bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
Object* hash = key->GetHash();
if (!hash->IsSmi()) return false;
- return FindEntry(isolate, key, Smi::cast(hash)->value()) != kNotFound;
+ return FindEntry(isolate, key, Smi::ToInt(hash)) != kNotFound;
}
bool StringSetShape::IsMatch(String* key, Object* value) {
- return value->IsString() && key->Equals(String::cast(value));
+ DCHECK(value->IsString());
+ return key->Equals(String::cast(value));
+}
+
+uint32_t StringSetShape::Hash(Isolate* isolate, String* key) {
+ return key->Hash();
}
-uint32_t StringSetShape::Hash(String* key) { return key->Hash(); }
+uint32_t StringSetShape::HashForObject(Isolate* isolate, Object* object) {
+ return String::cast(object)->Hash();
+}
+
+StringTableKey::StringTableKey(uint32_t hash_field)
+ : HashTableKey(hash_field >> Name::kHashShift), hash_field_(hash_field) {}
-uint32_t StringSetShape::HashForObject(String* key, Object* object) {
- return object->IsString() ? String::cast(object)->Hash() : 0;
+void StringTableKey::set_hash_field(uint32_t hash_field) {
+ hash_field_ = hash_field;
+ set_hash(hash_field >> Name::kHashShift);
+}
+
+Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
+ StringTableKey* key) {
+ return key->AsHandle(isolate);
+}
+
+uint32_t StringTableShape::HashForObject(Isolate* isolate, Object* object) {
+ return String::cast(object)->Hash();
}
bool SeededNumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
- return 0 !=
- (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
+ return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask);
}
@@ -3213,7 +2550,7 @@ uint32_t SeededNumberDictionary::max_number_key() {
DCHECK(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return 0;
- uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(max_index_object));
return value >> kRequiresSlowElementsTagSize;
}
@@ -3266,51 +2603,26 @@ FixedTypedArray<Traits>::cast(const Object* object) {
return reinterpret_cast<FixedTypedArray<Traits>*>(object);
}
-
-#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
- type* DeoptimizationInputData::name() { \
- return type::cast(get(k##name##Index)); \
- } \
- void DeoptimizationInputData::Set##name(type* value) { \
- set(k##name##Index, value); \
- }
-
DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrAstId, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DEFINE_DEOPT_ELEMENT_ACCESSORS(WeakCellCache, Object)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
-#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
-
-
-#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
- type* DeoptimizationInputData::name(int i) { \
- return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
- } \
- void DeoptimizationInputData::Set##name(int i, type* value) { \
- set(IndexForEntry(i) + k##name##Offset, value); \
- }
-
-DEFINE_DEOPT_ENTRY_ACCESSORS(AstIdRaw, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
-DEFINE_DEOPT_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(TrampolinePc, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
-#undef DEFINE_DEOPT_ENTRY_ACCESSORS
-
-
-BailoutId DeoptimizationInputData::AstId(int i) {
- return BailoutId(AstIdRaw(i)->value());
+BailoutId DeoptimizationInputData::BytecodeOffset(int i) {
+ return BailoutId(BytecodeOffsetRaw(i)->value());
}
-
-void DeoptimizationInputData::SetAstId(int i, BailoutId value) {
- SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
+void DeoptimizationInputData::SetBytecodeOffset(int i, BailoutId value) {
+ SetBytecodeOffsetRaw(i, Smi::FromInt(value.ToInt()));
}
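// --- Illustrative aside (not part of the diff): the accessor macros ---
// Sketch of what a DEFINE_DEOPT_ENTRY_ACCESSORS expansion looks like after
// the AstId -> BytecodeOffset rename, using a toy table instead of V8's
// DeoptimizationInputData. The entry size, the k*Offset constants and
// IndexForEntry() are modelled loosely; only the naming pattern matches.
#include <cassert>
#include <vector>

struct DeoptTable {
  static constexpr int kEntrySize = 4;
  static constexpr int kBytecodeOffsetRawOffset = 0;
  static constexpr int kTranslationIndexOffset = 1;
  std::vector<int> slots;

  int IndexForEntry(int i) const { return i * kEntrySize; }

#define DEFINE_DEOPT_ENTRY_ACCESSORS(name)                                    \
  int name(int i) const { return slots[IndexForEntry(i) + k##name##Offset]; } \
  void Set##name(int i, int value) {                                          \
    slots[IndexForEntry(i) + k##name##Offset] = value;                        \
  }

  DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw)
  DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex)
#undef DEFINE_DEOPT_ENTRY_ACCESSORS
};

int main() {
  DeoptTable table;
  table.slots.assign(DeoptTable::kEntrySize, 0);
  table.SetBytecodeOffsetRaw(0, 17);  // analogous to SetBytecodeOffsetRaw above
  assert(table.BytecodeOffsetRaw(0) == 17);
  return 0;
}
// --- end aside ---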
@@ -3319,43 +2631,21 @@ int DeoptimizationInputData::DeoptCount() {
}
-int DeoptimizationOutputData::DeoptPoints() { return length() / 2; }
-
-
-BailoutId DeoptimizationOutputData::AstId(int index) {
- return BailoutId(Smi::cast(get(index * 2))->value());
-}
-
-
-void DeoptimizationOutputData::SetAstId(int index, BailoutId id) {
- set(index * 2, Smi::FromInt(id.ToInt()));
-}
-
-
-Smi* DeoptimizationOutputData::PcAndState(int index) {
- return Smi::cast(get(1 + index * 2));
-}
-
-
-void DeoptimizationOutputData::SetPcAndState(int index, Smi* offset) {
- set(1 + index * 2, offset);
-}
-
int HandlerTable::GetRangeStart(int index) const {
- return Smi::cast(get(index * kRangeEntrySize + kRangeStartIndex))->value();
+ return Smi::ToInt(get(index * kRangeEntrySize + kRangeStartIndex));
}
int HandlerTable::GetRangeEnd(int index) const {
- return Smi::cast(get(index * kRangeEntrySize + kRangeEndIndex))->value();
+ return Smi::ToInt(get(index * kRangeEntrySize + kRangeEndIndex));
}
int HandlerTable::GetRangeHandler(int index) const {
return HandlerOffsetField::decode(
- Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
+ Smi::ToInt(get(index * kRangeEntrySize + kRangeHandlerIndex)));
}
int HandlerTable::GetRangeData(int index) const {
- return Smi::cast(get(index * kRangeEntrySize + kRangeDataIndex))->value();
+ return Smi::ToInt(get(index * kRangeEntrySize + kRangeDataIndex));
}
void HandlerTable::SetRangeStart(int index, int value) {
@@ -3393,17 +2683,15 @@ int HandlerTable::NumberOfRangeEntries() const {
return length() / kRangeEntrySize;
}
-template <typename Derived, typename Shape, typename Key>
-HashTable<Derived, Shape, Key>*
-HashTable<Derived, Shape, Key>::cast(Object* obj) {
+template <typename Derived, typename Shape>
+HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(Object* obj) {
SLOW_DCHECK(obj->IsHashTable());
return reinterpret_cast<HashTable*>(obj);
}
-
-template <typename Derived, typename Shape, typename Key>
-const HashTable<Derived, Shape, Key>*
-HashTable<Derived, Shape, Key>::cast(const Object* obj) {
+template <typename Derived, typename Shape>
+const HashTable<Derived, Shape>* HashTable<Derived, Shape>::cast(
+ const Object* obj) {
SLOW_DCHECK(obj->IsHashTable());
return reinterpret_cast<const HashTable*>(obj);
}
@@ -3412,11 +2700,11 @@ HashTable<Derived, Shape, Key>::cast(const Object* obj) {
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
-NOBARRIER_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
+SMI_ACCESSORS(PropertyArray, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length, kLengthOffset)
-SMI_ACCESSORS(String, length, kLengthOffset)
-SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
+SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
+RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
int FreeSpace::Size() { return size(); }
@@ -3425,7 +2713,7 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
- DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
return reinterpret_cast<FreeSpace*>(
Memory::Address_at(address() + kNextOffset));
}
@@ -3434,8 +2722,8 @@ FreeSpace* FreeSpace::next() {
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
- DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
- base::NoBarrier_Store(
+ DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
+ base::Relaxed_Store(
reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
reinterpret_cast<base::AtomicWord>(next));
}
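// --- Illustrative aside (not part of the diff): NOBARRIER_* vs RELAXED_* ---
// The renames in this hunk make the memory order explicit in the accessor
// names. This standalone sketch shows the equivalent idea with standard C++
// atomics; FreeSpaceLike and its layout are invented for the example.
#include <atomic>
#include <cassert>
#include <cstdint>

struct FreeSpaceLike {
  std::atomic<intptr_t> size{0};  // read and written with relaxed ordering
};

intptr_t relaxed_read_size(const FreeSpaceLike* fs) {
  return fs->size.load(std::memory_order_relaxed);
}

void relaxed_write_size(FreeSpaceLike* fs, intptr_t value) {
  fs->size.store(value, std::memory_order_relaxed);
}

int main() {
  FreeSpaceLike fs;
  relaxed_write_size(&fs, 128);
  assert(relaxed_read_size(&fs) == 128);
  return 0;
}
// --- end aside ---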
@@ -3446,488 +2734,9 @@ FreeSpace* FreeSpace::cast(HeapObject* o) {
return reinterpret_cast<FreeSpace*>(o);
}
-
-uint32_t Name::hash_field() {
- return READ_UINT32_FIELD(this, kHashFieldOffset);
-}
-
-
-void Name::set_hash_field(uint32_t value) {
- WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
-#if V8_HOST_ARCH_64_BIT
-#if V8_TARGET_LITTLE_ENDIAN
- WRITE_UINT32_FIELD(this, kHashFieldSlot + kIntSize, 0);
-#else
- WRITE_UINT32_FIELD(this, kHashFieldSlot, 0);
-#endif
-#endif
-}
-
-
-bool Name::Equals(Name* other) {
- if (other == this) return true;
- if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
- this->IsSymbol() || other->IsSymbol()) {
- return false;
- }
- return String::cast(this)->SlowEquals(String::cast(other));
-}
-
-
-bool Name::Equals(Handle<Name> one, Handle<Name> two) {
- if (one.is_identical_to(two)) return true;
- if ((one->IsInternalizedString() && two->IsInternalizedString()) ||
- one->IsSymbol() || two->IsSymbol()) {
- return false;
- }
- return String::SlowEquals(Handle<String>::cast(one),
- Handle<String>::cast(two));
-}
-
-
-ACCESSORS(Symbol, name, Object, kNameOffset)
-SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
-BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
-BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
-BOOL_ACCESSORS(Symbol, flags, is_public, kPublicBit)
-
-bool String::Equals(String* other) {
- if (other == this) return true;
- if (this->IsInternalizedString() && other->IsInternalizedString()) {
- return false;
- }
- return SlowEquals(other);
-}
-
-
-bool String::Equals(Handle<String> one, Handle<String> two) {
- if (one.is_identical_to(two)) return true;
- if (one->IsInternalizedString() && two->IsInternalizedString()) {
- return false;
- }
- return SlowEquals(one, two);
-}
-
-
-Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
- if (string->IsConsString()) {
- Handle<ConsString> cons = Handle<ConsString>::cast(string);
- if (cons->IsFlat()) {
- string = handle(cons->first());
- } else {
- return SlowFlatten(cons, pretenure);
- }
- }
- if (string->IsThinString()) {
- string = handle(Handle<ThinString>::cast(string)->actual());
- DCHECK(!string->IsConsString());
- }
- return string;
-}
-
-
-uint16_t String::Get(int index) {
- DCHECK(index >= 0 && index < length());
- switch (StringShape(this).full_representation_tag()) {
- case kSeqStringTag | kOneByteStringTag:
- return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
- case kSeqStringTag | kTwoByteStringTag:
- return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
- case kConsStringTag | kOneByteStringTag:
- case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(this)->ConsStringGet(index);
- case kExternalStringTag | kOneByteStringTag:
- return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
- case kExternalStringTag | kTwoByteStringTag:
- return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
- case kSlicedStringTag | kOneByteStringTag:
- case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(this)->SlicedStringGet(index);
- case kThinStringTag | kOneByteStringTag:
- case kThinStringTag | kTwoByteStringTag:
- return ThinString::cast(this)->ThinStringGet(index);
- default:
- break;
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
-void String::Set(int index, uint16_t value) {
- DCHECK(index >= 0 && index < length());
- DCHECK(StringShape(this).IsSequential());
-
- return this->IsOneByteRepresentation()
- ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
- : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
-}
-
-
-bool String::IsFlat() {
- if (!StringShape(this).IsCons()) return true;
- return ConsString::cast(this)->second()->length() == 0;
-}
-
-
-String* String::GetUnderlying() {
- // Giving direct access to underlying string only makes sense if the
- // wrapping string is already flattened.
- DCHECK(this->IsFlat());
- DCHECK(StringShape(this).IsIndirect());
- STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
- STATIC_ASSERT(ConsString::kFirstOffset == ThinString::kActualOffset);
- const int kUnderlyingOffset = SlicedString::kParentOffset;
- return String::cast(READ_FIELD(this, kUnderlyingOffset));
-}
-
-
-template<class Visitor>
-ConsString* String::VisitFlat(Visitor* visitor,
- String* string,
- const int offset) {
- int slice_offset = offset;
- const int length = string->length();
- DCHECK(offset <= length);
- while (true) {
- int32_t type = string->map()->instance_type();
- switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
- case kSeqStringTag | kOneByteStringTag:
- visitor->VisitOneByteString(
- SeqOneByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return NULL;
-
- case kSeqStringTag | kTwoByteStringTag:
- visitor->VisitTwoByteString(
- SeqTwoByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return NULL;
-
- case kExternalStringTag | kOneByteStringTag:
- visitor->VisitOneByteString(
- ExternalOneByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return NULL;
-
- case kExternalStringTag | kTwoByteStringTag:
- visitor->VisitTwoByteString(
- ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
- length - offset);
- return NULL;
-
- case kSlicedStringTag | kOneByteStringTag:
- case kSlicedStringTag | kTwoByteStringTag: {
- SlicedString* slicedString = SlicedString::cast(string);
- slice_offset += slicedString->offset();
- string = slicedString->parent();
- continue;
- }
-
- case kConsStringTag | kOneByteStringTag:
- case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(string);
-
- case kThinStringTag | kOneByteStringTag:
- case kThinStringTag | kTwoByteStringTag:
- string = ThinString::cast(string)->actual();
- continue;
-
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-template <>
-inline Vector<const uint8_t> String::GetCharVector() {
- String::FlatContent flat = GetFlatContent();
- DCHECK(flat.IsOneByte());
- return flat.ToOneByteVector();
-}
-
-
-template <>
-inline Vector<const uc16> String::GetCharVector() {
- String::FlatContent flat = GetFlatContent();
- DCHECK(flat.IsTwoByte());
- return flat.ToUC16Vector();
-}
-
-uint32_t String::ToValidIndex(Object* number) {
- uint32_t index = PositiveNumberToUint32(number);
- uint32_t length_value = static_cast<uint32_t>(length());
- if (index > length_value) return length_value;
- return index;
-}
-
-uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
- DCHECK(index >= 0 && index < length());
- return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
-}
-
-
-void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
- DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
- WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
- static_cast<byte>(value));
-}
-
-
-Address SeqOneByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-uint8_t* SeqOneByteString::GetChars() {
- return reinterpret_cast<uint8_t*>(GetCharsAddress());
-}
-
-
-Address SeqTwoByteString::GetCharsAddress() {
- return FIELD_ADDR(this, kHeaderSize);
-}
-
-
-uc16* SeqTwoByteString::GetChars() {
- return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
-uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
- DCHECK(index >= 0 && index < length());
- return READ_UINT16_FIELD(this, kHeaderSize + index * kShortSize);
-}
-
-
-void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
- DCHECK(index >= 0 && index < length());
- WRITE_UINT16_FIELD(this, kHeaderSize + index * kShortSize, value);
-}
-
-
-int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
-}
-
-
-int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
- return SizeFor(length());
-}
-
-
-String* SlicedString::parent() {
- return String::cast(READ_FIELD(this, kParentOffset));
-}
-
-
-void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
- DCHECK(parent->IsSeqString() || parent->IsExternalString());
- WRITE_FIELD(this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
-}
-
-
-SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
-
-
-String* ConsString::first() {
- return String::cast(READ_FIELD(this, kFirstOffset));
-}
-
-
-Object* ConsString::unchecked_first() {
- return READ_FIELD(this, kFirstOffset);
-}
-
-
-void ConsString::set_first(String* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
-}
-
-
-String* ConsString::second() {
- return String::cast(READ_FIELD(this, kSecondOffset));
-}
-
-
-Object* ConsString::unchecked_second() {
- return READ_FIELD(this, kSecondOffset);
-}
-
-
-void ConsString::set_second(String* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
-}
-
-ACCESSORS(ThinString, actual, String, kActualOffset);
-
-bool ExternalString::is_short() {
- InstanceType type = map()->instance_type();
- return (type & kShortExternalStringMask) == kShortExternalStringTag;
-}
-
-
-const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
-}
-
-
-void ExternalOneByteString::update_data_cache() {
- if (is_short()) return;
- const char** data_field =
- reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
- *data_field = resource()->data();
-}
-
-
-void ExternalOneByteString::set_resource(
- const ExternalOneByteString::Resource* resource) {
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
- *reinterpret_cast<const Resource**>(
- FIELD_ADDR(this, kResourceOffset)) = resource;
- if (resource != NULL) update_data_cache();
-}
-
-
-const uint8_t* ExternalOneByteString::GetChars() {
- return reinterpret_cast<const uint8_t*>(resource()->data());
-}
-
-
-uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
- DCHECK(index >= 0 && index < length());
- return GetChars()[index];
-}
-
-
-const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
-}
-
-
-void ExternalTwoByteString::update_data_cache() {
- if (is_short()) return;
- const uint16_t** data_field =
- reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
- *data_field = resource()->data();
-}
-
-
-void ExternalTwoByteString::set_resource(
- const ExternalTwoByteString::Resource* resource) {
- *reinterpret_cast<const Resource**>(
- FIELD_ADDR(this, kResourceOffset)) = resource;
- if (resource != NULL) update_data_cache();
-}
-
-
-const uint16_t* ExternalTwoByteString::GetChars() {
- return resource()->data();
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
- DCHECK(index >= 0 && index < length());
- return GetChars()[index];
-}
-
-
-const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
- unsigned start) {
- return GetChars() + start;
-}
-
-
-int ConsStringIterator::OffsetForDepth(int depth) { return depth & kDepthMask; }
-
-
-void ConsStringIterator::PushLeft(ConsString* string) {
- frames_[depth_++ & kDepthMask] = string;
-}
-
-
-void ConsStringIterator::PushRight(ConsString* string) {
- // Inplace update.
- frames_[(depth_-1) & kDepthMask] = string;
-}
-
-
-void ConsStringIterator::AdjustMaximumDepth() {
- if (depth_ > maximum_depth_) maximum_depth_ = depth_;
-}
-
-
-void ConsStringIterator::Pop() {
- DCHECK(depth_ > 0);
- DCHECK(depth_ <= maximum_depth_);
- depth_--;
-}
-
-
-uint16_t StringCharacterStream::GetNext() {
- DCHECK(buffer8_ != NULL && end_ != NULL);
- // Advance cursor if needed.
- if (buffer8_ == end_) HasMore();
- DCHECK(buffer8_ < end_);
- return is_one_byte_ ? *buffer8_++ : *buffer16_++;
-}
-
-
-StringCharacterStream::StringCharacterStream(String* string, int offset)
- : is_one_byte_(false) {
- Reset(string, offset);
-}
-
-
-void StringCharacterStream::Reset(String* string, int offset) {
- buffer8_ = NULL;
- end_ = NULL;
- ConsString* cons_string = String::VisitFlat(this, string, offset);
- iter_.Reset(cons_string, offset);
- if (cons_string != NULL) {
- string = iter_.Next(&offset);
- if (string != NULL) String::VisitFlat(this, string, offset);
- }
-}
-
-
-bool StringCharacterStream::HasMore() {
- if (buffer8_ != end_) return true;
- int offset;
- String* string = iter_.Next(&offset);
- DCHECK_EQ(offset, 0);
- if (string == NULL) return false;
- String::VisitFlat(this, string);
- DCHECK(buffer8_ != end_);
- return true;
-}
-
-
-void StringCharacterStream::VisitOneByteString(
- const uint8_t* chars, int length) {
- is_one_byte_ = true;
- buffer8_ = chars;
- end_ = chars + length;
-}
-
-
-void StringCharacterStream::VisitTwoByteString(
- const uint16_t* chars, int length) {
- is_one_byte_ = false;
- buffer16_ = chars;
- end_ = reinterpret_cast<const uint8_t*>(chars + length);
-}
-
-
int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-byte ByteArray::get(int index) {
+byte ByteArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
@@ -3951,7 +2760,7 @@ void ByteArray::copy_out(int index, byte* buffer, int length) {
memcpy(buffer, src_addr, length);
}
-int ByteArray::get_int(int index) {
+int ByteArray::get_int(int index) const {
DCHECK(index >= 0 && index < this->length() / kIntSize);
return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
}
@@ -3961,11 +2770,22 @@ void ByteArray::set_int(int index, int value) {
WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
}
+uint32_t ByteArray::get_uint32(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ return READ_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size);
+}
+
+void ByteArray::set_uint32(int index, uint32_t value) {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ WRITE_UINT32_FIELD(this, kHeaderSize + index * kUInt32Size, value);
+}
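// --- Illustrative aside (not part of the diff): the new uint32 accessors ---
// Rough standalone analogue of get_uint32/set_uint32. The real
// READ_UINT32_FIELD/WRITE_UINT32_FIELD macros poke directly at object fields;
// here that is approximated with memcpy into a plain byte buffer. kUInt32Size
// and the bounds checks mirror the DCHECKs above; the rest is invented.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr int kUInt32Size = sizeof(uint32_t);

uint32_t get_uint32(const std::vector<uint8_t>& bytes, int index) {
  assert(index >= 0 && index < static_cast<int>(bytes.size()) / kUInt32Size);
  uint32_t value;
  std::memcpy(&value, bytes.data() + index * kUInt32Size, kUInt32Size);
  return value;
}

void set_uint32(std::vector<uint8_t>& bytes, int index, uint32_t value) {
  assert(index >= 0 && index < static_cast<int>(bytes.size()) / kUInt32Size);
  std::memcpy(bytes.data() + index * kUInt32Size, &value, kUInt32Size);
}

int main() {
  std::vector<uint8_t> backing(16, 0);
  set_uint32(backing, 2, 0xDEADBEEFu);
  assert(get_uint32(backing, 2) == 0xDEADBEEFu);
  return 0;
}
// --- end aside ---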
+
ByteArray* ByteArray::FromDataStartAddress(Address address) {
DCHECK_TAG_ALIGNED(address);
return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
}
+int ByteArray::DataSize() const { return RoundUp(length(), kPointerSize); }
int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
@@ -4110,33 +2930,28 @@ int FixedTypedArrayBase::ElementSize(InstanceType type) {
#undef TYPED_ARRAY_CASE
default:
UNREACHABLE();
- return 0;
}
return element_size;
}
-
-int FixedTypedArrayBase::DataSize(InstanceType type) {
+int FixedTypedArrayBase::DataSize(InstanceType type) const {
if (base_pointer() == Smi::kZero) return 0;
return length() * ElementSize(type);
}
-
-int FixedTypedArrayBase::DataSize() {
+int FixedTypedArrayBase::DataSize() const {
return DataSize(map()->instance_type());
}
-
-int FixedTypedArrayBase::size() {
+int FixedTypedArrayBase::size() const {
return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
}
-
-int FixedTypedArrayBase::TypedArraySize(InstanceType type) {
+int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
}
-
+// static
int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
}
@@ -4247,7 +3062,7 @@ template <class Traits>
void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
ElementType cast_value = Traits::defaultValue();
if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
+ int int_value = Smi::ToInt(value);
cast_value = from(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
@@ -4306,38 +3121,33 @@ Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
-
-int Map::visitor_id() {
- return READ_BYTE_FIELD(this, kVisitorIdOffset);
-}
-
+int Map::visitor_id() const { return READ_BYTE_FIELD(this, kVisitorIdOffset); }
void Map::set_visitor_id(int id) {
- DCHECK(0 <= id && id < 256);
+ DCHECK_LE(0, id);
+ DCHECK_LT(id, 256);
WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
}
-
-int Map::instance_size() {
- return NOBARRIER_READ_BYTE_FIELD(
- this, kInstanceSizeOffset) << kPointerSizeLog2;
+int Map::instance_size() const {
+ return RELAXED_READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
}
-
-int Map::inobject_properties_or_constructor_function_index() {
- return READ_BYTE_FIELD(this,
- kInObjectPropertiesOrConstructorFunctionIndexOffset);
+int Map::inobject_properties_or_constructor_function_index() const {
+ return RELAXED_READ_BYTE_FIELD(
+ this, kInObjectPropertiesOrConstructorFunctionIndexOffset);
}
void Map::set_inobject_properties_or_constructor_function_index(int value) {
- DCHECK(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInObjectPropertiesOrConstructorFunctionIndexOffset,
- static_cast<byte>(value));
+ DCHECK_LE(0, value);
+ DCHECK_LT(value, 256);
+ RELAXED_WRITE_BYTE_FIELD(this,
+ kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
}
-
-int Map::GetInObjectProperties() {
+int Map::GetInObjectProperties() const {
DCHECK(IsJSObjectMap());
return inobject_properties_or_constructor_function_index();
}
@@ -4348,8 +3158,7 @@ void Map::SetInObjectProperties(int value) {
set_inobject_properties_or_constructor_function_index(value);
}
-
-int Map::GetConstructorFunctionIndex() {
+int Map::GetConstructorFunctionIndex() const {
DCHECK(IsPrimitiveMap());
return inobject_properties_or_constructor_function_index();
}
@@ -4360,8 +3169,7 @@ void Map::SetConstructorFunctionIndex(int value) {
set_inobject_properties_or_constructor_function_index(value);
}
-
-int Map::GetInObjectPropertyOffset(int index) {
+int Map::GetInObjectPropertyOffset(int index) const {
// Adjust for the number of properties stored in the object.
index -= GetInObjectProperties();
DCHECK(index <= 0);
@@ -4375,8 +3183,7 @@ Handle<Map> Map::AddMissingTransitionsForTesting(
return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
}
-
-int HeapObject::SizeFromMap(Map* map) {
+int HeapObject::SizeFromMap(Map* map) const {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
@@ -4384,42 +3191,54 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == FIXED_ARRAY_TYPE ||
instance_type == TRANSITION_ARRAY_TYPE) {
return FixedArray::SizeFor(
- reinterpret_cast<FixedArray*>(this)->synchronized_length());
+ reinterpret_cast<const FixedArray*>(this)->synchronized_length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
    // Strings may get concurrently truncated, hence we have to access their
    // length in a synchronized way.
return SeqOneByteString::SizeFor(
- reinterpret_cast<SeqOneByteString*>(this)->synchronized_length());
+ reinterpret_cast<const SeqOneByteString*>(this)->synchronized_length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
- return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
+ return ByteArray::SizeFor(
+ reinterpret_cast<const ByteArray*>(this)->synchronized_length());
}
if (instance_type == BYTECODE_ARRAY_TYPE) {
- return reinterpret_cast<BytecodeArray*>(this)->BytecodeArraySize();
+ return BytecodeArray::SizeFor(
+ reinterpret_cast<const BytecodeArray*>(this)->synchronized_length());
}
if (instance_type == FREE_SPACE_TYPE) {
- return reinterpret_cast<FreeSpace*>(this)->nobarrier_size();
+ return reinterpret_cast<const FreeSpace*>(this)->relaxed_read_size();
}
if (instance_type == STRING_TYPE ||
instance_type == INTERNALIZED_STRING_TYPE) {
    // Strings may get concurrently truncated, hence we have to access their
    // length in a synchronized way.
return SeqTwoByteString::SizeFor(
- reinterpret_cast<SeqTwoByteString*>(this)->synchronized_length());
+ reinterpret_cast<const SeqTwoByteString*>(this)->synchronized_length());
}
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
return FixedDoubleArray::SizeFor(
- reinterpret_cast<FixedDoubleArray*>(this)->length());
+ reinterpret_cast<const FixedDoubleArray*>(this)->synchronized_length());
}
if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
- return reinterpret_cast<FixedTypedArrayBase*>(
- this)->TypedArraySize(instance_type);
+ return reinterpret_cast<const FixedTypedArrayBase*>(this)->TypedArraySize(
+ instance_type);
+ }
+ if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
+ return reinterpret_cast<const SmallOrderedHashSet*>(this)->Size();
+ }
+ if (instance_type == PROPERTY_ARRAY_TYPE) {
+ return PropertyArray::SizeFor(
+ reinterpret_cast<const PropertyArray*>(this)->synchronized_length());
+ }
+ if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
+ return reinterpret_cast<const SmallOrderedHashMap*>(this)->Size();
}
DCHECK(instance_type == CODE_TYPE);
- return reinterpret_cast<Code*>(this)->CodeSize();
+ return reinterpret_cast<const Code*>(this)->CodeSize();
}
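// --- Illustrative aside (not part of the diff): sizing variable objects ---
// Condensed, standalone sketch of the SizeFromMap() dispatch: objects whose
// size depends on a length field read it with acquire semantics, since a
// concurrent sweeper may inspect objects that are being truncated. The
// instance types, header sizes and rounding are made up; only the shape of
// the dispatch mirrors the real code.
#include <atomic>
#include <cassert>

enum InstanceType { FIXED_ARRAY_TYPE, BYTE_ARRAY_TYPE, CODE_TYPE };

constexpr int kPointerSize = 8;
constexpr int kHeaderSize = 16;

struct FakeHeapObject {
  InstanceType type;
  std::atomic<int> length{0};  // element count for variable-size objects
  int fixed_size = 64;         // pretend size for fixed-size objects
};

int RoundUp(int value, int align) { return (value + align - 1) & ~(align - 1); }

int SizeFromType(const FakeHeapObject* object) {
  // Analogue of synchronized_length(): an acquire load of the length field.
  int length = object->length.load(std::memory_order_acquire);
  switch (object->type) {
    case FIXED_ARRAY_TYPE:
      return kHeaderSize + length * kPointerSize;
    case BYTE_ARRAY_TYPE:
      return RoundUp(kHeaderSize + length, kPointerSize);
    case CODE_TYPE:
      return object->fixed_size;
  }
  return object->fixed_size;
}

int main() {
  FakeHeapObject arr{FIXED_ARRAY_TYPE};
  arr.length.store(4, std::memory_order_release);
  assert(SizeFromType(&arr) == kHeaderSize + 4 * kPointerSize);
  return 0;
}
// --- end aside ---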
@@ -4427,15 +3246,13 @@ void Map::set_instance_size(int value) {
DCHECK_EQ(0, value & (kPointerSize - 1));
value >>= kPointerSizeLog2;
DCHECK(0 <= value && value < 256);
- NOBARRIER_WRITE_BYTE_FIELD(
- this, kInstanceSizeOffset, static_cast<byte>(value));
+ RELAXED_WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
}
void Map::clear_unused() { WRITE_BYTE_FIELD(this, kUnusedOffset, 0); }
-
-InstanceType Map::instance_type() {
+InstanceType Map::instance_type() const {
return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
}
@@ -4444,8 +3261,7 @@ void Map::set_instance_type(InstanceType value) {
WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
}
-
-int Map::unused_property_fields() {
+int Map::unused_property_fields() const {
return READ_BYTE_FIELD(this, kUnusedPropertyFieldsOffset);
}
@@ -4479,8 +3295,7 @@ void Map::set_non_instance_prototype(bool value) {
}
}
-
-bool Map::has_non_instance_prototype() {
+bool Map::has_non_instance_prototype() const {
return ((1 << kHasNonInstancePrototype) & bit_field()) != 0;
}
@@ -4511,8 +3326,7 @@ void Map::set_has_indexed_interceptor() {
set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
}
-
-bool Map::has_indexed_interceptor() {
+bool Map::has_indexed_interceptor() const {
return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
}
@@ -4521,8 +3335,7 @@ void Map::set_is_undetectable() {
set_bit_field(bit_field() | (1 << kIsUndetectable));
}
-
-bool Map::is_undetectable() {
+bool Map::is_undetectable() const {
return ((1 << kIsUndetectable) & bit_field()) != 0;
}
@@ -4531,8 +3344,7 @@ void Map::set_has_named_interceptor() {
set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
}
-
-bool Map::has_named_interceptor() {
+bool Map::has_named_interceptor() const {
return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
}
@@ -4545,8 +3357,7 @@ void Map::set_is_access_check_needed(bool access_check_needed) {
}
}
-
-bool Map::is_access_check_needed() {
+bool Map::is_access_check_needed() const {
return ((1 << kIsAccessCheckNeeded) & bit_field()) != 0;
}
@@ -4559,7 +3370,7 @@ void Map::set_is_extensible(bool value) {
}
}
-bool Map::is_extensible() {
+bool Map::is_extensible() const {
return ((1 << kIsExtensible) & bit_field2()) != 0;
}
@@ -4584,47 +3395,47 @@ void Map::set_elements_kind(ElementsKind elements_kind) {
DCHECK(this->elements_kind() == elements_kind);
}
-
-ElementsKind Map::elements_kind() {
+ElementsKind Map::elements_kind() const {
return Map::ElementsKindBits::decode(bit_field2());
}
-
-bool Map::has_fast_smi_elements() {
- return IsFastSmiElementsKind(elements_kind());
+bool Map::has_fast_smi_elements() const {
+ return IsSmiElementsKind(elements_kind());
}
-bool Map::has_fast_object_elements() {
- return IsFastObjectElementsKind(elements_kind());
+bool Map::has_fast_object_elements() const {
+ return IsObjectElementsKind(elements_kind());
}
-bool Map::has_fast_smi_or_object_elements() {
- return IsFastSmiOrObjectElementsKind(elements_kind());
+bool Map::has_fast_smi_or_object_elements() const {
+ return IsSmiOrObjectElementsKind(elements_kind());
}
-bool Map::has_fast_double_elements() {
- return IsFastDoubleElementsKind(elements_kind());
+bool Map::has_fast_double_elements() const {
+ return IsDoubleElementsKind(elements_kind());
}
-bool Map::has_fast_elements() { return IsFastElementsKind(elements_kind()); }
+bool Map::has_fast_elements() const {
+ return IsFastElementsKind(elements_kind());
+}
-bool Map::has_sloppy_arguments_elements() {
+bool Map::has_sloppy_arguments_elements() const {
return IsSloppyArgumentsElementsKind(elements_kind());
}
-bool Map::has_fast_sloppy_arguments_elements() {
+bool Map::has_fast_sloppy_arguments_elements() const {
return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
-bool Map::has_fast_string_wrapper_elements() {
+bool Map::has_fast_string_wrapper_elements() const {
return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
}
-bool Map::has_fixed_typed_array_elements() {
+bool Map::has_fixed_typed_array_elements() const {
return IsFixedTypedArrayElementsKind(elements_kind());
}
-bool Map::has_dictionary_elements() {
+bool Map::has_dictionary_elements() const {
return IsDictionaryElementsKind(elements_kind());
}
@@ -4635,13 +3446,11 @@ void Map::set_dictionary_map(bool value) {
set_bit_field3(new_bit_field3);
}
-
-bool Map::is_dictionary_map() {
+bool Map::is_dictionary_map() const {
return DictionaryMap::decode(bit_field3());
}
-
-Code::Flags Code::flags() {
+Code::Flags Code::flags() const {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -4650,8 +3459,7 @@ void Map::set_owns_descriptors(bool owns_descriptors) {
set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors));
}
-
-bool Map::owns_descriptors() {
+bool Map::owns_descriptors() const {
return OwnsDescriptors::decode(bit_field3());
}
@@ -4668,18 +3476,13 @@ void Map::deprecate() {
set_bit_field3(Deprecated::update(bit_field3(), true));
}
-
-bool Map::is_deprecated() {
- return Deprecated::decode(bit_field3());
-}
-
+bool Map::is_deprecated() const { return Deprecated::decode(bit_field3()); }
void Map::set_migration_target(bool value) {
set_bit_field3(IsMigrationTarget::update(bit_field3(), value));
}
-
-bool Map::is_migration_target() {
+bool Map::is_migration_target() const {
return IsMigrationTarget::decode(bit_field3());
}
@@ -4687,7 +3490,7 @@ void Map::set_immutable_proto(bool value) {
set_bit_field3(ImmutablePrototype::update(bit_field3(), value));
}
-bool Map::is_immutable_proto() {
+bool Map::is_immutable_proto() const {
return ImmutablePrototype::decode(bit_field3());
}
@@ -4695,16 +3498,15 @@ void Map::set_new_target_is_base(bool value) {
set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
}
-
-bool Map::new_target_is_base() { return NewTargetIsBase::decode(bit_field3()); }
-
+bool Map::new_target_is_base() const {
+ return NewTargetIsBase::decode(bit_field3());
+}
void Map::set_construction_counter(int value) {
set_bit_field3(ConstructionCounter::update(bit_field3(), value));
}
-
-int Map::construction_counter() {
+int Map::construction_counter() const {
return ConstructionCounter::decode(bit_field3());
}
@@ -4713,20 +3515,9 @@ void Map::mark_unstable() {
set_bit_field3(IsUnstable::update(bit_field3(), true));
}
+bool Map::is_stable() const { return !IsUnstable::decode(bit_field3()); }
-bool Map::is_stable() {
- return !IsUnstable::decode(bit_field3());
-}
-
-
-bool Map::has_code_cache() {
- // Code caches are always fixed arrays. The empty fixed array is used as a
- // sentinel for an absent code cache.
- return code_cache()->length() != 0;
-}
-
-
-bool Map::CanBeDeprecated() {
+bool Map::CanBeDeprecated() const {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
@@ -4751,48 +3542,52 @@ void Map::NotifyLeafMapLayoutChange() {
}
}
-
-bool Map::CanTransition() {
+bool Map::CanTransition() const {
// Only JSObject and subtypes have map transitions and back pointers.
STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
-
-bool Map::IsBooleanMap() { return this == GetHeap()->boolean_map(); }
-bool Map::IsPrimitiveMap() {
+bool Map::IsBooleanMap() const { return this == GetHeap()->boolean_map(); }
+bool Map::IsPrimitiveMap() const {
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
-bool Map::IsJSReceiverMap() {
+bool Map::IsJSReceiverMap() const {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
-bool Map::IsJSObjectMap() {
+bool Map::IsJSObjectMap() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
-bool Map::IsJSArrayMap() { return instance_type() == JS_ARRAY_TYPE; }
-bool Map::IsJSFunctionMap() { return instance_type() == JS_FUNCTION_TYPE; }
-bool Map::IsStringMap() { return instance_type() < FIRST_NONSTRING_TYPE; }
-bool Map::IsJSProxyMap() { return instance_type() == JS_PROXY_TYPE; }
-bool Map::IsJSGlobalProxyMap() {
+bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
+bool Map::IsJSFunctionMap() const {
+ return instance_type() == JS_FUNCTION_TYPE;
+}
+bool Map::IsStringMap() const { return instance_type() < FIRST_NONSTRING_TYPE; }
+bool Map::IsJSProxyMap() const { return instance_type() == JS_PROXY_TYPE; }
+bool Map::IsJSGlobalProxyMap() const {
return instance_type() == JS_GLOBAL_PROXY_TYPE;
}
-bool Map::IsJSGlobalObjectMap() {
+bool Map::IsJSGlobalObjectMap() const {
return instance_type() == JS_GLOBAL_OBJECT_TYPE;
}
-bool Map::IsJSTypedArrayMap() { return instance_type() == JS_TYPED_ARRAY_TYPE; }
-bool Map::IsJSDataViewMap() { return instance_type() == JS_DATA_VIEW_TYPE; }
+bool Map::IsJSTypedArrayMap() const {
+ return instance_type() == JS_TYPED_ARRAY_TYPE;
+}
+bool Map::IsJSDataViewMap() const {
+ return instance_type() == JS_DATA_VIEW_TYPE;
+}
-bool Map::IsSpecialReceiverMap() {
+bool Map::IsSpecialReceiverMap() const {
bool result = IsSpecialReceiverInstanceType(instance_type());
DCHECK_IMPLIES(!result,
!has_named_interceptor() && !is_access_check_needed());
return result;
}
-bool Map::CanOmitMapChecks() {
+bool Map::CanOmitMapChecks() const {
return is_stable() && FLAG_omit_map_checks_for_leaf_maps;
}
@@ -4806,9 +3601,7 @@ void DependentCode::set_next_link(DependentCode* next) {
set(kNextLinkIndex, next);
}
-
-int DependentCode::flags() { return Smi::cast(get(kFlagsIndex))->value(); }
-
+int DependentCode::flags() { return Smi::ToInt(get(kFlagsIndex)); }
void DependentCode::set_flags(int flags) {
set(kFlagsIndex, Smi::FromInt(flags));
@@ -4857,12 +3650,9 @@ void Code::set_flags(Code::Flags flags) {
WRITE_INT_FIELD(this, kFlagsOffset, flags);
}
+Code::Kind Code::kind() const { return ExtractKindFromFlags(flags()); }
-Code::Kind Code::kind() {
- return ExtractKindFromFlags(flags());
-}
-
-bool Code::IsCodeStubOrIC() {
+bool Code::IsCodeStubOrIC() const {
switch (kind()) {
case STUB:
case HANDLER:
@@ -4875,9 +3665,8 @@ bool Code::IsCodeStubOrIC() {
}
}
-ExtraICState Code::extra_ic_state() {
- DCHECK(is_binary_op_stub() || is_compare_ic_stub() ||
- is_to_boolean_ic_stub() || is_debug_stub());
+ExtraICState Code::extra_ic_state() const {
+ DCHECK(is_compare_ic_stub() || is_debug_stub());
return ExtractExtraICStateFromFlags(flags());
}
@@ -4892,24 +3681,29 @@ void Code::set_raw_kind_specific_flags2(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags2Offset, value);
}
-
-inline bool Code::is_crankshafted() {
+inline bool Code::is_crankshafted() const {
return IsCrankshaftedField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
-
-inline bool Code::is_hydrogen_stub() {
+inline bool Code::is_hydrogen_stub() const {
return is_crankshafted() && kind() != OPTIMIZED_FUNCTION;
}
-inline bool Code::is_interpreter_trampoline_builtin() {
+inline bool Code::is_interpreter_trampoline_builtin() const {
Builtins* builtins = GetIsolate()->builtins();
return this == *builtins->InterpreterEntryTrampoline() ||
this == *builtins->InterpreterEnterBytecodeAdvance() ||
this == *builtins->InterpreterEnterBytecodeDispatch();
}
+inline bool Code::checks_optimization_marker() const {
+ Builtins* builtins = GetIsolate()->builtins();
+ return this == *builtins->CompileLazy() ||
+ this == *builtins->InterpreterEntryTrampoline() ||
+ this == *builtins->CheckOptimizationMarker();
+}
+
inline bool Code::has_unwinding_info() const {
return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
@@ -4926,7 +3720,7 @@ inline void Code::set_is_crankshafted(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-inline bool Code::has_tagged_params() {
+inline bool Code::has_tagged_params() const {
int flags = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
return HasTaggedStackField::decode(flags);
}
@@ -4937,7 +3731,7 @@ inline void Code::set_has_tagged_params(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-inline bool Code::is_turbofanned() {
+inline bool Code::is_turbofanned() const {
return IsTurbofannedField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
@@ -4949,8 +3743,7 @@ inline void Code::set_is_turbofanned(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-
-inline bool Code::can_have_weak_objects() {
+inline bool Code::can_have_weak_objects() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
return CanHaveWeakObjectsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -4964,7 +3757,7 @@ inline void Code::set_can_have_weak_objects(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-inline bool Code::is_construct_stub() {
+inline bool Code::is_construct_stub() const {
DCHECK(kind() == BUILTIN);
return IsConstructStubField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -4977,7 +3770,7 @@ inline void Code::set_is_construct_stub(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-inline bool Code::is_promise_rejection() {
+inline bool Code::is_promise_rejection() const {
DCHECK(kind() == BUILTIN);
return IsPromiseRejectionField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -4990,7 +3783,7 @@ inline void Code::set_is_promise_rejection(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-inline bool Code::is_exception_caught() {
+inline bool Code::is_exception_caught() const {
DCHECK(kind() == BUILTIN);
return IsExceptionCaughtField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -5003,22 +3796,13 @@ inline void Code::set_is_exception_caught(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-bool Code::has_deoptimization_support() {
- DCHECK_EQ(FUNCTION, kind());
- unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
- return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
-}
-
-
-void Code::set_has_deoptimization_support(bool value) {
- DCHECK_EQ(FUNCTION, kind());
- unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
- flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
- WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
+inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
+ if (is_promise_rejection()) return HandlerTable::PROMISE;
+ if (is_exception_caught()) return HandlerTable::CAUGHT;
+ return HandlerTable::UNCAUGHT;
}
-
-bool Code::has_debug_break_slots() {
+bool Code::has_debug_break_slots() const {
DCHECK_EQ(FUNCTION, kind());
unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
@@ -5032,8 +3816,7 @@ void Code::set_has_debug_break_slots(bool value) {
WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
}
-
-bool Code::has_reloc_info_for_serialization() {
+bool Code::has_reloc_info_for_serialization() const {
DCHECK_EQ(FUNCTION, kind());
unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasRelocInfoForSerialization::decode(flags);
@@ -5047,8 +3830,7 @@ void Code::set_has_reloc_info_for_serialization(bool value) {
WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
}
-
-int Code::allow_osr_at_loop_nesting_level() {
+int Code::allow_osr_at_loop_nesting_level() const {
DCHECK_EQ(FUNCTION, kind());
int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
return AllowOSRAtLoopNestingLevelField::decode(fields);
@@ -5063,30 +3845,15 @@ void Code::set_allow_osr_at_loop_nesting_level(int level) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-
-int Code::profiler_ticks() {
- DCHECK_EQ(FUNCTION, kind());
- return ProfilerTicksField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_profiler_ticks(int ticks) {
- if (kind() == FUNCTION) {
- unsigned previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- unsigned updated = ProfilerTicksField::update(previous, ticks);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
- }
+int Code::builtin_index() const {
+ return READ_INT_FIELD(this, kBuiltinIndexOffset);
}
-int Code::builtin_index() { return READ_INT_FIELD(this, kBuiltinIndexOffset); }
-
void Code::set_builtin_index(int index) {
WRITE_INT_FIELD(this, kBuiltinIndexOffset, index);
}
-
-unsigned Code::stack_slots() {
+unsigned Code::stack_slots() const {
DCHECK(is_crankshafted());
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -5101,8 +3868,7 @@ void Code::set_stack_slots(unsigned slots) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-
-unsigned Code::safepoint_table_offset() {
+unsigned Code::safepoint_table_offset() const {
DCHECK(is_crankshafted());
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
@@ -5118,8 +3884,7 @@ void Code::set_safepoint_table_offset(unsigned offset) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-
-unsigned Code::back_edge_table_offset() {
+unsigned Code::back_edge_table_offset() const {
DCHECK_EQ(FUNCTION, kind());
return BackEdgeTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
@@ -5135,8 +3900,7 @@ void Code::set_back_edge_table_offset(unsigned offset) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-
-bool Code::back_edges_patched_for_osr() {
+bool Code::back_edges_patched_for_osr() const {
DCHECK_EQ(FUNCTION, kind());
return allow_osr_at_loop_nesting_level() > 0;
}
@@ -5144,8 +3908,7 @@ bool Code::back_edges_patched_for_osr() {
uint16_t Code::to_boolean_state() { return extra_ic_state(); }
-
-bool Code::marked_for_deoptimization() {
+bool Code::marked_for_deoptimization() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
return MarkedForDeoptimizationField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -5160,7 +3923,7 @@ void Code::set_marked_for_deoptimization(bool flag) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-bool Code::deopt_already_counted() {
+bool Code::deopt_already_counted() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
return DeoptAlreadyCountedField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
@@ -5174,7 +3937,7 @@ void Code::set_deopt_already_counted(bool flag) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
-bool Code::is_inline_cache_stub() {
+bool Code::is_inline_cache_stub() const {
Kind kind = this->kind();
switch (kind) {
#define CASE(name) case name: return true;
@@ -5184,7 +3947,7 @@ bool Code::is_inline_cache_stub() {
}
}
-bool Code::is_debug_stub() {
+bool Code::is_debug_stub() const {
if (kind() != BUILTIN) return false;
switch (builtin_index()) {
#define CASE_DEBUG_BUILTIN(name) case Builtins::k##name:
@@ -5196,13 +3959,11 @@ bool Code::is_debug_stub() {
}
return false;
}
-bool Code::is_handler() { return kind() == HANDLER; }
-bool Code::is_stub() { return kind() == STUB; }
-bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
-bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
-bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
-bool Code::is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
-bool Code::is_wasm_code() { return kind() == WASM_FUNCTION; }
+bool Code::is_handler() const { return kind() == HANDLER; }
+bool Code::is_stub() const { return kind() == STUB; }
+bool Code::is_compare_ic_stub() const { return kind() == COMPARE_IC; }
+bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
+bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
Address Code::constant_pool() {
Address constant_pool = NULL;
@@ -5247,10 +4008,12 @@ Code* Code::GetCodeFromTargetAddress(Address address) {
return result;
}
+Object* Code::GetObjectFromCodeEntry(Address code_entry) {
+ return HeapObject::FromAddress(code_entry - Code::kHeaderSize);
+}
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
- return HeapObject::
- FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
+ return GetObjectFromCodeEntry(Memory::Address_at(location_of_address));
}
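// --- Illustrative aside (not part of the diff): code entry arithmetic ---
// Toy illustration of the relationship used by GetObjectFromCodeEntry above:
// a code entry points just past a fixed-size header, so subtracting
// kHeaderSize recovers the start of the owning object. Heap-object pointer
// tagging is ignored; the layout and sizes below are invented.
#include <cassert>
#include <cstdint>

constexpr int kHeaderSize = 32;

struct FakeCode {
  uint8_t header[kHeaderSize];  // map, flags, table offsets, ...
  uint8_t instructions[64];     // the "code entry" points here
};

uint8_t* EntryOf(FakeCode* code) {
  return reinterpret_cast<uint8_t*>(code) + kHeaderSize;
}

FakeCode* ObjectFromEntry(uint8_t* entry) {
  return reinterpret_cast<FakeCode*>(entry - kHeaderSize);
}

int main() {
  FakeCode code = {};
  uint8_t* entry = EntryOf(&code);
  assert(ObjectFromEntry(entry) == &code);
  return 0;
}
// --- end aside ---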
@@ -5381,15 +4144,14 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
}
-
-LayoutDescriptor* Map::layout_descriptor_gc_safe() {
- Object* layout_desc = READ_FIELD(this, kLayoutDescriptorOffset);
+LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
+ Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
return LayoutDescriptor::cast_gc_safe(layout_desc);
}
bool Map::HasFastPointerLayout() const {
- Object* layout_desc = READ_FIELD(this, kLayoutDescriptorOffset);
+ Object* layout_desc = RELAXED_READ_FIELD(this, kLayoutDescriptorOffset);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
@@ -5405,11 +4167,11 @@ void Map::UpdateDescriptors(DescriptorArray* descriptors,
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
CHECK(layout_descriptor()->IsConsistentWithMap(this));
- CHECK(visitor_id() == Heap::GetStaticVisitorIdForMap(this));
+ CHECK(visitor_id() == Map::GetVisitorId(this));
}
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
- DCHECK(visitor_id() == Heap::GetStaticVisitorIdForMap(this));
+ DCHECK(visitor_id() == Map::GetVisitorId(this));
#endif
}
}
@@ -5431,7 +4193,7 @@ void Map::InitializeDescriptors(DescriptorArray* descriptors,
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
#endif
- set_visitor_id(Heap::GetStaticVisitorIdForMap(this));
+ set_visitor_id(Map::GetVisitorId(this));
}
}
@@ -5451,8 +4213,7 @@ uint32_t Map::bit_field3() const {
return READ_UINT32_FIELD(this, kBitField3Offset);
}
-
-LayoutDescriptor* Map::GetLayoutDescriptor() {
+LayoutDescriptor* Map::GetLayoutDescriptor() const {
return FLAG_unbox_double_fields ? layout_descriptor()
: LayoutDescriptor::FastPointerLayout();
}
@@ -5473,8 +4234,7 @@ void Map::AppendDescriptor(Descriptor* desc) {
#endif
}
-
-Object* Map::GetBackPointer() {
+Object* Map::GetBackPointer() const {
Object* object = constructor_or_backpointer();
if (object->IsMap()) {
return object;
@@ -5482,8 +4242,7 @@ Object* Map::GetBackPointer() {
return GetIsolate()->heap()->undefined_value();
}
-
-Map* Map::ElementsTransitionMap() {
+Map* Map::ElementsTransitionMap() const {
return TransitionArray::SearchSpecial(
this, GetHeap()->elements_transition_symbol());
}
@@ -5515,9 +4274,6 @@ void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
set_constructor_or_backpointer(value, mode);
}
-ACCESSORS(JSArgumentsObject, length, Object, kLengthOffset);
-ACCESSORS(JSSloppyArgumentsObject, callee, Object, kCalleeOffset);
-
ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
@@ -5556,6 +4312,9 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
map->unused_property_fields());
}
+Object* JSBoundFunction::raw_bound_target_function() const {
+ return READ_FIELD(this, kBoundTargetFunctionOffset);
+}
ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
kBoundTargetFunctionOffset)
@@ -5671,23 +4430,18 @@ ACCESSORS(Module, regular_exports, FixedArray, kRegularExportsOffset)
ACCESSORS(Module, regular_imports, FixedArray, kRegularImportsOffset)
ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
+ACCESSORS(Module, script, Script, kScriptOffset)
+ACCESSORS(Module, exception, Object, kExceptionOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
+SMI_ACCESSORS(Module, dfs_index, kDfsIndexOffset)
+SMI_ACCESSORS(Module, dfs_ancestor_index, kDfsAncestorIndexOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
-bool Module::evaluated() const { return code()->IsModuleInfo(); }
-
-void Module::set_evaluated() {
- DCHECK(instantiated());
- DCHECK(!evaluated());
- return set_code(
- JSFunction::cast(code())->shared()->scope_info()->ModuleDescriptorInfo());
-}
-
-bool Module::instantiated() const { return !code()->IsSharedFunctionInfo(); }
-
ModuleInfo* Module::info() const {
- if (evaluated()) return ModuleInfo::cast(code());
- ScopeInfo* scope_info = instantiated()
+ if (status() >= kEvaluating) {
+ return ModuleInfo::cast(code());
+ }
+ ScopeInfo* scope_info = status() >= kInstantiating
? JSFunction::cast(code())->shared()->scope_info()
: SharedFunctionInfo::cast(code())->scope_info();
return scope_info->ModuleDescriptorInfo();
@@ -5757,27 +4511,27 @@ ACCESSORS(ObjectTemplateInfo, data, Object, kDataOffset)
int ObjectTemplateInfo::embedder_field_count() const {
Object* value = data();
DCHECK(value->IsSmi());
- return EmbedderFieldCount::decode(Smi::cast(value)->value());
+ return EmbedderFieldCount::decode(Smi::ToInt(value));
}
void ObjectTemplateInfo::set_embedder_field_count(int count) {
- return set_data(Smi::FromInt(
- EmbedderFieldCount::update(Smi::cast(data())->value(), count)));
+ return set_data(
+ Smi::FromInt(EmbedderFieldCount::update(Smi::ToInt(data()), count)));
}
bool ObjectTemplateInfo::immutable_proto() const {
Object* value = data();
DCHECK(value->IsSmi());
- return IsImmutablePrototype::decode(Smi::cast(value)->value());
+ return IsImmutablePrototype::decode(Smi::ToInt(value));
}
void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
return set_data(Smi::FromInt(
- IsImmutablePrototype::update(Smi::cast(data())->value(), immutable)));
+ IsImmutablePrototype::update(Smi::ToInt(data()), immutable)));
}
int TemplateList::length() const {
- return Smi::cast(FixedArray::cast(this)->get(kLengthIndex))->value();
+ return Smi::ToInt(FixedArray::cast(this)->get(kLengthIndex));
}
Object* TemplateList::get(int index) const {
@@ -5788,98 +4542,36 @@ void TemplateList::set(int index, Object* value) {
FixedArray::cast(this)->set(kFirstElementIndex + index, value);
}
-ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
-ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-SMI_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
-SMI_ACCESSORS(AllocationSite, pretenure_create_count,
- kPretenureCreateCountOffset)
-ACCESSORS(AllocationSite, dependent_code, DependentCode,
- kDependentCodeOffset)
-ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
-ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
-
-ACCESSORS(Script, source, Object, kSourceOffset)
-ACCESSORS(Script, name, Object, kNameOffset)
-SMI_ACCESSORS(Script, id, kIdOffset)
-SMI_ACCESSORS(Script, line_offset, kLineOffsetOffset)
-SMI_ACCESSORS(Script, column_offset, kColumnOffsetOffset)
-ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
-SMI_ACCESSORS(Script, type, kTypeOffset)
-ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS_CHECKED(Script, eval_from_shared, Object, kEvalFromSharedOffset,
- this->type() != TYPE_WASM)
-SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
- this->type() != TYPE_WASM)
-ACCESSORS(Script, shared_function_infos, FixedArray, kSharedFunctionInfosOffset)
-SMI_ACCESSORS(Script, flags, kFlagsOffset)
-ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
-ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
-ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
- this->type() == TYPE_WASM)
-ACCESSORS(Script, preparsed_scope_data, PodArray<uint32_t>,
- kPreParsedScopeDataOffset)
-
-Script::CompilationType Script::compilation_type() {
- return BooleanBit::get(flags(), kCompilationTypeBit) ?
- COMPILATION_TYPE_EVAL : COMPILATION_TYPE_HOST;
-}
-void Script::set_compilation_type(CompilationType type) {
- set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
- type == COMPILATION_TYPE_EVAL));
-}
-Script::CompilationState Script::compilation_state() {
- return BooleanBit::get(flags(), kCompilationStateBit) ?
- COMPILATION_STATE_COMPILED : COMPILATION_STATE_INITIAL;
-}
-void Script::set_compilation_state(CompilationState state) {
- set_flags(BooleanBit::set(flags(), kCompilationStateBit,
- state == COMPILATION_STATE_COMPILED));
-}
-ScriptOriginOptions Script::origin_options() {
- return ScriptOriginOptions((flags() & kOriginOptionsMask) >>
- kOriginOptionsShift);
-}
-void Script::set_origin_options(ScriptOriginOptions origin_options) {
- DCHECK(!(origin_options.Flags() & ~((1 << kOriginOptionsSize) - 1)));
- set_flags((flags() & ~kOriginOptionsMask) |
- (origin_options.Flags() << kOriginOptionsShift));
-}
-
-
-ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsIndex)
-ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayIndex)
-ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
+ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
+ kTransitionInfoOrBoilerplateOffset)
-bool DebugInfo::HasDebugBytecodeArray() {
- return debug_bytecode_array()->IsBytecodeArray();
+JSObject* AllocationSite::boilerplate() const {
+ DCHECK(PointsToLiteral());
+ return JSObject::cast(transition_info_or_boilerplate());
}
-bool DebugInfo::HasDebugCode() {
- Code* code = shared()->code();
- bool has = code->kind() == Code::FUNCTION;
- DCHECK(!has || code->has_debug_break_slots());
- return has;
+void AllocationSite::set_boilerplate(JSObject* object, WriteBarrierMode mode) {
+ set_transition_info_or_boilerplate(object, mode);
}
-BytecodeArray* DebugInfo::OriginalBytecodeArray() {
- DCHECK(HasDebugBytecodeArray());
- return shared()->bytecode_array();
+int AllocationSite::transition_info() const {
+ DCHECK(!PointsToLiteral());
+ return Smi::cast(transition_info_or_boilerplate())->value();
}
-BytecodeArray* DebugInfo::DebugBytecodeArray() {
- DCHECK(HasDebugBytecodeArray());
- return BytecodeArray::cast(debug_bytecode_array());
+void AllocationSite::set_transition_info(int value) {
+ DCHECK(!PointsToLiteral());
+ set_transition_info_or_boilerplate(Smi::FromInt(value), SKIP_WRITE_BARRIER);
}
-Code* DebugInfo::DebugCode() {
- DCHECK(HasDebugCode());
- return shared()->code();
-}
-
-SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionIndex)
-ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
+ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
+SMI_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
+SMI_ACCESSORS(AllocationSite, pretenure_create_count,
+ kPretenureCreateCountOffset)
+ACCESSORS(AllocationSite, dependent_code, DependentCode,
+ kDependentCodeOffset)
+ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
+ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberIndex)
SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberIndex)
@@ -5899,22 +4591,6 @@ ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
UnseededNumberDictionary, kStackFrameCacheIndex)
-ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
- kFeedbackMetadataOffset)
-SMI_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
-#if V8_SFI_HAS_UNIQUE_ID
-SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
-#endif
-ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
- kInstanceClassNameOffset)
-ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
-ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
-ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
-ACCESSORS(SharedFunctionInfo, function_identifier, Object,
- kFunctionIdentifierOffset)
-
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
kHiddenPrototypeBit)
@@ -5929,522 +4605,55 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
kDoNotCacheBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_named_expression,
- kIsNamedExpressionBit)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
- kIsTopLevelBit)
-
-#if V8_HOST_ARCH_32_BIT
-SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-SMI_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
- kFormalParameterCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
- kExpectedNofPropertiesOffset)
-SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
- kStartPositionAndTypeOffset)
-SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
-SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
- kFunctionTokenPositionOffset)
-SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
- kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
- kOptCountAndBailoutReasonOffset)
-SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
-
-#else
-
-#if V8_TARGET_LITTLE_ENDIAN
-#define PSEUDO_SMI_LO_ALIGN 0
-#define PSEUDO_SMI_HI_ALIGN kIntSize
-#else
-#define PSEUDO_SMI_LO_ALIGN kIntSize
-#define PSEUDO_SMI_HI_ALIGN 0
-#endif
-
-#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_LO_ALIGN); \
- int holder::name() const { \
- int value = READ_INT_FIELD(this, offset); \
- DCHECK(kHeapObjectTag == 1); \
- DCHECK((value & kHeapObjectTag) == 0); \
- return value >> 1; \
- } \
- void holder::set_##name(int value) { \
- DCHECK(kHeapObjectTag == 1); \
- DCHECK((value & 0xC0000000) == 0xC0000000 || (value & 0xC0000000) == 0x0); \
- WRITE_INT_FIELD(this, offset, (value << 1) & ~kHeapObjectTag); \
- }
-
-#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_HI_ALIGN); \
- INT_ACCESSORS(holder, name, offset)
-
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, internal_formal_parameter_count,
- kFormalParameterCountOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- expected_nof_properties,
- kExpectedNofPropertiesOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, end_position, kEndPositionOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- start_position_and_type,
- kStartPositionAndTypeOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- function_token_position,
- kFunctionTokenPositionOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- compiler_hints,
- kCompilerHintsOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- opt_count_and_bailout_reason,
- kOptCountAndBailoutReasonOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
- ast_node_count,
- kAstNodeCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- profiler_ticks,
- kProfilerTicksOffset)
-
-#endif
-
-AbstractCode* SharedFunctionInfo::abstract_code() {
- if (HasBytecodeArray()) {
- return AbstractCode::cast(bytecode_array());
- } else {
- return AbstractCode::cast(code());
- }
-}
-
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
- kAllowLazyCompilation)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_arguments,
- kUsesArguments)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, has_duplicate_parameters,
- kHasDuplicateParameters)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
- kIsDeclaration)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
- kMarkedForTierUp)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
- has_concurrent_optimization_job, kHasConcurrentOptimizationJob)
-
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
- kNeedsHomeObject)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, must_use_ignition_turbo,
- kMustUseIgnitionTurbo)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
- kIsAsmWasmBroken)
-
-BOOL_GETTER(SharedFunctionInfo, compiler_hints, optimization_disabled,
- kOptimizationDisabled)
-
-void SharedFunctionInfo::set_optimization_disabled(bool disable) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kOptimizationDisabled,
- disable));
-}
-
-LanguageMode SharedFunctionInfo::language_mode() {
- STATIC_ASSERT(LANGUAGE_END == 2);
- return construct_language_mode(
- BooleanBit::get(compiler_hints(), kStrictModeFunction));
-}
-
-void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 2);
- // We only allow language mode transitions that set the same language mode
- // again or go up in the chain:
- DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
- int hints = compiler_hints();
- hints = BooleanBit::set(hints, kStrictModeFunction, is_strict(language_mode));
- set_compiler_hints(hints);
-}
-
-FunctionKind SharedFunctionInfo::kind() const {
- return FunctionKindBits::decode(compiler_hints());
-}
-
-void SharedFunctionInfo::set_kind(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- int hints = compiler_hints();
- hints = FunctionKindBits::update(hints, kind);
- set_compiler_hints(hints);
-}
-
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints,
- name_should_print_as_anonymous, kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, is_anonymous_expression,
- kIsAnonymousExpression)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, deserialized, kDeserialized)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, has_no_side_effect,
- kHasNoSideEffect)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, computed_has_no_side_effect,
- kComputedHasNoSideEffect)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, debug_is_blackboxed,
- kDebugIsBlackboxed)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, computed_debug_is_blackboxed,
- kComputedDebugIsBlackboxed)
-BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, has_reported_binary_coverage,
- kHasReportedBinaryCoverage)
-
-bool Script::HasValidSource() {
- Object* src = this->source();
- if (!src->IsString()) return true;
- String* src_str = String::cast(src);
- if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsOneByteRepresentation()) {
- return ExternalOneByteString::cast(src)->resource() != NULL;
- } else if (src_str->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(src)->resource() != NULL;
- }
- return true;
-}
-
-void SharedFunctionInfo::DontAdaptArguments() {
- DCHECK(code()->kind() == Code::BUILTIN || code()->kind() == Code::STUB);
- set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
+FeedbackVector* JSFunction::feedback_vector() const {
+ DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
+ return FeedbackVector::cast(feedback_vector_cell()->value());
}
-
-int SharedFunctionInfo::start_position() const {
- return start_position_and_type() >> kStartPositionShift;
+bool JSFunction::IsOptimized() {
+ return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
-
-void SharedFunctionInfo::set_start_position(int start_position) {
- set_start_position_and_type((start_position << kStartPositionShift)
- | (start_position_and_type() & ~kStartPositionMask));
+bool JSFunction::HasOptimizedCode() {
+ return IsOptimized() ||
+ (has_feedback_vector() && feedback_vector()->has_optimized_code());
}
-
-Code* SharedFunctionInfo::code() const {
- return Code::cast(READ_FIELD(this, kCodeOffset));
+bool JSFunction::HasOptimizationMarker() {
+ return has_feedback_vector() && feedback_vector()->has_optimization_marker();
}
-
-void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
- // If the SharedFunctionInfo has bytecode we should never mark it for lazy
- // compile, since the bytecode is never flushed.
- DCHECK(value != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy) ||
- !HasBytecodeArray());
- WRITE_FIELD(this, kCodeOffset, value);
- CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
+void JSFunction::ClearOptimizationMarker() {
+ DCHECK(has_feedback_vector());
+ DCHECK(!feedback_vector()->has_optimized_code());
+ feedback_vector()->SetOptimizationMarker(OptimizationMarker::kNone);
}
-
-void SharedFunctionInfo::ReplaceCode(Code* value) {
- // If the GC metadata field is already used then the function was
- // enqueued as a code flushing candidate and we remove it now.
- if (code()->gc_metadata() != NULL) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictCandidate(this);
- }
-
- DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
-#ifdef DEBUG
- Code::VerifyRecompiledCode(code(), value);
-#endif // DEBUG
-
- set_code(value);
-}
-
-bool SharedFunctionInfo::IsInterpreted() const {
+bool JSFunction::IsInterpreted() {
return code()->is_interpreter_trampoline_builtin();
}
-bool SharedFunctionInfo::HasBaselineCode() const {
- return code()->kind() == Code::FUNCTION;
-}
-
-ScopeInfo* SharedFunctionInfo::scope_info() const {
- return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
-}
-
-
-void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
- WriteBarrierMode mode) {
- WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(GetHeap(),
- this,
- kScopeInfoOffset,
- reinterpret_cast<Object*>(value),
- mode);
-}
-
-ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
- kOuterScopeInfoOffset)
-
-bool SharedFunctionInfo::is_compiled() const {
- Builtins* builtins = GetIsolate()->builtins();
- DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
- DCHECK(code() != builtins->builtin(Builtins::kCompileOptimized));
- return code() != builtins->builtin(Builtins::kCompileLazy);
-}
-
-int SharedFunctionInfo::GetLength() const {
- DCHECK(is_compiled());
- DCHECK(HasLength());
- return length();
-}
-
-bool SharedFunctionInfo::HasLength() const {
- DCHECK_IMPLIES(length() < 0, length() == kInvalidLength);
- return length() != kInvalidLength;
-}
-
-bool SharedFunctionInfo::has_simple_parameters() {
- return scope_info()->HasSimpleParameters();
-}
-
-bool SharedFunctionInfo::HasDebugInfo() const {
- bool has_debug_info = !debug_info()->IsSmi();
- DCHECK_EQ(debug_info()->IsStruct(), has_debug_info);
- DCHECK(!has_debug_info || HasDebugCode());
- return has_debug_info;
-}
-
-DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
- DCHECK(HasDebugInfo());
- return DebugInfo::cast(debug_info());
-}
-
-bool SharedFunctionInfo::HasDebugCode() const {
- if (HasBaselineCode()) return code()->has_debug_break_slots();
- return HasBytecodeArray();
-}
-
-int SharedFunctionInfo::debugger_hints() const {
- if (HasDebugInfo()) return GetDebugInfo()->debugger_hints();
- return Smi::cast(debug_info())->value();
-}
-
-void SharedFunctionInfo::set_debugger_hints(int value) {
- if (HasDebugInfo()) {
- GetDebugInfo()->set_debugger_hints(value);
- } else {
- set_debug_info(Smi::FromInt(value));
- }
-}
-
-bool SharedFunctionInfo::IsApiFunction() {
- return function_data()->IsFunctionTemplateInfo();
-}
-
-
-FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
- DCHECK(IsApiFunction());
- return FunctionTemplateInfo::cast(function_data());
-}
-
-void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
- DCHECK(function_data()->IsUndefined(GetIsolate()));
- set_function_data(data);
-}
-
-bool SharedFunctionInfo::HasBytecodeArray() const {
- return function_data()->IsBytecodeArray();
-}
-
-BytecodeArray* SharedFunctionInfo::bytecode_array() const {
- DCHECK(HasBytecodeArray());
- return BytecodeArray::cast(function_data());
-}
-
-void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
- DCHECK(function_data()->IsUndefined(GetIsolate()));
- set_function_data(bytecode);
-}
-
-void SharedFunctionInfo::ClearBytecodeArray() {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasBytecodeArray());
- set_function_data(GetHeap()->undefined_value());
-}
-
-bool SharedFunctionInfo::HasAsmWasmData() const {
- return function_data()->IsFixedArray();
-}
-
-FixedArray* SharedFunctionInfo::asm_wasm_data() const {
- DCHECK(HasAsmWasmData());
- return FixedArray::cast(function_data());
-}
-
-void SharedFunctionInfo::set_asm_wasm_data(FixedArray* data) {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
- set_function_data(data);
-}
-
-void SharedFunctionInfo::ClearAsmWasmData() {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
- set_function_data(GetHeap()->undefined_value());
-}
-
-bool SharedFunctionInfo::HasBuiltinFunctionId() {
- return function_identifier()->IsSmi();
-}
-
-BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
- DCHECK(HasBuiltinFunctionId());
- return static_cast<BuiltinFunctionId>(
- Smi::cast(function_identifier())->value());
-}
-
-void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
- set_function_identifier(Smi::FromInt(id));
-}
-
-bool SharedFunctionInfo::HasInferredName() {
- return function_identifier()->IsString();
-}
-
-String* SharedFunctionInfo::inferred_name() {
- if (HasInferredName()) {
- return String::cast(function_identifier());
- }
- Isolate* isolate = GetIsolate();
- DCHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId());
- return isolate->heap()->empty_string();
-}
-
-void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
- DCHECK(function_identifier()->IsUndefined(GetIsolate()) || HasInferredName());
- set_function_identifier(inferred_name);
-}
-
-int SharedFunctionInfo::ic_age() {
- return ICAgeBits::decode(counters());
-}
-
-
-void SharedFunctionInfo::set_ic_age(int ic_age) {
- set_counters(ICAgeBits::update(counters(), ic_age));
-}
-
-
-int SharedFunctionInfo::deopt_count() {
- return DeoptCountBits::decode(counters());
-}
-
-
-void SharedFunctionInfo::set_deopt_count(int deopt_count) {
- set_counters(DeoptCountBits::update(counters(), deopt_count));
-}
-
-
-void SharedFunctionInfo::increment_deopt_count() {
- int value = counters();
- int deopt_count = DeoptCountBits::decode(value);
- // Saturate the deopt count when incrementing, rather than overflowing.
- if (deopt_count < DeoptCountBits::kMax) {
- set_counters(DeoptCountBits::update(value, deopt_count + 1));
- }
-}
-
-
-int SharedFunctionInfo::opt_reenable_tries() {
- return OptReenableTriesBits::decode(counters());
-}
-
-
-void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
- set_counters(OptReenableTriesBits::update(counters(), tries));
-}
-
-
-int SharedFunctionInfo::opt_count() {
- return OptCountBits::decode(opt_count_and_bailout_reason());
-}
-
-
-void SharedFunctionInfo::set_opt_count(int opt_count) {
- set_opt_count_and_bailout_reason(
- OptCountBits::update(opt_count_and_bailout_reason(), opt_count));
-}
-
-
-BailoutReason SharedFunctionInfo::disable_optimization_reason() {
- return static_cast<BailoutReason>(
- DisabledOptimizationReasonBits::decode(opt_count_and_bailout_reason()));
-}
-
-
-bool SharedFunctionInfo::has_deoptimization_support() {
- Code* code = this->code();
- return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
-}
-
-
-void SharedFunctionInfo::TryReenableOptimization() {
- int tries = opt_reenable_tries();
- set_opt_reenable_tries((tries + 1) & OptReenableTriesBits::kMax);
- // We reenable optimization whenever the number of tries is a large
- // enough power of 2.
- if (tries >= 16 && (((tries - 1) & tries) == 0)) {
- set_optimization_disabled(false);
- set_deopt_count(0);
- }
-}
-
-
-void SharedFunctionInfo::set_disable_optimization_reason(BailoutReason reason) {
- set_opt_count_and_bailout_reason(DisabledOptimizationReasonBits::update(
- opt_count_and_bailout_reason(), reason));
-}
-
-bool SharedFunctionInfo::IsUserJavaScript() {
- Object* script_obj = script();
- if (script_obj->IsUndefined(GetIsolate())) return false;
- Script* script = Script::cast(script_obj);
- return script->IsUserJavaScript();
-}
-
-bool SharedFunctionInfo::IsSubjectToDebugging() {
- return IsUserJavaScript() && !HasAsmWasmData();
-}
-
-FeedbackVector* JSFunction::feedback_vector() const {
- DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
- return FeedbackVector::cast(feedback_vector_cell()->value());
-}
-
-bool JSFunction::IsOptimized() {
- return code()->kind() == Code::OPTIMIZED_FUNCTION;
-}
-
-bool JSFunction::IsInterpreted() {
- return code()->is_interpreter_trampoline_builtin();
+bool JSFunction::ChecksOptimizationMarker() {
+ return code()->checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
- return code() == GetIsolate()->builtins()->builtin(
- Builtins::kCompileOptimized);
+ return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ OptimizationMarker::kCompileOptimized;
}
bool JSFunction::IsMarkedForConcurrentOptimization() {
- return code() == GetIsolate()->builtins()->builtin(
- Builtins::kCompileOptimizedConcurrent);
+ return has_feedback_vector() &&
+ feedback_vector()->optimization_marker() ==
+ OptimizationMarker::kCompileOptimizedConcurrent;
}
bool JSFunction::IsInOptimizationQueue() {
- return code() == GetIsolate()->builtins()->builtin(
- Builtins::kInOptimizationQueue);
+ return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ OptimizationMarker::kInOptimizationQueue;
}
@@ -6454,8 +4663,7 @@ void JSFunction::CompleteInobjectSlackTrackingIfActive() {
}
}
-
-bool Map::IsInobjectSlackTrackingInProgress() {
+bool Map::IsInobjectSlackTrackingInProgress() const {
return construction_counter() != Map::kNoSlackTracking;
}
@@ -6486,7 +4694,8 @@ Code* JSFunction::code() {
void JSFunction::set_code(Code* value) {
DCHECK(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
- WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+ RELAXED_WRITE_INTPTR_FIELD(this, kCodeEntryOffset,
+ reinterpret_cast<intptr_t>(entry));
GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
this,
HeapObject::RawField(this, kCodeEntryOffset),
@@ -6497,7 +4706,8 @@ void JSFunction::set_code(Code* value) {
void JSFunction::set_code_no_write_barrier(Code* value) {
DCHECK(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
- WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+ RELAXED_WRITE_INTPTR_FIELD(this, kCodeEntryOffset,
+ reinterpret_cast<intptr_t>(entry));
}
void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
@@ -6505,29 +4715,32 @@ void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
reason);
- shared()->ShortPrint();
+ ShortPrint();
PrintF("]\n");
}
feedback_vector()->ClearOptimizedCode();
}
}
+void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
+ DCHECK(has_feedback_vector());
+ DCHECK(ChecksOptimizationMarker());
+ DCHECK(!HasOptimizedCode());
+
+ feedback_vector()->SetOptimizationMarker(marker);
+}
+
void JSFunction::ReplaceCode(Code* code) {
- bool was_optimized = IsOptimized();
+ bool was_optimized = this->code()->kind() == Code::OPTIMIZED_FUNCTION;
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
- if (was_optimized && is_optimized) {
- ClearOptimizedCodeSlot("Replacing with another optimized code");
- }
-
set_code(code);
// Add/remove the function from the list of optimized functions for this
// context based on the state change.
if (!was_optimized && is_optimized) {
context()->native_context()->AddOptimizedFunction(this);
- }
- if (was_optimized && !is_optimized) {
+ } else if (was_optimized && !is_optimized) {
// TODO(titzer): linear in the number of optimized functions; fix!
context()->native_context()->RemoveOptimizedFunction(this);
}
@@ -6622,9 +4835,7 @@ Object* JSFunction::prototype() {
bool JSFunction::is_compiled() {
Builtins* builtins = GetIsolate()->builtins();
- return code() != builtins->builtin(Builtins::kCompileLazy) &&
- code() != builtins->builtin(Builtins::kCompileOptimized) &&
- code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
+ return code() != builtins->builtin(Builtins::kCompileLazy);
}
ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
@@ -6634,26 +4845,8 @@ ACCESSORS(JSProxy, hash, Object, kHashOffset)
bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
ACCESSORS(JSCollection, table, Object, kTableOffset)
-
-
-#define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset) \
- template<class Derived, class TableType> \
- type* OrderedHashTableIterator<Derived, TableType>::name() const { \
- return type::cast(READ_FIELD(this, offset)); \
- } \
- template<class Derived, class TableType> \
- void OrderedHashTableIterator<Derived, TableType>::set_##name( \
- type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
- }
-
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Object, kIndexOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Object, kKindOffset)
-
-#undef ORDERED_HASH_TABLE_ITERATOR_ACCESSORS
-
+ACCESSORS(JSCollectionIterator, table, Object, kTableOffset)
+ACCESSORS(JSCollectionIterator, index, Object, kIndexOffset)
ACCESSORS(JSWeakCollection, table, Object, kTableOffset)
ACCESSORS(JSWeakCollection, next, Object, kNextOffset)
@@ -6668,6 +4861,13 @@ void Foreign::set_foreign_address(Address value) {
WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
}
+template <class Derived>
+void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
+ Object* value) {
+ int entry_offset = GetDataEntryOffset(entry, relative_index);
+ RELAXED_WRITE_FIELD(this, entry_offset, value);
+ WRITE_BARRIER(GetHeap(), this, entry_offset, value);
+}
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
@@ -6692,8 +4892,6 @@ bool JSGeneratorObject::is_executing() const {
}
ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
-ACCESSORS(JSAsyncGeneratorObject, await_input_or_debug_pos, Object,
- kAwaitInputOrDebugPosOffset)
ACCESSORS(JSAsyncGeneratorObject, awaited_promise, HeapObject,
kAwaitedPromiseOffset)
@@ -6756,11 +4954,9 @@ void Code::WipeOutHeader() {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
}
WRITE_FIELD(this, kNextCodeLinkOffset, NULL);
- WRITE_FIELD(this, kGCMetadataOffset, NULL);
}
-
-Object* Code::type_feedback_info() {
+Object* Code::type_feedback_info() const {
DCHECK(kind() == FUNCTION);
return raw_type_feedback_info();
}
@@ -6773,7 +4969,7 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
value, mode);
}
-ByteArray* Code::SourcePositionTable() {
+ByteArray* Code::SourcePositionTable() const {
Object* maybe_table = source_position_table();
if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
@@ -6781,7 +4977,7 @@ ByteArray* Code::SourcePositionTable() {
->source_position_table();
}
-uint32_t Code::stub_key() {
+uint32_t Code::stub_key() const {
DCHECK(IsCodeStubOrIC());
Smi* smi_key = Smi::cast(raw_type_feedback_info());
return static_cast<uint32_t>(smi_key->value());
@@ -6793,17 +4989,11 @@ void Code::set_stub_key(uint32_t key) {
set_raw_type_feedback_info(Smi::FromInt(key));
}
-
-ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
-INT_ACCESSORS(Code, ic_age, kICAgeOffset)
-
-
-byte* Code::instruction_start() {
- return FIELD_ADDR(this, kHeaderSize);
+byte* Code::instruction_start() const {
+ return const_cast<byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
}
-
-byte* Code::instruction_end() {
+byte* Code::instruction_end() const {
return instruction_start() + instruction_size();
}
@@ -6823,17 +5013,19 @@ void Code::set_unwinding_info_size(int value) {
WRITE_UINT64_FIELD(this, GetUnwindingInfoSizeOffset(), value);
}
-byte* Code::unwinding_info_start() {
+byte* Code::unwinding_info_start() const {
DCHECK(has_unwinding_info());
- return FIELD_ADDR(this, GetUnwindingInfoSizeOffset()) + kInt64Size;
+ return const_cast<byte*>(
+ FIELD_ADDR_CONST(this, GetUnwindingInfoSizeOffset())) +
+ kInt64Size;
}
-byte* Code::unwinding_info_end() {
+byte* Code::unwinding_info_end() const {
DCHECK(has_unwinding_info());
return unwinding_info_start() + unwinding_info_size();
}
-int Code::body_size() {
+int Code::body_size() const {
int unpadded_body_size =
has_unwinding_info()
? static_cast<int>(unwinding_info_end() - instruction_start())
@@ -6841,7 +5033,7 @@ int Code::body_size() {
return RoundUp(unpadded_body_size, kObjectAlignment);
}
-int Code::SizeIncludingMetadata() {
+int Code::SizeIncludingMetadata() const {
int size = CodeSize();
size += relocation_info()->Size();
size += deoptimization_data()->Size();
@@ -6852,41 +5044,32 @@ int Code::SizeIncludingMetadata() {
return size;
}
-ByteArray* Code::unchecked_relocation_info() {
+ByteArray* Code::unchecked_relocation_info() const {
return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
}
-
-byte* Code::relocation_start() {
+byte* Code::relocation_start() const {
return unchecked_relocation_info()->GetDataStartAddress();
}
-
-int Code::relocation_size() {
+int Code::relocation_size() const {
return unchecked_relocation_info()->length();
}
-
-byte* Code::entry() {
- return instruction_start();
-}
-
+byte* Code::entry() const { return instruction_start(); }
bool Code::contains(byte* inner_pointer) {
return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
}
-
-int Code::ExecutableSize() {
+int Code::ExecutableSize() const {
// Check that the assumptions about the layout of the code object holds.
DCHECK_EQ(static_cast<int>(instruction_start() - address()),
Code::kHeaderSize);
return instruction_size() + Code::kHeaderSize;
}
-
-int Code::CodeSize() { return SizeFor(body_size()); }
-
+int Code::CodeSize() const { return SizeFor(body_size()); }
ACCESSORS(JSArray, length, Object, kLengthOffset)
@@ -7097,7 +5280,7 @@ BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
ACCESSORS(JSRegExp, source, Object, kSourceOffset)
-
+ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
JSRegExp::Type JSRegExp::TypeTag() {
Object* data = this->data();
@@ -7112,10 +5295,9 @@ int JSRegExp::CaptureCount() {
case ATOM:
return 0;
case IRREGEXP:
- return Smi::cast(DataAt(kIrregexpCaptureCountIndex))->value();
+ return Smi::ToInt(DataAt(kIrregexpCaptureCountIndex));
default:
UNREACHABLE();
- return -1;
}
}
@@ -7178,10 +5360,10 @@ ElementsKind JSObject::GetElementsKind() {
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
Map* map = fixed_array->map();
- if (IsFastSmiOrObjectElementsKind(kind)) {
+ if (IsSmiOrObjectElementsKind(kind)) {
DCHECK(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map());
- } else if (IsFastDoubleElementsKind(kind)) {
+ } else if (IsDoubleElementsKind(kind)) {
DCHECK(fixed_array->IsFixedDoubleArray() ||
fixed_array == GetHeap()->empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
@@ -7197,29 +5379,22 @@ ElementsKind JSObject::GetElementsKind() {
return kind;
}
-
-bool JSObject::HasFastObjectElements() {
- return IsFastObjectElementsKind(GetElementsKind());
+bool JSObject::HasObjectElements() {
+ return IsObjectElementsKind(GetElementsKind());
}
+bool JSObject::HasSmiElements() { return IsSmiElementsKind(GetElementsKind()); }
-bool JSObject::HasFastSmiElements() {
- return IsFastSmiElementsKind(GetElementsKind());
+bool JSObject::HasSmiOrObjectElements() {
+ return IsSmiOrObjectElementsKind(GetElementsKind());
}
-
-bool JSObject::HasFastSmiOrObjectElements() {
- return IsFastSmiOrObjectElementsKind(GetElementsKind());
+bool JSObject::HasDoubleElements() {
+ return IsDoubleElementsKind(GetElementsKind());
}
-
-bool JSObject::HasFastDoubleElements() {
- return IsFastDoubleElementsKind(GetElementsKind());
-}
-
-
-bool JSObject::HasFastHoleyElements() {
- return IsFastHoleyElementsKind(GetElementsKind());
+bool JSObject::HasHoleyElements() {
+ return IsHoleyElementsKind(GetElementsKind());
}
@@ -7286,11 +5461,15 @@ bool JSObject::HasIndexedInterceptor() {
return map()->has_indexed_interceptor();
}
+void JSGlobalObject::set_global_dictionary(GlobalDictionary* dictionary) {
+ DCHECK(IsJSGlobalObject());
+ return SetProperties(dictionary);
+}
-GlobalDictionary* JSObject::global_dictionary() {
+GlobalDictionary* JSGlobalObject::global_dictionary() {
DCHECK(!HasFastProperties());
DCHECK(IsJSGlobalObject());
- return GlobalDictionary::cast(properties());
+ return GlobalDictionary::cast(raw_properties_or_hash());
}
@@ -7299,70 +5478,6 @@ SeededNumberDictionary* JSObject::element_dictionary() {
return SeededNumberDictionary::cast(elements());
}
-
-bool Name::IsHashFieldComputed(uint32_t field) {
- return (field & kHashNotComputedMask) == 0;
-}
-
-
-bool Name::HasHashCode() {
- return IsHashFieldComputed(hash_field());
-}
-
-
-uint32_t Name::Hash() {
- // Fast case: has hash code already been computed?
- uint32_t field = hash_field();
- if (IsHashFieldComputed(field)) return field >> kHashShift;
- // Slow case: compute hash code and set it. Has to be a string.
- return String::cast(this)->ComputeAndSetHash();
-}
-
-
-bool Name::IsPrivate() {
- return this->IsSymbol() && Symbol::cast(this)->is_private();
-}
-
-bool Name::AsArrayIndex(uint32_t* index) {
- return IsString() && String::cast(this)->AsArrayIndex(index);
-}
-
-
-bool String::AsArrayIndex(uint32_t* index) {
- uint32_t field = hash_field();
- if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
- return false;
- }
- return SlowAsArrayIndex(index);
-}
-
-
-void String::SetForwardedInternalizedString(String* canonical) {
- DCHECK(IsInternalizedString());
- DCHECK(HasHashCode());
- if (canonical == this) return; // No need to forward.
- DCHECK(SlowEquals(canonical));
- DCHECK(canonical->IsInternalizedString());
- DCHECK(canonical->HasHashCode());
- WRITE_FIELD(this, kHashFieldSlot, canonical);
- // Setting the hash field to a tagged value sets the LSB, causing the hash
- // code to be interpreted as uninitialized. We use this fact to recognize
- // that we have a forwarded string.
- DCHECK(!HasHashCode());
-}
-
-
-String* String::GetForwardedInternalizedString() {
- DCHECK(IsInternalizedString());
- if (HasHashCode()) return this;
- String* canonical = String::cast(READ_FIELD(this, kHashFieldSlot));
- DCHECK(canonical->IsInternalizedString());
- DCHECK(SlowEquals(canonical));
- DCHECK(canonical->HasHashCode());
- return canonical;
-}
-
-
// static
Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y) {
Maybe<ComparisonResult> result = Compare(x, y);
@@ -7459,26 +5574,43 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
void JSReceiver::initialize_properties() {
DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_properties_dictionary()));
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_property_dictionary()));
if (map()->is_dictionary_map()) {
- WRITE_FIELD(this, kPropertiesOffset,
- GetHeap()->empty_properties_dictionary());
+ WRITE_FIELD(this, kPropertiesOrHashOffset,
+ GetHeap()->empty_property_dictionary());
} else {
- WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
+ WRITE_FIELD(this, kPropertiesOrHashOffset, GetHeap()->empty_fixed_array());
}
}
+bool JSReceiver::HasFastProperties() const {
+ DCHECK_EQ(raw_properties_or_hash()->IsDictionary(),
+ map()->is_dictionary_map());
+ return !map()->is_dictionary_map();
+}
-bool JSReceiver::HasFastProperties() {
- DCHECK_EQ(properties()->IsDictionary(), map()->is_dictionary_map());
- return !properties()->IsDictionary();
+NameDictionary* JSReceiver::property_dictionary() const {
+ DCHECK(!IsJSGlobalObject());
+ DCHECK(!HasFastProperties());
+ return NameDictionary::cast(raw_properties_or_hash());
}
+// TODO(gsathya): Pass isolate directly to this function and access
+// the heap from this.
+PropertyArray* JSReceiver::property_array() const {
+ DCHECK(HasFastProperties());
-NameDictionary* JSReceiver::property_dictionary() {
- DCHECK(!HasFastProperties());
- DCHECK(!IsJSGlobalObject());
- return NameDictionary::cast(properties());
+ Object* prop = raw_properties_or_hash();
+ if (prop->IsSmi() || prop == GetHeap()->empty_fixed_array()) {
+ return GetHeap()->empty_property_array();
+ }
+
+ return PropertyArray::cast(prop);
+}
+
+void JSReceiver::SetProperties(HeapObject* properties) {
+ // TODO(gsathya): Update the hash code here.
+ set_raw_properties_or_hash(properties);
}
Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
@@ -7703,90 +5835,86 @@ bool AccessorPair::IsJSAccessor(Object* obj) {
return obj->IsCallable() || obj->IsUndefined(GetIsolate());
}
-
-template<typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
- Handle<Object> key,
- Handle<Object> value) {
- this->SetEntry(entry, key, value, PropertyDetails(Smi::kZero));
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::ClearEntry(int entry) {
+ Object* the_hole = this->GetHeap()->the_hole_value();
+ PropertyDetails details = PropertyDetails::Empty();
+ Derived::cast(this)->SetEntry(entry, the_hole, the_hole, details);
}
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::SetEntry(int entry, Object* key, Object* value,
+ PropertyDetails details) {
+ DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
+ DCHECK(!key->IsName() || details.dictionary_index() > 0);
+ int index = DerivedHashTable::EntryToIndex(entry);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
+ this->set(index + Derived::kEntryKeyIndex, key, mode);
+ this->set(index + Derived::kEntryValueIndex, value, mode);
+ if (Shape::kHasDetails) DetailsAtPut(entry, details);
+}
-template<typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
- Handle<Object> key,
- Handle<Object> value,
- PropertyDetails details) {
- Shape::SetEntry(static_cast<Derived*>(this), entry, key, value, details);
+Object* GlobalDictionaryShape::Unwrap(Object* object) {
+ return PropertyCell::cast(object)->name();
}
+Name* NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
-template <typename Key>
-template <typename Dictionary>
-void BaseDictionaryShape<Key>::SetEntry(Dictionary* dict, int entry,
- Handle<Object> key,
- Handle<Object> value,
- PropertyDetails details) {
- STATIC_ASSERT(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
- DCHECK(!key->IsName() || details.dictionary_index() > 0);
- int index = dict->EntryToIndex(entry);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
- dict->set(index + Dictionary::kEntryKeyIndex, *key, mode);
- dict->set(index + Dictionary::kEntryValueIndex, *value, mode);
- if (Dictionary::kEntrySize == 3) {
- dict->set(index + Dictionary::kEntryDetailsIndex, details.AsSmi());
- }
+PropertyCell* GlobalDictionary::CellAt(int entry) {
+ DCHECK(KeyAt(entry)->IsPropertyCell());
+ return PropertyCell::cast(KeyAt(entry));
}
+bool GlobalDictionaryShape::IsLive(Isolate* isolate, Object* k) {
+ Heap* heap = isolate->heap();
+ DCHECK_NE(heap->the_hole_value(), k);
+ return k != heap->undefined_value();
+}
-template <typename Dictionary>
-void GlobalDictionaryShape::SetEntry(Dictionary* dict, int entry,
- Handle<Object> key, Handle<Object> value,
- PropertyDetails details) {
- STATIC_ASSERT(Dictionary::kEntrySize == 2);
- DCHECK(!key->IsName() || details.dictionary_index() > 0);
- DCHECK(value->IsPropertyCell());
- int index = dict->EntryToIndex(entry);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
- dict->set(index + Dictionary::kEntryKeyIndex, *key, mode);
- dict->set(index + Dictionary::kEntryValueIndex, *value, mode);
- PropertyCell::cast(*value)->set_property_details(details);
+bool GlobalDictionaryShape::IsKey(Isolate* isolate, Object* k) {
+ return IsLive(isolate, k) &&
+ !PropertyCell::cast(k)->value()->IsTheHole(isolate);
}
+Name* GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); }
+Object* GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); }
+
+void GlobalDictionary::SetEntry(int entry, Object* key, Object* value,
+ PropertyDetails details) {
+ DCHECK_EQ(key, PropertyCell::cast(value)->name());
+ set(EntryToIndex(entry) + kEntryKeyIndex, value);
+ DetailsAtPut(entry, details);
+}
bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
DCHECK(other->IsNumber());
return key == static_cast<uint32_t>(other->Number());
}
-
-uint32_t UnseededNumberDictionaryShape::Hash(uint32_t key) {
- return ComputeIntegerHash(key, 0);
+uint32_t UnseededNumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
+ return ComputeIntegerHash(key);
}
-
-uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
+uint32_t UnseededNumberDictionaryShape::HashForObject(Isolate* isolate,
Object* other) {
DCHECK(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
}
Map* UnseededNumberDictionaryShape::GetMap(Isolate* isolate) {
return isolate->heap()->unseeded_number_dictionary_map();
}
-uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
- return ComputeIntegerHash(key, seed);
+uint32_t SeededNumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
+ return ComputeIntegerHash(key, isolate->heap()->HashSeed());
}
-
-uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
- uint32_t seed,
- Object* other) {
+uint32_t SeededNumberDictionaryShape::HashForObject(Isolate* isolate,
+ Object* other) {
DCHECK(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()),
+ isolate->heap()->HashSeed());
}
@@ -7796,21 +5924,28 @@ Handle<Object> NumberDictionaryShape::AsHandle(Isolate* isolate, uint32_t key) {
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
- DCHECK(Name::cast(other)->IsUniqueName());
+ DCHECK(other->IsTheHole(key->GetIsolate()) ||
+ Name::cast(other)->IsUniqueName());
DCHECK(key->IsUniqueName());
return *key == other;
}
-
-uint32_t NameDictionaryShape::Hash(Handle<Name> key) {
+uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
return key->Hash();
}
-
-uint32_t NameDictionaryShape::HashForObject(Handle<Name> key, Object* other) {
+uint32_t NameDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
return Name::cast(other)->Hash();
}
+bool GlobalDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
+ DCHECK(PropertyCell::cast(other)->name()->IsUniqueName());
+ return *key == PropertyCell::cast(other)->name();
+}
+
+uint32_t GlobalDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
+ return PropertyCell::cast(other)->name()->Hash();
+}
Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
Handle<Name> key) {
@@ -7821,46 +5956,33 @@ Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
template <typename Dictionary>
PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary* dict, int entry) {
- DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
- Object* raw_value = dict->ValueAt(entry);
- DCHECK(raw_value->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(raw_value);
- return cell->property_details();
+ DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
+ return dict->CellAt(entry)->property_details();
}
template <typename Dictionary>
void GlobalDictionaryShape::DetailsAtPut(Dictionary* dict, int entry,
PropertyDetails value) {
- DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
- Object* raw_value = dict->ValueAt(entry);
- DCHECK(raw_value->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(raw_value);
+ DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
+ PropertyCell* cell = dict->CellAt(entry);
+ if (cell->property_details().IsReadOnly() != value.IsReadOnly()) {
+ cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ cell->GetIsolate(), DependentCode::kPropertyCellChangedGroup);
+ }
cell->set_property_details(value);
}
-
-template <typename Dictionary>
-bool GlobalDictionaryShape::IsDeleted(Dictionary* dict, int entry) {
- DCHECK(dict->ValueAt(entry)->IsPropertyCell());
- Isolate* isolate = dict->GetIsolate();
- return PropertyCell::cast(dict->ValueAt(entry))->value()->IsTheHole(isolate);
-}
-
-
bool ObjectHashTableShape::IsMatch(Handle<Object> key, Object* other) {
return key->SameValue(other);
}
-
-uint32_t ObjectHashTableShape::Hash(Handle<Object> key) {
- return Smi::cast(key->GetHash())->value();
+uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
+ return Smi::ToInt(key->GetHash());
}
-
-uint32_t ObjectHashTableShape::HashForObject(Handle<Object> key,
- Object* other) {
- return Smi::cast(other->GetHash())->value();
+uint32_t ObjectHashTableShape::HashForObject(Isolate* isolate, Object* other) {
+ return Smi::ToInt(other->GetHash());
}
@@ -7869,18 +5991,10 @@ Handle<Object> ObjectHashTableShape::AsHandle(Isolate* isolate,
return key;
}
-
-Handle<ObjectHashTable> ObjectHashTable::Shrink(
- Handle<ObjectHashTable> table, Handle<Object> key) {
- return DerivedHashTable::Shrink(table, key);
-}
-
-
-Object* OrderedHashMap::ValueAt(int entry) {
- return get(EntryToIndex(entry) + kValueOffset);
+Handle<ObjectHashTable> ObjectHashTable::Shrink(Handle<ObjectHashTable> table) {
+ return DerivedHashTable::Shrink(table);
}
-
template <int entrysize>
bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) {
if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
@@ -7888,9 +6002,9 @@ bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) {
: *key == other;
}
-
template <int entrysize>
-uint32_t WeakHashTableShape<entrysize>::Hash(Handle<Object> key) {
+uint32_t WeakHashTableShape<entrysize>::Hash(Isolate* isolate,
+ Handle<Object> key) {
intptr_t hash =
key->IsWeakCell()
? reinterpret_cast<intptr_t>(WeakCell::cast(*key)->value())
@@ -7898,9 +6012,8 @@ uint32_t WeakHashTableShape<entrysize>::Hash(Handle<Object> key) {
return (uint32_t)(hash & 0xFFFFFFFF);
}
-
template <int entrysize>
-uint32_t WeakHashTableShape<entrysize>::HashForObject(Handle<Object> key,
+uint32_t WeakHashTableShape<entrysize>::HashForObject(Isolate* isolate,
Object* other) {
if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
intptr_t hash = reinterpret_cast<intptr_t>(other);
@@ -7973,10 +6086,10 @@ void JSArray::SetContent(Handle<JSArray> array,
ALLOW_COPIED_DOUBLE_ELEMENTS);
DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() &&
- IsFastDoubleElementsKind(array->GetElementsKind())) ||
+ IsDoubleElementsKind(array->GetElementsKind())) ||
((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
- (IsFastObjectElementsKind(array->GetElementsKind()) ||
- (IsFastSmiElementsKind(array->GetElementsKind()) &&
+ (IsObjectElementsKind(array->GetElementsKind()) ||
+ (IsSmiElementsKind(array->GetElementsKind()) &&
Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
array->set_elements(*storage);
array->set_length(Smi::FromInt(storage->length()));
@@ -7989,13 +6102,13 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
int TypeFeedbackInfo::ic_total_count() {
- int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ int current = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
return ICTotalCountField::decode(current);
}
void TypeFeedbackInfo::set_ic_total_count(int count) {
- int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
value = ICTotalCountField::update(value,
ICTotalCountField::decode(count));
WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(value));
@@ -8003,14 +6116,14 @@ void TypeFeedbackInfo::set_ic_total_count(int count) {
int TypeFeedbackInfo::ic_with_type_info_count() {
- int current = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int current = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
return ICsWithTypeInfoCountField::decode(current);
}
void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
if (delta == 0) return;
- int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
// We can get negative count here when the type-feedback info is
// shared between two code objects. The can only happen when
@@ -8026,7 +6139,7 @@ void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
int TypeFeedbackInfo::ic_generic_count() {
- return Smi::cast(READ_FIELD(this, kStorage3Offset))->value();
+ return Smi::ToInt(READ_FIELD(this, kStorage3Offset));
}
@@ -8048,7 +6161,7 @@ void TypeFeedbackInfo::initialize_storage() {
void TypeFeedbackInfo::change_own_type_change_checksum() {
- int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
int checksum = OwnTypeChangeChecksum::decode(value);
checksum = (checksum + 1) % (1 << kTypeChangeChecksumBits);
value = OwnTypeChangeChecksum::update(value, checksum);
@@ -8060,7 +6173,7 @@ void TypeFeedbackInfo::change_own_type_change_checksum() {
void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
- int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
int mask = (1 << kTypeChangeChecksumBits) - 1;
value = InlinedTypeChangeChecksum::update(value, checksum & mask);
// Ensure packed bit field is in Smi range.
@@ -8071,21 +6184,17 @@ void TypeFeedbackInfo::set_inlined_type_change_checksum(int checksum) {
int TypeFeedbackInfo::own_type_change_checksum() {
- int value = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
+ int value = Smi::ToInt(READ_FIELD(this, kStorage1Offset));
return OwnTypeChangeChecksum::decode(value);
}
bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
- int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
+ int value = Smi::ToInt(READ_FIELD(this, kStorage2Offset));
int mask = (1 << kTypeChangeChecksumBits) - 1;
return InlinedTypeChangeChecksum::decode(value) == (checksum & mask);
}
-
-SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
-
-
Relocatable::Relocatable(Isolate* isolate) {
isolate_ = isolate;
prev_ = isolate->relocatable_top();
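// Illustrative sketch, not part of the patch: the recurring rewrite in the
// hunks above replaces the two-step Smi read `Smi::cast(x)->value()` with the
// single helper `Smi::ToInt(x)`. Both forms assume x is already known to be a
// Smi (typically guarded by x->IsSmi() or a DCHECK). Names below are
// hypothetical.
int ReadTaggedCount(Object* storage) {
  DCHECK(storage->IsSmi());
  // Old spelling: return Smi::cast(storage)->value();
  return Smi::ToInt(storage);
}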
@@ -8102,82 +6211,21 @@ Relocatable::~Relocatable() {
template<class Derived, class TableType>
Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType* table(TableType::cast(this->table()));
- int index = Smi::cast(this->index())->value();
+ int index = Smi::ToInt(this->index());
Object* key = table->KeyAt(index);
DCHECK(!key->IsTheHole(table->GetIsolate()));
return key;
}
-void JSSetIterator::PopulateValueArray(FixedArray* array) {
- array->set(0, CurrentKey());
-}
-
-
-void JSMapIterator::PopulateValueArray(FixedArray* array) {
- array->set(0, CurrentKey());
- array->set(1, CurrentValue());
-}
-
-
Object* JSMapIterator::CurrentValue() {
OrderedHashMap* table(OrderedHashMap::cast(this->table()));
- int index = Smi::cast(this->index())->value();
+ int index = Smi::ToInt(this->index());
Object* value = table->ValueAt(index);
DCHECK(!value->IsTheHole(table->GetIsolate()));
return value;
}
-
-String::SubStringRange::SubStringRange(String* string, int first, int length)
- : string_(string),
- first_(first),
- length_(length == -1 ? string->length() : length) {}
-
-
-class String::SubStringRange::iterator final {
- public:
- typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
- typedef uc16 value_type;
- typedef uc16* pointer;
- typedef uc16& reference;
-
- iterator(const iterator& other)
- : content_(other.content_), offset_(other.offset_) {}
-
- uc16 operator*() { return content_.Get(offset_); }
- bool operator==(const iterator& other) const {
- return content_.UsesSameString(other.content_) && offset_ == other.offset_;
- }
- bool operator!=(const iterator& other) const {
- return !content_.UsesSameString(other.content_) || offset_ != other.offset_;
- }
- iterator& operator++() {
- ++offset_;
- return *this;
- }
- iterator operator++(int);
-
- private:
- friend class String;
- iterator(String* from, int offset)
- : content_(from->GetFlatContent()), offset_(offset) {}
- String::FlatContent content_;
- int offset_;
-};
-
-
-String::SubStringRange::iterator String::SubStringRange::begin() {
- return String::SubStringRange::iterator(string_, first_);
-}
-
-
-String::SubStringRange::iterator String::SubStringRange::end() {
- return String::SubStringRange::iterator(string_, first_ + length_);
-}
-
-
// Predictably converts HeapObject* or Address to uint32 by calculating
// offset of the address in respective MemoryChunk.
static inline uint32_t ObjectAddressForHashing(void* object) {
@@ -8195,10 +6243,10 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, uint32_t index,
entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
}
return isolate->factory()->NewJSArrayWithElements(entry_storage,
- FAST_ELEMENTS, 2);
+ PACKED_ELEMENTS, 2);
}
-static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Name> key,
+static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
Handle<Object> value) {
Handle<FixedArray> entry_storage =
isolate->factory()->NewUninitializedFixedArray(2);
@@ -8207,7 +6255,7 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Name> key,
entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
}
return isolate->factory()->NewJSArrayWithElements(entry_storage,
- FAST_ELEMENTS, 2);
+ PACKED_ELEMENTS, 2);
}
ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
@@ -8223,52 +6271,6 @@ ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
-#undef INT_ACCESSORS
-#undef ACCESSORS
-#undef ACCESSORS_CHECKED
-#undef ACCESSORS_CHECKED2
-#undef SMI_ACCESSORS
-#undef SYNCHRONIZED_SMI_ACCESSORS
-#undef NOBARRIER_SMI_ACCESSORS
-#undef BOOL_GETTER
-#undef BOOL_ACCESSORS
-#undef FIELD_ADDR
-#undef FIELD_ADDR_CONST
-#undef READ_FIELD
-#undef NOBARRIER_READ_FIELD
-#undef WRITE_FIELD
-#undef NOBARRIER_WRITE_FIELD
-#undef WRITE_BARRIER
-#undef CONDITIONAL_WRITE_BARRIER
-#undef READ_DOUBLE_FIELD
-#undef WRITE_DOUBLE_FIELD
-#undef READ_INT_FIELD
-#undef WRITE_INT_FIELD
-#undef READ_INTPTR_FIELD
-#undef WRITE_INTPTR_FIELD
-#undef READ_UINT8_FIELD
-#undef WRITE_UINT8_FIELD
-#undef READ_INT8_FIELD
-#undef WRITE_INT8_FIELD
-#undef READ_UINT16_FIELD
-#undef WRITE_UINT16_FIELD
-#undef READ_INT16_FIELD
-#undef WRITE_INT16_FIELD
-#undef READ_UINT32_FIELD
-#undef WRITE_UINT32_FIELD
-#undef READ_INT32_FIELD
-#undef WRITE_INT32_FIELD
-#undef READ_FLOAT_FIELD
-#undef WRITE_FLOAT_FIELD
-#undef READ_UINT64_FIELD
-#undef WRITE_UINT64_FIELD
-#undef READ_INT64_FIELD
-#undef WRITE_INT64_FIELD
-#undef READ_BYTE_FIELD
-#undef WRITE_BYTE_FIELD
-#undef NOBARRIER_READ_BYTE_FIELD
-#undef NOBARRIER_WRITE_BYTE_FIELD
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 2ea68863cf..187f56ecde 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -12,6 +12,7 @@
#include "src/disassembler.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
@@ -29,8 +30,8 @@ void Object::Print() {
void Object::Print(std::ostream& os) { // NOLINT
if (IsSmi()) {
- os << "Smi: " << std::hex << "0x" << Smi::cast(this)->value();
- os << std::dec << " (" << Smi::cast(this)->value() << ")\n";
+ os << "Smi: " << std::hex << "0x" << Smi::ToInt(this);
+ os << std::dec << " (" << Smi::ToInt(this) << ")\n";
} else {
HeapObject::cast(this)->HeapObjectPrint(os);
}
@@ -45,6 +46,7 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << map()->instance_type();
}
os << "]";
+ if (GetHeap()->InOldSpace(this)) os << " in OldSpace";
}
@@ -80,6 +82,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(os);
break;
+ case PROPERTY_ARRAY_TYPE:
+ PropertyArray::cast(this)->PropertyArrayPrint(os);
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayPrint(os);
break;
@@ -151,6 +156,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
+ case WASM_INSTANCE_TYPE: // TODO(titzer): debug printing for wasm objects
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
case JS_PROMISE_TYPE:
@@ -195,10 +204,13 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_MAP_TYPE:
JSMap::cast(this)->JSMapPrint(os);
break;
- case JS_SET_ITERATOR_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
JSSetIterator::cast(this)->JSSetIteratorPrint(os);
break;
- case JS_MAP_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorPrint(os);
break;
case JS_WEAK_MAP_TYPE:
@@ -275,6 +287,7 @@ void FixedTypedArray<Traits>::FixedTypedArrayPrint(
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
+ int nof_inobject_properties = map()->GetInObjectProperties();
int i = 0;
for (; i < map()->NumberOfOwnDescriptors(); i++) {
os << "\n ";
@@ -297,10 +310,16 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
os << " ";
details.PrintAsFastTo(os, PropertyDetails::kForProperties);
+ if (details.location() != kField) continue;
+ int field_index = details.field_index();
+ if (nof_inobject_properties <= field_index) {
+ field_index -= nof_inobject_properties;
+ os << " properties[" << field_index << "]";
+ }
}
return i > 0;
} else if (IsJSGlobalObject()) {
- global_dictionary()->Print(os);
+ JSGlobalObject::cast(this)->global_dictionary()->Print(os);
} else {
property_dictionary()->Print(os);
}
@@ -383,16 +402,34 @@ void PrintFixedArrayElements(std::ostream& os, FixedArray* array) {
}
}
+void PrintDictionaryElements(std::ostream& os, FixedArrayBase* elements) {
+ // Print some internal fields
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements);
+ if (dict->requires_slow_elements()) {
+ os << "\n - requires_slow_elements";
+ } else {
+ os << "\n - max_number_key: " << dict->max_number_key();
+ }
+ dict->Print(os);
+}
+
void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
SloppyArgumentsElements* elements) {
+ Isolate* isolate = elements->GetIsolate();
FixedArray* arguments_store = elements->arguments();
os << "\n 0: context= " << Brief(elements->context())
<< "\n 1: arguments_store= " << Brief(arguments_store)
<< "\n parameter to context slot map:";
for (uint32_t i = 0; i < elements->parameter_map_length(); i++) {
uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
+ Object* mapped_entry = elements->get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
- << ")= " << Brief(elements->get_mapped_entry(i));
+ << ")= " << Brief(mapped_entry);
+ if (mapped_entry->IsTheHole(isolate)) {
+ os << " in the arguments_store[" << i << "]";
+ } else {
+ os << " in the context";
+ }
}
if (arguments_store->length() == 0) return;
os << "\n }"
@@ -402,7 +439,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
PrintFixedArrayElements(os, arguments_store);
} else {
DCHECK_EQ(kind, SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
- SeededNumberDictionary::cast(arguments_store)->Print(os);
+ PrintDictionaryElements(os, arguments_store);
}
os << "\n }";
}
@@ -418,16 +455,16 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
return;
}
switch (map()->elements_kind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS: {
PrintFixedArrayElements(os, FixedArray::cast(elements()));
break;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
DoPrintElements<FixedDoubleArray>(os, elements());
break;
}
@@ -442,7 +479,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case DICTIONARY_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- SeededNumberDictionary::cast(elements())->Print(os);
+ PrintDictionaryElements(os, elements());
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
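// Reference sketch, not part of the patch: the ElementsKind renaming applied
// throughout these hunks. The FAST_ prefix is dropped and each kind is named
// for whether its backing store may contain holes:
//   FAST_SMI_ELEMENTS          -> PACKED_SMI_ELEMENTS
//   FAST_HOLEY_SMI_ELEMENTS    -> HOLEY_SMI_ELEMENTS
//   FAST_ELEMENTS              -> PACKED_ELEMENTS
//   FAST_HOLEY_ELEMENTS        -> HOLEY_ELEMENTS
//   FAST_DOUBLE_ELEMENTS       -> PACKED_DOUBLE_ELEMENTS
//   FAST_HOLEY_DOUBLE_ELEMENTS -> HOLEY_DOUBLE_ELEMENTS
// For example, a check that previously read
//   if (IsFastHoleyElementsKind(kind)) ...
// now reads
//   if (IsHoleyElementsKind(kind)) ...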
@@ -471,9 +508,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
os << "\n - elements = " << Brief(obj->elements()) << " ["
<< ElementsKindToString(obj->map()->elements_kind());
- if (obj->elements()->map() == obj->GetHeap()->fixed_cow_array_map()) {
- os << " (COW)";
- }
+ if (obj->elements()->IsCowArray()) os << " (COW)";
os << "]";
if (obj->GetEmbedderFieldCount() > 0) {
os << "\n - embedder fields: " << obj->GetEmbedderFieldCount();
@@ -483,7 +518,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
bool print_elements = true) {
- os << "\n - properties = " << Brief(obj->properties()) << " {";
+ os << "\n - properties = " << Brief(obj->raw_properties_or_hash()) << " {";
if (obj->PrintProperties(os)) os << "\n ";
os << "}\n";
if (print_elements && obj->elements()->length() > 0) {
@@ -614,6 +649,18 @@ void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+// TODO(gsathya): Templatize PrintFixedArrayElements to print this as
+// well.
+void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PropertyArray");
+ os << "\n - map = " << Brief(map());
+ os << "\n - length: " << length();
+ for (int i = 0; i < length(); i++) {
+ os << "\n" << i << " : " << std::setw(8) << Brief(get(i));
+ }
+
+ os << "\n";
+}
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedDoubleArray");
@@ -770,7 +817,6 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
case FeedbackSlotKind::kGeneral:
case FeedbackSlotKind::kTypeProfile:
break;
- case FeedbackSlotKind::kToBoolean:
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
@@ -860,15 +906,14 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
} else {
// TODO(svenpanne) Add some basic formatting to our streams.
ScopedVector<char> buf(100);
- SNPrintF(
- buf, "\n - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
- weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
- year()->IsSmi() ? Smi::cast(year())->value() : -1,
- month()->IsSmi() ? Smi::cast(month())->value() : -1,
- day()->IsSmi() ? Smi::cast(day())->value() : -1,
- hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
- min()->IsSmi() ? Smi::cast(min())->value() : -1,
- sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
+ SNPrintF(buf, "\n - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
+ weekdays[weekday()->IsSmi() ? Smi::ToInt(weekday()) + 1 : 0],
+ year()->IsSmi() ? Smi::ToInt(year()) : -1,
+ month()->IsSmi() ? Smi::ToInt(month()) : -1,
+ day()->IsSmi() ? Smi::ToInt(day()) : -1,
+ hour()->IsSmi() ? Smi::ToInt(hour()) : -1,
+ min()->IsSmi() ? Smi::ToInt(min()) : -1,
+ sec()->IsSmi() ? Smi::ToInt(sec()) : -1);
os << buf.start();
}
JSObjectPrintBody(os, this);
@@ -901,37 +946,23 @@ void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
-
-template <class Derived, class TableType>
-void
-OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIteratorPrint(
+void JSCollectionIterator::JSCollectionIteratorPrint(
std::ostream& os) { // NOLINT
os << "\n - table = " << Brief(table());
os << "\n - index = " << Brief(index());
- os << "\n - kind = " << Brief(kind());
os << "\n";
}
-template void OrderedHashTableIterator<
- JSSetIterator,
- OrderedHashSet>::OrderedHashTableIteratorPrint(std::ostream& os); // NOLINT
-
-
-template void OrderedHashTableIterator<
- JSMapIterator,
- OrderedHashMap>::OrderedHashTableIteratorPrint(std::ostream& os); // NOLINT
-
-
void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSSetIterator");
- OrderedHashTableIteratorPrint(os);
+ JSCollectionIteratorPrint(os);
}
void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSMapIterator");
- OrderedHashTableIteratorPrint(os);
+ JSCollectionIteratorPrint(os);
}
@@ -953,7 +984,12 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayBuffer");
os << "\n - backing_store = " << backing_store();
os << "\n - byte_length = " << Brief(byte_length());
+ if (is_external()) os << "\n - external";
+ if (is_neuterable()) os << "\n - neuterable";
if (was_neutered()) os << "\n - neutered";
+ if (is_shared()) os << "\n - shared";
+ if (has_guard_region()) os << "\n - has_guard_region";
+ if (is_wasm_buffer()) os << "\n - wasm_buffer";
JSObjectPrintBody(os, this, !was_neutered());
}
@@ -1007,25 +1043,6 @@ void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
}
-void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "Function");
- os << "\n - initial_map = ";
- if (has_initial_map()) os << Brief(initial_map());
- os << "\n - shared_info = " << Brief(shared());
- os << "\n - name = " << Brief(shared()->name());
- os << "\n - formal_parameter_count = "
- << shared()->internal_formal_parameter_count();
- if (IsGeneratorFunction(shared()->kind())) {
- os << "\n - generator";
- } else if (IsAsyncFunction(shared()->kind())) {
- os << "\n - async";
- }
- os << "\n - context = " << Brief(context());
- os << "\n - feedback vector cell = " << Brief(feedback_vector_cell());
- os << "\n - code = " << Brief(code());
- JSObjectPrintBody(os, this);
-}
-
namespace {
std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
@@ -1055,17 +1072,47 @@ std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
} // namespace
+void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "Function");
+ os << "\n - initial_map = ";
+ if (has_initial_map()) os << Brief(initial_map());
+ os << "\n - shared_info = " << Brief(shared());
+ os << "\n - name = " << Brief(shared()->name());
+ os << "\n - formal_parameter_count = "
+ << shared()->internal_formal_parameter_count();
+ os << "\n - kind = " << shared()->kind();
+ os << "\n - context = " << Brief(context());
+ os << "\n - feedback vector cell = " << Brief(feedback_vector_cell());
+ os << "\n - code = " << Brief(code());
+ if (IsInterpreted()) {
+ os << "\n - interpreted";
+ if (shared()->HasBytecodeArray()) {
+ os << "\n - bytecode = " << shared()->bytecode_array();
+ }
+ }
+ JSObjectPrintBody(os, this);
+}
+
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
- os << "\n - name = " << Brief(name());
+ os << "\n - name = ";
+ if (has_shared_name()) {
+ os << Brief(raw_name());
+ } else {
+ os << "<no-shared-name>";
+ }
os << "\n - kind = " << kind();
+ os << "\n - function_map_index = " << function_map_index();
os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
os << "\n - expected_nof_properties = " << expected_nof_properties();
os << "\n - language_mode = " << language_mode();
os << "\n - ast_node_count = " << ast_node_count();
os << "\n - instance class name = ";
instance_class_name()->Print(os);
- os << "\n - code = " << Brief(code());
+ os << " - code = " << Brief(code());
+ if (HasBytecodeArray()) {
+ os << "\n - bytecode_array = " << bytecode_array();
+ }
if (HasSourceCode()) {
os << "\n - source code = ";
String* source = String::cast(Script::cast(script())->source());
@@ -1096,8 +1143,10 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - length = " << length();
os << "\n - feedback_metadata = ";
feedback_metadata()->FeedbackMetadataPrint(os);
- if (HasBytecodeArray()) {
- os << "\n - bytecode_array = " << bytecode_array();
+ if (HasPreParsedScopeData()) {
+ os << "\n - preparsed scope data = " << preparsed_scope_data();
+ } else {
+ os << "\n - no preparsed scope data";
}
os << "\n";
}
@@ -1132,6 +1181,8 @@ void Cell::CellPrint(std::ostream& os) { // NOLINT
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
+ os << "\n - name: ";
+ name()->NamePrint(os);
os << "\n - value: " << Brief(value());
os << "\n - details: ";
property_details().PrintAsSlowTo(os);
@@ -1277,19 +1328,13 @@ void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
void Module::ModulePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Module");
- // TODO(neis): Simplify once modules have a script field.
- if (!evaluated()) {
- SharedFunctionInfo* shared = code()->IsSharedFunctionInfo()
- ? SharedFunctionInfo::cast(code())
- : JSFunction::cast(code())->shared();
- Object* origin = Script::cast(shared->script())->GetNameOrSourceURL();
- os << "\n - origin: " << Brief(origin);
- }
+ os << "\n - origin: " << Brief(script()->GetNameOrSourceURL());
os << "\n - code: " << Brief(code());
os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
- os << "\n - instantiated, evaluated: " << instantiated() << ", "
- << evaluated();
+ os << "\n - script: " << Brief(script());
+ os << "\n - status: " << status();
+ os << "\n - exception: " << Brief(exception());
os << "\n";
}
@@ -1411,13 +1456,13 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
os << "\n - pretenure decision: "
<< Brief(Smi::FromInt(pretenure_decision()));
os << "\n - transition_info: ";
- if (transition_info()->IsSmi()) {
+ if (!PointsToLiteral()) {
ElementsKind kind = GetElementsKind();
os << "Array allocation with ElementsKind " << ElementsKindToString(kind);
- } else if (transition_info()->IsJSArray()) {
- os << "Array literal " << Brief(transition_info());
+ } else if (boilerplate()->IsJSArray()) {
+ os << "Array literal with boilerplate " << Brief(boilerplate());
} else {
- os << "unknown transition_info " << Brief(transition_info());
+ os << "Object literal with boilerplate " << Brief(boilerplate());
}
os << "\n";
}
@@ -1455,10 +1500,13 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "DebugInfo");
+ os << "\n - flags: " << flags();
+ os << "\n - debugger_hints: " << debugger_hints();
os << "\n - shared: " << Brief(shared());
os << "\n - debug bytecode array: " << Brief(debug_bytecode_array());
os << "\n - break_points: ";
break_points()->Print(os);
+ os << "\n - coverage_info: " << Brief(coverage_info());
}
@@ -1505,21 +1553,27 @@ void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
os << "<all tagged>";
} else if (IsSmi()) {
os << "fast";
- PrintBitMask(os, static_cast<uint32_t>(Smi::cast(this)->value()));
+ PrintBitMask(os, static_cast<uint32_t>(Smi::ToInt(this)));
} else if (IsOddball() &&
IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
os << "<uninitialized>";
} else {
os << "slow";
- int len = length();
- for (int i = 0; i < len; i++) {
+ int num_words = number_of_layout_words();
+ for (int i = 0; i < num_words; i++) {
if (i > 0) os << " |";
- PrintBitMask(os, get_scalar(i));
+ PrintBitMask(os, get_layout_word(i));
}
}
os << "\n";
}
+void PreParsedScopeData::PreParsedScopeDataPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PreParsedScopeData");
+ os << "\n - scope_data: " << Brief(scope_data());
+ os << "\n - child_data: " << Brief(child_data());
+ os << "\n";
+}
#endif // OBJECT_PRINT
@@ -1667,6 +1721,56 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
}
}
+void TransitionArray::PrintTransitionTree(Map* map) {
+ OFStream os(stdout);
+ os << "map= " << Brief(map);
+ PrintTransitionTree(os, map);
+ os << "\n" << std::flush;
+}
+
+// static
+void TransitionArray::PrintTransitionTree(std::ostream& os, Map* map,
+ int level) {
+ Object* transitions = map->raw_transitions();
+ int num_transitions = NumberOfTransitions(transitions);
+ if (num_transitions == 0) return;
+ for (int i = 0; i < num_transitions; i++) {
+ Name* key = GetKey(transitions, i);
+ Map* target = GetTarget(transitions, i);
+ os << std::endl
+ << " " << level << "/" << i << ":" << std::setw(level * 2 + 2) << " ";
+ std::stringstream ss;
+ ss << Brief(target);
+ os << std::left << std::setw(50) << ss.str() << ": ";
+
+ Heap* heap = key->GetHeap();
+ if (key == heap->nonextensible_symbol()) {
+ os << "to non-extensible";
+ } else if (key == heap->sealed_symbol()) {
+ os << "to sealed ";
+ } else if (key == heap->frozen_symbol()) {
+ os << "to frozen";
+ } else if (key == heap->elements_transition_symbol()) {
+ os << "to " << ElementsKindToString(target->elements_kind());
+ } else if (key == heap->strict_function_transition_symbol()) {
+ os << "to strict function";
+ } else {
+#ifdef OBJECT_PRINT
+ key->NamePrint(os);
+#else
+ key->ShortPrint(os);
+#endif
+ os << " ";
+ DCHECK(!IsSpecialTransition(key));
+ os << "to ";
+ int descriptor = target->LastAdded();
+ DescriptorArray* descriptors = target->instance_descriptors();
+ descriptors->PrintDescriptorDetails(os, descriptor,
+ PropertyDetails::kForTransitions);
+ }
+ TransitionArray::PrintTransitionTree(os, target, level + 1);
+ }
+}
void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
Object* transitions = map()->raw_transitions();
@@ -1693,7 +1797,7 @@ extern void _v8_internal_Print_Code(void* object) {
extern void _v8_internal_Print_FeedbackMetadata(void* object) {
if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Not a feedback metadata object\n");
+ printf("Please provide a feedback metadata object\n");
} else {
reinterpret_cast<i::FeedbackMetadata*>(object)->Print();
}
@@ -1701,7 +1805,7 @@ extern void _v8_internal_Print_FeedbackMetadata(void* object) {
extern void _v8_internal_Print_FeedbackVector(void* object) {
if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Not a feedback vector\n");
+ printf("Please provide a feedback vector\n");
} else {
reinterpret_cast<i::FeedbackVector*>(object)->Print();
}
@@ -1709,7 +1813,7 @@ extern void _v8_internal_Print_FeedbackVector(void* object) {
extern void _v8_internal_Print_DescriptorArray(void* object) {
if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Not a descriptor array\n");
+ printf("Please provide a descriptor array\n");
} else {
reinterpret_cast<i::DescriptorArray*>(object)->Print();
}
@@ -1718,7 +1822,7 @@ extern void _v8_internal_Print_DescriptorArray(void* object) {
extern void _v8_internal_Print_LayoutDescriptor(void* object) {
i::Object* o = reinterpret_cast<i::Object*>(object);
if (!o->IsLayoutDescriptor()) {
- printf("Not a layout descriptor\n");
+ printf("Please provide a layout descriptor\n");
} else {
reinterpret_cast<i::LayoutDescriptor*>(object)->Print();
}
@@ -1726,7 +1830,7 @@ extern void _v8_internal_Print_LayoutDescriptor(void* object) {
extern void _v8_internal_Print_TransitionArray(void* object) {
if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
- printf("Not a transition array\n");
+ printf("Please provide a transition array\n");
} else {
reinterpret_cast<i::TransitionArray*>(object)->Print();
}
@@ -1736,3 +1840,14 @@ extern void _v8_internal_Print_StackTrace() {
i::Isolate* isolate = i::Isolate::Current();
isolate->PrintStack(stdout);
}
+
+extern void _v8_internal_Print_TransitionTree(void* object) {
+ i::Object* o = reinterpret_cast<i::Object*>(object);
+ if (!o->IsMap()) {
+ printf("Please provide a valid Map\n");
+ } else {
+#if defined(DEBUG) || defined(OBJECT_PRINT)
+ i::TransitionArray::PrintTransitionTree(reinterpret_cast<i::Map*>(object));
+#endif
+ }
+}
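// Usage note (an assumption, not stated in the patch): the _v8_internal_Print_*
// hooks are plain C symbols meant to be invoked from a native debugger rather
// than from V8 itself, e.g. in gdb:
//   (gdb) call _v8_internal_Print_TransitionTree((void*)0xdeadbeef)
// where 0xdeadbeef stands for a tagged Map pointer taken from the session.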
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index a1d87342ea..6a6d265f2c 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -57,8 +57,11 @@
#include "src/objects-body-descriptors-inl.h"
#include "src/objects/code-cache-inl.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/hash-table.h"
#include "src/objects/map.h"
+#include "src/parsing/preparsed-scope-data.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/regexp/jsregexp.h"
@@ -68,7 +71,7 @@
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
-#include "src/utils.h"
+#include "src/utils-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
@@ -91,7 +94,6 @@ std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
#undef WRITE_TYPE
}
UNREACHABLE();
- return os << "UNKNOWN"; // Keep the compiler happy.
}
Handle<FieldType> Object::OptimalType(Isolate* isolate,
@@ -420,7 +422,7 @@ MaybeHandle<Object> Object::ConvertToLength(Isolate* isolate,
Handle<Object> input) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
if (input->IsSmi()) {
- int value = std::max(Smi::cast(*input)->value(), 0);
+ int value = std::max(Smi::ToInt(*input), 0);
return handle(Smi::FromInt(value), isolate);
}
double len = DoubleToInteger(input->Number());
@@ -438,7 +440,7 @@ MaybeHandle<Object> Object::ConvertToIndex(
MessageTemplate::Template error_index) {
if (input->IsUndefined(isolate)) return handle(Smi::kZero, isolate);
ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
- if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
+ if (input->IsSmi() && Smi::ToInt(*input) >= 0) return input;
double len = DoubleToInteger(input->Number()) + 0.0;
auto js_len = isolate->factory()->NewNumber(len);
if (len < 0.0 || len > kMaxSafeInteger) {
@@ -448,7 +450,7 @@ MaybeHandle<Object> Object::ConvertToIndex(
}
bool Object::BooleanValue() {
- if (IsSmi()) return Smi::cast(this)->value() != 0;
+ if (IsSmi()) return Smi::ToInt(this) != 0;
DCHECK(IsHeapObject());
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (IsBoolean()) return IsTrue(isolate);
@@ -852,23 +854,6 @@ MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
return result;
}
-Maybe<bool> Object::IsArray(Handle<Object> object) {
- if (object->IsJSArray()) return Just(true);
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- Isolate* isolate = proxy->GetIsolate();
- if (proxy->IsRevoked()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyRevoked,
- isolate->factory()->NewStringFromAsciiChecked("IsArray")));
- return Nothing<bool>();
- }
- return Object::IsArray(handle(proxy->target(), isolate));
- }
- return Just(false);
-}
-
-
// static
MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
Handle<Name> name) {
@@ -1173,7 +1158,7 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
bool Object::ToInt32(int32_t* value) {
if (IsSmi()) {
- *value = Smi::cast(this)->value();
+ *value = Smi::ToInt(this);
return true;
}
if (IsHeapNumber()) {
@@ -1187,20 +1172,25 @@ bool Object::ToInt32(int32_t* value) {
}
Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
- Isolate* isolate, Handle<FunctionTemplateInfo> info) {
+ Isolate* isolate, Handle<FunctionTemplateInfo> info,
+ MaybeHandle<Name> maybe_name) {
Object* current_info = info->shared_function_info();
if (current_info->IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(current_info), isolate);
}
-
Handle<Object> class_name(info->class_name(), isolate);
- Handle<String> name = class_name->IsString()
- ? Handle<String>::cast(class_name)
- : isolate->factory()->empty_string();
+ Handle<Name> name;
+ Handle<String> name_string;
+ if (maybe_name.ToHandle(&name) && name->IsString()) {
+ name_string = Handle<String>::cast(name);
+ } else {
+ name_string = class_name->IsString() ? Handle<String>::cast(class_name)
+ : isolate->factory()->empty_string();
+ }
Handle<Code> code = isolate->builtins()->HandleApiCall();
bool is_constructor = !info->remove_prototype();
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(name, code, is_constructor);
+ Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
+ name_string, code, is_constructor);
if (is_constructor) {
result->SetConstructStub(*isolate->builtins()->JSConstructStubApi());
}
@@ -1284,7 +1274,7 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
if (initial_map->is_dictionary_map()) {
Handle<NameDictionary> dictionary =
NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
- result->set_properties(*dictionary);
+ result->SetProperties(*dictionary);
}
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -1292,7 +1282,7 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
}
void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
- DCHECK(object->HasFastSmiOrObjectElements() ||
+ DCHECK(object->HasSmiOrObjectElements() ||
object->HasFastStringWrapperElements());
FixedArray* raw_elems = FixedArray::cast(object->elements());
Heap* heap = object->GetHeap();
@@ -1305,6 +1295,85 @@ void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
isolate->counters()->cow_arrays_converted()->Increment();
}
+int JSObject::GetHeaderSize(InstanceType type) {
+ switch (type) {
+ case JS_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_GENERATOR_OBJECT_TYPE:
+ return JSGeneratorObject::kSize;
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ return JSAsyncGeneratorObject::kSize;
+ case JS_GLOBAL_PROXY_TYPE:
+ return JSGlobalProxy::kSize;
+ case JS_GLOBAL_OBJECT_TYPE:
+ return JSGlobalObject::kSize;
+ case JS_BOUND_FUNCTION_TYPE:
+ return JSBoundFunction::kSize;
+ case JS_FUNCTION_TYPE:
+ return JSFunction::kSize;
+ case JS_VALUE_TYPE:
+ return JSValue::kSize;
+ case JS_DATE_TYPE:
+ return JSDate::kSize;
+ case JS_ARRAY_TYPE:
+ return JSArray::kSize;
+ case JS_ARRAY_BUFFER_TYPE:
+ return JSArrayBuffer::kSize;
+ case JS_TYPED_ARRAY_TYPE:
+ return JSTypedArray::kSize;
+ case JS_DATA_VIEW_TYPE:
+ return JSDataView::kSize;
+ case JS_SET_TYPE:
+ return JSSet::kSize;
+ case JS_MAP_TYPE:
+ return JSMap::kSize;
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ return JSSetIterator::kSize;
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ return JSMapIterator::kSize;
+ case JS_WEAK_MAP_TYPE:
+ return JSWeakMap::kSize;
+ case JS_WEAK_SET_TYPE:
+ return JSWeakSet::kSize;
+ case JS_PROMISE_CAPABILITY_TYPE:
+ return JSPromiseCapability::kSize;
+ case JS_PROMISE_TYPE:
+ return JSPromise::kSize;
+ case JS_REGEXP_TYPE:
+ return JSRegExp::kSize;
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_MESSAGE_OBJECT_TYPE:
+ return JSMessageObject::kSize;
+ case JS_ARGUMENTS_TYPE:
+ return JSArgumentsObject::kHeaderSize;
+ case JS_ERROR_TYPE:
+ return JSObject::kHeaderSize;
+ case JS_STRING_ITERATOR_TYPE:
+ return JSStringIterator::kSize;
+ case JS_MODULE_NAMESPACE_TYPE:
+ return JSModuleNamespace::kHeaderSize;
+ case WASM_INSTANCE_TYPE:
+ return WasmInstanceObject::kSize;
+ case WASM_MEMORY_TYPE:
+ return WasmMemoryObject::kSize;
+ case WASM_MODULE_TYPE:
+ return WasmModuleObject::kSize;
+ case WASM_TABLE_TYPE:
+ return WasmTableObject::kSize;
+ default:
+ if (type >= FIRST_ARRAY_ITERATOR_TYPE &&
+ type <= LAST_ARRAY_ITERATOR_TYPE) {
+ return JSArrayIterator::kSize;
+ }
+ UNREACHABLE();
+ }
+}
// ES6 9.5.1
// static
@@ -1370,6 +1439,11 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
Isolate* isolate = it->isolate();
Handle<Object> structure = it->GetAccessors();
Handle<Object> receiver = it->GetReceiver();
+ // In case of global IC, the receiver is the global object. Replace by the
+ // global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(), isolate);
+ }
// We should never get here to initialize a const with the hole value since a
// const declaration would conflict with the getter.
@@ -1462,6 +1536,11 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
Isolate* isolate = it->isolate();
Handle<Object> structure = it->GetAccessors();
Handle<Object> receiver = it->GetReceiver();
+ // In case of global IC, the receiver is the global object. Replace by the
+ // global proxy.
+ if (receiver->IsJSGlobalObject()) {
+ receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(), isolate);
+ }
// We should never get here to initialize a const with the hole value since a
// const declaration would conflict with the setter.
@@ -1923,18 +2002,18 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyDetails details) {
DCHECK(!object->HasFastProperties());
- if (!name->IsUniqueName()) {
- name = object->GetIsolate()->factory()->InternalizeString(
- Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
+ Isolate* isolate = object->GetIsolate();
+
+ uint32_t hash = name->Hash();
if (object->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dictionary(object->global_dictionary());
- int entry = dictionary->FindEntry(name);
+ Handle<JSGlobalObject> global_obj(JSGlobalObject::cast(*object));
+ Handle<GlobalDictionary> dictionary(global_obj->global_dictionary());
+ int entry = dictionary->FindEntry(isolate, name, hash);
if (entry == GlobalDictionary::kNotFound) {
- Isolate* isolate = object->GetIsolate();
- auto cell = isolate->factory()->NewPropertyCell();
+ auto cell = isolate->factory()->NewPropertyCell(name);
cell->set_value(*value);
auto cell_type = value->IsUndefined(isolate)
? PropertyCellType::kUndefined
@@ -1942,7 +2021,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
details = details.set_cell_type(cell_type);
value = cell;
dictionary = GlobalDictionary::Add(dictionary, name, value, details);
- object->set_properties(*dictionary);
+ global_obj->set_global_dictionary(*dictionary);
} else {
Handle<PropertyCell> cell =
PropertyCell::PrepareForValue(dictionary, entry, value, details);
@@ -1954,13 +2033,13 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
int entry = dictionary->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
dictionary = NameDictionary::Add(dictionary, name, value, details);
- object->set_properties(*dictionary);
+ object->SetProperties(*dictionary);
} else {
PropertyDetails original_details = dictionary->DetailsAt(entry);
int enumeration_index = original_details.dictionary_index();
DCHECK(enumeration_index > 0);
details = details.set_index(enumeration_index);
- dictionary->SetEntry(entry, name, value, details);
+ dictionary->SetEntry(entry, *name, *value, details);
}
}
}
@@ -2150,7 +2229,7 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
return Just(true);
}
-Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
+Map* Object::GetPrototypeChainRootMap(Isolate* isolate) const {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
Context* native_context = isolate->context()->native_context();
@@ -2159,14 +2238,14 @@ Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
// The object is either a number, a string, a symbol, a boolean, a real JS
// object, or a Harmony proxy.
- HeapObject* heap_object = HeapObject::cast(this);
+ const HeapObject* heap_object = HeapObject::cast(this);
return heap_object->map()->GetPrototypeChainRootMap(isolate);
}
-Map* Map::GetPrototypeChainRootMap(Isolate* isolate) {
+Map* Map::GetPrototypeChainRootMap(Isolate* isolate) const {
DisallowHeapAllocation no_alloc;
if (IsJSReceiverMap()) {
- return this;
+ return const_cast<Map*>(this);
}
int constructor_function_index = GetConstructorFunctionIndex();
if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
@@ -2187,8 +2266,7 @@ Object* GetSimpleHash(Object* object) {
// The object is either a Smi, a HeapNumber, a name, an odd-ball, a real JS
// object, or a Harmony proxy.
if (object->IsSmi()) {
- uint32_t hash =
- ComputeIntegerHash(Smi::cast(object)->value(), kZeroHashSeed);
+ uint32_t hash = ComputeIntegerHash(Smi::ToInt(object));
return Smi::FromInt(hash & Smi::kMaxValue);
}
if (object->IsHeapNumber()) {
@@ -2394,7 +2472,7 @@ bool Object::IterationHasObservableEffects() {
// For FastHoley kinds, an element access on a hole would cause a lookup on
// the prototype. This could have different results if the prototype has been
// changed.
- if (IsFastHoleyElementsKind(array_kind) &&
+ if (IsHoleyElementsKind(array_kind) &&
isolate->IsFastArrayConstructorPrototypeChainIntact()) {
return false;
}
@@ -2781,19 +2859,16 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
- Object* constructor_name =
+ String* constructor_name =
JSFunction::cast(constructor)->shared()->name();
- if (constructor_name->IsString()) {
- String* str = String::cast(constructor_name);
- if (str->length() > 0) {
- accumulator->Add(global_object ? "<GlobalObject " : "<");
- accumulator->Put(str);
- accumulator->Add(
- " %smap = %p",
- map_of_this->is_deprecated() ? "deprecated-" : "",
- map_of_this);
- printed = true;
- }
+ if (constructor_name->length() > 0) {
+ accumulator->Add(global_object ? "<GlobalObject " : "<");
+ accumulator->Put(constructor_name);
+ accumulator->Add(
+ " %smap = %p",
+ map_of_this->is_deprecated() ? "deprecated-" : "",
+ map_of_this);
+ printed = true;
}
}
} else if (constructor->IsFunctionTemplateInfo()) {
@@ -2865,6 +2940,214 @@ void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
os << "]\n";
}
+VisitorId Map::GetVisitorId(Map* map) {
+ STATIC_ASSERT(kVisitorIdCount <= 256);
+
+ const int instance_type = map->instance_type();
+ const bool has_unboxed_fields =
+ FLAG_unbox_double_fields && !map->HasFastPointerLayout();
+ if (instance_type < FIRST_NONSTRING_TYPE) {
+ switch (instance_type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
+ return kVisitSeqOneByteString;
+ } else {
+ return kVisitSeqTwoByteString;
+ }
+
+ case kConsStringTag:
+ if (IsShortcutCandidate(instance_type)) {
+ return kVisitShortcutCandidate;
+ } else {
+ return kVisitConsString;
+ }
+
+ case kSlicedStringTag:
+ return kVisitSlicedString;
+
+ case kExternalStringTag:
+ return kVisitDataObject;
+
+ case kThinStringTag:
+ return kVisitThinString;
+ }
+ UNREACHABLE();
+ }
+
+ switch (instance_type) {
+ case BYTE_ARRAY_TYPE:
+ return kVisitByteArray;
+
+ case BYTECODE_ARRAY_TYPE:
+ return kVisitBytecodeArray;
+
+ case FREE_SPACE_TYPE:
+ return kVisitFreeSpace;
+
+ case FIXED_ARRAY_TYPE:
+ return kVisitFixedArray;
+
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return kVisitFixedDoubleArray;
+
+ case PROPERTY_ARRAY_TYPE:
+ return kVisitPropertyArray;
+
+ case ODDBALL_TYPE:
+ return kVisitOddball;
+
+ case MAP_TYPE:
+ return kVisitMap;
+
+ case CODE_TYPE:
+ return kVisitCode;
+
+ case CELL_TYPE:
+ return kVisitCell;
+
+ case PROPERTY_CELL_TYPE:
+ return kVisitPropertyCell;
+
+ case WEAK_CELL_TYPE:
+ return kVisitWeakCell;
+
+ case TRANSITION_ARRAY_TYPE:
+ return kVisitTransitionArray;
+
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ return kVisitJSWeakCollection;
+
+ case JS_REGEXP_TYPE:
+ return kVisitJSRegExp;
+
+ case SHARED_FUNCTION_INFO_TYPE:
+ return kVisitSharedFunctionInfo;
+
+ case JS_PROXY_TYPE:
+ return kVisitStruct;
+
+ case SYMBOL_TYPE:
+ return kVisitSymbol;
+
+ case JS_ARRAY_BUFFER_TYPE:
+ return kVisitJSArrayBuffer;
+
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ return kVisitSmallOrderedHashMap;
+
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ return kVisitSmallOrderedHashSet;
+
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
+
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
+ case JS_PROMISE_CAPABILITY_TYPE:
+ case JS_PROMISE_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_TABLE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ return has_unboxed_fields ? kVisitJSObject : kVisitJSObjectFast;
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return kVisitJSApiObject;
+
+ case JS_FUNCTION_TYPE:
+ return kVisitJSFunction;
+
+ case FILLER_TYPE:
+ case FOREIGN_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ return kVisitDataObject;
+
+ case FIXED_UINT8_ARRAY_TYPE:
+ case FIXED_INT8_ARRAY_TYPE:
+ case FIXED_UINT16_ARRAY_TYPE:
+ case FIXED_INT16_ARRAY_TYPE:
+ case FIXED_UINT32_ARRAY_TYPE:
+ case FIXED_INT32_ARRAY_TYPE:
+ case FIXED_FLOAT32_ARRAY_TYPE:
+ case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ return kVisitFixedTypedArrayBase;
+
+ case FIXED_FLOAT64_ARRAY_TYPE:
+ return kVisitFixedFloat64Array;
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ if (instance_type == ALLOCATION_SITE_TYPE) {
+ return kVisitAllocationSite;
+ }
+
+ return kVisitStruct;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
void Map::PrintGeneralization(
FILE* file, const char* reason, int modify_index, int split,
int descriptors, bool descriptor_to_field,
@@ -2943,6 +3226,20 @@ void JSObject::PrintInstanceMigration(FILE* file,
PrintF(file, "\n");
}
+bool JSObject::IsUnmodifiedApiObject(Object** o) {
+ Object* object = *o;
+ if (object->IsSmi()) return false;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!object->IsJSObject()) return false;
+ JSObject* js_object = JSObject::cast(object);
+ if (!js_object->WasConstructedFromApiFunction()) return false;
+ Object* maybe_constructor = js_object->map()->GetConstructor();
+ if (!maybe_constructor->IsJSFunction()) return false;
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ if (js_object->elements()->length() != 0) return false;
+
+ return constructor->initial_map() == heap_object->map();
+}
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
Heap* heap = GetHeap();
@@ -2994,6 +3291,9 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<TransitionArray[" << TransitionArray::cast(this)->length()
<< "]>";
break;
+ case PROPERTY_ARRAY_TYPE:
+ os << "<PropertyArray[" << PropertyArray::cast(this)->length() << "]>";
+ break;
case FREE_SPACE_TYPE:
os << "<FreeSpace[" << FreeSpace::cast(this)->size() << "]>";
break;
@@ -3081,10 +3381,12 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
case PROPERTY_CELL_TYPE: {
- os << "<PropertyCell value=";
+ PropertyCell* cell = PropertyCell::cast(this);
+ os << "<PropertyCell name=";
+ cell->name()->ShortPrint(os);
+ os << " value=";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- PropertyCell* cell = PropertyCell::cast(this);
cell->value()->ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
@@ -3145,6 +3447,8 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
os << value();
}
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
#define FIELD_ADDR_CONST(p, offset) \
(reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
@@ -3190,7 +3494,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Object* maybe_constructor = receiver->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- String* name = String::cast(constructor->shared()->name());
+ String* name = constructor->shared()->name();
if (name->length() == 0) name = constructor->shared()->inferred_name();
if (name->length() != 0 &&
!name->Equals(isolate->heap()->Object_string())) {
@@ -3218,7 +3522,7 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Handle<String> result = isolate->factory()->Object_string();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(*maybe_constructor);
- String* name = String::cast(constructor->shared()->name());
+ String* name = constructor->shared()->name();
if (name->length() == 0) name = constructor->shared()->inferred_name();
if (name->length() > 0) result = handle(name, isolate);
}
@@ -3252,11 +3556,13 @@ Handle<Context> JSReceiver::GetCreationContext() {
: Handle<Context>::null();
}
+// static
Handle<Object> Map::WrapFieldType(Handle<FieldType> type) {
if (type->IsClass()) return Map::WeakCellForMap(type->AsClass());
return type;
}
+// static
FieldType* Map::UnwrapFieldType(Object* wrapped_type) {
Object* value = wrapped_type;
if (value->IsWeakCell()) {
@@ -3351,11 +3657,10 @@ const char* Representation::Mnemonic() const {
case kExternal: return "x";
default:
UNREACHABLE();
- return NULL;
}
}
-bool Map::TransitionRemovesTaggedField(Map* target) {
+bool Map::TransitionRemovesTaggedField(Map* target) const {
int inobject = NumberOfFields();
int target_inobject = target->NumberOfFields();
for (int i = target_inobject; i < inobject; i++) {
@@ -3365,7 +3670,7 @@ bool Map::TransitionRemovesTaggedField(Map* target) {
return false;
}
-bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) {
+bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) const {
int inobject = NumberOfFields();
int target_inobject = target->NumberOfFields();
int limit = Min(inobject, target_inobject);
@@ -3378,12 +3683,12 @@ bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) {
return false;
}
-bool Map::TransitionRequiresSynchronizationWithGC(Map* target) {
+bool Map::TransitionRequiresSynchronizationWithGC(Map* target) const {
return TransitionRemovesTaggedField(target) ||
TransitionChangesTaggedFieldToUntaggedField(target);
}
-bool Map::InstancesNeedRewriting(Map* target) {
+bool Map::InstancesNeedRewriting(Map* target) const {
int target_number_of_fields = target->NumberOfFields();
int target_inobject = target->GetInObjectProperties();
int target_unused = target->unused_property_fields();
@@ -3396,7 +3701,7 @@ bool Map::InstancesNeedRewriting(Map* target) {
bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
int target_inobject, int target_unused,
- int* old_number_of_fields) {
+ int* old_number_of_fields) const {
// If fields were added (or removed), rewrite the instance.
*old_number_of_fields = NumberOfFields();
DCHECK(target_number_of_fields >= *old_number_of_fields);
@@ -3485,9 +3790,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
PropertyDetails details = new_map->GetLastDescriptorDetails();
int target_index = details.field_index() - new_map->GetInObjectProperties();
+ int property_array_length = object->property_array()->length();
bool have_space = old_map->unused_property_fields() > 0 ||
(details.location() == kField && target_index >= 0 &&
- object->properties()->length() > target_index);
+ property_array_length > target_index);
// Either new_map adds an kDescriptor property, or a kField property for
// which there is still space, and which does not require a mutable double
// box (an out-of-object double).
@@ -3514,9 +3820,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// This migration is a transition from a map that has run out of property
// space. Extend the backing store.
int grow_by = new_map->unused_property_fields() + 1;
- Handle<FixedArray> old_storage = handle(object->properties(), isolate);
- Handle<FixedArray> new_storage =
- isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by);
+ Handle<PropertyArray> old_storage(object->property_array());
+ Handle<PropertyArray> new_storage =
+ isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
// Properly initialize newly added property.
Handle<Object> value;
@@ -3534,7 +3840,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DisallowHeapAllocation no_allocation;
// Set the new property value and do the map transition.
- object->set_properties(*new_storage);
+ object->SetProperties(*new_storage);
object->synchronized_set_map(*new_map);
return;
}
@@ -3554,8 +3860,11 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int total_size = number_of_fields + unused;
int external = total_size - inobject;
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
+ Handle<PropertyArray> array = isolate->factory()->NewPropertyArray(external);
+ // We use this array to temporarily store the inobject properties.
+ Handle<FixedArray> inobject_props =
+ isolate->factory()->NewFixedArray(inobject);
Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
@@ -3610,9 +3919,12 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
}
DCHECK(!(representation.IsDouble() && value->IsSmi()));
- int target_index = new_descriptors->GetFieldIndex(i) - inobject;
- if (target_index < 0) target_index += total_size;
- array->set(target_index, *value);
+ int target_index = new_descriptors->GetFieldIndex(i);
+ if (target_index < inobject) {
+ inobject_props->set(target_index, *value);
+ } else {
+ array->set(target_index - inobject, *value);
+ }
}
for (int i = old_nof; i < new_nof; i++) {
@@ -3625,9 +3937,12 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
} else {
value = isolate->factory()->uninitialized_value();
}
- int target_index = new_descriptors->GetFieldIndex(i) - inobject;
- if (target_index < 0) target_index += total_size;
- array->set(target_index, *value);
+ int target_index = new_descriptors->GetFieldIndex(i);
+ if (target_index < inobject) {
+ inobject_props->set(target_index, *value);
+ } else {
+ array->set(target_index - inobject, *value);
+ }
}
// From here on we cannot fail and we shouldn't GC anymore.
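// Illustrative sketch, not part of the patch: how the two loops above split a
// migrated field value between in-object storage and the out-of-object
// PropertyArray. Names are hypothetical; the patch does this inline rather
// than through a helper.
void StoreMigratedField(int field_index, int inobject_count,
                        Handle<FixedArray> inobject_props,
                        Handle<PropertyArray> out_of_object,
                        Handle<Object> value) {
  if (field_index < inobject_count) {
    // Copied into the object body once the new map is installed.
    inobject_props->set(field_index, *value);
  } else {
    // Out-of-object fields land directly in the new PropertyArray store.
    out_of_object->set(field_index - inobject_count, *value);
  }
}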
@@ -3642,7 +3957,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- Object* value = array->get(external + i);
+ Object* value = inobject_props->get(i);
// Can't use JSObject::FastPropertyAtPut() because proper map was not set
// yet.
if (new_map->IsUnboxedDoubleField(index)) {
@@ -3663,12 +3978,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
}
-
- // If there are properties in the new backing store, trim it to the correct
- // size and install the backing store into the object.
if (external > 0) {
- heap->RightTrimFixedArray(*array, inobject);
- object->set_properties(*array);
+ object->SetProperties(*array);
}
// Create filler object past the new instance size.
@@ -3740,7 +4051,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
value = handle(descs->GetValue(i), isolate);
}
DCHECK(!value.is_null());
- PropertyDetails d(details.kind(), details.attributes(), i + 1,
+ PropertyDetails d(details.kind(), details.attributes(),
PropertyCellType::kNoCell);
dictionary = NameDictionary::Add(dictionary, key, value, d);
}
@@ -3769,7 +4080,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
// the left-over space to avoid races with the sweeper thread.
object->synchronized_set_map(*new_map);
- object->set_properties(*dictionary);
+ object->SetProperties(*dictionary);
// Ensure that in-object space of slow-mode object does not contain random
// garbage.
@@ -3826,7 +4137,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
CHECK(new_map->is_dictionary_map());
// Slow-to-slow migration is trivial.
- object->set_map(*new_map);
+ object->synchronized_set_map(*new_map);
} else if (!new_map->is_dictionary_map()) {
MigrateFastToFast(object, new_map);
if (old_map->is_prototype_map()) {
@@ -3859,11 +4170,11 @@ void JSObject::ForceSetPrototype(Handle<JSObject> object,
// object.__proto__ = proto;
Handle<Map> old_map = Handle<Map>(object->map());
Handle<Map> new_map = Map::Copy(old_map, "ForceSetPrototype");
- Map::SetPrototype(new_map, proto, FAST_PROTOTYPE);
+ Map::SetPrototype(new_map, proto);
JSObject::MigrateToMap(object, new_map);
}
-int Map::NumberOfFields() {
+int Map::NumberOfFields() const {
DescriptorArray* descriptors = instance_descriptors();
int result = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
@@ -3971,7 +4282,10 @@ void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
}
DescriptorArray* to_replace = instance_descriptors();
- isolate->heap()->incremental_marking()->IterateBlackObject(to_replace);
+ // Replace descriptors by new_descriptors in all maps that share it. The old
+ // descriptors will not be trimmed in the mark-compactor, so we need to mark
+ // all its elements.
+ isolate->heap()->incremental_marking()->RecordWrites(to_replace);
Map* current = this;
while (current->instance_descriptors() == to_replace) {
Object* next = current->GetBackPointer();
@@ -3983,9 +4297,8 @@ void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
set_owns_descriptors(false);
}
-
-Map* Map::FindRootMap() {
- Map* result = this;
+Map* Map::FindRootMap() const {
+ const Map* result = this;
Isolate* isolate = GetIsolate();
while (true) {
Object* back = result->GetBackPointer();
@@ -3995,26 +4308,25 @@ Map* Map::FindRootMap() {
DCHECK(result->owns_descriptors());
DCHECK_EQ(result->NumberOfOwnDescriptors(),
result->instance_descriptors()->number_of_descriptors());
- return result;
+ return const_cast<Map*>(result);
}
result = Map::cast(back);
}
}
-
-Map* Map::FindFieldOwner(int descriptor) {
+Map* Map::FindFieldOwner(int descriptor) const {
DisallowHeapAllocation no_allocation;
DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
- Map* result = this;
+ const Map* result = this;
Isolate* isolate = GetIsolate();
while (true) {
Object* back = result->GetBackPointer();
if (back->IsUndefined(isolate)) break;
- Map* parent = Map::cast(back);
+ const Map* parent = Map::cast(back);
if (parent->NumberOfOwnDescriptors() <= descriptor) break;
result = parent;
}
- return result;
+ return const_cast<Map*>(result);
}
void Map::UpdateFieldType(int descriptor, Handle<Name> name,
@@ -4654,7 +4966,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
Maybe<bool> result = JSObject::AddDataElement(receiver, it->index(), value,
attributes, should_throw);
- JSObject::ValidateElements(receiver);
+ JSObject::ValidateElements(*receiver);
return result;
} else {
it->UpdateProtector();
@@ -4709,8 +5021,10 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
Isolate* isolate = map->GetIsolate();
- // Replace descriptors by new_descriptors in all maps that share it.
- isolate->heap()->incremental_marking()->IterateBlackObject(*descriptors);
+ // Replace descriptors by new_descriptors in all maps that share it. The old
+ // descriptors will not be trimmed in the mark-compactor, so we need to mark
+ // all its elements.
+ isolate->heap()->incremental_marking()->RecordWrites(*descriptors);
Map* current = *map;
while (current->instance_descriptors() == *descriptors) {
@@ -4734,7 +5048,7 @@ Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
if (prototype->IsJSObject()) {
Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
if (!js_prototype->map()->is_prototype_map()) {
- JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+ JSObject::OptimizeAsPrototype(js_prototype);
}
Handle<PrototypeInfo> info =
Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
@@ -4743,13 +5057,13 @@ Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
map = handle(info->ObjectCreateMap(), isolate);
} else {
map = Map::CopyInitialMap(map);
- Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+ Map::SetPrototype(map, prototype);
PrototypeInfo::SetObjectCreateMap(info, map);
}
return map;
}
- return Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+ return Map::TransitionToPrototype(map, prototype);
}
template <class T>
@@ -4917,8 +5231,7 @@ Map* Map::LookupElementsTransitionMap(ElementsKind to_kind) {
return nullptr;
}
-
-bool Map::IsMapInArrayPrototypeChain() {
+bool Map::IsMapInArrayPrototypeChain() const {
Isolate* isolate = GetIsolate();
if (isolate->initial_array_prototype()->map() == this) {
return true;
@@ -5005,7 +5318,7 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
DCHECK(!map->IsUndefined(isolate));
// Check if we can go back in the elements kind transition chain.
- if (IsHoleyElementsKind(from_kind) &&
+ if (IsHoleyOrDictionaryElementsKind(from_kind) &&
to_kind == GetPackedElementsKind(from_kind) &&
map->GetBackPointer()->IsMap() &&
Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
@@ -5053,6 +5366,27 @@ void JSProxy::Revoke(Handle<JSProxy> proxy) {
DCHECK(proxy->IsRevoked());
}
+// static
+Maybe<bool> JSProxy::IsArray(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(proxy);
+ for (int i = 0; i < JSProxy::kMaxIterationLimit; i++) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked,
+ isolate->factory()->NewStringFromAsciiChecked("IsArray")));
+ return Nothing<bool>();
+ }
+ object = handle(proxy->target(), isolate);
+ if (object->IsJSArray()) return Just(true);
+ if (!object->IsJSProxy()) return Just(false);
+ }
+
+ // Recursion is too deep; throw a RangeError.
+ isolate->StackOverflow();
+ return Nothing<bool>();
+}
Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Name> name) {
@@ -5690,7 +6024,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Compute the length of the instance descriptor.
for (int i = 0; i < instance_descriptor_length; i++) {
- int index = Smi::cast(iteration_order->get(i))->value();
+ int index = Smi::ToInt(iteration_order->get(i));
DCHECK(dictionary->IsKey(isolate, dictionary->KeyAt(index)));
PropertyKind kind = dictionary->DetailsAt(index).kind();
@@ -5730,7 +6064,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Transform the object.
new_map->set_unused_property_fields(inobject_props);
object->synchronized_set_map(*new_map);
- object->set_properties(isolate->heap()->empty_fixed_array());
+ object->SetProperties(isolate->heap()->empty_fixed_array());
// Check that it really works.
DCHECK(object->HasFastProperties());
return;
@@ -5748,20 +6082,19 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
unused_property_fields = inobject_props - number_of_fields;
}
- // Allocate the fixed array for the fields.
- Handle<FixedArray> fields = factory->NewFixedArray(
- number_of_allocated_fields);
+ // Allocate the property array for the fields.
+ Handle<PropertyArray> fields =
+ factory->NewPropertyArray(number_of_allocated_fields);
// Fill in the instance descriptor and the fields.
int current_offset = 0;
for (int i = 0; i < instance_descriptor_length; i++) {
- int index = Smi::cast(iteration_order->get(i))->value();
- Object* k = dictionary->KeyAt(index);
- DCHECK(dictionary->IsKey(isolate, k));
+ int index = Smi::ToInt(iteration_order->get(i));
+ Name* k = dictionary->NameAt(index);
// Dictionary keys are internalized upon insertion.
// TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
CHECK(k->IsUniqueName());
- Handle<Name> key(Name::cast(k), isolate);
+ Handle<Name> key(k, isolate);
Object* value = dictionary->ValueAt(index);
@@ -5812,27 +6145,13 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Transform the object.
object->synchronized_set_map(*new_map);
- object->set_properties(*fields);
+ object->SetProperties(*fields);
DCHECK(object->IsJSObject());
// Check that it really works.
DCHECK(object->HasFastProperties());
}
-
-void JSObject::ResetElements(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
- CHECK(object->map() != isolate->heap()->sloppy_arguments_elements_map());
- if (object->map()->has_dictionary_elements()) {
- Handle<SeededNumberDictionary> new_elements =
- SeededNumberDictionary::New(isolate, 0);
- object->set_elements(*new_elements);
- } else {
- object->set_elements(object->map()->GetInitialElements());
- }
-}
-
-
void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
if (dictionary->requires_slow_elements()) return;
dictionary->set_requires_slow_elements();
@@ -5862,8 +6181,7 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
}
}
- DCHECK(object->HasFastSmiOrObjectElements() ||
- object->HasFastDoubleElements() ||
+ DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements() ||
object->HasFastArgumentsElements() ||
object->HasFastStringWrapperElements());
@@ -6003,16 +6321,15 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
return Just(result->IsTrue(isolate));
}
-
void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
- Handle<Name> name, int entry) {
+ int entry) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
Handle<GlobalDictionary> dictionary(
- JSObject::cast(*object)->global_dictionary());
+ JSGlobalObject::cast(*object)->global_dictionary());
DCHECK_NE(GlobalDictionary::kNotFound, entry);
auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
@@ -6023,10 +6340,8 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
Handle<NameDictionary> dictionary(object->property_dictionary());
DCHECK_NE(NameDictionary::kNotFound, entry);
- NameDictionary::DeleteProperty(dictionary, entry);
- Handle<NameDictionary> new_properties =
- NameDictionary::Shrink(dictionary, name);
- object->set_properties(*new_properties);
+ dictionary = NameDictionary::DeleteEntry(dictionary, entry);
+ object->SetProperties(*dictionary);
}
}
@@ -6321,8 +6636,8 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
bool extensible = JSObject::IsExtensible(object);
- return ValidateAndApplyPropertyDescriptor(isolate, it, extensible, desc,
- &current, should_throw);
+ return ValidateAndApplyPropertyDescriptor(
+ isolate, it, extensible, desc, &current, should_throw, Handle<Name>());
}
@@ -6994,10 +7309,10 @@ Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
}
Handle<NameDictionary> dict(proxy->property_dictionary());
- PropertyDetails details(kData, DONT_ENUM, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell);
Handle<NameDictionary> result =
NameDictionary::Add(dict, private_name, value, details);
- if (!dict.is_identical_to(result)) proxy->set_properties(*result);
+ if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
return Just(true);
}
@@ -7260,10 +7575,9 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
Isolate* isolate = elements->GetIsolate();
- if (IsFastObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : elements->length();
+ if (IsObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
+ int length = IsJSArray() ? Smi::ToInt(JSArray::cast(this)->length())
+ : elements->length();
for (int i = 0; i < length; ++i) {
Object* element = elements->get(i);
if (!element->IsTheHole(isolate) && element == object) return true;
@@ -7312,14 +7626,14 @@ bool JSObject::ReferencesObject(Object* obj) {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
break;
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
break;
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
@@ -7338,8 +7652,7 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check the arguments.
FixedArray* arguments = elements->arguments();
- kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS :
- FAST_HOLEY_ELEMENTS;
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : HOLEY_ELEMENTS;
if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
}
@@ -7379,7 +7692,8 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check the context extension (if any) if it can have references.
- if (context->has_extension() && !context->IsCatchContext()) {
+ if (context->has_extension() && !context->IsCatchContext() &&
+ !context->IsModuleContext()) {
// With harmony scoping, a JSFunction may have a script context.
// TODO(mvstanton): walk into the ScopeInfo.
if (context->IsScriptContext()) {
@@ -7402,7 +7716,13 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+
if (!object->HasSloppyArgumentsElements()) { // Fast path.
+ // Prevent memory leaks by not adding unnecessary transitions.
+ Maybe<bool> test = JSObject::TestIntegrityLevel(object, level);
+ MAYBE_RETURN(test, Nothing<bool>());
+ if (test.FromJust()) return test;
+
if (level == SEALED) {
return JSObject::PreventExtensionsWithTransition<SEALED>(object,
should_throw);
@@ -7458,25 +7778,100 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
return Just(true);
}
+namespace {
-Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> object,
- IntegrityLevel level) {
+template <typename Dictionary>
+bool TestDictionaryPropertiesIntegrityLevel(Dictionary* dict, Isolate* isolate,
+ PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
- Isolate* isolate = object->GetIsolate();
- Maybe<bool> extensible = JSReceiver::IsExtensible(object);
+ uint32_t capacity = dict->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ Object* key;
+ if (!dict->ToKey(isolate, i, &key)) continue;
+ if (key->FilterKey(ALL_PROPERTIES)) continue;
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.IsConfigurable()) return false;
+ if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool TestFastPropertiesIntegrityLevel(Map* map, PropertyAttributes level) {
+ DCHECK(level == SEALED || level == FROZEN);
+ DCHECK_LT(LAST_CUSTOM_ELEMENTS_RECEIVER, map->instance_type());
+ DCHECK(!map->is_dictionary_map());
+
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ for (int i = 0; i < number_of_own_descriptors; i++) {
+ if (descriptors->GetKey(i)->IsPrivate()) continue;
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.IsConfigurable()) return false;
+ if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool TestPropertiesIntegrityLevel(JSObject* object, PropertyAttributes level) {
+ DCHECK_LT(LAST_CUSTOM_ELEMENTS_RECEIVER, object->map()->instance_type());
+
+ if (object->HasFastProperties()) {
+ return TestFastPropertiesIntegrityLevel(object->map(), level);
+ }
+
+ return TestDictionaryPropertiesIntegrityLevel(object->property_dictionary(),
+ object->GetIsolate(), level);
+}
+
+bool TestElementsIntegrityLevel(JSObject* object, PropertyAttributes level) {
+ DCHECK(!object->HasSloppyArgumentsElements());
+
+ ElementsKind kind = object->GetElementsKind();
+
+ if (IsDictionaryElementsKind(kind)) {
+ return TestDictionaryPropertiesIntegrityLevel(
+ SeededNumberDictionary::cast(object->elements()), object->GetIsolate(),
+ level);
+ }
+
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ // Only DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS have
+ // PropertyAttributes, so just test if empty.
+ return accessor->NumberOfElements(object) == 0;
+}
+
+bool FastTestIntegrityLevel(JSObject* object, PropertyAttributes level) {
+ DCHECK_LT(LAST_CUSTOM_ELEMENTS_RECEIVER, object->map()->instance_type());
+
+ return !object->map()->is_extensible() &&
+ TestElementsIntegrityLevel(object, level) &&
+ TestPropertiesIntegrityLevel(object, level);
+}
+
+Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
+ PropertyAttributes level) {
+ DCHECK(level == SEALED || level == FROZEN);
+
+ Maybe<bool> extensible = JSReceiver::IsExtensible(receiver);
MAYBE_RETURN(extensible, Nothing<bool>());
if (extensible.FromJust()) return Just(false);
+ Isolate* isolate = receiver->GetIsolate();
+
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, keys, JSReceiver::OwnPropertyKeys(object), Nothing<bool>());
+ isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
for (int i = 0; i < keys->length(); ++i) {
Handle<Object> key(keys->get(i), isolate);
PropertyDescriptor current_desc;
Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
- isolate, object, key, &current_desc);
+ isolate, receiver, key, &current_desc);
MAYBE_RETURN(owned, Nothing<bool>());
if (owned.FromJust()) {
if (current_desc.configurable()) return Just(false);
@@ -7490,6 +7885,25 @@ Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> object,
return Just(true);
}
+} // namespace
+
+Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> receiver,
+ IntegrityLevel level) {
+ if (receiver->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER) {
+ return JSObject::TestIntegrityLevel(Handle<JSObject>::cast(receiver),
+ level);
+ }
+ return GenericTestIntegrityLevel(receiver, level);
+}
+
+Maybe<bool> JSObject::TestIntegrityLevel(Handle<JSObject> object,
+ IntegrityLevel level) {
+ if (object->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+ !object->HasSloppyArgumentsElements()) {
+ return Just(FastTestIntegrityLevel(*object, level));
+ }
+ return GenericTestIntegrityLevel(Handle<JSReceiver>::cast(object), level);
+}
Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
ShouldThrow should_throw) {
@@ -7671,44 +8085,23 @@ bool JSObject::IsExtensible(Handle<JSObject> object) {
namespace {
template <typename Dictionary>
-void DictionaryDetailsAtPut(Isolate* isolate, Handle<Dictionary> dictionary,
- int entry, PropertyDetails details) {
- dictionary->DetailsAtPut(entry, details);
-}
-
-template <>
-void DictionaryDetailsAtPut<GlobalDictionary>(
- Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
- PropertyDetails details) {
- Object* value = dictionary->ValueAt(entry);
- DCHECK(value->IsPropertyCell());
- value = PropertyCell::cast(value)->value();
- if (value->IsTheHole(isolate)) return;
- PropertyCell::PrepareForValue(dictionary, entry, handle(value, isolate),
- details);
-}
-
-template <typename Dictionary>
void ApplyAttributesToDictionary(Isolate* isolate,
Handle<Dictionary> dictionary,
const PropertyAttributes attributes) {
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(isolate, k) &&
- !(k->IsSymbol() && Symbol::cast(k)->is_private())) {
- PropertyDetails details = dictionary->DetailsAt(i);
- int attrs = attributes;
- // READ_ONLY is an invalid attribute for JS setters/getters.
- if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
- Object* v = dictionary->ValueAt(i);
- if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value();
- if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
- }
- details = details.CopyAddAttributes(
- static_cast<PropertyAttributes>(attrs));
- DictionaryDetailsAtPut<Dictionary>(isolate, dictionary, i, details);
+ Object* k;
+ if (!dictionary->ToKey(isolate, i, &k)) continue;
+ if (k->FilterKey(ALL_PROPERTIES)) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ int attrs = attributes;
+ // READ_ONLY is an invalid attribute for JS setters/getters.
+ if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
+ Object* v = dictionary->ValueAt(i);
+ if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
}
+ details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
+ dictionary->DetailsAtPut(i, details);
}
}
@@ -7764,10 +8157,9 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (!object->HasFixedTypedArrayElements() &&
!object->HasDictionaryElements() &&
!object->HasSlowStringWrapperElements()) {
- int length =
- object->IsJSArray()
- ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
- : object->elements()->length();
+ int length = object->IsJSArray()
+ ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
+ : object->elements()->length();
new_element_dictionary =
length == 0 ? isolate->factory()->empty_slow_element_dictionary()
: object->GetElementsAccessor()->Normalize(object);
@@ -7820,8 +8212,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (attrs != NONE) {
if (object->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dictionary(object->global_dictionary(),
- isolate);
+ Handle<GlobalDictionary> dictionary(
+ JSGlobalObject::cast(*object)->global_dictionary(), isolate);
ApplyAttributesToDictionary(isolate, dictionary, attrs);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary(),
@@ -7875,251 +8267,6 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
return Object::WrapForRead(isolate, raw_value, representation);
}
-template <class ContextObject>
-class JSObjectWalkVisitor {
- public:
- JSObjectWalkVisitor(ContextObject* site_context, bool copying,
- JSObject::DeepCopyHints hints)
- : site_context_(site_context),
- copying_(copying),
- hints_(hints) {}
-
- MUST_USE_RESULT MaybeHandle<JSObject> StructureWalk(Handle<JSObject> object);
-
- protected:
- MUST_USE_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty(
- Handle<JSObject> object,
- Handle<JSObject> value) {
- Handle<AllocationSite> current_site = site_context()->EnterNewScope();
- MaybeHandle<JSObject> copy_of_value = StructureWalk(value);
- site_context()->ExitScope(current_site, value);
- return copy_of_value;
- }
-
- inline ContextObject* site_context() { return site_context_; }
- inline Isolate* isolate() { return site_context()->isolate(); }
-
- inline bool copying() const { return copying_; }
-
- private:
- ContextObject* site_context_;
- const bool copying_;
- const JSObject::DeepCopyHints hints_;
-};
-
-template <class ContextObject>
-MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
- Handle<JSObject> object) {
- Isolate* isolate = this->isolate();
- bool copying = this->copying();
- bool shallow = hints_ == JSObject::kObjectIsShallow;
-
- if (!shallow) {
- StackLimitCheck check(isolate);
-
- if (check.HasOverflowed()) {
- isolate->StackOverflow();
- return MaybeHandle<JSObject>();
- }
- }
-
- if (object->map()->is_deprecated()) {
- JSObject::MigrateInstance(object);
- }
-
- Handle<JSObject> copy;
- if (copying) {
- // JSFunction objects are not allowed to be in normal boilerplates at all.
- DCHECK(!object->IsJSFunction());
- Handle<AllocationSite> site_to_pass;
- if (site_context()->ShouldCreateMemento(object)) {
- site_to_pass = site_context()->current();
- }
- copy = isolate->factory()->CopyJSObjectWithAllocationSite(
- object, site_to_pass);
- } else {
- copy = object;
- }
-
- DCHECK(copying || copy.is_identical_to(object));
-
- ElementsKind kind = copy->GetElementsKind();
- if (copying && IsFastSmiOrObjectElementsKind(kind) &&
- FixedArray::cast(copy->elements())->map() ==
- isolate->heap()->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
- }
-
- if (!shallow) {
- HandleScope scope(isolate);
-
- // Deep copy own properties.
- if (copy->HasFastProperties()) {
- Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
- int limit = copy->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
- if (object->IsUnboxedDoubleField(index)) {
- if (copying) {
- // Ensure that all bits of the double value are preserved.
- uint64_t value = object->RawFastDoublePropertyAsBitsAt(index);
- copy->RawFastDoublePropertyAsBitsAtPut(index, value);
- }
- } else {
- Handle<Object> value(object->RawFastPropertyAt(index), isolate);
- if (value->IsJSObject()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
- JSObject);
- if (copying) {
- copy->FastPropertyAtPut(index, *value);
- }
- } else {
- if (copying) {
- Representation representation = details.representation();
- value = Object::NewStorageFor(isolate, value, representation);
- copy->FastPropertyAtPut(index, *value);
- }
- }
- }
- }
- } else {
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- PropertyFilter filter = static_cast<PropertyFilter>(
- ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
- KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly, filter);
- accumulator.CollectOwnPropertyNames(copy, copy);
- Handle<FixedArray> names = accumulator.GetKeys();
- for (int i = 0; i < names->length(); i++) {
- DCHECK(names->get(i)->IsName());
- Handle<Name> name(Name::cast(names->get(i)));
- Handle<Object> value =
- JSObject::GetProperty(copy, name).ToHandleChecked();
- if (value->IsJSObject()) {
- Handle<JSObject> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
- JSObject);
- if (copying) {
- // Creating object copy for literals. No strict mode needed.
- JSObject::SetProperty(copy, name, result, SLOPPY).Assert();
- }
- }
- }
- }
-
- // Deep copy own elements.
- switch (kind) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
- if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
-#ifdef DEBUG
- for (int i = 0; i < elements->length(); i++) {
- DCHECK(!elements->get(i)->IsJSObject());
- }
-#endif
- } else {
- for (int i = 0; i < elements->length(); i++) {
- Handle<Object> value(elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
- JSObject);
- if (copying) {
- elements->set(i, *result);
- }
- }
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> element_dictionary(
- copy->element_dictionary());
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(isolate, k)) {
- Handle<Object> value(element_dictionary->ValueAt(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
- JSObject);
- if (copying) {
- element_dictionary->ValueAtPut(i, *result);
- }
- }
- }
- }
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- UNREACHABLE();
- break;
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- // Typed elements cannot be created using an object literal.
- UNREACHABLE();
- break;
-
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case NO_ELEMENTS:
- // No contained objects, nothing to do.
- break;
- }
- }
-
- return copy;
-}
-
-
-MaybeHandle<JSObject> JSObject::DeepWalk(
- Handle<JSObject> object,
- AllocationSiteCreationContext* site_context) {
- JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
- kNoHints);
- MaybeHandle<JSObject> result = v.StructureWalk(object);
- Handle<JSObject> for_assert;
- DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
- return result;
-}
-
-
-MaybeHandle<JSObject> JSObject::DeepCopy(
- Handle<JSObject> object,
- AllocationSiteUsageContext* site_context,
- DeepCopyHints hints) {
- JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
- MaybeHandle<JSObject> copy = v.StructureWalk(object);
- Handle<JSObject> for_assert;
- DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
- return copy;
-}
-
// static
MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
ToPrimitiveHint hint) {
@@ -8185,19 +8332,19 @@ bool JSObject::HasEnumerableElements() {
// TODO(cbruni): cleanup
JSObject* object = this;
switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS: {
int length = object->IsJSArray()
- ? Smi::cast(JSArray::cast(object)->length())->value()
+ ? Smi::ToInt(JSArray::cast(object)->length())
: object->elements()->length();
return length > 0;
}
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(object->elements());
int length = object->IsJSArray()
- ? Smi::cast(JSArray::cast(object)->length())->value()
+ ? Smi::ToInt(JSArray::cast(object)->length())
: elements->length();
Isolate* isolate = GetIsolate();
for (int i = 0; i < length; i++) {
@@ -8205,9 +8352,9 @@ bool JSObject::HasEnumerableElements() {
}
return false;
}
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ case HOLEY_DOUBLE_ELEMENTS: {
int length = object->IsJSArray()
- ? Smi::cast(JSArray::cast(object)->length())->value()
+ ? Smi::ToInt(JSArray::cast(object)->length())
: object->elements()->length();
// Zero-length arrays would use the empty FixedArray...
if (length == 0) return false;
@@ -8230,7 +8377,7 @@ bool JSObject::HasEnumerableElements() {
case DICTIONARY_ELEMENTS: {
SeededNumberDictionary* elements =
SeededNumberDictionary::cast(object->elements());
- return elements->NumberOfElementsFilterAttributes(ONLY_ENUMERABLE) > 0;
+ return elements->NumberOfEnumerableProperties() > 0;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -8246,28 +8393,22 @@ bool JSObject::HasEnumerableElements() {
return false;
}
UNREACHABLE();
- return true;
}
-
-int Map::NumberOfDescribedProperties(DescriptorFlag which,
- PropertyFilter filter) {
+int Map::NumberOfEnumerableProperties() const {
int result = 0;
DescriptorArray* descs = instance_descriptors();
- int limit = which == ALL_DESCRIPTORS
- ? descs->number_of_descriptors()
- : NumberOfOwnDescriptors();
+ int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- !descs->GetKey(i)->FilterKey(filter)) {
+ if ((descs->GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
+ !descs->GetKey(i)->FilterKey(ENUMERABLE_STRINGS)) {
result++;
}
}
return result;
}
-
-int Map::NextFreePropertyIndex() {
+int Map::NextFreePropertyIndex() const {
int free_index = 0;
int number_of_own_descriptors = NumberOfOwnDescriptors();
DescriptorArray* descs = instance_descriptors();
@@ -8281,8 +8422,7 @@ int Map::NextFreePropertyIndex() {
return free_index;
}
-
-bool Map::OnlyHasSimpleProperties() {
+bool Map::OnlyHasSimpleProperties() const {
// Wrapped string elements aren't explicitly stored in the elements backing
// store, but are loaded indirectly from the underlying string.
return !IsStringWrapperElementsKind(elements_kind()) &&
@@ -8413,7 +8553,7 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
entry_storage->set(0, *key);
entry_storage->set(1, *value);
value = isolate->factory()->NewJSArrayWithElements(entry_storage,
- FAST_ELEMENTS, 2);
+ PACKED_ELEMENTS, 2);
}
values_or_entries->set(length, *value);
@@ -8588,7 +8728,8 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
return GetHeap()->undefined_value();
} else if (IsJSGlobalObject()) {
- return global_dictionary()->SlowReverseLookup(value);
+ return JSGlobalObject::cast(this)->global_dictionary()->SlowReverseLookup(
+ value);
} else {
return property_dictionary()->SlowReverseLookup(value);
}
@@ -8732,8 +8873,15 @@ void EnsureInitialMap(Handle<Map> map) {
DCHECK(constructor->IsJSFunction());
DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
*map == *isolate->strict_function_map() ||
+ *map == *isolate->strict_function_with_name_map() ||
*map == *isolate->generator_function_map() ||
- *map == *isolate->async_function_map());
+ *map == *isolate->generator_function_with_name_map() ||
+ *map == *isolate->generator_function_with_home_object_map() ||
+ *map == *isolate->generator_function_with_name_and_home_object_map() ||
+ *map == *isolate->async_function_map() ||
+ *map == *isolate->async_function_with_name_map() ||
+ *map == *isolate->async_function_with_home_object_map() ||
+ *map == *isolate->async_function_with_name_and_home_object_map());
#endif
// Initial maps must always own their descriptors, and their descriptor array
// does not contain descriptors that do not belong to the map.
@@ -9004,7 +9152,7 @@ void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
#else
SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
#endif
- child->set_visitor_id(Heap::GetStaticVisitorIdForMap(*child));
+ child->set_visitor_id(Map::GetVisitorId(*child));
}
Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
@@ -9050,22 +9198,20 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
return new_map;
}
-
Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
- LanguageMode language_mode, FunctionKind kind) {
+ Handle<SharedFunctionInfo> shared_info) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
// Initial map for sloppy mode function is stored in the function
// constructor. Initial maps for strict mode are cached as special transitions
// using |strict_function_transition_symbol| as a key.
- if (language_mode == SLOPPY) return initial_map;
+ if (is_sloppy(shared_info->language_mode())) return initial_map;
Isolate* isolate = initial_map->GetIsolate();
- int map_index = Context::FunctionMapIndex(language_mode, kind);
- Handle<Map> function_map(
- Map::cast(isolate->native_context()->get(map_index)));
+ Handle<Map> function_map(Map::cast(
+ isolate->native_context()->get(shared_info->function_map_index())));
STATIC_ASSERT(LANGUAGE_END == 2);
- DCHECK_EQ(STRICT, language_mode);
+ DCHECK_EQ(STRICT, shared_info->language_mode());
Handle<Symbol> transition_symbol =
isolate->factory()->strict_function_transition_symbol();
Map* maybe_transition =
@@ -9146,11 +9292,8 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
// Check that we do not overflow the instance size when adding the extra
// inobject properties. If the instance size overflows, we allocate as many
// properties as we can as inobject properties.
- int max_extra_properties =
- (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
-
- if (inobject_properties > max_extra_properties) {
- inobject_properties = max_extra_properties;
+ if (inobject_properties > JSObject::kMaxInObjectProperties) {
+ inobject_properties = JSObject::kMaxInObjectProperties;
}
int new_instance_size =
@@ -9160,7 +9303,7 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
copy->SetInObjectProperties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(new_instance_size);
- copy->set_visitor_id(Heap::GetStaticVisitorIdForMap(*copy));
+ copy->set_visitor_id(Map::GetVisitorId(*copy));
return copy;
}
@@ -9218,7 +9361,6 @@ bool CanHoldValue(DescriptorArray* descriptors, int descriptor,
}
}
UNREACHABLE();
- return false;
}
Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
@@ -9769,7 +9911,7 @@ class CodeCache : public AllStatic {
static inline int GetLinearUsage(FixedArray* linear_cache) {
DCHECK_GT(linear_cache->length(), kEntrySize);
- return Smi::cast(linear_cache->get(kLinearUsageIndex))->value();
+ return Smi::ToInt(linear_cache->get(kLinearUsageIndex));
}
};
@@ -9787,66 +9929,11 @@ Code* Map::LookupInCodeCache(Name* name, Code::Flags flags) {
}
-// The key in the code cache hash table consists of the property name and the
-// code object. The actual match is on the name and the code flags. If a key
-// is created using the flags and not a code object it can only be used for
-// lookup not to create a new entry.
-class CodeCacheHashTableKey : public HashTableKey {
- public:
- CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags)
- : name_(name), flags_(flags), code_() {
- DCHECK(name_->IsUniqueName());
- }
-
- CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
- : name_(name), flags_(code->flags()), code_(code) {
- DCHECK(name_->IsUniqueName());
- }
-
- bool IsMatch(Object* other) override {
- DCHECK(other->IsFixedArray());
- FixedArray* pair = FixedArray::cast(other);
- Name* name = Name::cast(pair->get(0));
- Code::Flags flags = Code::cast(pair->get(1))->flags();
- if (flags != flags_) return false;
- DCHECK(name->IsUniqueName());
- return *name_ == name;
- }
-
- static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) {
- return name->Hash() ^ flags;
- }
-
- uint32_t Hash() override { return NameFlagsHashHelper(*name_, flags_); }
-
- uint32_t HashForObject(Object* obj) override {
- FixedArray* pair = FixedArray::cast(obj);
- Name* name = Name::cast(pair->get(0));
- Code* code = Code::cast(pair->get(1));
- return NameFlagsHashHelper(name, code->flags());
- }
-
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
- Handle<Code> code = code_.ToHandleChecked();
- Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2);
- pair->set(0, *name_);
- pair->set(1, *code);
- return pair;
- }
-
- private:
- Handle<Name> name_;
- Code::Flags flags_;
- // TODO(jkummerow): We should be able to get by without this.
- MaybeHandle<Code> code_;
-};
-
-
Handle<CodeCacheHashTable> CodeCacheHashTable::Put(
Handle<CodeCacheHashTable> cache, Handle<Name> name, Handle<Code> code) {
CodeCacheHashTableKey key(name, code);
- Handle<CodeCacheHashTable> new_cache = EnsureCapacity(cache, 1, &key);
+ Handle<CodeCacheHashTable> new_cache = EnsureCapacity(cache, 1);
int entry = new_cache->FindInsertionEntry(key.Hash());
Handle<Object> k = key.AsHandle(cache->GetIsolate());
@@ -10087,10 +10174,12 @@ Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
return result;
}
-Handle<FixedArray> ArrayList::Elements() const {
- Handle<FixedArray> result = GetIsolate()->factory()->NewFixedArray(Length());
+Handle<FixedArray> ArrayList::Elements(Handle<ArrayList> array) {
+ int length = array->Length();
+ Handle<FixedArray> result =
+ array->GetIsolate()->factory()->NewFixedArray(length);
// Do not copy the first entry, i.e., the length.
- CopyTo(kFirstIndex, *result, 0, Length());
+ array->CopyTo(kFirstIndex, *result, 0, length);
return result;
}
@@ -10153,11 +10242,9 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
}
// static
-Handle<FrameArray> FrameArray::AppendWasmFrame(Handle<FrameArray> in,
- Handle<Object> wasm_instance,
- int wasm_function_index,
- Handle<AbstractCode> code,
- int offset, int flags) {
+Handle<FrameArray> FrameArray::AppendWasmFrame(
+ Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
+ int wasm_function_index, Handle<AbstractCode> code, int offset, int flags) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
Handle<FrameArray> array = EnsureSpace(in, new_length);
@@ -10320,20 +10407,6 @@ Handle<DeoptimizationInputData> DeoptimizationInputData::New(
}
-Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
- Isolate* isolate,
- int number_of_deopt_points,
- PretenureFlag pretenure) {
- Handle<FixedArray> result;
- if (number_of_deopt_points == 0) {
- result = isolate->factory()->empty_fixed_array();
- } else {
- result = isolate->factory()->NewFixedArray(
- LengthOfFixedArray(number_of_deopt_points), pretenure);
- }
- return Handle<DeoptimizationOutputData>::cast(result);
-}
-
SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
if (index == -1) {
return SharedFunctionInfo::cast(SharedFunctionInfo());
@@ -10352,12 +10425,12 @@ int HandlerTable::LookupRange(int pc_offset, int* data_out,
int innermost_end = std::numeric_limits<int>::max();
#endif
for (int i = 0; i < length(); i += kRangeEntrySize) {
- int start_offset = Smi::cast(get(i + kRangeStartIndex))->value();
- int end_offset = Smi::cast(get(i + kRangeEndIndex))->value();
- int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int start_offset = Smi::ToInt(get(i + kRangeStartIndex));
+ int end_offset = Smi::ToInt(get(i + kRangeEndIndex));
+ int handler_field = Smi::ToInt(get(i + kRangeHandlerIndex));
int handler_offset = HandlerOffsetField::decode(handler_field);
CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int handler_data = Smi::cast(get(i + kRangeDataIndex))->value();
+ int handler_data = Smi::ToInt(get(i + kRangeDataIndex));
if (pc_offset >= start_offset && pc_offset < end_offset) {
DCHECK_GE(start_offset, innermost_start);
DCHECK_LT(end_offset, innermost_end);
@@ -10377,8 +10450,8 @@ int HandlerTable::LookupRange(int pc_offset, int* data_out,
// TODO(turbofan): Make sure table is sorted and use binary search.
int HandlerTable::LookupReturn(int pc_offset) {
for (int i = 0; i < length(); i += kReturnEntrySize) {
- int return_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
- int handler_field = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ int return_offset = Smi::ToInt(get(i + kReturnOffsetIndex));
+ int handler_field = Smi::ToInt(get(i + kReturnHandlerIndex));
if (pc_offset == return_offset) {
return HandlerOffsetField::decode(handler_field);
}
@@ -10664,10 +10737,8 @@ const uc16* String::GetTwoByteData(unsigned start) {
case kConsStringTag:
case kThinStringTag:
UNREACHABLE();
- return NULL;
}
UNREACHABLE();
- return NULL;
}
@@ -10844,7 +10915,6 @@ String* ConsStringIterator::Search(int* offset_out) {
return string;
}
UNREACHABLE();
- return NULL;
}
@@ -10892,7 +10962,6 @@ String* ConsStringIterator::NextLeaf(bool* blew_stack) {
}
}
UNREACHABLE();
- return NULL;
}
@@ -10923,7 +10992,6 @@ uint16_t ConsString::ConsStringGet(int index) {
}
UNREACHABLE();
- return 0;
}
uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
@@ -11652,7 +11720,6 @@ MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
}
UNREACHABLE();
- return MaybeHandle<String>();
}
namespace { // for String.Prototype.lastIndexOf
@@ -11921,7 +11988,7 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
DCHECK((value & String::kIsNotArrayIndexMask) == 0);
DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
- (value & String::kContainsCachedArrayIndexMask) == 0);
+ Name::ContainsCachedArrayIndex(value));
return value;
}
@@ -12045,7 +12112,7 @@ int Map::Hash() {
namespace {
-bool CheckEquivalent(Map* first, Map* second) {
+bool CheckEquivalent(const Map* first, const Map* second) {
return first->GetConstructor() == second->GetConstructor() &&
first->prototype() == second->prototype() &&
first->instance_type() == second->instance_type() &&
@@ -12057,8 +12124,7 @@ bool CheckEquivalent(Map* first, Map* second) {
} // namespace
-
-bool Map::EquivalentToForTransition(Map* other) {
+bool Map::EquivalentToForTransition(const Map* other) const {
if (!CheckEquivalent(this, other)) return false;
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
@@ -12070,7 +12136,7 @@ bool Map::EquivalentToForTransition(Map* other) {
return true;
}
-bool Map::EquivalentToForElementsKindTransition(Map* other) {
+bool Map::EquivalentToForElementsKindTransition(const Map* other) const {
if (!EquivalentToForTransition(other)) return false;
#ifdef DEBUG
// Ensure that we don't try to generate elements kind transitions from maps
@@ -12090,8 +12156,8 @@ bool Map::EquivalentToForElementsKindTransition(Map* other) {
return true;
}
-bool Map::EquivalentToForNormalization(Map* other,
- PropertyNormalizationMode mode) {
+bool Map::EquivalentToForNormalization(const Map* other,
+ PropertyNormalizationMode mode) const {
int properties =
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
return CheckEquivalent(this, other) && bit_field2() == other->bit_field2() &&
@@ -12118,46 +12184,43 @@ bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
return false;
}
-void JSFunction::MarkForOptimization() {
- Isolate* isolate = GetIsolate();
- DCHECK(!IsOptimized());
- DCHECK(shared()->allows_lazy_compilation() ||
- !shared()->optimization_disabled());
- set_code_no_write_barrier(
- isolate->builtins()->builtin(Builtins::kCompileOptimized));
- // No write barrier required, since the builtin is part of the root set.
- if (FLAG_mark_shared_functions_for_tier_up) {
- shared()->set_marked_for_tier_up(true);
- }
-}
-
-
-void JSFunction::AttemptConcurrentOptimization() {
+void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
Isolate* isolate = GetIsolate();
if (!isolate->concurrent_recompilation_enabled() ||
isolate->bootstrapper()->IsActive()) {
- MarkForOptimization();
- return;
+ mode = ConcurrencyMode::kNotConcurrent;
}
- DCHECK(!IsInOptimizationQueue());
+
DCHECK(!IsOptimized());
+ DCHECK(!HasOptimizedCode());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
- DCHECK(isolate->concurrent_recompilation_enabled());
- if (FLAG_trace_concurrent_recompilation) {
- PrintF(" ** Marking ");
- ShortPrint();
- PrintF(" for concurrent recompilation.\n");
+
+ if (mode == ConcurrencyMode::kConcurrent) {
+ if (IsInOptimizationQueue()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Not marking ");
+ ShortPrint();
+ PrintF(" -- already in optimization queue.\n");
+ }
+ return;
+ }
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Marking ");
+ ShortPrint();
+ PrintF(" for concurrent recompilation.\n");
+ }
}
- set_code_no_write_barrier(
- isolate->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
- // No write barrier required, since the builtin is part of the root set.
- if (FLAG_mark_shared_functions_for_tier_up) {
- // TODO(leszeks): The compilation isn't concurrent if we trigger it using
- // this bit.
- shared()->set_marked_for_tier_up(true);
+ if (!IsInterpreted()) {
+ // For non I+TF path, install a shim which checks the optimization marker.
+ // No write barrier required, since the builtin is part of the root set.
+ set_code_no_write_barrier(
+ isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
}
+ SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
+ ? OptimizationMarker::kCompileOptimizedConcurrent
+ : OptimizationMarker::kCompileOptimized);
}
// static
@@ -12197,14 +12260,16 @@ static void GetMinInobjectSlack(Map* map, void* data) {
static void ShrinkInstanceSize(Map* map, void* data) {
+#ifdef DEBUG
+ int old_visitor_id = Map::GetVisitorId(map);
+#endif
int slack = *reinterpret_cast<int*>(data);
+ DCHECK_GE(slack, 0);
map->SetInObjectProperties(map->GetInObjectProperties() - slack);
map->set_unused_property_fields(map->unused_property_fields() - slack);
map->set_instance_size(map->instance_size() - slack * kPointerSize);
map->set_construction_counter(Map::kNoSlackTracking);
-
- // Visitor id might depend on the instance size, recalculate it.
- map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
+ DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
}
static void StopSlackTracking(Map* map, void* data) {
@@ -12253,16 +12318,15 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
if (current_map->should_be_fast_prototype_map()) return;
Handle<Map> map(current_map);
Map::SetShouldBeFastPrototypeMap(map, true, isolate);
- JSObject::OptimizeAsPrototype(current_obj, FAST_PROTOTYPE);
+ JSObject::OptimizeAsPrototype(current_obj);
}
}
}
// static
-void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
- PrototypeOptimizationMode mode) {
+void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
if (object->IsJSGlobalObject()) return;
- if (mode == FAST_PROTOTYPE && PrototypeBenefitsFromNormalization(object)) {
+ if (PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
@@ -12302,7 +12366,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
if (!object->map()->is_prototype_map()) return;
if (!object->map()->should_be_fast_prototype_map()) return;
- OptimizeAsPrototype(object, FAST_PROTOTYPE);
+ OptimizeAsPrototype(object);
}
@@ -12509,14 +12573,13 @@ Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSObject> prototype,
}
// static
-void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
- PrototypeOptimizationMode proto_mode) {
+void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype) {
RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
bool is_hidden = false;
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
- JSObject::OptimizeAsPrototype(prototype_jsobj, proto_mode);
+ JSObject::OptimizeAsPrototype(prototype_jsobj);
Object* maybe_constructor = prototype_jsobj->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
@@ -12612,11 +12675,9 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
function->set_prototype_or_initial_map(*value);
if (value->IsJSObject()) {
// Optimize as prototype to detach it from its transition tree.
- JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value),
- FAST_PROTOTYPE);
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
}
}
- isolate->heap()->ClearInstanceofCache();
}
} // anonymous namespace
@@ -12661,34 +12722,9 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
}
-bool JSFunction::RemovePrototype() {
- Context* native_context = context()->native_context();
- Map* no_prototype_map =
- is_strict(shared()->language_mode())
- ? native_context->strict_function_without_prototype_map()
- : native_context->sloppy_function_without_prototype_map();
-
- if (map() == no_prototype_map) return true;
-
-#ifdef DEBUG
- if (map() != (is_strict(shared()->language_mode())
- ? native_context->strict_function_map()
- : native_context->sloppy_function_map())) {
- return false;
- }
-#endif
-
- set_map(no_prototype_map);
- set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
- return true;
-}
-
-
void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<Object> prototype) {
- if (map->prototype() != *prototype) {
- Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
- }
+ if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
#if V8_TRACE_MAPS
@@ -12716,7 +12752,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_FUNCTION_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_MAP_ITERATOR_TYPE:
case JS_MAP_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_OBJECT_TYPE:
@@ -12724,7 +12759,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_ARGUMENTS_TYPE:
case JS_PROMISE_TYPE:
case JS_REGEXP_TYPE:
- case JS_SET_ITERATOR_TYPE:
case JS_SET_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
@@ -12911,7 +12945,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
DCHECK(context->IsNativeContext());
Handle<Object> maybe_index = JSReceiver::GetDataProperty(
constructor, isolate->factory()->native_context_index_symbol());
- int index = maybe_index->IsSmi() ? Smi::cast(*maybe_index)->value()
+ int index = maybe_index->IsSmi() ? Smi::ToInt(*maybe_index)
: Context::OBJECT_FUNCTION_INDEX;
Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)));
prototype = handle(realm_constructor->prototype(), isolate);
@@ -12920,9 +12954,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
map->set_new_target_is_base(false);
DCHECK(prototype->IsJSReceiver());
- if (map->prototype() != *prototype) {
- Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
- }
+ if (map->prototype() != *prototype) Map::SetPrototype(map, prototype);
map->SetConstructor(*constructor);
return map;
}
@@ -12951,21 +12983,27 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
return JSFunction::GetName(function);
}
-void JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
+bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
Handle<String> prefix) {
Isolate* isolate = function->GetIsolate();
- Handle<String> function_name = Name::ToFunctionName(name).ToHandleChecked();
+ Handle<String> function_name;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name,
+ Name::ToFunctionName(name), false);
if (prefix->length() > 0) {
IncrementalStringBuilder builder(isolate);
builder.AppendString(prefix);
builder.AppendCharacter(' ');
builder.AppendString(function_name);
- function_name = builder.Finish().ToHandleChecked();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, function_name, builder.Finish(),
+ false);
}
- JSObject::DefinePropertyOrElementIgnoreAttributes(
- function, isolate->factory()->name_string(), function_name,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY))
- .ToHandleChecked();
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ JSObject::DefinePropertyOrElementIgnoreAttributes(
+ function, isolate->factory()->name_string(), function_name,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY)),
+ false);
+ return true;
}
namespace {
@@ -12976,14 +13014,11 @@ char const kNativeCodeSource[] = "function () { [native code] }";
Handle<String> NativeCodeFunctionSourceString(
Handle<SharedFunctionInfo> shared_info) {
Isolate* const isolate = shared_info->GetIsolate();
- if (shared_info->name()->IsString()) {
- IncrementalStringBuilder builder(isolate);
- builder.AppendCString("function ");
- builder.AppendString(handle(String::cast(shared_info->name()), isolate));
- builder.AppendCString("() { [native code] }");
- return builder.Finish().ToHandleChecked();
- }
- return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(handle(shared_info->name(), isolate));
+ builder.AppendCString("() { [native code] }");
+ return builder.Finish().ToHandleChecked();
}
} // namespace
@@ -13053,7 +13088,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
if (shared_info->name_should_print_as_anonymous()) {
builder.AppendCString("anonymous");
} else if (!shared_info->is_anonymous_expression()) {
- builder.AppendString(handle(String::cast(shared_info->name()), isolate));
+ builder.AppendString(handle(shared_info->name(), isolate));
}
}
builder.AppendString(Handle<String>::cast(shared_info->GetSourceCode()));
@@ -13175,7 +13210,7 @@ bool GetPositionInfoSlow(const Script* script, int position,
}
} // namespace
-#define SMI_VALUE(x) (Smi::cast(x)->value())
+#define SMI_VALUE(x) (Smi::ToInt(x))
bool Script::GetPositionInfo(int position, PositionInfo* info,
OffsetFlag offset_flag) const {
DisallowHeapAllocation no_allocation;
@@ -13333,9 +13368,6 @@ Script::Iterator::Iterator(Isolate* isolate)
Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
-bool Script::HasPreparsedScopeData() const {
- return preparsed_scope_data()->length() > 0;
-}
SharedFunctionInfo::ScriptIterator::ScriptIterator(Handle<Script> script)
: ScriptIterator(script->GetIsolate(),
@@ -13378,13 +13410,17 @@ SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
}
}
-
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
- Handle<Object> script_object) {
+ Handle<Object> script_object,
+ bool reset_preparsed_scope_data) {
DCHECK_NE(shared->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
if (shared->script() == *script_object) return;
Isolate* isolate = shared->GetIsolate();
+ if (reset_preparsed_scope_data) {
+ shared->set_preparsed_scope_data(isolate->heap()->null_value());
+ }
+
// Add shared function info to new script's list. If a collection occurs,
// the shared function info may be temporarily in two lists.
// This is okay because the gc-time processing of these lists can tolerate
@@ -13446,10 +13482,48 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
shared->set_script(*script_object);
}
+bool SharedFunctionInfo::HasBreakInfo() const {
+ if (!HasDebugInfo()) return false;
+ DebugInfo* info = DebugInfo::cast(debug_info());
+ bool has_break_info = info->HasBreakInfo();
+ DCHECK_IMPLIES(has_break_info, HasDebugCode());
+ return has_break_info;
+}
+
+bool SharedFunctionInfo::HasCoverageInfo() const {
+ if (!HasDebugInfo()) return false;
+ DebugInfo* info = DebugInfo::cast(debug_info());
+ bool has_coverage_info = info->HasCoverageInfo();
+ DCHECK_IMPLIES(has_coverage_info, FLAG_block_coverage);
+ return has_coverage_info;
+}
+
+CoverageInfo* SharedFunctionInfo::GetCoverageInfo() const {
+ DCHECK(HasCoverageInfo());
+ return CoverageInfo::cast(GetDebugInfo()->coverage_info());
+}
+
+DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
+ DCHECK(HasDebugInfo());
+ return DebugInfo::cast(debug_info());
+}
+
+int SharedFunctionInfo::debugger_hints() const {
+ if (HasDebugInfo()) return GetDebugInfo()->debugger_hints();
+ return Smi::ToInt(debug_info());
+}
+
+void SharedFunctionInfo::set_debugger_hints(int value) {
+ if (HasDebugInfo()) {
+ GetDebugInfo()->set_debugger_hints(value);
+ } else {
+ set_debug_info(Smi::FromInt(value));
+ }
+}
String* SharedFunctionInfo::DebugName() {
- Object* n = name();
- if (!n->IsString() || String::cast(n)->length() == 0) return inferred_name();
+ String* n = name();
+ if (String::cast(n)->length() == 0) return inferred_name();
return String::cast(n);
}
@@ -13601,9 +13675,9 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
if (!s->is_toplevel()) {
os << "function ";
- Object* name = s->name();
- if (name->IsString() && String::cast(name)->length() > 0) {
- String::cast(name)->PrintUC16(os);
+ String* name = s->name();
+ if (name->length() > 0) {
+ name->PrintUC16(os);
}
}
@@ -13619,38 +13693,6 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
}
-static bool IsCodeEquivalent(Code* code, Code* recompiled) {
- if (code->instruction_size() != recompiled->instruction_size()) return false;
- ByteArray* code_relocation = code->relocation_info();
- ByteArray* recompiled_relocation = recompiled->relocation_info();
- int length = code_relocation->length();
- if (length != recompiled_relocation->length()) return false;
- int compare = memcmp(code_relocation->GetDataStartAddress(),
- recompiled_relocation->GetDataStartAddress(),
- length);
- return compare == 0;
-}
-
-
-void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
- DCHECK(!has_deoptimization_support());
- DisallowHeapAllocation no_allocation;
- Code* code = this->code();
- if (IsCodeEquivalent(code, recompiled)) {
- // Copy the deoptimization data from the recompiled code.
- code->set_deoptimization_data(recompiled->deoptimization_data());
- code->set_has_deoptimization_support(true);
- } else {
- // TODO(3025757): In case the recompiled isn't equivalent to the
- // old code, we have to replace it. We should try to avoid this
- // altogether because it flushes valuable type feedback by
- // effectively resetting all IC state.
- ReplaceCode(recompiled);
- }
- DCHECK(has_deoptimization_support());
-}
-
-
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
@@ -13689,7 +13731,9 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
- shared_info->set_kind(lit->kind());
+ // shared_info->set_kind(lit->kind());
+ // FunctionKind must have already been set.
+ DCHECK(lit->kind() == shared_info->kind());
if (!IsConstructable(lit->kind())) {
shared_info->SetConstructStub(
*shared_info->GetIsolate()->builtins()->ConstructedNonConstructable());
@@ -13706,11 +13750,24 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_length(lit->function_length());
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(lit);
+ DCHECK_NULL(lit->produced_preparsed_scope_data());
} else {
// Set an invalid length for lazy functions. This way we can set the correct
// value after compiling, but avoid overwriting values set manually by the
// bootstrapper.
shared_info->set_length(SharedFunctionInfo::kInvalidLength);
+ if (FLAG_experimental_preparser_scope_analysis) {
+ ProducedPreParsedScopeData* scope_data =
+ lit->produced_preparsed_scope_data();
+ if (scope_data != nullptr) {
+ MaybeHandle<PreParsedScopeData> maybe_data =
+ scope_data->Serialize(shared_info->GetIsolate());
+ if (!maybe_data.is_null()) {
+ Handle<PreParsedScopeData> data = maybe_data.ToHandleChecked();
+ shared_info->set_preparsed_scope_data(*data);
+ }
+ }
+ }
}
}
@@ -13729,16 +13786,6 @@ void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
set_expected_nof_properties(estimate);
}
-bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
- DCHECK(!id.IsNone());
- Code* unoptimized = code();
- DeoptimizationOutputData* data =
- DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
- unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this);
- USE(ignore);
- return true; // Return true if there was no DCHECK.
-}
-
void SharedFunctionInfo::SetConstructStub(Code* code) {
if (code->kind() == Code::BUILTIN) code->set_is_construct_stub(true);
set_construct_stub(code);
@@ -13754,23 +13801,13 @@ void Map::StartInobjectSlackTracking() {
void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
code()->ClearInlineCaches();
set_ic_age(new_ic_age);
- if (code()->kind() == Code::FUNCTION) {
- code()->set_profiler_ticks(0);
- if (optimization_disabled() && deopt_count() >= FLAG_max_deopt_count) {
- // Re-enable optimizations if they were disabled due to deopt_count limit.
- set_optimization_disabled(false);
- }
- set_opt_count(0);
- set_deopt_count(0);
- } else if (IsInterpreted()) {
- set_profiler_ticks(0);
- if (optimization_disabled() && deopt_count() >= FLAG_max_deopt_count) {
- // Re-enable optimizations if they were disabled due to deopt_count limit.
- set_optimization_disabled(false);
- }
- set_opt_count(0);
- set_deopt_count(0);
+ set_profiler_ticks(0);
+ if (optimization_disabled() && deopt_count() >= FLAG_max_deopt_count) {
+ // Re-enable optimizations if they were disabled due to deopt_count limit.
+ set_optimization_disabled(false);
}
+ set_opt_count(0);
+ set_deopt_count(0);
}
void ObjectVisitor::VisitCodeTarget(Code* host, RelocInfo* rinfo) {
@@ -13981,8 +14018,7 @@ void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
void Code::ClearInlineCaches() {
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
@@ -14078,7 +14114,7 @@ void JSFunction::ClearTypeFeedbackInfo() {
}
}
-BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
+BailoutId Code::TranslatePcOffsetToBytecodeOffset(uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
DCHECK(kind() == FUNCTION);
BackEdgeTable back_edges(this, &no_gc);
@@ -14088,13 +14124,12 @@ BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
return BailoutId::None();
}
-
-uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
+uint32_t Code::TranslateBytecodeOffsetToPcOffset(BailoutId bytecode_offset) {
DisallowHeapAllocation no_gc;
DCHECK(kind() == FUNCTION);
BackEdgeTable back_edges(this, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
- if (back_edges.ast_id(i) == ast_id) return back_edges.pc_offset(i);
+ if (back_edges.ast_id(i) == bytecode_offset) return back_edges.pc_offset(i);
}
UNREACHABLE(); // We expect to find the back edge.
return 0;
@@ -14206,7 +14241,6 @@ Code::Age Code::GetAgeOfCodeAgeStub(Code* code) {
return kToBeExecutedOnceCodeAge;
}
UNREACHABLE();
- return kNoAgeCodeAge;
}
Code* Code::GetCodeAgeStub(Isolate* isolate, Age age) {
@@ -14259,7 +14293,7 @@ bool Code::CanDeoptAt(Address pc) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address address = code_start_address + deopt_data->Pc(i)->value();
- if (address == pc && deopt_data->AstId(i) != BailoutId::None()) {
+ if (address == pc && deopt_data->BytecodeOffset(i) != BailoutId::None()) {
return true;
}
}
@@ -14276,7 +14310,6 @@ const char* Code::Kind2String(Kind kind) {
case NUMBER_OF_KINDS: break;
}
UNREACHABLE();
- return NULL;
}
// Identify kind of code.
@@ -14285,7 +14318,6 @@ const char* AbstractCode::Kind2String(Kind kind) {
return Code::Kind2String((Code::Kind)kind);
if (kind == AbstractCode::INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
UNREACHABLE();
- return NULL;
}
Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
@@ -14330,7 +14362,6 @@ const char* Code::ICState2String(InlineCacheState state) {
return "GENERIC";
}
UNREACHABLE();
- return NULL;
}
void Code::PrintExtraICState(std::ostream& os, // NOLINT
@@ -14348,6 +14379,16 @@ void Code::PrintExtraICState(std::ostream& os, // NOLINT
#ifdef ENABLE_DISASSEMBLER
+namespace {
+void print_pc(std::ostream& os, int pc) {
+ if (pc == -1) {
+ os << "NA";
+ } else {
+ os << std::hex << pc << std::dec;
+ }
+}
+} // anonymous namespace
+
void DeoptimizationInputData::DeoptimizationInputDataPrint(
std::ostream& os) { // NOLINT
disasm::NameConverter converter;
@@ -14361,19 +14402,24 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
int deopt_count = DeoptCount();
os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
if (0 != deopt_count) {
- os << " index ast id argc pc";
+ os << " index bytecode-offset trampoline_pc pc";
if (FLAG_print_code_verbose) os << " commands";
os << "\n";
}
for (int i = 0; i < deopt_count; i++) {
- os << std::setw(6) << i << " " << std::setw(6) << AstId(i).ToInt() << " "
- << std::setw(6) << ArgumentsStackHeight(i)->value() << " "
- << std::setw(6) << Pc(i)->value();
+ os << std::setw(6) << i << " " << std::setw(15)
+ << BytecodeOffset(i).ToInt() << " " << std::setw(13);
+
+ print_pc(os, TrampolinePc(i)->value());
+ os << std::setw(7);
+ print_pc(os, Pc(i)->value());
+ os << std::setw(2);
if (!FLAG_print_code_verbose) {
os << "\n";
continue;
}
+
// Print details of the frame translation.
int translation_index = TranslationIndex(i)->value();
TranslationIterator iterator(TranslationByteArray(), translation_index);
@@ -14389,24 +14435,13 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
while (iterator.HasNext() &&
Translation::BEGIN !=
(opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
- os << std::setw(31) << " " << Translation::StringFor(opcode) << " ";
+ os << std::setw(47) << " " << Translation::StringFor(opcode) << " ";
switch (opcode) {
case Translation::BEGIN:
UNREACHABLE();
break;
- case Translation::JS_FRAME: {
- int ast_id = iterator.Next();
- int shared_info_id = iterator.Next();
- unsigned height = iterator.Next();
- Object* shared_info = LiteralArray()->get(shared_info_id);
- os << "{ast_id=" << ast_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << ", height=" << height << "}";
- break;
- }
-
case Translation::INTERPRETED_FRAME: {
int bytecode_offset = iterator.Next();
int shared_info_id = iterator.Next();
@@ -14429,28 +14464,25 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
- case Translation::COMPILED_STUB_FRAME: {
- Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
- os << "{kind=" << stub_kind << "}";
- break;
- }
-
- case Translation::ARGUMENTS_ADAPTOR_FRAME: {
+ case Translation::BUILTIN_CONTINUATION_FRAME:
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
+ int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
Object* shared_info = LiteralArray()->get(shared_info_id);
unsigned height = iterator.Next();
- os << "{function="
+ os << "{bailout_id=" << bailout_id << ", function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
<< ", height=" << height << "}";
break;
}
- case Translation::TAIL_CALLER_FRAME: {
+ case Translation::ARGUMENTS_ADAPTOR_FRAME: {
int shared_info_id = iterator.Next();
Object* shared_info = LiteralArray()->get(shared_info_id);
+ unsigned height = iterator.Next();
os << "{function="
<< Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
- << "}";
+ << ", height=" << height << "}";
break;
}
@@ -14559,7 +14591,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
- case Translation::ARGUMENTS_OBJECT:
case Translation::CAPTURED_OBJECT: {
int args_length = iterator.Next();
os << "{length=" << args_length << "}";
@@ -14572,33 +14603,15 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
}
-void DeoptimizationOutputData::DeoptimizationOutputDataPrint(
- std::ostream& os) { // NOLINT
- os << "Deoptimization Output Data (deopt points = " << this->DeoptPoints()
- << ")\n";
- if (this->DeoptPoints() == 0) return;
-
- os << "ast id pc state\n";
- for (int i = 0; i < this->DeoptPoints(); i++) {
- int pc_and_state = this->PcAndState(i)->value();
- os << std::setw(6) << this->AstId(i).ToInt() << " " << std::setw(8)
- << FullCodeGenerator::PcField::decode(pc_and_state) << " "
- << Deoptimizer::BailoutStateToString(
- FullCodeGenerator::BailoutStateField::decode(pc_and_state))
- << "\n";
- }
-}
-
-
void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
os << " from to hdlr\n";
for (int i = 0; i < length(); i += kRangeEntrySize) {
- int pc_start = Smi::cast(get(i + kRangeStartIndex))->value();
- int pc_end = Smi::cast(get(i + kRangeEndIndex))->value();
- int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int pc_start = Smi::ToInt(get(i + kRangeStartIndex));
+ int pc_end = Smi::ToInt(get(i + kRangeEndIndex));
+ int handler_field = Smi::ToInt(get(i + kRangeHandlerIndex));
int handler_offset = HandlerOffsetField::decode(handler_field);
CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int data = Smi::cast(get(i + kRangeDataIndex))->value();
+ int data = Smi::ToInt(get(i + kRangeDataIndex));
os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
<< ") -> " << std::setw(4) << handler_offset
<< " (prediction=" << prediction << ", data=" << data << ")\n";
@@ -14609,8 +14622,8 @@ void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
os << " off hdlr (c)\n";
for (int i = 0; i < length(); i += kReturnEntrySize) {
- int pc_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
- int handler_field = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ int pc_offset = Smi::ToInt(get(i + kReturnOffsetIndex));
+ int handler_field = Smi::ToInt(get(i + kReturnHandlerIndex));
int handler_offset = HandlerOffsetField::decode(handler_field);
CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
@@ -14626,8 +14639,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "major_key = " << (n == NULL ? "null" : n) << "\n";
}
if (is_inline_cache_stub()) {
- if (is_compare_ic_stub() || is_to_boolean_ic_stub() ||
- is_binary_op_stub()) {
+ if (is_compare_ic_stub()) {
InlineCacheState ic_state = IC::StateFromCode(this);
os << "ic_state = " << ICState2String(ic_state) << "\n";
PrintExtraICState(os, kind(), extra_ic_state());
@@ -14643,13 +14655,15 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
if ((name != nullptr) && (name[0] != '\0')) {
os << "name = " << name << "\n";
- } else if (kind() == BUILTIN) {
- name = GetIsolate()->builtins()->Lookup(instruction_start());
+ } else if (kind() == BYTECODE_HANDLER) {
+ name = GetIsolate()->interpreter()->LookupNameOfBytecodeHandler(this);
if (name != nullptr) {
os << "name = " << name << "\n";
}
- } else if (kind() == BYTECODE_HANDLER) {
- name = GetIsolate()->interpreter()->LookupNameOfBytecodeHandler(this);
+ } else {
+ // There are some handlers and ICs that we can also find names for with
+ // Builtins::Lookup.
+ name = GetIsolate()->builtins()->Lookup(instruction_start());
if (name != nullptr) {
os << "name = " << name << "\n";
}
@@ -14709,11 +14723,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "\n";
}
- if (kind() == FUNCTION) {
- DeoptimizationOutputData* data =
- DeoptimizationOutputData::cast(this->deoptimization_data());
- data->DeoptimizationOutputDataPrint(os);
- } else if (kind() == OPTIMIZED_FUNCTION) {
+ if (kind() == OPTIMIZED_FUNCTION) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(this->deoptimization_data());
data->DeoptimizationInputDataPrint(os);
@@ -15189,18 +15199,15 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
return "allocation-site-transition-changed";
}
UNREACHABLE();
- return "?";
}
-
Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
- Handle<Object> prototype,
- PrototypeOptimizationMode mode) {
+ Handle<Object> prototype) {
Handle<Map> new_map = TransitionArray::GetPrototypeTransition(map, prototype);
if (new_map.is_null()) {
new_map = Copy(map, "TransitionToPrototype");
TransitionArray::PutPrototypeTransition(map, prototype, new_map);
- Map::SetPrototype(new_map, prototype, mode);
+ Map::SetPrototype(new_map, prototype);
}
return new_map;
}
@@ -15311,7 +15318,6 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
DCHECK(!object->IsAccessCheckNeeded());
}
- Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
@@ -15375,13 +15381,10 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
isolate->UpdateArrayProtectorOnSetPrototype(real_receiver);
- PrototypeOptimizationMode mode =
- from_javascript ? REGULAR_PROTOTYPE : FAST_PROTOTYPE;
- Handle<Map> new_map = Map::TransitionToPrototype(map, value, mode);
+ Handle<Map> new_map = Map::TransitionToPrototype(map, value);
DCHECK(new_map->prototype() == *value);
JSObject::MigrateToMap(real_receiver, new_map);
- heap->ClearInstanceofCache();
DCHECK(size == object->Size());
return Just(true);
}
@@ -15395,7 +15398,7 @@ void JSObject::SetImmutableProto(Handle<JSObject> object) {
if (map->is_immutable_proto()) return;
Handle<Map> new_map = Map::TransitionToImmutableProto(map);
- object->set_map(*new_map);
+ object->synchronized_set_map(*new_map);
}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
@@ -15415,12 +15418,10 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
return ElementsAccessor::ForKind(GetElementsKind());
}
-
-void JSObject::ValidateElements(Handle<JSObject> object) {
+void JSObject::ValidateElements(JSObject* object) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
- ElementsAccessor* accessor = object->GetElementsAccessor();
- accessor->Validate(object);
+ object->GetElementsAccessor()->Validate(object);
}
#endif
}
@@ -15474,15 +15475,15 @@ static ElementsKind BestFittingFastElementsKind(JSObject* object) {
}
DCHECK(object->HasDictionaryElements());
SeededNumberDictionary* dictionary = object->element_dictionary();
- ElementsKind kind = FAST_HOLEY_SMI_ELEMENTS;
+ ElementsKind kind = HOLEY_SMI_ELEMENTS;
for (int i = 0; i < dictionary->Capacity(); i++) {
Object* key = dictionary->KeyAt(i);
if (key->IsNumber()) {
Object* value = dictionary->ValueAt(i);
- if (!value->IsNumber()) return FAST_HOLEY_ELEMENTS;
+ if (!value->IsNumber()) return HOLEY_ELEMENTS;
if (!value->IsSmi()) {
- if (!FLAG_unbox_double_arrays) return FAST_HOLEY_ELEMENTS;
- kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ if (!FLAG_unbox_double_arrays) return HOLEY_ELEMENTS;
+ kind = HOLEY_DOUBLE_ELEMENTS;
}
}
}
@@ -15504,7 +15505,7 @@ static bool ShouldConvertToFastElements(JSObject* object,
if (object->IsJSArray()) {
Object* length = JSArray::cast(object)->length();
if (!length->IsSmi()) return false;
- *new_capacity = static_cast<uint32_t>(Smi::cast(length)->value());
+ *new_capacity = static_cast<uint32_t>(Smi::ToInt(length));
} else if (object->IsJSSloppyArgumentsObject()) {
return false;
} else {
@@ -15572,7 +15573,8 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
}
ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
+ if (IsHoleyOrDictionaryElementsKind(kind) || !object->IsJSArray() ||
+ index > old_length) {
to = GetHoleyElementsKind(to);
kind = GetHoleyElementsKind(kind);
}
@@ -15609,15 +15611,13 @@ void AllocationSite::ResetPretenureDecision() {
set_memento_create_count(0);
}
-
-PretenureFlag AllocationSite::GetPretenureMode() {
+PretenureFlag AllocationSite::GetPretenureMode() const {
PretenureDecision mode = pretenure_decision();
// Zombie objects "decide" to be untenured.
return mode == kTenure ? TENURED : NOT_TENURED;
}
-
-bool AllocationSite::IsNestedSite() {
+bool AllocationSite::IsNested() {
DCHECK(FLAG_trace_track_allocation_sites);
Object* current = GetHeap()->allocation_sites_list();
while (current->IsAllocationSite()) {
@@ -15636,42 +15636,39 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
Isolate* isolate = site->GetIsolate();
bool result = false;
- if (site->SitePointsToLiteral() && site->transition_info()->IsJSArray()) {
- Handle<JSArray> transition_info =
- handle(JSArray::cast(site->transition_info()));
- ElementsKind kind = transition_info->GetElementsKind();
+ if (site->PointsToLiteral() && site->boilerplate()->IsJSArray()) {
+ Handle<JSArray> boilerplate(JSArray::cast(site->boilerplate()), isolate);
+ ElementsKind kind = boilerplate->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
- if (IsHoleyElementsKind(kind)) {
+ if (IsHoleyOrDictionaryElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
- CHECK(transition_info->length()->ToArrayLength(&length));
+ CHECK(boilerplate->length()->ToArrayLength(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
return true;
}
if (FLAG_trace_track_allocation_sites) {
- bool is_nested = site->IsNestedSite();
- PrintF(
- "AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
- reinterpret_cast<void*>(*site),
- is_nested ? "(nested)" : "",
- ElementsKindToString(kind),
- ElementsKindToString(to_kind));
+ bool is_nested = site->IsNested();
+ PrintF("AllocationSite: JSArray %p boilerplate %supdated %s->%s\n",
+ reinterpret_cast<void*>(*site), is_nested ? "(nested)" : " ",
+ ElementsKindToString(kind), ElementsKindToString(to_kind));
}
- JSObject::TransitionElementsKind(transition_info, to_kind);
+ JSObject::TransitionElementsKind(boilerplate, to_kind);
site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
}
} else {
+ // The AllocationSite is for a constructed Array.
ElementsKind kind = site->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
- if (IsHoleyElementsKind(kind)) {
+ if (IsHoleyOrDictionaryElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
@@ -15691,13 +15688,9 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
return result;
}
-AllocationSiteMode AllocationSite::GetMode(ElementsKind from, ElementsKind to) {
- if (IsFastSmiElementsKind(from) &&
- IsMoreGeneralElementsKindTransition(from, to)) {
- return TRACK_ALLOCATION_SITE;
- }
-
- return DONT_TRACK_ALLOCATION_SITE;
+bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
+ return IsSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to);
}
const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
@@ -15725,7 +15718,7 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
DisallowHeapAllocation no_allocation;
AllocationMemento* memento =
- heap->FindAllocationMemento<Heap::kForRuntime>(*object);
+ heap->FindAllocationMemento<Heap::kForRuntime>(object->map(), *object);
if (memento == NULL) return false;
// Walk through to the Allocation Site
@@ -15746,7 +15739,7 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind) {
ElementsKind from_kind = object->GetElementsKind();
- if (IsFastHoleyElementsKind(from_kind)) {
+ if (IsHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
@@ -15759,8 +15752,7 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
UpdateAllocationSite(object, to_kind);
if (object->elements() == object->GetHeap()->empty_fixed_array() ||
- IsFastDoubleElementsKind(from_kind) ==
- IsFastDoubleElementsKind(to_kind)) {
+ IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
@@ -15770,10 +15762,8 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
} else {
- DCHECK((IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) ||
- (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)));
+ DCHECK((IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
+ (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
uint32_t c = static_cast<uint32_t>(object->elements()->length());
ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
}
@@ -15789,8 +15779,7 @@ bool Map::IsValidElementsTransition(ElementsKind from_kind,
}
// Transitions from HOLEY -> PACKED are not allowed.
- return !IsFastHoleyElementsKind(from_kind) ||
- IsFastHoleyElementsKind(to_kind);
+ return !IsHoleyElementsKind(from_kind) || IsHoleyElementsKind(to_kind);
}
@@ -15820,13 +15809,11 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
return false;
}
-
template <typename BackingStore>
-static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
+static int HoleyElementsUsage(JSObject* object, BackingStore* store) {
Isolate* isolate = store->GetIsolate();
- int limit = object->IsJSArray()
- ? Smi::cast(JSArray::cast(object)->length())->value()
- : store->length();
+ int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
+ : store->length();
int used = 0;
for (int i = 0; i < limit; ++i) {
if (!store->is_the_hole(isolate, i)) ++used;
@@ -15834,25 +15821,24 @@ static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
return used;
}
-
int JSObject::GetFastElementsUsage() {
FixedArrayBase* store = elements();
switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- return IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value()
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case PACKED_ELEMENTS:
+ return IsJSArray() ? Smi::ToInt(JSArray::cast(this)->length())
: store->length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
store = SloppyArgumentsElements::cast(store)->arguments();
// Fall through.
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
- return FastHoleyElementsUsage(this, FixedArray::cast(store));
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return HoleyElementsUsage(this, FixedArray::cast(store));
+ case HOLEY_DOUBLE_ELEMENTS:
if (elements()->length() == 0) return 0;
- return FastHoleyElementsUsage(this, FixedDoubleArray::cast(store));
+ return HoleyElementsUsage(this, FixedDoubleArray::cast(store));
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
@@ -15875,26 +15861,28 @@ int JSObject::GetFastElementsUsage() {
// together, so even though this function belongs in objects-debug.cc,
// we keep it here instead to satisfy certain compilers.
#ifdef OBJECT_PRINT
-template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::Print(std::ostream& os) {
+ DisallowHeapAllocation no_gc;
Isolate* isolate = this->GetIsolate();
- int capacity = this->Capacity();
+ Derived* dictionary = Derived::cast(this);
+ int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (this->IsKey(isolate, k)) {
- os << "\n ";
- if (k->IsString()) {
- String::cast(k)->StringPrint(os);
- } else {
- os << Brief(k);
- }
- os << ": " << Brief(this->ValueAt(i)) << " ";
- this->DetailsAt(i).PrintAsSlowTo(os);
+ Object* k = dictionary->KeyAt(i);
+ if (!Shape::IsLive(isolate, k)) continue;
+ if (!dictionary->ToKey(isolate, i, &k)) continue;
+ os << "\n ";
+ if (k->IsString()) {
+ String::cast(k)->StringPrint(os);
+ } else {
+ os << Brief(k);
}
+ os << ": " << Brief(dictionary->ValueAt(i)) << " ";
+ dictionary->DetailsAt(i).PrintAsSlowTo(os);
}
}
-template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::Print() {
+template <typename Derived, typename Shape>
+void Dictionary<Derived, Shape>::Print() {
OFStream os(stdout);
Print(os);
}
@@ -15938,27 +15926,36 @@ int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
ElementsKindToShiftSize(kind));
}
+bool FixedArrayBase::IsCowArray() const {
+ return map() == GetHeap()->fixed_cow_array_map();
+}
+
bool JSObject::WasConstructedFromApiFunction() {
auto instance_type = map()->instance_type();
bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
+ bool is_wasm_object =
+ instance_type == WASM_MEMORY_TYPE || instance_type == WASM_MODULE_TYPE ||
+ instance_type == WASM_INSTANCE_TYPE || instance_type == WASM_TABLE_TYPE;
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
Object* maybe_constructor = map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- if (constructor->shared()->IsApiFunction()) {
- DCHECK(is_api_object);
- } else {
- DCHECK(!is_api_object);
- }
+ DCHECK_EQ(constructor->shared()->IsApiFunction(),
+ is_api_object || is_wasm_object);
} else if (maybe_constructor->IsFunctionTemplateInfo()) {
- DCHECK(is_api_object);
+ DCHECK(is_api_object || is_wasm_object);
} else {
return false;
}
}
#endif
+ // TODO(titzer): Clean this up somehow. WebAssembly objects should not be
+ // considered "constructed from API functions" even though they have
+ // function template info, since that would make the V8 GC identify them to
+ // the embedder, e.g. the Oilpan GC.
+ USE(is_wasm_object);
return is_api_object;
}
@@ -16002,7 +15999,9 @@ class StringSharedKey : public HashTableKey {
// dynamic function's effective source where the ')' ends the parameters.
StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
LanguageMode language_mode, int position)
- : source_(source),
+ : HashTableKey(CompilationCacheShape::StringSharedHash(
+ *source, *shared, language_mode, position)),
+ source_(source),
shared_(shared),
language_mode_(language_mode),
position_(position) {}
@@ -16010,70 +16009,30 @@ class StringSharedKey : public HashTableKey {
bool IsMatch(Object* other) override {
DisallowHeapAllocation no_allocation;
if (!other->IsFixedArray()) {
- if (!other->IsNumber()) return false;
+ DCHECK(other->IsNumber());
uint32_t other_hash = static_cast<uint32_t>(other->Number());
return Hash() == other_hash;
}
FixedArray* other_array = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
if (shared != *shared_) return false;
- int language_unchecked = Smi::cast(other_array->get(2))->value();
+ int language_unchecked = Smi::ToInt(other_array->get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
if (language_mode != language_mode_) return false;
- int position = Smi::cast(other_array->get(3))->value();
+ int position = Smi::ToInt(other_array->get(3));
if (position != position_) return false;
String* source = String::cast(other_array->get(1));
return source->Equals(*source_);
}
- static uint32_t StringSharedHashHelper(String* source,
- SharedFunctionInfo* shared,
- LanguageMode language_mode,
- int position) {
- uint32_t hash = source->Hash();
- if (shared->HasSourceCode()) {
- // Instead of using the SharedFunctionInfo pointer in the hash
- // code computation, we use a combination of the hash of the
- // script source code and the start position of the calling scope.
- // We do this to ensure that the cache entries can survive garbage
- // collection.
- Script* script(Script::cast(shared->script()));
- hash ^= String::cast(script->source())->Hash();
- STATIC_ASSERT(LANGUAGE_END == 2);
- if (is_strict(language_mode)) hash ^= 0x8000;
- hash += position;
- }
- return hash;
- }
-
- uint32_t Hash() override {
- return StringSharedHashHelper(*source_, *shared_, language_mode_,
- position_);
- }
-
- uint32_t HashForObject(Object* obj) override {
- DisallowHeapAllocation no_allocation;
- if (obj->IsNumber()) {
- return static_cast<uint32_t>(obj->Number());
- }
- FixedArray* other_array = FixedArray::cast(obj);
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
- String* source = String::cast(other_array->get(1));
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- DCHECK(is_valid_language_mode(language_unchecked));
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- int position = Smi::cast(other_array->get(3))->value();
- return StringSharedHashHelper(source, shared, language_mode, position);
- }
-
-
- Handle<Object> AsHandle(Isolate* isolate) override {
+ Handle<Object> AsHandle(Isolate* isolate) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
array->set(0, *shared_);
array->set(1, *source_);
array->set(2, Smi::FromInt(language_mode_));
array->set(3, Smi::FromInt(position_));
+ array->set_map(isolate->heap()->fixed_cow_array_map());
return array;
}
@@ -16095,7 +16054,6 @@ const char* JSPromise::Status(int status) {
return "rejected";
}
UNREACHABLE();
- return NULL;
}
namespace {
@@ -16290,7 +16248,10 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
class RegExpKey : public HashTableKey {
public:
RegExpKey(Handle<String> string, JSRegExp::Flags flags)
- : string_(string), flags_(Smi::FromInt(flags)) {}
+ : HashTableKey(
+ CompilationCacheShape::RegExpHash(*string, Smi::FromInt(flags))),
+ string_(string),
+ flags_(Smi::FromInt(flags)) {}
// Rather than storing the key in the hash table, a pointer to the
// stored value is stored where the key should be. IsMatch then
@@ -16302,46 +16263,21 @@ class RegExpKey : public HashTableKey {
&& (flags_ == val->get(JSRegExp::kFlagsIndex));
}
- uint32_t Hash() override { return RegExpHash(*string_, flags_); }
-
- Handle<Object> AsHandle(Isolate* isolate) override {
- // Plain hash maps, which is where regexp keys are used, don't
- // use this function.
- UNREACHABLE();
- return MaybeHandle<Object>().ToHandleChecked();
- }
-
- uint32_t HashForObject(Object* obj) override {
- FixedArray* val = FixedArray::cast(obj);
- return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
- Smi::cast(val->get(JSRegExp::kFlagsIndex)));
- }
-
- static uint32_t RegExpHash(String* string, Smi* flags) {
- return string->Hash() + flags->value();
- }
-
Handle<String> string_;
Smi* flags_;
};
-
-Handle<Object> OneByteStringKey::AsHandle(Isolate* isolate) {
- if (hash_field_ == 0) Hash();
- return isolate->factory()->NewOneByteInternalizedString(string_, hash_field_);
+Handle<String> OneByteStringKey::AsHandle(Isolate* isolate) {
+ return isolate->factory()->NewOneByteInternalizedString(string_, HashField());
}
-
-Handle<Object> TwoByteStringKey::AsHandle(Isolate* isolate) {
- if (hash_field_ == 0) Hash();
- return isolate->factory()->NewTwoByteInternalizedString(string_, hash_field_);
+Handle<String> TwoByteStringKey::AsHandle(Isolate* isolate) {
+ return isolate->factory()->NewTwoByteInternalizedString(string_, HashField());
}
-
-Handle<Object> SeqOneByteSubStringKey::AsHandle(Isolate* isolate) {
- if (hash_field_ == 0) Hash();
+Handle<String> SeqOneByteSubStringKey::AsHandle(Isolate* isolate) {
return isolate->factory()->NewOneByteInternalizedSubString(
- string_, from_, length_, hash_field_);
+ string_, from_, length_, HashField());
}
@@ -16352,22 +16288,22 @@ bool SeqOneByteSubStringKey::IsMatch(Object* string) {
// InternalizedStringKey carries a string/internalized-string object as key.
-class InternalizedStringKey : public HashTableKey {
+class InternalizedStringKey : public StringTableKey {
public:
explicit InternalizedStringKey(Handle<String> string)
- : string_(String::Flatten(string)) {}
-
- bool IsMatch(Object* string) override {
- return String::cast(string)->Equals(*string_);
+ : StringTableKey(0), string_(string) {
+ DCHECK(!string->IsInternalizedString());
+ DCHECK(string->IsFlat());
+ // Make sure hash_field is computed.
+ string->Hash();
+ set_hash_field(string->hash_field());
}
- uint32_t Hash() override { return string_->Hash(); }
-
- uint32_t HashForObject(Object* other) override {
- return String::cast(other)->Hash();
+ bool IsMatch(Object* string) override {
+ return string_->SlowEquals(String::cast(string));
}
- Handle<Object> AsHandle(Isolate* isolate) override {
+ Handle<String> AsHandle(Isolate* isolate) override {
// Internalize the string if possible.
MaybeHandle<Map> maybe_map =
isolate->factory()->InternalizedStringMapForString(string_);
@@ -16393,37 +16329,28 @@ class InternalizedStringKey : public HashTableKey {
string_, string_->length(), string_->hash_field());
}
- static uint32_t StringHash(Object* obj) {
- return String::cast(obj)->Hash();
- }
-
private:
Handle<String> string_;
};
-
-template<typename Derived, typename Shape, typename Key>
-void HashTable<Derived, Shape, Key>::IteratePrefix(ObjectVisitor* v) {
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::IteratePrefix(ObjectVisitor* v) {
BodyDescriptorBase::IteratePointers(this, 0, kElementsStartOffset, v);
}
-
-template<typename Derived, typename Shape, typename Key>
-void HashTable<Derived, Shape, Key>::IterateElements(ObjectVisitor* v) {
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::IterateElements(ObjectVisitor* v) {
BodyDescriptorBase::IteratePointers(this, kElementsStartOffset,
kHeaderSize + length() * kPointerSize, v);
}
-
-template<typename Derived, typename Shape, typename Key>
-Handle<Derived> HashTable<Derived, Shape, Key>::New(
- Isolate* isolate,
- int at_least_space_for,
- MinimumCapacity capacity_option,
- PretenureFlag pretenure) {
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::New(
+ Isolate* isolate, int at_least_space_for, PretenureFlag pretenure,
+ MinimumCapacity capacity_option) {
DCHECK(0 <= at_least_space_for);
DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
- base::bits::IsPowerOfTwo32(at_least_space_for));
+ base::bits::IsPowerOfTwo(at_least_space_for));
int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
? at_least_space_for
@@ -16431,13 +16358,12 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
if (capacity > HashTable::kMaxCapacity) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
- return New(isolate, capacity, pretenure);
+ return NewInternal(isolate, capacity, pretenure);
}
-template <typename Derived, typename Shape, typename Key>
-Handle<Derived> HashTable<Derived, Shape, Key>::New(Isolate* isolate,
- int capacity,
- PretenureFlag pretenure) {
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::NewInternal(
+ Isolate* isolate, int capacity, PretenureFlag pretenure) {
Factory* factory = isolate->factory();
int length = EntryToIndex(capacity);
Handle<FixedArray> array = factory->NewFixedArray(length, pretenure);
@@ -16450,83 +16376,40 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(Isolate* isolate,
return table;
}
-// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape>
-int NameDictionaryBase<Derived, Shape>::FindEntry(Handle<Name> key) {
- if (!key->IsUniqueName()) {
- return DerivedDictionary::FindEntry(key);
- }
-
- // Optimized for unique names. Knowledge of the key type allows:
- // 1. Move the check if the key is unique out of the loop.
- // 2. Avoid comparing hash codes in unique-to-unique comparison.
- // 3. Detect a case when a dictionary key is not unique but the key is.
- // In case of positive result the dictionary key may be replaced by the
- // internalized string with minimal performance penalty. It gives a chance
- // to perform further lookups in code stubs (and significant performance
- // boost a certain style of code).
-
- // EnsureCapacity will guarantee the hash table is never full.
- uint32_t capacity = this->Capacity();
- uint32_t entry = Derived::FirstProbe(key->Hash(), capacity);
- uint32_t count = 1;
- Isolate* isolate = this->GetIsolate();
- while (true) {
- Object* element = this->KeyAt(entry);
- if (element->IsUndefined(isolate)) break; // Empty entry.
- if (*key == element) return entry;
- DCHECK(element->IsTheHole(isolate) || element->IsUniqueName());
- entry = Derived::NextProbe(entry, count++, capacity);
- }
- return Derived::kNotFound;
-}
-
-
-template<typename Derived, typename Shape, typename Key>
-void HashTable<Derived, Shape, Key>::Rehash(
- Handle<Derived> new_table,
- Key key) {
- DCHECK(NumberOfElements() < new_table->Capacity());
-
+void HashTable<Derived, Shape>::Rehash(Derived* new_table) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
+ DCHECK_LT(NumberOfElements(), new_table->Capacity());
+
// Copy prefix to new array.
- for (int i = kPrefixStartIndex;
- i < kPrefixStartIndex + Shape::kPrefixSize;
- i++) {
+ for (int i = kPrefixStartIndex; i < kElementsStartIndex; i++) {
new_table->set(i, get(i), mode);
}
// Rehash the elements.
int capacity = this->Capacity();
- Heap* heap = new_table->GetHeap();
- Object* the_hole = heap->the_hole_value();
- Object* undefined = heap->undefined_value();
+ Isolate* isolate = new_table->GetIsolate();
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
Object* k = this->get(from_index);
- if (k != the_hole && k != undefined) {
- uint32_t hash = this->HashForObject(key, k);
- uint32_t insertion_index =
- EntryToIndex(new_table->FindInsertionEntry(hash));
- for (int j = 0; j < Shape::kEntrySize; j++) {
- new_table->set(insertion_index + j, get(from_index + j), mode);
- }
+ if (!Shape::IsLive(isolate, k)) continue;
+ uint32_t hash = Shape::HashForObject(isolate, k);
+ uint32_t insertion_index =
+ EntryToIndex(new_table->FindInsertionEntry(hash));
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ new_table->set(insertion_index + j, get(from_index + j), mode);
}
}
new_table->SetNumberOfElements(NumberOfElements());
new_table->SetNumberOfDeletedElements(0);
}
-
-template<typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::EntryForProbe(
- Key key,
- Object* k,
- int probe,
- uint32_t expected) {
- uint32_t hash = this->HashForObject(key, k);
+template <typename Derived, typename Shape>
+uint32_t HashTable<Derived, Shape>::EntryForProbe(Object* k, int probe,
+ uint32_t expected) {
+ uint32_t hash = Shape::HashForObject(GetIsolate(), k);
uint32_t capacity = this->Capacity();
uint32_t entry = FirstProbe(hash, capacity);
for (int i = 1; i < probe; i++) {
@@ -16536,11 +16419,9 @@ uint32_t HashTable<Derived, Shape, Key>::EntryForProbe(
return entry;
}
-
-template<typename Derived, typename Shape, typename Key>
-void HashTable<Derived, Shape, Key>::Swap(uint32_t entry1,
- uint32_t entry2,
- WriteBarrierMode mode) {
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::Swap(uint32_t entry1, uint32_t entry2,
+ WriteBarrierMode mode) {
int index1 = EntryToIndex(entry1);
int index2 = EntryToIndex(entry2);
Object* temp[Shape::kEntrySize];
@@ -16555,9 +16436,8 @@ void HashTable<Derived, Shape, Key>::Swap(uint32_t entry1,
}
}
-
-template<typename Derived, typename Shape, typename Key>
-void HashTable<Derived, Shape, Key>::Rehash(Key key) {
+template <typename Derived, typename Shape>
+void HashTable<Derived, Shape>::Rehash() {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
Isolate* isolate = GetIsolate();
@@ -16569,21 +16449,20 @@ void HashTable<Derived, Shape, Key>::Rehash(Key key) {
done = true;
for (uint32_t current = 0; current < capacity; current++) {
Object* current_key = KeyAt(current);
- if (IsKey(isolate, current_key)) {
- uint32_t target = EntryForProbe(key, current_key, probe, current);
- if (current == target) continue;
- Object* target_key = KeyAt(target);
- if (!IsKey(isolate, target_key) ||
- EntryForProbe(key, target_key, probe, target) != target) {
- // Put the current element into the correct position.
- Swap(current, target, mode);
- // The other element will be processed on the next iteration.
- current--;
- } else {
- // The place for the current element is occupied. Leave the element
- // for the next probe.
- done = false;
- }
+ if (!Shape::IsLive(isolate, current_key)) continue;
+ uint32_t target = EntryForProbe(current_key, probe, current);
+ if (current == target) continue;
+ Object* target_key = KeyAt(target);
+ if (!Shape::IsLive(isolate, target_key) ||
+ EntryForProbe(target_key, probe, target) != target) {
+ // Put the current element into the correct position.
+ Swap(current, target, mode);
+ // The other element will be processed on the next iteration.
+ current--;
+ } else {
+ // The place for the current element is occupied. Leave the element
+ // for the next probe.
+ done = false;
}
}
}
@@ -16592,19 +16471,15 @@ void HashTable<Derived, Shape, Key>::Rehash(Key key) {
Object* undefined = isolate->heap()->undefined_value();
for (uint32_t current = 0; current < capacity; current++) {
if (KeyAt(current) == the_hole) {
- set(EntryToIndex(current) + Derived::kEntryKeyIndex, undefined);
+ set(EntryToIndex(current) + kEntryKeyIndex, undefined);
}
}
SetNumberOfDeletedElements(0);
}
-
-template<typename Derived, typename Shape, typename Key>
-Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
- Handle<Derived> table,
- int n,
- Key key,
- PretenureFlag pretenure) {
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
+ Handle<Derived> table, int n, PretenureFlag pretenure) {
if (table->HasSufficientCapacityToAdd(n)) return table;
Isolate* isolate = table->GetIsolate();
@@ -16615,16 +16490,18 @@ Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
bool should_pretenure = pretenure == TENURED ||
((capacity > kMinCapacityForPretenure) &&
!isolate->heap()->InNewSpace(*table));
- Handle<Derived> new_table =
- HashTable::New(isolate, new_nof, USE_DEFAULT_MINIMUM_CAPACITY,
- should_pretenure ? TENURED : NOT_TENURED);
+ Handle<Derived> new_table = HashTable::New(
+ isolate, new_nof, should_pretenure ? TENURED : NOT_TENURED);
- table->Rehash(new_table, key);
+ table->Rehash(*new_table);
return new_table;
}
-template <typename Derived, typename Shape, typename Key>
-bool HashTable<Derived, Shape, Key>::HasSufficientCapacityToAdd(
+template bool
+HashTable<NameDictionary, NameDictionaryShape>::HasSufficientCapacityToAdd(int);
+
+template <typename Derived, typename Shape>
+bool HashTable<Derived, Shape>::HasSufficientCapacityToAdd(
int number_of_additional_elements) {
int capacity = Capacity();
int nof = NumberOfElements() + number_of_additional_elements;
@@ -16639,10 +16516,8 @@ bool HashTable<Derived, Shape, Key>::HasSufficientCapacityToAdd(
return false;
}
-
-template<typename Derived, typename Shape, typename Key>
-Handle<Derived> HashTable<Derived, Shape, Key>::Shrink(Handle<Derived> table,
- Key key) {
+template <typename Derived, typename Shape>
+Handle<Derived> HashTable<Derived, Shape>::Shrink(Handle<Derived> table) {
int capacity = table->Capacity();
int nof = table->NumberOfElements();
@@ -16664,24 +16539,21 @@ Handle<Derived> HashTable<Derived, Shape, Key>::Shrink(Handle<Derived> table,
Handle<Derived> new_table = HashTable::New(
isolate,
at_least_room_for,
- USE_DEFAULT_MINIMUM_CAPACITY,
pretenure ? TENURED : NOT_TENURED);
- table->Rehash(new_table, key);
+ table->Rehash(*new_table);
return new_table;
}
-
-template<typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::FindInsertionEntry(uint32_t hash) {
+template <typename Derived, typename Shape>
+uint32_t HashTable<Derived, Shape>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
Isolate* isolate = GetIsolate();
while (true) {
- Object* element = KeyAt(entry);
- if (!IsKey(isolate, element)) break;
+ if (!Shape::IsLive(isolate, KeyAt(entry))) break;
entry = NextProbe(entry, count++, capacity);
}
return entry;
@@ -16691,427 +16563,144 @@ uint32_t HashTable<Derived, Shape, Key>::FindInsertionEntry(uint32_t hash) {
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
-template class HashTable<StringTable, StringTableShape, HashTableKey*>;
+template class HashTable<StringTable, StringTableShape>;
-template class HashTable<CompilationCacheTable,
- CompilationCacheShape,
- HashTableKey*>;
+template class HashTable<CompilationCacheTable, CompilationCacheShape>;
-template class HashTable<ObjectHashTable,
- ObjectHashTableShape,
- Handle<Object> >;
+template class HashTable<ObjectHashTable, ObjectHashTableShape>;
-template class HashTable<WeakHashTable, WeakHashTableShape<2>, Handle<Object> >;
+template class HashTable<WeakHashTable, WeakHashTableShape<2>>;
-template class Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >;
+template class Dictionary<NameDictionary, NameDictionaryShape>;
-template class Dictionary<GlobalDictionary, GlobalDictionaryShape,
- Handle<Name> >;
+template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+ HashTable<SeededNumberDictionary, SeededNumberDictionaryShape>;
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>;
template class Dictionary<UnseededNumberDictionary,
- UnseededNumberDictionaryShape,
- uint32_t>;
-
-template void
-HashTable<GlobalDictionary, GlobalDictionaryShape, Handle<Name> >::Rehash(Handle<Name> key);
-
-template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::New(
- Isolate*, int at_least_space_for, PretenureFlag pretenure,
- MinimumCapacity capacity_option);
-
-template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::NewEmpty(Isolate*, PretenureFlag pretenure);
-
-template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
- uint32_t>::NewEmpty(Isolate*, PretenureFlag pretenure);
-
-template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
- uint32_t>::New(Isolate*, int at_least_space_for,
- PretenureFlag pretenure,
- MinimumCapacity capacity_option);
+ UnseededNumberDictionaryShape>;
template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::New(
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
-template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::NewEmpty(
- Isolate*, PretenureFlag pretenure);
-
template Handle<GlobalDictionary>
-Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::New(
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- AtPut(Handle<SeededNumberDictionary>, uint32_t, Handle<Object>);
+ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>::AtPut(
+ Handle<SeededNumberDictionary>, uint32_t, Handle<Object>,
+ PropertyDetails);
template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
- AtPut(Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>);
+ Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape>::AtPut(
+ Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>,
+ PropertyDetails);
template Object*
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- SlowReverseLookup(Object* value);
+Dictionary<SeededNumberDictionary,
+ SeededNumberDictionaryShape>::SlowReverseLookup(Object* value);
-template Object*
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
- SlowReverseLookup(Object* value);
+template Object* Dictionary<
+ NameDictionary, NameDictionaryShape>::SlowReverseLookup(Object* value);
-template Handle<Object>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::DeleteProperty(
+template Handle<NameDictionary>
+Dictionary<NameDictionary, NameDictionaryShape>::DeleteEntry(
Handle<NameDictionary>, int);
-template Handle<Object>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::DeleteProperty(Handle<SeededNumberDictionary>, int);
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>::DeleteEntry(
+ Handle<SeededNumberDictionary>, int);
+
+template Handle<UnseededNumberDictionary>
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape>::
+ DeleteEntry(Handle<UnseededNumberDictionary>, int);
-template Handle<Object>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
- uint32_t>::DeleteProperty(Handle<UnseededNumberDictionary>, int);
+template Handle<UnseededNumberDictionary>
+HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape>::New(
+ Isolate*, int, PretenureFlag, MinimumCapacity);
template Handle<NameDictionary>
-HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
- New(Isolate*, int, MinimumCapacity, PretenureFlag);
+HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
+ PretenureFlag,
+ MinimumCapacity);
-template Handle<ObjectHashSet> HashTable<ObjectHashSet, ObjectHashSetShape,
- Handle<Object>>::New(Isolate*, int n,
- MinimumCapacity,
- PretenureFlag);
+template Handle<ObjectHashSet>
+HashTable<ObjectHashSet, ObjectHashSetShape>::New(Isolate*, int n,
+ PretenureFlag,
+ MinimumCapacity);
-template Handle<NameDictionary>
-HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
- Shrink(Handle<NameDictionary>, Handle<Name>);
+template Handle<NameDictionary> HashTable<
+ NameDictionary, NameDictionaryShape>::Shrink(Handle<NameDictionary>);
template Handle<UnseededNumberDictionary>
- HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape,
- uint32_t>::Shrink(Handle<UnseededNumberDictionary>, uint32_t);
+ HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape>::Shrink(
+ Handle<UnseededNumberDictionary>);
template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::Add(
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::Add(
Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
int*);
template Handle<GlobalDictionary>
-Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::Add(
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::Add(
Handle<GlobalDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
int*);
+template void HashTable<GlobalDictionary, GlobalDictionaryShape>::Rehash();
+
template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::Add(
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>::Add(
Handle<SeededNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
int*);
template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
- uint32_t>::Add(Handle<UnseededNumberDictionary>, uint32_t,
- Handle<Object>, PropertyDetails, int*);
-
-template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- EnsureCapacity(Handle<SeededNumberDictionary>, int, uint32_t);
-
-template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
- EnsureCapacity(Handle<UnseededNumberDictionary>, int, uint32_t);
-
-template void Dictionary<NameDictionary, NameDictionaryShape,
- Handle<Name> >::SetRequiresCopyOnCapacityChange();
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape>::Add(
+ Handle<UnseededNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
+ int*);
template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
- EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
-
-template int NameDictionaryBase<NameDictionary, NameDictionaryShape>::FindEntry(
- Handle<Name>);
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
+ Handle<NameDictionary>, int);
-template int Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
- NumberOfElementsFilterAttributes(PropertyFilter filter);
+template int Dictionary<GlobalDictionary,
+ GlobalDictionaryShape>::NumberOfEnumerableProperties();
-template int Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::
- NumberOfElementsFilterAttributes(PropertyFilter filter);
+template int
+Dictionary<NameDictionary, NameDictionaryShape>::NumberOfEnumerableProperties();
template void
-Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
- CopyEnumKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
- Handle<Name>>>
- dictionary,
- Handle<FixedArray> storage, KeyCollectionMode mode,
- KeyAccumulator* accumulator);
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::CopyEnumKeysTo(
+ Handle<GlobalDictionary> dictionary, Handle<FixedArray> storage,
+ KeyCollectionMode mode, KeyAccumulator* accumulator);
template void
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CopyEnumKeysTo(
- Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
- dictionary,
- Handle<FixedArray> storage, KeyCollectionMode mode,
- KeyAccumulator* accumulator);
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::CopyEnumKeysTo(
+ Handle<NameDictionary> dictionary, Handle<FixedArray> storage,
+ KeyCollectionMode mode, KeyAccumulator* accumulator);
template Handle<FixedArray>
-Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
- IterationIndices(
- Handle<
- Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>>
- dictionary);
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::IterationIndices(
+ Handle<GlobalDictionary> dictionary);
template void
-Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
- CollectKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
- Handle<Name>>>
- dictionary,
- KeyAccumulator* keys);
+BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape>::CollectKeysTo(
+ Handle<GlobalDictionary> dictionary, KeyAccumulator* keys);
template Handle<FixedArray>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::IterationIndices(
- Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
- dictionary);
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::IterationIndices(
+ Handle<NameDictionary> dictionary);
template void
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CollectKeysTo(
- Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
- dictionary,
- KeyAccumulator* keys);
-
-template int
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::AddEntry(Handle<SeededNumberDictionary> dictionary,
- uint32_t key, Handle<Object> value,
- PropertyDetails details, uint32_t hash);
+BaseNameDictionary<NameDictionary, NameDictionaryShape>::CollectKeysTo(
+ Handle<NameDictionary> dictionary, KeyAccumulator* keys);
template int
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::NumberOfElementsFilterAttributes(PropertyFilter filter);
-
-Handle<Object> JSObject::PrepareSlowElementsForSort(
- Handle<JSObject> object, uint32_t limit) {
- DCHECK(object->HasDictionaryElements());
- Isolate* isolate = object->GetIsolate();
- // Must stay in dictionary mode, either because of requires_slow_elements,
- // or because we are not going to sort (and therefore compact) all of the
- // elements.
- Handle<SeededNumberDictionary> dict(object->element_dictionary(), isolate);
- Handle<SeededNumberDictionary> new_dict =
- SeededNumberDictionary::New(isolate, dict->NumberOfElements());
-
- uint32_t pos = 0;
- uint32_t undefs = 0;
- int capacity = dict->Capacity();
- Handle<Smi> bailout(Smi::FromInt(-1), isolate);
- // Entry to the new dictionary does not cause it to grow, as we have
- // allocated one that is large enough for all entries.
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < capacity; i++) {
- Object* k = dict->KeyAt(i);
- if (!dict->IsKey(isolate, k)) continue;
-
- DCHECK(k->IsNumber());
- DCHECK(!k->IsSmi() || Smi::cast(k)->value() >= 0);
- DCHECK(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
- DCHECK(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
-
- HandleScope scope(isolate);
- Handle<Object> value(dict->ValueAt(i), isolate);
- PropertyDetails details = dict->DetailsAt(i);
- if (details.kind() == kAccessor || details.IsReadOnly()) {
- // Bail out and do the sorting of undefineds and array holes in JS.
- // Also bail out if the element is not supposed to be moved.
- return bailout;
- }
-
- uint32_t key = NumberToUint32(k);
- if (key < limit) {
- if (value->IsUndefined(isolate)) {
- undefs++;
- } else if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return bailout;
- } else {
- Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, pos, value, details, object);
- DCHECK(result.is_identical_to(new_dict));
- USE(result);
- pos++;
- }
- } else if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return bailout;
- } else {
- Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, key, value, details, object);
- DCHECK(result.is_identical_to(new_dict));
- USE(result);
- }
- }
-
- uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails::Empty();
- while (undefs > 0) {
- if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Adding an entry with the key beyond smi-range requires
- // allocation. Bailout.
- return bailout;
- }
- HandleScope scope(isolate);
- Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, pos, isolate->factory()->undefined_value(), no_details,
- object);
- DCHECK(result.is_identical_to(new_dict));
- USE(result);
- pos++;
- undefs--;
- }
-
- object->set_elements(*new_dict);
-
- AllowHeapAllocation allocate_return_value;
- return isolate->factory()->NewNumberFromUint(result);
-}
-
-
-// Collects all defined (non-hole) and non-undefined (array) elements at
-// the start of the elements array.
-// If the object is in dictionary mode, it is converted to fast elements
-// mode.
-Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
- uint32_t limit) {
- Isolate* isolate = object->GetIsolate();
- if (object->HasSloppyArgumentsElements() || !object->map()->is_extensible()) {
- return handle(Smi::FromInt(-1), isolate);
- }
-
- if (object->HasStringWrapperElements()) {
- int len = String::cast(Handle<JSValue>::cast(object)->value())->length();
- return handle(Smi::FromInt(len), isolate);
- }
-
- if (object->HasDictionaryElements()) {
- // Convert to fast elements containing only the existing properties.
- // Ordering is irrelevant, since we are going to sort anyway.
- Handle<SeededNumberDictionary> dict(object->element_dictionary());
- if (object->IsJSArray() || dict->requires_slow_elements() ||
- dict->max_number_key() >= limit) {
- return JSObject::PrepareSlowElementsForSort(object, limit);
- }
- // Convert to fast elements.
-
- Handle<Map> new_map =
- JSObject::GetElementsTransitionMap(object, FAST_HOLEY_ELEMENTS);
-
- PretenureFlag tenure = isolate->heap()->InNewSpace(*object) ?
- NOT_TENURED: TENURED;
- Handle<FixedArray> fast_elements =
- isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
- dict->CopyValuesTo(*fast_elements);
- JSObject::ValidateElements(object);
-
- JSObject::SetMapAndElements(object, new_map, fast_elements);
- } else if (object->HasFixedTypedArrayElements()) {
- // Typed arrays cannot have holes or undefined elements.
- return handle(Smi::FromInt(
- FixedArrayBase::cast(object->elements())->length()), isolate);
- } else if (!object->HasFastDoubleElements()) {
- EnsureWritableFastElements(object);
- }
- DCHECK(object->HasFastSmiOrObjectElements() ||
- object->HasFastDoubleElements());
-
- // Collect holes at the end, undefined before that and the rest at the
- // start, and return the number of non-hole, non-undefined values.
-
- Handle<FixedArrayBase> elements_base(object->elements());
- uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
- if (limit > elements_length) {
- limit = elements_length;
- }
- if (limit == 0) {
- return handle(Smi::kZero, isolate);
- }
-
- uint32_t result = 0;
- if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
- FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
- // Split elements into defined and the_hole, in that order.
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < holes; i++) {
- if (elements->is_the_hole(i)) {
- holes--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (holes > i) {
- if (elements->is_the_hole(holes)) {
- holes--;
- } else {
- elements->set(i, elements->get_scalar(holes));
- break;
- }
- }
- }
- result = holes;
- while (holes < limit) {
- elements->set_the_hole(holes);
- holes++;
- }
- } else {
- FixedArray* elements = FixedArray::cast(*elements_base);
- DisallowHeapAllocation no_gc;
-
- // Split elements into defined, undefined and the_hole, in that order. Only
- // count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_gc);
- unsigned int undefs = limit;
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < undefs; i++) {
- Object* current = elements->get(i);
- if (current->IsTheHole(isolate)) {
- holes--;
- undefs--;
- } else if (current->IsUndefined(isolate)) {
- undefs--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (undefs > i) {
- current = elements->get(undefs);
- if (current->IsTheHole(isolate)) {
- holes--;
- undefs--;
- } else if (current->IsUndefined(isolate)) {
- undefs--;
- } else {
- elements->set(i, current, write_barrier);
- break;
- }
- }
- }
- result = undefs;
- while (undefs < holes) {
- elements->set_undefined(isolate, undefs);
- undefs++;
- }
- while (holes < limit) {
- elements->set_the_hole(isolate, holes);
- holes++;
- }
- }
-
- return isolate->factory()->NewNumberFromUint(result);
-}
+Dictionary<SeededNumberDictionary,
+ SeededNumberDictionaryShape>::NumberOfEnumerableProperties();
namespace {
@@ -17217,7 +16806,6 @@ ExternalArrayType JSTypedArray::type() {
default:
UNREACHABLE();
- return static_cast<ExternalArrayType>(-1);
}
}
@@ -17233,7 +16821,6 @@ size_t JSTypedArray::element_size() {
default:
UNREACHABLE();
- return 0;
}
}
@@ -17321,9 +16908,7 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
Handle<PropertyCell> cell;
if (entry != GlobalDictionary::kNotFound) {
if (entry_out) *entry_out = entry;
- // This call should be idempotent.
- DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
- cell = handle(PropertyCell::cast(dictionary->ValueAt(entry)));
+ cell = handle(dictionary->CellAt(entry));
PropertyCellType original_cell_type = cell->property_details().cell_type();
DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
original_cell_type == PropertyCellType::kUninitialized);
@@ -17331,16 +16916,16 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
if (original_cell_type == PropertyCellType::kInvalidated) {
cell = PropertyCell::InvalidateEntry(dictionary, entry);
}
- PropertyDetails details(kData, NONE, 0, cell_type);
+ PropertyDetails details(kData, NONE, cell_type);
cell->set_property_details(details);
return cell;
}
- cell = isolate->factory()->NewPropertyCell();
- PropertyDetails details(kData, NONE, 0, cell_type);
+ cell = isolate->factory()->NewPropertyCell(name);
+ PropertyDetails details(kData, NONE, cell_type);
dictionary =
GlobalDictionary::Add(dictionary, name, cell, details, entry_out);
// {*entry_out} is initialized inside GlobalDictionary::Add().
- global->set_properties(*dictionary);
+ global->SetProperties(*dictionary);
return cell;
}
@@ -17350,10 +16935,26 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
// string hash calculation loop here for speed. Doesn't work if the two
// characters form a decimal integer, since such strings have a different hash
// algorithm.
-class TwoCharHashTableKey : public HashTableKey {
+class TwoCharHashTableKey : public StringTableKey {
public:
TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed)
- : c1_(c1), c2_(c2) {
+ : StringTableKey(ComputeHashField(c1, c2, seed)), c1_(c1), c2_(c2) {}
+
+ bool IsMatch(Object* o) override {
+ String* other = String::cast(o);
+ if (other->length() != 2) return false;
+ if (other->Get(0) != c1_) return false;
+ return other->Get(1) == c2_;
+ }
+
+ Handle<String> AsHandle(Isolate* isolate) override {
+    // The TwoCharHashTableKey is only used for looking up strings in the
+    // string table, not for adding to it.
+ UNREACHABLE();
+ }
+
+ private:
+ uint32_t ComputeHashField(uint16_t c1, uint16_t c2, uint32_t seed) {
// Char 1.
uint32_t hash = seed;
hash += c1;
@@ -17368,7 +16969,7 @@ class TwoCharHashTableKey : public HashTableKey {
hash ^= hash >> 11;
hash += hash << 15;
if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
- hash_ = hash;
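+    // Fold the raw hash into the hash-field format: shift it into position
+    // and set the "not an array index" bit so it can be compared directly
+    // with a string's hash_field().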
+ hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
#ifdef DEBUG
// If this assert fails then we failed to reproduce the two-character
// version of the string hashing algorithm above. One reason could be
@@ -17376,91 +16977,35 @@ class TwoCharHashTableKey : public HashTableKey {
// algorithm is different in that case.
uint16_t chars[2] = {c1, c2};
uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
- hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
- DCHECK_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
+ DCHECK_EQ(hash, check_hash);
#endif
+ return hash;
}
- bool IsMatch(Object* o) override {
- if (!o->IsString()) return false;
- String* other = String::cast(o);
- if (other->length() != 2) return false;
- if (other->Get(0) != c1_) return false;
- return other->Get(1) == c2_;
- }
-
- uint32_t Hash() override { return hash_; }
- uint32_t HashForObject(Object* key) override {
- if (!key->IsString()) return 0;
- return String::cast(key)->Hash();
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override {
- // The TwoCharHashTableKey is only used for looking in the string
- // table, not for adding to it.
- UNREACHABLE();
- return MaybeHandle<Object>().ToHandleChecked();
- }
-
- private:
uint16_t c1_;
uint16_t c2_;
- uint32_t hash_;
};
-
-MaybeHandle<String> StringTable::InternalizeStringIfExists(
- Isolate* isolate,
- Handle<String> string) {
- if (string->IsInternalizedString()) {
- return string;
- }
- if (string->IsThinString()) {
- return handle(Handle<ThinString>::cast(string)->actual(), isolate);
- }
- return LookupStringIfExists(isolate, string);
-}
-
-
-MaybeHandle<String> StringTable::LookupStringIfExists(
- Isolate* isolate,
- Handle<String> string) {
- Handle<StringTable> string_table = isolate->factory()->string_table();
- InternalizedStringKey key(string);
- int entry = string_table->FindEntry(&key);
- if (entry == kNotFound) {
- return MaybeHandle<String>();
- } else {
- Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
- DCHECK(StringShape(*result).IsInternalized());
- return result;
- }
-}
-
-
MaybeHandle<String> StringTable::LookupTwoCharsStringIfExists(
Isolate* isolate,
uint16_t c1,
uint16_t c2) {
- Handle<StringTable> string_table = isolate->factory()->string_table();
TwoCharHashTableKey key(c1, c2, isolate->heap()->HashSeed());
+ Handle<StringTable> string_table = isolate->factory()->string_table();
int entry = string_table->FindEntry(&key);
- if (entry == kNotFound) {
- return MaybeHandle<String>();
- } else {
- Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
- DCHECK(StringShape(*result).IsInternalized());
- return result;
- }
-}
+ if (entry == kNotFound) return MaybeHandle<String>();
+ Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
+ DCHECK(StringShape(*result).IsInternalized());
+ DCHECK_EQ(result->Hash(), key.Hash());
+ return result;
+}
void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
int expected) {
Handle<StringTable> table = isolate->factory()->string_table();
// We need a key instance for the virtual hash function.
- InternalizedStringKey dummy_key(isolate->factory()->empty_string());
- table = StringTable::EnsureCapacity(table, expected, &dummy_key);
+ table = StringTable::EnsureCapacity(table, expected);
isolate->heap()->SetRootStringTable(*table);
}
@@ -17524,14 +17069,8 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> string) {
- if (string->IsThinString()) {
- DCHECK(Handle<ThinString>::cast(string)->actual()->IsInternalizedString());
- return handle(Handle<ThinString>::cast(string)->actual(), isolate);
- }
- if (string->IsConsString() && string->IsFlat()) {
- string = handle(Handle<ConsString>::cast(string)->first(), isolate);
- if (string->IsInternalizedString()) return string;
- }
+ string = String::Flatten(string);
+ if (string->IsInternalizedString()) return string;
InternalizedStringKey key(string);
Handle<String> result = LookupKey(isolate, &key);
@@ -17559,8 +17098,7 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
return result;
}
-
-Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
+Handle<String> StringTable::LookupKey(Isolate* isolate, StringTableKey* key) {
Handle<StringTable> table = isolate->factory()->string_table();
int entry = table->FindEntry(key);
@@ -17570,13 +17108,14 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
}
// Adding new string. Grow table if needed.
- table = StringTable::EnsureCapacity(table, 1, key);
+ table = StringTable::EnsureCapacity(table, 1);
// Create string object.
- Handle<Object> string = key->AsHandle(isolate);
+ Handle<String> string = key->AsHandle(isolate);
// There must be no attempts to internalize strings that could throw
// InvalidStringLength error.
CHECK(!string.is_null());
+ DCHECK(string->HasHashCode());
// Add the new string and return it along with the string table.
entry = table->FindInsertionEntry(key->Hash());
@@ -17589,34 +17128,38 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
namespace {
-class StringTableNoAllocateKey : public HashTableKey {
+class StringTableNoAllocateKey : public StringTableKey {
public:
StringTableNoAllocateKey(String* string, uint32_t seed)
- : string_(string), length_(string->length()) {
+ : StringTableKey(0), string_(string) {
StringShape shape(string);
one_byte_ = shape.HasOnlyOneByteChars();
DCHECK(!shape.IsInternalized());
DCHECK(!shape.IsThin());
- if (shape.IsCons() && length_ <= String::kMaxHashCalcLength) {
+ int length = string->length();
+ if (shape.IsCons() && length <= String::kMaxHashCalcLength) {
special_flattening_ = true;
uint32_t hash_field = 0;
if (one_byte_) {
- one_byte_content_ = new uint8_t[length_];
- String::WriteToFlat(string, one_byte_content_, 0, length_);
- hash_field = StringHasher::HashSequentialString(one_byte_content_,
- length_, seed);
+ one_byte_content_ = new uint8_t[length];
+ String::WriteToFlat(string, one_byte_content_, 0, length);
+ hash_field =
+ StringHasher::HashSequentialString(one_byte_content_, length, seed);
} else {
- two_byte_content_ = new uint16_t[length_];
- String::WriteToFlat(string, two_byte_content_, 0, length_);
- hash_field = StringHasher::HashSequentialString(two_byte_content_,
- length_, seed);
+ two_byte_content_ = new uint16_t[length];
+ String::WriteToFlat(string, two_byte_content_, 0, length);
+ hash_field =
+ StringHasher::HashSequentialString(two_byte_content_, length, seed);
}
string->set_hash_field(hash_field);
} else {
special_flattening_ = false;
one_byte_content_ = nullptr;
+ string->Hash();
}
- hash_ = string->Hash();
+
+ DCHECK(string->HasHashCode());
+ set_hash_field(string->hash_field());
}
~StringTableNoAllocateKey() {
@@ -17631,8 +17174,8 @@ class StringTableNoAllocateKey : public HashTableKey {
String* other = String::cast(otherstring);
DCHECK(other->IsInternalizedString());
DCHECK(other->IsFlat());
- if (hash_ != other->Hash()) return false;
- int len = length_;
+ if (Hash() != other->Hash()) return false;
+ int len = string_->length();
if (len != other->length()) return false;
if (!special_flattening_) {
@@ -17685,23 +17228,14 @@ class StringTableNoAllocateKey : public HashTableKey {
}
}
- uint32_t Hash() override { return hash_; }
-
- uint32_t HashForObject(Object* key) override {
- return String::cast(key)->Hash();
- }
-
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
+ MUST_USE_RESULT Handle<String> AsHandle(Isolate* isolate) override {
UNREACHABLE();
- return Handle<String>();
}
private:
String* string_;
- int length_;
bool one_byte_;
bool special_flattening_;
- uint32_t hash_ = 0;
union {
uint8_t* one_byte_content_;
uint16_t* two_byte_content_;
@@ -17720,7 +17254,6 @@ Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
StringTableNoAllocateKey key(string, heap->HashSeed());
// String could be an array index.
- DCHECK(string->HasHashCode());
uint32_t hash = string->hash_field();
// Valid array indices are >= 0, so they cannot be mixed up with any of
@@ -17730,7 +17263,7 @@ Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
STATIC_ASSERT(
!String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));
- if ((hash & Name::kContainsCachedArrayIndexMask) == 0) {
+ if (Name::ContainsCachedArrayIndex(hash)) {
return Smi::FromInt(String::ArrayIndexValueBits::decode(hash));
}
if ((hash & Name::kIsNotArrayIndexMask) == 0) {
@@ -17751,7 +17284,7 @@ Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
return Smi::FromInt(ResultSentinel::kNotFound);
}
-String* StringTable::LookupKeyIfExists(Isolate* isolate, HashTableKey* key) {
+String* StringTable::LookupKeyIfExists(Isolate* isolate, StringTableKey* key) {
Handle<StringTable> table = isolate->factory()->string_table();
int entry = table->FindEntry(isolate, key);
if (entry != kNotFound) return String::cast(table->KeyAt(entry));
@@ -17765,8 +17298,8 @@ Handle<StringSet> StringSet::New(Isolate* isolate) {
Handle<StringSet> StringSet::Add(Handle<StringSet> stringset,
Handle<String> name) {
if (!stringset->Has(name)) {
- stringset = EnsureCapacity(stringset, 1, *name);
- uint32_t hash = StringSetShape::Hash(*name);
+ stringset = EnsureCapacity(stringset, 1);
+ uint32_t hash = ShapeT::Hash(name->GetIsolate(), *name);
int entry = stringset->FindInsertionEntry(hash);
stringset->set(EntryToIndex(entry), *name);
stringset->ElementAdded();
@@ -17784,7 +17317,7 @@ Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> set,
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
if (!set->Has(isolate, key, hash)) {
- set = EnsureCapacity(set, 1, key);
+ set = EnsureCapacity(set, 1);
int entry = set->FindInsertionEntry(hash);
set->set(EntryToIndex(entry), *key);
set->ElementAdded();
@@ -17971,7 +17504,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
Handle<SharedFunctionInfo> shared(context->closure()->shared());
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(cache, 1, &key);
+ cache = EnsureCapacity(cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
@@ -17988,7 +17521,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<Context> native_context(context->native_context());
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(cache, 1, &key);
+ cache = EnsureCapacity(cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
@@ -18018,7 +17551,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
}
}
- cache = EnsureCapacity(cache, 1, &key);
+ cache = EnsureCapacity(cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
Handle<Object> k =
isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
@@ -18033,7 +17566,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value) {
RegExpKey key(src, flags);
- cache = EnsureCapacity(cache, 1, &key);
+ cache = EnsureCapacity(cache, 1);
int entry = cache->FindInsertionEntry(key.Hash());
// We store the value in the key slot, and compare the search key
   // to the stored value with a custom IsMatch function during lookups.
@@ -18094,46 +17627,22 @@ void CompilationCacheTable::Remove(Object* value) {
return;
}
-template <typename Derived, typename Shape, typename Key>
-Handle<Derived> Dictionary<Derived, Shape, Key>::New(
+template <typename Derived, typename Shape>
+Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
Isolate* isolate, int at_least_space_for, PretenureFlag pretenure,
MinimumCapacity capacity_option) {
- DCHECK(0 <= at_least_space_for);
- Handle<Derived> dict = DerivedHashTable::New(isolate, at_least_space_for,
- capacity_option, pretenure);
-
- // Initialize the next enumeration index.
+ DCHECK_LE(0, at_least_space_for);
+ Handle<Derived> dict = Dictionary<Derived, Shape>::New(
+ isolate, at_least_space_for, pretenure, capacity_option);
dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
return dict;
}
-template <typename Derived, typename Shape, typename Key>
-Handle<Derived> Dictionary<Derived, Shape, Key>::NewEmpty(
- Isolate* isolate, PretenureFlag pretenure) {
- Handle<Derived> dict = DerivedHashTable::New(isolate, 1, pretenure);
- // Attempt to add one element to the empty dictionary must cause reallocation.
- DCHECK(!dict->HasSufficientCapacityToAdd(1));
- // Initialize the next enumeration index.
- dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
- return dict;
-}
-
-template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::SetRequiresCopyOnCapacityChange() {
- DCHECK_EQ(0, DerivedHashTable::NumberOfElements());
- DCHECK_EQ(0, DerivedHashTable::NumberOfDeletedElements());
- // Make sure that HashTable::EnsureCapacity will create a copy.
- DerivedHashTable::SetNumberOfDeletedElements(DerivedHashTable::Capacity());
- DCHECK(!DerivedHashTable::HasSufficientCapacityToAdd(1));
-}
-
-
-template <typename Derived, typename Shape, typename Key>
-Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
- Handle<Derived> dictionary, int n, Key key) {
+template <typename Derived, typename Shape>
+Handle<Derived> BaseNameDictionary<Derived, Shape>::EnsureCapacity(
+ Handle<Derived> dictionary, int n) {
// Check whether there are enough enumeration indices to add n elements.
- if (Shape::kIsEnumerable &&
- !PropertyDetails::IsValidIndex(dictionary->NextEnumerationIndex() + n)) {
+ if (!PropertyDetails::IsValidIndex(dictionary->NextEnumerationIndex() + n)) {
// If not, we generate new indices for the properties.
int length = dictionary->NumberOfElements();
@@ -18143,7 +17652,7 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
// Iterate over the dictionary using the enumeration order and update
// the dictionary with new enumeration indices.
for (int i = 0; i < length; i++) {
- int index = Smi::cast(iteration_order->get(i))->value();
+ int index = Smi::ToInt(iteration_order->get(i));
DCHECK(dictionary->IsKey(dictionary->GetIsolate(),
dictionary->KeyAt(index)));
@@ -18158,85 +17667,73 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
dictionary->SetNextEnumerationIndex(PropertyDetails::kInitialIndex +
length);
}
- return DerivedHashTable::EnsureCapacity(dictionary, n, key);
+ return HashTable<Derived, Shape>::EnsureCapacity(dictionary, n);
}
-
-template <typename Derived, typename Shape, typename Key>
-Handle<Object> Dictionary<Derived, Shape, Key>::DeleteProperty(
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::DeleteEntry(
Handle<Derived> dictionary, int entry) {
- Factory* factory = dictionary->GetIsolate()->factory();
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (!details.IsConfigurable()) return factory->false_value();
-
- dictionary->SetEntry(
- entry, factory->the_hole_value(), factory->the_hole_value());
+ DCHECK(Shape::kEntrySize != 3 ||
+ dictionary->DetailsAt(entry).IsConfigurable());
+ dictionary->ClearEntry(entry);
dictionary->ElementRemoved();
- return factory->true_value();
+ return Shrink(dictionary);
}
-
-template<typename Derived, typename Shape, typename Key>
-Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut(
- Handle<Derived> dictionary, Key key, Handle<Object> value) {
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::AtPut(Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details) {
int entry = dictionary->FindEntry(key);
   // If the entry is present, set the value.
- if (entry != Dictionary::kNotFound) {
- dictionary->ValueAtPut(entry, *value);
- return dictionary;
+ if (entry == Dictionary::kNotFound) {
+ return Derived::Add(dictionary, key, value, details);
}
- // Check whether the dictionary should be extended.
- dictionary = EnsureCapacity(dictionary, 1, key);
-#ifdef DEBUG
- USE(Shape::AsHandle(dictionary->GetIsolate(), key));
-#endif
- PropertyDetails details = PropertyDetails::Empty();
-
- AddEntry(dictionary, key, value, details, dictionary->Hash(key));
+ // We don't need to copy over the enumeration index.
+ dictionary->ValueAtPut(entry, *value);
+ if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
return dictionary;
}
-template <typename Derived, typename Shape, typename Key>
-Handle<Derived> Dictionary<Derived, Shape, Key>::Add(Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details,
- int* entry_out) {
+template <typename Derived, typename Shape>
+Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
+ Handle<Derived> dictionary, Key key, Handle<Object> value,
+ PropertyDetails details, int* entry_out) {
+ // Insert element at empty or deleted entry
+ DCHECK_EQ(0, details.dictionary_index());
+ // Assign an enumeration index to the property and update
+ // SetNextEnumerationIndex.
+ int index = dictionary->NextEnumerationIndex();
+ details = details.set_index(index);
+ dictionary->SetNextEnumerationIndex(index + 1);
+ return Dictionary<Derived, Shape>::Add(dictionary, key, value, details,
+ entry_out);
+}
+
+template <typename Derived, typename Shape>
+Handle<Derived> Dictionary<Derived, Shape>::Add(Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out) {
+ Isolate* isolate = dictionary->GetIsolate();
+ uint32_t hash = Shape::Hash(isolate, key);
   // Validate that the key is absent.
SLOW_DCHECK((dictionary->FindEntry(key) == Dictionary::kNotFound));
// Check whether the dictionary should be extended.
- dictionary = EnsureCapacity(dictionary, 1, key);
-
- int entry = AddEntry(dictionary, key, value, details, dictionary->Hash(key));
- if (entry_out) *entry_out = entry;
- return dictionary;
-}
+ dictionary = Derived::EnsureCapacity(dictionary, 1);
-// Add a key, value pair to the dictionary. Returns entry value.
-template <typename Derived, typename Shape, typename Key>
-int Dictionary<Derived, Shape, Key>::AddEntry(Handle<Derived> dictionary,
- Key key, Handle<Object> value,
- PropertyDetails details,
- uint32_t hash) {
// Compute the key object.
- Handle<Object> k = Shape::AsHandle(dictionary->GetIsolate(), key);
+ Handle<Object> k = Shape::AsHandle(isolate, key);
uint32_t entry = dictionary->FindInsertionEntry(hash);
- // Insert element at empty or deleted entry
- if (details.dictionary_index() == 0 && Shape::kIsEnumerable) {
- // Assign an enumeration index to the property and update
- // SetNextEnumerationIndex.
- int index = dictionary->NextEnumerationIndex();
- details = details.set_index(index);
- dictionary->SetNextEnumerationIndex(index + 1);
- }
- dictionary->SetEntry(entry, k, value, details);
- DCHECK((dictionary->KeyAt(entry)->IsNumber() ||
- dictionary->KeyAt(entry)->IsName()));
+ dictionary->SetEntry(entry, *k, *value, details);
+ DCHECK(dictionary->KeyAt(entry)->IsNumber() ||
+ Shape::Unwrap(dictionary->KeyAt(entry))->IsUniqueName());
dictionary->ElementAdded();
- return entry;
+ if (entry_out) *entry_out = entry;
+ return dictionary;
}
bool SeededNumberDictionary::HasComplexElements() {
@@ -18244,9 +17741,8 @@ bool SeededNumberDictionary::HasComplexElements() {
Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (!this->IsKey(isolate, k)) continue;
- DCHECK(!IsDeleted(i));
+ Object* k;
+ if (!this->ToKey(isolate, i, &k)) continue;
PropertyDetails details = this->DetailsAt(i);
if (details.kind() == kAccessor) return true;
PropertyAttributes attr = details.attributes();
@@ -18278,41 +17774,12 @@ void SeededNumberDictionary::UpdateMaxNumberKey(
}
}
-Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
- Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details,
- Handle<JSObject> dictionary_holder) {
- dictionary->UpdateMaxNumberKey(key, dictionary_holder);
- SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
- return Add(dictionary, key, value, details);
-}
-
-
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
- return Add(dictionary, key, value, PropertyDetails::Empty());
-}
-
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::DeleteKey(
- Handle<UnseededNumberDictionary> dictionary, uint32_t key) {
- int entry = dictionary->FindEntry(key);
- if (entry == kNotFound) return dictionary;
-
- Factory* factory = dictionary->GetIsolate()->factory();
- dictionary->SetEntry(entry, factory->the_hole_value(),
- factory->the_hole_value());
- dictionary->ElementRemoved();
- return dictionary->Shrink(dictionary, key);
-}
-
-Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut(
+Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, Handle<JSObject> dictionary_holder) {
+ Handle<Object> value, Handle<JSObject> dictionary_holder,
+ PropertyDetails details) {
dictionary->UpdateMaxNumberKey(key, dictionary_holder);
- return AtPut(dictionary, key, value);
+ return AtPut(dictionary, key, value, details);
}
void SeededNumberDictionary::CopyValuesTo(FixedArray* elements) {
@@ -18322,65 +17789,33 @@ void SeededNumberDictionary::CopyValuesTo(FixedArray* elements) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (this->IsKey(isolate, k)) {
+ Object* k;
+ if (this->ToKey(isolate, i, &k)) {
elements->set(pos++, this->ValueAt(i), mode);
}
}
- DCHECK(pos == elements->length());
-}
-
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::AtNumberPut(
- Handle<UnseededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- return AtPut(dictionary, key, value);
+ DCHECK_EQ(pos, elements->length());
}
-Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
- Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details,
- Handle<JSObject> dictionary_holder) {
- int entry = dictionary->FindEntry(key);
- if (entry == kNotFound) {
- return AddNumberEntry(dictionary, key, value, details, dictionary_holder);
- }
- // Preserve enumeration index.
- details = details.set_index(dictionary->DetailsAt(entry).dictionary_index());
- Handle<Object> object_key =
- SeededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key);
- dictionary->SetEntry(entry, object_key, value, details);
- return dictionary;
-}
-
-
Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
Handle<UnseededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
- int entry = dictionary->FindEntry(key);
- if (entry == kNotFound) return AddNumberEntry(dictionary, key, value);
- Handle<Object> object_key =
- UnseededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key);
- dictionary->SetEntry(entry, object_key, value);
- return dictionary;
+ return AtPut(dictionary, key, value, PropertyDetails::Empty());
}
-
-template <typename Derived, typename Shape, typename Key>
-int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
- PropertyFilter filter) {
+template <typename Derived, typename Shape>
+int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (this->IsKey(isolate, k) && !k->FilterKey(filter)) {
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) result++;
- }
+ Object* k;
+ if (!this->ToKey(isolate, i, &k)) continue;
+ if (k->FilterKey(ENUMERABLE_STRINGS)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & ONLY_ENUMERABLE) == 0) result++;
}
return result;
}
@@ -18389,28 +17824,28 @@ int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
template <typename Dictionary>
struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
- bool operator() (Smi* a, Smi* b) {
- PropertyDetails da(dict->DetailsAt(a->value()));
- PropertyDetails db(dict->DetailsAt(b->value()));
+ bool operator()(const base::AtomicElement<Smi*>& a,
+ const base::AtomicElement<Smi*>& b) {
+ PropertyDetails da(dict->DetailsAt(a.value()->value()));
+ PropertyDetails db(dict->DetailsAt(b.value()->value()));
return da.dictionary_index() < db.dictionary_index();
}
Dictionary* dict;
};
-template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
- Handle<Dictionary<Derived, Shape, Key>> dictionary,
- Handle<FixedArray> storage, KeyCollectionMode mode,
- KeyAccumulator* accumulator) {
+template <typename Derived, typename Shape>
+void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
+ Handle<Derived> dictionary, Handle<FixedArray> storage,
+ KeyCollectionMode mode, KeyAccumulator* accumulator) {
DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
Isolate* isolate = dictionary->GetIsolate();
int length = storage->length();
int capacity = dictionary->Capacity();
int properties = 0;
for (int i = 0; i < capacity; i++) {
- Object* key = dictionary->KeyAt(i);
+ Object* key;
+ if (!dictionary->ToKey(isolate, i, &key)) continue;
bool is_shadowing_key = false;
- if (!dictionary->IsKey(isolate, key)) continue;
if (key->IsSymbol()) continue;
PropertyDetails details = dictionary->DetailsAt(i);
if (details.IsDontEnum()) {
@@ -18420,7 +17855,6 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
continue;
}
}
- if (dictionary->IsDeleted(i)) continue;
if (is_shadowing_key) {
accumulator->AddShadowingKey(key);
continue;
@@ -18433,20 +17867,24 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
CHECK_EQ(length, properties);
DisallowHeapAllocation no_gc;
- Dictionary<Derived, Shape, Key>* raw_dictionary = *dictionary;
+ Derived* raw_dictionary = *dictionary;
FixedArray* raw_storage = *storage;
- EnumIndexComparator<Derived> cmp(static_cast<Derived*>(*dictionary));
- Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress());
+ EnumIndexComparator<Derived> cmp(raw_dictionary);
+ // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ base::AtomicElement<Smi*>* start =
+ reinterpret_cast<base::AtomicElement<Smi*>*>(
+ storage->GetFirstElementAddress());
std::sort(start, start + length, cmp);
for (int i = 0; i < length; i++) {
- int index = Smi::cast(raw_storage->get(i))->value();
- raw_storage->set(i, raw_dictionary->KeyAt(index));
+ int index = Smi::ToInt(raw_storage->get(i));
+ raw_storage->set(i, raw_dictionary->NameAt(index));
}
}
-template <typename Derived, typename Shape, typename Key>
-Handle<FixedArray> Dictionary<Derived, Shape, Key>::IterationIndices(
- Handle<Dictionary<Derived, Shape, Key>> dictionary) {
+template <typename Derived, typename Shape>
+Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
+ Handle<Derived> dictionary) {
Isolate* isolate = dictionary->GetIsolate();
int capacity = dictionary->Capacity();
int length = dictionary->NumberOfElements();
@@ -18454,27 +17892,30 @@ Handle<FixedArray> Dictionary<Derived, Shape, Key>::IterationIndices(
int array_size = 0;
{
DisallowHeapAllocation no_gc;
- Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
+ Derived* raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
- Object* k = raw_dict->KeyAt(i);
- if (!raw_dict->IsKey(isolate, k)) continue;
- if (raw_dict->IsDeleted(i)) continue;
+ Object* k;
+ if (!raw_dictionary->ToKey(isolate, i, &k)) continue;
array->set(array_size++, Smi::FromInt(i));
}
DCHECK_EQ(array_size, length);
- EnumIndexComparator<Derived> cmp(static_cast<Derived*>(raw_dict));
- Smi** start = reinterpret_cast<Smi**>(array->GetFirstElementAddress());
+ EnumIndexComparator<Derived> cmp(raw_dictionary);
+ // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ base::AtomicElement<Smi*>* start =
+ reinterpret_cast<base::AtomicElement<Smi*>*>(
+ array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}
array->Shrink(array_size);
return array;
}
-template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::CollectKeysTo(
- Handle<Dictionary<Derived, Shape, Key>> dictionary, KeyAccumulator* keys) {
+template <typename Derived, typename Shape>
+void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
+ Handle<Derived> dictionary, KeyAccumulator* keys) {
Isolate* isolate = keys->isolate();
int capacity = dictionary->Capacity();
Handle<FixedArray> array =
@@ -18483,37 +17924,38 @@ void Dictionary<Derived, Shape, Key>::CollectKeysTo(
PropertyFilter filter = keys->filter();
{
DisallowHeapAllocation no_gc;
- Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
+ Derived* raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
- Object* k = raw_dict->KeyAt(i);
- if (!raw_dict->IsKey(isolate, k) || k->FilterKey(filter)) continue;
- if (raw_dict->IsDeleted(i)) continue;
- PropertyDetails details = raw_dict->DetailsAt(i);
+ Object* k;
+ if (!raw_dictionary->ToKey(isolate, i, &k)) continue;
+ if (k->FilterKey(filter)) continue;
+ PropertyDetails details = raw_dictionary->DetailsAt(i);
if ((details.attributes() & filter) != 0) {
keys->AddShadowingKey(k);
continue;
}
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
- Object* accessors = raw_dict->ValueAt(i);
- if (accessors->IsPropertyCell()) {
- accessors = PropertyCell::cast(accessors)->value();
- }
+ Object* accessors = raw_dictionary->ValueAt(i);
if (!accessors->IsAccessorInfo()) continue;
if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
}
array->set(array_size++, Smi::FromInt(i));
}
- EnumIndexComparator<Derived> cmp(static_cast<Derived*>(raw_dict));
- Smi** start = reinterpret_cast<Smi**>(array->GetFirstElementAddress());
+ EnumIndexComparator<Derived> cmp(raw_dictionary);
+ // Use AtomicElement wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ base::AtomicElement<Smi*>* start =
+ reinterpret_cast<base::AtomicElement<Smi*>*>(
+ array->GetFirstElementAddress());
std::sort(start, start + array_size, cmp);
}
bool has_seen_symbol = false;
for (int i = 0; i < array_size; i++) {
- int index = Smi::cast(array->get(i))->value();
- Object* key = dictionary->KeyAt(index);
+ int index = Smi::ToInt(array->get(i));
+ Object* key = dictionary->NameAt(index);
if (key->IsSymbol()) {
has_seen_symbol = true;
continue;
@@ -18522,28 +17964,24 @@ void Dictionary<Derived, Shape, Key>::CollectKeysTo(
}
if (has_seen_symbol) {
for (int i = 0; i < array_size; i++) {
- int index = Smi::cast(array->get(i))->value();
- Object* key = dictionary->KeyAt(index);
+ int index = Smi::ToInt(array->get(i));
+ Object* key = dictionary->NameAt(index);
if (!key->IsSymbol()) continue;
keys->AddKey(key, DO_NOT_CONVERT);
}
}
}
-
// Backwards lookup (slow).
-template<typename Derived, typename Shape, typename Key>
-Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
- Isolate* isolate = this->GetIsolate();
- int capacity = this->Capacity();
+template <typename Derived, typename Shape>
+Object* Dictionary<Derived, Shape>::SlowReverseLookup(Object* value) {
+ Derived* dictionary = Derived::cast(this);
+ Isolate* isolate = dictionary->GetIsolate();
+ int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (!this->IsKey(isolate, k)) continue;
- Object* e = this->ValueAt(i);
- // TODO(dcarney): this should be templatized.
- if (e->IsPropertyCell()) {
- e = PropertyCell::cast(e)->value();
- }
+ Object* k;
+ if (!dictionary->ToKey(isolate, i, &k)) continue;
+ Object* e = dictionary->ValueAt(i);
if (e == value) return k;
}
return isolate->heap()->undefined_value();
@@ -18572,7 +18010,7 @@ Object* ObjectHashTable::Lookup(Handle<Object> key) {
if (hash->IsUndefined(isolate)) {
return isolate->heap()->the_hole_value();
}
- return Lookup(isolate, key, Smi::cast(hash)->value());
+ return Lookup(isolate, key, Smi::ToInt(hash));
}
Object* ObjectHashTable::ValueAt(int entry) {
@@ -18617,7 +18055,7 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
// Rehash if more than 33% of the entries are deleted entries.
   // TODO(jochen): Consider shrinking the fixed array in place.
if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
- table->Rehash(isolate->factory()->undefined_value());
+ table->Rehash();
}
// If we're out of luck, we didn't get a GC recently, and so rehashing
// isn't enough to avoid a crash.
@@ -18630,12 +18068,12 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kFullHashtable);
}
- table->Rehash(isolate->factory()->undefined_value());
+ table->Rehash();
}
}
// Check whether the hash table should be extended.
- table = EnsureCapacity(table, 1, key);
+ table = EnsureCapacity(table, 1);
table->AddEntry(table->FindInsertionEntry(hash), *key, *value);
return table;
}
@@ -18652,7 +18090,7 @@ Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
return table;
}
- return Remove(table, key, was_present, Smi::cast(hash)->value());
+ return Remove(table, key, was_present, Smi::ToInt(hash));
}
@@ -18671,7 +18109,7 @@ Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
*was_present = true;
table->RemoveEntry(entry);
- return Shrink(table, key);
+ return Shrink(table);
}
@@ -18714,9 +18152,10 @@ Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
Handle<WeakCell> key_cell = isolate->factory()->NewWeakCell(key);
// Check whether the hash table should be extended.
- table = EnsureCapacity(table, 1, key, TENURED);
+ table = EnsureCapacity(table, 1, TENURED);
- table->AddEntry(table->FindInsertionEntry(table->Hash(key)), key_cell, value);
+ uint32_t hash = ShapeT::Hash(isolate, key);
+ table->AddEntry(table->FindInsertionEntry(hash), key_cell, value);
return table;
}
@@ -18799,22 +18238,14 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
}
template <class Derived, int entrysize>
-bool OrderedHashTable<Derived, entrysize>::HasKey(Handle<Derived> table,
- Handle<Object> key) {
+bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
+ Derived* table, Object* key) {
+ DCHECK(table->IsOrderedHashTable());
DisallowHeapAllocation no_gc;
- Isolate* isolate = table->GetIsolate();
- Object* raw_key = *key;
- int entry = table->KeyToFirstEntry(isolate, raw_key);
- // Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
- Object* candidate_key = table->KeyAt(entry);
- if (candidate_key->SameValueZero(raw_key)) return true;
- entry = table->NextChainEntry(entry);
- }
- return false;
+ int entry = table->FindEntry(isolate, key);
+ return entry != kNotFound;
}
-
Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
Handle<Object> key) {
int hash = Object::GetOrCreateHash(table->GetIsolate(), key)->value();
@@ -18893,7 +18324,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
}
Object* hash = key->GetHash();
- int bucket = Smi::cast(hash)->value() & (new_buckets - 1);
+ int bucket = Smi::ToInt(hash) & (new_buckets - 1);
Object* chain_entry = new_table->get(kHashTableStartIndex + bucket);
new_table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
int new_index = new_table->EntryToIndex(new_entry);
@@ -18914,6 +18345,79 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
return new_table;
}
+template <class Derived, int entrysize>
+bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
+ Derived* table, Object* key) {
+ DisallowHeapAllocation no_gc;
+ int entry = table->FindEntry(isolate, key);
+ if (entry == kNotFound) return false;
+
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+ int index = table->EntryToIndex(entry);
+
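+  // Mark the entry as deleted by overwriting its slots with the hole value;
+  // the slot is only reclaimed when the table is next rehashed.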
+ Object* hole = isolate->heap()->the_hole_value();
+ for (int i = 0; i < entrysize; ++i) {
+ table->set(index + i, hole);
+ }
+
+ table->SetNumberOfElements(nof - 1);
+ table->SetNumberOfDeletedElements(nod + 1);
+
+ return true;
+}
+
+Object* OrderedHashMap::GetHash(Isolate* isolate, Object* key) {
+ DisallowHeapAllocation no_gc;
+
+  // Special-case Smi keys here so that we avoid the HandleScope creation
+  // below.
+ if (key->IsSmi()) {
+ return Smi::FromInt(ComputeIntegerHash(Smi::cast(key)->value()));
+ }
+ HandleScope scope(isolate);
+ Object* hash = key->GetHash();
+ // If the object does not have an identity hash, it was never used as a key
+ if (hash->IsUndefined(isolate)) return Smi::FromInt(-1);
+ DCHECK(hash->IsSmi());
+ DCHECK(Smi::cast(hash)->value() >= 0);
+ return hash;
+}
+
+Handle<OrderedHashMap> OrderedHashMap::Add(Handle<OrderedHashMap> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ int hash = Object::GetOrCreateHash(table->GetIsolate(), key)->value();
+ int entry = table->HashToEntry(hash);
+ // Walk the chain of the bucket and try finding the key.
+ {
+ DisallowHeapAllocation no_gc;
+ Object* raw_key = *key;
+ while (entry != kNotFound) {
+ Object* candidate_key = table->KeyAt(entry);
+ // Do not add if we have the key already
+ if (candidate_key->SameValueZero(raw_key)) return table;
+ entry = table->NextChainEntry(entry);
+ }
+ }
+
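+  // Make sure there is room for one more entry; this may grow the backing
+  // store and compact away previously deleted entries.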
+ table = OrderedHashMap::EnsureGrowable(table);
+ // Read the existing bucket values.
+ int bucket = table->HashToBucket(hash);
+ int previous_entry = table->HashToEntry(hash);
+ int nof = table->NumberOfElements();
+ // Insert a new entry at the end,
+ int new_entry = nof + table->NumberOfDeletedElements();
+ int new_index = table->EntryToIndex(new_entry);
+ table->set(new_index, *key);
+ table->set(new_index + kValueOffset, *value);
+ table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
+ // and point the bucket to the new entry.
+ table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
+ table->SetNumberOfElements(nof + 1);
+ return table;
+}
+
template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure);
@@ -18926,8 +18430,13 @@ template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Shrink(
template Handle<OrderedHashSet> OrderedHashTable<OrderedHashSet, 1>::Clear(
Handle<OrderedHashSet> table);
-template bool OrderedHashTable<OrderedHashSet, 1>::HasKey(
- Handle<OrderedHashSet> table, Handle<Object> key);
+template bool OrderedHashTable<OrderedHashSet, 1>::HasKey(Isolate* isolate,
+ OrderedHashSet* table,
+ Object* key);
+
+template bool OrderedHashTable<OrderedHashSet, 1>::Delete(Isolate* isolate,
+ OrderedHashSet* table,
+ Object* key);
template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure);
@@ -18941,8 +18450,227 @@ template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Shrink(
template Handle<OrderedHashMap> OrderedHashTable<OrderedHashMap, 2>::Clear(
Handle<OrderedHashMap> table);
-template bool OrderedHashTable<OrderedHashMap, 2>::HasKey(
- Handle<OrderedHashMap> table, Handle<Object> key);
+template bool OrderedHashTable<OrderedHashMap, 2>::HasKey(Isolate* isolate,
+ OrderedHashMap* table,
+ Object* key);
+
+template bool OrderedHashTable<OrderedHashMap, 2>::Delete(Isolate* isolate,
+ OrderedHashMap* table,
+ Object* key);
+
+template <>
+Handle<SmallOrderedHashSet>
+SmallOrderedHashTable<SmallOrderedHashSet>::Allocate(Isolate* isolate,
+ int capacity,
+ PretenureFlag pretenure) {
+ return isolate->factory()->NewSmallOrderedHashSet(capacity, pretenure);
+}
+
+template <>
+Handle<SmallOrderedHashMap>
+SmallOrderedHashTable<SmallOrderedHashMap>::Allocate(Isolate* isolate,
+ int capacity,
+ PretenureFlag pretenure) {
+ return isolate->factory()->NewSmallOrderedHashMap(capacity, pretenure);
+}
+
+template <class Derived>
+void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
+ int capacity) {
+ int num_buckets = capacity / kLoadFactor;
+ int num_chains = capacity;
+
+ SetNumberOfBuckets(num_buckets);
+ SetNumberOfElements(0);
+ SetNumberOfDeletedElements(0);
+
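+  // The bucket table and the chain table are consecutive byte arrays; fill
+  // both with kNotFound so every bucket and every chain link starts empty.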
+ byte* hashtable_start =
+ FIELD_ADDR(this, kHeaderSize + (kBucketsStartOffset * kOneByteSize));
+ memset(hashtable_start, kNotFound, num_buckets + num_chains);
+
+ if (isolate->heap()->InNewSpace(this)) {
+ MemsetPointer(RawField(this, GetDataTableStartOffset()),
+ isolate->heap()->the_hole_value(),
+ capacity * Derived::kEntrySize);
+ } else {
+ for (int i = 0; i < capacity; i++) {
+      for (int j = 0; j < Derived::kEntrySize; j++) {
+ SetDataEntry(i, j, isolate->heap()->the_hole_value());
+ }
+ }
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < num_buckets; ++i) {
+ DCHECK_EQ(kNotFound, GetFirstEntry(i));
+ }
+
+ for (int i = 0; i < num_chains; ++i) {
+ DCHECK_EQ(kNotFound, GetNextEntry(i));
+ }
+#endif // DEBUG
+}
+
+Handle<SmallOrderedHashSet> SmallOrderedHashSet::Add(
+ Handle<SmallOrderedHashSet> table, Handle<Object> key) {
+ Isolate* isolate = table->GetIsolate();
+ if (table->HasKey(isolate, key)) return table;
+
+ if (table->UsedCapacity() >= table->Capacity()) {
+ table = SmallOrderedHashSet::Grow(table);
+ }
+
+ int hash = Object::GetOrCreateHash(table->GetIsolate(), key)->value();
+ int nof = table->NumberOfElements();
+
+ // Read the existing bucket values.
+ int bucket = table->HashToBucket(hash);
+ int previous_entry = table->HashToFirstEntry(hash);
+
+ // Insert a new entry at the end,
+ int new_entry = nof + table->NumberOfDeletedElements();
+
+ table->SetDataEntry(new_entry, SmallOrderedHashSet::kKeyIndex, *key);
+ table->SetFirstEntry(bucket, new_entry);
+ table->SetNextEntry(new_entry, previous_entry);
+
+  // and update bookkeeping.
+ table->SetNumberOfElements(nof + 1);
+
+ return table;
+}
+
+Handle<SmallOrderedHashMap> SmallOrderedHashMap::Add(
+ Handle<SmallOrderedHashMap> table, Handle<Object> key,
+ Handle<Object> value) {
+ Isolate* isolate = table->GetIsolate();
+ if (table->HasKey(isolate, key)) return table;
+
+ if (table->UsedCapacity() >= table->Capacity()) {
+ table = SmallOrderedHashMap::Grow(table);
+ }
+
+ int hash = Object::GetOrCreateHash(table->GetIsolate(), key)->value();
+ int nof = table->NumberOfElements();
+
+ // Read the existing bucket values.
+ int bucket = table->HashToBucket(hash);
+ int previous_entry = table->HashToFirstEntry(hash);
+
+ // Insert a new entry at the end,
+ int new_entry = nof + table->NumberOfDeletedElements();
+
+ table->SetDataEntry(new_entry, SmallOrderedHashMap::kValueIndex, *value);
+ table->SetDataEntry(new_entry, SmallOrderedHashMap::kKeyIndex, *key);
+ table->SetFirstEntry(bucket, new_entry);
+ table->SetNextEntry(new_entry, previous_entry);
+
+  // and update bookkeeping.
+ table->SetNumberOfElements(nof + 1);
+
+ return table;
+}
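+
+// Neither Add reuses deleted slots directly: once UsedCapacity() reaches
+// Capacity() the table is grown, and it is Rehash that drops the deleted
+// entries.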
+
+template <class Derived>
+bool SmallOrderedHashTable<Derived>::HasKey(Isolate* isolate,
+ Handle<Object> key) {
+ DisallowHeapAllocation no_gc;
+ Object* raw_key = *key;
+ Object* hash = key->GetHash();
+
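+  // GetHash, unlike GetOrCreateHash, does not install a hash. A key that was
+  // never hashed cannot have been inserted (Add goes through GetOrCreateHash),
+  // so such keys are rejected immediately.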
+ if (hash->IsUndefined(isolate)) return false;
+ int entry = HashToFirstEntry(Smi::ToInt(hash));
+
+ // Walk the chain in the bucket to find the key.
+ while (entry != kNotFound) {
+ Object* candidate_key = KeyAt(entry);
+ if (candidate_key->SameValueZero(raw_key)) return true;
+ entry = GetNextEntry(entry);
+ }
+ return false;
+}
+
+template <class Derived>
+Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Handle<Derived> table,
+ int new_capacity) {
+ DCHECK_GE(kMaxCapacity, new_capacity);
+ Isolate* isolate = table->GetIsolate();
+
+ Handle<Derived> new_table = SmallOrderedHashTable<Derived>::Allocate(
+ isolate, new_capacity,
+ isolate->heap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+ int nof = table->NumberOfElements();
+ int nod = table->NumberOfDeletedElements();
+ int new_entry = 0;
+
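+  // Copy the live entries over in insertion order. Entries whose key is
+  // the-hole (deleted) are skipped, so rehashing also compacts the table and
+  // renumbers the remaining entries densely from 0.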
+ {
+ DisallowHeapAllocation no_gc;
+ for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
+ Object* key = table->KeyAt(old_entry);
+ if (key->IsTheHole(isolate)) continue;
+
+ int hash = Smi::ToInt(key->GetHash());
+ int bucket = new_table->HashToBucket(hash);
+ int chain = new_table->GetFirstEntry(bucket);
+
+ new_table->SetFirstEntry(bucket, new_entry);
+ new_table->SetNextEntry(new_entry, chain);
+
+ for (int i = 0; i < Derived::kEntrySize; ++i) {
+ Object* value = table->GetDataEntry(old_entry, i);
+ new_table->SetDataEntry(new_entry, i, value);
+ }
+
+ ++new_entry;
+ }
+
+ new_table->SetNumberOfElements(nof);
+ }
+ return new_table;
+}
+
+template <class Derived>
+Handle<Derived> SmallOrderedHashTable<Derived>::Grow(Handle<Derived> table) {
+ int capacity = table->Capacity();
+ int new_capacity = capacity;
+
+ // Don't need to grow if we can simply clear out deleted entries instead.
+ // TODO(gsathya): Compact in place, instead of allocating a new table.
+ if (table->NumberOfDeletedElements() < (capacity >> 1)) {
+ new_capacity = capacity << 1;
+
+    // The max capacity of our table is 254. We special-case 256 to
+    // account for our growth strategy; otherwise we would only fill up
+    // to 128 entries in the table.
+ if (new_capacity == kGrowthHack) {
+ new_capacity = kMaxCapacity;
+ }
+
+ // TODO(gsathya): Transition to OrderedHashTable for size > kMaxCapacity.
+ }
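+  // For example, a full table of capacity 4 with three deleted entries is
+  // rehashed at capacity 4 (just reclaiming the holes), while one with a
+  // single deleted entry doubles to capacity 8.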
+
+ return Rehash(table, new_capacity);
+}
+
+template bool SmallOrderedHashTable<SmallOrderedHashSet>::HasKey(
+ Isolate* isolate, Handle<Object> key);
+template Handle<SmallOrderedHashSet>
+SmallOrderedHashTable<SmallOrderedHashSet>::Rehash(
+ Handle<SmallOrderedHashSet> table, int new_capacity);
+template Handle<SmallOrderedHashSet> SmallOrderedHashTable<
+ SmallOrderedHashSet>::Grow(Handle<SmallOrderedHashSet> table);
+template void SmallOrderedHashTable<SmallOrderedHashSet>::Initialize(
+ Isolate* isolate, int capacity);
+
+template bool SmallOrderedHashTable<SmallOrderedHashMap>::HasKey(
+ Isolate* isolate, Handle<Object> key);
+template Handle<SmallOrderedHashMap>
+SmallOrderedHashTable<SmallOrderedHashMap>::Rehash(
+ Handle<SmallOrderedHashMap> table, int new_capacity);
+template Handle<SmallOrderedHashMap> SmallOrderedHashTable<
+ SmallOrderedHashMap>::Grow(Handle<SmallOrderedHashMap> table);
+template void SmallOrderedHashTable<SmallOrderedHashMap>::Initialize(
+ Isolate* isolate, int capacity);
template<class Derived, class TableType>
void OrderedHashTableIterator<Derived, TableType>::Transition() {
@@ -18950,7 +18678,7 @@ void OrderedHashTableIterator<Derived, TableType>::Transition() {
TableType* table = TableType::cast(this->table());
if (!table->IsObsolete()) return;
- int index = Smi::cast(this->index())->value();
+ int index = Smi::ToInt(this->index());
while (table->IsObsolete()) {
TableType* next_table = table->NextTable();
@@ -18981,12 +18709,11 @@ template<class Derived, class TableType>
bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
DisallowHeapAllocation no_allocation;
Isolate* isolate = this->GetIsolate();
- if (this->table()->IsUndefined(isolate)) return false;
Transition();
TableType* table = TableType::cast(this->table());
- int index = Smi::cast(this->index())->value();
+ int index = Smi::ToInt(this->index());
int used_capacity = table->UsedCapacity();
while (index < used_capacity && table->KeyAt(index)->IsTheHole(isolate)) {
@@ -18997,28 +18724,10 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
if (index < used_capacity) return true;
- set_table(isolate->heap()->undefined_value());
+ set_table(isolate->heap()->empty_ordered_hash_table());
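+  // Parking the exhausted iterator on the canonical empty table (rather than
+  // undefined) is what lets the IsUndefined check above be dropped: the empty
+  // table simply reports a used capacity of 0.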
return false;
}
-
-template<class Derived, class TableType>
-Smi* OrderedHashTableIterator<Derived, TableType>::Next(JSArray* value_array) {
- DisallowHeapAllocation no_allocation;
- if (HasMore()) {
- FixedArray* array = FixedArray::cast(value_array->elements());
- static_cast<Derived*>(this)->PopulateValueArray(array);
- MoveNext();
- return Smi::cast(kind());
- }
- return Smi::kZero;
-}
-
-
-template Smi*
-OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Next(
- JSArray* value_array);
-
template bool
OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::HasMore();
@@ -19032,10 +18741,6 @@ template void
OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Transition();
-template Smi*
-OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Next(
- JSArray* value_array);
-
template bool
OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::HasMore();
@@ -19136,11 +18841,11 @@ Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
int count = 0;
for (int i = 0;
count / values_per_entry < max_entries && i < table->Capacity(); i++) {
- Handle<Object> key(table->KeyAt(i), isolate);
- if (table->IsKey(isolate, *key)) {
- entries->set(count++, *key);
+ Object* key;
+ if (table->ToKey(isolate, i, &key)) {
+ entries->set(count++, key);
if (values_per_entry > 1) {
- Object* value = table->Lookup(key);
+ Object* value = table->Lookup(handle(key, isolate));
entries->set(count++, value);
}
}
@@ -19150,250 +18855,6 @@ Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
return isolate->factory()->NewJSArrayWithElements(entries);
}
-// Check if there is a break point at this source position.
-bool DebugInfo::HasBreakPoint(int source_position) {
- // Get the break point info object for this code offset.
- Object* break_point_info = GetBreakPointInfo(source_position);
-
- // If there is no break point info object or no break points in the break
- // point info object there is no break point at this code offset.
- if (break_point_info->IsUndefined(GetIsolate())) return false;
- return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
-}
-
-// Get the break point info object for this source position.
-Object* DebugInfo::GetBreakPointInfo(int source_position) {
- Isolate* isolate = GetIsolate();
- if (!break_points()->IsUndefined(isolate)) {
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->source_position() == source_position) {
- return break_point_info;
- }
- }
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object) {
- Isolate* isolate = debug_info->GetIsolate();
- if (debug_info->break_points()->IsUndefined(isolate)) return false;
-
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
- Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- BreakPointInfo::ClearBreakPoint(break_point_info, break_point_object);
- return true;
- }
- }
- return false;
-}
-
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
- Handle<Object> break_point_object) {
- Isolate* isolate = debug_info->GetIsolate();
- Handle<Object> break_point_info(
- debug_info->GetBreakPointInfo(source_position), isolate);
- if (!break_point_info->IsUndefined(isolate)) {
- BreakPointInfo::SetBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info),
- break_point_object);
- return;
- }
-
- // Adding a new break point for a code offset which did not have any
- // break points before. Try to find a free slot.
- static const int kNoBreakPointInfo = -1;
- int index = kNoBreakPointInfo;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined(isolate)) {
- index = i;
- break;
- }
- }
- if (index == kNoBreakPointInfo) {
- // No free slot - extend break point info array.
- Handle<FixedArray> old_break_points = Handle<FixedArray>(
- FixedArray::cast(debug_info->break_points()), isolate);
- Handle<FixedArray> new_break_points =
- isolate->factory()->NewFixedArray(
- old_break_points->length() +
- DebugInfo::kEstimatedNofBreakPointsInFunction);
-
- debug_info->set_break_points(*new_break_points);
- for (int i = 0; i < old_break_points->length(); i++) {
- new_break_points->set(i, old_break_points->get(i));
- }
- index = old_break_points->length();
- }
- DCHECK(index != kNoBreakPointInfo);
-
- // Allocate new BreakPointInfo object and set the break point.
- Handle<BreakPointInfo> new_break_point_info =
- isolate->factory()->NewBreakPointInfo(source_position);
- BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
- debug_info->break_points()->set(index, *new_break_point_info);
-}
-
-// Get the break point objects for a source position.
-Handle<Object> DebugInfo::GetBreakPointObjects(int source_position) {
- Object* break_point_info = GetBreakPointInfo(source_position);
- Isolate* isolate = GetIsolate();
- if (break_point_info->IsUndefined(isolate)) {
- return isolate->factory()->undefined_value();
- }
- return Handle<Object>(
- BreakPointInfo::cast(break_point_info)->break_point_objects(), isolate);
-}
-
-
-// Get the total number of break points.
-int DebugInfo::GetBreakPointCount() {
- Isolate* isolate = GetIsolate();
- if (break_points()->IsUndefined(isolate)) return 0;
- int count = 0;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- count += break_point_info->GetBreakPointCount();
- }
- }
- return count;
-}
-
-
-Handle<Object> DebugInfo::FindBreakPointInfo(
- Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
- Isolate* isolate = debug_info->GetIsolate();
- if (!debug_info->break_points()->IsUndefined(isolate)) {
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
- Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- return break_point_info;
- }
- }
- }
- }
- return isolate->factory()->undefined_value();
-}
-
-// Remove the specified break point object.
-void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- Isolate* isolate = break_point_info->GetIsolate();
- // If there are no break points just ignore.
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) return;
- // If there is a single break point clear it if it is the same.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- if (break_point_info->break_point_objects() == *break_point_object) {
- break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
- }
- return;
- }
- // If there are multiple break points shrink the array
- DCHECK(break_point_info->break_point_objects()->IsFixedArray());
- Handle<FixedArray> old_array =
- Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<FixedArray> new_array =
- isolate->factory()->NewFixedArray(old_array->length() - 1);
- int found_count = 0;
- for (int i = 0; i < old_array->length(); i++) {
- if (old_array->get(i) == *break_point_object) {
- DCHECK(found_count == 0);
- found_count++;
- } else {
- new_array->set(i - found_count, old_array->get(i));
- }
- }
- // If the break point was found in the list change it.
- if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
-}
-
-
-// Add the specified break point object.
-void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- Isolate* isolate = break_point_info->GetIsolate();
-
- // If there was no break point objects before just set it.
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
- break_point_info->set_break_point_objects(*break_point_object);
- return;
- }
- // If the break point object is the same as before just ignore.
- if (break_point_info->break_point_objects() == *break_point_object) return;
- // If there was one break point object before replace with array.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
- array->set(0, break_point_info->break_point_objects());
- array->set(1, *break_point_object);
- break_point_info->set_break_point_objects(*array);
- return;
- }
- // If there was more than one break point before extend array.
- Handle<FixedArray> old_array =
- Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
- Handle<FixedArray> new_array =
- isolate->factory()->NewFixedArray(old_array->length() + 1);
- for (int i = 0; i < old_array->length(); i++) {
- // If the break point was there before just ignore.
- if (old_array->get(i) == *break_point_object) return;
- new_array->set(i, old_array->get(i));
- }
- // Add the new break point.
- new_array->set(old_array->length(), *break_point_object);
- break_point_info->set_break_point_objects(*new_array);
-}
-
-
-bool BreakPointInfo::HasBreakPointObject(
- Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
- // No break point.
- Isolate* isolate = break_point_info->GetIsolate();
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
- return false;
- }
- // Single break point.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- return break_point_info->break_point_objects() == *break_point_object;
- }
- // Multiple break points.
- FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
- for (int i = 0; i < array->length(); i++) {
- if (array->get(i) == *break_point_object) {
- return true;
- }
- }
- return false;
-}
-
-
-// Get the number of break points.
-int BreakPointInfo::GetBreakPointCount() {
- // No break point.
- if (break_point_objects()->IsUndefined(GetIsolate())) return 0;
- // Single break point.
- if (!break_point_objects()->IsFixedArray()) return 1;
- // Multiple break points.
- return FixedArray::cast(break_point_objects())->length();
-}
-
-
// static
MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
Handle<JSReceiver> new_target, double tv) {
@@ -19515,7 +18976,6 @@ Object* JSDate::GetUTCField(FieldIndex index,
}
UNREACHABLE();
- return NULL;
}
@@ -19719,6 +19179,9 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
if (allocated_length >= MB)
isolate->counters()->array_buffer_big_allocations()->AddSample(
ConvertToMb(allocated_length));
+ if (shared == SharedFlag::kShared)
+ isolate->counters()->shared_array_allocations()->AddSample(
+ ConvertToMb(allocated_length));
if (initialize) {
data = isolate->array_buffer_allocator()->Allocate(allocated_length);
} else {
@@ -19766,9 +19229,10 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
// registration method below handles the case of registering a buffer that has
// already been promoted.
buffer->set_backing_store(backing_store);
- isolate->heap()->RegisterNewArrayBuffer(*buffer);
buffer->set_allocation_base(backing_store);
buffer->set_allocation_length(NumberToSize(buffer->byte_length()));
+ // RegisterNewArrayBuffer expects a valid length for adjusting counters.
+ isolate->heap()->RegisterNewArrayBuffer(*buffer);
memcpy(buffer->backing_store(),
fixed_typed_array->DataPtr(),
fixed_typed_array->DataSize());
@@ -19798,9 +19262,9 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
Handle<GlobalDictionary> dictionary, int entry) {
Isolate* isolate = dictionary->GetIsolate();
// Swap with a copy.
- DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
- Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell();
+ Handle<PropertyCell> cell(dictionary->CellAt(entry));
+ Handle<Name> name(cell->name(), isolate);
+ Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(name);
new_cell->set_value(cell->value());
dictionary->ValueAtPut(entry, *new_cell);
bool is_the_hole = cell->value()->IsTheHole(isolate);
@@ -19859,7 +19323,6 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
return PropertyCellType::kMutable;
default:
UNREACHABLE();
- return PropertyCellType::kMutable;
}
}
switch (type) {
@@ -19877,7 +19340,6 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
return PropertyCellType::kMutable;
}
UNREACHABLE();
- return PropertyCellType::kMutable;
}
Handle<PropertyCell> PropertyCell::PrepareForValue(
@@ -19885,21 +19347,22 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
PropertyDetails details) {
Isolate* isolate = dictionary->GetIsolate();
DCHECK(!value->IsTheHole(isolate));
- DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
- Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
+ Handle<PropertyCell> cell(dictionary->CellAt(entry));
const PropertyDetails original_details = cell->property_details();
// Data accesses could be cached in ics or optimized code.
bool invalidate =
original_details.kind() == kData && details.kind() == kAccessor;
- int index = original_details.dictionary_index();
+ int index;
PropertyCellType old_type = original_details.cell_type();
// Preserve the enumeration index unless the property was deleted or never
// initialized.
if (cell->value()->IsTheHole(isolate)) {
index = dictionary->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index + 1);
+ } else {
+ index = original_details.dictionary_index();
}
- DCHECK(index > 0);
+ DCHECK_LT(0, index);
details = details.set_index(index);
PropertyCellType new_type = UpdatedType(cell, value, original_details);
@@ -19935,14 +19398,7 @@ int JSGeneratorObject::source_position() const {
DCHECK(function()->shared()->HasBytecodeArray());
DCHECK(!function()->shared()->HasBaselineCode());
- int code_offset;
- const JSAsyncGeneratorObject* async =
- IsJSAsyncGeneratorObject() ? JSAsyncGeneratorObject::cast(this) : nullptr;
- if (async != nullptr && async->awaited_promise()->IsJSPromise()) {
- code_offset = Smi::cast(async->await_input_or_debug_pos())->value();
- } else {
- code_offset = Smi::cast(input_or_debug_pos())->value();
- }
+ int code_offset = Smi::ToInt(input_or_debug_pos());
// The stored bytecode offset is relative to a different base than what
// is used in the source position table, hence the subtraction.
@@ -20164,6 +19620,48 @@ void Module::StoreVariable(Handle<Module> module, int cell_index,
module->GetCell(cell_index)->set_value(*value);
}
+void Module::SetStatus(Status new_status) {
+ DisallowHeapAllocation no_alloc;
+ DCHECK_LE(status(), new_status);
+ DCHECK_NE(new_status, Module::kErrored);
+ set_status(new_status);
+}
+
+void Module::RecordError() {
+ DisallowHeapAllocation no_alloc;
+
+ Isolate* isolate = GetIsolate();
+ Object* the_exception = isolate->pending_exception();
+ DCHECK(!the_exception->IsTheHole(isolate));
+
+ switch (status()) {
+ case Module::kUninstantiated:
+ case Module::kPreInstantiating:
+ case Module::kInstantiating:
+ case Module::kEvaluating:
+ break;
+ case Module::kErrored:
+ DCHECK_EQ(exception(), the_exception);
+ return;
+ default:
+ UNREACHABLE();
+ }
+
+ set_code(info());
+
+ DCHECK(exception()->IsTheHole(isolate));
+ set_status(Module::kErrored);
+ set_exception(the_exception);
+}
+
+Object* Module::GetException() {
+ DisallowHeapAllocation no_alloc;
+ DCHECK_EQ(status(), Module::kErrored);
+ Object* the_exception = exception();
+ DCHECK(!the_exception->IsTheHole(GetIsolate()));
+ return the_exception;
+}
+
MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
Handle<String> name, int module_request,
MessageLocation loc, bool must_resolve,
@@ -20171,15 +19669,27 @@ MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
Isolate* isolate = module->GetIsolate();
Handle<Module> requested_module(
Module::cast(module->requested_modules()->get(module_request)), isolate);
- return Module::ResolveExport(requested_module, name, loc, must_resolve,
- resolve_set);
+ MaybeHandle<Cell> result = Module::ResolveExport(requested_module, name, loc,
+ must_resolve, resolve_set);
+ if (isolate->has_pending_exception()) {
+ DCHECK(result.is_null());
+ if (must_resolve) module->RecordError();
+ // If {must_resolve} is false and there's an exception, then either that
+ // exception was already recorded where it happened, or it's the
+ // kAmbiguousExport exception (see ResolveExportUsingStarExports) and the
+ // culprit module is still to be determined.
+ }
+ return result;
}
MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
Handle<String> name,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
- DCHECK_EQ(module->status(), kPrepared);
+ DCHECK_NE(module->status(), kErrored);
+ DCHECK_NE(module->status(), kEvaluating);
+ DCHECK_GE(module->status(), kPreInstantiating);
+
Isolate* isolate = module->GetIsolate();
Handle<Object> object(module->exports()->Lookup(name), isolate);
if (object->IsCell()) {
@@ -20301,14 +19811,39 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback) {
- return PrepareInstantiate(module, context, callback) &&
- FinishInstantiate(module, context);
+ Isolate* isolate = module->GetIsolate();
+ if (module->status() == kErrored) {
+ isolate->Throw(module->GetException());
+ return false;
+ }
+
+ if (!PrepareInstantiate(module, context, callback)) {
+ return false;
+ }
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ZoneForwardList<Handle<Module>> stack(&zone);
+ unsigned dfs_index = 0;
+ if (!FinishInstantiate(module, &stack, &dfs_index, &zone)) {
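+    // Everything still on the DFS stack was in the middle of instantiating
+    // when the failure happened; record the pending exception on each so the
+    // whole component ends up in the errored state.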
+ for (auto& descendant : stack) {
+ descendant->RecordError();
+ }
+ DCHECK_EQ(module->GetException(), isolate->pending_exception());
+ return false;
+ }
+ DCHECK(module->status() == kInstantiated || module->status() == kEvaluated);
+ DCHECK(stack.empty());
+ return true;
}
bool Module::PrepareInstantiate(Handle<Module> module,
v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback) {
- if (module->status() == kPrepared) return true;
+ DCHECK_NE(module->status(), kErrored);
+ DCHECK_NE(module->status(), kEvaluating);
+ DCHECK_NE(module->status(), kInstantiating);
+ if (module->status() >= kPreInstantiating) return true;
+ module->SetStatus(kPreInstantiating);
// Obtain requested modules.
Isolate* isolate = module->GetIsolate();
@@ -20322,18 +19857,29 @@ bool Module::PrepareInstantiate(Handle<Module> module,
v8::Utils::ToLocal(module))
.ToLocal(&api_requested_module)) {
isolate->PromoteScheduledException();
+ module->RecordError();
return false;
}
Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
+ if (requested_module->status() == kErrored) {
+ // TODO(neis): Move this into callback?
+ isolate->Throw(requested_module->GetException());
+ module->RecordError();
+ DCHECK_EQ(module->GetException(), requested_module->GetException());
+ return false;
+ }
requested_modules->set(i, *requested_module);
}
// Recurse.
- module->set_status(kPrepared);
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
- if (!PrepareInstantiate(requested_module, context, callback)) return false;
+ if (!PrepareInstantiate(requested_module, context, callback)) {
+ module->RecordError();
+ DCHECK_EQ(module->GetException(), requested_module->GetException());
+ return false;
+ }
}
// Set up local exports.
@@ -20359,37 +19905,80 @@ bool Module::PrepareInstantiate(Handle<Module> module,
CreateIndirectExport(module, Handle<String>::cast(export_name), entry);
}
- DCHECK_EQ(module->status(), kPrepared);
- DCHECK(!module->instantiated());
+ DCHECK_EQ(module->status(), kPreInstantiating);
return true;
}
-bool Module::FinishInstantiate(Handle<Module> module,
- v8::Local<v8::Context> context) {
- DCHECK_EQ(module->status(), kPrepared);
- if (module->instantiated()) return true;
+void Module::MaybeTransitionComponent(Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack,
+ Status new_status) {
+ DCHECK(new_status == kInstantiated || new_status == kEvaluated);
+ SLOW_DCHECK(
+ // {module} is on the {stack}.
+ std::count_if(stack->begin(), stack->end(),
+ [&](Handle<Module> m) { return *m == *module; }) == 1);
+ DCHECK_LE(module->dfs_ancestor_index(), module->dfs_index());
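+  // {dfs_ancestor_index} tracks the smallest DFS index reachable from this
+  // module via the edges explored so far, so it equals {dfs_index} exactly
+  // when {module} is the root of its strongly connected component. In that
+  // case every module still above it on the {stack} belongs to the same
+  // component and is transitioned together with it.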
+ if (module->dfs_ancestor_index() == module->dfs_index()) {
+ // This is the root of its strongly connected component.
+ Handle<Module> ancestor;
+ do {
+ ancestor = stack->front();
+ stack->pop_front();
+ DCHECK_EQ(ancestor->status(),
+ new_status == kInstantiated ? kInstantiating : kEvaluating);
+ ancestor->SetStatus(new_status);
+ } while (*ancestor != *module);
+ }
+}
- // Instantiate SharedFunctionInfo and mark module as instantiated for
+bool Module::FinishInstantiate(Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack,
+ unsigned* dfs_index, Zone* zone) {
+ DCHECK_NE(module->status(), kErrored);
+ DCHECK_NE(module->status(), kEvaluating);
+ if (module->status() >= kInstantiating) return true;
+ DCHECK_EQ(module->status(), kPreInstantiating);
+
+ // Instantiate SharedFunctionInfo and mark module as instantiating for
// the recursion.
Isolate* isolate = module->GetIsolate();
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared,
- handle(Utils::OpenHandle(*context)->native_context(), isolate));
+ shared, isolate->native_context());
module->set_code(*function);
- DCHECK(module->instantiated());
+ module->SetStatus(kInstantiating);
+ module->set_dfs_index(*dfs_index);
+ module->set_dfs_ancestor_index(*dfs_index);
+ stack->push_front(module);
+ (*dfs_index)++;
// Recurse.
Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
isolate);
- if (!FinishInstantiate(requested_module, context)) return false;
- }
+ if (!FinishInstantiate(requested_module, stack, dfs_index, zone)) {
+ return false;
+ }
- Zone zone(isolate->allocator(), ZONE_NAME);
+ DCHECK_NE(requested_module->status(), kErrored);
+ DCHECK_NE(requested_module->status(), kEvaluating);
+ DCHECK_GE(requested_module->status(), kInstantiating);
+ SLOW_DCHECK(
+ // {requested_module} is instantiating iff it's on the {stack}.
+ (requested_module->status() == kInstantiating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ if (requested_module->status() == kInstantiating) {
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ requested_module->dfs_ancestor_index()));
+ }
+ }
// Resolve imports.
Handle<ModuleInfo> module_info(shared->scope_info()->ModuleDescriptorInfo(),
@@ -20403,7 +19992,7 @@ bool Module::FinishInstantiate(Handle<Module> module,
Script::cast(JSFunction::cast(module->code())->shared()->script()),
isolate);
MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
- ResolveSet resolve_set(&zone);
+ ResolveSet resolve_set(zone);
Handle<Cell> cell;
if (!ResolveImport(module, name, entry->module_request(), loc, true,
&resolve_set)
@@ -20424,7 +20013,7 @@ bool Module::FinishInstantiate(Handle<Module> module,
Script::cast(JSFunction::cast(module->code())->shared()->script()),
isolate);
MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
- ResolveSet resolve_set(&zone);
+ ResolveSet resolve_set(zone);
if (ResolveExport(module, Handle<String>::cast(name), loc, true,
&resolve_set)
.is_null()) {
@@ -20432,17 +20021,53 @@ bool Module::FinishInstantiate(Handle<Module> module,
}
}
+ MaybeTransitionComponent(module, stack, kInstantiated);
return true;
}
MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
- DCHECK(module->instantiated());
+ Isolate* isolate = module->GetIsolate();
+ if (module->status() == kErrored) {
+ isolate->Throw(module->GetException());
+ return MaybeHandle<Object>();
+ }
+ DCHECK_NE(module->status(), kEvaluating);
+ DCHECK_GE(module->status(), kInstantiated);
+ Zone zone(isolate->allocator(), ZONE_NAME);
+
+ ZoneForwardList<Handle<Module>> stack(&zone);
+ unsigned dfs_index = 0;
+ Handle<Object> result;
+ if (!Evaluate(module, &stack, &dfs_index).ToHandle(&result)) {
+ for (auto& descendant : stack) {
+ DCHECK_EQ(descendant->status(), kEvaluating);
+ descendant->RecordError();
+ }
+ DCHECK_EQ(module->GetException(), isolate->pending_exception());
+ return MaybeHandle<Object>();
+ }
+ DCHECK_EQ(module->status(), kEvaluated);
+ DCHECK(stack.empty());
+ return result;
+}
- // Each module can only be evaluated once.
+MaybeHandle<Object> Module::Evaluate(Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack,
+ unsigned* dfs_index) {
Isolate* isolate = module->GetIsolate();
- if (module->evaluated()) return isolate->factory()->undefined_value();
+ DCHECK_NE(module->status(), kErrored);
+ if (module->status() >= kEvaluating) {
+ return isolate->factory()->undefined_value();
+ }
+ DCHECK_EQ(module->status(), kInstantiated);
+
Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
- module->set_evaluated();
+ module->set_code(function->shared()->scope_info()->ModuleDescriptorInfo());
+ module->SetStatus(kEvaluating);
+ module->set_dfs_index(*dfs_index);
+ module->set_dfs_ancestor_index(*dfs_index);
+ stack->push_front(module);
+ (*dfs_index)++;
// Initialization.
DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
@@ -20457,8 +20082,25 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
// Recursion.
Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> import(Module::cast(requested_modules->get(i)), isolate);
- RETURN_ON_EXCEPTION(isolate, Evaluate(import), Object);
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ RETURN_ON_EXCEPTION(isolate, Evaluate(requested_module, stack, dfs_index),
+ Object);
+
+ DCHECK_GE(requested_module->status(), kEvaluating);
+ DCHECK_NE(requested_module->status(), kErrored);
+ SLOW_DCHECK(
+ // {requested_module} is evaluating iff it's on the {stack}.
+ (requested_module->status() == kEvaluating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ if (requested_module->status() == kEvaluating) {
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ requested_module->dfs_ancestor_index()));
+ }
}
// Evaluation of module body.
@@ -20471,6 +20113,8 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
DCHECK(static_cast<JSIteratorResult*>(JSObject::cast(*result))
->done()
->BooleanValue());
+
+ MaybeTransitionComponent(module, stack, kEvaluated);
return handle(
static_cast<JSIteratorResult*>(JSObject::cast(*result))->value(),
isolate);
@@ -20480,7 +20124,8 @@ namespace {
void FetchStarExports(Handle<Module> module, Zone* zone,
UnorderedModuleSet* visited) {
- DCHECK(module->instantiated());
+ DCHECK_NE(module->status(), Module::kErrored);
+ DCHECK_GE(module->status(), Module::kInstantiated);
bool cycle = !visited->insert(module).second;
if (cycle) return;
@@ -20515,9 +20160,9 @@ void FetchStarExports(Handle<Module> module, Zone* zone,
Handle<ObjectHashTable> requested_exports(requested_module->exports(),
isolate);
for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
- Handle<Object> key(requested_exports->KeyAt(i), isolate);
- if (!requested_exports->IsKey(isolate, *key)) continue;
- Handle<String> name = Handle<String>::cast(key);
+ Object* key;
+ if (!requested_exports->ToKey(isolate, i, &key)) continue;
+ Handle<String> name(String::cast(key), isolate);
if (name->Equals(isolate->heap()->default_string())) continue;
if (!exports->Lookup(name)->IsTheHole(isolate)) continue;
@@ -20581,10 +20226,9 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
ZoneVector<Handle<String>> names(&zone);
names.reserve(exports->NumberOfElements());
for (int i = 0, n = exports->Capacity(); i < n; ++i) {
- Handle<Object> key(exports->KeyAt(i), isolate);
- if (!exports->IsKey(isolate, *key)) continue;
- DCHECK(exports->ValueAt(i)->IsCell());
- names.push_back(Handle<String>::cast(key));
+ Object* key;
+ if (!exports->ToKey(isolate, i, &key)) continue;
+ names.push_back(handle(String::cast(key), isolate));
}
DCHECK_EQ(static_cast<int>(names.size()), exports->NumberOfElements());
@@ -20628,7 +20272,7 @@ ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
if (type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
// Should be ignored for key iterators.
- return FAST_ELEMENTS;
+ return PACKED_ELEMENTS;
} else {
ElementsKind kind;
if (type < FIRST_ARRAY_VALUE_ITERATOR_TYPE) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 1cfdbe6f04..ba02ebeefe 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -22,7 +22,6 @@
#include "src/property-details.h"
#include "src/unicode-decoder.h"
#include "src/unicode.h"
-#include "src/zone/zone.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h" // NOLINT
@@ -74,6 +73,10 @@
// - JSDate
// - JSMessageObject
// - JSModuleNamespace
+// - WasmInstanceObject
+// - WasmMemoryObject
+// - WasmModuleObject
+// - WasmTableObject
// - JSProxy
// - FixedArrayBase
// - ByteArray
@@ -100,6 +103,8 @@
// - ModuleInfo
// - ScriptContextTable
// - WeakFixedArray
+// - WasmSharedModuleData
+// - WasmCompiledModule
// - FixedDoubleArray
// - Name
// - String
@@ -124,11 +129,15 @@
// - HeapNumber
// - Cell
// - PropertyCell
+// - PropertyArray
// - Code
// - AbstractCode, a wrapper around Code or BytecodeArray
// - Map
// - Oddball
// - Foreign
+// - SmallOrderedHashTable
+// - SmallOrderedHashMap
+// - SmallOrderedHashSet
// - SharedFunctionInfo
// - Struct
// - AccessorInfo
@@ -145,10 +154,12 @@
// - DebugInfo
// - BreakPointInfo
// - StackFrameInfo
+// - SourcePositionTableWithFrameCache
// - CodeCache
// - PrototypeInfo
// - Module
// - ModuleInfoEntry
+// - PreParsedScopeData
// - WeakCell
//
// Formats of Object*:
@@ -225,14 +236,6 @@ enum PropertyNormalizationMode {
};
-// Indicates how aggressively the prototype should be optimized. FAST_PROTOTYPE
-// will give the fastest result by tailoring the map to the prototype, but that
-// will cause polymorphism with other objects. REGULAR_PROTOTYPE is to be used
-// (at least for now) when dynamically modifying the prototype chain of an
-// object using __proto__ or Object.setPrototypeOf.
-enum PrototypeOptimizationMode { REGULAR_PROTOTYPE, FAST_PROTOTYPE };
-
-
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
@@ -362,18 +365,16 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(MODULE_TYPE) \
V(MODULE_INFO_ENTRY_TYPE) \
V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(PREPARSED_SCOPE_DATA_TYPE) \
V(FIXED_ARRAY_TYPE) \
+ V(PROPERTY_ARRAY_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
V(CELL_TYPE) \
V(WEAK_CELL_TYPE) \
V(PROPERTY_CELL_TYPE) \
- /* TODO(yangguo): these padding types are for ABI stability. Remove after*/ \
- /* version 6.0 branch, or replace them when there is demand for new types.*/ \
- V(PADDING_TYPE_1) \
- V(PADDING_TYPE_2) \
- V(PADDING_TYPE_3) \
- V(PADDING_TYPE_4) \
+ V(SMALL_ORDERED_HASH_MAP_TYPE) \
+ V(SMALL_ORDERED_HASH_SET_TYPE) \
\
V(JS_PROXY_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
@@ -395,8 +396,11 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_DATA_VIEW_TYPE) \
V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
- V(JS_SET_ITERATOR_TYPE) \
- V(JS_MAP_ITERATOR_TYPE) \
+ V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_SET_VALUE_ITERATOR_TYPE) \
+ V(JS_MAP_KEY_ITERATOR_TYPE) \
+ V(JS_MAP_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_MAP_VALUE_ITERATOR_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_PROMISE_CAPABILITY_TYPE) \
@@ -446,6 +450,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) \
\
+ V(WASM_INSTANCE_TYPE) \
+ V(WASM_MEMORY_TYPE) \
+ V(WASM_MODULE_TYPE) \
+ V(WASM_TABLE_TYPE) \
V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE)
@@ -535,7 +543,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
V(MODULE, Module, module) \
V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
- V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request)
+ V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request) \
+ V(PREPARSED_SCOPE_DATA, PreParsedScopeData, preparsed_scope_data)
// We use the full 8 bits of the instance_type field to encode heap object
// instance types. The high-order bit (bit 7) is set if the object is not a
@@ -602,7 +611,7 @@ static inline bool IsShortcutCandidate(int type) {
return ((type & kShortcutTypeMask) == kShortcutTypeTag);
}
-enum InstanceType {
+enum InstanceType : uint8_t {
// String types.
INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
kInternalizedTag, // FIRST_PRIMITIVE_TYPE
@@ -704,19 +713,16 @@ enum InstanceType {
MODULE_TYPE,
MODULE_INFO_ENTRY_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
+ PREPARSED_SCOPE_DATA_TYPE,
FIXED_ARRAY_TYPE,
+ PROPERTY_ARRAY_TYPE,
TRANSITION_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
CELL_TYPE,
WEAK_CELL_TYPE,
PROPERTY_CELL_TYPE,
-
- // All the following types are subtypes of JSReceiver, which corresponds to
- // objects in the JS sense. The first and the last type in this range are
- PADDING_TYPE_1,
- PADDING_TYPE_2,
- PADDING_TYPE_3,
- PADDING_TYPE_4,
+ SMALL_ORDERED_HASH_MAP_TYPE,
+ SMALL_ORDERED_HASH_SET_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -745,8 +751,11 @@ enum InstanceType {
JS_DATA_VIEW_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
- JS_SET_ITERATOR_TYPE,
- JS_MAP_ITERATOR_TYPE,
+ JS_SET_KEY_VALUE_ITERATOR_TYPE,
+ JS_SET_VALUE_ITERATOR_TYPE,
+ JS_MAP_KEY_ITERATOR_TYPE,
+ JS_MAP_KEY_VALUE_ITERATOR_TYPE,
+ JS_MAP_VALUE_ITERATOR_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
JS_PROMISE_CAPABILITY_TYPE,
@@ -796,6 +805,10 @@ enum InstanceType {
JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE,
+ WASM_INSTANCE_TYPE,
+ WASM_MEMORY_TYPE,
+ WASM_MODULE_TYPE,
+ WASM_TABLE_TYPE,
JS_BOUND_FUNCTION_TYPE,
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
@@ -844,6 +857,12 @@ enum InstanceType {
FIRST_ARRAY_ITERATOR_TYPE = FIRST_ARRAY_KEY_ITERATOR_TYPE,
LAST_ARRAY_ITERATOR_TYPE = LAST_ARRAY_VALUE_ITERATOR_TYPE,
+
+ FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
+ LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
+
+ FIRST_MAP_ITERATOR_TYPE = JS_MAP_KEY_ITERATOR_TYPE,
+ LAST_MAP_ITERATOR_TYPE = JS_MAP_VALUE_ITERATOR_TYPE,
};
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -871,7 +890,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(DICTIONARY_ELEMENTS_SUB_TYPE) \
V(DICTIONARY_PROPERTIES_SUB_TYPE) \
V(EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE) \
- V(FAST_ELEMENTS_SUB_TYPE) \
+ V(PACKED_ELEMENTS_SUB_TYPE) \
V(FAST_PROPERTIES_SUB_TYPE) \
V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
V(HANDLER_TABLE_SUB_TYPE) \
@@ -930,13 +949,12 @@ enum class ComparisonResult {
class AbstractCode;
class AccessorPair;
class AllocationSite;
-class AllocationSiteCreationContext;
-class AllocationSiteUsageContext;
class Cell;
class ConsString;
class ElementsAccessor;
class FindAndReplacePattern;
class FixedArrayBase;
+class PropertyArray;
class FunctionLiteral;
class JSGlobalObject;
class KeyAccumulator;
@@ -961,14 +979,16 @@ class FeedbackVector;
class WeakCell;
class TransitionArray;
class TemplateList;
+template <typename T>
+class ZoneForwardList;
// A template-ized version of the IsXXX functions.
template <class C> inline bool Is(Object* obj);
#ifdef OBJECT_PRINT
-#define DECLARE_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
+#define DECL_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
#else
-#define DECLARE_PRINTER(Name)
+#define DECL_PRINTER(Name)
#endif
#define OBJECT_TYPE_LIST(V) \
@@ -997,8 +1017,8 @@ template <class C> inline bool Is(Object* obj);
V(ConstantElementsPair) \
V(Constructor) \
V(Context) \
+ V(CoverageInfo) \
V(DeoptimizationInputData) \
- V(DeoptimizationOutputData) \
V(DependentCode) \
V(DescriptorArray) \
V(Dictionary) \
@@ -1051,6 +1071,10 @@ template <class C> inline bool Is(Object* obj);
V(JSMapIterator) \
V(JSMessageObject) \
V(JSModuleNamespace) \
+ V(WasmInstanceObject) \
+ V(WasmMemoryObject) \
+ V(WasmModuleObject) \
+ V(WasmTableObject) \
V(JSObject) \
V(JSPromise) \
V(JSPromiseCapability) \
@@ -1077,6 +1101,7 @@ template <class C> inline bool Is(Object* obj);
V(ObjectHashTable) \
V(Oddball) \
V(OrderedHashTable) \
+ V(PropertyArray) \
V(PropertyCell) \
V(RegExpMatchInfo) \
V(ScopeInfo) \
@@ -1087,6 +1112,8 @@ template <class C> inline bool Is(Object* obj);
V(SharedFunctionInfo) \
V(SlicedString) \
V(SloppyArgumentsElements) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
V(SourcePositionTableWithFrameCache) \
V(String) \
V(StringSet) \
@@ -1170,26 +1197,26 @@ class Object {
#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
-#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
- INLINE(bool Is##Name() const);
- STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
-#undef DECLARE_STRUCT_PREDICATE
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) INLINE(bool Is##Name() const);
+ STRUCT_LIST(DECL_STRUCT_PREDICATE)
+#undef DECL_STRUCT_PREDICATE
- // ES6, section 7.2.2 IsArray. NOT to be confused with %_IsArray.
- MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object);
+ // ES6, #sec-isarray. NOT to be confused with %_IsArray.
+ INLINE(MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object));
INLINE(bool IsNameDictionary() const);
INLINE(bool IsGlobalDictionary() const);
INLINE(bool IsSeededNumberDictionary() const);
INLINE(bool IsOrderedHashSet() const);
INLINE(bool IsOrderedHashMap() const);
+ INLINE(bool IsSmallOrderedHashTable() const);
// Extract the number.
inline double Number() const;
INLINE(bool IsNaN() const);
INLINE(bool IsMinusZero() const);
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
- inline bool ToUint32(uint32_t* value);
+ inline bool ToUint32(uint32_t* value) const;
inline Representation OptimalRepresentation();
@@ -1468,19 +1495,19 @@ class Object {
// Tries to convert an object to an array length. Returns true and sets the
// output parameter if it succeeds.
- inline bool ToArrayLength(uint32_t* index);
+ inline bool ToArrayLength(uint32_t* index) const;
// Tries to convert an object to an array index. Returns true and sets the
// output parameter if it succeeds. Equivalent to ToArrayLength, but does not
// allow kMaxUInt32.
- inline bool ToArrayIndex(uint32_t* index);
+ inline bool ToArrayIndex(uint32_t* index) const;
// Returns true if the result of iterating over the object is the same
// (including observable effects) as simply accessing the properties between 0
// and length.
bool IterationHasObservableEffects();
- DECLARE_VERIFIER(Object)
+ DECL_VERIFIER(Object)
#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
static void VerifyPointer(Object* p);
@@ -1496,7 +1523,7 @@ class Object {
void ShortPrint(std::ostream& os); // NOLINT
- DECLARE_CAST(Object)
+ DECL_CAST(Object)
// Layout description.
static const int kHeaderSize = 0; // Object does not take up any space.
@@ -1517,7 +1544,7 @@ class Object {
friend class StringStream;
// Return the map of the root of object's prototype chain.
- Map* GetPrototypeChainRootMap(Isolate* isolate);
+ Map* GetPrototypeChainRootMap(Isolate* isolate) const;
// Helper for SetProperty and SetSuperProperty.
// Return value is only meaningful if [found] is set to true on return.
@@ -1578,6 +1605,9 @@ class Smi: public Object {
return Smi::FromInt(static_cast<uint32_t>(value()));
}
+ // Convert a Smi object to an int.
+ static inline int ToInt(const Object* object);
+
// Convert a value to a Smi object.
static inline Smi* FromInt(int value) {
DCHECK(Smi::IsValid(value));
@@ -1590,6 +1620,13 @@ class Smi: public Object {
return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}
+ template <typename E,
+ typename = typename std::enable_if<std::is_enum<E>::value>::type>
+ static inline Smi* FromEnum(E value) {
+ STATIC_ASSERT(sizeof(E) <= sizeof(int));
+ return FromInt(static_cast<int>(value));
+ }
+
// Returns whether value can be represented in a Smi.
static inline bool IsValid(intptr_t value) {
bool result = Internals::IsValidSmi(value);
@@ -1597,11 +1634,11 @@ class Smi: public Object {
return result;
}
- DECLARE_CAST(Smi)
+ DECL_CAST(Smi)
// Dispatched behavior.
V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const; // NOLINT
- DECLARE_VERIFIER(Smi)
+ DECL_VERIFIER(Smi)
static constexpr Smi* const kZero = nullptr;
static const int kMinValue =
@@ -1625,8 +1662,7 @@ class MapWord BASE_EMBEDDED {
static inline MapWord FromMap(const Map* map);
// View this map word as a map pointer.
- inline Map* ToMap();
-
+ inline Map* ToMap() const;
// Scavenge collection: the map word of live objects in the from space
// contains a forwarding address (a heap object pointer in the to space).
@@ -1677,7 +1713,7 @@ class HeapObject: public Object {
inline void set_map_no_write_barrier(Map* value);
// Get the map using acquire load.
- inline Map* synchronized_map();
+ inline Map* synchronized_map() const;
inline MapWord synchronized_map_word() const;
// Set the map using release store
@@ -1711,10 +1747,9 @@ class HeapObject: public Object {
INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
-#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
- INLINE(bool Is##Name() const);
- STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
-#undef DECLARE_STRUCT_PREDICATE
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) INLINE(bool Is##Name() const);
+ STRUCT_LIST(DECL_STRUCT_PREDICATE)
+#undef DECL_STRUCT_PREDICATE
// Converts an address to a HeapObject pointer.
static inline HeapObject* FromAddress(Address address) {
@@ -1723,8 +1758,9 @@ class HeapObject: public Object {
}
// Returns the address of this HeapObject.
- inline Address address() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag;
+ inline Address address() const {
+ return reinterpret_cast<Address>(const_cast<HeapObject*>(this)) -
+ kHeapObjectTag;
}
// Iterates over pointers contained in the object (including the Map).
@@ -1757,12 +1793,12 @@ class HeapObject: public Object {
bool IsValidSlot(int offset);
// Returns the heap object's size in bytes
- inline int Size();
+ inline int Size() const;
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
// GC internal.
- inline int SizeFromMap(Map* map);
+ inline int SizeFromMap(Map* map) const;
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
@@ -1777,7 +1813,7 @@ class HeapObject: public Object {
Handle<Name> name,
Handle<Code> code);
- DECLARE_CAST(HeapObject)
+ DECL_CAST(HeapObject)
// Return the write barrier mode for this. Callers of this function
// must be able to present a reference to an DisallowHeapAllocation
@@ -1792,8 +1828,8 @@ class HeapObject: public Object {
#ifdef OBJECT_PRINT
void PrintHeader(std::ostream& os, const char* id); // NOLINT
#endif
- DECLARE_PRINTER(HeapObject)
- DECLARE_VERIFIER(HeapObject)
+ DECL_PRINTER(HeapObject)
+ DECL_VERIFIER(HeapObject)
#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
inline void VerifySmiField(int offset);
@@ -1803,7 +1839,7 @@ class HeapObject: public Object {
static void VerifyHeapPointer(Object* p);
#endif
- inline AllocationAlignment RequiredAlignment();
+ inline AllocationAlignment RequiredAlignment() const;
// Layout description.
// First field in a heap object is map.
@@ -1836,13 +1872,13 @@ class HeapNumber: public HeapObject {
inline uint64_t value_as_bits() const;
inline void set_value_as_bits(uint64_t bits);
- DECLARE_CAST(HeapNumber)
+ DECL_CAST(HeapNumber)
// Dispatched behavior.
bool HeapNumberBooleanValue();
V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os); // NOLINT
- DECLARE_VERIFIER(HeapNumber)
+ DECL_VERIFIER(HeapNumber)
inline int get_exponent();
inline int get_sign();
@@ -1907,20 +1943,41 @@ enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
public:
- // [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case and a Dictionary in the
- // slow case.
- DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
- inline void initialize_properties();
- inline bool HasFastProperties();
+  // Returns true if there is no slow (i.e., dictionary) backing store.
+ inline bool HasFastProperties() const;
+
+  // Returns the PropertyArray backing store if it exists. Otherwise (when
+  // the field holds a Smi hash code, or the empty_fixed_array of a fast
+  // properties map) it returns empty_property_array.
+ inline PropertyArray* property_array() const;
+
// Gets slow properties for non-global objects.
- inline NameDictionary* property_dictionary();
+ inline NameDictionary* property_dictionary() const;
+
+ inline void SetProperties(HeapObject* properties);
+
+  // There are four possible values for the properties offset.
+ // 1) EmptyFixedArray -- This is the standard placeholder.
+ //
+ // 2) TODO(gsathya): Smi -- This is the hash code of the object.
+ //
+ // 3) PropertyArray - This is similar to a FixedArray but stores
+ // the hash code of the object in its length field. This is a fast
+ // backing store.
+ //
+ // 4) NameDictionary - This is the dictionary-mode backing store.
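+  //
+  // A fast-mode object typically starts out with the empty_fixed_array
+  // placeholder, gets a PropertyArray once out-of-object properties are
+  // added, and switches to a NameDictionary when it is normalized to
+  // dictionary mode.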
+ //
+ // This is used only in the deoptimizer and heap. Please use the
+ // above typed getters and setters to access the properties.
+ DECL_ACCESSORS(raw_properties_or_hash, Object)
+
+ inline void initialize_properties();
// Deletes an existing named property in a normalized object.
- static void DeleteNormalizedProperty(Handle<JSReceiver> object,
- Handle<Name> name, int entry);
+ static void DeleteNormalizedProperty(Handle<JSReceiver> object, int entry);
- DECLARE_CAST(JSReceiver)
+ DECL_CAST(JSReceiver)
// ES6 section 7.1.1 ToPrimitive
MUST_USE_RESULT static MaybeHandle<Object> ToPrimitive(
@@ -2014,7 +2071,7 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
Isolate* isolate, LookupIterator* it, bool extensible,
PropertyDescriptor* desc, PropertyDescriptor* current,
- ShouldThrow should_throw, Handle<Name> property_name = Handle<Name>());
+ ShouldThrow should_throw, Handle<Name> property_name);
V8_EXPORT_PRIVATE MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
@@ -2096,7 +2153,7 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object, PropertyFilter filter);
// Layout description.
- static const int kPropertiesOffset = HeapObject::kHeaderSize;
+ static const int kPropertiesOrHashOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
bool HasProxyInPrototype(Isolate* isolate);
@@ -2112,13 +2169,12 @@ class JSReceiver: public HeapObject {
// caching.
class JSObject: public JSReceiver {
public:
+ static bool IsUnmodifiedApiObject(Object** o);
+
static MUST_USE_RESULT MaybeHandle<JSObject> New(
Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
Handle<AllocationSite> site = Handle<AllocationSite>::null());
- // Gets global object properties.
- inline GlobalDictionary* global_dictionary();
-
static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
// [elements]: The elements (properties with names that are integers).
@@ -2140,27 +2196,28 @@ class JSObject: public JSReceiver {
// FixedArray parameter map for a (sloppy) arguments object.
DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
- static void ResetElements(Handle<JSObject> object);
static inline void SetMapAndElements(Handle<JSObject> object,
Handle<Map> map,
Handle<FixedArrayBase> elements);
inline ElementsKind GetElementsKind();
ElementsAccessor* GetElementsAccessor();
- // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
- inline bool HasFastSmiElements();
- // Returns true if an object has elements of FAST_ELEMENTS ElementsKind.
- inline bool HasFastObjectElements();
- // Returns true if an object has elements of FAST_ELEMENTS or
- // FAST_SMI_ONLY_ELEMENTS.
- inline bool HasFastSmiOrObjectElements();
- // Returns true if an object has any of the fast elements kinds.
+ // Returns true if an object has elements of PACKED_SMI_ELEMENTS or
+ // HOLEY_SMI_ELEMENTS ElementsKind.
+ inline bool HasSmiElements();
+ // Returns true if an object has elements of PACKED_ELEMENTS or
+ // HOLEY_ELEMENTS ElementsKind.
+ inline bool HasObjectElements();
+ // Returns true if an object has elements of PACKED_SMI_ELEMENTS,
+ // HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, or HOLEY_ELEMENTS.
+ inline bool HasSmiOrObjectElements();
+ // Returns true if an object has any of the "fast" elements kinds.
inline bool HasFastElements();
- // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
- // ElementsKind.
- inline bool HasFastDoubleElements();
- // Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
- // ElementsKind.
- inline bool HasFastHoleyElements();
+ // Returns true if an object has elements of PACKED_DOUBLE_ELEMENTS or
+ // HOLEY_DOUBLE_ELEMENTS ElementsKind.
+ inline bool HasDoubleElements();
+ // Returns true if an object has elements of HOLEY_SMI_ELEMENTS,
+ // HOLEY_DOUBLE_ELEMENTS, or HOLEY_ELEMENTS ElementsKind.
+ inline bool HasHoleyElements();
inline bool HasSloppyArgumentsElements();
inline bool HasStringWrapperElements();
inline bool HasDictionaryElements();
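For orientation only, the renamed testers group the elements kinds roughly as in the sketch below; the enum and its values are hypothetical stand-ins, not V8's actual ElementsKind numbering.

// Sketch of the packed/holey grouping behind the renamed element-kind predicates.
#include <iostream>

enum class ElementsKindSketch {
  PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS,
  PACKED_ELEMENTS, HOLEY_ELEMENTS,
  PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS
};

bool HasSmiElements(ElementsKindSketch k) {
  return k == ElementsKindSketch::PACKED_SMI_ELEMENTS ||
         k == ElementsKindSketch::HOLEY_SMI_ELEMENTS;
}
bool HasObjectElements(ElementsKindSketch k) {
  return k == ElementsKindSketch::PACKED_ELEMENTS ||
         k == ElementsKindSketch::HOLEY_ELEMENTS;
}
bool HasDoubleElements(ElementsKindSketch k) {
  return k == ElementsKindSketch::PACKED_DOUBLE_ELEMENTS ||
         k == ElementsKindSketch::HOLEY_DOUBLE_ELEMENTS;
}
bool HasHoleyElements(ElementsKindSketch k) {
  return k == ElementsKindSketch::HOLEY_SMI_ELEMENTS ||
         k == ElementsKindSketch::HOLEY_ELEMENTS ||
         k == ElementsKindSketch::HOLEY_DOUBLE_ELEMENTS;
}

int main() {
  ElementsKindSketch k = ElementsKindSketch::HOLEY_SMI_ELEMENTS;
  std::cout << HasSmiElements(k) << HasHoleyElements(k) << HasDoubleElements(k) << "\n";  // 110
}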
@@ -2189,17 +2246,6 @@ class JSObject: public JSReceiver {
// Requires: HasFastElements().
static void EnsureWritableFastElements(Handle<JSObject> object);
- // Collects elements starting at index 0.
- // Undefined values are placed after non-undefined values.
- // Returns the number of non-undefined values.
- static Handle<Object> PrepareElementsForSort(Handle<JSObject> object,
- uint32_t limit);
- // As PrepareElementsForSort, but only on objects where elements is
- // a dictionary, and it will stay a dictionary. Collates undefined and
- // unexisting elements below limit from position zero of the elements.
- static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
- uint32_t limit);
-
MUST_USE_RESULT static Maybe<bool> SetPropertyWithInterceptor(
LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
@@ -2274,8 +2320,7 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
- static void OptimizeAsPrototype(Handle<JSObject> object,
- PrototypeOptimizationMode mode);
+ static void OptimizeAsPrototype(Handle<JSObject> object);
static void ReoptimizeIfPrototype(Handle<JSObject> object);
static void MakePrototypesFast(Handle<Object> receiver,
WhereToStart where_to_start, Isolate* isolate);
@@ -2333,7 +2378,7 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
LookupIterator* it, bool* done);
- static void ValidateElements(Handle<JSObject> object);
+ static void ValidateElements(JSObject* object);
// Makes sure that this object can contain HeapObject as elements.
static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
@@ -2389,11 +2434,11 @@ class JSObject: public JSReceiver {
// Get the header size for a JSObject. Used to compute the index of
// embedder fields as well as the number of embedder fields.
- static inline int GetHeaderSize(InstanceType instance_type);
+ static int GetHeaderSize(InstanceType instance_type);
inline int GetHeaderSize();
- static inline int GetEmbedderFieldCount(Map* map);
- inline int GetEmbedderFieldCount();
+ static inline int GetEmbedderFieldCount(const Map* map);
+ inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
inline Object* GetEmbedderField(int index);
inline void SetEmbedderField(int index, Object* value);
@@ -2482,28 +2527,20 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
+ MUST_USE_RESULT static Maybe<bool> TestIntegrityLevel(Handle<JSObject> object,
+ IntegrityLevel lvl);
+
MUST_USE_RESULT static Maybe<bool> PreventExtensions(
Handle<JSObject> object, ShouldThrow should_throw);
static bool IsExtensible(Handle<JSObject> object);
- // Copy object.
- enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
-
- MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopy(
- Handle<JSObject> object,
- AllocationSiteUsageContext* site_context,
- DeepCopyHints hints = kNoHints);
- MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk(
- Handle<JSObject> object,
- AllocationSiteCreationContext* site_context);
-
- DECLARE_CAST(JSObject)
+ DECL_CAST(JSObject)
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
- DECLARE_PRINTER(JSObject)
- DECLARE_VERIFIER(JSObject)
+ DECL_PRINTER(JSObject)
+ DECL_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
bool PrintProperties(std::ostream& os); // NOLINT
void PrintElements(std::ostream& os); // NOLINT
@@ -2574,6 +2611,7 @@ class JSObject: public JSReceiver {
static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
static const int kMaxInstanceSize = 255 * kPointerSize;
+
// When extending the backing storage for property values, we increase
// its size by more than the 1 entry necessary, so sequentially adding fields
// to the same object requires fewer allocations and copies.
@@ -2584,9 +2622,16 @@ class JSObject: public JSReceiver {
static const int kHeaderSize = kElementsOffset + kPointerSize;
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
+ static const int kMaxInObjectProperties =
+ (kMaxInstanceSize - kHeaderSize) >> kPointerSizeLog2;
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
class FastBodyDescriptor;
+ // No weak fields.
+ typedef FastBodyDescriptor FastBodyDescriptorWeak;
// Gets the number of currently used elements.
int GetFastElementsUsage();
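A quick arithmetic check of the new kMaxInObjectProperties constant, assuming a 64-bit build (kPointerSize = 8, kPointerSizeLog2 = 3) and a single map word in the HeapObject header; the concrete numbers are illustrative assumptions, not asserted V8 layout.

// Back-of-the-envelope evaluation of kMaxInObjectProperties under assumed 64-bit sizes.
#include <iostream>

int main() {
  constexpr int kPointerSize = 8;                        // assumption: 64-bit build
  constexpr int kPointerSizeLog2 = 3;
  constexpr int kHeapObjectHeaderSize = kPointerSize;    // assumption: one map word
  constexpr int kJSObjectHeaderSize =
      kHeapObjectHeaderSize + kPointerSize /* properties_or_hash */ + kPointerSize /* elements */;
  constexpr int kMaxInstanceSize = 255 * kPointerSize;   // mirrors the constant above
  constexpr int kMaxInObjectProperties =
      (kMaxInstanceSize - kJSObjectHeaderSize) >> kPointerSizeLog2;
  std::cout << kMaxInObjectProperties << "\n";           // prints 252 under these assumptions
}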
@@ -2695,59 +2740,6 @@ class JSIteratorResult: public JSObject {
};
-// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
-class JSArgumentsObject: public JSObject {
- public:
- // Offsets of object fields.
- static const int kLengthOffset = JSObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kLengthIndex = 0;
-
- DECL_ACCESSORS(length, Object)
-
- DECLARE_VERIFIER(JSArgumentsObject)
- DECLARE_CAST(JSArgumentsObject)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
-};
-
-
-// JSSloppyArgumentsObject is just a JSObject with specific initial map.
-// This initial map adds in-object properties for "length" and "callee".
-class JSSloppyArgumentsObject: public JSArgumentsObject {
- public:
- // Offsets of object fields.
- static const int kCalleeOffset = JSArgumentsObject::kHeaderSize;
- static const int kSize = kCalleeOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kCalleeIndex = kLengthIndex + 1;
-
- DECL_ACCESSORS(callee, Object)
-
- DECLARE_VERIFIER(JSSloppyArgumentsObject)
- DECLARE_CAST(JSSloppyArgumentsObject)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSSloppyArgumentsObject);
-};
-
-
-// JSStrictArgumentsObject is just a JSObject with specific initial map.
-// This initial map adds an in-object property for "length".
-class JSStrictArgumentsObject: public JSArgumentsObject {
- public:
- // Offsets of object fields.
- static const int kSize = JSArgumentsObject::kHeaderSize;
-
- DECLARE_CAST(JSStrictArgumentsObject)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
-};
-
-
// Common superclass for FixedArrays that allow implementations to share
// common accessors and some code paths.
class FixedArrayBase: public HeapObject {
@@ -2760,10 +2752,12 @@ class FixedArrayBase: public HeapObject {
inline int synchronized_length() const;
inline void synchronized_set_length(int value);
- DECLARE_CAST(FixedArrayBase)
+ DECL_CAST(FixedArrayBase)
static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
+ bool IsCowArray() const;
+
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
@@ -2834,7 +2828,7 @@ class FixedArray: public FixedArrayBase {
// Garbage collection support.
inline Object** RawFieldOfElementAt(int index);
- DECLARE_CAST(FixedArray)
+ DECL_CAST(FixedArray)
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
@@ -2848,14 +2842,16 @@ class FixedArray: public FixedArrayBase {
(kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
- DECLARE_PRINTER(FixedArray)
- DECLARE_VERIFIER(FixedArray)
+ DECL_PRINTER(FixedArray)
+ DECL_VERIFIER(FixedArray)
#ifdef DEBUG
// Checks if two FixedArrays have identical contents.
bool IsEqualTo(FixedArray* other);
#endif
typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
protected:
// Set operation on FixedArray without using write barriers. Can
@@ -2899,7 +2895,7 @@ class FixedDoubleArray: public FixedArrayBase {
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
- DECLARE_CAST(FixedDoubleArray)
+ DECL_CAST(FixedDoubleArray)
// Maximal allowed size, in bytes, of a single FixedDoubleArray.
// Prevents overflowing size computations, as well as extreme memory
@@ -2909,60 +2905,17 @@ class FixedDoubleArray: public FixedArrayBase {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
// Dispatched behavior.
- DECLARE_PRINTER(FixedDoubleArray)
- DECLARE_VERIFIER(FixedDoubleArray)
+ DECL_PRINTER(FixedDoubleArray)
+ DECL_VERIFIER(FixedDoubleArray)
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
};
-// Helper class to access FAST_ and SLOW_SLOPPY_ARGUMENTS_ELEMENTS
-//
-// +---+-----------------------+
-// | 0 | Context* context |
-// +---------------------------+
-// | 1 | FixedArray* arguments +----+ FAST_HOLEY_ELEMENTS
-// +---------------------------+ v-----+-----------+
-// | 2 | Object* param_1_map | | 0 | the_hole |
-// |...| ... | | ... | ... |
-// |n+1| Object* param_n_map | | n-1 | the_hole |
-// +---------------------------+ | n | element_1 |
-// | ... | ... |
-// |n+m-1| element_m |
-// +-----------------+
-//
-// Parameter maps give the index into the provided context. If a map entry is
-// the_hole it means that the given entry has been deleted from the arguments
-// object.
-// The arguments backing store kind depends on the ElementsKind of the outer
-// JSArgumentsObject:
-// - FAST_SLOPPY_ARGUMENTS_ELEMENTS: FAST_HOLEY_ELEMENTS
-// - SLOW_SLOPPY_ARGUMENTS_ELEMENTS: DICTIONARY_ELEMENTS
-class SloppyArgumentsElements : public FixedArray {
- public:
- static const int kContextIndex = 0;
- static const int kArgumentsIndex = 1;
- static const uint32_t kParameterMapStart = 2;
-
- inline Context* context();
- inline FixedArray* arguments();
- inline void set_arguments(FixedArray* arguments);
- inline uint32_t parameter_map_length();
- inline Object* get_mapped_entry(uint32_t entry);
- inline void set_mapped_entry(uint32_t entry, Object* object);
-
- DECLARE_CAST(SloppyArgumentsElements)
-#ifdef VERIFY_HEAP
- void SloppyArgumentsElementsVerify(JSSloppyArgumentsObject* holder);
-#endif
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyArgumentsElements);
-};
-
class WeakFixedArray : public FixedArray {
public:
// If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
@@ -3008,7 +2961,7 @@ class WeakFixedArray : public FixedArray {
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
- DECLARE_CAST(WeakFixedArray)
+ DECL_CAST(WeakFixedArray)
private:
static const int kLastUsedIndexIndex = 0;
@@ -3071,9 +3024,9 @@ class ArrayList : public FixedArray {
// Return a copy of the list of size Length() without the first entry. The
// number returned by Length() is stored in the first entry.
- Handle<FixedArray> Elements() const;
+ static Handle<FixedArray> Elements(Handle<ArrayList> array);
bool IsFull();
- DECLARE_CAST(ArrayList)
+ DECL_CAST(ArrayList)
private:
static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
@@ -3082,15 +3035,58 @@ class ArrayList : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
};
+class PropertyArray : public HeapObject {
+ public:
+ // [length]: length of the array.
+ inline int length() const;
+ inline void set_length(int length);
+
+ // Get and set the length using acquire loads and release stores.
+ inline int synchronized_length() const;
+ inline void synchronized_set_length(int value);
+
+ inline Object* get(int index) const;
+
+ // Setter that doesn't need write barrier.
+ inline void set(int index, Object* value);
+ // Setter with explicit barrier mode.
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kPointerSize;
+ }
+
+ DECL_CAST(PropertyArray)
+ DECL_PRINTER(PropertyArray)
+ DECL_VERIFIER(PropertyArray)
+
+ // Layout description.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+
+ // Garbage collection support.
+ typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyArray);
+};
+
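The comment introducing PropertyArray says the object's hash code is stored in the length field. A hedged sketch of that idea follows; the bit split and widths here are invented for illustration, since the diff does not show V8's actual encoding.

// Sketch: packing a small length and an identity hash into one 32-bit field.
#include <cstdint>
#include <iostream>

constexpr uint32_t kLengthBits = 10;                  // arbitrary split for the example
constexpr uint32_t kLengthMask = (1u << kLengthBits) - 1;

uint32_t Pack(uint32_t length, uint32_t hash) {
  return (hash << kLengthBits) | (length & kLengthMask);
}
uint32_t UnpackLength(uint32_t field) { return field & kLengthMask; }
uint32_t UnpackHash(uint32_t field) { return field >> kLengthBits; }

int main() {
  uint32_t field = Pack(12, 0xBEEF);
  std::cout << UnpackLength(field) << " " << std::hex << UnpackHash(field) << "\n";  // 12 beef
}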
enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
template <SearchMode search_mode, typename T>
inline int Search(T* array, Name* name, int valid_entries = 0,
int* out_insertion_index = NULL);
-// The cache for maps used by normalized (dictionary mode) objects.
-// Such maps do not have property descriptors, so a typical program
-// needs very limited number of distinct normalized maps.
+// HandlerTable is a fixed array containing entries for exception handlers in
+// the code object it is associated with. The table comes in two flavors:
+// 1) Based on ranges: Used for unoptimized code. Contains one entry per
+// exception handler and a range representing the try-block covered by that
// handler. Layout looks as follows:
// [ range-start , range-end , handler-offset , handler-data ]
// 2) Based on return addresses: Used for turbofanned code. Contains one entry
@@ -3145,7 +3141,7 @@ class HandlerTable : public FixedArray {
static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
- DECLARE_CAST(HandlerTable)
+ DECL_CAST(HandlerTable)
#ifdef ENABLE_DISASSEMBLER
void HandlerTableRangePrint(std::ostream& os); // NOLINT
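To make the range-based layout described above concrete, here is a standalone lookup over a flat array of [range-start, range-end, handler-offset, handler-data] entries. The function name and the data are made up; a real table also has to pick the innermost matching range.

// Sketch of a range-based handler lookup over 4-int entries (illustrative only).
#include <iostream>
#include <vector>

constexpr int kRangeEntrySize = 4;

// Returns the handler offset whose try-range covers pc_offset, or -1 if none does.
int LookupRangeHandler(const std::vector<int>& table, int pc_offset) {
  for (size_t i = 0; i + kRangeEntrySize <= table.size(); i += kRangeEntrySize) {
    int start = table[i], end = table[i + 1];
    if (pc_offset >= start && pc_offset < end) return table[i + 2];
  }
  return -1;
}

int main() {
  // Two try-blocks: [10,20) -> handler at 100, [30,50) -> handler at 200.
  std::vector<int> table = {10, 20, 100, 0, 30, 50, 200, 0};
  std::cout << LookupRangeHandler(table, 15) << "\n";  // 100
  std::cout << LookupRangeHandler(table, 40) << "\n";  // 200
  std::cout << LookupRangeHandler(table, 25) << "\n";  // -1
}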
@@ -3177,7 +3173,7 @@ class ByteArray: public FixedArrayBase {
inline int Size();
// Setter and getter.
- inline byte get(int index);
+ inline byte get(int index) const;
inline void set(int index, byte value);
// Copy in / copy out whole byte slices.
@@ -3185,9 +3181,12 @@ class ByteArray: public FixedArrayBase {
inline void copy_in(int index, const byte* buffer, int length);
// Treat contents as an int array.
- inline int get_int(int index);
+ inline int get_int(int index) const;
inline void set_int(int index, int value);
+ inline uint32_t get_uint32(int index) const;
+ inline void set_uint32(int index, uint32_t value);
+
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
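SizeFor rounds the raw byte length up to pointer alignment via OBJECT_POINTER_ALIGN. A minimal sketch of that rounding, assuming 8-byte pointers and a made-up header size:

// Sketch of pointer-size alignment as used by SizeFor (8-byte pointers assumed).
#include <iostream>

constexpr int kPointerSize = 8;
constexpr int kHeaderSizeSketch = 16;  // hypothetical header size for the example

constexpr int ObjectPointerAlign(int value) {
  return (value + kPointerSize - 1) & ~(kPointerSize - 1);
}
constexpr int SizeFor(int length) { return ObjectPointerAlign(kHeaderSizeSketch + length); }

int main() {
  std::cout << SizeFor(1) << " " << SizeFor(8) << " " << SizeFor(9) << "\n";  // 24 24 32
}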
@@ -3204,15 +3203,17 @@ class ByteArray: public FixedArrayBase {
// Returns data start address.
inline Address GetDataStartAddress();
+ inline int DataSize() const;
+
// Returns a pointer to the ByteArray object for a given data start address.
static inline ByteArray* FromDataStartAddress(Address address);
- DECLARE_CAST(ByteArray)
+ DECL_CAST(ByteArray)
// Dispatched behavior.
inline int ByteArraySize();
- DECLARE_PRINTER(ByteArray)
- DECLARE_VERIFIER(ByteArray)
+ DECL_PRINTER(ByteArray)
+ DECL_VERIFIER(ByteArray)
// Layout description.
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
@@ -3223,6 +3224,8 @@ class ByteArray: public FixedArrayBase {
static const int kMaxLength = kMaxSize - kHeaderSize;
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
@@ -3249,7 +3252,7 @@ class PodArray : public ByteArray {
sizeof(T));
}
int length() { return ByteArray::length() / sizeof(T); }
- DECLARE_CAST(PodArray<T>)
+ DECL_CAST(PodArray<T>)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
@@ -3258,16 +3261,16 @@ class PodArray : public ByteArray {
// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray : public FixedArrayBase {
public:
-#define DECLARE_BYTECODE_AGE_ENUM(X) k##X##BytecodeAge,
+#define DECL_BYTECODE_AGE_ENUM(X) k##X##BytecodeAge,
enum Age {
kNoAgeBytecodeAge = 0,
- CODE_AGE_LIST(DECLARE_BYTECODE_AGE_ENUM) kAfterLastBytecodeAge,
+ CODE_AGE_LIST(DECL_BYTECODE_AGE_ENUM) kAfterLastBytecodeAge,
kFirstBytecodeAge = kNoAgeBytecodeAge,
kLastBytecodeAge = kAfterLastBytecodeAge - 1,
kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
kIsOldBytecodeAge = kSexagenarianBytecodeAge
};
-#undef DECLARE_BYTECODE_AGE_ENUM
+#undef DECL_BYTECODE_AGE_ENUM
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
@@ -3310,12 +3313,12 @@ class BytecodeArray : public FixedArrayBase {
DECL_ACCESSORS(handler_table, FixedArray)
// Accessors for source position table containing mappings between byte code
- // offset and source position.
+ // offset and source position or SourcePositionTableWithFrameCache.
DECL_ACCESSORS(source_position_table, Object)
inline ByteArray* SourcePositionTable();
- DECLARE_CAST(BytecodeArray)
+ DECL_CAST(BytecodeArray)
// Dispatched behavior.
inline int BytecodeArraySize();
@@ -3329,8 +3332,8 @@ class BytecodeArray : public FixedArrayBase {
int SourcePosition(int offset);
int SourceStatementPosition(int offset);
- DECLARE_PRINTER(BytecodeArray)
- DECLARE_VERIFIER(BytecodeArray)
+ DECL_PRINTER(BytecodeArray)
+ DECL_VERIFIER(BytecodeArray)
void Disassemble(std::ostream& os);
@@ -3360,11 +3363,9 @@ class BytecodeArray : public FixedArrayBase {
static const int kPointerFieldsBeginOffset = kConstantPoolOffset;
static const int kPointerFieldsEndOffset = kFrameSizeOffset;
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset, kHeaderSize>
- MarkingBodyDescriptor;
-
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
@@ -3382,8 +3383,8 @@ class FreeSpace: public HeapObject {
inline int size() const;
inline void set_size(int value);
- inline int nobarrier_size() const;
- inline void nobarrier_set_size(int value);
+ inline int relaxed_read_size() const;
+ inline void relaxed_write_size(int value);
inline int Size();
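The nobarrier_* to relaxed_* rename lines up with C++ memory-order terminology. A minimal standalone illustration of a relaxed read and write with std::atomic follows; this is not V8's implementation, just the concept.

// Relaxed (non-synchronizing) read/write of a size field, analogous to relaxed_read_size.
#include <atomic>
#include <iostream>

std::atomic<int> free_space_size{0};

int relaxed_read_size() { return free_space_size.load(std::memory_order_relaxed); }
void relaxed_write_size(int value) { free_space_size.store(value, std::memory_order_relaxed); }

int main() {
  relaxed_write_size(128);
  std::cout << relaxed_read_size() << "\n";  // 128
}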
@@ -3394,8 +3395,8 @@ class FreeSpace: public HeapObject {
inline static FreeSpace* cast(HeapObject* obj);
// Dispatched behavior.
- DECLARE_PRINTER(FreeSpace)
- DECLARE_VERIFIER(FreeSpace)
+ DECL_PRINTER(FreeSpace)
+ DECL_VERIFIER(FreeSpace)
// Layout description.
// Size is smi tagged when it is stored.
@@ -3432,7 +3433,7 @@ class FixedTypedArrayBase: public FixedArrayBase {
DECL_ACCESSORS(external_pointer, void)
// Dispatched behavior.
- DECLARE_CAST(FixedTypedArrayBase)
+ DECL_CAST(FixedTypedArrayBase)
static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
@@ -3453,21 +3454,23 @@ class FixedTypedArrayBase: public FixedArrayBase {
static const size_t kMaxLength = Smi::kMaxValue;
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
- inline int size();
+ inline int size() const;
static inline int TypedArraySize(InstanceType type, int length);
- inline int TypedArraySize(InstanceType type);
+ inline int TypedArraySize(InstanceType type) const;
// Use with care: returns raw pointer into heap.
inline void* DataPtr();
- inline int DataSize();
+ inline int DataSize() const;
private:
static inline int ElementSize(InstanceType type);
- inline int DataSize(InstanceType type);
+ inline int DataSize(InstanceType type) const;
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
};
@@ -3479,7 +3482,7 @@ class FixedTypedArray: public FixedTypedArrayBase {
typedef typename Traits::ElementType ElementType;
static const InstanceType kInstanceType = Traits::kInstanceType;
- DECLARE_CAST(FixedTypedArray<Traits>)
+ DECL_CAST(FixedTypedArray<Traits>)
inline ElementType get_scalar(int index);
static inline Handle<Object> get(FixedTypedArray* array, int index);
@@ -3493,8 +3496,8 @@ class FixedTypedArray: public FixedTypedArrayBase {
// and undefined.
inline void SetValue(uint32_t index, Object* value);
- DECLARE_PRINTER(FixedTypedArray)
- DECLARE_VERIFIER(FixedTypedArray)
+ DECL_PRINTER(FixedTypedArray)
+ DECL_VERIFIER(FixedTypedArray)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
@@ -3530,7 +3533,7 @@ class DeoptimizationInputData: public FixedArray {
static const int kTranslationByteArrayIndex = 0;
static const int kInlinedFunctionCountIndex = 1;
static const int kLiteralArrayIndex = 2;
- static const int kOsrAstIdIndex = 3;
+ static const int kOsrBytecodeOffsetIndex = 3;
static const int kOsrPcOffsetIndex = 4;
static const int kOptimizationIdIndex = 5;
static const int kSharedFunctionInfoIndex = 6;
@@ -3539,44 +3542,44 @@ class DeoptimizationInputData: public FixedArray {
static const int kFirstDeoptEntryIndex = 9;
// Offsets of deopt entry elements relative to the start of the entry.
- static const int kAstIdRawOffset = 0;
+ static const int kBytecodeOffsetRawOffset = 0;
static const int kTranslationIndexOffset = 1;
- static const int kArgumentsStackHeightOffset = 2;
+ static const int kTrampolinePcOffset = 2;
static const int kPcOffset = 3;
static const int kDeoptEntrySize = 4;
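The per-entry offsets compose with kFirstDeoptEntryIndex into a flat array index. A small sketch of that addressing, with the index constants copied from the layout above and everything else (the backing store, the values) invented for illustration:

// Sketch of addressing deopt-entry fields in a flat array using the layout above.
#include <iostream>
#include <vector>

constexpr int kFirstDeoptEntryIndex = 9;
constexpr int kBytecodeOffsetRawOffset = 0;
constexpr int kTranslationIndexOffset = 1;
constexpr int kTrampolinePcOffset = 2;
constexpr int kPcOffset = 3;
constexpr int kDeoptEntrySize = 4;

int IndexForEntry(int i) { return kFirstDeoptEntryIndex + i * kDeoptEntrySize; }

int main() {
  // A fake backing store: 9 header slots followed by two deopt entries.
  std::vector<int> data(kFirstDeoptEntryIndex + 2 * kDeoptEntrySize, 0);
  data[IndexForEntry(1) + kPcOffset] = 0x40;                        // write Pc of entry 1
  std::cout << data[IndexForEntry(1) + kPcOffset] << "\n";          // 64
  std::cout << "entry 1 starts at index " << IndexForEntry(1) << "\n";  // 13
}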
// Simple element accessors.
-#define DECLARE_ELEMENT_ACCESSORS(name, type) \
- inline type* name(); \
+#define DECL_ELEMENT_ACCESSORS(name, type) \
+ inline type* name(); \
inline void Set##name(type* value);
- DECLARE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
- DECLARE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
- DECLARE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
- DECLARE_ELEMENT_ACCESSORS(OsrAstId, Smi)
- DECLARE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
- DECLARE_ELEMENT_ACCESSORS(OptimizationId, Smi)
- DECLARE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
- DECLARE_ELEMENT_ACCESSORS(WeakCellCache, Object)
- DECLARE_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
-
-#undef DECLARE_ELEMENT_ACCESSORS
-
- // Accessors for elements of the ith deoptimization entry.
-#define DECLARE_ENTRY_ACCESSORS(name, type) \
- inline type* name(int i); \
+ DECL_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+ DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+ DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+ DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
+ DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
+ DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+ DECL_ELEMENT_ACCESSORS(WeakCellCache, Object)
+ DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
+
+#undef DECL_ELEMENT_ACCESSORS
+
+// Accessors for elements of the ith deoptimization entry.
+#define DECL_ENTRY_ACCESSORS(name, type) \
+ inline type* name(int i); \
inline void Set##name(int i, type* value);
- DECLARE_ENTRY_ACCESSORS(AstIdRaw, Smi)
- DECLARE_ENTRY_ACCESSORS(TranslationIndex, Smi)
- DECLARE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
- DECLARE_ENTRY_ACCESSORS(Pc, Smi)
+ DECL_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
+ DECL_ENTRY_ACCESSORS(TranslationIndex, Smi)
+ DECL_ENTRY_ACCESSORS(TrampolinePc, Smi)
+ DECL_ENTRY_ACCESSORS(Pc, Smi)
-#undef DECLARE_ENTRY_ACCESSORS
+#undef DECL_ENTRY_ACCESSORS
- inline BailoutId AstId(int i);
+ inline BailoutId BytecodeOffset(int i);
- inline void SetAstId(int i, BailoutId value);
+ inline void SetBytecodeOffset(int i, BailoutId value);
inline int DeoptCount();
@@ -3591,7 +3594,7 @@ class DeoptimizationInputData: public FixedArray {
int deopt_entry_count,
PretenureFlag pretenure);
- DECLARE_CAST(DeoptimizationInputData)
+ DECL_CAST(DeoptimizationInputData)
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputDataPrint(std::ostream& os); // NOLINT
@@ -3606,38 +3609,6 @@ class DeoptimizationInputData: public FixedArray {
static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
};
-// DeoptimizationOutputData is a fixed array used to hold the deoptimization
-// data for code generated by the full compiler.
-// The format of these objects is
-// [i * 2]: Ast ID for ith deoptimization.
-// [i * 2 + 1]: PC and state of ith deoptimization
-class DeoptimizationOutputData: public FixedArray {
- public:
- inline int DeoptPoints();
-
- inline BailoutId AstId(int index);
-
- inline void SetAstId(int index, BailoutId id);
-
- inline Smi* PcAndState(int index);
- inline void SetPcAndState(int index, Smi* offset);
-
- static int LengthOfFixedArray(int deopt_points) {
- return deopt_points * 2;
- }
-
- // Allocates a DeoptimizationOutputData.
- static Handle<DeoptimizationOutputData> New(Isolate* isolate,
- int number_of_deopt_points,
- PretenureFlag pretenure);
-
- DECLARE_CAST(DeoptimizationOutputData)
-
-#ifdef ENABLE_DISASSEMBLER
- void DeoptimizationOutputDataPrint(std::ostream& os); // NOLINT
-#endif
-};
-
class TemplateList : public FixedArray {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
@@ -3646,7 +3617,7 @@ class TemplateList : public FixedArray {
inline void set(int index, Object* value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
- DECLARE_CAST(TemplateList)
+ DECL_CAST(TemplateList)
private:
static const int kLengthIndex = 0;
static const int kFirstElementIndex = kLengthIndex + 1;
@@ -3680,9 +3651,7 @@ class Code: public HeapObject {
V(STORE_IC) \
V(STORE_GLOBAL_IC) \
V(KEYED_STORE_IC) \
- V(BINARY_OP_IC) \
- V(COMPARE_IC) \
- V(TO_BOOLEAN_IC)
+ V(COMPARE_IC)
#define CODE_KIND_LIST(V) \
NON_IC_KIND_LIST(V) \
@@ -3725,11 +3694,11 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [source_position_table]: ByteArray for the source positions table.
+ // [source_position_table]: ByteArray for the source positions table or
// SourcePositionTableWithFrameCache.
DECL_ACCESSORS(source_position_table, Object)
- inline ByteArray* SourcePositionTable();
+ inline ByteArray* SourcePositionTable() const;
// [trap_handler_index]: An index into the trap handler's master list of code
// objects.
@@ -3740,26 +3709,16 @@ class Code: public HeapObject {
// FUNCTION => type feedback information.
// STUB and ICs => major/minor key as Smi.
DECL_ACCESSORS(raw_type_feedback_info, Object)
- inline Object* type_feedback_info();
+ inline Object* type_feedback_info() const;
inline void set_type_feedback_info(
Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline uint32_t stub_key();
+ inline uint32_t stub_key() const;
inline void set_stub_key(uint32_t key);
// [next_code_link]: Link for lists of optimized or deoptimized code.
// Note that storage for this field is overlapped with typefeedback_info.
DECL_ACCESSORS(next_code_link, Object)
- // [gc_metadata]: Field used to hold GC related metadata. The contents of this
- // field does not have to be traced during garbage collection since
- // it is only used by the garbage collector itself.
- DECL_ACCESSORS(gc_metadata, Object)
-
- // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
- // at the moment when this object was created.
- inline void set_ic_age(int count);
- inline int ic_age() const;
-
// [prologue_offset]: Offset of the function prologue, used for aging
// FUNCTIONs and OPTIMIZED_FUNCTIONs.
inline int prologue_offset() const;
@@ -3771,79 +3730,76 @@ class Code: public HeapObject {
inline void set_constant_pool_offset(int offset);
// Unchecked accessors to be used during GC.
- inline ByteArray* unchecked_relocation_info();
+ inline ByteArray* unchecked_relocation_info() const;
- inline int relocation_size();
+ inline int relocation_size() const;
// [flags]: Various code flags.
- inline Flags flags();
+ inline Flags flags() const;
inline void set_flags(Flags flags);
// [flags]: Access to specific code flags.
- inline Kind kind();
- inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
+ inline Kind kind() const;
+ inline ExtraICState extra_ic_state() const; // Only valid for IC stubs.
// Testers for IC stub kinds.
- inline bool is_inline_cache_stub();
- inline bool is_debug_stub();
- inline bool is_handler();
- inline bool is_stub();
- inline bool is_binary_op_stub();
- inline bool is_compare_ic_stub();
- inline bool is_to_boolean_ic_stub();
- inline bool is_optimized_code();
- inline bool is_wasm_code();
-
- inline bool IsCodeStubOrIC();
+ inline bool is_inline_cache_stub() const;
+ inline bool is_debug_stub() const;
+ inline bool is_handler() const;
+ inline bool is_stub() const;
+ inline bool is_compare_ic_stub() const;
+ inline bool is_optimized_code() const;
+ inline bool is_wasm_code() const;
+
+ inline bool IsCodeStubOrIC() const;
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
// Testers for interpreter builtins.
- inline bool is_interpreter_trampoline_builtin();
+ inline bool is_interpreter_trampoline_builtin() const;
+
+ // Tells whether the code checks the optimization marker in the function's
+ // feedback vector.
+ inline bool checks_optimization_marker() const;
// [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
// object was generated by either the hydrogen or the TurboFan optimizing
// compiler (but it may not be an optimized function).
- inline bool is_crankshafted();
- inline bool is_hydrogen_stub(); // Crankshafted, but not a function.
+ inline bool is_crankshafted() const;
+ inline bool is_hydrogen_stub() const; // Crankshafted, but not a function.
inline void set_is_crankshafted(bool value);
// [has_tagged_params]: For compiled code or builtins: Tells whether the
// outgoing parameters of this code are tagged pointers. True for other kinds.
- inline bool has_tagged_params();
+ inline bool has_tagged_params() const;
inline void set_has_tagged_params(bool value);
// [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
// code object was generated by the TurboFan optimizing compiler.
- inline bool is_turbofanned();
+ inline bool is_turbofanned() const;
inline void set_is_turbofanned(bool value);
// [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the
// embedded objects in code should be treated weakly.
- inline bool can_have_weak_objects();
+ inline bool can_have_weak_objects() const;
inline void set_can_have_weak_objects(bool value);
// [is_construct_stub]: For kind BUILTIN, tells whether the code object
// represents a hand-written construct stub
// (e.g., NumberConstructor_ConstructStub).
- inline bool is_construct_stub();
+ inline bool is_construct_stub() const;
inline void set_is_construct_stub(bool value);
- // [has_deoptimization_support]: For FUNCTION kind, tells if it has
- // deoptimization support.
- inline bool has_deoptimization_support();
- inline void set_has_deoptimization_support(bool value);
-
// [has_debug_break_slots]: For FUNCTION kind, tells if it has
// been compiled with debug break slots.
- inline bool has_debug_break_slots();
+ inline bool has_debug_break_slots() const;
inline void set_has_debug_break_slots(bool value);
// [has_reloc_info_for_serialization]: For FUNCTION kind, tells if its
// reloc info includes runtime and external references to support
// serialization/deserialization.
- inline bool has_reloc_info_for_serialization();
+ inline bool has_reloc_info_for_serialization() const;
inline void set_has_reloc_info_for_serialization(bool value);
// [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
@@ -3851,57 +3807,54 @@ class Code: public HeapObject {
// level of loop nesting we are willing to do on-stack replacement
// for.
inline void set_allow_osr_at_loop_nesting_level(int level);
- inline int allow_osr_at_loop_nesting_level();
-
- // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
- // the code object was seen on the stack with no IC patching going on.
- inline int profiler_ticks();
- inline void set_profiler_ticks(int ticks);
+ inline int allow_osr_at_loop_nesting_level() const;
// [builtin_index]: For builtins, tells which builtin index the code object
// has. Note that builtins can have a code kind other than BUILTIN. The
// builtin index is a non-negative integer for builtins, and -1 otherwise.
- inline int builtin_index();
+ inline int builtin_index() const;
inline void set_builtin_index(int id);
// [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
// reserved in the code prologue.
- inline unsigned stack_slots();
+ inline unsigned stack_slots() const;
inline void set_stack_slots(unsigned slots);
// [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
// the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_offset();
+ inline unsigned safepoint_table_offset() const;
inline void set_safepoint_table_offset(unsigned offset);
// [back_edge_table_start]: For kind FUNCTION, the offset in the
// instruction stream where the back edge table starts.
- inline unsigned back_edge_table_offset();
+ inline unsigned back_edge_table_offset() const;
inline void set_back_edge_table_offset(unsigned offset);
- inline bool back_edges_patched_for_osr();
+ inline bool back_edges_patched_for_osr() const;
  // [to_boolean_state]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline uint16_t to_boolean_state();
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
- inline bool marked_for_deoptimization();
+ inline bool marked_for_deoptimization() const;
inline void set_marked_for_deoptimization(bool flag);
- // [is_promise_rejection]: For kind BUILTIN tells whether the exception
- // thrown by the code will lead to promise rejection.
- inline bool deopt_already_counted();
+ // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
+ // the code was already deoptimized.
+ inline bool deopt_already_counted() const;
inline void set_deopt_already_counted(bool flag);
- // [is_promise_rejection]: For kind BUILTIN tells whether the exception
- // thrown by the code will lead to promise rejection.
- inline bool is_promise_rejection();
+ // [is_promise_rejection]: For kind BUILTIN tells whether the
+ // exception thrown by the code will lead to promise rejection or
+ // uncaught if both this and is_exception_caught are set.
+ // Use GetBuiltinCatchPrediction to access this.
inline void set_is_promise_rejection(bool flag);
- // [is_exception_caught]: For kind BUILTIN tells whether the exception
- // thrown by the code will be caught internally.
- inline bool is_exception_caught();
+ // [is_exception_caught]: For kind BUILTIN tells whether the
+ // exception thrown by the code will be caught internally or
+ // uncaught if both this and is_promise_rejection are set.
+ // Use GetBuiltinCatchPrediction to access this.
inline void set_is_exception_caught(bool flag);
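Per the comments above, the two BUILTIN flags combine into a single catch prediction (both set means "uncaught"). A hedged sketch of such a mapping follows; the enumerator names are placeholders and may not match HandlerTable's actual values.

// Sketch of combining the two BUILTIN flags into one prediction (illustrative only).
#include <iostream>

enum class CatchPredictionSketch { kUncaught, kCaughtInternally, kPromiseRejection };

CatchPredictionSketch GetBuiltinCatchPrediction(bool is_promise_rejection,
                                                bool is_exception_caught) {
  if (is_promise_rejection && is_exception_caught) return CatchPredictionSketch::kUncaught;
  if (is_promise_rejection) return CatchPredictionSketch::kPromiseRejection;
  if (is_exception_caught) return CatchPredictionSketch::kCaughtInternally;
  return CatchPredictionSketch::kUncaught;
}

int main() {
  std::cout << static_cast<int>(GetBuiltinCatchPrediction(true, false)) << "\n";  // 2
  std::cout << static_cast<int>(GetBuiltinCatchPrediction(true, true)) << "\n";   // 0
}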
// [constant_pool]: The constant pool for this function.
@@ -3951,22 +3904,25 @@ class Code: public HeapObject {
// Convert an entry address into an object.
static inline Object* GetObjectFromEntryAddress(Address location_of_address);
+ // Convert a code entry into an object.
+ static inline Object* GetObjectFromCodeEntry(Address code_entry);
+
// Returns the address of the first instruction.
- inline byte* instruction_start();
+ inline byte* instruction_start() const;
// Returns the address right after the last instruction.
- inline byte* instruction_end();
+ inline byte* instruction_end() const;
// Returns the size of the instructions, padding, relocation and unwinding
// information.
- inline int body_size();
+ inline int body_size() const;
// Returns the size of code and its metadata. This includes the size of code
// relocation information, deoptimization data and handler table.
- inline int SizeIncludingMetadata();
+ inline int SizeIncludingMetadata() const;
// Returns the address of the first relocation info (read backwards!).
- inline byte* relocation_start();
+ inline byte* relocation_start() const;
// [has_unwinding_info]: Whether this code object has unwinding information.
// If it doesn't, unwinding_information_start() will point to invalid data.
@@ -4006,13 +3962,13 @@ class Code: public HeapObject {
inline void set_unwinding_info_size(int value);
// Returns the address of the unwinding information, if any.
- inline byte* unwinding_info_start();
+ inline byte* unwinding_info_start() const;
// Returns the address right after the end of the unwinding information.
- inline byte* unwinding_info_end();
+ inline byte* unwinding_info_end() const;
// Code entry point.
- inline byte* entry();
+ inline byte* entry() const;
// Returns true if pc is inside this object's instructions.
inline bool contains(byte* pc);
@@ -4032,41 +3988,39 @@ class Code: public HeapObject {
// Calculate the size of the code object to report for log events. This takes
// the layout of the code object into account.
- inline int ExecutableSize();
+ inline int ExecutableSize() const;
- DECLARE_CAST(Code)
+ DECL_CAST(Code)
// Dispatched behavior.
- inline int CodeSize();
+ inline int CodeSize() const;
- DECLARE_PRINTER(Code)
- DECLARE_VERIFIER(Code)
+ DECL_PRINTER(Code)
+ DECL_VERIFIER(Code)
void ClearInlineCaches();
- BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
- uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
+ BailoutId TranslatePcOffsetToBytecodeOffset(uint32_t pc_offset);
+ uint32_t TranslateBytecodeOffsetToPcOffset(BailoutId bytecode_offset);
-#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
+#define DECL_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kToBeExecutedOnceCodeAge = -3,
kNotExecutedCodeAge = -2,
kExecutedOnceCodeAge = -1,
kNoAgeCodeAge = 0,
- CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
- kAfterLastCodeAge,
+ CODE_AGE_LIST(DECL_CODE_AGE_ENUM) kAfterLastCodeAge,
kFirstCodeAge = kToBeExecutedOnceCodeAge,
kLastCodeAge = kAfterLastCodeAge - 1,
kCodeAgeCount = kAfterLastCodeAge - kFirstCodeAge - 1,
kIsOldCodeAge = kSexagenarianCodeAge,
kPreAgedCodeAge = kIsOldCodeAge - 1
};
-#undef DECLARE_CODE_AGE_ENUM
+#undef DECL_CODE_AGE_ENUM
// Code aging. Indicates how many full GCs this code has survived without
- // being entered through the prologue. Used to determine when it is
- // relatively safe to flush this code object and replace it with the lazy
- // compilation stub.
+ // being entered through the prologue. Used to determine when to flush code
+ // held in the compilation cache.
static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeYoung(Isolate* isolate);
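Conceptually, code aging as described above is a per-code counter bumped by full GCs and reset when the code runs; once old enough, the code can be flushed from the compilation cache. A toy model, with the threshold and names invented:

// Toy model of code aging: survive enough full GCs without running and become flushable.
#include <iostream>

struct AgedCodeSketch {
  int age = 0;                         // full GCs survived since last execution
  static constexpr int kIsOldAge = 6;  // hypothetical "sexagenarian" threshold

  void MarkExecuted() { age = 0; }     // entering through the prologue makes it young again
  void OnFullGC() { ++age; }           // each full GC ages code that was not entered
  bool IsOld() const { return age >= kIsOldAge; }
};

int main() {
  AgedCodeSketch code;
  for (int i = 0; i < 6; ++i) code.OnFullGC();
  std::cout << code.IsOld() << "\n";   // 1: eligible for flushing
  code.MarkExecuted();
  std::cout << code.IsOld() << "\n";   // 0
}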
@@ -4083,6 +4037,7 @@ class Code: public HeapObject {
void PrintDeoptLocation(FILE* out, Address pc);
bool CanDeoptAt(Address pc);
+ inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
#ifdef VERIFY_HEAP
void VerifyEmbeddedObjectsDependency();
#endif
@@ -4116,10 +4071,8 @@ class Code: public HeapObject {
static const int kTypeFeedbackInfoOffset =
kSourcePositionTableOffset + kPointerSize;
static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
- static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
- static const int kInstructionSizeOffset = kGCMetadataOffset + kPointerSize;
- static const int kICAgeOffset = kInstructionSizeOffset + kIntSize;
- static const int kFlagsOffset = kICAgeOffset + kIntSize;
+ static const int kInstructionSizeOffset = kNextCodeLinkOffset + kPointerSize;
+ static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlags2Offset =
kKindSpecificFlags1Offset + kIntSize;
@@ -4143,16 +4096,6 @@ class Code: public HeapObject {
class BodyDescriptor;
- // Byte offsets within kKindSpecificFlags1Offset.
- static const int kFullCodeFlags = kKindSpecificFlags1Offset;
- class FullCodeFlagsHasDeoptimizationSupportField:
- public BitField<bool, 0, 1> {}; // NOLINT
- class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
- class FullCodeFlagsHasRelocInfoForSerialization
- : public BitField<bool, 2, 1> {};
- // Bit 3 in this bitfield is unused.
- class ProfilerTicksField : public BitField<int, 4, 28> {};
-
// Flags layout. BitField<type, shift, size>.
class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
class KindField : public BitField<Kind, HasUnwindingInfoField::kNext, 5> {};
@@ -4162,6 +4105,15 @@ class Code: public HeapObject {
PlatformSmiTagging::kSmiValueSize - KindField::kNext> {
};
+ // KindSpecificFlags1 layout (FUNCTION)
+ static const int kFullCodeFlags = kKindSpecificFlags1Offset;
+ static const int kFullCodeFlagsHasDebugBreakSlotsField = 0;
+ static const int kFullCodeFlagsHasRelocInfoForSerialization = 1;
+ class FullCodeFlagsHasDebugBreakSlotsField
+ : public BitField<bool, kFullCodeFlagsHasDebugBreakSlotsField, 1> {};
+ class FullCodeFlagsHasRelocInfoForSerialization
+ : public BitField<bool, kFullCodeFlagsHasRelocInfoForSerialization, 1> {};
+
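The flag classes above follow the BitField<type, shift, size> pattern. For illustration, a minimal reimplementation of that pattern (not V8's actual template) showing how a bool and a 5-bit kind pack into one word:

// Minimal BitField<type, shift, size> sketch: encode/decode flags packed into one word.
#include <cstdint>
#include <iostream>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr int kNext = kShift + kSize;
  static uint32_t encode(T value) { return (static_cast<uint32_t>(value) << kShift) & kMask; }
  static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> kShift); }
  static uint32_t update(uint32_t word, T value) { return (word & ~kMask) | encode(value); }
};

using HasUnwindingInfoField = BitFieldSketch<bool, 0, 1>;
using KindField = BitFieldSketch<int, HasUnwindingInfoField::kNext, 5>;

int main() {
  uint32_t flags = 0;
  flags = HasUnwindingInfoField::update(flags, true);
  flags = KindField::update(flags, 17);
  std::cout << HasUnwindingInfoField::decode(flags) << " "
            << KindField::decode(flags) << "\n";  // 1 17
}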
// KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
static const int kStackSlotsBitCount = 24;
@@ -4235,6 +4187,9 @@ class Code: public HeapObject {
// Code aging -- platform-specific
static void PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Age age);
+ bool is_promise_rejection() const;
+ bool is_exception_caught() const;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
@@ -4287,7 +4242,7 @@ class AbstractCode : public HeapObject {
// the layout of the code object into account.
inline int ExecutableSize();
- DECLARE_CAST(AbstractCode)
+ DECL_CAST(AbstractCode)
inline Code* GetCode();
inline BytecodeArray* GetBytecodeArray();
@@ -4388,7 +4343,7 @@ class DependentCode: public FixedArray {
inline void set_object_at(int i, Object* object);
inline void clear_at(int i);
inline void copy(int from, int to);
- DECLARE_CAST(DependentCode)
+ DECL_CAST(DependentCode)
static const char* DependencyGroupName(DependencyGroup group);
static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
@@ -4422,7 +4377,7 @@ class PrototypeInfo;
class Struct: public HeapObject {
public:
inline void InitializeBody(int object_size);
- DECLARE_CAST(Struct)
+ DECL_CAST(Struct)
};
// A container struct to hold state required for PromiseResolveThenableJob.
@@ -4442,9 +4397,9 @@ class PromiseResolveThenableJobInfo : public Struct {
static const int kContextOffset = kRejectOffset + kPointerSize;
static const int kSize = kContextOffset + kPointerSize;
- DECLARE_CAST(PromiseResolveThenableJobInfo)
- DECLARE_PRINTER(PromiseResolveThenableJobInfo)
- DECLARE_VERIFIER(PromiseResolveThenableJobInfo)
+ DECL_CAST(PromiseResolveThenableJobInfo)
+ DECL_PRINTER(PromiseResolveThenableJobInfo)
+ DECL_VERIFIER(PromiseResolveThenableJobInfo)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
@@ -4478,9 +4433,9 @@ class PromiseReactionJobInfo : public Struct {
static const int kContextOffset = kDeferredOnRejectOffset + kPointerSize;
static const int kSize = kContextOffset + kPointerSize;
- DECLARE_CAST(PromiseReactionJobInfo)
- DECLARE_PRINTER(PromiseReactionJobInfo)
- DECLARE_VERIFIER(PromiseReactionJobInfo)
+ DECL_CAST(PromiseReactionJobInfo)
+ DECL_PRINTER(PromiseReactionJobInfo)
+ DECL_VERIFIER(PromiseReactionJobInfo)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
@@ -4500,9 +4455,9 @@ class AsyncGeneratorRequest : public Struct {
static const int kPromiseOffset = kValueOffset + kPointerSize;
static const int kSize = kPromiseOffset + kPointerSize;
- DECLARE_CAST(AsyncGeneratorRequest)
- DECLARE_PRINTER(AsyncGeneratorRequest)
- DECLARE_VERIFIER(AsyncGeneratorRequest)
+ DECL_CAST(AsyncGeneratorRequest)
+ DECL_PRINTER(AsyncGeneratorRequest)
+ DECL_VERIFIER(AsyncGeneratorRequest)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AsyncGeneratorRequest);
@@ -4543,11 +4498,11 @@ class PrototypeInfo : public Struct {
DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
- DECLARE_CAST(PrototypeInfo)
+ DECL_CAST(PrototypeInfo)
// Dispatched behavior.
- DECLARE_PRINTER(PrototypeInfo)
- DECLARE_VERIFIER(PrototypeInfo)
+ DECL_PRINTER(PrototypeInfo)
+ DECL_VERIFIER(PrototypeInfo)
static const int kWeakCellOffset = HeapObject::kHeaderSize;
static const int kPrototypeUsersOffset = kWeakCellOffset + kPointerSize;
@@ -4571,11 +4526,11 @@ class Tuple2 : public Struct {
DECL_ACCESSORS(value1, Object)
DECL_ACCESSORS(value2, Object)
- DECLARE_CAST(Tuple2)
+ DECL_CAST(Tuple2)
// Dispatched behavior.
- DECLARE_PRINTER(Tuple2)
- DECLARE_VERIFIER(Tuple2)
+ DECL_PRINTER(Tuple2)
+ DECL_VERIFIER(Tuple2)
static const int kValue1Offset = HeapObject::kHeaderSize;
static const int kValue2Offset = kValue1Offset + kPointerSize;
@@ -4589,11 +4544,11 @@ class Tuple3 : public Tuple2 {
public:
DECL_ACCESSORS(value3, Object)
- DECLARE_CAST(Tuple3)
+ DECL_CAST(Tuple3)
// Dispatched behavior.
- DECLARE_PRINTER(Tuple3)
- DECLARE_VERIFIER(Tuple3)
+ DECL_PRINTER(Tuple3)
+ DECL_VERIFIER(Tuple3)
static const int kValue3Offset = Tuple2::kSize;
static const int kSize = kValue3Offset + kPointerSize;
@@ -4614,11 +4569,11 @@ class ContextExtension : public Struct {
// [extension]: Extension object.
DECL_ACCESSORS(extension, Object)
- DECLARE_CAST(ContextExtension)
+ DECL_CAST(ContextExtension)
// Dispatched behavior.
- DECLARE_PRINTER(ContextExtension)
- DECLARE_VERIFIER(ContextExtension)
+ DECL_PRINTER(ContextExtension)
+ DECL_VERIFIER(ContextExtension)
static const int kScopeInfoOffset = HeapObject::kHeaderSize;
static const int kExtensionOffset = kScopeInfoOffset + kPointerSize;
@@ -4628,212 +4583,6 @@ class ContextExtension : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(ContextExtension);
};
-// Script describes a script which has been added to the VM.
-class Script: public Struct {
- public:
- // Script types.
- enum Type {
- TYPE_NATIVE = 0,
- TYPE_EXTENSION = 1,
- TYPE_NORMAL = 2,
- TYPE_WASM = 3,
- TYPE_INSPECTOR = 4
- };
-
- // Script compilation types.
- enum CompilationType {
- COMPILATION_TYPE_HOST = 0,
- COMPILATION_TYPE_EVAL = 1
- };
-
- // Script compilation state.
- enum CompilationState {
- COMPILATION_STATE_INITIAL = 0,
- COMPILATION_STATE_COMPILED = 1
- };
-
- // [source]: the script source.
- DECL_ACCESSORS(source, Object)
-
- // [name]: the script name.
- DECL_ACCESSORS(name, Object)
-
- // [id]: the script id.
- DECL_INT_ACCESSORS(id)
-
- // [line_offset]: script line offset in resource from where it was extracted.
- DECL_INT_ACCESSORS(line_offset)
-
- // [column_offset]: script column offset in resource from where it was
- // extracted.
- DECL_INT_ACCESSORS(column_offset)
-
- // [context_data]: context data for the context this script was compiled in.
- DECL_ACCESSORS(context_data, Object)
-
- // [wrapper]: the wrapper cache. This is either undefined or a WeakCell.
- DECL_ACCESSORS(wrapper, HeapObject)
-
- // [type]: the script type.
- DECL_INT_ACCESSORS(type)
-
- // [line_ends]: FixedArray of line ends positions.
- DECL_ACCESSORS(line_ends, Object)
-
- // [eval_from_shared]: for eval scripts the shared function info for the
- // function from which eval was called.
- DECL_ACCESSORS(eval_from_shared, Object)
-
- // [eval_from_position]: the source position in the code for the function
- // from which eval was called, as positive integer. Or the code offset in the
- // code from which eval was called, as negative integer.
- DECL_INT_ACCESSORS(eval_from_position)
-
- // [shared_function_infos]: weak fixed array containing all shared
- // function infos created from this script.
- DECL_ACCESSORS(shared_function_infos, FixedArray)
-
- // [flags]: Holds an exciting bitfield.
- DECL_INT_ACCESSORS(flags)
-
- // [source_url]: sourceURL from magic comment
- DECL_ACCESSORS(source_url, Object)
-
- // [source_mapping_url]: sourceMappingURL magic comment
- DECL_ACCESSORS(source_mapping_url, Object)
-
- // [wasm_compiled_module]: the compiled wasm module this script belongs to.
- // This must only be called if the type of this script is TYPE_WASM.
- DECL_ACCESSORS(wasm_compiled_module, Object)
-
- DECL_ACCESSORS(preparsed_scope_data, PodArray<uint32_t>)
-
- // [compilation_type]: how the script was compiled. Encoded in the
- // 'flags' field.
- inline CompilationType compilation_type();
- inline void set_compilation_type(CompilationType type);
-
- // [compilation_state]: determines whether the script has already been
- // compiled. Encoded in the 'flags' field.
- inline CompilationState compilation_state();
- inline void set_compilation_state(CompilationState state);
-
- // [origin_options]: optional attributes set by the embedder via ScriptOrigin,
- // and used by the embedder to make decisions about the script. V8 just passes
- // this through. Encoded in the 'flags' field.
- inline v8::ScriptOriginOptions origin_options();
- inline void set_origin_options(ScriptOriginOptions origin_options);
-
- DECLARE_CAST(Script)
-
- // If script source is an external string, check that the underlying
- // resource is accessible. Otherwise, always return true.
- inline bool HasValidSource();
-
- Object* GetNameOrSourceURL();
-
- // Set eval origin for stack trace formatting.
- static void SetEvalOrigin(Handle<Script> script,
- Handle<SharedFunctionInfo> outer,
- int eval_position);
- // Retrieve source position from where eval was called.
- int GetEvalPosition();
-
- // Init line_ends array with source code positions of line ends.
- static void InitLineEnds(Handle<Script> script);
-
- // Carries information about a source position.
- struct PositionInfo {
- PositionInfo() : line(-1), column(-1), line_start(-1), line_end(-1) {}
-
- int line; // Zero-based line number.
- int column; // Zero-based column number.
- int line_start; // Position of first character in line.
- int line_end; // Position of final linebreak character in line.
- };
-
- // Specifies whether to add offsets to position infos.
- enum OffsetFlag { NO_OFFSET = 0, WITH_OFFSET = 1 };
-
- // Retrieves information about the given position, optionally with an offset.
- // Returns false on failure, and otherwise writes into the given info object
- // on success.
- // The static method is preferable for handlified callsites because it
- // initializes the line ends array, avoiding expensive recomputations.
- // The non-static version is not allocating and safe for unhandlified
- // callsites.
- static bool GetPositionInfo(Handle<Script> script, int position,
- PositionInfo* info, OffsetFlag offset_flag);
- bool GetPositionInfo(int position, PositionInfo* info,
- OffsetFlag offset_flag) const;
-
- bool IsUserJavaScript();
-
- // Wrappers for GetPositionInfo
- static int GetColumnNumber(Handle<Script> script, int code_offset);
- int GetColumnNumber(int code_pos) const;
- static int GetLineNumber(Handle<Script> script, int code_offset);
- int GetLineNumber(int code_pos) const;
-
- // Get the JS object wrapping the given script; create it if none exists.
- static Handle<JSObject> GetWrapper(Handle<Script> script);
-
- // Look through the list of existing shared function infos to find one
- // that matches the function literal. Return empty handle if not found.
- MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
- Isolate* isolate, const FunctionLiteral* fun);
-
- // Iterate over all script objects on the heap.
- class Iterator {
- public:
- explicit Iterator(Isolate* isolate);
- Script* Next();
-
- private:
- WeakFixedArray::Iterator iterator_;
- DISALLOW_COPY_AND_ASSIGN(Iterator);
- };
-
- bool HasPreparsedScopeData() const;
-
- // Dispatched behavior.
- DECLARE_PRINTER(Script)
- DECLARE_VERIFIER(Script)
-
- static const int kSourceOffset = HeapObject::kHeaderSize;
- static const int kNameOffset = kSourceOffset + kPointerSize;
- static const int kLineOffsetOffset = kNameOffset + kPointerSize;
- static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
- static const int kWrapperOffset = kContextOffset + kPointerSize;
- static const int kTypeOffset = kWrapperOffset + kPointerSize;
- static const int kLineEndsOffset = kTypeOffset + kPointerSize;
- static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
- static const int kEvalFromPositionOffset =
- kEvalFromSharedOffset + kPointerSize;
- static const int kSharedFunctionInfosOffset =
- kEvalFromPositionOffset + kPointerSize;
- static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
- static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
- static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
- static const int kPreParsedScopeDataOffset =
- kSourceMappingUrlOffset + kPointerSize;
- static const int kSize = kPreParsedScopeDataOffset + kPointerSize;
-
- private:
- // Bit positions in the flags field.
- static const int kCompilationTypeBit = 0;
- static const int kCompilationStateBit = 1;
- static const int kOriginOptionsShift = 2;
- static const int kOriginOptionsSize = 4;
- static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
- << kOriginOptionsShift;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
-};
-
-
// List of builtin functions we want to identify to improve code
// generation.
//
@@ -4875,10 +4624,12 @@ class Script: public Struct {
V(Date.prototype, getSeconds, DateGetSeconds) \
V(Date.prototype, getTime, DateGetTime) \
V(Function.prototype, apply, FunctionApply) \
+ V(Function.prototype, bind, FunctionBind) \
V(Function.prototype, call, FunctionCall) \
V(Object, assign, ObjectAssign) \
V(Object, create, ObjectCreate) \
V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+ V(Object.prototype, isPrototypeOf, ObjectIsPrototypeOf) \
V(Object.prototype, toString, ObjectToString) \
V(RegExp.prototype, compile, RegExpCompile) \
V(RegExp.prototype, exec, RegExpExec) \
@@ -4954,6 +4705,7 @@ class Script: public Struct {
V(Map.prototype, forEach, MapForEach) \
V(Map.prototype, has, MapHas) \
V(Map.prototype, keys, MapKeys) \
+ V(Map.prototype, get, MapGet) \
V(Map.prototype, set, MapSet) \
V(Map.prototype, values, MapValues) \
V(Set.prototype, add, SetAdd) \
@@ -4962,7 +4714,6 @@ class Script: public Struct {
V(Set.prototype, entries, SetEntries) \
V(Set.prototype, forEach, SetForEach) \
V(Set.prototype, has, SetHas) \
- V(Set.prototype, keys, SetKeys) \
V(Set.prototype, values, SetValues) \
V(WeakMap.prototype, delete, WeakMapDelete) \
V(WeakMap.prototype, has, WeakMapHas) \
@@ -4983,12 +4734,12 @@ class Script: public Struct {
V(Atomics, xor, AtomicsXor)
enum BuiltinFunctionId {
+ kInvalidBuiltinFunctionId = -1,
kArrayCode,
-#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
- k##name,
- FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
- ATOMIC_FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
-#undef DECLARE_FUNCTION_ID
+#define DECL_FUNCTION_ID(ignored1, ignore2, name) k##name,
+ FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
+ ATOMIC_FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
+#undef DECL_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
kMathPowHalf,
@@ -4998,6 +4749,10 @@ enum BuiltinFunctionId {
kArrayKeys,
kArrayValues,
kArrayIteratorNext,
+ kMapSize,
+ kSetSize,
+ kMapIteratorNext,
+ kSetIteratorNext,
kDataViewBuffer,
kDataViewByteLength,
kDataViewByteOffset,
@@ -5019,723 +4774,10 @@ enum BuiltinFunctionId {
kSharedArrayBufferByteLength,
kStringIterator,
kStringIteratorNext,
+ kStringToLowerCaseIntl,
+ kStringToUpperCaseIntl
};
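The DECL_FUNCTION_ID rename keeps the usual X-macro pattern in which one list macro generates the enumerators. A tiny self-contained illustration of that pattern follows; the demo list contents and names are made up, not the real FUNCTIONS_WITH_ID_LIST.

// Tiny X-macro illustration: one list macro generating enumerators and names in lockstep.
#include <iostream>

#define DEMO_FUNCTIONS_WITH_ID_LIST(V) \
  V(Math, pow, MathPow)                \
  V(String, charAt, StringCharAt)

enum DemoBuiltinFunctionId {
  kInvalidDemoBuiltinFunctionId = -1,
#define DECL_FUNCTION_ID(holder, name, id) k##id,
  DEMO_FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
#undef DECL_FUNCTION_ID
};

const char* DemoName(DemoBuiltinFunctionId id) {
  switch (id) {
#define CASE(holder, name, id) case k##id: return #holder "." #name;
    DEMO_FUNCTIONS_WITH_ID_LIST(CASE)
#undef CASE
    default: return "<invalid>";
  }
}

int main() {
  std::cout << DemoName(kMathPow) << " " << DemoName(kStringCharAt) << "\n";  // Math.pow String.charAt
}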
-// Result of searching in an optimized code map of a SharedFunctionInfo. Note
-// that both {code} and {vector} can be NULL to pass search result status.
-struct CodeAndVector {
- Code* code; // Cached optimized code.
- FeedbackVector* vector; // Cached feedback vector.
-};
-
-// SharedFunctionInfo describes the JSFunction information that can be
-// shared by multiple instances of the function.
-class SharedFunctionInfo: public HeapObject {
- public:
- // [name]: Function name.
- DECL_ACCESSORS(name, Object)
-
- // [code]: Function code.
- DECL_ACCESSORS(code, Code)
-
- // Get the abstract code associated with the function, which will either be
- // a Code object or a BytecodeArray.
- inline AbstractCode* abstract_code();
-
- // Tells whether or not this shared function info is interpreted.
- //
- // Note: function->IsInterpreted() does not necessarily return the same value
- // as function->shared()->IsInterpreted() because the closure might have been
- // optimized.
- inline bool IsInterpreted() const;
-
- inline void ReplaceCode(Code* code);
- inline bool HasBaselineCode() const;
-
- // Set up the link between shared function info and the script. The shared
- // function info is added to the list on the script.
- V8_EXPORT_PRIVATE static void SetScript(Handle<SharedFunctionInfo> shared,
- Handle<Object> script_object);
-
- // Layout description of the optimized code map.
- static const int kEntriesStart = 0;
- static const int kContextOffset = 0;
- static const int kCachedCodeOffset = 1;
- static const int kEntryLength = 2;
- static const int kInitialLength = kEntriesStart + kEntryLength;
-
- static const int kNotFound = -1;
- static const int kInvalidLength = -1;
-
- // Helpers for assembly code that does a backwards walk of the optimized code
- // map.
- static const int kOffsetToPreviousContext =
- FixedArray::kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
- static const int kOffsetToPreviousCachedCode =
- FixedArray::kHeaderSize +
- kPointerSize * (kCachedCodeOffset - kEntryLength);
-
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, ScopeInfo)
-
- // The outer scope info for the purpose of parsing this function, or the hole
- // value if it isn't yet known.
- DECL_ACCESSORS(outer_scope_info, HeapObject)
-
- // [construct stub]: Code stub for constructing instances of this function.
- DECL_ACCESSORS(construct_stub, Code)
-
- // Sets the given code as the construct stub, and marks builtin code objects
- // as a construct stub.
- void SetConstructStub(Code* code);
-
- // Returns if this function has been compiled to native code yet.
- inline bool is_compiled() const;
-
- // [length]: The function length - usually the number of declared parameters.
- // Use up to 2^30 parameters. The value is only reliable when the function
- // has been compiled.
- inline int GetLength() const;
- inline bool HasLength() const;
- inline void set_length(int value);
-
- // [internal formal parameter count]: The declared number of parameters.
- // For subclass constructors, also includes new.target.
- // The size of function's frame is internal_formal_parameter_count + 1.
- inline int internal_formal_parameter_count() const;
- inline void set_internal_formal_parameter_count(int value);
-
- // Set the formal parameter count so the function code will be
- // called without using argument adaptor frames.
- inline void DontAdaptArguments();
-
- // [expected_nof_properties]: Expected number of properties for the
- // function. The value is only reliable when the function has been compiled.
- inline int expected_nof_properties() const;
- inline void set_expected_nof_properties(int value);
-
- // [feedback_metadata] - describes ast node feedback from full-codegen and
- // (increasingly) from crankshafted code where sufficient feedback isn't
- // available.
- DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
-
- // [function_literal_id] - uniquely identifies the FunctionLiteral this
- // SharedFunctionInfo represents within its script, or -1 if this
- // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
- inline int function_literal_id() const;
- inline void set_function_literal_id(int value);
-
-#if V8_SFI_HAS_UNIQUE_ID
- // [unique_id] - For --trace-maps purposes, an identifier that's persistent
- // even if the GC moves this SharedFunctionInfo.
- inline int unique_id() const;
- inline void set_unique_id(int value);
-#endif
-
- // [instance class name]: class name for instances.
- DECL_ACCESSORS(instance_class_name, Object)
-
- // [function data]: This field holds some additional data for function.
- // Currently it has one of:
- // - a FunctionTemplateInfo for use by the API [IsApiFunction()].
- // - a BytecodeArray for the interpreter [HasBytecodeArray()].
- // - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
- DECL_ACCESSORS(function_data, Object)
-
- inline bool IsApiFunction();
- inline FunctionTemplateInfo* get_api_func_data();
- inline void set_api_func_data(FunctionTemplateInfo* data);
- inline bool HasBytecodeArray() const;
- inline BytecodeArray* bytecode_array() const;
- inline void set_bytecode_array(BytecodeArray* bytecode);
- inline void ClearBytecodeArray();
- inline bool HasAsmWasmData() const;
- inline FixedArray* asm_wasm_data() const;
- inline void set_asm_wasm_data(FixedArray* data);
- inline void ClearAsmWasmData();
-
- // [function identifier]: This field holds an additional identifier for the
- // function.
- // - a Smi identifying a builtin function [HasBuiltinFunctionId()].
- // - a String identifying the function's inferred name [HasInferredName()].
- // The inferred_name is inferred from variable or property
- // assignment of this function. It is used to facilitate debugging and
- // profiling of JavaScript code written in OO style, where almost
- // all functions are anonymous but are assigned to object
- // properties.
- DECL_ACCESSORS(function_identifier, Object)
-
- inline bool HasBuiltinFunctionId();
- inline BuiltinFunctionId builtin_function_id();
- inline void set_builtin_function_id(BuiltinFunctionId id);
- inline bool HasInferredName();
- inline String* inferred_name();
- inline void set_inferred_name(String* inferred_name);
-
- // [script]: Script from which the function originates.
- DECL_ACCESSORS(script, Object)
-
- // [start_position_and_type]: Field used to store both the source code
- // position, whether or not the function is a function expression,
- // and whether or not the function is a toplevel function. The two
- // least significant bits indicate whether the function is an
- // expression and the rest contains the source code position.
- inline int start_position_and_type() const;
- inline void set_start_position_and_type(int value);
-
- // The function is subject to debugging if a debug info is attached.
- inline bool HasDebugInfo() const;
- inline DebugInfo* GetDebugInfo() const;
-
- // A function has debug code if the compiled code has debug break slots.
- inline bool HasDebugCode() const;
-
- // [debug info]: Debug information.
- DECL_ACCESSORS(debug_info, Object)
-
- // Bit field containing various information collected for debugging.
- // This field is either stored on the kDebugInfo slot or inside the
- // debug info struct.
- inline int debugger_hints() const;
- inline void set_debugger_hints(int value);
-
- // Indicates that the function was created by the Function function.
- // Though it's anonymous, toString should treat it as if it had the name
- // "anonymous". We don't set the name itself so that the system does not
- // see a binding for it.
- DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
-
- // Indicates that the function is either an anonymous expression
- // or an arrow function (the name field can be set through the API,
- // which does not change this flag).
- DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
-
- // Indicates that the shared function info is deserialized from cache.
- DECL_BOOLEAN_ACCESSORS(deserialized)
-
- // Indicates that the function cannot cause side-effects.
- DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
-
- // Indicates that |has_no_side_effect| has been computed and set.
- DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
-
- // Indicates that the function should be skipped during stepping.
- DECL_BOOLEAN_ACCESSORS(debug_is_blackboxed)
-
- // Indicates that |debug_is_blackboxed| has been computed and set.
- DECL_BOOLEAN_ACCESSORS(computed_debug_is_blackboxed)
-
- // Indicates that the function has been reported for binary code coverage.
- DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
-
- // The function's name if it is non-empty, otherwise the inferred name.
- String* DebugName();
-
- // The function cannot cause any side effects.
- bool HasNoSideEffect();
-
- // Used for flags such as --hydrogen-filter.
- bool PassesFilter(const char* raw_filter);
-
- // Position of the 'function' token in the script source.
- inline int function_token_position() const;
- inline void set_function_token_position(int function_token_position);
-
- // Position of this function in the script source.
- inline int start_position() const;
- inline void set_start_position(int start_position);
-
- // End position of this function in the script source.
- inline int end_position() const;
- inline void set_end_position(int end_position);
-
- // Is this function a named function expression in the source code.
- DECL_BOOLEAN_ACCESSORS(is_named_expression)
-
- // Is this function a top-level function (scripts, evals).
- DECL_BOOLEAN_ACCESSORS(is_toplevel)
-
- // Bit field containing various information collected by the compiler to
- // drive optimization.
- inline int compiler_hints() const;
- inline void set_compiler_hints(int value);
-
- inline int ast_node_count() const;
- inline void set_ast_node_count(int count);
-
- inline int profiler_ticks() const;
- inline void set_profiler_ticks(int ticks);
-
- // Inline cache age is used to infer whether the function survived a context
- // disposal or not. In the former case we reset the opt_count.
- inline int ic_age();
- inline void set_ic_age(int age);
-
- // Indicates if this function can be lazy compiled.
- // This is used to determine if we can safely flush code from a function
- // when doing GC if we expect that the function will no longer be used.
- DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
-
- // Indicates whether optimizations have been disabled for this
- // shared function info. If a function is repeatedly optimized or if
- // we cannot optimize the function we disable optimization to avoid
- // spending time attempting to optimize it again.
- DECL_BOOLEAN_ACCESSORS(optimization_disabled)
-
- // Indicates the language mode.
- inline LanguageMode language_mode();
- inline void set_language_mode(LanguageMode language_mode);
-
- // False if the function definitely does not allocate an arguments object.
- DECL_BOOLEAN_ACCESSORS(uses_arguments)
-
- // Indicates that this function uses a super property (or an eval that may
- // use a super property).
- // This is needed to set up the [[HomeObject]] on the function instance.
- DECL_BOOLEAN_ACCESSORS(needs_home_object)
-
- // True if the function has any duplicated parameter names.
- DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
-
- // Indicates whether the function is a native function.
- // These need special treatment in .call and .apply since
- // null passed as the receiver should not be translated to the
- // global object.
- DECL_BOOLEAN_ACCESSORS(native)
-
- // Indicate that this function should always be inlined in optimized code.
- DECL_BOOLEAN_ACCESSORS(force_inline)
-
- // Indicates that code for this function must be compiled through the
- // Ignition / TurboFan pipeline, and is unsupported by
- // FullCodegen / Crankshaft.
- DECL_BOOLEAN_ACCESSORS(must_use_ignition_turbo)
-
- // Indicates that code for this function cannot be flushed.
- DECL_BOOLEAN_ACCESSORS(dont_flush)
-
- // Indicates that this function is an asm function.
- DECL_BOOLEAN_ACCESSORS(asm_function)
-
- // Whether this function was created from a FunctionDeclaration.
- DECL_BOOLEAN_ACCESSORS(is_declaration)
-
- // Whether this function was marked to be tiered up.
- DECL_BOOLEAN_ACCESSORS(marked_for_tier_up)
-
- // Whether this function has a concurrent compilation job running.
- DECL_BOOLEAN_ACCESSORS(has_concurrent_optimization_job)
-
- // Indicates that asm->wasm conversion failed and should not be re-attempted.
- DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
-
- inline FunctionKind kind() const;
- inline void set_kind(FunctionKind kind);
-
- // Indicates whether or not the code in the shared function support
- // deoptimization.
- inline bool has_deoptimization_support();
-
- // Enable deoptimization support through recompiled code.
- void EnableDeoptimizationSupport(Code* recompiled);
-
- // Disable (further) attempted optimization of all functions sharing this
- // shared function info.
- void DisableOptimization(BailoutReason reason);
-
- inline BailoutReason disable_optimization_reason();
-
- // Lookup the bailout ID and DCHECK that it exists in the non-optimized
- // code, returns whether it asserted (i.e., always true if assertions are
- // disabled).
- bool VerifyBailoutId(BailoutId id);
-
- // [source code]: Source code for the function.
- bool HasSourceCode() const;
- Handle<Object> GetSourceCode();
- Handle<Object> GetSourceCodeHarmony();
-
- // Number of times the function was optimized.
- inline int opt_count();
- inline void set_opt_count(int opt_count);
-
- // Number of times the function was deoptimized.
- inline void set_deopt_count(int value);
- inline int deopt_count();
- inline void increment_deopt_count();
-
- // Number of time we tried to re-enable optimization after it
- // was disabled due to high number of deoptimizations.
- inline void set_opt_reenable_tries(int value);
- inline int opt_reenable_tries();
-
- inline void TryReenableOptimization();
-
- // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
- inline void set_counters(int value);
- inline int counters() const;
-
- // Stores opt_count and bailout_reason as bit-fields.
- inline void set_opt_count_and_bailout_reason(int value);
- inline int opt_count_and_bailout_reason() const;
-
- inline void set_disable_optimization_reason(BailoutReason reason);
-
- // Tells whether this function should be subject to debugging.
- inline bool IsSubjectToDebugging();
-
- // Whether this function is defined in user-provided JavaScript code.
- inline bool IsUserJavaScript();
-
- // Check whether or not this function is inlineable.
- bool IsInlineable();
-
- // Source size of this function.
- int SourceSize();
-
- // Returns `false` if formal parameters include rest parameters, optional
- // parameters, or destructuring parameters.
- // TODO(caitp): make this a flag set during parsing
- inline bool has_simple_parameters();
-
- // Initialize a SharedFunctionInfo from a parsed function literal.
- static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
- FunctionLiteral* lit);
-
- // Sets the expected number of properties based on estimate from parser.
- void SetExpectedNofPropertiesFromEstimate(FunctionLiteral* literal);
-
- // Dispatched behavior.
- DECLARE_PRINTER(SharedFunctionInfo)
- DECLARE_VERIFIER(SharedFunctionInfo)
-
- void ResetForNewContext(int new_ic_age);
-
- // Iterate over all shared function infos in a given script.
- class ScriptIterator {
- public:
- explicit ScriptIterator(Handle<Script> script);
- ScriptIterator(Isolate* isolate, Handle<FixedArray> shared_function_infos);
- SharedFunctionInfo* Next();
-
- // Reset the iterator to run on |script|.
- void Reset(Handle<Script> script);
-
- private:
- Isolate* isolate_;
- Handle<FixedArray> shared_function_infos_;
- int index_;
- DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
- };
-
- // Iterate over all shared function infos on the heap.
- class GlobalIterator {
- public:
- explicit GlobalIterator(Isolate* isolate);
- SharedFunctionInfo* Next();
-
- private:
- Script::Iterator script_iterator_;
- WeakFixedArray::Iterator noscript_sfi_iterator_;
- SharedFunctionInfo::ScriptIterator sfi_iterator_;
- DisallowHeapAllocation no_gc_;
- DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
- };
-
- DECLARE_CAST(SharedFunctionInfo)
-
- // Constants.
- static const int kDontAdaptArgumentsSentinel = -1;
-
- // Layout description.
- // Pointer fields.
- static const int kCodeOffset = HeapObject::kHeaderSize;
- static const int kNameOffset = kCodeOffset + kPointerSize;
- static const int kScopeInfoOffset = kNameOffset + kPointerSize;
- static const int kOuterScopeInfoOffset = kScopeInfoOffset + kPointerSize;
- static const int kConstructStubOffset = kOuterScopeInfoOffset + kPointerSize;
- static const int kInstanceClassNameOffset =
- kConstructStubOffset + kPointerSize;
- static const int kFunctionDataOffset =
- kInstanceClassNameOffset + kPointerSize;
- static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
- static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
- static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
- static const int kFeedbackMetadataOffset =
- kFunctionIdentifierOffset + kPointerSize;
- static const int kFunctionLiteralIdOffset =
- kFeedbackMetadataOffset + kPointerSize;
-#if V8_SFI_HAS_UNIQUE_ID
- static const int kUniqueIdOffset = kFunctionLiteralIdOffset + kPointerSize;
- static const int kLastPointerFieldOffset = kUniqueIdOffset;
-#else
- // Just to not break the postmortem support with conditional offsets
- static const int kUniqueIdOffset = kFunctionLiteralIdOffset;
- static const int kLastPointerFieldOffset = kFunctionLiteralIdOffset;
-#endif
-
-#if V8_HOST_ARCH_32_BIT
- // Smi fields.
- static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
- static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
- static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kPointerSize;
- static const int kNumLiteralsOffset =
- kExpectedNofPropertiesOffset + kPointerSize;
- static const int kStartPositionAndTypeOffset =
- kNumLiteralsOffset + kPointerSize;
- static const int kEndPositionOffset =
- kStartPositionAndTypeOffset + kPointerSize;
- static const int kFunctionTokenPositionOffset =
- kEndPositionOffset + kPointerSize;
- static const int kCompilerHintsOffset =
- kFunctionTokenPositionOffset + kPointerSize;
- static const int kOptCountAndBailoutReasonOffset =
- kCompilerHintsOffset + kPointerSize;
- static const int kCountersOffset =
- kOptCountAndBailoutReasonOffset + kPointerSize;
- static const int kAstNodeCountOffset =
- kCountersOffset + kPointerSize;
- static const int kProfilerTicksOffset =
- kAstNodeCountOffset + kPointerSize;
-
- // Total size.
- static const int kSize = kProfilerTicksOffset + kPointerSize;
-#else
-// The only reason to use smi fields instead of int fields is to allow
-// iteration without maps decoding during garbage collections.
-// To avoid wasting space on 64-bit architectures we use the following trick:
-// we group integer fields into pairs
-// The least significant integer in each pair is shifted left by 1. By doing
-// this we guarantee that LSB of each kPointerSize aligned word is not set and
-// thus this word cannot be treated as pointer to HeapObject during old space
-// traversal.
-#if V8_TARGET_LITTLE_ENDIAN
- static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
- static const int kFormalParameterCountOffset =
- kLengthOffset + kIntSize;
-
- static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kIntSize;
- static const int kNumLiteralsOffset =
- kExpectedNofPropertiesOffset + kIntSize;
-
- static const int kEndPositionOffset =
- kNumLiteralsOffset + kIntSize;
- static const int kStartPositionAndTypeOffset =
- kEndPositionOffset + kIntSize;
-
- static const int kFunctionTokenPositionOffset =
- kStartPositionAndTypeOffset + kIntSize;
- static const int kCompilerHintsOffset =
- kFunctionTokenPositionOffset + kIntSize;
-
- static const int kOptCountAndBailoutReasonOffset =
- kCompilerHintsOffset + kIntSize;
- static const int kCountersOffset =
- kOptCountAndBailoutReasonOffset + kIntSize;
-
- static const int kAstNodeCountOffset =
- kCountersOffset + kIntSize;
- static const int kProfilerTicksOffset =
- kAstNodeCountOffset + kIntSize;
-
- // Total size.
- static const int kSize = kProfilerTicksOffset + kIntSize;
-
-#elif V8_TARGET_BIG_ENDIAN
- static const int kFormalParameterCountOffset =
- kLastPointerFieldOffset + kPointerSize;
- static const int kLengthOffset = kFormalParameterCountOffset + kIntSize;
-
- static const int kNumLiteralsOffset = kLengthOffset + kIntSize;
- static const int kExpectedNofPropertiesOffset = kNumLiteralsOffset + kIntSize;
-
- static const int kStartPositionAndTypeOffset =
- kExpectedNofPropertiesOffset + kIntSize;
- static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
-
- static const int kCompilerHintsOffset = kEndPositionOffset + kIntSize;
- static const int kFunctionTokenPositionOffset =
- kCompilerHintsOffset + kIntSize;
-
- static const int kCountersOffset = kFunctionTokenPositionOffset + kIntSize;
- static const int kOptCountAndBailoutReasonOffset = kCountersOffset + kIntSize;
-
- static const int kProfilerTicksOffset =
- kOptCountAndBailoutReasonOffset + kIntSize;
- static const int kAstNodeCountOffset = kProfilerTicksOffset + kIntSize;
-
- // Total size.
- static const int kSize = kAstNodeCountOffset + kIntSize;
-
-#else
-#error Unknown byte ordering
-#endif // Big endian
-#endif // 64-bit
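
The int-field pairing described in the comment above can be illustrated with a small standalone program (assumptions: a 64-bit little-endian host and a tag scheme in which a clear least-significant bit marks a non-pointer word, as the comment states; this is only an illustration, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

struct DemoIntPair {
  int32_t low;   // stored shifted left by 1
  int32_t high;  // stored as-is
};

int main() {
  DemoIntPair pair{/*low=*/7 << 1, /*high=*/-3};
  uint64_t word;
  std::memcpy(&word, &pair, sizeof word);
  // The pointer-size-aligned word always has a clear LSB, so a heap walker
  // that treats LSB==0 as "not a HeapObject pointer" skips it safely.
  assert((word & 1) == 0);
  // The real value is recovered by shifting back.
  assert((pair.low >> 1) == 7);
  return 0;
}
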
-
-
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
-
- typedef FixedBodyDescriptor<kCodeOffset,
- kLastPointerFieldOffset + kPointerSize, kSize>
- BodyDescriptor;
- typedef FixedBodyDescriptor<kNameOffset,
- kLastPointerFieldOffset + kPointerSize, kSize>
- BodyDescriptorWeakCode;
-
- // Bit positions in start_position_and_type.
- // The source code start position is in the 30 most significant bits of
- // the start_position_and_type field.
- static const int kIsNamedExpressionBit = 0;
- static const int kIsTopLevelBit = 1;
- static const int kStartPositionShift = 2;
- static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
-
- // Bit positions in compiler_hints.
- enum CompilerHints {
- // byte 0
- kAllowLazyCompilation,
- kMarkedForTierUp,
- kOptimizationDisabled,
- kHasDuplicateParameters,
- kNative,
- kStrictModeFunction,
- kUsesArguments,
- kNeedsHomeObject,
- // byte 1
- kForceInline,
- kIsAsmFunction,
- kMustUseIgnitionTurbo,
- kDontFlush,
- kIsDeclaration,
- kIsAsmWasmBroken,
- kHasConcurrentOptimizationJob,
-
- kUnused1, // Unused fields.
-
- // byte 2
- kFunctionKind,
- // rest of byte 2 and first two bits of byte 3 are used by FunctionKind
- // byte 3
- kCompilerHintsCount = kFunctionKind + 10, // Pseudo entry
- };
-
- // Bit positions in debugger_hints.
- enum DebuggerHints {
- kIsAnonymousExpression,
- kNameShouldPrintAsAnonymous,
- kDeserialized,
- kHasNoSideEffect,
- kComputedHasNoSideEffect,
- kDebugIsBlackboxed,
- kComputedDebugIsBlackboxed,
- kHasReportedBinaryCoverage
- };
-
- // kFunctionKind has to be byte-aligned
- STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
-
- class FunctionKindBits : public BitField<FunctionKind, kFunctionKind, 10> {};
-
- class DeoptCountBits : public BitField<int, 0, 4> {};
- class OptReenableTriesBits : public BitField<int, 4, 18> {};
- class ICAgeBits : public BitField<int, 22, 8> {};
-
- class OptCountBits : public BitField<int, 0, 22> {};
- class DisabledOptimizationReasonBits : public BitField<int, 22, 8> {};
-
- private:
- FRIEND_TEST(PreParserTest, LazyFunctionLength);
-
- inline int length() const;
-
-#if V8_HOST_ARCH_32_BIT
- // On 32 bit platforms, compiler hints is a smi.
- static const int kCompilerHintsSmiTagSize = kSmiTagSize;
- static const int kCompilerHintsSize = kPointerSize;
-#else
- // On 64 bit platforms, compiler hints is not a smi, see comment above.
- static const int kCompilerHintsSmiTagSize = 0;
- static const int kCompilerHintsSize = kIntSize;
-#endif
-
- STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount +
- SharedFunctionInfo::kCompilerHintsSmiTagSize <=
- SharedFunctionInfo::kCompilerHintsSize * kBitsPerByte);
-
- public:
- // Constants for optimizing codegen for strict mode function and
- // native tests when using integer-width instructions.
- static const int kStrictModeBit =
- kStrictModeFunction + kCompilerHintsSmiTagSize;
- static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
- static const int kHasDuplicateParametersBit =
- kHasDuplicateParameters + kCompilerHintsSmiTagSize;
-
- static const int kFunctionKindShift =
- kFunctionKind + kCompilerHintsSmiTagSize;
- static const int kAllFunctionKindBitsMask = FunctionKindBits::kMask
- << kCompilerHintsSmiTagSize;
-
- static const int kMarkedForTierUpBit =
- kMarkedForTierUp + kCompilerHintsSmiTagSize;
-
- // Constants for optimizing codegen for strict mode function and
- // native tests.
- // Allows to use byte-width instructions.
- static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
- static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
- static const int kHasDuplicateParametersBitWithinByte =
- kHasDuplicateParametersBit % kBitsPerByte;
-
- static const int kClassConstructorBitsWithinByte =
- FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
- STATIC_ASSERT(kClassConstructorBitsWithinByte < (1 << kBitsPerByte));
-
- static const int kDerivedConstructorBitsWithinByte =
- FunctionKind::kDerivedConstructor << kCompilerHintsSmiTagSize;
- STATIC_ASSERT(kDerivedConstructorBitsWithinByte < (1 << kBitsPerByte));
-
- static const int kMarkedForTierUpBitWithinByte =
- kMarkedForTierUpBit % kBitsPerByte;
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define BYTE_OFFSET(compiler_hint) \
- kCompilerHintsOffset + \
- (compiler_hint + kCompilerHintsSmiTagSize) / kBitsPerByte
-#elif defined(V8_TARGET_BIG_ENDIAN)
-#define BYTE_OFFSET(compiler_hint) \
- kCompilerHintsOffset + (kCompilerHintsSize - 1) - \
- ((compiler_hint + kCompilerHintsSmiTagSize) / kBitsPerByte)
-#else
-#error Unknown byte ordering
-#endif
- static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
- static const int kNativeByteOffset = BYTE_OFFSET(kNative);
- static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
- static const int kHasDuplicateParametersByteOffset =
- BYTE_OFFSET(kHasDuplicateParameters);
- static const int kMarkedForTierUpByteOffset = BYTE_OFFSET(kMarkedForTierUp);
-#undef BYTE_OFFSET
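
As a worked illustration of the BYTE_OFFSET macro above (hypothetical standalone constants; the 64-bit case where kCompilerHintsSmiTagSize is 0 is assumed, and kStrictModeFunction is taken to be bit 5 per the CompilerHints enum order):

#include <cassert>

int main() {
  const int kBitsPerByte = 8;
  const int kCompilerHintsSmiTagSize = 0;  // 64-bit case
  const int kCompilerHintsOffset = 0;      // placeholder base offset
  const int kStrictModeFunction = 5;       // 6th entry of CompilerHints

  // Little-endian variant of BYTE_OFFSET(kStrictModeFunction).
  int byte_offset =
      kCompilerHintsOffset +
      (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
  int bit_within_byte =
      (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;

  assert(byte_offset == kCompilerHintsOffset);  // still in byte 0
  assert(bit_within_byte == 5);                 // kStrictModeBitWithinByte
  return 0;
}
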
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
-};
-
-
-// Printing support.
-struct SourceCodeOf {
- explicit SourceCodeOf(SharedFunctionInfo* v, int max = -1)
- : value(v), max_length(max) {}
- const SharedFunctionInfo* value;
- int max_length;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v);
-
-
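
The SourceCodeOf struct above is the usual stream-manipulator idiom: bundle the value with its formatting options and give the bundle an operator<<. A minimal sketch with hypothetical names (std::string stands in for the SharedFunctionInfo source):

#include <iostream>
#include <string>

struct DemoSourceCodeOf {
  explicit DemoSourceCodeOf(const std::string& v, int max = -1)
      : value(v), max_length(max) {}
  const std::string& value;
  int max_length;
};

std::ostream& operator<<(std::ostream& os, const DemoSourceCodeOf& s) {
  if (s.max_length < 0 || static_cast<int>(s.value.size()) <= s.max_length) {
    return os << s.value;
  }
  return os << s.value.substr(0, s.max_length) << "...";
}

int main() {
  std::string source = "function add(a, b) { return a + b; }";
  std::cout << DemoSourceCodeOf(source, 16) << std::endl;  // truncated output
  return 0;
}
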
class JSGeneratorObject: public JSObject {
public:
// [function]: The function corresponding to this generator object.
@@ -5776,10 +4818,10 @@ class JSGeneratorObject: public JSObject {
// [register_file]: Saved interpreter register file.
DECL_ACCESSORS(register_file, FixedArray)
- DECLARE_CAST(JSGeneratorObject)
+ DECL_CAST(JSGeneratorObject)
// Dispatched behavior.
- DECLARE_VERIFIER(JSGeneratorObject)
+ DECL_VERIFIER(JSGeneratorObject)
// Magic sentinel values for the continuation.
static const int kGeneratorExecuting = -2;
@@ -5801,31 +4843,23 @@ class JSGeneratorObject: public JSObject {
class JSAsyncGeneratorObject : public JSGeneratorObject {
public:
- DECLARE_CAST(JSAsyncGeneratorObject)
+ DECL_CAST(JSAsyncGeneratorObject)
// Dispatched behavior.
- DECLARE_VERIFIER(JSAsyncGeneratorObject)
+ DECL_VERIFIER(JSAsyncGeneratorObject)
// [queue]
// Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
// undefined.
DECL_ACCESSORS(queue, HeapObject)
- // [await_input_or_debug_pos]
- // Holds the value to resume generator with after an Await(), in order to
- // avoid clobbering function.sent. If awaited_promise is not undefined, holds
- // current bytecode offset for debugging instead.
- DECL_ACCESSORS(await_input_or_debug_pos, Object)
-
// [awaited_promise]
// A reference to the Promise of an AwaitExpression.
DECL_ACCESSORS(awaited_promise, HeapObject)
// Layout description.
static const int kQueueOffset = JSGeneratorObject::kSize;
- static const int kAwaitInputOrDebugPosOffset = kQueueOffset + kPointerSize;
- static const int kAwaitedPromiseOffset =
- kAwaitInputOrDebugPosOffset + kPointerSize;
+ static const int kAwaitedPromiseOffset = kQueueOffset + kPointerSize;
static const int kSize = kAwaitedPromiseOffset + kPointerSize;
private:
@@ -5837,9 +4871,9 @@ class JSAsyncGeneratorObject : public JSGeneratorObject {
// the declared variable (foo). A module can have at most one namespace object.
class JSModuleNamespace : public JSObject {
public:
- DECLARE_CAST(JSModuleNamespace)
- DECLARE_PRINTER(JSModuleNamespace)
- DECLARE_VERIFIER(JSModuleNamespace)
+ DECL_CAST(JSModuleNamespace)
+ DECL_PRINTER(JSModuleNamespace)
+ DECL_VERIFIER(JSModuleNamespace)
// The actual module whose namespace is being represented.
DECL_ACCESSORS(module, Module)
@@ -5868,9 +4902,9 @@ class JSModuleNamespace : public JSObject {
// This is still very much in flux.
class Module : public Struct {
public:
- DECLARE_CAST(Module)
- DECLARE_VERIFIER(Module)
- DECLARE_PRINTER(Module)
+ DECL_CAST(Module)
+ DECL_VERIFIER(Module)
+ DECL_PRINTER(Module)
// The code representing this Module, or an abstraction thereof.
// This is either a SharedFunctionInfo or a JSFunction or a ModuleInfo
@@ -5892,9 +4926,21 @@ class Module : public Struct {
// Hash for this object (a random non-zero Smi).
DECL_INT_ACCESSORS(hash)
- // Internal instantiation status.
+ // Status.
DECL_INT_ACCESSORS(status)
- enum InstantiationStatus { kUnprepared, kPrepared };
+ enum Status {
+ // Order matters!
+ kUninstantiated,
+ kPreInstantiating,
+ kInstantiating,
+ kInstantiated,
+ kEvaluating,
+ kEvaluated,
+ kErrored
+ };
+
+ // The exception in the case {status} is kErrored.
+ Object* GetException();
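
Since the Status enumerators above are declared in lifecycle order ("Order matters!"), code can express progress checks as plain comparisons. A hedged sketch with demo names (not V8's accessors):

enum DemoModuleStatus {
  kDemoUninstantiated,
  kDemoPreInstantiating,
  kDemoInstantiating,
  kDemoInstantiated,
  kDemoEvaluating,
  kDemoEvaluated,
  kDemoErrored
};

// Because the values are ordered, "has at least reached instantiation" is a
// single comparison instead of a switch over every later state.
inline bool DemoHasBeenInstantiated(DemoModuleStatus status) {
  return status >= kDemoInstantiated;
}
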
// The namespace object (or undefined).
DECL_ACCESSORS(module_namespace, HeapObject)
@@ -5904,12 +4950,12 @@ class Module : public Struct {
// ModuleInfo::module_requests.
DECL_ACCESSORS(requested_modules, FixedArray)
+ // [script]: Script from which the module originates.
+ DECL_ACCESSORS(script, Script)
+
// Get the ModuleInfo associated with the code.
inline ModuleInfo* info() const;
- inline bool instantiated() const;
- inline bool evaluated() const;
-
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
// otherwise. (In the case where the callback throws an exception, that
@@ -5944,9 +4990,23 @@ class Module : public Struct {
static const int kRequestedModulesOffset =
kModuleNamespaceOffset + kPointerSize;
static const int kStatusOffset = kRequestedModulesOffset + kPointerSize;
- static const int kSize = kStatusOffset + kPointerSize;
+ static const int kDfsIndexOffset = kStatusOffset + kPointerSize;
+ static const int kDfsAncestorIndexOffset = kDfsIndexOffset + kPointerSize;
+ static const int kExceptionOffset = kDfsAncestorIndexOffset + kPointerSize;
+ static const int kScriptOffset = kExceptionOffset + kPointerSize;
+ static const int kSize = kScriptOffset + kPointerSize;
private:
+ friend class Factory;
+
+ DECL_ACCESSORS(exception, Object)
+
+ // TODO(neis): Don't store those in the module object?
+ DECL_INT_ACCESSORS(dfs_index)
+ DECL_INT_ACCESSORS(dfs_ancestor_index)
+
+ // Helpers for Instantiate and Evaluate.
+
static void CreateExport(Handle<Module> module, int cell_index,
Handle<FixedArray> names);
static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
@@ -5973,13 +5033,23 @@ class Module : public Struct {
Handle<Module> module, Handle<String> name, MessageLocation loc,
bool must_resolve, ResolveSet* resolve_set);
- inline void set_evaluated();
-
static MUST_USE_RESULT bool PrepareInstantiate(
Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
- static MUST_USE_RESULT bool FinishInstantiate(Handle<Module> module,
- v8::Local<v8::Context> context);
+ static MUST_USE_RESULT bool FinishInstantiate(
+ Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
+ unsigned* dfs_index, Zone* zone);
+ static MUST_USE_RESULT MaybeHandle<Object> Evaluate(
+ Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
+ unsigned* dfs_index);
+
+ static void MaybeTransitionComponent(Handle<Module> module,
+ ZoneForwardList<Handle<Module>>* stack,
+ Status new_status);
+
+ // To set status to kErrored, RecordError should be used.
+ void SetStatus(Status status);
+ void RecordError();
DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
};
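
The new FinishInstantiate/Evaluate signatures, the dfs_index / dfs_ancestor_index fields, and MaybeTransitionComponent suggest a depth-first walk that transitions whole strongly connected components (cyclic import groups) at once, in the spirit of the ES module spec's InnerModuleInstantiation. The following is only a generic sketch of that pattern under those assumptions, with hypothetical types, not V8's implementation:

#include <algorithm>
#include <forward_list>
#include <vector>

struct DemoModule {
  std::vector<DemoModule*> requested;  // imported modules
  bool instantiating = false;
  bool instantiated = false;
  unsigned dfs_index = 0;
  unsigned dfs_ancestor_index = 0;
};

unsigned DemoFinishInstantiate(DemoModule* module,
                               std::forward_list<DemoModule*>* stack,
                               unsigned dfs_index) {
  if (module->instantiating || module->instantiated) return dfs_index;
  module->instantiating = true;
  module->dfs_index = module->dfs_ancestor_index = dfs_index++;
  stack->push_front(module);
  for (DemoModule* child : module->requested) {
    dfs_index = DemoFinishInstantiate(child, stack, dfs_index);
    if (child->instantiating) {  // child is still on the stack: shared cycle
      module->dfs_ancestor_index =
          std::min(module->dfs_ancestor_index, child->dfs_ancestor_index);
    }
  }
  // "MaybeTransitionComponent": if this module is the root of its strongly
  // connected component, everything above it on the stack belongs to the same
  // component and can transition together.
  if (module->dfs_ancestor_index == module->dfs_index) {
    DemoModule* top;
    do {
      top = stack->front();
      stack->pop_front();
      top->instantiating = false;
      top->instantiated = true;
    } while (top != module);
  }
  return dfs_index;
}
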
@@ -5988,6 +5058,7 @@ class Module : public Struct {
class JSBoundFunction : public JSObject {
public:
// [bound_target_function]: The wrapped function object.
+ inline Object* raw_bound_target_function() const;
DECL_ACCESSORS(bound_target_function, JSReceiver)
// [bound_this]: The value that is always passed as the this value when
@@ -6003,11 +5074,11 @@ class JSBoundFunction : public JSObject {
static MaybeHandle<Context> GetFunctionRealm(
Handle<JSBoundFunction> function);
- DECLARE_CAST(JSBoundFunction)
+ DECL_CAST(JSBoundFunction)
// Dispatched behavior.
- DECLARE_PRINTER(JSBoundFunction)
- DECLARE_VERIFIER(JSBoundFunction)
+ DECL_PRINTER(JSBoundFunction)
+ DECL_VERIFIER(JSBoundFunction)
// The bound function's string representation implemented according
// to ES6 section 19.2.3.5 Function.prototype.toString ( ).
@@ -6072,13 +5143,27 @@ class JSFunction: public JSObject {
// optimized.
inline bool IsInterpreted();
- // Tells whether or not this function has been optimized.
+ // Tells whether or not this function checks its optimization marker in its
+ // feedback vector.
+ inline bool ChecksOptimizationMarker();
+
+ // Tells whether or not this function holds optimized code.
+ //
+ // Note: Returning false does not necessarily mean that this function hasn't
+ // been optimized, as it may have optimized code on its feedback vector.
inline bool IsOptimized();
+ // Tells whether or not this function has optimized code available to it,
+ // either because it is optimized or because it has optimized code in its
+ // feedback vector.
+ inline bool HasOptimizedCode();
+
+ // Tells whether or not this function has a (non-zero) optimization marker.
+ inline bool HasOptimizationMarker();
+
// Mark this function for lazy recompilation. The function will be recompiled
// the next time it is executed.
- void MarkForOptimization();
- void AttemptConcurrentOptimization();
+ void MarkForOptimization(ConcurrencyMode mode);
// Tells whether or not the function is already marked for lazy recompilation.
inline bool IsMarkedForOptimization();
@@ -6090,6 +5175,12 @@ class JSFunction: public JSObject {
// Clears the optimized code slot in the function's feedback vector.
inline void ClearOptimizedCodeSlot(const char* reason);
+ // Sets the optimization marker in the function's feedback vector.
+ inline void SetOptimizationMarker(OptimizationMarker marker);
+
+ // Clears the optimization marker in the function's feedback vector.
+ inline void ClearOptimizationMarker();
+
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
@@ -6137,23 +5228,18 @@ class JSFunction: public JSObject {
static void SetPrototype(Handle<JSFunction> function,
Handle<Object> value);
- // After prototype is removed, it will not be created when accessed, and
- // [[Construct]] from this function will not be allowed.
- bool RemovePrototype();
-
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
// [next_function_link]: Links functions into various lists, e.g. the list
- // of optimized functions hanging off the native_context. The CodeFlusher
- // uses this link to chain together flushing candidates. Treated weakly
+ // of optimized functions hanging off the native_context. Treated weakly
// by the garbage collector.
DECL_ACCESSORS(next_function_link, Object)
// Prints the name of the function using PrintF.
void PrintName(FILE* out = stdout);
- DECLARE_CAST(JSFunction)
+ DECL_CAST(JSFunction)
// Calculate the instance size and in-object properties count.
static void CalculateInstanceSizeForDerivedClass(
@@ -6165,30 +5251,17 @@ class JSFunction: public JSObject {
int requested_in_object_properties,
int* instance_size,
int* in_object_properties);
- // Visiting policy flags define whether the code entry or next function
- // should be visited or not.
- enum BodyVisitingPolicy {
- kVisitCodeEntry = 1 << 0,
- kVisitNextFunction = 1 << 1,
-
- kSkipCodeEntryAndNextFunction = 0,
- kVisitCodeEntryAndNextFunction = kVisitCodeEntry | kVisitNextFunction
- };
+ enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
// Iterates the function object according to the visiting policy.
template <BodyVisitingPolicy>
class BodyDescriptorImpl;
- // Visit the whole object.
- typedef BodyDescriptorImpl<kVisitCodeEntryAndNextFunction> BodyDescriptor;
-
- // Don't visit next function.
- typedef BodyDescriptorImpl<kVisitCodeEntry> BodyDescriptorStrongCode;
- typedef BodyDescriptorImpl<kSkipCodeEntryAndNextFunction>
- BodyDescriptorWeakCode;
+ typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
+ typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
// Dispatched behavior.
- DECLARE_PRINTER(JSFunction)
- DECLARE_VERIFIER(JSFunction)
+ DECL_PRINTER(JSFunction)
+ DECL_VERIFIER(JSFunction)
// The function's name if it is configured, otherwise shared function info
// debug name.
@@ -6196,9 +5269,10 @@ class JSFunction: public JSObject {
// ES6 section 9.2.11 SetFunctionName
// Because of the way this abstract operation is used in the spec,
- // it should never fail.
- static void SetName(Handle<JSFunction> function, Handle<Name> name,
- Handle<String> prefix);
+ // it should never fail, but in practice it will fail if the generated
+ // function name's length exceeds String::kMaxLength.
+ static MUST_USE_RESULT bool SetName(Handle<JSFunction> function,
+ Handle<Name> name, Handle<String> prefix);
// The function's displayName if it is set, otherwise name if it is
// configured, otherwise shared function info
@@ -6244,15 +5318,15 @@ class JSGlobalProxy : public JSObject {
// [hash]: The hash code property (undefined if not initialized yet).
DECL_ACCESSORS(hash, Object)
- DECLARE_CAST(JSGlobalProxy)
+ DECL_CAST(JSGlobalProxy)
inline bool IsDetachedFrom(JSGlobalObject* global) const;
static int SizeWithEmbedderFields(int embedder_field_count);
// Dispatched behavior.
- DECLARE_PRINTER(JSGlobalProxy)
- DECLARE_VERIFIER(JSGlobalProxy)
+ DECL_PRINTER(JSGlobalProxy)
+ DECL_VERIFIER(JSGlobalProxy)
// Layout description.
static const int kNativeContextOffset = JSObject::kHeaderSize;
@@ -6273,6 +5347,9 @@ class JSGlobalObject : public JSObject {
// [global proxy]: the global proxy object of the context
DECL_ACCESSORS(global_proxy, JSObject)
+ // Gets global object properties.
+ inline GlobalDictionary* global_dictionary();
+ inline void set_global_dictionary(GlobalDictionary* dictionary);
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
@@ -6281,13 +5358,13 @@ class JSGlobalObject : public JSObject {
Handle<JSGlobalObject> global, Handle<Name> name,
PropertyCellType cell_type, int* entry_out = nullptr);
- DECLARE_CAST(JSGlobalObject)
+ DECL_CAST(JSGlobalObject)
inline bool IsDetached();
// Dispatched behavior.
- DECLARE_PRINTER(JSGlobalObject)
- DECLARE_VERIFIER(JSGlobalObject)
+ DECL_PRINTER(JSGlobalObject)
+ DECL_VERIFIER(JSGlobalObject)
// Layout description.
static const int kNativeContextOffset = JSObject::kHeaderSize;
@@ -6306,11 +5383,11 @@ class JSValue: public JSObject {
// [value]: the object being wrapped.
DECL_ACCESSORS(value, Object)
- DECLARE_CAST(JSValue)
+ DECL_CAST(JSValue)
// Dispatched behavior.
- DECLARE_PRINTER(JSValue)
- DECLARE_VERIFIER(JSValue)
+ DECL_PRINTER(JSValue)
+ DECL_VERIFIER(JSValue)
// Layout description.
static const int kValueOffset = JSObject::kHeaderSize;
@@ -6351,7 +5428,7 @@ class JSDate: public JSObject {
// moment when cached fields were cached.
DECL_ACCESSORS(cache_stamp, Object)
- DECLARE_CAST(JSDate)
+ DECL_CAST(JSDate)
// Returns the time value (UTC) identifying the current time.
static double CurrentTimeValue(Isolate* isolate);
@@ -6365,8 +5442,8 @@ class JSDate: public JSObject {
void SetValue(Object* value, bool is_value_nan);
// Dispatched behavior.
- DECLARE_PRINTER(JSDate)
- DECLARE_VERIFIER(JSDate)
+ DECL_PRINTER(JSDate)
+ DECL_VERIFIER(JSDate)
// The order is important. It must be kept in sync with date macros
// in macros.py.
@@ -6463,11 +5540,11 @@ class JSMessageObject: public JSObject {
inline int error_level() const;
inline void set_error_level(int level);
- DECLARE_CAST(JSMessageObject)
+ DECL_CAST(JSMessageObject)
// Dispatched behavior.
- DECLARE_PRINTER(JSMessageObject)
- DECLARE_VERIFIER(JSMessageObject)
+ DECL_PRINTER(JSMessageObject)
+ DECL_VERIFIER(JSMessageObject)
// Layout description.
static const int kTypeOffset = JSObject::kHeaderSize;
@@ -6482,6 +5559,8 @@ class JSMessageObject: public JSObject {
typedef FixedBodyDescriptor<HeapObject::kMapOffset,
kStackFramesOffset + kPointerSize,
kSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
};
class JSPromise;
@@ -6490,9 +5569,9 @@ class JSPromise;
// JS
class JSPromiseCapability : public JSObject {
public:
- DECLARE_CAST(JSPromiseCapability)
+ DECL_CAST(JSPromiseCapability)
- DECLARE_VERIFIER(JSPromiseCapability)
+ DECL_VERIFIER(JSPromiseCapability)
DECL_ACCESSORS(promise, Object)
DECL_ACCESSORS(resolve, Object)
@@ -6549,11 +5628,11 @@ class JSPromise : public JSObject {
static const char* Status(int status);
- DECLARE_CAST(JSPromise)
+ DECL_CAST(JSPromise)
// Dispatched behavior.
- DECLARE_PRINTER(JSPromise)
- DECLARE_VERIFIER(JSPromise)
+ DECL_PRINTER(JSPromise)
+ DECL_VERIFIER(JSPromise)
// Layout description.
static const int kStatusOffset = JSObject::kHeaderSize;
@@ -6588,9 +5667,9 @@ class JSPromise : public JSObject {
// - a reference to a literal string to search for
// If it is an irregexp regexp:
// - a reference to code for Latin1 inputs (bytecode or compiled), or a smi
-// used for tracking the last usage (used for code flushing).
+// used for tracking the last usage (used for regexp code flushing).
// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
-// used for tracking the last usage (used for code flushing)..
+// used for tracking the last usage (used for regexp code flushing).
// - max number of registers used by irregexp implementations.
// - number of capture registers (output values) of the regexp.
class JSRegExp: public JSObject {
@@ -6616,6 +5695,7 @@ class JSRegExp: public JSObject {
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
+ DECL_ACCESSORS(last_index, Object)
DECL_ACCESSORS(source, Object)
V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(Handle<String> source,
@@ -6649,24 +5729,17 @@ class JSRegExp: public JSObject {
}
}
- static int saved_code_index(bool is_latin1) {
- if (is_latin1) {
- return kIrregexpLatin1CodeSavedIndex;
- } else {
- return kIrregexpUC16CodeSavedIndex;
- }
- }
-
- DECLARE_CAST(JSRegExp)
+ DECL_CAST(JSRegExp)
// Dispatched behavior.
- DECLARE_PRINTER(JSRegExp)
- DECLARE_VERIFIER(JSRegExp)
+ DECL_PRINTER(JSRegExp)
+ DECL_VERIFIER(JSRegExp)
static const int kDataOffset = JSObject::kHeaderSize;
static const int kSourceOffset = kDataOffset + kPointerSize;
static const int kFlagsOffset = kSourceOffset + kPointerSize;
static const int kSize = kFlagsOffset + kPointerSize;
+ static const int kLastIndexOffset = kSize; // In-object field.
// Indices in the data array.
static const int kTagIndex = 0;
@@ -6688,22 +5761,14 @@ class JSRegExp: public JSObject {
// fails, this fields hold an exception object that should be
// thrown if the regexp is used again.
static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
-
- // Saved instance of Irregexp compiled code or bytecode for Latin1 that
- // is a potential candidate for flushing.
- static const int kIrregexpLatin1CodeSavedIndex = kDataIndex + 2;
- // Saved instance of Irregexp compiled code or bytecode for UC16 that is
- // a potential candidate for flushing.
- static const int kIrregexpUC16CodeSavedIndex = kDataIndex + 3;
-
// Maximal number of registers used by either Latin1 or UC16.
// Only used to check that there is enough stack space
- static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
+ static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
// Number of captures in the compiled regexp.
- static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
+ static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
// Maps names of named capture groups (at indices 2i) to their corresponding
// (1-based) capture group indices (at indices 2i + 1).
- static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
+ static const int kIrregexpCaptureNameMapIndex = kDataIndex + 4;
static const int kIrregexpDataSize = kIrregexpCaptureNameMapIndex + 1;
@@ -6713,15 +5778,6 @@ class JSRegExp: public JSObject {
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
-
- // The compilation error value for the regexp code object. The real error
- // object is in the saved code field.
- static const int kCompilationErrorValue = -2;
-
- // When we store the sweep generation at which we moved the code from the
- // code index to the saved code index, we mask it off to be in the [0:255]
- // range.
- static const int kCodeAgeMask = 0xff;
};
DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
@@ -6745,7 +5801,7 @@ class TypeFeedbackInfo : public Tuple3 {
inline void set_inlined_type_change_checksum(int checksum);
inline bool matches_inlined_type_change_checksum(int checksum);
- DECLARE_CAST(TypeFeedbackInfo)
+ DECL_CAST(TypeFeedbackInfo)
static const int kStorage1Offset = kValue1Offset;
static const int kStorage2Offset = kValue2Offset;
@@ -6786,20 +5842,32 @@ class AllocationSite: public Struct {
const char* PretenureDecisionName(PretenureDecision decision);
- DECL_ACCESSORS(transition_info, Object)
+ // Contains either a Smi-encoded bitfield or a boilerplate. If it's a Smi the
+ // AllocationSite is for a constructed Array.
+ DECL_ACCESSORS(transition_info_or_boilerplate, Object)
+ DECL_ACCESSORS(boilerplate, JSObject)
+ DECL_INT_ACCESSORS(transition_info)
+
// nested_site threads a list of sites that represent nested literals
// walked in a particular order. So [[1, 2], 1, 2] will have one
// nested_site, but [[1, 2], 3, [4]] will have a list of two.
DECL_ACCESSORS(nested_site, Object)
+
+ // Bitfield containing pretenuring information.
DECL_INT_ACCESSORS(pretenure_data)
+
DECL_INT_ACCESSORS(pretenure_create_count)
DECL_ACCESSORS(dependent_code, DependentCode)
+
+ // heap->allocation_site_list() points to the last AllocationSite; the sites
+ // form a linked list through the weak_next property. The GC might remove
+ // elements from the list by updating weak_next.
DECL_ACCESSORS(weak_next, Object)
inline void Initialize();
// This method is expensive, it should only be called for reporting.
- bool IsNestedSite();
+ bool IsNested();
// transition_info bitfields, for constructed array transition info.
class ElementsKindBits: public BitField<ElementsKind, 0, 15> {};
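
ElementsKindBits above is an instance of the BitField<type, shift, size> pattern used throughout these headers for packing typed fields into a small integer. A self-contained sketch of that idiom (a demo template, not the one shipped in V8):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct DemoBitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

int main() {
  using KindBits = DemoBitField<int, 0, 15>;     // shaped like ElementsKindBits
  using TenureBits = DemoBitField<bool, 15, 1>;  // a hypothetical one-bit flag
  uint32_t packed = KindBits::encode(7) | TenureBits::encode(true);
  assert(KindBits::decode(packed) == 7);
  packed = TenureBits::update(packed, false);
  assert(TenureBits::decode(packed) == false);
  return 0;
}
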
@@ -6818,29 +5886,29 @@ class AllocationSite: public Struct {
inline void IncrementMementoCreateCount();
- PretenureFlag GetPretenureMode();
+ PretenureFlag GetPretenureMode() const;
void ResetPretenureDecision();
- inline PretenureDecision pretenure_decision();
+ inline PretenureDecision pretenure_decision() const;
inline void set_pretenure_decision(PretenureDecision decision);
- inline bool deopt_dependent_code();
+ inline bool deopt_dependent_code() const;
inline void set_deopt_dependent_code(bool deopt);
- inline int memento_found_count();
+ inline int memento_found_count() const;
inline void set_memento_found_count(int count);
- inline int memento_create_count();
+ inline int memento_create_count() const;
inline void set_memento_create_count(int count);
// The pretenuring decision is made during gc, and the zombie state allows
// us to recognize when an allocation site is just being kept alive because
// a later traversal of new space may discover AllocationMementos that point
// to this AllocationSite.
- inline bool IsZombie();
+ inline bool IsZombie() const;
- inline bool IsMaybeTenure();
+ inline bool IsMaybeTenure() const;
inline void MarkZombie();
@@ -6850,30 +5918,30 @@ class AllocationSite: public Struct {
inline bool DigestPretenuringFeedback(bool maximum_size_scavenge);
- inline ElementsKind GetElementsKind();
+ inline ElementsKind GetElementsKind() const;
inline void SetElementsKind(ElementsKind kind);
- inline bool CanInlineCall();
+ inline bool CanInlineCall() const;
inline void SetDoNotInlineCall();
- inline bool SitePointsToLiteral();
+ inline bool PointsToLiteral() const;
template <AllocationSiteUpdateMode update_or_check =
AllocationSiteUpdateMode::kUpdate>
static bool DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKind to_kind);
- DECLARE_PRINTER(AllocationSite)
- DECLARE_VERIFIER(AllocationSite)
+ DECL_PRINTER(AllocationSite)
+ DECL_VERIFIER(AllocationSite)
- DECLARE_CAST(AllocationSite)
- static inline AllocationSiteMode GetMode(
- ElementsKind boilerplate_elements_kind);
- static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
+ DECL_CAST(AllocationSite)
+ static inline bool ShouldTrack(ElementsKind boilerplate_elements_kind);
+ static bool ShouldTrack(ElementsKind from, ElementsKind to);
static inline bool CanTrack(InstanceType type);
- static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
- static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kTransitionInfoOrBoilerplateOffset = HeapObject::kHeaderSize;
+ static const int kNestedSiteOffset =
+ kTransitionInfoOrBoilerplateOffset + kPointerSize;
static const int kPretenureDataOffset = kNestedSiteOffset + kPointerSize;
static const int kPretenureCreateCountOffset =
kPretenureDataOffset + kPointerSize;
@@ -6884,19 +5952,21 @@ class AllocationSite: public Struct {
// During mark compact we need to take special care for the dependent code
// field.
- static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
+ static const int kPointerFieldsBeginOffset =
+ kTransitionInfoOrBoilerplateOffset;
static const int kPointerFieldsEndOffset = kWeakNextOffset;
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset, kSize>
- MarkingBodyDescriptor;
-
- // For other visitors, use the fixed body descriptor below.
+ // Ignores weakness.
typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kSize, kSize>
BodyDescriptor;
+ // Respects weakness.
+ typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+ kPointerFieldsEndOffset, kSize>
+ BodyDescriptorWeak;
+
private:
- inline bool PretenuringDecisionMade();
+ inline bool PretenuringDecisionMade() const;
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
};
@@ -6909,973 +5979,20 @@ class AllocationMemento: public Struct {
DECL_ACCESSORS(allocation_site, Object)
- inline bool IsValid();
- inline AllocationSite* GetAllocationSite();
- inline Address GetAllocationSiteUnchecked();
+ inline bool IsValid() const;
+ inline AllocationSite* GetAllocationSite() const;
+ inline Address GetAllocationSiteUnchecked() const;
- DECLARE_PRINTER(AllocationMemento)
- DECLARE_VERIFIER(AllocationMemento)
+ DECL_PRINTER(AllocationMemento)
+ DECL_VERIFIER(AllocationMemento)
- DECLARE_CAST(AllocationMemento)
+ DECL_CAST(AllocationMemento)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
};
-// Representation of a slow alias as part of a sloppy arguments objects.
-// For fast aliases (if HasSloppyArgumentsElements()):
-// - the parameter map contains an index into the context
-// - all attributes of the element have default values
-// For slow aliases (if HasDictionaryArgumentsElements()):
-// - the parameter map contains no fast alias mapping (i.e. the hole)
-// - this struct (in the slow backing store) contains an index into the context
- // - all attributes are available as part of the property details
-class AliasedArgumentsEntry: public Struct {
- public:
- inline int aliased_context_slot() const;
- inline void set_aliased_context_slot(int count);
-
- DECLARE_CAST(AliasedArgumentsEntry)
-
- // Dispatched behavior.
- DECLARE_PRINTER(AliasedArgumentsEntry)
- DECLARE_VERIFIER(AliasedArgumentsEntry)
-
- static const int kAliasedContextSlot = HeapObject::kHeaderSize;
- static const int kSize = kAliasedContextSlot + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
-};
-
-
-enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
-enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
-
-// The characteristics of a string are stored in its map. Retrieving these
-// few bits of information is moderately expensive, involving two memory
-// loads where the second is dependent on the first. To improve efficiency
-// the shape of the string is given its own class so that it can be retrieved
-// once and used for several string operations. A StringShape is small enough
-// to be passed by value and is immutable, but be aware that flattening a
-// string can potentially alter its shape. Also be aware that a GC caused by
-// something else can alter the shape of a string due to ConsString
-// shortcutting. Keeping these restrictions in mind has proven to be error-
-// prone and so we no longer put StringShapes in variables unless there is a
-// concrete performance benefit at that particular point in the code.
-class StringShape BASE_EMBEDDED {
- public:
- inline explicit StringShape(const String* s);
- inline explicit StringShape(Map* s);
- inline explicit StringShape(InstanceType t);
- inline bool IsSequential();
- inline bool IsExternal();
- inline bool IsCons();
- inline bool IsSliced();
- inline bool IsThin();
- inline bool IsIndirect();
- inline bool IsExternalOneByte();
- inline bool IsExternalTwoByte();
- inline bool IsSequentialOneByte();
- inline bool IsSequentialTwoByte();
- inline bool IsInternalized();
- inline StringRepresentationTag representation_tag();
- inline uint32_t encoding_tag();
- inline uint32_t full_representation_tag();
- inline bool HasOnlyOneByteChars();
-#ifdef DEBUG
- inline uint32_t type() { return type_; }
- inline void invalidate() { valid_ = false; }
- inline bool valid() { return valid_; }
-#else
- inline void invalidate() { }
-#endif
-
- private:
- uint32_t type_;
-#ifdef DEBUG
- inline void set_valid() { valid_ = true; }
- bool valid_;
-#else
- inline void set_valid() { }
-#endif
-};
-
-
-// The Name abstract class captures anything that can be used as a property
-// name, i.e., strings and symbols. All names store a hash value.
-class Name: public HeapObject {
- public:
- // Get and set the hash field of the name.
- inline uint32_t hash_field();
- inline void set_hash_field(uint32_t value);
-
- // Tells whether the hash code has been computed.
- inline bool HasHashCode();
-
- // Returns a hash value used for the property table
- inline uint32_t Hash();
-
- // Equality operations.
- inline bool Equals(Name* other);
- inline static bool Equals(Handle<Name> one, Handle<Name> two);
-
- // Conversion.
- inline bool AsArrayIndex(uint32_t* index);
-
- // If the name is private, it can only name own properties.
- inline bool IsPrivate();
-
- inline bool IsUniqueName() const;
-
- // Return a string version of this name that is converted according to the
- // rules described in ES6 section 9.2.11.
- MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(Handle<Name> name);
- MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(
- Handle<Name> name, Handle<String> prefix);
-
- DECLARE_CAST(Name)
-
- DECLARE_PRINTER(Name)
-#if V8_TRACE_MAPS
- void NameShortPrint();
- int NameShortPrint(Vector<char> str);
-#endif
-
- // Layout description.
- static const int kHashFieldSlot = HeapObject::kHeaderSize;
-#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
- static const int kHashFieldOffset = kHashFieldSlot;
-#else
- static const int kHashFieldOffset = kHashFieldSlot + kIntSize;
-#endif
- static const int kSize = kHashFieldSlot + kPointerSize;
-
- // Mask constant for checking if a name has a computed hash code
- // and if it is a string that is an array index. The least significant bit
- // indicates whether a hash code has been computed. If the hash code has
- // been computed the 2nd bit tells whether the string can be used as an
- // array index.
- static const int kHashNotComputedMask = 1;
- static const int kIsNotArrayIndexMask = 1 << 1;
- static const int kNofHashBitFields = 2;
-
- // Shift constant retrieving hash code from hash field.
- static const int kHashShift = kNofHashBitFields;
-
- // Only these bits are relevant in the hash, since the top two are shifted
- // out.
- static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
-
- // Array index strings this short can keep their index in the hash field.
- static const int kMaxCachedArrayIndexLength = 7;
-
- // Maximum number of characters to consider when trying to convert a string
- // value into an array index.
- static const int kMaxArrayIndexSize = 10;
-
- // For strings which are array indexes the hash value has the string length
- // mixed into the hash, mainly to avoid a hash value of zero which would be
- // the case for the string '0'. 24 bits are used for the array index value.
- static const int kArrayIndexValueBits = 24;
- static const int kArrayIndexLengthBits =
- kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
-
- STATIC_ASSERT(kArrayIndexLengthBits > 0);
- STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
-
- class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
- kArrayIndexValueBits> {}; // NOLINT
- class ArrayIndexLengthBits : public BitField<unsigned int,
- kNofHashBitFields + kArrayIndexValueBits,
- kArrayIndexLengthBits> {}; // NOLINT
-
- // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
- // can use a mask to test if the length of the string is less than or equal to
- // kMaxCachedArrayIndexLength.
- STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
-
- static const unsigned int kContainsCachedArrayIndexMask =
- (~static_cast<unsigned>(kMaxCachedArrayIndexLength)
- << ArrayIndexLengthBits::kShift) |
- kIsNotArrayIndexMask;
-
- // Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField =
- kIsNotArrayIndexMask | kHashNotComputedMask;
-
- protected:
- static inline bool IsHashFieldComputed(uint32_t field);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Name);
-};
-
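As an aside, the hash-field bit layout documented above can be exercised in a few lines of standalone C++. This is only a sketch: the constants mirror the documented values, the field contents are made up, and the length bits that a real array-index hash would carry are left zero for simplicity.

#include <cstdint>
#include <cstdio>

namespace {
constexpr uint32_t kHashNotComputedMask = 1;        // bit 0: hash not yet computed
constexpr uint32_t kIsNotArrayIndexMask = 1u << 1;  // bit 1: not usable as array index
constexpr int kNofHashBitFields = 2;
constexpr int kHashShift = kNofHashBitFields;
constexpr int kArrayIndexValueBits = 24;

bool HashIsComputed(uint32_t field) { return (field & kHashNotComputedMask) == 0; }
bool IsArrayIndex(uint32_t field) {
  return HashIsComputed(field) && (field & kIsNotArrayIndexMask) == 0;
}
uint32_t HashValue(uint32_t field) { return field >> kHashShift; }
uint32_t ArrayIndexValue(uint32_t field) {
  // The index value sits in kArrayIndexValueBits just above the two flag bits.
  return (field >> kNofHashBitFields) & ((1u << kArrayIndexValueBits) - 1);
}
}  // namespace

int main() {
  uint32_t field = 42u << kNofHashBitFields;  // computed hash, array index 42
  std::printf("computed=%d array_index=%d value=%u hash=%u\n",
              HashIsComputed(field), IsArrayIndex(field), ArrayIndexValue(field),
              HashValue(field));
}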
-
-// ES6 symbols.
-class Symbol: public Name {
- public:
- // [name]: The print name of a symbol, or undefined if none.
- DECL_ACCESSORS(name, Object)
-
- DECL_INT_ACCESSORS(flags)
-
- // [is_private]: Whether this is a private symbol. Private symbols can only
- // be used to designate own properties of objects.
- DECL_BOOLEAN_ACCESSORS(is_private)
-
- // [is_well_known_symbol]: Whether this is a spec-defined well-known symbol,
- // or not. Well-known symbols do not throw when an access check fails during
- // a load.
- DECL_BOOLEAN_ACCESSORS(is_well_known_symbol)
-
- // [is_public]: Whether this is a symbol created by Symbol.for. Calling
- // Symbol.keyFor on such a symbol simply needs to return the attached name.
- DECL_BOOLEAN_ACCESSORS(is_public)
-
- DECLARE_CAST(Symbol)
-
- // Dispatched behavior.
- DECLARE_PRINTER(Symbol)
- DECLARE_VERIFIER(Symbol)
-
- // Layout description.
- static const int kNameOffset = Name::kSize;
- static const int kFlagsOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
-
- // Flags layout.
- static const int kPrivateBit = 0;
- static const int kWellKnownSymbolBit = 1;
- static const int kPublicBit = 2;
-
- typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
-
- void SymbolShortPrint(std::ostream& os);
-
- private:
- const char* PrivateSymbolToName() const;
-
-#if V8_TRACE_MAPS
- friend class Name; // For PrivateSymbolToName.
-#endif
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
-};
-
-
-class ConsString;
-
-// The String abstract class captures JavaScript string values:
-//
-// Ecma-262:
-// 4.3.16 String Value
-// A string value is a member of the type String and is a finite
-// ordered sequence of zero or more 16-bit unsigned integer values.
-//
-// All string values have a length field.
-class String: public Name {
- public:
- enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
-
- class SubStringRange {
- public:
- explicit inline SubStringRange(String* string, int first = 0,
- int length = -1);
- class iterator;
- inline iterator begin();
- inline iterator end();
-
- private:
- String* string_;
- int first_;
- int length_;
- };
-
- // Representation of the flat content of a String.
- // A non-flat string doesn't have flat content.
- // A flat string has content that's encoded as a sequence of either
- // one-byte chars or two-byte UC16.
- // Returned by String::GetFlatContent().
- class FlatContent {
- public:
- // Returns true if the string is flat and this structure contains content.
- bool IsFlat() const { return state_ != NON_FLAT; }
- // Returns true if the structure contains one-byte content.
- bool IsOneByte() const { return state_ == ONE_BYTE; }
- // Returns true if the structure contains two-byte content.
- bool IsTwoByte() const { return state_ == TWO_BYTE; }
-
- // Return the one byte content of the string. Only use if IsOneByte()
- // returns true.
- Vector<const uint8_t> ToOneByteVector() const {
- DCHECK_EQ(ONE_BYTE, state_);
- return Vector<const uint8_t>(onebyte_start, length_);
- }
- // Return the two-byte content of the string. Only use if IsTwoByte()
- // returns true.
- Vector<const uc16> ToUC16Vector() const {
- DCHECK_EQ(TWO_BYTE, state_);
- return Vector<const uc16>(twobyte_start, length_);
- }
-
- uc16 Get(int i) const {
- DCHECK(i < length_);
- DCHECK(state_ != NON_FLAT);
- if (state_ == ONE_BYTE) return onebyte_start[i];
- return twobyte_start[i];
- }
-
- bool UsesSameString(const FlatContent& other) const {
- return onebyte_start == other.onebyte_start;
- }
-
- private:
- enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
-
- // Constructors only used by String::GetFlatContent().
- explicit FlatContent(const uint8_t* start, int length)
- : onebyte_start(start), length_(length), state_(ONE_BYTE) {}
- explicit FlatContent(const uc16* start, int length)
- : twobyte_start(start), length_(length), state_(TWO_BYTE) { }
- FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) { }
-
- union {
- const uint8_t* onebyte_start;
- const uc16* twobyte_start;
- };
- int length_;
- State state_;
-
- friend class String;
- friend class IterableSubString;
- };
-
- template <typename Char>
- INLINE(Vector<const Char> GetCharVector());
-
- // Get and set the length of the string.
- inline int length() const;
- inline void set_length(int value);
-
- // Get and set the length of the string using acquire loads and release
- // stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
-
- // Returns whether this string has only one-byte chars, i.e. all of them can
- // be one-byte encoded. This might be the case even if the string is
- // two-byte. Such strings may appear when the embedder prefers
- // two-byte external representations even for one-byte data.
- inline bool IsOneByteRepresentation() const;
- inline bool IsTwoByteRepresentation() const;
-
- // Cons and slices have an encoding flag that may not represent the actual
- // encoding of the underlying string. This is taken into account here.
- // Requires: this->IsFlat()
- inline bool IsOneByteRepresentationUnderneath();
- inline bool IsTwoByteRepresentationUnderneath();
-
- // NOTE: this should be considered only a hint. False negatives are
- // possible.
- inline bool HasOnlyOneByteChars();
-
- // Get and set individual two byte chars in the string.
- inline void Set(int index, uint16_t value);
- // Get individual two byte char in the string. Repeated calls
- // to this method are not efficient unless the string is flat.
- INLINE(uint16_t Get(int index));
-
- // ES6 section 7.1.3.1 ToNumber Applied to the String Type
- static Handle<Object> ToNumber(Handle<String> subject);
-
- // Flattens the string. Checks first inline to see if it is
- // necessary. Does nothing if the string is not a cons string.
- // Flattening allocates a sequential string with the same data as
- // the given string and mutates the cons string to a degenerate
- // form, where the first component is the new sequential string and
- // the second component is the empty string. If allocation fails,
- // this function returns a failure. If flattening succeeds, this
- // function returns the sequential string that is now the first
- // component of the cons string.
- //
- // Degenerate cons strings are handled specially by the garbage
- // collector (see IsShortcutCandidate).
-
- static inline Handle<String> Flatten(Handle<String> string,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Tries to return the content of a flat string as a structure holding either
- // a flat vector of char or of uc16.
- // If the string isn't flat, and therefore doesn't have flat content, the
- // returned structure will report so, and can't provide a vector of either
- // kind.
- FlatContent GetFlatContent();
-
- // Returns the parent of a sliced string or first part of a flat cons string.
- // Requires: StringShape(this).IsIndirect() && this->IsFlat()
- inline String* GetUnderlying();
-
- // String relational comparison, implemented according to ES6 section 7.2.11
- // Abstract Relational Comparison (step 5): The comparison of Strings uses a
- // simple lexicographic ordering on sequences of code unit values. There is no
- // attempt to use the more complex, semantically oriented definitions of
- // character or string equality and collating order defined in the Unicode
- // specification. Therefore String values that are canonically equal according
- // to the Unicode standard could test as unequal. In effect this algorithm
- // assumes that both Strings are already in normalized form. Also, note that
- // for strings containing supplementary characters, lexicographic ordering on
- // sequences of UTF-16 code unit values differs from that on sequences of code
- // point values.
- MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
- Handle<String> y);
-
- // Perform ES6 21.1.3.8, including checking arguments.
- static Object* IndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position);
- // Perform string match of pattern on subject, starting at start index.
- // Caller must ensure that 0 <= start_index <= sub->length(), as this does not
- // check any arguments.
- static int IndexOf(Isolate* isolate, Handle<String> receiver,
- Handle<String> search, int start_index);
-
- static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
- Handle<Object> search, Handle<Object> position);
-
- // Encapsulates logic related to a match and its capture groups as required
- // by GetSubstitution.
- class Match {
- public:
- virtual Handle<String> GetMatch() = 0;
- virtual Handle<String> GetPrefix() = 0;
- virtual Handle<String> GetSuffix() = 0;
-
- // A named capture can be invalid (if it is not specified in the pattern),
- // unmatched (specified but not matched in the current string), or matched.
- enum CaptureState { INVALID, UNMATCHED, MATCHED };
-
- virtual int CaptureCount() = 0;
- virtual bool HasNamedCaptures() = 0;
- virtual MaybeHandle<String> GetCapture(int i, bool* capture_exists) = 0;
- virtual MaybeHandle<String> GetNamedCapture(Handle<String> name,
- CaptureState* state) = 0;
-
- virtual ~Match() {}
- };
-
- // ES#sec-getsubstitution
- // GetSubstitution(matched, str, position, captures, replacement)
- // Expand the $-expressions in the string and return a new string with
- // the result.
- // A {start_index} can be passed to specify where to start scanning the
- // replacement string.
- MUST_USE_RESULT static MaybeHandle<String> GetSubstitution(
- Isolate* isolate, Match* match, Handle<String> replacement,
- int start_index = 0);
-
- // String equality operations.
- inline bool Equals(String* other);
- inline static bool Equals(Handle<String> one, Handle<String> two);
- bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
-
- // Dispatches to Is{One,Two}ByteEqualTo.
- template <typename Char>
- bool IsEqualTo(Vector<const Char> str);
-
- bool IsOneByteEqualTo(Vector<const uint8_t> str);
- bool IsTwoByteEqualTo(Vector<const uc16> str);
-
- // Return a UTF8 representation of the string. The string is null
- // terminated but may optionally contain nulls. Length is returned
- // in length_output if length_output is not a null pointer. The string
- // should be nearly flat, otherwise the performance of this method may
- // be very slow (quadratic in the length). Setting robustness_flag to
- // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust. This means it
- // handles unexpected data without causing assert failures and it does not
- // do any heap allocations. This is useful when printing stack traces.
- std::unique_ptr<char[]> ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robustness_flag, int offset,
- int length, int* length_output = 0);
- std::unique_ptr<char[]> ToCString(
- AllowNullsFlag allow_nulls = DISALLOW_NULLS,
- RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
- int* length_output = 0);
-
- bool ComputeArrayIndex(uint32_t* index);
-
- // Externalization.
- bool MakeExternal(v8::String::ExternalStringResource* resource);
- bool MakeExternal(v8::String::ExternalOneByteStringResource* resource);
-
- // Conversion.
- inline bool AsArrayIndex(uint32_t* index);
- uint32_t inline ToValidIndex(Object* number);
-
- // Trimming.
- enum TrimMode { kTrim, kTrimLeft, kTrimRight };
- static Handle<String> Trim(Handle<String> string, TrimMode mode);
-
- DECLARE_CAST(String)
-
- void PrintOn(FILE* out);
-
- // For use during stack traces. Performs rudimentary sanity check.
- bool LooksValid();
-
- // Dispatched behavior.
- void StringShortPrint(StringStream* accumulator, bool show_details = true);
- void PrintUC16(std::ostream& os, int start = 0, int end = -1); // NOLINT
-#if defined(DEBUG) || defined(OBJECT_PRINT)
- char* ToAsciiArray();
-#endif
- DECLARE_PRINTER(String)
- DECLARE_VERIFIER(String)
-
- inline bool IsFlat();
-
- // Layout description.
- static const int kLengthOffset = Name::kSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- // Max char codes.
- static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
- static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
- static const int kMaxUtf16CodeUnit = 0xffff;
- static const uint32_t kMaxUtf16CodeUnitU = kMaxUtf16CodeUnit;
- static const uc32 kMaxCodePoint = 0x10ffff;
-
- // Maximal string length.
- static const int kMaxLength = (1 << 28) - 16;
-
- // Max length for computing hash. For strings longer than this limit the
- // string length is used as the hash value.
- static const int kMaxHashCalcLength = 16383;
-
- // Limit for truncation in short printing.
- static const int kMaxShortPrintLength = 1024;
-
- // Support for regular expressions.
- const uc16* GetTwoByteData(unsigned start);
-
- // Helper function for flattening strings.
- template <typename sinkchar>
- static void WriteToFlat(String* source,
- sinkchar* sink,
- int from,
- int to);
-
- // The return value may point to the first aligned word containing the first
- // non-one-byte character, rather than directly to the non-one-byte character.
- // If the return value is >= the passed length, the entire string was
- // one-byte.
- static inline int NonAsciiStart(const char* chars, int length) {
- const char* start = chars;
- const char* limit = chars + length;
-
- if (length >= kIntptrSize) {
- // Check unaligned bytes.
- while (!IsAligned(reinterpret_cast<intptr_t>(chars), sizeof(uintptr_t))) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
- return static_cast<int>(chars - start);
- }
- ++chars;
- }
- // Check aligned words.
- DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
- const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
- while (chars + sizeof(uintptr_t) <= limit) {
- if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
- return static_cast<int>(chars - start);
- }
- chars += sizeof(uintptr_t);
- }
- }
- // Check remaining unaligned bytes.
- while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
- return static_cast<int>(chars - start);
- }
- ++chars;
- }
-
- return static_cast<int>(chars - start);
- }
-
- static inline bool IsAscii(const char* chars, int length) {
- return NonAsciiStart(chars, length) >= length;
- }
-
- static inline bool IsAscii(const uint8_t* chars, int length) {
- return
- NonAsciiStart(reinterpret_cast<const char*>(chars), length) >= length;
- }
-
- static inline int NonOneByteStart(const uc16* chars, int length) {
- const uc16* limit = chars + length;
- const uc16* start = chars;
- while (chars < limit) {
- if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
- ++chars;
- }
- return static_cast<int>(chars - start);
- }
-
- static inline bool IsOneByte(const uc16* chars, int length) {
- return NonOneByteStart(chars, length) >= length;
- }
-
- template<class Visitor>
- static inline ConsString* VisitFlat(Visitor* visitor,
- String* string,
- int offset = 0);
-
- static Handle<FixedArray> CalculateLineEnds(Handle<String> string,
- bool include_ending_line);
-
- // Use the hash field to forward to the canonical internalized string
- // when deserializing an internalized string.
- inline void SetForwardedInternalizedString(String* string);
- inline String* GetForwardedInternalizedString();
-
- private:
- friend class Name;
- friend class StringTableInsertionKey;
-
- static Handle<String> SlowFlatten(Handle<ConsString> cons,
- PretenureFlag tenure);
-
- // Slow case of String::Equals. This implementation works on any strings
- // but it is most efficient on strings that are almost flat.
- bool SlowEquals(String* other);
-
- static bool SlowEquals(Handle<String> one, Handle<String> two);
-
- // Slow case of AsArrayIndex.
- V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
-
- // Compute and set the hash code.
- uint32_t ComputeAndSetHash();
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(String);
-};
-
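The word-at-a-time test used by NonAsciiStart above boils down to masking every byte of a machine word against 0x80. A standalone sketch of the same idea follows; the alignment fast path is replaced by memcpy and the names are invented here.

#include <cstddef>
#include <cstdint>
#include <cstring>

static bool IsAsciiWordwise(const char* chars, size_t length) {
  // 0x80 replicated into every byte of a word: any set bit flags a non-ASCII byte.
  const uintptr_t kHighBits = UINTPTR_MAX / 0xFF * 0x80;
  size_t i = 0;
  for (; i + sizeof(uintptr_t) <= length; i += sizeof(uintptr_t)) {
    uintptr_t word;
    std::memcpy(&word, chars + i, sizeof(word));  // sidesteps alignment concerns
    if (word & kHighBits) return false;
  }
  for (; i < length; ++i) {
    if (static_cast<unsigned char>(chars[i]) > 0x7F) return false;
  }
  return true;
}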
-
-// The SeqString abstract class captures sequential string values.
-class SeqString: public String {
- public:
- DECLARE_CAST(SeqString)
-
- // Layout description.
- static const int kHeaderSize = String::kSize;
-
- // Truncate the string in-place if possible and return the result.
- // In case of new_length == 0, the empty string is returned without
- // truncating the original string.
- MUST_USE_RESULT static Handle<String> Truncate(Handle<SeqString> string,
- int new_length);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
-};
-
-
-// The OneByteString class captures sequential one-byte string objects.
-// Each character in the OneByteString is a one-byte character.
-class SeqOneByteString: public SeqString {
- public:
- static const bool kHasOneByteEncoding = true;
-
- // Dispatched behavior.
- inline uint16_t SeqOneByteStringGet(int index);
- inline void SeqOneByteStringSet(int index, uint16_t value);
-
- // Get the address of the characters in this string.
- inline Address GetCharsAddress();
-
- inline uint8_t* GetChars();
-
- DECLARE_CAST(SeqOneByteString)
-
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of a OneByteString
- // instance.
- inline int SeqOneByteStringSize(InstanceType instance_type);
-
- // Computes the size for a OneByteString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
- }
-
- // Maximal memory usage for a single sequential one-byte string.
- static const int kMaxSize = 512 * MB - 1;
- STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
-
- class BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
-};
-
-
-// The TwoByteString class captures sequential Unicode string objects.
-// Each character in the TwoByteString is a two-byte uint16_t.
-class SeqTwoByteString: public SeqString {
- public:
- static const bool kHasOneByteEncoding = false;
-
- // Dispatched behavior.
- inline uint16_t SeqTwoByteStringGet(int index);
- inline void SeqTwoByteStringSet(int index, uint16_t value);
-
- // Get the address of the characters in this string.
- inline Address GetCharsAddress();
-
- inline uc16* GetChars();
-
- // For regexp code.
- const uint16_t* SeqTwoByteStringGetData(unsigned start);
-
- DECLARE_CAST(SeqTwoByteString)
-
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of a TwoByteString
- // instance.
- inline int SeqTwoByteStringSize(InstanceType instance_type);
-
- // Computes the size for a TwoByteString instance of a given length.
- static int SizeFor(int length) {
- return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
- }
-
- // Maximal memory usage for a single sequential two-byte string.
- static const int kMaxSize = 512 * MB - 1;
- STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >=
- String::kMaxLength);
-
- class BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
-};
-
-
-// The ConsString class describes string values built by using the
-// addition operator on strings. A ConsString is a pair where the
-// first and second components are pointers to other string values.
-// One or both components of a ConsString can be pointers to other
-// ConsStrings, creating a binary tree of ConsStrings where the leaves
-// are non-ConsString string values. The string value represented by
-// a ConsString can be obtained by concatenating the leaf string
-// values in a left-to-right depth-first traversal of the tree.
-class ConsString: public String {
- public:
- // First string of the cons cell.
- inline String* first();
- // Doesn't check that the result is a string, even in debug mode. This is
- // useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_first();
- inline void set_first(String* first,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Second string of the cons cell.
- inline String* second();
- // Doesn't check that the result is a string, even in debug mode. This is
- // useful during GC where the mark bits confuse the checks.
- inline Object* unchecked_second();
- inline void set_second(String* second,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t ConsStringGet(int index);
-
- DECLARE_CAST(ConsString)
-
- // Layout description.
- static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kSecondOffset = kFirstOffset + kPointerSize;
- static const int kSize = kSecondOffset + kPointerSize;
-
- // Minimum length for a cons string.
- static const int kMinLength = 13;
-
- typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
- BodyDescriptor;
-
- DECLARE_VERIFIER(ConsString)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
-};
-
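A toy illustration of the cons-tree idea described above, outside of V8: leaves hold plain strings and the represented value is recovered by a left-to-right depth-first walk. The types are invented for this sketch.

#include <memory>
#include <string>

struct Cons {
  std::string leaf;             // used only when both children are null
  std::unique_ptr<Cons> first;  // left component
  std::unique_ptr<Cons> second; // right component
};

static void Flatten(const Cons& node, std::string* out) {
  if (!node.first && !node.second) {
    *out += node.leaf;
    return;
  }
  if (node.first) Flatten(*node.first, out);
  if (node.second) Flatten(*node.second, out);
}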
-// The ThinString class describes string objects that are just references
-// to another string object. They are used for in-place internalization when
-// the original string cannot actually be internalized in-place: in these
-// cases, the original string is converted to a ThinString pointing at its
-// internalized version (which is allocated as a new object).
-// In terms of memory layout and most algorithms operating on strings,
-// ThinStrings can be thought of as "one-part cons strings".
-class ThinString : public String {
- public:
- // Actual string that this ThinString refers to.
- inline String* actual() const;
- inline void set_actual(String* s,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
-
- DECLARE_CAST(ThinString)
- DECLARE_VERIFIER(ThinString)
-
- // Layout description.
- static const int kActualOffset = String::kSize;
- static const int kSize = kActualOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kActualOffset, kSize, kSize> BodyDescriptor;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ThinString);
-};
-
-// The Sliced String class describes strings that are substrings of another
-// sequential string. The motivation is to save time and memory when creating
-// a substring. A Sliced String is described as a pointer to the parent,
-// the offset from the start of the parent string and the length. Using
-// a Sliced String therefore requires unpacking of the parent string and
-// adding the offset to the start address. Substrings of a SlicedString
-// are not nested, since the double indirection is simplified when creating
-// such a substring.
-// Currently missing features are:
-// - handling externalized parent strings
-// - external strings as parent
-// - truncating a sliced string to enable an otherwise unneeded parent to be GC'ed.
-class SlicedString: public String {
- public:
- inline String* parent();
- inline void set_parent(String* parent,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline int offset() const;
- inline void set_offset(int offset);
-
- // Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t SlicedStringGet(int index);
-
- DECLARE_CAST(SlicedString)
-
- // Layout description.
- static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kOffsetOffset = kParentOffset + kPointerSize;
- static const int kSize = kOffsetOffset + kPointerSize;
-
- // Minimum length for a sliced string.
- static const int kMinLength = 13;
-
- typedef FixedBodyDescriptor<kParentOffset,
- kOffsetOffset + kPointerSize, kSize>
- BodyDescriptor;
-
- DECLARE_VERIFIER(SlicedString)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
-};
-
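For illustration only, the slice representation described above amounts to a (parent, offset, length) triple, so reading character i resolves to parent[offset + i] without copying. A minimal standalone sketch with invented names:

#include <cassert>
#include <string>

struct Slice {
  const std::string* parent;
  int offset;
  int length;

  char Get(int i) const {
    assert(0 <= i && i < length);
    return (*parent)[offset + i];  // one extra indirection, no character copy
  }
};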
-
-// The ExternalString class describes string values that are backed by
-// a string resource that lies outside the V8 heap. ExternalStrings
-// consist of the length field common to all strings and a pointer to the
-// external resource. It is important to ensure (externally) that the
-// resource is not deallocated while the ExternalString is live in the
-// V8 heap.
-//
-// The API expects that all ExternalStrings are created through the
-// API. Therefore, ExternalStrings should not be used internally.
-class ExternalString: public String {
- public:
- DECLARE_CAST(ExternalString)
-
- // Layout description.
- static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
- static const int kShortSize = kResourceOffset + kPointerSize;
- static const int kResourceDataOffset = kResourceOffset + kPointerSize;
- static const int kSize = kResourceDataOffset + kPointerSize;
-
- // Return whether external string is short (data pointer is not cached).
- // Return whether the external string is short (data pointer is not cached).
- inline bool is_short();
-
- STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
-};
-
-
-// The ExternalOneByteString class is an external string backed by an
-// one-byte string.
-class ExternalOneByteString : public ExternalString {
- public:
- static const bool kHasOneByteEncoding = true;
-
- typedef v8::String::ExternalOneByteStringResource Resource;
-
- // The underlying resource.
- inline const Resource* resource();
- inline void set_resource(const Resource* buffer);
-
- // Update the pointer cache to the external character array.
- // The cached pointer is always valid, as the external character array does
- // not move during lifetime. Deserialization is the only exception, after
- // which the pointer cache has to be refreshed.
- inline void update_data_cache();
-
- inline const uint8_t* GetChars();
-
- // Dispatched behavior.
- inline uint16_t ExternalOneByteStringGet(int index);
-
- DECLARE_CAST(ExternalOneByteString)
-
- class BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
-};
-
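On the embedder side, such a string is typically backed by a subclass of the public v8::String::ExternalOneByteStringResource. A hedged sketch follows; the class name and buffer handling are invented, and the include path is assumed.

#include <cstring>
#include "v8.h"  // assumed include path for the public V8 API

class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
 public:
  explicit StaticOneByteResource(const char* data)
      : data_(data), length_(std::strlen(data)) {}
  const char* data() const override { return data_; }
  size_t length() const override { return length_; }

 private:
  const char* data_;  // must outlive every string created from this resource
  size_t length_;
};
// Typically handed to v8::String::NewExternalOneByte(isolate, resource).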
-
-// The ExternalTwoByteString class is an external string backed by a UTF-16
-// encoded string.
-class ExternalTwoByteString: public ExternalString {
- public:
- static const bool kHasOneByteEncoding = false;
-
- typedef v8::String::ExternalStringResource Resource;
-
- // The underlying string resource.
- inline const Resource* resource();
- inline void set_resource(const Resource* buffer);
-
- // Update the pointer cache to the external character array.
- // The cached pointer is always valid, as the external character array does
- // not move during lifetime. Deserialization is the only exception, after
- // which the pointer cache has to be refreshed.
- inline void update_data_cache();
-
- inline const uint16_t* GetChars();
-
- // Dispatched behavior.
- inline uint16_t ExternalTwoByteStringGet(int index);
-
- // For regexp code.
- inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
-
- DECLARE_CAST(ExternalTwoByteString)
-
- class BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
-};
-
-
// Utility superclass for stack-allocated objects that must be updated
// on gc. It provides two ways for the gc to update instances, either
// iterating or updating after gc.
@@ -7899,99 +6016,6 @@ class Relocatable BASE_EMBEDDED {
Relocatable* prev_;
};
-
-// A flat string reader provides random access to the contents of a
-// string independent of the character width of the string. The handle
-// must be valid as long as the reader is being used.
-class FlatStringReader : public Relocatable {
- public:
- FlatStringReader(Isolate* isolate, Handle<String> str);
- FlatStringReader(Isolate* isolate, Vector<const char> input);
- void PostGarbageCollection();
- inline uc32 Get(int index);
- template <typename Char>
- inline Char Get(int index);
- int length() { return length_; }
- private:
- String** str_;
- bool is_one_byte_;
- int length_;
- const void* start_;
-};
-
-
-// This maintains an off-stack representation of the stack frames required
-// to traverse a ConsString, allowing an entirely iterative and restartable
- // traversal of the entire string.
-class ConsStringIterator {
- public:
- inline ConsStringIterator() {}
- inline explicit ConsStringIterator(ConsString* cons_string, int offset = 0) {
- Reset(cons_string, offset);
- }
- inline void Reset(ConsString* cons_string, int offset = 0) {
- depth_ = 0;
- // Next will always return NULL.
- if (cons_string == NULL) return;
- Initialize(cons_string, offset);
- }
- // Returns NULL when complete.
- inline String* Next(int* offset_out) {
- *offset_out = 0;
- if (depth_ == 0) return NULL;
- return Continue(offset_out);
- }
-
- private:
- static const int kStackSize = 32;
- // Use a mask instead of doing modulo operations for stack wrapping.
- static const int kDepthMask = kStackSize-1;
- STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
- static inline int OffsetForDepth(int depth);
-
- inline void PushLeft(ConsString* string);
- inline void PushRight(ConsString* string);
- inline void AdjustMaximumDepth();
- inline void Pop();
- inline bool StackBlown() { return maximum_depth_ - depth_ == kStackSize; }
- void Initialize(ConsString* cons_string, int offset);
- String* Continue(int* offset_out);
- String* NextLeaf(bool* blew_stack);
- String* Search(int* offset_out);
-
- // Stack must always contain only frames for which right traversal
- // has not yet been performed.
- ConsString* frames_[kStackSize];
- ConsString* root_;
- int depth_;
- int maximum_depth_;
- int consumed_;
- DISALLOW_COPY_AND_ASSIGN(ConsStringIterator);
-};
-
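A simplified, standalone version of the iterative traversal idea behind this iterator: pending right children go on an explicit stack so no recursion is needed. V8 additionally bounds its stack at kStackSize frames and wraps with kDepthMask, which this sketch (with invented types) does not attempt.

#include <string>
#include <vector>

struct Node {
  std::string leaf;  // non-empty only for leaves
  const Node* left = nullptr;
  const Node* right = nullptr;
};

static std::string ConcatIterative(const Node* root) {
  std::string out;
  std::vector<const Node*> pending;  // right children not yet visited
  const Node* node = root;
  while (node != nullptr || !pending.empty()) {
    if (node == nullptr) {
      node = pending.back();
      pending.pop_back();
      continue;
    }
    if (node->left == nullptr && node->right == nullptr) {
      out += node->leaf;  // leaf reached: emit and resume from the stack
      node = nullptr;
    } else {
      if (node->right != nullptr) pending.push_back(node->right);
      node = node->left;
    }
  }
  return out;
}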
-
-class StringCharacterStream {
- public:
- inline StringCharacterStream(String* string,
- int offset = 0);
- inline uint16_t GetNext();
- inline bool HasMore();
- inline void Reset(String* string, int offset = 0);
- inline void VisitOneByteString(const uint8_t* chars, int length);
- inline void VisitTwoByteString(const uint16_t* chars, int length);
-
- private:
- ConsStringIterator iter_;
- bool is_one_byte_;
- union {
- const uint8_t* buffer8_;
- const uint16_t* buffer16_;
- };
- const uint8_t* end_;
- DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
-};
-
-
template <typename T>
class VectorIterator {
public:
@@ -8028,10 +6052,10 @@ class Oddball: public HeapObject {
// ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
MUST_USE_RESULT static inline Handle<Object> ToNumber(Handle<Oddball> input);
- DECLARE_CAST(Oddball)
+ DECL_CAST(Oddball)
// Dispatched behavior.
- DECLARE_VERIFIER(Oddball)
+ DECL_VERIFIER(Oddball)
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
@@ -8061,6 +6085,8 @@ class Oddball: public HeapObject {
typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
kSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
STATIC_ASSERT(kToNumberRawOffset == HeapNumber::kValueOffset);
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
@@ -8077,7 +6103,7 @@ class Cell: public HeapObject {
// [value]: value of the cell.
DECL_ACCESSORS(value, Object)
- DECLARE_CAST(Cell)
+ DECL_CAST(Cell)
static inline Cell* FromValueAddress(Address value) {
Object* result = FromAddress(value - kValueOffset);
@@ -8089,8 +6115,8 @@ class Cell: public HeapObject {
}
// Dispatched behavior.
- DECLARE_PRINTER(Cell)
- DECLARE_VERIFIER(Cell)
+ DECL_PRINTER(Cell)
+ DECL_VERIFIER(Cell)
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
@@ -8099,6 +6125,8 @@ class Cell: public HeapObject {
typedef FixedBodyDescriptor<kValueOffset,
kValueOffset + kPointerSize,
kSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
@@ -8107,6 +6135,8 @@ class Cell: public HeapObject {
class PropertyCell : public HeapObject {
public:
+ // [name]: the name of the global property.
+ DECL_ACCESSORS(name, Name)
// [property_details]: details of the global property.
DECL_ACCESSORS(property_details_raw, Object)
// [value]: value of the global property.
@@ -8138,21 +6168,22 @@ class PropertyCell : public HeapObject {
static void SetValueWithInvalidation(Handle<PropertyCell> cell,
Handle<Object> new_value);
- DECLARE_CAST(PropertyCell)
+ DECL_CAST(PropertyCell)
// Dispatched behavior.
- DECLARE_PRINTER(PropertyCell)
- DECLARE_VERIFIER(PropertyCell)
+ DECL_PRINTER(PropertyCell)
+ DECL_VERIFIER(PropertyCell)
// Layout description.
static const int kDetailsOffset = HeapObject::kHeaderSize;
- static const int kValueOffset = kDetailsOffset + kPointerSize;
+ static const int kNameOffset = kDetailsOffset + kPointerSize;
+ static const int kValueOffset = kNameOffset + kPointerSize;
static const int kDependentCodeOffset = kValueOffset + kPointerSize;
static const int kSize = kDependentCodeOffset + kPointerSize;
- typedef FixedBodyDescriptor<kValueOffset,
- kSize,
- kSize> BodyDescriptor;
+ typedef FixedBodyDescriptor<kNameOffset, kSize, kSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
@@ -8177,10 +6208,10 @@ class WeakCell : public HeapObject {
inline bool next_cleared();
- DECLARE_CAST(WeakCell)
+ DECL_CAST(WeakCell)
- DECLARE_PRINTER(WeakCell)
- DECLARE_VERIFIER(WeakCell)
+ DECL_PRINTER(WeakCell)
+ DECL_VERIFIER(WeakCell)
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
@@ -8210,7 +6241,7 @@ class JSProxy: public JSReceiver {
static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
- DECLARE_CAST(JSProxy)
+ DECL_CAST(JSProxy)
INLINE(bool IsRevoked() const);
static void Revoke(Handle<JSProxy> proxy);
@@ -8226,6 +6257,9 @@ class JSProxy: public JSReceiver {
// ES6 9.5.3
MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
+ // ES6, #sec-isarray. NOT to be confused with %_IsArray.
+ MUST_USE_RESULT static Maybe<bool> IsArray(Handle<JSProxy> proxy);
+
// ES6 9.5.4 (when passed DONT_THROW)
MUST_USE_RESULT static Maybe<bool> PreventExtensions(
Handle<JSProxy> proxy, ShouldThrow should_throw);
@@ -8270,8 +6304,10 @@ class JSProxy: public JSReceiver {
LookupIterator* it);
// Dispatched behavior.
- DECLARE_PRINTER(JSProxy)
- DECLARE_VERIFIER(JSProxy)
+ DECL_PRINTER(JSProxy)
+ DECL_VERIFIER(JSProxy)
+
+ static const int kMaxIterationLimit = 100 * 1024;
// Layout description.
static const int kTargetOffset = JSReceiver::kHeaderSize;
@@ -8279,8 +6315,10 @@ class JSProxy: public JSReceiver {
static const int kHashOffset = kHandlerOffset + kPointerSize;
static const int kSize = kHashOffset + kPointerSize;
- typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
+ typedef FixedBodyDescriptor<JSReceiver::kPropertiesOrHashOffset, kSize, kSize>
BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
static Object* GetIdentityHash(Handle<JSProxy> receiver);
@@ -8314,14 +6352,14 @@ class JSCollection : public JSObject {
// objects/hash-table.h) into the same file.
class JSSet : public JSCollection {
public:
- DECLARE_CAST(JSSet)
+ DECL_CAST(JSSet)
static void Initialize(Handle<JSSet> set, Isolate* isolate);
static void Clear(Handle<JSSet> set);
// Dispatched behavior.
- DECLARE_PRINTER(JSSet)
- DECLARE_VERIFIER(JSSet)
+ DECL_PRINTER(JSSet)
+ DECL_VERIFIER(JSSet)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
@@ -8333,14 +6371,14 @@ class JSSet : public JSCollection {
// objects/hash-table.h) into the same file.
class JSMap : public JSCollection {
public:
- DECLARE_CAST(JSMap)
+ DECL_CAST(JSMap)
static void Initialize(Handle<JSMap> map, Isolate* isolate);
static void Clear(Handle<JSMap> map);
// Dispatched behavior.
- DECLARE_PRINTER(JSMap)
- DECLARE_VERIFIER(JSMap)
+ DECL_PRINTER(JSMap)
+ DECL_VERIFIER(JSMap)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
@@ -8348,10 +6386,10 @@ class JSMap : public JSCollection {
class JSArrayIterator : public JSObject {
public:
- DECLARE_PRINTER(JSArrayIterator)
- DECLARE_VERIFIER(JSArrayIterator)
+ DECL_PRINTER(JSArrayIterator)
+ DECL_VERIFIER(JSArrayIterator)
- DECLARE_CAST(JSArrayIterator)
+ DECL_CAST(JSArrayIterator)
// [object]: the [[IteratedObject]] inobject property.
DECL_ACCESSORS(object, Object)
@@ -8383,9 +6421,9 @@ class JSArrayIterator : public JSObject {
// (See https://tc39.github.io/proposal-async-iteration/#sec-iteration)
class JSAsyncFromSyncIterator : public JSObject {
public:
- DECLARE_CAST(JSAsyncFromSyncIterator)
- DECLARE_PRINTER(JSAsyncFromSyncIterator)
- DECLARE_VERIFIER(JSAsyncFromSyncIterator)
+ DECL_CAST(JSAsyncFromSyncIterator)
+ DECL_PRINTER(JSAsyncFromSyncIterator)
+ DECL_VERIFIER(JSAsyncFromSyncIterator)
// Async-from-Sync Iterator instances are ordinary objects that inherit
// properties from the %AsyncFromSyncIteratorPrototype% intrinsic object.
@@ -8405,10 +6443,10 @@ class JSAsyncFromSyncIterator : public JSObject {
class JSStringIterator : public JSObject {
public:
// Dispatched behavior.
- DECLARE_PRINTER(JSStringIterator)
- DECLARE_VERIFIER(JSStringIterator)
+ DECL_PRINTER(JSStringIterator)
+ DECL_VERIFIER(JSStringIterator)
- DECLARE_CAST(JSStringIterator)
+ DECL_CAST(JSStringIterator)
// [string]: the [[IteratedString]] inobject property.
DECL_ACCESSORS(string, String)
@@ -8428,7 +6466,7 @@ class JSStringIterator : public JSObject {
// Base class for both JSWeakMap and JSWeakSet
class JSWeakCollection: public JSObject {
public:
- DECLARE_CAST(JSWeakCollection)
+ DECL_CAST(JSWeakCollection)
// [table]: the backing hash table mapping keys to values.
DECL_ACCESSORS(table, Object)
@@ -8450,17 +6488,17 @@ class JSWeakCollection: public JSObject {
// Visiting policy defines whether the table and next collection fields
// should be visited or not.
- enum BodyVisitingPolicy { kVisitStrong, kVisitWeak };
+ enum BodyVisitingPolicy { kIgnoreWeakness, kRespectWeakness };
// Iterates the function object according to the visiting policy.
template <BodyVisitingPolicy>
class BodyDescriptorImpl;
// Visit the whole object.
- typedef BodyDescriptorImpl<kVisitStrong> BodyDescriptor;
+ typedef BodyDescriptorImpl<kIgnoreWeakness> BodyDescriptor;
// Don't visit table and next collection fields.
- typedef BodyDescriptorImpl<kVisitWeak> BodyDescriptorWeak;
+ typedef BodyDescriptorImpl<kRespectWeakness> BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
@@ -8470,11 +6508,11 @@ class JSWeakCollection: public JSObject {
// The JSWeakMap describes EcmaScript Harmony weak maps
class JSWeakMap: public JSWeakCollection {
public:
- DECLARE_CAST(JSWeakMap)
+ DECL_CAST(JSWeakMap)
// Dispatched behavior.
- DECLARE_PRINTER(JSWeakMap)
- DECLARE_VERIFIER(JSWeakMap)
+ DECL_PRINTER(JSWeakMap)
+ DECL_VERIFIER(JSWeakMap)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakMap);
@@ -8484,11 +6522,11 @@ class JSWeakMap: public JSWeakCollection {
// The JSWeakSet describes EcmaScript Harmony weak sets
class JSWeakSet: public JSWeakCollection {
public:
- DECLARE_CAST(JSWeakSet)
+ DECL_CAST(JSWeakSet)
// Dispatched behavior.
- DECLARE_PRINTER(JSWeakSet)
- DECLARE_VERIFIER(JSWeakSet)
+ DECL_PRINTER(JSWeakSet)
+ DECL_VERIFIER(JSWeakSet)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakSet);
@@ -8542,7 +6580,7 @@ class JSArrayBuffer: public JSObject {
inline bool is_wasm_buffer();
inline void set_is_wasm_buffer(bool value);
- DECLARE_CAST(JSArrayBuffer)
+ DECL_CAST(JSArrayBuffer)
void Neuter();
@@ -8568,8 +6606,8 @@ class JSArrayBuffer: public JSObject {
SharedFlag shared = SharedFlag::kNotShared) WARN_UNUSED_RESULT;
// Dispatched behavior.
- DECLARE_PRINTER(JSArrayBuffer)
- DECLARE_VERIFIER(JSArrayBuffer)
+ DECL_PRINTER(JSArrayBuffer)
+ DECL_VERIFIER(JSArrayBuffer)
static const int kByteLengthOffset = JSObject::kHeaderSize;
// The rest of the fields are not JSObjects, so they are not iterated over in
@@ -8592,6 +6630,8 @@ class JSArrayBuffer: public JSObject {
// Iterates all fields in the object including internal ones except
// kBackingStoreOffset and kBitFieldSlot.
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
class IsExternal : public BitField<bool, 1, 1> {};
class IsNeuterable : public BitField<bool, 2, 1> {};
@@ -8616,9 +6656,9 @@ class JSArrayBufferView: public JSObject {
// [byte_length]: length of typed array in bytes.
DECL_ACCESSORS(byte_length, Object)
- DECLARE_CAST(JSArrayBufferView)
+ DECL_CAST(JSArrayBufferView)
- DECLARE_VERIFIER(JSArrayBufferView)
+ DECL_VERIFIER(JSArrayBufferView)
inline bool WasNeutered() const;
@@ -8648,7 +6688,7 @@ class JSTypedArray: public JSArrayBufferView {
Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
PropertyDescriptor* desc, ShouldThrow should_throw);
- DECLARE_CAST(JSTypedArray)
+ DECL_CAST(JSTypedArray)
ExternalArrayType type();
V8_EXPORT_PRIVATE size_t element_size();
@@ -8670,8 +6710,8 @@ class JSTypedArray: public JSArrayBufferView {
const char* method_name);
// Dispatched behavior.
- DECLARE_PRINTER(JSTypedArray)
- DECLARE_VERIFIER(JSTypedArray)
+ DECL_PRINTER(JSTypedArray)
+ DECL_VERIFIER(JSTypedArray)
static const int kLengthOffset = kViewSize + kPointerSize;
static const int kSize = kLengthOffset + kPointerSize;
@@ -8692,11 +6732,11 @@ class JSTypedArray: public JSArrayBufferView {
class JSDataView: public JSArrayBufferView {
public:
- DECLARE_CAST(JSDataView)
+ DECL_CAST(JSDataView)
// Dispatched behavior.
- DECLARE_PRINTER(JSDataView)
- DECLARE_VERIFIER(JSDataView)
+ DECL_PRINTER(JSDataView)
+ DECL_VERIFIER(JSDataView)
static const int kSize = kViewSize;
@@ -8715,11 +6755,11 @@ class Foreign: public HeapObject {
inline Address foreign_address();
inline void set_foreign_address(Address value);
- DECLARE_CAST(Foreign)
+ DECL_CAST(Foreign)
// Dispatched behavior.
- DECLARE_PRINTER(Foreign)
- DECLARE_VERIFIER(Foreign)
+ DECL_PRINTER(Foreign)
+ DECL_VERIFIER(Foreign)
// Layout description.
@@ -8729,6 +6769,8 @@ class Foreign: public HeapObject {
STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
@@ -8790,11 +6832,11 @@ class JSArray: public JSObject {
// to Proxies and objects with a hidden prototype.
inline bool HasArrayPrototype(Isolate* isolate);
- DECLARE_CAST(JSArray)
+ DECL_CAST(JSArray)
// Dispatched behavior.
- DECLARE_PRINTER(JSArray)
- DECLARE_VERIFIER(JSArray)
+ DECL_PRINTER(JSArray)
+ DECL_VERIFIER(JSArray)
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
@@ -8867,7 +6909,7 @@ class AccessorInfo: public Struct {
Address redirected_getter() const;
// Dispatched behavior.
- DECLARE_PRINTER(AccessorInfo)
+ DECL_PRINTER(AccessorInfo)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -8893,10 +6935,10 @@ class AccessorInfo: public Struct {
Handle<Map> map);
inline bool IsCompatibleReceiver(Object* receiver);
- DECLARE_CAST(AccessorInfo)
+ DECL_CAST(AccessorInfo)
// Dispatched behavior.
- DECLARE_VERIFIER(AccessorInfo)
+ DECL_VERIFIER(AccessorInfo)
// Append all descriptors to the array that are not already there.
// Return number added.
@@ -8940,7 +6982,7 @@ class AccessorPair: public Struct {
DECL_ACCESSORS(getter, Object)
DECL_ACCESSORS(setter, Object)
- DECLARE_CAST(AccessorPair)
+ DECL_CAST(AccessorPair)
static Handle<AccessorPair> Copy(Handle<AccessorPair> pair);
@@ -8960,8 +7002,8 @@ class AccessorPair: public Struct {
inline bool ContainsAccessor();
// Dispatched behavior.
- DECLARE_PRINTER(AccessorPair)
- DECLARE_VERIFIER(AccessorPair)
+ DECL_PRINTER(AccessorPair)
+ DECL_VERIFIER(AccessorPair)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -8986,11 +7028,11 @@ class AccessCheckInfo: public Struct {
DECL_ACCESSORS(indexed_interceptor, Object)
DECL_ACCESSORS(data, Object)
- DECLARE_CAST(AccessCheckInfo)
+ DECL_CAST(AccessCheckInfo)
// Dispatched behavior.
- DECLARE_PRINTER(AccessCheckInfo)
- DECLARE_VERIFIER(AccessCheckInfo)
+ DECL_PRINTER(AccessCheckInfo)
+ DECL_VERIFIER(AccessCheckInfo)
static AccessCheckInfo* Get(Isolate* isolate, Handle<JSObject> receiver);
@@ -9023,11 +7065,11 @@ class InterceptorInfo: public Struct {
inline int flags() const;
inline void set_flags(int flags);
- DECLARE_CAST(InterceptorInfo)
+ DECL_CAST(InterceptorInfo)
// Dispatched behavior.
- DECLARE_PRINTER(InterceptorInfo)
- DECLARE_VERIFIER(InterceptorInfo)
+ DECL_PRINTER(InterceptorInfo)
+ DECL_VERIFIER(InterceptorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
@@ -9053,7 +7095,7 @@ class CallHandlerInfo : public Tuple2 {
DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
- DECLARE_CAST(CallHandlerInfo)
+ DECL_CAST(CallHandlerInfo)
static const int kCallbackOffset = kValue1Offset;
static const int kDataOffset = kValue2Offset;
@@ -9071,9 +7113,9 @@ class TemplateInfo: public Struct {
DECL_ACCESSORS(property_list, Object)
DECL_ACCESSORS(property_accessors, Object)
- DECLARE_VERIFIER(TemplateInfo)
+ DECL_VERIFIER(TemplateInfo)
- DECLARE_CAST(TemplateInfo)
+ DECL_CAST(TemplateInfo)
static const int kTagOffset = HeapObject::kHeaderSize;
static const int kSerialNumberOffset = kTagOffset + kPointerSize;
@@ -9094,45 +7136,93 @@ class TemplateInfo: public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
};
-
+// See the api-exposed FunctionTemplate for more information.
class FunctionTemplateInfo: public TemplateInfo {
public:
+ // Handler invoked when calling an instance of this FunctionTemplateInfo.
+ // Either CallHandlerInfo or Undefined.
DECL_ACCESSORS(call_code, Object)
+
+ // ObjectTemplateInfo or Undefined, used for the prototype property of the
+ // resulting JSFunction instance of this FunctionTemplate.
DECL_ACCESSORS(prototype_template, Object)
+
+ // In case the prototype_template is Undefined, we use the
+ // prototype_provider_template to retrieve the instance prototype. Either
+ // contains an ObjectTemplateInfo or Undefined.
DECL_ACCESSORS(prototype_provider_template, Object)
+
+ // Used to create prototype chains. The parent_template's prototype is set as
+ // __proto__ of this FunctionTemplate's instance prototype. Either a
+ // FunctionTemplateInfo or Undefined.
DECL_ACCESSORS(parent_template, Object)
+
+ // Returns an InterceptorInfo or Undefined for named properties.
DECL_ACCESSORS(named_property_handler, Object)
+ // Returns an InterceptorInfo or Undefined for indexed properties/elements.
DECL_ACCESSORS(indexed_property_handler, Object)
+
+ // An ObjectTemplateInfo that is used when instantiating the JSFunction
+ // associated with this FunctionTemplateInfo. Contains either an
+ // ObjectTemplateInfo or Undefined. A default instance_template is assigned
+ // upon first instantiation if it's Undefined.
DECL_ACCESSORS(instance_template, Object)
+
DECL_ACCESSORS(class_name, Object)
+
+ // If the signature is a FunctionTemplateInfo it is used to check whether the
+ // receiver calling the associated JSFunction is a compatible receiver, i.e.
+ // it is an instance of the signature FunctionTemplateInfo or any of the
+ // receiver's prototypes are.
DECL_ACCESSORS(signature, Object)
+
+ // Either a CallHandlerInfo or Undefined. If an instance_call_handler is
+ // provided, the instances created from the associated JSFunction are marked as
+ // callable.
DECL_ACCESSORS(instance_call_handler, Object)
+
DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(shared_function_info, Object)
- DECL_ACCESSORS(js_function, Object)
+
+ // Internal field to store a flag bitfield.
DECL_INT_ACCESSORS(flag)
- inline int length() const;
- inline void set_length(int value);
+ // "length" property of the final JSFunction.
+ DECL_INT_ACCESSORS(length)
- // Following properties use flag bits.
+ // Either the_hole or a private symbol. Used to cache the result on
+ // the receiver under the cached_property_name when this
+ // FunctionTemplateInfo is used as a getter.
+ DECL_ACCESSORS(cached_property_name, Object)
+
+ // Begin flag bits ---------------------
DECL_BOOLEAN_ACCESSORS(hidden_prototype)
DECL_BOOLEAN_ACCESSORS(undetectable)
- // If the bit is set, object instances created by this function
+
+ // If set, object instances created by this function
// requires access check.
DECL_BOOLEAN_ACCESSORS(needs_access_check)
+
DECL_BOOLEAN_ACCESSORS(read_only_prototype)
+
+ // If set, do not create a prototype property for the associated
+ // JSFunction. This bit implies that neither the prototype_template nor the
+ // prototype_provider_template is instantiated.
DECL_BOOLEAN_ACCESSORS(remove_prototype)
+
+ // If set, do not attach a serial number to this FunctionTemplate and thus do
+ // not keep an instance boilerplate around.
DECL_BOOLEAN_ACCESSORS(do_not_cache)
- DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
- DECL_ACCESSORS(cached_property_name, Object)
+ // If not set, an access check may be performed when calling the associated
+ // JSFunction.
+ DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
+ // End flag bits ---------------------
- DECLARE_CAST(FunctionTemplateInfo)
+ DECL_CAST(FunctionTemplateInfo)
// Dispatched behavior.
- DECLARE_PRINTER(FunctionTemplateInfo)
- DECLARE_VERIFIER(FunctionTemplateInfo)
+ DECL_PRINTER(FunctionTemplateInfo)
+ DECL_VERIFIER(FunctionTemplateInfo)
static const int kInvalidSerialNumber = 0;
@@ -9162,7 +7252,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kSize = kCachedPropertyNameOffset + kPointerSize;
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
- Isolate* isolate, Handle<FunctionTemplateInfo> info);
+ Isolate* isolate, Handle<FunctionTemplateInfo> info,
+ MaybeHandle<Name> maybe_name);
// Returns parent function template or null.
inline FunctionTemplateInfo* GetParent(Isolate* isolate);
// Returns true if |object| is an instance of this function template.
@@ -9195,11 +7286,11 @@ class ObjectTemplateInfo: public TemplateInfo {
DECL_INT_ACCESSORS(embedder_field_count)
DECL_BOOLEAN_ACCESSORS(immutable_proto)
- DECLARE_CAST(ObjectTemplateInfo)
+ DECL_CAST(ObjectTemplateInfo)
// Dispatched behavior.
- DECLARE_PRINTER(ObjectTemplateInfo)
- DECLARE_VERIFIER(ObjectTemplateInfo)
+ DECL_PRINTER(ObjectTemplateInfo)
+ DECL_VERIFIER(ObjectTemplateInfo)
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
// LSB is for immutable_proto, higher bits for embedder_field_count
@@ -9216,102 +7307,6 @@ class ObjectTemplateInfo: public TemplateInfo {
: public BitField<int, IsImmutablePrototype::kNext, 29> {};
};
-
-// The DebugInfo class holds additional information for a function being
-// debugged.
-class DebugInfo: public Struct {
- public:
- // The shared function info for the source being debugged.
- DECL_ACCESSORS(shared, SharedFunctionInfo)
-
- // Bit field containing various information collected for debugging.
- DECL_INT_ACCESSORS(debugger_hints)
-
- DECL_ACCESSORS(debug_bytecode_array, Object)
- // Fixed array holding status information for each active break point.
- DECL_ACCESSORS(break_points, FixedArray)
-
- // Check if there is a break point at a source position.
- bool HasBreakPoint(int source_position);
- // Attempt to clear a break point. Return true if successful.
- static bool ClearBreakPoint(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
- // Set a break point.
- static void SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
- Handle<Object> break_point_object);
- // Get the break point objects for a source position.
- Handle<Object> GetBreakPointObjects(int source_position);
- // Find the break point info holding this break point object.
- static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
- // Get the number of break points for this function.
- int GetBreakPointCount();
-
- inline bool HasDebugBytecodeArray();
- inline bool HasDebugCode();
-
- inline BytecodeArray* OriginalBytecodeArray();
- inline BytecodeArray* DebugBytecodeArray();
- inline Code* DebugCode();
-
- DECLARE_CAST(DebugInfo)
-
- // Dispatched behavior.
- DECLARE_PRINTER(DebugInfo)
- DECLARE_VERIFIER(DebugInfo)
-
- static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kDebuggerHintsIndex =
- kSharedFunctionInfoIndex + kPointerSize;
- static const int kDebugBytecodeArrayIndex =
- kDebuggerHintsIndex + kPointerSize;
- static const int kBreakPointsStateIndex =
- kDebugBytecodeArrayIndex + kPointerSize;
- static const int kSize = kBreakPointsStateIndex + kPointerSize;
-
- static const int kEstimatedNofBreakPointsInFunction = 4;
-
- private:
- // Get the break point info object for a source position.
- Object* GetBreakPointInfo(int source_position);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
-};
-
-
-// The BreakPointInfo class holds information for break points set in a
-// function. The DebugInfo object holds a BreakPointInfo object for each code
-// position with one or more break points.
-class BreakPointInfo : public Tuple2 {
- public:
- // The position in the source for the break position.
- DECL_INT_ACCESSORS(source_position)
- // List of related JavaScript break points.
- DECL_ACCESSORS(break_point_objects, Object)
-
- // Removes a break point.
- static void ClearBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Set a break point.
- static void SetBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Check if break point info has this break point object.
- static bool HasBreakPointObject(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Get the number of break points for this code offset.
- int GetBreakPointCount();
-
- int GetStatementPosition(Handle<DebugInfo> debug_info);
-
- DECLARE_CAST(BreakPointInfo)
-
- static const int kSourcePositionIndex = kValue1Offset;
- static const int kBreakPointObjectsIndex = kValue2Offset;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
-};
-
class StackFrameInfo : public Struct {
public:
DECL_INT_ACCESSORS(line_number)
@@ -9326,11 +7321,11 @@ class StackFrameInfo : public Struct {
DECL_INT_ACCESSORS(flag)
DECL_INT_ACCESSORS(id)
- DECLARE_CAST(StackFrameInfo)
+ DECL_CAST(StackFrameInfo)
// Dispatched behavior.
- DECLARE_PRINTER(StackFrameInfo)
- DECLARE_VERIFIER(StackFrameInfo)
+ DECL_PRINTER(StackFrameInfo)
+ DECL_VERIFIER(StackFrameInfo)
static const int kLineNumberIndex = Struct::kHeaderSize;
static const int kColumnNumberIndex = kLineNumberIndex + kPointerSize;
@@ -9358,7 +7353,7 @@ class SourcePositionTableWithFrameCache : public Tuple2 {
DECL_ACCESSORS(source_position_table, ByteArray)
DECL_ACCESSORS(stack_frame_cache, UnseededNumberDictionary)
- DECLARE_CAST(SourcePositionTableWithFrameCache)
+ DECL_CAST(SourcePositionTableWithFrameCache)
static const int kSourcePositionTableIndex = Struct::kHeaderSize;
static const int kStackFrameCacheIndex =
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
new file mode 100644
index 0000000000..32f5359e4b
--- /dev/null
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -0,0 +1,57 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ARGUMENTS_INL_H_
+#define V8_OBJECTS_ARGUMENTS_INL_H_
+
+#include "src/objects/arguments.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(AliasedArgumentsEntry)
+CAST_ACCESSOR(JSArgumentsObject)
+CAST_ACCESSOR(JSSloppyArgumentsObject)
+CAST_ACCESSOR(SloppyArgumentsElements)
+
+ACCESSORS(JSArgumentsObject, length, Object, kLengthOffset);
+ACCESSORS(JSSloppyArgumentsObject, callee, Object, kCalleeOffset);
+
+SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
+
+TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
+
+Context* SloppyArgumentsElements::context() {
+ return Context::cast(get(kContextIndex));
+}
+
+FixedArray* SloppyArgumentsElements::arguments() {
+ return FixedArray::cast(get(kArgumentsIndex));
+}
+
+void SloppyArgumentsElements::set_arguments(FixedArray* arguments) {
+ set(kArgumentsIndex, arguments);
+}
+
+uint32_t SloppyArgumentsElements::parameter_map_length() {
+ return length() - kParameterMapStart;
+}
+
+Object* SloppyArgumentsElements::get_mapped_entry(uint32_t entry) {
+ return get(entry + kParameterMapStart);
+}
+
+void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object* object) {
+ set(entry + kParameterMapStart, object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_ARGUMENTS_INL_H_
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
new file mode 100644
index 0000000000..61c1a9cb41
--- /dev/null
+++ b/deps/v8/src/objects/arguments.h
@@ -0,0 +1,141 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ARGUMENTS_H_
+#define V8_OBJECTS_ARGUMENTS_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
+class JSArgumentsObject : public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kLengthOffset = JSObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kLengthIndex = 0;
+
+ DECL_ACCESSORS(length, Object)
+
+ DECL_VERIFIER(JSArgumentsObject)
+ DECL_CAST(JSArgumentsObject)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
+};
+
+// JSSloppyArgumentsObject is just a JSObject with specific initial map.
+// This initial map adds in-object properties for "length" and "callee".
+class JSSloppyArgumentsObject : public JSArgumentsObject {
+ public:
+ // Offsets of object fields.
+ static const int kCalleeOffset = JSArgumentsObject::kHeaderSize;
+ static const int kSize = kCalleeOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kCalleeIndex = kLengthIndex + 1;
+
+ DECL_ACCESSORS(callee, Object)
+
+ DECL_VERIFIER(JSSloppyArgumentsObject)
+ DECL_CAST(JSSloppyArgumentsObject)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSloppyArgumentsObject);
+};
+
+// JSStrictArgumentsObject is just a JSObject with specific initial map.
+// This initial map adds an in-object property for "length".
+class JSStrictArgumentsObject : public JSArgumentsObject {
+ public:
+ // Offsets of object fields.
+ static const int kSize = JSArgumentsObject::kHeaderSize;
+
+ DECL_CAST(JSStrictArgumentsObject)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
+};
+
+// Helper class to access FAST_ and SLOW_SLOPPY_ARGUMENTS_ELEMENTS
+//
+// +---+-----------------------+
+// | 0 | Context* context |
+// +---------------------------+
+// | 1 | FixedArray* arguments +----+ HOLEY_ELEMENTS
+// +---------------------------+ v-----+-----------+
+// | 2 | Object* param_1_map | | 0 | the_hole |
+// |...| ... | | ... | ... |
+// |n+1| Object* param_n_map | | n-1 | the_hole |
+// +---------------------------+ | n | element_1 |
+// | ... | ... |
+// |n+m-1| element_m |
+// +-----------------+
+//
+// Parameter maps give the index into the provided context. If a map entry is
+// the_hole, it means that the given entry has been deleted from the arguments
+// object.
+// The arguments backing store kind depends on the ElementsKind of the outer
+// JSArgumentsObject:
+// - FAST_SLOPPY_ARGUMENTS_ELEMENTS: HOLEY_ELEMENTS
+// - SLOW_SLOPPY_ARGUMENTS_ELEMENTS: DICTIONARY_ELEMENTS
+class SloppyArgumentsElements : public FixedArray {
+ public:
+ static const int kContextIndex = 0;
+ static const int kArgumentsIndex = 1;
+ static const uint32_t kParameterMapStart = 2;
+
+ inline Context* context();
+ inline FixedArray* arguments();
+ inline void set_arguments(FixedArray* arguments);
+ inline uint32_t parameter_map_length();
+ inline Object* get_mapped_entry(uint32_t entry);
+ inline void set_mapped_entry(uint32_t entry, Object* object);
+
+ DECL_CAST(SloppyArgumentsElements)
+#ifdef VERIFY_HEAP
+ void SloppyArgumentsElementsVerify(JSSloppyArgumentsObject* holder);
+#endif
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyArgumentsElements);
+};
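
An illustrative sketch of how this layout is typically read (editorial example, not part of this patch; the helper name is hypothetical and only the fast, non-dictionary case is covered):

  // A mapped entry holds a Smi giving a context slot (the argument aliases a
  // context variable); the_hole means the entry is unmapped and the value
  // lives in the plain arguments backing store.
  Object* ResolveSloppyArgument(Isolate* isolate,
                                SloppyArgumentsElements* elements,
                                uint32_t index) {
    if (index < elements->parameter_map_length()) {
      Object* probe = elements->get_mapped_entry(index);
      if (!probe->IsTheHole(isolate)) {
        int context_slot = Smi::ToInt(probe);
        return elements->context()->get(context_slot);
      }
    }
    return elements->arguments()->get(static_cast<int>(index));
  }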
+
+// Representation of a slow alias as part of a sloppy arguments object.
+// For fast aliases (if HasSloppyArgumentsElements()):
+// - the parameter map contains an index into the context
+// - all attributes of the element have default values
+// For slow aliases (if HasDictionaryArgumentsElements()):
+// - the parameter map contains no fast alias mapping (i.e. the hole)
+// - this struct (in the slow backing store) contains an index into the context
+// - all attributes are available as part of the property details
+class AliasedArgumentsEntry : public Struct {
+ public:
+ inline int aliased_context_slot() const;
+ inline void set_aliased_context_slot(int count);
+
+ DECL_CAST(AliasedArgumentsEntry)
+
+ // Dispatched behavior.
+ DECL_PRINTER(AliasedArgumentsEntry)
+ DECL_VERIFIER(AliasedArgumentsEntry)
+
+ static const int kAliasedContextSlot = HeapObject::kHeaderSize;
+ static const int kSize = kAliasedContextSlot + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_ARGUMENTS_H_
diff --git a/deps/v8/src/objects/code-cache-inl.h b/deps/v8/src/objects/code-cache-inl.h
index 9f0ec21be7..5d08d3c122 100644
--- a/deps/v8/src/objects/code-cache-inl.h
+++ b/deps/v8/src/objects/code-cache-inl.h
@@ -16,7 +16,7 @@ namespace internal {
CAST_ACCESSOR(CodeCacheHashTable)
Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
- HashTableKey* key) {
+ CodeCacheHashTableKey* key) {
return key->AsHandle(isolate);
}
diff --git a/deps/v8/src/objects/code-cache.h b/deps/v8/src/objects/code-cache.h
index 74f5362b61..25bdaf2159 100644
--- a/deps/v8/src/objects/code-cache.h
+++ b/deps/v8/src/objects/code-cache.h
@@ -13,19 +13,72 @@
namespace v8 {
namespace internal {
-class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
+// The key in the code cache hash table consists of the property name and the
+// code object. The actual match is on the name and the code flags. If a key
+// is created from the flags rather than a code object, it can only be used
+// for lookup, not for creating a new entry.
+class CodeCacheHashTableKey final {
public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
+ CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags)
+ : name_(name), flags_(flags), code_() {
+ DCHECK(name_->IsUniqueName());
+ }
+
+ CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
+ : name_(name), flags_(code->flags()), code_(code) {
+ DCHECK(name_->IsUniqueName());
+ }
+
+ bool IsMatch(Object* other) {
+ DCHECK(other->IsFixedArray());
+ FixedArray* pair = FixedArray::cast(other);
+ Name* name = Name::cast(pair->get(0));
+ Code::Flags flags = Code::cast(pair->get(1))->flags();
+ if (flags != flags_) return false;
+ DCHECK(name->IsUniqueName());
+ return *name_ == name;
+ }
+
+ static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) {
+ return name->Hash() ^ flags;
+ }
+
+ uint32_t Hash() { return NameFlagsHashHelper(*name_, flags_); }
+
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) {
+ Handle<Code> code = code_.ToHandleChecked();
+ Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2);
+ pair->set(0, *name_);
+ pair->set(1, *code);
+ return pair;
+ }
+
+ private:
+ Handle<Name> name_;
+ Code::Flags flags_;
+ // TODO(jkummerow): We should be able to get by without this.
+ MaybeHandle<Code> code_;
+};
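
A minimal usage sketch of the two key flavours (editorial example, not part of this patch; |name|, |code| and |isolate| are assumed to be in scope, with |name| a unique name):

  // Lookup-only key: built from the flags, so AsHandle() cannot be used to
  // materialize a new entry.
  CodeCacheHashTableKey lookup_key(name, code->flags());

  // Insertion key: carries the code object, so AsHandle() can build the
  // (name, code) pair that the table actually stores.
  CodeCacheHashTableKey insert_key(name, code);
  Handle<Object> pair = insert_key.AsHandle(isolate);  // FixedArray of length 2.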
+
+class CodeCacheHashTableShape : public BaseShape<CodeCacheHashTableKey*> {
+ public:
+ static inline bool IsMatch(CodeCacheHashTableKey* key, Object* value) {
return key->IsMatch(value);
}
- static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); }
+ static inline uint32_t Hash(Isolate* isolate, CodeCacheHashTableKey* key) {
+ return key->Hash();
+ }
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object) {
+ FixedArray* pair = FixedArray::cast(object);
+ Name* name = Name::cast(pair->get(0));
+ Code* code = Code::cast(pair->get(1));
+ return CodeCacheHashTableKey::NameFlagsHashHelper(name, code->flags());
}
- static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
+ static inline Handle<Object> AsHandle(Isolate* isolate,
+ CodeCacheHashTableKey* key);
static const int kPrefixSize = 0;
  // Both the key (name + flags) and value (code object) can be derived from
@@ -36,15 +89,14 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
};
class CodeCacheHashTable
- : public HashTable<CodeCacheHashTable, CodeCacheHashTableShape,
- HashTableKey*> {
+ : public HashTable<CodeCacheHashTable, CodeCacheHashTableShape> {
public:
static Handle<CodeCacheHashTable> Put(Handle<CodeCacheHashTable> table,
Handle<Name> name, Handle<Code> code);
Code* Lookup(Name* name, Code::Flags flags);
- DECLARE_CAST(CodeCacheHashTable)
+ DECL_CAST(CodeCacheHashTable)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index fde19117b0..42798ed5a0 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -15,9 +15,48 @@ namespace internal {
CAST_ACCESSOR(CompilationCacheTable)
-Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
- HashTableKey* key) {
- return key->AsHandle(isolate);
+uint32_t CompilationCacheShape::RegExpHash(String* string, Smi* flags) {
+ return string->Hash() + flags->value();
+}
+
+uint32_t CompilationCacheShape::StringSharedHash(String* source,
+ SharedFunctionInfo* shared,
+ LanguageMode language_mode,
+ int position) {
+ uint32_t hash = source->Hash();
+ if (shared->HasSourceCode()) {
+ // Instead of using the SharedFunctionInfo pointer in the hash
+ // code computation, we use a combination of the hash of the
+ // script source code and the start position of the calling scope.
+ // We do this to ensure that the cache entries can survive garbage
+ // collection.
+ Script* script(Script::cast(shared->script()));
+ hash ^= String::cast(script->source())->Hash();
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ if (is_strict(language_mode)) hash ^= 0x8000;
+ hash += position;
+ }
+ return hash;
+}
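
A small worked illustration of the combination above, using made-up hash values:

  //   source->Hash()                       -> 0x1234
  //   ^ script source Hash() (0x00F0)      -> 0x12C4
  //   ^ 0x8000 (strict mode)               -> 0x92C4
  //   + position (7)                       -> 0x92CB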
+
+uint32_t CompilationCacheShape::HashForObject(Isolate* isolate,
+ Object* object) {
+ if (object->IsNumber()) return static_cast<uint32_t>(object->Number());
+
+ FixedArray* val = FixedArray::cast(object);
+ if (val->map() == val->GetHeap()->fixed_cow_array_map()) {
+ DCHECK_EQ(4, val->length());
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(val->get(0));
+ String* source = String::cast(val->get(1));
+ int language_unchecked = Smi::ToInt(val->get(2));
+ DCHECK(is_valid_language_mode(language_unchecked));
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ int position = Smi::ToInt(val->get(3));
+ return StringSharedHash(source, shared, language_mode, position);
+ }
+ DCHECK_LT(2, val->length());
+ return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
+ Smi::cast(val->get(JSRegExp::kFlagsIndex)));
}
} // namespace internal
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index 55df1d7857..a2358671f5 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -19,13 +19,18 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
return key->IsMatch(value);
}
- static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); }
-
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
+ static inline uint32_t Hash(Isolate* isolate, HashTableKey* key) {
+ return key->Hash();
}
- static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
+ static inline uint32_t RegExpHash(String* string, Smi* flags);
+
+ static inline uint32_t StringSharedHash(String* source,
+ SharedFunctionInfo* shared,
+ LanguageMode language_mode,
+ int position);
+
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static const int kPrefixSize = 0;
static const int kEntrySize = 3;
@@ -60,8 +65,7 @@ class InfoVectorPair {
// recompilation stub, or to "old" code. This avoids memory leaks due to
// premature caching of scripts and eval strings that are never needed later.
class CompilationCacheTable
- : public HashTable<CompilationCacheTable, CompilationCacheShape,
- HashTableKey*> {
+ : public HashTable<CompilationCacheTable, CompilationCacheShape> {
public:
// Find cached value for a string key, otherwise return null.
Handle<Object> Lookup(Handle<String> src, Handle<Context> context,
@@ -93,7 +97,7 @@ class CompilationCacheTable
void Age();
static const int kHashGenerations = 10;
- DECLARE_CAST(CompilationCacheTable)
+ DECL_CAST(CompilationCacheTable)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable);
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
new file mode 100644
index 0000000000..1a117f15bd
--- /dev/null
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_DEBUG_OBJECTS_INL_H_
+#define V8_OBJECTS_DEBUG_OBJECTS_INL_H_
+
+#include "src/objects/debug-objects.h"
+
+#include "src/heap/heap-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(BreakPointInfo)
+CAST_ACCESSOR(DebugInfo)
+CAST_ACCESSOR(CoverageInfo)
+
+SMI_ACCESSORS(DebugInfo, flags, kFlagsOffset)
+ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
+SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsOffset)
+ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset)
+ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
+ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
+
+SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionOffset)
+ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsOffset)
+
+bool DebugInfo::HasDebugBytecodeArray() {
+ return debug_bytecode_array()->IsBytecodeArray();
+}
+
+bool DebugInfo::HasDebugCode() {
+ Code* code = shared()->code();
+ bool has = code->kind() == Code::FUNCTION;
+ DCHECK(!has || code->has_debug_break_slots());
+ return has;
+}
+
+BytecodeArray* DebugInfo::OriginalBytecodeArray() {
+ DCHECK(HasDebugBytecodeArray());
+ return shared()->bytecode_array();
+}
+
+BytecodeArray* DebugInfo::DebugBytecodeArray() {
+ DCHECK(HasDebugBytecodeArray());
+ return BytecodeArray::cast(debug_bytecode_array());
+}
+
+Code* DebugInfo::DebugCode() {
+ DCHECK(HasDebugCode());
+ return shared()->code();
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_DEBUG_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
new file mode 100644
index 0000000000..f686c99639
--- /dev/null
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -0,0 +1,337 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/debug-objects.h"
+#include "src/objects/debug-objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool DebugInfo::IsEmpty() const { return flags() == kNone; }
+
+bool DebugInfo::HasBreakInfo() const { return (flags() & kHasBreakInfo) != 0; }
+
+bool DebugInfo::ClearBreakInfo() {
+ Isolate* isolate = GetIsolate();
+
+ set_debug_bytecode_array(isolate->heap()->undefined_value());
+ set_break_points(isolate->heap()->empty_fixed_array());
+
+ int new_flags = flags() & ~kHasBreakInfo;
+ set_flags(new_flags);
+
+ return new_flags == kNone;
+}
+
+// Check if there is a break point at this source position.
+bool DebugInfo::HasBreakPoint(int source_position) {
+ DCHECK(HasBreakInfo());
+ // Get the break point info object for this code offset.
+ Object* break_point_info = GetBreakPointInfo(source_position);
+
+ // If there is no break point info object or no break points in the break
+  // point info object, there is no break point at this code offset.
+ if (break_point_info->IsUndefined(GetIsolate())) return false;
+ return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
+}
+
+// Get the break point info object for this source position.
+Object* DebugInfo::GetBreakPointInfo(int source_position) {
+ DCHECK(HasBreakInfo());
+ Isolate* isolate = GetIsolate();
+ if (!break_points()->IsUndefined(isolate)) {
+ for (int i = 0; i < break_points()->length(); i++) {
+ if (!break_points()->get(i)->IsUndefined(isolate)) {
+ BreakPointInfo* break_point_info =
+ BreakPointInfo::cast(break_points()->get(i));
+ if (break_point_info->source_position() == source_position) {
+ return break_point_info;
+ }
+ }
+ }
+ }
+ return isolate->heap()->undefined_value();
+}
+
+bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
+ Handle<Object> break_point_object) {
+ DCHECK(debug_info->HasBreakInfo());
+ Isolate* isolate = debug_info->GetIsolate();
+ if (debug_info->break_points()->IsUndefined(isolate)) return false;
+
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
+ Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
+ BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ if (BreakPointInfo::HasBreakPointObject(break_point_info,
+ break_point_object)) {
+ BreakPointInfo::ClearBreakPoint(break_point_info, break_point_object);
+ return true;
+ }
+ }
+ return false;
+}
+
+void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
+ Handle<Object> break_point_object) {
+ DCHECK(debug_info->HasBreakInfo());
+ Isolate* isolate = debug_info->GetIsolate();
+ Handle<Object> break_point_info(
+ debug_info->GetBreakPointInfo(source_position), isolate);
+ if (!break_point_info->IsUndefined(isolate)) {
+ BreakPointInfo::SetBreakPoint(
+ Handle<BreakPointInfo>::cast(break_point_info), break_point_object);
+ return;
+ }
+
+ // Adding a new break point for a code offset which did not have any
+ // break points before. Try to find a free slot.
+ static const int kNoBreakPointInfo = -1;
+ int index = kNoBreakPointInfo;
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ index = i;
+ break;
+ }
+ }
+ if (index == kNoBreakPointInfo) {
+ // No free slot - extend break point info array.
+ Handle<FixedArray> old_break_points = Handle<FixedArray>(
+ FixedArray::cast(debug_info->break_points()), isolate);
+ Handle<FixedArray> new_break_points = isolate->factory()->NewFixedArray(
+ old_break_points->length() +
+ DebugInfo::kEstimatedNofBreakPointsInFunction);
+
+ debug_info->set_break_points(*new_break_points);
+ for (int i = 0; i < old_break_points->length(); i++) {
+ new_break_points->set(i, old_break_points->get(i));
+ }
+ index = old_break_points->length();
+ }
+ DCHECK(index != kNoBreakPointInfo);
+
+ // Allocate new BreakPointInfo object and set the break point.
+ Handle<BreakPointInfo> new_break_point_info =
+ isolate->factory()->NewBreakPointInfo(source_position);
+ BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
+ debug_info->break_points()->set(index, *new_break_point_info);
+}
+
+// Get the break point objects for a source position.
+Handle<Object> DebugInfo::GetBreakPointObjects(int source_position) {
+ DCHECK(HasBreakInfo());
+ Object* break_point_info = GetBreakPointInfo(source_position);
+ Isolate* isolate = GetIsolate();
+ if (break_point_info->IsUndefined(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+ return Handle<Object>(
+ BreakPointInfo::cast(break_point_info)->break_point_objects(), isolate);
+}
+
+// Get the total number of break points.
+int DebugInfo::GetBreakPointCount() {
+ DCHECK(HasBreakInfo());
+ Isolate* isolate = GetIsolate();
+ if (break_points()->IsUndefined(isolate)) return 0;
+ int count = 0;
+ for (int i = 0; i < break_points()->length(); i++) {
+ if (!break_points()->get(i)->IsUndefined(isolate)) {
+ BreakPointInfo* break_point_info =
+ BreakPointInfo::cast(break_points()->get(i));
+ count += break_point_info->GetBreakPointCount();
+ }
+ }
+ return count;
+}
+
+Handle<Object> DebugInfo::FindBreakPointInfo(
+ Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
+ DCHECK(debug_info->HasBreakInfo());
+ Isolate* isolate = debug_info->GetIsolate();
+ if (!debug_info->break_points()->IsUndefined(isolate)) {
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
+ BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ if (BreakPointInfo::HasBreakPointObject(break_point_info,
+ break_point_object)) {
+ return break_point_info;
+ }
+ }
+ }
+ }
+ return isolate->factory()->undefined_value();
+}
+
+bool DebugInfo::HasCoverageInfo() const {
+ return (flags() & kHasCoverageInfo) != 0;
+}
+
+bool DebugInfo::ClearCoverageInfo() {
+ DCHECK(FLAG_block_coverage);
+ DCHECK(HasCoverageInfo());
+ Isolate* isolate = GetIsolate();
+
+ set_coverage_info(isolate->heap()->undefined_value());
+
+ int new_flags = flags() & ~kHasCoverageInfo;
+ set_flags(new_flags);
+
+ return new_flags == kNone;
+}
+
+// Remove the specified break point object.
+void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
+ Handle<Object> break_point_object) {
+ Isolate* isolate = break_point_info->GetIsolate();
+ // If there are no break points just ignore.
+ if (break_point_info->break_point_objects()->IsUndefined(isolate)) return;
+  // If there is a single break point, clear it if it is the same one.
+ if (!break_point_info->break_point_objects()->IsFixedArray()) {
+ if (break_point_info->break_point_objects() == *break_point_object) {
+ break_point_info->set_break_point_objects(
+ isolate->heap()->undefined_value());
+ }
+ return;
+ }
+  // If there are multiple break points, shrink the array.
+ DCHECK(break_point_info->break_point_objects()->IsFixedArray());
+ Handle<FixedArray> old_array = Handle<FixedArray>(
+ FixedArray::cast(break_point_info->break_point_objects()));
+ Handle<FixedArray> new_array =
+ isolate->factory()->NewFixedArray(old_array->length() - 1);
+ int found_count = 0;
+ for (int i = 0; i < old_array->length(); i++) {
+ if (old_array->get(i) == *break_point_object) {
+ DCHECK(found_count == 0);
+ found_count++;
+ } else {
+ new_array->set(i - found_count, old_array->get(i));
+ }
+ }
+ // If the break point was found in the list change it.
+ if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
+}
+
+// Add the specified break point object.
+void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
+ Handle<Object> break_point_object) {
+ Isolate* isolate = break_point_info->GetIsolate();
+
+  // If there were no break point objects before, just set it.
+ if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
+ break_point_info->set_break_point_objects(*break_point_object);
+ return;
+ }
+ // If the break point object is the same as before just ignore.
+ if (break_point_info->break_point_objects() == *break_point_object) return;
+  // If there was one break point object before, replace it with an array.
+ if (!break_point_info->break_point_objects()->IsFixedArray()) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
+ array->set(0, break_point_info->break_point_objects());
+ array->set(1, *break_point_object);
+ break_point_info->set_break_point_objects(*array);
+ return;
+ }
+  // If there was more than one break point before, extend the array.
+ Handle<FixedArray> old_array = Handle<FixedArray>(
+ FixedArray::cast(break_point_info->break_point_objects()));
+ Handle<FixedArray> new_array =
+ isolate->factory()->NewFixedArray(old_array->length() + 1);
+ for (int i = 0; i < old_array->length(); i++) {
+ // If the break point was there before just ignore.
+ if (old_array->get(i) == *break_point_object) return;
+ new_array->set(i, old_array->get(i));
+ }
+ // Add the new break point.
+ new_array->set(old_array->length(), *break_point_object);
+ break_point_info->set_break_point_objects(*new_array);
+}
+
+bool BreakPointInfo::HasBreakPointObject(
+ Handle<BreakPointInfo> break_point_info,
+ Handle<Object> break_point_object) {
+ // No break point.
+ Isolate* isolate = break_point_info->GetIsolate();
+ if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
+ return false;
+ }
+ // Single break point.
+ if (!break_point_info->break_point_objects()->IsFixedArray()) {
+ return break_point_info->break_point_objects() == *break_point_object;
+ }
+ // Multiple break points.
+ FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
+ for (int i = 0; i < array->length(); i++) {
+ if (array->get(i) == *break_point_object) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Get the number of break points.
+int BreakPointInfo::GetBreakPointCount() {
+ // No break point.
+ if (break_point_objects()->IsUndefined(GetIsolate())) return 0;
+ // Single break point.
+ if (!break_point_objects()->IsFixedArray()) return 1;
+ // Multiple break points.
+ return FixedArray::cast(break_point_objects())->length();
+}
+
+int CoverageInfo::SlotCount() const {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_EQ(kFirstSlotIndex, length() % kSlotIndexCount);
+ return (length() - kFirstSlotIndex) / kSlotIndexCount;
+}
+
+int CoverageInfo::StartSourcePosition(int slot_index) const {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_LT(slot_index, SlotCount());
+ const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
+ return Smi::ToInt(get(slot_start + kSlotStartSourcePositionIndex));
+}
+
+int CoverageInfo::EndSourcePosition(int slot_index) const {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_LT(slot_index, SlotCount());
+ const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
+ return Smi::ToInt(get(slot_start + kSlotEndSourcePositionIndex));
+}
+
+int CoverageInfo::BlockCount(int slot_index) const {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_LT(slot_index, SlotCount());
+ const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
+ return Smi::ToInt(get(slot_start + kSlotBlockCountIndex));
+}
+
+void CoverageInfo::InitializeSlot(int slot_index, int from_pos, int to_pos) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_LT(slot_index, SlotCount());
+ const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
+ set(slot_start + kSlotStartSourcePositionIndex, Smi::FromInt(from_pos));
+ set(slot_start + kSlotEndSourcePositionIndex, Smi::FromInt(to_pos));
+ set(slot_start + kSlotBlockCountIndex, Smi::kZero);
+}
+
+void CoverageInfo::IncrementBlockCount(int slot_index) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_LT(slot_index, SlotCount());
+ const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
+ const int old_count = BlockCount(slot_index);
+ set(slot_start + kSlotBlockCountIndex, Smi::FromInt(old_count + 1));
+}
+
+void CoverageInfo::ResetBlockCount(int slot_index) {
+ DCHECK(FLAG_block_coverage);
+ DCHECK_LT(slot_index, SlotCount());
+ const int slot_start = CoverageInfo::FirstIndexForSlot(slot_index);
+ set(slot_start + kSlotBlockCountIndex, Smi::kZero);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
new file mode 100644
index 0000000000..1c874fc77d
--- /dev/null
+++ b/deps/v8/src/objects/debug-objects.h
@@ -0,0 +1,187 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_DEBUG_OBJECTS_H_
+#define V8_OBJECTS_DEBUG_OBJECTS_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The DebugInfo class holds additional information for a function being
+// debugged.
+class DebugInfo : public Struct {
+ public:
+ enum Flag {
+ kNone = 0,
+ kHasBreakInfo = 1 << 0,
+ kHasCoverageInfo = 1 << 1,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ // A bitfield that lists uses of the current instance.
+ DECL_INT_ACCESSORS(flags)
+
+ // The shared function info for the source being debugged.
+ DECL_ACCESSORS(shared, SharedFunctionInfo)
+
+ // Bit field containing various information collected for debugging.
+ DECL_INT_ACCESSORS(debugger_hints)
+
+ // DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
+ bool IsEmpty() const;
+
+ // --- Break points ---
+ // --------------------
+
+ bool HasBreakInfo() const;
+
+ // Clears all fields related to break points. Returns true iff the
+ // DebugInfo is now empty.
+ bool ClearBreakInfo();
+
+ // The instrumented bytecode array for functions with break points.
+ DECL_ACCESSORS(debug_bytecode_array, Object)
+
+ // Fixed array holding status information for each active break point.
+ DECL_ACCESSORS(break_points, FixedArray)
+
+ // Check if there is a break point at a source position.
+ bool HasBreakPoint(int source_position);
+ // Attempt to clear a break point. Return true if successful.
+ static bool ClearBreakPoint(Handle<DebugInfo> debug_info,
+ Handle<Object> break_point_object);
+ // Set a break point.
+ static void SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
+ Handle<Object> break_point_object);
+ // Get the break point objects for a source position.
+ Handle<Object> GetBreakPointObjects(int source_position);
+ // Find the break point info holding this break point object.
+ static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
+ Handle<Object> break_point_object);
+ // Get the number of break points for this function.
+ int GetBreakPointCount();
+
+ inline bool HasDebugBytecodeArray();
+ inline bool HasDebugCode();
+
+ inline BytecodeArray* OriginalBytecodeArray();
+ inline BytecodeArray* DebugBytecodeArray();
+ inline Code* DebugCode();
+
+ // --- Block Coverage ---
+ // ----------------------
+
+ bool HasCoverageInfo() const;
+
+ // Clears all fields related to block coverage. Returns true iff the
+ // DebugInfo is now empty.
+ bool ClearCoverageInfo();
+ DECL_ACCESSORS(coverage_info, Object)
+
+ DECL_CAST(DebugInfo)
+
+ // Dispatched behavior.
+ DECL_PRINTER(DebugInfo)
+ DECL_VERIFIER(DebugInfo)
+
+ static const int kSharedFunctionInfoOffset = Struct::kHeaderSize;
+ static const int kDebuggerHintsOffset =
+ kSharedFunctionInfoOffset + kPointerSize;
+ static const int kDebugBytecodeArrayOffset =
+ kDebuggerHintsOffset + kPointerSize;
+ static const int kBreakPointsStateOffset =
+ kDebugBytecodeArrayOffset + kPointerSize;
+ static const int kFlagsOffset = kBreakPointsStateOffset + kPointerSize;
+ static const int kCoverageInfoOffset = kFlagsOffset + kPointerSize;
+ static const int kSize = kCoverageInfoOffset + kPointerSize;
+
+ static const int kEstimatedNofBreakPointsInFunction = 4;
+
+ private:
+ // Get the break point info object for a source position.
+ Object* GetBreakPointInfo(int source_position);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
+};
+
+// The BreakPointInfo class holds information for break points set in a
+// function. The DebugInfo object holds a BreakPointInfo object for each code
+// position with one or more break points.
+class BreakPointInfo : public Tuple2 {
+ public:
+ // The position in the source for the break position.
+ DECL_INT_ACCESSORS(source_position)
+ // List of related JavaScript break points.
+ DECL_ACCESSORS(break_point_objects, Object)
+
+ // Removes a break point.
+ static void ClearBreakPoint(Handle<BreakPointInfo> info,
+ Handle<Object> break_point_object);
+ // Set a break point.
+ static void SetBreakPoint(Handle<BreakPointInfo> info,
+ Handle<Object> break_point_object);
+ // Check if break point info has this break point object.
+ static bool HasBreakPointObject(Handle<BreakPointInfo> info,
+ Handle<Object> break_point_object);
+ // Get the number of break points for this code offset.
+ int GetBreakPointCount();
+
+ int GetStatementPosition(Handle<DebugInfo> debug_info);
+
+ DECL_CAST(BreakPointInfo)
+
+ static const int kSourcePositionOffset = kValue1Offset;
+ static const int kBreakPointObjectsOffset = kValue2Offset;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
+};
+
+// Holds information related to block code coverage.
+class CoverageInfo : public FixedArray {
+ public:
+ int SlotCount() const;
+
+ int StartSourcePosition(int slot_index) const;
+ int EndSourcePosition(int slot_index) const;
+ int BlockCount(int slot_index) const;
+
+ void InitializeSlot(int slot_index, int start_pos, int end_pos);
+ void IncrementBlockCount(int slot_index);
+ void ResetBlockCount(int slot_index);
+
+ static int FixedArrayLengthForSlotCount(int slot_count) {
+ return slot_count * kSlotIndexCount + kFirstSlotIndex;
+ }
+
+ DECL_CAST(CoverageInfo)
+
+ private:
+ static int FirstIndexForSlot(int slot_index) {
+ return kFirstSlotIndex + slot_index * kSlotIndexCount;
+ }
+
+ static const int kFirstSlotIndex = 0;
+
+ // Each slot is assigned a group of indices starting at kFirstSlotIndex.
+ // Within this group, semantics are as follows:
+ static const int kSlotStartSourcePositionIndex = 0;
+ static const int kSlotEndSourcePositionIndex = 1;
+ static const int kSlotBlockCountIndex = 2;
+ static const int kSlotIndexCount = 3;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CoverageInfo);
+};
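
A quick layout illustration derived from the constants above (editorial note): with kFirstSlotIndex == 0 and kSlotIndexCount == 3, a CoverageInfo for 4 slots is a FixedArray of length FixedArrayLengthForSlotCount(4) == 12, and slot 2 occupies indices 6..8:

  //   index 6: start source position
  //   index 7: end source position
  //   index 8: block count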
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_DEBUG_OBJECTS_H_
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 32c376ee78..0dd5742e93 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -116,7 +116,7 @@ class DescriptorArray : public FixedArray {
Isolate* isolate, int number_of_descriptors, int slack,
PretenureFlag pretenure = NOT_TENURED);
- DECLARE_CAST(DescriptorArray)
+ DECL_CAST(DescriptorArray)
// Constant for denoting key was not found.
static const int kNotFound = -1;
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index ca709679b1..bb8e63b267 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -21,19 +21,20 @@ class Handle;
class Isolate;
-template <typename Derived, typename Shape, typename Key>
-class Dictionary : public HashTable<Derived, Shape, Key> {
- typedef HashTable<Derived, Shape, Key> DerivedHashTable;
+template <typename Derived, typename Shape>
+class Dictionary : public HashTable<Derived, Shape> {
+ typedef HashTable<Derived, Shape> DerivedHashTable;
public:
+ typedef typename Shape::Key Key;
// Returns the value at entry.
Object* ValueAt(int entry) {
- return this->get(Derived::EntryToIndex(entry) + 1);
+ return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
}
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
- this->set(Derived::EntryToIndex(entry) + 1, value);
+ this->set(DerivedHashTable::EntryToIndex(entry) + 1, value);
}
// Returns the property details for the property at entry.
@@ -46,70 +47,17 @@ class Dictionary : public HashTable<Derived, Shape, Key> {
Shape::DetailsAtPut(static_cast<Derived*>(this), entry, value);
}
- // Returns true if property at given entry is deleted.
- bool IsDeleted(int entry) {
- return Shape::IsDeleted(static_cast<Derived*>(this), entry);
- }
-
// Delete a property from the dictionary.
- static Handle<Object> DeleteProperty(Handle<Derived> dictionary, int entry);
+ MUST_USE_RESULT static Handle<Derived> DeleteEntry(Handle<Derived> dictionary,
+ int entry);
// Attempt to shrink the dictionary after deletion of key.
MUST_USE_RESULT static inline Handle<Derived> Shrink(
- Handle<Derived> dictionary, Key key) {
- return DerivedHashTable::Shrink(dictionary, key);
+ Handle<Derived> dictionary) {
+ return DerivedHashTable::Shrink(dictionary);
}
- // Returns the number of elements in the dictionary filtering out properties
- // with the specified attributes.
- int NumberOfElementsFilterAttributes(PropertyFilter filter);
-
- // Returns the number of enumerable elements in the dictionary.
- int NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
- }
-
- enum SortMode { UNSORTED, SORTED };
-
- // Return the key indices sorted by its enumeration index.
- static Handle<FixedArray> IterationIndices(
- Handle<Dictionary<Derived, Shape, Key>> dictionary);
-
- // Collect the keys into the given KeyAccumulator, in ascending chronological
- // order of property creation.
- static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key>> dictionary,
- KeyAccumulator* keys);
-
- // Copies enumerable keys to preallocated fixed array.
- static void CopyEnumKeysTo(Handle<Dictionary<Derived, Shape, Key>> dictionary,
- Handle<FixedArray> storage, KeyCollectionMode mode,
- KeyAccumulator* accumulator);
-
- // Accessors for next enumeration index.
- void SetNextEnumerationIndex(int index) {
- DCHECK(index != 0);
- this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
- }
-
- int NextEnumerationIndex() {
- return Smi::cast(this->get(kNextEnumerationIndexIndex))->value();
- }
-
- // Creates a new dictionary.
- MUST_USE_RESULT static Handle<Derived> New(
- Isolate* isolate, int at_least_space_for,
- PretenureFlag pretenure = NOT_TENURED,
- MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
-
- // Creates an dictionary with minimal possible capacity.
- MUST_USE_RESULT static Handle<Derived> NewEmpty(
- Isolate* isolate, PretenureFlag pretenure = NOT_TENURED);
-
- // Ensures that a new dictionary is created when the capacity is checked.
- void SetRequiresCopyOnCapacityChange();
-
- // Ensure enough space for n additional elements.
- static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
+ int NumberOfEnumerableProperties();
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
@@ -121,8 +69,8 @@ class Dictionary : public HashTable<Derived, Shape, Key> {
Object* SlowReverseLookup(Object* value);
// Sets the entry to (key, value) pair.
- inline void SetEntry(int entry, Handle<Object> key, Handle<Object> value);
- inline void SetEntry(int entry, Handle<Object> key, Handle<Object> value,
+ inline void ClearEntry(int entry);
+ inline void SetEntry(int entry, Object* key, Object* value,
PropertyDetails details);
MUST_USE_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
@@ -130,33 +78,17 @@ class Dictionary : public HashTable<Derived, Shape, Key> {
PropertyDetails details,
int* entry_out = nullptr);
- static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
- static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
-
- static const bool kIsEnumerable = Shape::kIsEnumerable;
-
protected:
// Generic at put operation.
MUST_USE_RESULT static Handle<Derived> AtPut(Handle<Derived> dictionary,
- Key key, Handle<Object> value);
- // Add entry to dictionary. Returns entry value.
- static int AddEntry(Handle<Derived> dictionary, Key key, Handle<Object> value,
- PropertyDetails details, uint32_t hash);
-};
-
-template <typename Derived, typename Shape>
-class NameDictionaryBase : public Dictionary<Derived, Shape, Handle<Name>> {
- typedef Dictionary<Derived, Shape, Handle<Name>> DerivedDictionary;
-
- public:
- // Find entry for key, otherwise return kNotFound. Optimized version of
- // HashTable::FindEntry.
- int FindEntry(Handle<Name> key);
+ Key key, Handle<Object> value,
+ PropertyDetails details);
};
template <typename Key>
class BaseDictionaryShape : public BaseShape<Key> {
public:
+ static const bool kHasDetails = true;
template <typename Dictionary>
static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
@@ -172,46 +104,82 @@ class BaseDictionaryShape : public BaseShape<Key> {
dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
value.AsSmi());
}
-
- template <typename Dictionary>
- static bool IsDeleted(Dictionary* dict, int entry) {
- return false;
- }
-
- template <typename Dictionary>
- static inline void SetEntry(Dictionary* dict, int entry, Handle<Object> key,
- Handle<Object> value, PropertyDetails details);
};
class NameDictionaryShape : public BaseDictionaryShape<Handle<Name>> {
public:
static inline bool IsMatch(Handle<Name> key, Object* other);
- static inline uint32_t Hash(Handle<Name> key);
- static inline uint32_t HashForObject(Handle<Name> key, Object* object);
+ static inline uint32_t Hash(Isolate* isolate, Handle<Name> key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
- static const int kPrefixSize = 2;
+ static const int kPrefixSize = 1;
static const int kEntrySize = 3;
static const int kEntryValueIndex = 1;
- static const int kEntryDetailsIndex = 2;
- static const bool kIsEnumerable = true;
+ static const bool kNeedsHoleCheck = false;
};
-class NameDictionary
- : public NameDictionaryBase<NameDictionary, NameDictionaryShape> {
- typedef NameDictionaryBase<NameDictionary, NameDictionaryShape>
- DerivedDictionary;
+template <typename Derived, typename Shape>
+class BaseNameDictionary : public Dictionary<Derived, Shape> {
+ typedef typename Shape::Key Key;
public:
- DECLARE_CAST(NameDictionary)
-
+ static const int kNextEnumerationIndexIndex =
+ HashTableBase::kPrefixStartIndex;
static const int kEntryValueIndex = 1;
+
+ // Accessors for next enumeration index.
+ void SetNextEnumerationIndex(int index) {
+ DCHECK_NE(0, index);
+ this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
+ }
+
+ int NextEnumerationIndex() {
+ return Smi::ToInt(this->get(kNextEnumerationIndexIndex));
+ }
+
+ // Creates a new dictionary.
+ MUST_USE_RESULT static Handle<Derived> New(
+ Isolate* isolate, int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
+
+ // Collect the keys into the given KeyAccumulator, in ascending chronological
+ // order of property creation.
+ static void CollectKeysTo(Handle<Derived> dictionary, KeyAccumulator* keys);
+
+ // Return the key indices sorted by its enumeration index.
+ static Handle<FixedArray> IterationIndices(Handle<Derived> dictionary);
+
+ // Copies enumerable keys to preallocated fixed array.
+ static void CopyEnumKeysTo(Handle<Derived> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator);
+
+ // Ensure enough space for n additional elements.
+ static Handle<Derived> EnsureCapacity(Handle<Derived> dictionary, int n);
+
+ MUST_USE_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out = nullptr);
+};
+
+class NameDictionary
+ : public BaseNameDictionary<NameDictionary, NameDictionaryShape> {
+ public:
+ DECL_CAST(NameDictionary)
+
static const int kEntryDetailsIndex = 2;
static const int kInitialCapacity = 2;
+ inline Name* NameAt(int entry);
};
class GlobalDictionaryShape : public NameDictionaryShape {
public:
- static const int kEntrySize = 2; // Overrides NameDictionaryShape::kEntrySize
+ static inline bool IsMatch(Handle<Name> key, Object* other);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+
+ static const int kEntrySize = 1; // Overrides NameDictionaryShape::kEntrySize
template <typename Dictionary>
static inline PropertyDetails DetailsAt(Dictionary* dict, int entry);
@@ -220,52 +188,51 @@ class GlobalDictionaryShape : public NameDictionaryShape {
static inline void DetailsAtPut(Dictionary* dict, int entry,
PropertyDetails value);
- template <typename Dictionary>
- static bool IsDeleted(Dictionary* dict, int entry);
-
- template <typename Dictionary>
- static inline void SetEntry(Dictionary* dict, int entry, Handle<Object> key,
- Handle<Object> value, PropertyDetails details);
+ static inline Object* Unwrap(Object* key);
+ static inline bool IsKey(Isolate* isolate, Object* k);
+ static inline bool IsLive(Isolate* isolate, Object* key);
};
class GlobalDictionary
- : public NameDictionaryBase<GlobalDictionary, GlobalDictionaryShape> {
+ : public BaseNameDictionary<GlobalDictionary, GlobalDictionaryShape> {
public:
- DECLARE_CAST(GlobalDictionary)
+ DECL_CAST(GlobalDictionary)
- static const int kEntryValueIndex = 1;
+ inline Object* ValueAt(int entry);
+ inline PropertyCell* CellAt(int entry);
+ inline void SetEntry(int entry, Object* key, Object* value,
+ PropertyDetails details);
+ inline Name* NameAt(int entry);
+ void ValueAtPut(int entry, Object* value) { set(EntryToIndex(entry), value); }
};
class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object* other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
- static const bool kIsEnumerable = false;
};
class SeededNumberDictionaryShape : public NumberDictionaryShape {
public:
- static const bool UsesSeed = true;
- static const int kPrefixSize = 2;
+ static const int kPrefixSize = 1;
static const int kEntrySize = 3;
- static inline uint32_t SeededHash(uint32_t key, uint32_t seed);
- static inline uint32_t SeededHashForObject(uint32_t key, uint32_t seed,
- Object* object);
+ static inline uint32_t Hash(Isolate* isolate, uint32_t key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
};
class UnseededNumberDictionaryShape : public NumberDictionaryShape {
public:
+ static const bool kHasDetails = false;
static const int kPrefixSize = 0;
static const int kEntrySize = 2;
- static inline uint32_t Hash(uint32_t key);
- static inline uint32_t HashForObject(uint32_t key, Object* object);
+ static inline uint32_t Hash(Isolate* isolate, uint32_t key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
template <typename Dictionary>
static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
UNREACHABLE();
- return PropertyDetails::Empty();
}
template <typename Dictionary>
@@ -278,33 +245,23 @@ class UnseededNumberDictionaryShape : public NumberDictionaryShape {
};
extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+ HashTable<SeededNumberDictionary, SeededNumberDictionaryShape>;
extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape>;
class SeededNumberDictionary
- : public Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t> {
+ : public Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape> {
public:
- DECLARE_CAST(SeededNumberDictionary)
+ DECL_CAST(SeededNumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut(
- Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, Handle<JSObject> dictionary_holder);
- MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
- Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details,
- Handle<JSObject> dictionary_holder);
-
- // Set an existing entry or add a new one if needed.
- // Return the updated dictionary.
MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details,
- Handle<JSObject> dictionary_holder);
+ Handle<Object> value, Handle<JSObject> dictionary_holder,
+ PropertyDetails details = PropertyDetails::Empty());
+ static const int kMaxNumberKeyIndex = kPrefixStartIndex;
void UpdateMaxNumberKey(uint32_t key, Handle<JSObject> dictionary_holder);
// Returns true if the dictionary contains any elements that are non-writable,
@@ -341,29 +298,17 @@ class SeededNumberDictionary
};
class UnseededNumberDictionary
- : public Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
- uint32_t> {
+ : public Dictionary<UnseededNumberDictionary,
+ UnseededNumberDictionaryShape> {
public:
- DECLARE_CAST(UnseededNumberDictionary)
+ DECL_CAST(UnseededNumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT static Handle<UnseededNumberDictionary> AtNumberPut(
- Handle<UnseededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value);
- MUST_USE_RESULT static Handle<UnseededNumberDictionary> AddNumberEntry(
- Handle<UnseededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value);
- static Handle<UnseededNumberDictionary> DeleteKey(
- Handle<UnseededNumberDictionary> dictionary, uint32_t key);
-
- // Set an existing entry or add a new one if needed.
- // Return the updated dictionary.
MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
Handle<UnseededNumberDictionary> dictionary, uint32_t key,
Handle<Object> value);
static const int kEntryValueIndex = 1;
- static const int kEntryDetailsIndex = 2;
};
} // namespace internal
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index a0c82b8bab..5ada507b9f 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -44,7 +44,7 @@ bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
}
int FrameArray::FrameCount() const {
- const int frame_count = Smi::cast(get(kFrameCountIndex))->value();
+ const int frame_count = Smi::ToInt(get(kFrameCountIndex));
DCHECK_LE(0, frame_count);
return frame_count;
}
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index fc793c4098..8bc188cf6a 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_FRAME_ARRAY_H_
#include "src/objects.h"
+#include "src/wasm/wasm-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,23 +17,23 @@ namespace internal {
template <typename T>
class Handle;
-#define FRAME_ARRAY_FIELD_LIST(V) \
- V(WasmInstance, Object) \
- V(WasmFunctionIndex, Smi) \
- V(Receiver, Object) \
- V(Function, JSFunction) \
- V(Code, AbstractCode) \
- V(Offset, Smi) \
+#define FRAME_ARRAY_FIELD_LIST(V) \
+ V(WasmInstance, WasmInstanceObject) \
+ V(WasmFunctionIndex, Smi) \
+ V(Receiver, Object) \
+ V(Function, JSFunction) \
+ V(Code, AbstractCode) \
+ V(Offset, Smi) \
V(Flags, Smi)
// Container object for data collected during simple stack trace captures.
class FrameArray : public FixedArray {
public:
-#define DECLARE_FRAME_ARRAY_ACCESSORS(name, type) \
- inline type* name(int frame_ix) const; \
+#define DECL_FRAME_ARRAY_ACCESSORS(name, type) \
+ inline type* name(int frame_ix) const; \
inline void Set##name(int frame_ix, type* value);
- FRAME_ARRAY_FIELD_LIST(DECLARE_FRAME_ARRAY_ACCESSORS)
-#undef DECLARE_FRAME_ARRAY_ACCESSORS
+ FRAME_ARRAY_FIELD_LIST(DECL_FRAME_ARRAY_ACCESSORS)
+#undef DECL_FRAME_ARRAY_ACCESSORS
inline bool IsWasmFrame(int frame_ix) const;
inline bool IsWasmInterpretedFrame(int frame_ix) const;
@@ -47,7 +48,7 @@ class FrameArray : public FixedArray {
kIsWasmInterpretedFrame = 1 << 1,
kIsAsmJsWasmFrame = 1 << 2,
kIsStrict = 1 << 3,
- kForceConstructor = 1 << 4,
+ kIsConstructor = 1 << 4,
kAsmJsAtNumberConversion = 1 << 5
};
@@ -56,13 +57,12 @@ class FrameArray : public FixedArray {
Handle<JSFunction> function,
Handle<AbstractCode> code, int offset,
int flags);
- static Handle<FrameArray> AppendWasmFrame(Handle<FrameArray> in,
- Handle<Object> wasm_instance,
- int wasm_function_index,
- Handle<AbstractCode> code,
- int offset, int flags);
+ static Handle<FrameArray> AppendWasmFrame(
+ Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
+ int wasm_function_index, Handle<AbstractCode> code, int offset,
+ int flags);
- DECLARE_CAST(FrameArray)
+ DECL_CAST(FrameArray)
private:
// The underlying fixed array embodies a captured stack trace. Frame i
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 7b2db38495..1f1014e230 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -10,22 +10,10 @@
namespace v8 {
namespace internal {
-template <typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::Hash(Key key) {
- if (Shape::UsesSeed) {
- return Shape::SeededHash(key, GetHeap()->HashSeed());
- } else {
- return Shape::Hash(key);
- }
-}
-
-template <typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::HashForObject(Key key, Object* object) {
- if (Shape::UsesSeed) {
- return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
- } else {
- return Shape::HashForObject(key, object);
- }
+template <typename KeyT>
+bool BaseShape<KeyT>::IsLive(Isolate* isolate, Object* k) {
+ Heap* heap = isolate->heap();
+ return k != heap->the_hole_value() && k != heap->undefined_value();
}
} // namespace internal
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 0eac3a2342..90146c8f29 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -27,16 +27,16 @@ namespace internal {
// - Elements with key == undefined have not been used yet.
// - Elements with key == the_hole have been deleted.
//
-// The hash table class is parameterized with a Shape and a Key.
+// The hash table class is parameterized with a Shape.
// Shape must be a class with the following interface:
// class ExampleShape {
// public:
-// // Tells whether key matches other.
+// // Tells whether key matches other.
// static bool IsMatch(Key key, Object* other);
// // Returns the hash value for key.
-// static uint32_t Hash(Key key);
+// static uint32_t Hash(Isolate* isolate, Key key);
// // Returns the hash value for object.
-// static uint32_t HashForObject(Key key, Object* object);
+// static uint32_t HashForObject(Isolate* isolate, Object* object);
// // Convert key to an object.
// static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
// // The prefix size indicates number of elements in the beginning
@@ -44,38 +44,37 @@ namespace internal {
// static const int kPrefixSize = ..;
// // The Element size indicates number of elements per entry.
// static const int kEntrySize = ..;
+// // Indicates whether IsMatch can deal with other being the_hole (a
+// // deleted entry).
+// static const bool kNeedsHoleCheck = ..;
// };
// The prefix size indicates an amount of memory in the
// beginning of the backing storage that can be used for non-element
// information by subclasses.
-template <typename Key>
+template <typename KeyT>
class BaseShape {
public:
- static const bool UsesSeed = false;
- static uint32_t Hash(Key key) { return 0; }
- static uint32_t SeededHash(Key key, uint32_t seed) {
- DCHECK(UsesSeed);
- return Hash(key);
- }
- static uint32_t HashForObject(Key key, Object* object) { return 0; }
- static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
- DCHECK(UsesSeed);
- return HashForObject(key, object);
- }
+ typedef KeyT Key;
static inline Map* GetMap(Isolate* isolate);
+ static const bool kNeedsHoleCheck = true;
+ static Object* Unwrap(Object* key) { return key; }
+ static bool IsKey(Isolate* isolate, Object* key) {
+ return IsLive(isolate, key);
+ }
+ static inline bool IsLive(Isolate* isolate, Object* key);
};
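
A minimal concrete shape following the refactored interface (hypothetical sketch, not part of this patch; it assumes ComputeIntegerHash and the usual V8-internal headers are available):

  class SmiKeyShape : public BaseShape<Handle<Smi>> {
   public:
    static bool IsMatch(Handle<Smi> key, Object* other) { return *key == other; }
    static uint32_t Hash(Isolate* isolate, Handle<Smi> key) {
      return ComputeIntegerHash(key->value(), isolate->heap()->HashSeed());
    }
    static uint32_t HashForObject(Isolate* isolate, Object* object) {
      return ComputeIntegerHash(Smi::ToInt(object), isolate->heap()->HashSeed());
    }
    static Handle<Object> AsHandle(Isolate* isolate, Handle<Smi> key) {
      return key;
    }
    static const int kPrefixSize = 0;
    static const int kEntrySize = 2;
    static const bool kNeedsHoleCheck = true;
  };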
class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
public:
// Returns the number of elements in the hash table.
- inline int NumberOfElements();
+ inline int NumberOfElements() const;
// Returns the number of deleted elements in the hash table.
- inline int NumberOfDeletedElements();
+ inline int NumberOfDeletedElements() const;
// Returns the capacity of the hash table.
- inline int Capacity();
+ inline int Capacity() const;
// ElementAdded should be called whenever an element is added to a
// hash table.
@@ -90,10 +89,6 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
// number of elements. May be more than HashTable::kMaxCapacity.
static inline int ComputeCapacity(int at_least_space_for);
- // Tells whether k is a real key. The hole and undefined are not allowed
- // as keys and can be used to indicate missing or deleted elements.
- inline bool IsKey(Isolate* isolate, Object* k);
-
// Compute the probe offset (quadratic probing).
INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
return (n + n * n) >> 1;
@@ -119,7 +114,7 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
// Returns probe entry.
static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
- DCHECK(base::bits::IsPowerOfTwo32(size));
+ DCHECK(base::bits::IsPowerOfTwo(size));
return (hash + GetProbeOffset(number)) & (size - 1);
}
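// Illustrative only: the first few quadratic-probe slots for a table of
// size 8 (a power of two), starting from hash 5. The offsets are the
// triangular numbers 0, 1, 3, 6, 10, ..., so the probe sequence visits
// every slot exactly once before repeating. The helper name exists only
// for this example.
void ProbeSequenceExample() {
  const uint32_t hash = 5;
  const uint32_t size = 8;
  for (uint32_t n = 0; n < size; n++) {
    uint32_t offset = (n + n * n) >> 1;            // GetProbeOffset(n)
    uint32_t slot = (hash + offset) & (size - 1);  // GetProbe(hash, n, size)
    // n:    0 1 2 3 4 5 6 7
    // slot: 5 6 0 3 7 4 2 1
    (void)slot;  // silence unused-variable warnings
  }
}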
@@ -133,23 +128,19 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
}
};
-template <typename Derived, typename Shape, typename Key>
+template <typename Derived, typename Shape>
class HashTable : public HashTableBase {
public:
typedef Shape ShapeT;
-
- // Wrapper methods. Defined in src/objects/hash-table-inl.h
- // to break a cycle with src/heap/heap.h
- inline uint32_t Hash(Key key);
- inline uint32_t HashForObject(Key key, Object* object);
+ typedef typename Shape::Key Key;
// Returns a new HashTable object.
MUST_USE_RESULT static Handle<Derived> New(
Isolate* isolate, int at_least_space_for,
- MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY,
- PretenureFlag pretenure = NOT_TENURED);
+ PretenureFlag pretenure = NOT_TENURED,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
- DECLARE_CAST(HashTable)
+ DECL_CAST(HashTable)
// Garbage collection support.
void IteratePrefix(ObjectVisitor* visitor);
@@ -159,11 +150,22 @@ class HashTable : public HashTableBase {
inline int FindEntry(Key key);
inline int FindEntry(Isolate* isolate, Key key, int32_t hash);
int FindEntry(Isolate* isolate, Key key);
- inline bool Has(Isolate* isolate, Key key);
- inline bool Has(Key key);
// Rehashes the table in-place.
- void Rehash(Key key);
+ void Rehash();
+
+ // Tells whether k is a real key. The hole and undefined are not allowed
+ // as keys and can be used to indicate missing or deleted elements.
+ static bool IsKey(Isolate* isolate, Object* k) {
+ return Shape::IsKey(isolate, k);
+ }
+
+ inline bool ToKey(Isolate* isolate, int entry, Object** out_k) {
+ Object* k = KeyAt(entry);
+ if (!IsKey(isolate, k)) return false;
+ *out_k = Shape::Unwrap(k);
+ return true;
+ }
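// Illustrative only: the intended use of ToKey when scanning a table,
// skipping unused and deleted slots. |table| (a Derived*) and |isolate|
// are assumed to be in scope.
for (int entry = 0; entry < table->Capacity(); entry++) {
  Object* key;
  if (!table->ToKey(isolate, entry, &key)) continue;  // hole or undefined
  // ... |key| has already been unwrapped via Shape::Unwrap ...
}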
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
@@ -188,31 +190,31 @@ class HashTable : public HashTableBase {
return (entry * kEntrySize) + kElementsStartIndex;
}
+ // Ensure enough space for n additional elements.
+ MUST_USE_RESULT static Handle<Derived> EnsureCapacity(
+ Handle<Derived> table, int n, PretenureFlag pretenure = NOT_TENURED);
+
+ // Returns true if this table has sufficient capacity for adding n elements.
+ bool HasSufficientCapacityToAdd(int number_of_additional_elements);
+
protected:
friend class ObjectHashTable;
- MUST_USE_RESULT static Handle<Derived> New(Isolate* isolate, int capacity,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Handle<Derived> NewInternal(Isolate* isolate,
+ int capacity,
+ PretenureFlag pretenure);
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
// Attempt to shrink hash table after removal of key.
- MUST_USE_RESULT static Handle<Derived> Shrink(Handle<Derived> table, Key key);
-
- // Ensure enough space for n additional elements.
- MUST_USE_RESULT static Handle<Derived> EnsureCapacity(
- Handle<Derived> table, int n, Key key,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Returns true if this table has sufficient capacity for adding n elements.
- bool HasSufficientCapacityToAdd(int number_of_additional_elements);
+ MUST_USE_RESULT static Handle<Derived> Shrink(Handle<Derived> table);
private:
// Ensure that kMaxRegularCapacity yields a non-large object dictionary.
STATIC_ASSERT(EntryToIndex(kMaxRegularCapacity) < kMaxRegularLength);
- STATIC_ASSERT(v8::base::bits::IsPowerOfTwo32(kMaxRegularCapacity));
+ STATIC_ASSERT(v8::base::bits::IsPowerOfTwo(kMaxRegularCapacity));
static const int kMaxRegularEntry = kMaxRegularCapacity / kEntrySize;
static const int kMaxRegularIndex = EntryToIndex(kMaxRegularEntry);
STATIC_ASSERT(OffsetOfElementAt(kMaxRegularIndex) <
@@ -231,52 +233,60 @@ class HashTable : public HashTableBase {
// Returns _expected_ if one of the entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
// number _probe_.
- uint32_t EntryForProbe(Key key, Object* k, int probe, uint32_t expected);
+ uint32_t EntryForProbe(Object* k, int probe, uint32_t expected);
void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
// Rehashes this hash-table into the new table.
- void Rehash(Handle<Derived> new_table, Key key);
+ void Rehash(Derived* new_table);
};
// HashTableKey is an abstract superclass for virtual key behavior.
class HashTableKey {
public:
+ explicit HashTableKey(uint32_t hash) : hash_(hash) {}
+
// Returns whether the other object matches this key.
virtual bool IsMatch(Object* other) = 0;
// Returns the hash value for this key.
- virtual uint32_t Hash() = 0;
- // Returns the hash value for object.
- virtual uint32_t HashForObject(Object* key) = 0;
- // Returns the key object for storing into the hash table.
- MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) = 0;
// Required.
virtual ~HashTableKey() {}
+
+ uint32_t Hash() const { return hash_; }
+
+ protected:
+ void set_hash(uint32_t hash) {
+ DCHECK_EQ(0, hash_);
+ hash_ = hash;
+ }
+
+ private:
+ uint32_t hash_ = 0;
};
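// Illustrative only, not part of this patch: a concrete key written against
// the reworked HashTableKey, which now receives its hash at construction time
// instead of computing it through virtual calls. The class name and the use
// of SameValueZero for matching are assumptions made for this sketch.
class ExampleObjectKey final : public HashTableKey {
 public:
  ExampleObjectKey(Handle<Object> object, uint32_t hash)
      : HashTableKey(hash), object_(object) {}

  bool IsMatch(Object* other) override {
    return other->SameValueZero(*object_);
  }

 private:
  Handle<Object> object_;
};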
class ObjectHashTableShape : public BaseShape<Handle<Object>> {
public:
static inline bool IsMatch(Handle<Object> key, Object* other);
- static inline uint32_t Hash(Handle<Object> key);
- static inline uint32_t HashForObject(Handle<Object> key, Object* object);
+ static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
static const int kPrefixSize = 0;
static const int kEntrySize = 2;
+ static const bool kNeedsHoleCheck = false;
};
// ObjectHashTable maps keys that are arbitrary objects to object values by
// using the identity hash of the key for hashing purposes.
class ObjectHashTable
- : public HashTable<ObjectHashTable, ObjectHashTableShape, Handle<Object>> {
- typedef HashTable<ObjectHashTable, ObjectHashTableShape, Handle<Object>>
- DerivedHashTable;
+ : public HashTable<ObjectHashTable, ObjectHashTableShape> {
+ typedef HashTable<ObjectHashTable, ObjectHashTableShape> DerivedHashTable;
public:
- DECLARE_CAST(ObjectHashTable)
+ DECL_CAST(ObjectHashTable)
// Attempt to shrink hash table after removal of key.
MUST_USE_RESULT static inline Handle<ObjectHashTable> Shrink(
- Handle<ObjectHashTable> table, Handle<Object> key);
+ Handle<ObjectHashTable> table);
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
@@ -319,8 +329,7 @@ class ObjectHashSetShape : public ObjectHashTableShape {
static const int kEntrySize = 1;
};
-class ObjectHashSet
- : public HashTable<ObjectHashSet, ObjectHashSetShape, Handle<Object>> {
+class ObjectHashSet : public HashTable<ObjectHashSet, ObjectHashSetShape> {
public:
static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> set,
Handle<Object> key);
@@ -328,7 +337,41 @@ class ObjectHashSet
inline bool Has(Isolate* isolate, Handle<Object> key, int32_t hash);
inline bool Has(Isolate* isolate, Handle<Object> key);
- DECLARE_CAST(ObjectHashSet)
+ DECL_CAST(ObjectHashSet)
+};
+
+// Non-templatized base class for {OrderedHashTable}s.
+// TODO(hash): Unify this with the HashTableBase above.
+class OrderedHashTableBase : public FixedArray {
+ public:
+ static const int kNotFound = -1;
+ static const int kMinCapacity = 4;
+
+ static const int kNumberOfElementsIndex = 0;
+ // The next table is stored at the same index as the nof elements.
+ static const int kNextTableIndex = kNumberOfElementsIndex;
+ static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
+ static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1;
+ static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1;
+ static const int kRemovedHolesIndex = kHashTableStartIndex;
+
+ static constexpr const int kNumberOfElementsOffset =
+ FixedArray::OffsetOfElementAt(kNumberOfElementsIndex);
+ static constexpr const int kNextTableOffset =
+ FixedArray::OffsetOfElementAt(kNextTableIndex);
+ static constexpr const int kNumberOfDeletedElementsOffset =
+ FixedArray::OffsetOfElementAt(kNumberOfDeletedElementsIndex);
+ static constexpr const int kNumberOfBucketsOffset =
+ FixedArray::OffsetOfElementAt(kNumberOfBucketsIndex);
+ static constexpr const int kHashTableStartOffset =
+ FixedArray::OffsetOfElementAt(kHashTableStartIndex);
+
+ static const int kLoadFactor = 2;
+
+ // NumberOfDeletedElements is set to kClearedTableSentinel when
+ // the table is cleared, which allows iterator transitions to
+ // optimize that case.
+ static const int kClearedTableSentinel = -1;
};
// OrderedHashTable is a HashTable with Object keys that preserves
@@ -368,7 +411,7 @@ class ObjectHashSet
// [3 + NumberOfRemovedHoles()..length]: Not used
//
template <class Derived, int entrysize>
-class OrderedHashTable : public FixedArray {
+class OrderedHashTable : public OrderedHashTableBase {
public:
// Returns an OrderedHashTable with a capacity of at least |capacity|.
static Handle<Derived> Allocate(Isolate* isolate, int capacity,
@@ -386,24 +429,24 @@ class OrderedHashTable : public FixedArray {
// existing iterators can be updated.
static Handle<Derived> Clear(Handle<Derived> table);
- // Returns a true if the OrderedHashTable contains the key
- static bool HasKey(Handle<Derived> table, Handle<Object> key);
+ // Returns true if the OrderedHashTable contains the key
+ static bool HasKey(Isolate* isolate, Derived* table, Object* key);
- int NumberOfElements() {
- return Smi::cast(get(kNumberOfElementsIndex))->value();
- }
+  // Returns true if the OrderedHashTable contained the key, in which case
+ // the key has been deleted. This does not shrink the table.
+ static bool Delete(Isolate* isolate, Derived* table, Object* key);
+
+ int NumberOfElements() { return Smi::ToInt(get(kNumberOfElementsIndex)); }
int NumberOfDeletedElements() {
- return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
+ return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
}
// Returns the number of contiguous entries in the data table, starting at 0,
// that either are real entries or have been deleted.
int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); }
- int NumberOfBuckets() {
- return Smi::cast(get(kNumberOfBucketsIndex))->value();
- }
+ int NumberOfBuckets() { return Smi::ToInt(get(kNumberOfBucketsIndex)); }
// Returns an index into |this| for the given entry.
int EntryToIndex(int entry) {
@@ -415,19 +458,38 @@ class OrderedHashTable : public FixedArray {
int HashToEntry(int hash) {
int bucket = HashToBucket(hash);
Object* entry = this->get(kHashTableStartIndex + bucket);
- return Smi::cast(entry)->value();
+ return Smi::ToInt(entry);
}
int KeyToFirstEntry(Isolate* isolate, Object* key) {
+    // Special-case Smi keys so that we avoid the HandleScope
+ // creation below.
+ if (key->IsSmi()) {
+ uint32_t hash = ComputeIntegerHash(Smi::ToInt(key));
+ return HashToEntry(hash & Smi::kMaxValue);
+ }
+ HandleScope scope(isolate);
Object* hash = key->GetHash();
// If the object does not have an identity hash, it was never used as a key
if (hash->IsUndefined(isolate)) return kNotFound;
- return HashToEntry(Smi::cast(hash)->value());
+ return HashToEntry(Smi::ToInt(hash));
+ }
+
+ int FindEntry(Isolate* isolate, Object* key) {
+ int entry = KeyToFirstEntry(isolate, key);
+ // Walk the chain in the bucket to find the key.
+ while (entry != kNotFound) {
+ Object* candidate_key = KeyAt(entry);
+ if (candidate_key->SameValueZero(key)) break;
+ entry = NextChainEntry(entry);
+ }
+
+ return entry;
}
int NextChainEntry(int entry) {
Object* next_entry = get(EntryToIndex(entry) + kChainOffset);
- return Smi::cast(next_entry)->value();
+ return Smi::ToInt(next_entry);
}
// use KeyAt(i)->IsTheHole(isolate) to determine if this is a deleted entry.
@@ -443,39 +505,15 @@ class OrderedHashTable : public FixedArray {
// When the table is obsolete we store the indexes of the removed holes.
int RemovedIndexAt(int index) {
- return Smi::cast(get(kRemovedHolesIndex + index))->value();
+ return Smi::ToInt(get(kRemovedHolesIndex + index));
}
- static const int kNotFound = -1;
- static const int kMinCapacity = 4;
-
- static const int kNumberOfElementsIndex = 0;
- // The next table is stored at the same index as the nof elements.
- static const int kNextTableIndex = kNumberOfElementsIndex;
- static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
- static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1;
- static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1;
-
- static constexpr const int kNumberOfElementsOffset =
- FixedArray::OffsetOfElementAt(kNumberOfElementsIndex);
- static constexpr const int kNextTableOffset =
- FixedArray::OffsetOfElementAt(kNextTableIndex);
- static constexpr const int kNumberOfDeletedElementsOffset =
- FixedArray::OffsetOfElementAt(kNumberOfDeletedElementsIndex);
- static constexpr const int kNumberOfBucketsOffset =
- FixedArray::OffsetOfElementAt(kNumberOfBucketsIndex);
- static constexpr const int kHashTableStartOffset =
- FixedArray::OffsetOfElementAt(kHashTableStartIndex);
-
static const int kEntrySize = entrysize + 1;
static const int kChainOffset = entrysize;
- static const int kLoadFactor = 2;
-
- // NumberOfDeletedElements is set to kClearedTableSentinel when
- // the table is cleared, which allows iterator transitions to
- // optimize that case.
- static const int kClearedTableSentinel = -1;
+ static const int kMaxCapacity =
+ (FixedArray::kMaxLength - kHashTableStartIndex) /
+ (1 + (kEntrySize * kLoadFactor));
protected:
static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
@@ -500,17 +538,11 @@ class OrderedHashTable : public FixedArray {
void SetRemovedIndexAt(int index, int removed_index) {
return set(kRemovedHolesIndex + index, Smi::FromInt(removed_index));
}
-
- static const int kRemovedHolesIndex = kHashTableStartIndex;
-
- static const int kMaxCapacity =
- (FixedArray::kMaxLength - kHashTableStartIndex) /
- (1 + (kEntrySize * kLoadFactor));
};
class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
public:
- DECLARE_CAST(OrderedHashSet)
+ DECL_CAST(OrderedHashSet)
static Handle<OrderedHashSet> Add(Handle<OrderedHashSet> table,
Handle<Object> value);
@@ -520,9 +552,15 @@ class OrderedHashSet : public OrderedHashTable<OrderedHashSet, 1> {
class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
public:
- DECLARE_CAST(OrderedHashMap)
+ DECL_CAST(OrderedHashMap)
+
+ // Returns a value if the OrderedHashMap contains the key, otherwise
+ // returns undefined.
+ static Handle<OrderedHashMap> Add(Handle<OrderedHashMap> table,
+ Handle<Object> key, Handle<Object> value);
+ Object* ValueAt(int entry);
- inline Object* ValueAt(int entry);
+ static Object* GetHash(Isolate* isolate, Object* key);
static const int kValueOffset = 1;
};
@@ -531,23 +569,22 @@ template <int entrysize>
class WeakHashTableShape : public BaseShape<Handle<Object>> {
public:
static inline bool IsMatch(Handle<Object> key, Object* other);
- static inline uint32_t Hash(Handle<Object> key);
- static inline uint32_t HashForObject(Handle<Object> key, Object* object);
+ static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
static const int kPrefixSize = 0;
static const int kEntrySize = entrysize;
+ static const bool kNeedsHoleCheck = false;
};
// WeakHashTable maps keys that are arbitrary heap objects to heap object
// values. The table wraps the keys in weak cells and stores values directly.
// Thus it references keys weakly and values strongly.
-class WeakHashTable
- : public HashTable<WeakHashTable, WeakHashTableShape<2>, Handle<Object>> {
- typedef HashTable<WeakHashTable, WeakHashTableShape<2>, Handle<Object>>
- DerivedHashTable;
+class WeakHashTable : public HashTable<WeakHashTable, WeakHashTableShape<2>> {
+ typedef HashTable<WeakHashTable, WeakHashTableShape<2>> DerivedHashTable;
public:
- DECLARE_CAST(WeakHashTable)
+ DECL_CAST(WeakHashTable)
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
@@ -572,20 +609,234 @@ class WeakHashTable
}
};
-// OrderedHashTableIterator is an iterator that iterates over the keys and
-// values of an OrderedHashTable.
+// This is similar to the OrderedHashTable, except for the memory
+// layout, where we use bytes instead of Smis. The maximum capacity is
+// only 254; beyond that limit we transition to an OrderedHashTable.
//
-// The iterator has a reference to the underlying OrderedHashTable data,
-// [table], as well as the current [index] the iterator is at.
+// Each bucket and chain value is a byte long. The padding exists so
+// that the DataTable entries start aligned. A bucket or chain value
+// of 255 (kNotFound) denotes the absence of an entry.
//
-// When the OrderedHashTable is rehashed it adds a reference from the old table
-// to the new table as well as storing enough data about the changes so that the
-// iterator [index] can be adjusted accordingly.
+// Memory layout: [ Header ] [ HashTable ] [ Chains ] [ Padding ] [ DataTable ]
//
-// When the [Next] result from the iterator is requested, the iterator checks if
-// there is a newer table that it needs to transition to.
-template <class Derived, class TableType>
-class OrderedHashTableIterator : public JSObject {
+// On a 64-bit machine with capacity = 4 and 2 entries,
+//
+// [ Header ] :
+// [0 .. 7] : Number of elements
+// [8 .. 15] : Number of deleted elements
+// [16 .. 23] : Number of buckets
+//
+// [ HashTable ] :
+// [24 .. 31] : First chain-link for bucket 1
+// [32 .. 39] : First chain-link for bucket 2
+//
+// [ Chains ] :
+// [40 .. 47] : Next chain link for entry 1
+// [48 .. 55] : Next chain link for entry 2
+// [56 .. 63] : Next chain link for entry 3
+// [64 .. 71] : Next chain link for entry 4
+//
+// [ Padding ] :
+// [72 .. 127] : Padding
+//
+// [ DataTable ] :
+// [128 .. 128 + kEntrySize - 1] : Entry 1
+// [128 + kEntrySize .. 128 + kEntrySize + kEntrySize - 1] : Entry 2
+//
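// Illustrative only: how a lookup conceptually walks the byte-sized
// bucket/chain structure described above, using accessors declared on the
// class below. |table| (a SmallOrderedHashSet*) and |hash| (the key's
// integer hash) are assumed to be in scope.
int entry = table->HashToFirstEntry(hash);
while (entry != SmallOrderedHashSet::kNotFound) {
  Object* candidate = table->KeyAt(entry);
  // ... compare |candidate| against the key being looked up ...
  entry = table->GetNextEntry(entry);
}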
+template <class Derived>
+class SmallOrderedHashTable : public HeapObject {
+ public:
+ void Initialize(Isolate* isolate, int capacity);
+
+ static Handle<Derived> Allocate(Isolate* isolate, int capacity,
+ PretenureFlag pretenure = NOT_TENURED);
+
+  // Returns true if the SmallOrderedHashTable contains the key.
+ bool HasKey(Isolate* isolate, Handle<Object> key);
+
+ // Iterates only fields in the DataTable.
+ class BodyDescriptor;
+
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+  // Returns a SmallOrderedHashTable (possibly |table|) with enough
+ // space to add at least one new element.
+ static Handle<Derived> Grow(Handle<Derived> table);
+
+ static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
+
+ void SetDataEntry(int entry, int relative_index, Object* value);
+
+ static int GetDataTableStartOffset(int capacity) {
+ int nof_buckets = capacity / kLoadFactor;
+ int nof_chain_entries = capacity;
+
+ int padding_index = kBucketsStartOffset + nof_buckets + nof_chain_entries;
+ int padding_offset = padding_index * kBitsPerByte;
+
+ return ((padding_offset + kPointerSize - 1) / kPointerSize) * kPointerSize;
+ }
+
+ int GetDataTableStartOffset() const {
+ return GetDataTableStartOffset(Capacity());
+ }
+
+ static int Size(int capacity) {
+ int data_table_start = GetDataTableStartOffset(capacity);
+ int data_table_size = capacity * Derived::kEntrySize * kBitsPerPointer;
+ return data_table_start + data_table_size;
+ }
+
+ int Size() const { return Size(Capacity()); }
+
+ void SetFirstEntry(int bucket, byte value) {
+ set(kBucketsStartOffset + bucket, value);
+ }
+
+ int GetFirstEntry(int bucket) const {
+ return get(kBucketsStartOffset + bucket);
+ }
+
+ void SetNextEntry(int entry, int next_entry) {
+ set(GetChainTableOffset() + entry, next_entry);
+ }
+
+ int GetNextEntry(int entry) const {
+ return get(GetChainTableOffset() + entry);
+ }
+
+ Object* GetDataEntry(int entry, int relative_index) {
+ int entry_offset = GetDataEntryOffset(entry, relative_index);
+ return READ_FIELD(this, entry_offset);
+ }
+
+ Object* KeyAt(int entry) const {
+ int entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
+ return READ_FIELD(this, entry_offset);
+ }
+
+ int HashToBucket(int hash) const { return hash & (NumberOfBuckets() - 1); }
+
+ int HashToFirstEntry(int hash) const {
+ int bucket = HashToBucket(hash);
+ int entry = GetFirstEntry(bucket);
+ return entry;
+ }
+
+ int GetChainTableOffset() const {
+ return kBucketsStartOffset + NumberOfBuckets();
+ }
+
+ void SetNumberOfBuckets(int num) { set(kNumberOfBucketsOffset, num); }
+
+ void SetNumberOfElements(int num) { set(kNumberOfElementsOffset, num); }
+
+ void SetNumberOfDeletedElements(int num) {
+ set(kNumberOfDeletedElementsOffset, num);
+ }
+
+ int NumberOfElements() const { return get(kNumberOfElementsOffset); }
+
+ int NumberOfDeletedElements() const {
+ return get(kNumberOfDeletedElementsOffset);
+ }
+
+ int NumberOfBuckets() const { return get(kNumberOfBucketsOffset); }
+
+ static const byte kNotFound = 0xFF;
+ static const int kMinCapacity = 4;
+
+ // We use the value 255 to indicate kNotFound for chain and bucket
+  // values, which means that this value can't be used as a valid
+ // index.
+ static const int kMaxCapacity = 254;
+ STATIC_ASSERT(kMaxCapacity < kNotFound);
+
+ static const int kNumberOfElementsOffset = 0;
+ static const int kNumberOfDeletedElementsOffset = 1;
+ static const int kNumberOfBucketsOffset = 2;
+ static const int kBucketsStartOffset = 3;
+
+ // The load factor is used to derive the number of buckets from
+  // capacity during allocation. We also depend on this to calculate
+  // the capacity from the number of buckets after allocation. If we
+ // decide to change kLoadFactor to something other than 2, capacity
+ // should be stored as another field of this object.
+ static const int kLoadFactor = 2;
+ static const int kBitsPerPointer = kPointerSize * kBitsPerByte;
+
+ // Our growth strategy involves doubling the capacity until we reach
+  // kMaxCapacity, but since kMaxCapacity is always less than 256,
+  // we will never fully utilize this table. We special-case 256 by
+  // changing the new capacity to kMaxCapacity in
+  // SmallOrderedHashTable::Grow (see the sketch after this class).
+ static const int kGrowthHack = 256;
+
+ DECL_VERIFIER(SmallOrderedHashTable)
+
+ protected:
+ // This is used for accessing the non |DataTable| part of the
+ // structure.
+ byte get(int index) const {
+ return READ_BYTE_FIELD(this, kHeaderSize + (index * kOneByteSize));
+ }
+
+ void set(int index, byte value) {
+ WRITE_BYTE_FIELD(this, kHeaderSize + (index * kOneByteSize), value);
+ }
+
+ int GetDataEntryOffset(int entry, int relative_index) const {
+ int datatable_start = GetDataTableStartOffset();
+ int offset_in_datatable = entry * Derived::kEntrySize * kPointerSize;
+ int offset_in_entry = relative_index * kPointerSize;
+ return datatable_start + offset_in_datatable + offset_in_entry;
+ }
+
+  // Returns the number of elements that can fit into the allocated buffer.
+ int Capacity() const { return NumberOfBuckets() * kLoadFactor; }
+
+ int UsedCapacity() const {
+ return NumberOfElements() + NumberOfDeletedElements();
+ }
+};
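// Illustrative only: the growth rule described in the comment on kGrowthHack
// above, written as a standalone helper. The function name is hypothetical;
// the real logic lives in SmallOrderedHashTable::Grow.
int NextSmallTableCapacityExample(int current_capacity) {
  int new_capacity = 2 * current_capacity;
  // Doubling 128 would give 256 (kGrowthHack), which byte-sized bucket and
  // chain values cannot address, so clamp to the maximum of 254 instead.
  if (new_capacity == 256) new_capacity = 254;
  return new_capacity;
}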
+
+class SmallOrderedHashSet : public SmallOrderedHashTable<SmallOrderedHashSet> {
+ public:
+ DECL_CAST(SmallOrderedHashSet)
+
+ DECL_PRINTER(SmallOrderedHashSet)
+
+ static const int kKeyIndex = 0;
+ static const int kEntrySize = 1;
+
+  // Adds |key| to |table|. If the capacity is not sufficient, a new
+  // table is created. The original |table| is returned if there is
+  // capacity to store |key|; otherwise the new table is returned.
+ static Handle<SmallOrderedHashSet> Add(Handle<SmallOrderedHashSet> table,
+ Handle<Object> key);
+};
+
+class SmallOrderedHashMap : public SmallOrderedHashTable<SmallOrderedHashMap> {
+ public:
+ DECL_CAST(SmallOrderedHashMap)
+
+ DECL_PRINTER(SmallOrderedHashMap)
+
+ static const int kKeyIndex = 0;
+ static const int kValueIndex = 1;
+ static const int kEntrySize = 2;
+
+  // Adds the |key|/|value| pair to |table|. If the capacity is not
+  // sufficient, a new table is created. The original |table| is returned
+  // if there is capacity for the new entry; otherwise the new table is
+  // returned.
+ static Handle<SmallOrderedHashMap> Add(Handle<SmallOrderedHashMap> table,
+ Handle<Object> key,
+ Handle<Object> value);
+};
+
+class JSCollectionIterator : public JSObject {
public:
// [table]: the backing hash table mapping keys to values.
DECL_ACCESSORS(table, Object)
@@ -593,31 +844,38 @@ class OrderedHashTableIterator : public JSObject {
// [index]: The index into the data table.
DECL_ACCESSORS(index, Object)
- // [kind]: The kind of iteration this is. One of the [Kind] enum values.
- DECL_ACCESSORS(kind, Object)
-
-#ifdef OBJECT_PRINT
- void OrderedHashTableIteratorPrint(std::ostream& os); // NOLINT
-#endif
+ // Dispatched behavior.
+ DECL_PRINTER(JSCollectionIterator)
static const int kTableOffset = JSObject::kHeaderSize;
static const int kIndexOffset = kTableOffset + kPointerSize;
- static const int kKindOffset = kIndexOffset + kPointerSize;
- static const int kSize = kKindOffset + kPointerSize;
+ static const int kSize = kIndexOffset + kPointerSize;
- enum Kind { kKindKeys = 1, kKindValues = 2, kKindEntries = 3 };
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollectionIterator);
+};
+// OrderedHashTableIterator is an iterator that iterates over the keys and
+// values of an OrderedHashTable.
+//
+// The iterator has a reference to the underlying OrderedHashTable data,
+// [table], as well as the current [index] the iterator is at.
+//
+// When the OrderedHashTable is rehashed it adds a reference from the old table
+// to the new table as well as storing enough data about the changes so that the
+// iterator [index] can be adjusted accordingly.
+//
+// When the [Next] result from the iterator is requested, the iterator checks if
+// there is a newer table that it needs to transition to.
+template <class Derived, class TableType>
+class OrderedHashTableIterator : public JSCollectionIterator {
+ public:
// Whether the iterator has more elements. This needs to be called before
// calling |CurrentKey| and/or |CurrentValue|.
bool HasMore();
// Move the index forward one.
- void MoveNext() { set_index(Smi::FromInt(Smi::cast(index())->value() + 1)); }
-
- // Populates the array with the next key and value and then moves the iterator
- // forward.
- // This returns the |kind| or 0 if the iterator is already at the end.
- Smi* Next(JSArray* value_array);
+ void MoveNext() { set_index(Smi::FromInt(Smi::ToInt(index()) + 1)); }
// Returns the current key of the iterator. This should only be called when
// |HasMore| returns true.
@@ -635,14 +893,10 @@ class JSSetIterator
: public OrderedHashTableIterator<JSSetIterator, OrderedHashSet> {
public:
// Dispatched behavior.
- DECLARE_PRINTER(JSSetIterator)
- DECLARE_VERIFIER(JSSetIterator)
-
- DECLARE_CAST(JSSetIterator)
+ DECL_PRINTER(JSSetIterator)
+ DECL_VERIFIER(JSSetIterator)
- // Called by |Next| to populate the array. This allows the subclasses to
- // populate the array differently.
- inline void PopulateValueArray(FixedArray* array);
+ DECL_CAST(JSSetIterator)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
@@ -652,20 +906,16 @@ class JSMapIterator
: public OrderedHashTableIterator<JSMapIterator, OrderedHashMap> {
public:
// Dispatched behavior.
- DECLARE_PRINTER(JSMapIterator)
- DECLARE_VERIFIER(JSMapIterator)
+ DECL_PRINTER(JSMapIterator)
+ DECL_VERIFIER(JSMapIterator)
- DECLARE_CAST(JSMapIterator)
+ DECL_CAST(JSMapIterator)
- // Called by |Next| to populate the array. This allows the subclasses to
- // populate the array differently.
- inline void PopulateValueArray(FixedArray* array);
-
- private:
// Returns the current value of the iterator. This should only be called when
// |HasMore| returns true.
inline Object* CurrentValue();
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
};
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 551b03621e..22d2119645 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -31,7 +31,7 @@ int BoilerplateDescription::size() const {
int BoilerplateDescription::backing_store_size() const {
if (has_number_of_properties()) {
// If present, the last entry contains the number of properties.
- return Smi::cast(this->get(length() - 1))->value();
+ return Smi::ToInt(this->get(length() - 1));
}
// If the number is not given explicitly, we assume there are no
// properties with computed names.
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 40bf70d602..f544ee37b8 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -31,7 +31,7 @@ class BoilerplateDescription : public FixedArray {
void set_backing_store_size(Isolate* isolate, int backing_store_size);
- DECLARE_CAST(BoilerplateDescription)
+ DECL_CAST(BoilerplateDescription)
private:
bool has_number_of_properties() const;
@@ -45,7 +45,7 @@ class ConstantElementsPair : public Tuple2 {
DECL_INT_ACCESSORS(elements_kind)
DECL_ACCESSORS(constant_values, FixedArrayBase)
- DECLARE_CAST(ConstantElementsPair)
+ DECL_CAST(ConstantElementsPair)
static const int kElementsKindOffset = kValue1Offset;
static const int kConstantValuesOffset = kValue2Offset;
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 7faf834c08..2fa992e153 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -15,6 +15,60 @@
namespace v8 {
namespace internal {
+#define VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(ConsString) \
+ V(DataObject) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(FreeSpace) \
+ V(JSApiObject) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSObjectFast) \
+ V(JSRegExp) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(NativeContext) \
+ V(Oddball) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(ShortcutCandidate) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(Struct) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(WeakCell)
+
+// For data objects, JS objects, and structs, along with a generic visitor that
+// can visit objects of any size, we provide visitors specialized by
+// object size in words.
+// Ids of specialized visitors are declared in a linear order (without
+// holes), starting from the id of the visitor specialized for 2-word objects
+// (the base visitor id) and ending with the id of the generic visitor.
+// The method GetVisitorIdForSize depends on this ordering to calculate the
+// visitor id of a specialized visitor from a given instance size, the base
+// visitor id, and the generic visitor's id.
+enum VisitorId {
+#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
+ VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+#undef VISITOR_ID_ENUM_DECL
+ kVisitorIdCount
+};
+
typedef std::vector<Handle<Map>> MapHandles;
// All heap objects have a Map that describes their structure.
@@ -26,7 +80,7 @@ class Map : public HeapObject {
// Instance size.
// Size in bytes or kVariableSizeSentinel if instances do not have
// a fixed size.
- inline int instance_size();
+ inline int instance_size() const;
inline void set_instance_size(int value);
// Only to clear an unused byte, remove once byte is used.
@@ -35,16 +89,16 @@ class Map : public HeapObject {
// [inobject_properties_or_constructor_function_index]: Provides access
// to the inobject properties in case of JSObject maps, or the constructor
// function index in case of primitive maps.
- inline int inobject_properties_or_constructor_function_index();
+ inline int inobject_properties_or_constructor_function_index() const;
inline void set_inobject_properties_or_constructor_function_index(int value);
// Count of properties allocated in the object (JSObject only).
- inline int GetInObjectProperties();
+ inline int GetInObjectProperties() const;
inline void SetInObjectProperties(int value);
// Index of the constructor function in the native context (primitives only),
// or the special sentinel value to indicate that there is no object wrapper
// for the primitive (i.e. in case of null or undefined).
static const int kNoConstructorFunctionIndex = 0;
- inline int GetConstructorFunctionIndex();
+ inline int GetConstructorFunctionIndex() const;
inline void SetConstructorFunctionIndex(int value);
static MaybeHandle<JSFunction> GetConstructorFunction(
Handle<Map> map, Handle<Context> native_context);
@@ -54,12 +108,12 @@ class Map : public HeapObject {
inline InterceptorInfo* GetIndexedInterceptor();
// Instance type.
- inline InstanceType instance_type();
+ inline InstanceType instance_type() const;
inline void set_instance_type(InstanceType value);
// Tells how many unused property fields are available in the
// instance (only used for JSObject in fast mode).
- inline int unused_property_fields();
+ inline int unused_property_fields() const;
inline void set_unused_property_fields(int value);
// Bit field.
@@ -142,7 +196,7 @@ class Map : public HeapObject {
// True if the object constructions countdown counter is a range
// [kSlackTrackingCounterEnd, kSlackTrackingCounterStart].
- inline bool IsInobjectSlackTrackingInProgress();
+ inline bool IsInobjectSlackTrackingInProgress() const;
// Does the tracking step.
inline void InobjectSlackTrackingStep();
@@ -157,7 +211,7 @@ class Map : public HeapObject {
// property will not be used to create instances of the function.
// See ECMA-262, 13.2.2.
inline void set_non_instance_prototype(bool value);
- inline bool has_non_instance_prototype();
+ inline bool has_non_instance_prototype() const;
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
@@ -170,11 +224,11 @@ class Map : public HeapObject {
// Records and queries whether the instance has a named interceptor.
inline void set_has_named_interceptor();
- inline bool has_named_interceptor();
+ inline bool has_named_interceptor() const;
// Records and queries whether the instance has an indexed interceptor.
inline void set_has_indexed_interceptor();
- inline bool has_indexed_interceptor();
+ inline bool has_indexed_interceptor() const;
// Tells whether the instance is undetectable.
// An undetectable object is a special class of JSObject: 'typeof' operator
@@ -183,7 +237,7 @@ class Map : public HeapObject {
// document.all in Firefox & Safari.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
inline void set_is_undetectable();
- inline bool is_undetectable();
+ inline bool is_undetectable() const;
// Tells whether the instance has a [[Call]] internal method.
// This property is implemented according to ES6, section 7.2.3.
@@ -191,28 +245,28 @@ class Map : public HeapObject {
inline bool is_callable() const;
inline void set_new_target_is_base(bool value);
- inline bool new_target_is_base();
+ inline bool new_target_is_base() const;
inline void set_is_extensible(bool value);
- inline bool is_extensible();
+ inline bool is_extensible() const;
inline void set_is_prototype_map(bool value);
inline bool is_prototype_map() const;
inline void set_elements_kind(ElementsKind elements_kind);
- inline ElementsKind elements_kind();
+ inline ElementsKind elements_kind() const;
// Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_elements();
+ inline bool has_fast_smi_elements() const;
// Tells whether the instance has fast elements.
- inline bool has_fast_object_elements();
- inline bool has_fast_smi_or_object_elements();
- inline bool has_fast_double_elements();
- inline bool has_fast_elements();
- inline bool has_sloppy_arguments_elements();
- inline bool has_fast_sloppy_arguments_elements();
- inline bool has_fast_string_wrapper_elements();
- inline bool has_fixed_typed_array_elements();
- inline bool has_dictionary_elements();
+ inline bool has_fast_object_elements() const;
+ inline bool has_fast_smi_or_object_elements() const;
+ inline bool has_fast_double_elements() const;
+ inline bool has_fast_elements() const;
+ inline bool has_sloppy_arguments_elements() const;
+ inline bool has_fast_sloppy_arguments_elements() const;
+ inline bool has_fast_string_wrapper_elements() const;
+ inline bool has_fixed_typed_array_elements() const;
+ inline bool has_dictionary_elements() const;
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
@@ -221,9 +275,9 @@ class Map : public HeapObject {
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
bool DictionaryElementsInPrototypeChainOnly();
- inline Map* ElementsTransitionMap();
+ inline Map* ElementsTransitionMap() const;
- inline FixedArrayBase* GetInitialElements();
+ inline FixedArrayBase* GetInitialElements() const;
// [raw_transitions]: Provides access to the transitions storage field.
// Don't call set_raw_transitions() directly to overwrite transitions, use
@@ -252,35 +306,35 @@ class Map : public HeapObject {
static const int kPrototypeChainInvalid = 1;
// Return the map of the root of object's prototype chain.
- Map* GetPrototypeChainRootMap(Isolate* isolate);
+ Map* GetPrototypeChainRootMap(Isolate* isolate) const;
// Returns a WeakCell object containing given prototype. The cell is cached
// in PrototypeInfo which is created lazily.
static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
Handle<JSObject> prototype, Isolate* isolate);
- Map* FindRootMap();
- Map* FindFieldOwner(int descriptor);
+ Map* FindRootMap() const;
+ Map* FindFieldOwner(int descriptor) const;
- inline int GetInObjectPropertyOffset(int index);
+ inline int GetInObjectPropertyOffset(int index) const;
- int NumberOfFields();
+ int NumberOfFields() const;
// Returns true if transition to the given map requires special
// synchronization with the concurrent marker.
- bool TransitionRequiresSynchronizationWithGC(Map* target);
+ bool TransitionRequiresSynchronizationWithGC(Map* target) const;
// Returns true if transition to the given map removes a tagged in-object
// field.
- bool TransitionRemovesTaggedField(Map* target);
+ bool TransitionRemovesTaggedField(Map* target) const;
// Returns true if transition to the given map replaces a tagged in-object
// field with an untagged in-object field.
- bool TransitionChangesTaggedFieldToUntaggedField(Map* target);
+ bool TransitionChangesTaggedFieldToUntaggedField(Map* target) const;
// TODO(ishell): candidate with JSObject::MigrateToMap().
- bool InstancesNeedRewriting(Map* target);
+ bool InstancesNeedRewriting(Map* target) const;
bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
int target_inobject, int target_unused,
- int* old_number_of_fields);
+ int* old_number_of_fields) const;
// TODO(ishell): moveit!
static Handle<Map> GeneralizeAllFields(Handle<Map> map);
MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
@@ -318,22 +372,17 @@ class Map : public HeapObject {
// A map can never be used for both dictionary mode and fast mode JSObjects.
// False by default and for HeapObjects that are not JSObjects.
inline void set_dictionary_map(bool value);
- inline bool is_dictionary_map();
+ inline bool is_dictionary_map() const;
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
- inline bool is_access_check_needed();
-
- // Returns true if map has a non-empty stub code cache.
- inline bool has_code_cache();
+ inline bool is_access_check_needed() const;
// [prototype]: implicit prototype object.
DECL_ACCESSORS(prototype, Object)
// TODO(jkummerow): make set_prototype private.
- static void SetPrototype(
- Handle<Map> map, Handle<Object> prototype,
- PrototypeOptimizationMode proto_mode = FAST_PROTOTYPE);
+ static void SetPrototype(Handle<Map> map, Handle<Object> prototype);
// [constructor]: points back to the function or FunctionTemplateInfo
// responsible for this map.
@@ -349,7 +398,7 @@ class Map : public HeapObject {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with the constructor (see above).
- inline Object* GetBackPointer();
+ inline Object* GetBackPointer() const;
inline void SetBackPointer(Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -359,13 +408,13 @@ class Map : public HeapObject {
// [layout descriptor]: describes the object layout.
DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
// |layout descriptor| accessor which can be used from GC.
- inline LayoutDescriptor* layout_descriptor_gc_safe();
+ inline LayoutDescriptor* layout_descriptor_gc_safe() const;
inline bool HasFastPointerLayout() const;
// |layout descriptor| accessor that is safe to call even when
// FLAG_unbox_double_fields is disabled (in this case Map does not contain
// |layout_descriptor| field at all).
- inline LayoutDescriptor* GetLayoutDescriptor();
+ inline LayoutDescriptor* GetLayoutDescriptor() const;
inline void UpdateDescriptors(DescriptorArray* descriptors,
LayoutDescriptor* layout_descriptor);
@@ -381,11 +430,11 @@ class Map : public HeapObject {
// [weak cell cache]: cache that stores a weak cell pointing to this map.
DECL_ACCESSORS(weak_cell_cache, Object)
- inline PropertyDetails GetLastDescriptorDetails();
+ inline PropertyDetails GetLastDescriptorDetails() const;
- inline int LastAdded();
+ inline int LastAdded() const;
- inline int NumberOfOwnDescriptors();
+ inline int NumberOfOwnDescriptors() const;
inline void SetNumberOfOwnDescriptors(int number);
inline Cell* RetrieveDescriptorsPointer();
@@ -393,23 +442,23 @@ class Map : public HeapObject {
// Checks whether all properties are stored either in the map or on the object
// (inobject, properties, or elements backing store), requiring no special
// checks.
- bool OnlyHasSimpleProperties();
- inline int EnumLength();
+ bool OnlyHasSimpleProperties() const;
+ inline int EnumLength() const;
inline void SetEnumLength(int length);
- inline bool owns_descriptors();
+ inline bool owns_descriptors() const;
inline void set_owns_descriptors(bool owns_descriptors);
inline void mark_unstable();
- inline bool is_stable();
+ inline bool is_stable() const;
inline void set_migration_target(bool value);
- inline bool is_migration_target();
+ inline bool is_migration_target() const;
inline void set_immutable_proto(bool value);
- inline bool is_immutable_proto();
+ inline bool is_immutable_proto() const;
inline void set_construction_counter(int value);
- inline int construction_counter();
+ inline int construction_counter() const;
inline void deprecate();
- inline bool is_deprecated();
- inline bool CanBeDeprecated();
+ inline bool is_deprecated() const;
+ inline bool CanBeDeprecated() const;
// Returns a non-deprecated version of the input. If the input was not
// deprecated, it is directly returned. Otherwise, the non-deprecated version
// is found by re-transitioning from the root of the transition tree using the
@@ -457,8 +506,7 @@ class Map : public HeapObject {
TransitionFlag flag);
static Handle<Map> AsLanguageMode(Handle<Map> initial_map,
- LanguageMode language_mode,
- FunctionKind kind);
+ Handle<SharedFunctionInfo> shared_info);
static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
PropertyAttributes attrs_to_add,
@@ -470,7 +518,7 @@ class Map : public HeapObject {
// Maximal number of fast properties. Used to restrict the number of map
// transitions to avoid an explosion in the number of maps for objects used as
// dictionaries.
- inline bool TooManyFastProperties(StoreFromKeyed store_mode);
+ inline bool TooManyFastProperties(StoreFromKeyed store_mode) const;
static Handle<Map> TransitionToDataProperty(Handle<Map> map,
Handle<Name> name,
Handle<Object> value,
@@ -499,14 +547,12 @@ class Map : public HeapObject {
static Handle<Map> Create(Isolate* isolate, int inobject_properties);
// Returns the next free property index (only valid for FAST MODE).
- int NextFreePropertyIndex();
+ int NextFreePropertyIndex() const;
- // Returns the number of properties described in instance_descriptors
- // filtering out properties with the specified attributes.
- int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
- PropertyFilter filter = ALL_PROPERTIES);
+ // Returns the number of enumerable properties.
+ int NumberOfEnumerableProperties() const;
- DECLARE_CAST(Map)
+ DECL_CAST(Map)
// Code cache operations.
@@ -538,49 +584,48 @@ class Map : public HeapObject {
// found at all.
Map* FindElementsKindTransitionedMap(MapHandles const& candidates);
- inline bool CanTransition();
+ inline bool CanTransition() const;
- inline bool IsBooleanMap();
- inline bool IsPrimitiveMap();
- inline bool IsJSReceiverMap();
- inline bool IsJSObjectMap();
- inline bool IsJSArrayMap();
- inline bool IsJSFunctionMap();
- inline bool IsStringMap();
- inline bool IsJSProxyMap();
- inline bool IsModuleMap();
- inline bool IsJSGlobalProxyMap();
- inline bool IsJSGlobalObjectMap();
- inline bool IsJSTypedArrayMap();
- inline bool IsJSDataViewMap();
+ inline bool IsBooleanMap() const;
+ inline bool IsPrimitiveMap() const;
+ inline bool IsJSReceiverMap() const;
+ inline bool IsJSObjectMap() const;
+ inline bool IsJSArrayMap() const;
+ inline bool IsJSFunctionMap() const;
+ inline bool IsStringMap() const;
+ inline bool IsJSProxyMap() const;
+ inline bool IsModuleMap() const;
+ inline bool IsJSGlobalProxyMap() const;
+ inline bool IsJSGlobalObjectMap() const;
+ inline bool IsJSTypedArrayMap() const;
+ inline bool IsJSDataViewMap() const;
- inline bool IsSpecialReceiverMap();
+ inline bool IsSpecialReceiverMap() const;
- inline bool CanOmitMapChecks();
+ inline bool CanOmitMapChecks() const;
static void AddDependentCode(Handle<Map> map,
DependentCode::DependencyGroup group,
Handle<Code> code);
- bool IsMapInArrayPrototypeChain();
+ bool IsMapInArrayPrototypeChain() const;
static Handle<WeakCell> WeakCellForMap(Handle<Map> map);
// Dispatched behavior.
- DECLARE_PRINTER(Map)
- DECLARE_VERIFIER(Map)
+ DECL_PRINTER(Map)
+ DECL_VERIFIER(Map)
#ifdef VERIFY_HEAP
void DictionaryMapVerify();
void VerifyOmittedMapChecks();
#endif
- inline int visitor_id();
+ inline int visitor_id() const;
inline void set_visitor_id(int visitor_id);
static Handle<Map> TransitionToPrototype(Handle<Map> map,
- Handle<Object> prototype,
- PrototypeOptimizationMode mode);
+ Handle<Object> prototype);
static Handle<Map> TransitionToImmutableProto(Handle<Map> map);
@@ -665,19 +710,19 @@ class Map : public HeapObject {
// Derived values from bit field 2
static const int8_t kMaximumBitField2FastElementValue =
- static_cast<int8_t>((FAST_ELEMENTS + 1)
+ static_cast<int8_t>((PACKED_ELEMENTS + 1)
<< Map::ElementsKindBits::kShift) -
1;
static const int8_t kMaximumBitField2FastSmiElementValue =
- static_cast<int8_t>((FAST_SMI_ELEMENTS + 1)
+ static_cast<int8_t>((PACKED_SMI_ELEMENTS + 1)
<< Map::ElementsKindBits::kShift) -
1;
static const int8_t kMaximumBitField2FastHoleyElementValue =
- static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1)
+ static_cast<int8_t>((HOLEY_ELEMENTS + 1)
<< Map::ElementsKindBits::kShift) -
1;
static const int8_t kMaximumBitField2FastHoleySmiElementValue =
- static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1)
+ static_cast<int8_t>((HOLEY_SMI_ELEMENTS + 1)
<< Map::ElementsKindBits::kShift) -
1;
@@ -689,10 +734,11 @@ class Map : public HeapObject {
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
// The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
+ bool EquivalentToForNormalization(const Map* other,
+ PropertyNormalizationMode mode) const;
// Returns true if given field is unboxed double.
- inline bool IsUnboxedDoubleField(FieldIndex index);
+ inline bool IsUnboxedDoubleField(FieldIndex index) const;
#if V8_TRACE_MAPS
static void TraceTransition(const char* what, Map* from, Map* to, Name* name);
@@ -708,6 +754,8 @@ class Map : public HeapObject {
// the descriptor array.
inline void NotifyLeafMapLayoutChange();
+ static VisitorId GetVisitorId(Map* map);
+
private:
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
@@ -723,8 +771,8 @@ class Map : public HeapObject {
static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag);
- bool EquivalentToForTransition(Map* other);
- bool EquivalentToForElementsKindTransition(Map* other);
+ bool EquivalentToForTransition(const Map* other) const;
+ bool EquivalentToForElementsKindTransition(const Map* other) const;
static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
static Handle<Map> ShareDescriptor(Handle<Map> map,
Handle<DescriptorArray> descriptors,
@@ -806,11 +854,11 @@ class NormalizedMapCache : public FixedArray {
void Clear();
- DECLARE_CAST(NormalizedMapCache)
+ DECL_CAST(NormalizedMapCache)
static inline bool IsNormalizedMapCache(const HeapObject* obj);
- DECLARE_VERIFIER(NormalizedMapCache)
+ DECL_VERIFIER(NormalizedMapCache)
private:
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/module-info.h b/deps/v8/src/objects/module-info.h
index 099ee5f657..b797db7156 100644
--- a/deps/v8/src/objects/module-info.h
+++ b/deps/v8/src/objects/module-info.h
@@ -24,7 +24,7 @@ class Zone;
// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
class ModuleInfo : public FixedArray {
public:
- DECLARE_CAST(ModuleInfo)
+ DECL_CAST(ModuleInfo)
static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
ModuleDescriptor* descr);
@@ -49,21 +49,24 @@ class ModuleInfo : public FixedArray {
return FixedArray::cast(get(kNamespaceImportsIndex));
}
+ inline FixedArray* module_request_positions() const {
+ return FixedArray::cast(get(kModuleRequestPositionsIndex));
+ }
+
// Accessors for [regular_exports].
int RegularExportCount() const;
String* RegularExportLocalName(int i) const;
int RegularExportCellIndex(int i) const;
FixedArray* RegularExportExportNames(int i) const;
- static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
- Handle<String> local_name);
-
#ifdef DEBUG
inline bool Equals(ModuleInfo* other) const {
return regular_exports() == other->regular_exports() &&
regular_imports() == other->regular_imports() &&
special_exports() == other->special_exports() &&
- namespace_imports() == other->namespace_imports();
+ namespace_imports() == other->namespace_imports() &&
+ module_requests() == other->module_requests() &&
+ module_request_positions() == other->module_request_positions();
}
#endif
@@ -76,6 +79,7 @@ class ModuleInfo : public FixedArray {
kRegularExportsIndex,
kNamespaceImportsIndex,
kRegularImportsIndex,
+ kModuleRequestPositionsIndex,
kLength
};
enum {
@@ -89,9 +93,9 @@ class ModuleInfo : public FixedArray {
class ModuleInfoEntry : public Struct {
public:
- DECLARE_CAST(ModuleInfoEntry)
- DECLARE_PRINTER(ModuleInfoEntry)
- DECLARE_VERIFIER(ModuleInfoEntry)
+ DECL_CAST(ModuleInfoEntry)
+ DECL_PRINTER(ModuleInfoEntry)
+ DECL_VERIFIER(ModuleInfoEntry)
DECL_ACCESSORS(export_name, Object)
DECL_ACCESSORS(local_name, Object)
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
new file mode 100644
index 0000000000..4271a1da14
--- /dev/null
+++ b/deps/v8/src/objects/name-inl.h
@@ -0,0 +1,99 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_NAME_INL_H_
+#define V8_OBJECTS_NAME_INL_H_
+
+#include "src/objects/name.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(Name)
+CAST_ACCESSOR(Symbol)
+
+ACCESSORS(Symbol, name, Object, kNameOffset)
+SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
+BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
+BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
+BOOL_ACCESSORS(Symbol, flags, is_public, kPublicBit)
+
+TYPE_CHECKER(Symbol, SYMBOL_TYPE)
+
+bool Name::IsUniqueName() const {
+ uint32_t type = map()->instance_type();
+ return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
+ (kStringTag | kNotInternalizedTag);
+}
+
+uint32_t Name::hash_field() {
+ return READ_UINT32_FIELD(this, kHashFieldOffset);
+}
+
+void Name::set_hash_field(uint32_t value) {
+ WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
+#if V8_HOST_ARCH_64_BIT
+#if V8_TARGET_LITTLE_ENDIAN
+ WRITE_UINT32_FIELD(this, kHashFieldSlot + kIntSize, 0);
+#else
+ WRITE_UINT32_FIELD(this, kHashFieldSlot, 0);
+#endif
+#endif
+}
+
+bool Name::Equals(Name* other) {
+ if (other == this) return true;
+ if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
+ this->IsSymbol() || other->IsSymbol()) {
+ return false;
+ }
+ return String::cast(this)->SlowEquals(String::cast(other));
+}
+
+bool Name::Equals(Handle<Name> one, Handle<Name> two) {
+ if (one.is_identical_to(two)) return true;
+ if ((one->IsInternalizedString() && two->IsInternalizedString()) ||
+ one->IsSymbol() || two->IsSymbol()) {
+ return false;
+ }
+ return String::SlowEquals(Handle<String>::cast(one),
+ Handle<String>::cast(two));
+}
+
+bool Name::IsHashFieldComputed(uint32_t field) {
+ return (field & kHashNotComputedMask) == 0;
+}
+
+bool Name::HasHashCode() { return IsHashFieldComputed(hash_field()); }
+
+uint32_t Name::Hash() {
+ // Fast case: has hash code already been computed?
+ uint32_t field = hash_field();
+ if (IsHashFieldComputed(field)) return field >> kHashShift;
+ // Slow case: compute hash code and set it. Has to be a string.
+ return String::cast(this)->ComputeAndSetHash();
+}
+
+bool Name::IsPrivate() {
+ return this->IsSymbol() && Symbol::cast(this)->is_private();
+}
+
+bool Name::AsArrayIndex(uint32_t* index) {
+ return IsString() && String::cast(this)->AsArrayIndex(index);
+}
+
+// static
+bool Name::ContainsCachedArrayIndex(uint32_t hash) {
+ return (hash & Name::kDoesNotContainCachedArrayIndexMask) == 0;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_NAME_INL_H_
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
new file mode 100644
index 0000000000..1f35cac865
--- /dev/null
+++ b/deps/v8/src/objects/name.h
@@ -0,0 +1,189 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_NAME_H_
+#define V8_OBJECTS_NAME_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The Name abstract class captures anything that can be used as a property
+// name, i.e., strings and symbols. All names store a hash value.
+class Name : public HeapObject {
+ public:
+ // Get and set the hash field of the name.
+ inline uint32_t hash_field();
+ inline void set_hash_field(uint32_t value);
+
+ // Tells whether the hash code has been computed.
+ inline bool HasHashCode();
+
+  // Returns a hash value used for the property table.
+ inline uint32_t Hash();
+
+ // Equality operations.
+ inline bool Equals(Name* other);
+ inline static bool Equals(Handle<Name> one, Handle<Name> two);
+
+ // Conversion.
+ inline bool AsArrayIndex(uint32_t* index);
+
+ // If the name is private, it can only name own properties.
+ inline bool IsPrivate();
+
+ inline bool IsUniqueName() const;
+
+ static inline bool ContainsCachedArrayIndex(uint32_t hash);
+
+ // Return a string version of this name that is converted according to the
+ // rules described in ES6 section 9.2.11.
+ MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(Handle<Name> name);
+ MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(
+ Handle<Name> name, Handle<String> prefix);
+
+ DECL_CAST(Name)
+
+ DECL_PRINTER(Name)
+#if V8_TRACE_MAPS
+ void NameShortPrint();
+ int NameShortPrint(Vector<char> str);
+#endif
+
+ // Layout description.
+ static const int kHashFieldSlot = HeapObject::kHeaderSize;
+#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
+ static const int kHashFieldOffset = kHashFieldSlot;
+#else
+ static const int kHashFieldOffset = kHashFieldSlot + kIntSize;
+#endif
+ static const int kSize = kHashFieldSlot + kPointerSize;
+
+ // Mask constant for checking if a name has a computed hash code
+ // and if it is a string that is an array index. The least significant bit
+ // indicates whether a hash code has been computed. If the hash code has
+  // been computed, the 2nd bit tells whether the string can be used as an
+ // array index.
+ static const int kHashNotComputedMask = 1;
+ static const int kIsNotArrayIndexMask = 1 << 1;
+ static const int kNofHashBitFields = 2;
+
+ // Shift constant retrieving hash code from hash field.
+ static const int kHashShift = kNofHashBitFields;
+
+ // Only these bits are relevant in the hash, since the top two are shifted
+ // out.
+ static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
+
+ // Array index strings this short can keep their index in the hash field.
+ static const int kMaxCachedArrayIndexLength = 7;
+
+ // Maximum number of characters to consider when trying to convert a string
+ // value into an array index.
+ static const int kMaxArrayIndexSize = 10;
+
+ // For strings which are array indexes the hash value has the string length
+ // mixed into the hash, mainly to avoid a hash value of zero which would be
+ // the case for the string '0'. 24 bits are used for the array index value.
+ static const int kArrayIndexValueBits = 24;
+ static const int kArrayIndexLengthBits =
+ kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
+
+ STATIC_ASSERT(kArrayIndexLengthBits > 0);
+ STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
+
+ class ArrayIndexValueBits
+ : public BitField<unsigned int, kNofHashBitFields, kArrayIndexValueBits> {
+ }; // NOLINT
+ class ArrayIndexLengthBits
+ : public BitField<unsigned int, kNofHashBitFields + kArrayIndexValueBits,
+ kArrayIndexLengthBits> {}; // NOLINT
+
+ // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
+  // can use a mask to test if the length of a string is less than or equal to
+ // kMaxCachedArrayIndexLength.
+ static_assert(base::bits::IsPowerOfTwo(kMaxCachedArrayIndexLength + 1),
+ "(kMaxCachedArrayIndexLength + 1) must be power of two");
+
+  // When any of these bits is set, the hash field does not contain a cached
+ // array index.
+ static const unsigned int kDoesNotContainCachedArrayIndexMask =
+ (~static_cast<unsigned>(kMaxCachedArrayIndexLength)
+ << ArrayIndexLengthBits::kShift) |
+ kIsNotArrayIndexMask;
+
+ // Value of empty hash field indicating that the hash is not computed.
+ static const int kEmptyHashField =
+ kIsNotArrayIndexMask | kHashNotComputedMask;
+
+ protected:
+ static inline bool IsHashFieldComputed(uint32_t field);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Name);
+};
+
+// ES6 symbols.
+class Symbol : public Name {
+ public:
+ // [name]: The print name of a symbol, or undefined if none.
+ DECL_ACCESSORS(name, Object)
+
+ DECL_INT_ACCESSORS(flags)
+
+ // [is_private]: Whether this is a private symbol. Private symbols can only
+ // be used to designate own properties of objects.
+ DECL_BOOLEAN_ACCESSORS(is_private)
+
+ // [is_well_known_symbol]: Whether this is a spec-defined well-known symbol,
+ // or not. Well-known symbols do not throw when an access check fails during
+ // a load.
+ DECL_BOOLEAN_ACCESSORS(is_well_known_symbol)
+
+ // [is_public]: Whether this is a symbol created by Symbol.for. Calling
+ // Symbol.keyFor on such a symbol simply needs to return the attached name.
+ DECL_BOOLEAN_ACCESSORS(is_public)
+
+ DECL_CAST(Symbol)
+
+ // Dispatched behavior.
+ DECL_PRINTER(Symbol)
+ DECL_VERIFIER(Symbol)
+
+ // Layout description.
+ static const int kNameOffset = Name::kSize;
+ static const int kFlagsOffset = kNameOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+
+ // Flags layout.
+ static const int kPrivateBit = 0;
+ static const int kWellKnownSymbolBit = 1;
+ static const int kPublicBit = 2;
+
+ typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ void SymbolShortPrint(std::ostream& os);
+
+ private:
+ const char* PrivateSymbolToName() const;
+
+#if V8_TRACE_MAPS
+ friend class Name; // For PrivateSymbolToName.
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_NAME_H_
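A standalone sketch of the cached-array-index layout documented above: two flag bits at the bottom, 24 bits of index value, and the string length on top, with kDoesNotContainCachedArrayIndexMask rejecting any field whose length exceeds kMaxCachedArrayIndexLength or whose not-an-array-index bit is set. The packing helper is hypothetical; only the constants follow the header.

#include <cassert>
#include <cstdint>

namespace sketch {

// Constants copied from the layout documented in name.h.
constexpr int kNofHashBitFields = 2;
constexpr uint32_t kIsNotArrayIndexMask = 1u << 1;
constexpr int kArrayIndexValueBits = 24;
constexpr uint32_t kMaxCachedArrayIndexLength = 7;
constexpr int kArrayIndexLengthShift = kNofHashBitFields + kArrayIndexValueBits;

constexpr uint32_t kDoesNotContainCachedArrayIndexMask =
    (~kMaxCachedArrayIndexLength << kArrayIndexLengthShift) |
    kIsNotArrayIndexMask;

// Hypothetical packing helper: places the index above the two flag bits and
// the string length above the 24 value bits, the way a cached index is stored.
inline uint32_t MakeCachedIndexField(uint32_t index, uint32_t length) {
  return (index << kNofHashBitFields) | (length << kArrayIndexLengthShift);
}

inline bool ContainsCachedArrayIndex(uint32_t field) {
  return (field & kDoesNotContainCachedArrayIndexMask) == 0;
}

}  // namespace sketch

int main() {
  // "42": length 2, value 42 -- both fit, so the hash field caches the index.
  assert(sketch::ContainsCachedArrayIndex(sketch::MakeCachedIndexField(42, 2)));
  // "12345678": 8 characters exceed kMaxCachedArrayIndexLength (7), so the
  // length bits trip the mask and the index is not treated as cached.
  assert(!sketch::ContainsCachedArrayIndex(
      sketch::MakeCachedIndexField(12345678, 8)));
  return 0;
}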
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index 571e84a3e9..6d5f10c07d 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -2,9 +2,60 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#undef CAST_ACCESSOR
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
#undef DECL_ACCESSORS
-#undef DECLARE_CAST
-#undef DECLARE_VERIFIER
+#undef DECL_CAST
+#undef CAST_ACCESSOR
+#undef INT_ACCESSORS
+#undef ACCESSORS_CHECKED2
+#undef ACCESSORS_CHECKED
+#undef ACCESSORS
+#undef SMI_ACCESSORS_CHECKED
+#undef SMI_ACCESSORS
+#undef SYNCHRONIZED_SMI_ACCESSORS
+#undef RELAXED_SMI_ACCESSORS
+#undef BOOL_GETTER
+#undef BOOL_ACCESSORS
+#undef TYPE_CHECKER
+#undef FIELD_ADDR
+#undef FIELD_ADDR_CONST
+#undef READ_FIELD
+#undef ACQUIRE_READ_FIELD
+#undef RELAXED_READ_FIELD
+#undef WRITE_FIELD
+#undef RELEASE_WRITE_FIELD
+#undef RELAXED_WRITE_FIELD
+#undef WRITE_BARRIER
+#undef CONDITIONAL_WRITE_BARRIER
+#undef READ_DOUBLE_FIELD
+#undef WRITE_DOUBLE_FIELD
+#undef READ_INT_FIELD
+#undef WRITE_INT_FIELD
+#undef READ_INTPTR_FIELD
+#undef WRITE_INTPTR_FIELD
+#undef READ_UINT8_FIELD
+#undef WRITE_UINT8_FIELD
+#undef READ_INT8_FIELD
+#undef WRITE_INT8_FIELD
+#undef READ_UINT16_FIELD
+#undef WRITE_UINT16_FIELD
+#undef READ_INT16_FIELD
+#undef WRITE_INT16_FIELD
+#undef READ_UINT32_FIELD
+#undef WRITE_UINT32_FIELD
+#undef READ_INT32_FIELD
+#undef WRITE_INT32_FIELD
+#undef READ_FLOAT_FIELD
+#undef WRITE_FLOAT_FIELD
+#undef READ_UINT64_FIELD
+#undef WRITE_UINT64_FIELD
+#undef READ_INT64_FIELD
+#undef WRITE_INT64_FIELD
+#undef READ_BYTE_FIELD
+#undef RELAXED_READ_BYTE_FIELD
+#undef WRITE_BYTE_FIELD
+#undef RELAXED_WRITE_BYTE_FIELD
+#undef DECL_VERIFIER
+#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
+#undef DEFINE_DEOPT_ENTRY_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index c03026d51b..e672af14c9 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -8,6 +8,10 @@
// Note 2: This file is deliberately missing the include guards (the undeffing
// approach wouldn't work otherwise).
+// The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
+// for fields that can be written to and read from multiple threads at the same
+// time. See comments in src/base/atomicops.h for the memory ordering semantics.
+
#define DECL_BOOLEAN_ACCESSORS(name) \
inline bool name() const; \
inline void set_##name(bool value);
@@ -21,7 +25,7 @@
inline void set_##name(type* value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-#define DECLARE_CAST(type) \
+#define DECL_CAST(type) \
INLINE(static type* cast(Object* object)); \
INLINE(static const type* cast(const Object* object));
@@ -35,8 +39,242 @@
return reinterpret_cast<const type*>(object); \
}
+#define INT_ACCESSORS(holder, name, offset) \
+ int holder::name() const { return READ_INT_FIELD(this, offset); } \
+ void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
+
+#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+ set_condition) \
+ type* holder::name() const { \
+ DCHECK(get_condition); \
+ return type::cast(READ_FIELD(this, offset)); \
+ } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+ }
+#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+ ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
+
+#define ACCESSORS(holder, name, type, offset) \
+ ACCESSORS_CHECKED(holder, name, type, offset, true)
+
+// Getter and setter that read a Smi as an int and write an int as a Smi.
+#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
+ int holder::name() const { \
+ DCHECK(condition); \
+ Object* value = READ_FIELD(this, offset); \
+ return Smi::ToInt(value); \
+ } \
+ void holder::set_##name(int value) { \
+ DCHECK(condition); \
+ WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ }
+
+#define SMI_ACCESSORS(holder, name, offset) \
+ SMI_ACCESSORS_CHECKED(holder, name, offset, true)
+
+#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::synchronized_##name() const { \
+ Object* value = ACQUIRE_READ_FIELD(this, offset); \
+ return Smi::ToInt(value); \
+ } \
+ void holder::synchronized_set_##name(int value) { \
+ RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ }
+
+#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::relaxed_read_##name() const { \
+ Object* value = RELAXED_READ_FIELD(this, offset); \
+ return Smi::ToInt(value); \
+ } \
+ void holder::relaxed_write_##name(int value) { \
+ RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+ }
+
+#define BOOL_GETTER(holder, field, name, offset) \
+ bool holder::name() const { return BooleanBit::get(field(), offset); }
+
+#define BOOL_ACCESSORS(holder, field, name, offset) \
+ bool holder::name() const { return BooleanBit::get(field(), offset); } \
+ void holder::set_##name(bool value) { \
+ set_##field(BooleanBit::set(field(), offset, value)); \
+ }
+
+#define BIT_FIELD_ACCESSORS(holder, field, name, BitField) \
+ typename BitField::FieldType holder::name() const { \
+ return BitField::decode(field()); \
+ } \
+ void holder::set_##name(typename BitField::FieldType value) { \
+ set_##field(BitField::update(field(), value)); \
+ }
+
+#define TYPE_CHECKER(type, instancetype) \
+ bool HeapObject::Is##type() const { \
+ return map()->instance_type() == instancetype; \
+ }
+
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+#define FIELD_ADDR_CONST(p, offset) \
+ (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
+
+#define READ_FIELD(p, offset) \
+ (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))
+
+#define ACQUIRE_READ_FIELD(p, offset) \
+ reinterpret_cast<Object*>(base::Acquire_Load( \
+ reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
+
+#define RELAXED_READ_FIELD(p, offset) \
+ reinterpret_cast<Object*>(base::Relaxed_Load( \
+ reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
+
+#ifdef V8_CONCURRENT_MARKING
+#define WRITE_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
+#else
+#define WRITE_FIELD(p, offset, value) \
+ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+#endif
+
+#define RELEASE_WRITE_FIELD(p, offset, value) \
+ base::Release_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
+
+#define RELAXED_WRITE_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
+
+#define WRITE_BARRIER(heap, object, offset, value) \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ heap->RecordWrite(object, offset, value);
+
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ } \
+ heap->RecordWrite(object, offset, value); \
+ }
+
+#define READ_DOUBLE_FIELD(p, offset) \
+ ReadDoubleValue(FIELD_ADDR_CONST(p, offset))
+
+#define WRITE_DOUBLE_FIELD(p, offset, value) \
+ WriteDoubleValue(FIELD_ADDR(p, offset), value)
+
+#define READ_INT_FIELD(p, offset) \
+ (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT_FIELD(p, offset, value) \
+ (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INTPTR_FIELD(p, offset) \
+ (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define RELAXED_WRITE_INTPTR_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::AtomicWord>(value));
+
+#define WRITE_INTPTR_FIELD(p, offset, value) \
+ (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_UINT8_FIELD(p, offset) \
+ (*reinterpret_cast<const uint8_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT8_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT8_FIELD(p, offset) \
+ (*reinterpret_cast<const int8_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT8_FIELD(p, offset, value) \
+ (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_UINT16_FIELD(p, offset) \
+ (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT16_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT16_FIELD(p, offset) \
+ (*reinterpret_cast<const int16_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT16_FIELD(p, offset, value) \
+ (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_UINT32_FIELD(p, offset) \
+ (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT32_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT32_FIELD(p, offset) \
+ (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT32_FIELD(p, offset, value) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_FLOAT_FIELD(p, offset) \
+ (*reinterpret_cast<const float*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_FLOAT_FIELD(p, offset, value) \
+ (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_UINT64_FIELD(p, offset) \
+ (*reinterpret_cast<const uint64_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT64_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT64_FIELD(p, offset) \
+ (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT64_FIELD(p, offset, value) \
+ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_BYTE_FIELD(p, offset) \
+ (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
+
+#define RELAXED_READ_BYTE_FIELD(p, offset) \
+ static_cast<byte>(base::Relaxed_Load( \
+ reinterpret_cast<const base::Atomic8*>(FIELD_ADDR_CONST(p, offset))))
+
+#define WRITE_BYTE_FIELD(p, offset, value) \
+ (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
+
+#define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \
+ base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic8>(value));
+
#ifdef VERIFY_HEAP
-#define DECLARE_VERIFIER(Name) void Name##Verify();
+#define DECL_VERIFIER(Name) void Name##Verify();
#else
-#define DECLARE_VERIFIER(Name)
+#define DECL_VERIFIER(Name)
#endif
+
+#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
+ type* DeoptimizationInputData::name() { \
+ return type::cast(get(k##name##Index)); \
+ } \
+ void DeoptimizationInputData::Set##name(type* value) { \
+ set(k##name##Index, value); \
+ }
+
+#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
+ type* DeoptimizationInputData::name(int i) { \
+ return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
+ } \
+ void DeoptimizationInputData::Set##name(int i, type* value) { \
+ set(IndexForEntry(i) + k##name##Offset, value); \
+ }
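All of the accessor macros above share one shape: a single invocation expands to an out-of-line getter/setter pair on the holder class. A stripped-down, standalone stand-in (no tagged fields, no write barriers) makes that expansion visible; SKETCH_INT_ACCESSORS is hypothetical and only mimics the structure of INT_ACCESSORS.

#include <cassert>

// A stripped-down stand-in for INT_ACCESSORS: one invocation defines both
// out-of-line accessor bodies on the holder class. The real macro reads a raw
// field at a byte offset; here a plain data member stands in for the field.
#define SKETCH_INT_ACCESSORS(holder, name, member)        \
  int holder::name() const { return member; }             \
  void holder::set_##name(int value) { member = value; }

class Counter {
 public:
  int count() const;        // what DECL_INT_ACCESSORS(count) would declare
  void set_count(int value);

 private:
  int count_ = 0;
};

SKETCH_INT_ACCESSORS(Counter, count, count_)

int main() {
  Counter c;
  c.set_count(3);
  assert(c.count() == 3);
  return 0;
}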
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 327ded3247..24eb805f5a 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -48,7 +48,7 @@ class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
static Handle<RegExpMatchInfo> ReserveCaptures(
Handle<RegExpMatchInfo> match_info, int capture_count);
- DECLARE_CAST(RegExpMatchInfo)
+ DECL_CAST(RegExpMatchInfo)
static const int kNumberOfCapturesIndex = 0;
static const int kLastSubjectIndex = 1;
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index ae828cc1f0..9a30b710d7 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -50,7 +50,6 @@ bool ScopeInfo::Equals(ScopeInfo* other) const {
}
} else {
UNREACHABLE();
- return false;
}
}
}
@@ -501,14 +500,6 @@ void ScopeInfo::SetIsDebugEvaluateScope() {
}
}
-bool ScopeInfo::HasHeapAllocatedLocals() {
- if (length() > 0) {
- return ContextLocalCount() > 0;
- } else {
- return false;
- }
-}
-
bool ScopeInfo::HasContext() { return ContextLength() > 0; }
String* ScopeInfo::FunctionName() {
@@ -552,7 +543,7 @@ String* ScopeInfo::StackLocalName(int var) {
int ScopeInfo::StackLocalIndex(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, StackLocalCount());
- int first_slot_index = Smi::cast(get(StackLocalFirstSlotIndex()))->value();
+ int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
return first_slot_index + var;
}
@@ -567,7 +558,7 @@ VariableMode ScopeInfo::ContextLocalMode(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::cast(get(info_index))->value();
+ int value = Smi::ToInt(get(info_index));
return VariableModeField::decode(value);
}
@@ -575,7 +566,7 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::cast(get(info_index))->value();
+ int value = Smi::ToInt(get(info_index));
return InitFlagField::decode(value);
}
@@ -583,7 +574,7 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalInfosIndex() + var;
- int value = Smi::cast(get(info_index))->value();
+ int value = Smi::ToInt(get(info_index));
return MaybeAssignedFlagField::decode(value);
}
@@ -599,7 +590,7 @@ bool ScopeInfo::VariableIsSynthetic(String* name) {
int ScopeInfo::StackSlotIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
- int first_slot_index = Smi::cast(get(StackLocalFirstSlotIndex()))->value();
+ int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
int start = StackLocalNamesIndex();
int end = start + StackLocalCount();
for (int i = start; i < end; ++i) {
@@ -615,15 +606,15 @@ int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DCHECK_EQ(scope_type(), MODULE_SCOPE);
- DCHECK(name->IsInternalizedString());
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
DCHECK_NOT_NULL(maybe_assigned_flag);
- int module_vars_count = Smi::cast(get(ModuleVariableCountIndex()))->value();
+ int module_vars_count = Smi::ToInt(get(ModuleVariableCountIndex()));
int entry = ModuleVariablesIndex();
for (int i = 0; i < module_vars_count; ++i) {
- if (*name == get(entry + kModuleVariableNameOffset)) {
+ String* var_name = String::cast(get(entry + kModuleVariableNameOffset));
+ if (name->Equals(var_name)) {
int index;
ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
return index;
@@ -677,13 +668,6 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
return -1;
}
-String* ScopeInfo::ContextSlotName(int slot_index) {
- int const var = slot_index - Context::MIN_CONTEXT_SLOTS;
- DCHECK_LE(0, var);
- DCHECK_LT(var, ContextLocalCount());
- return ContextLocalName(var);
-}
-
int ScopeInfo::ParameterIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
@@ -705,7 +689,7 @@ int ScopeInfo::ParameterIndex(String* name) {
int ScopeInfo::ReceiverContextSlotIndex() {
if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
- return Smi::cast(get(ReceiverInfoIndex()))->value();
+ return Smi::ToInt(get(ReceiverInfoIndex()));
return -1;
}
@@ -714,7 +698,7 @@ int ScopeInfo::FunctionContextSlotIndex(String* name) {
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
FunctionName() == name) {
- return Smi::cast(get(FunctionNameInfoIndex() + 1))->value();
+ return Smi::ToInt(get(FunctionNameInfoIndex() + 1));
}
}
return -1;
@@ -768,17 +752,16 @@ void ScopeInfo::ModuleVariable(int i, String** name, int* index,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DCHECK_LE(0, i);
- DCHECK_LT(i, Smi::cast(get(ModuleVariableCountIndex()))->value());
+ DCHECK_LT(i, Smi::ToInt(get(ModuleVariableCountIndex())));
int entry = ModuleVariablesIndex() + i * kModuleVariableEntryLength;
- int properties =
- Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
+ int properties = Smi::ToInt(get(entry + kModuleVariablePropertiesOffset));
if (name != nullptr) {
*name = String::cast(get(entry + kModuleVariableNameOffset));
}
if (index != nullptr) {
- *index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
+ *index = Smi::ToInt(get(entry + kModuleVariableIndexOffset));
DCHECK_NE(*index, 0);
}
if (mode != nullptr) {
@@ -854,10 +837,14 @@ Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
ModuleDescriptor* descr) {
// Serialize module requests.
- Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(
- static_cast<int>(descr->module_requests().size()));
+ int size = static_cast<int>(descr->module_requests().size());
+ Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(size);
+ Handle<FixedArray> module_request_positions =
+ isolate->factory()->NewFixedArray(size);
for (const auto& elem : descr->module_requests()) {
- module_requests->set(elem.second, *elem.first->string());
+ module_requests->set(elem.second.index, *elem.first->string());
+ module_request_positions->set(elem.second.index,
+ Smi::FromInt(elem.second.position));
}
// Serialize special exports.
@@ -904,6 +891,7 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
result->set(kRegularExportsIndex, *regular_exports);
result->set(kNamespaceImportsIndex, *namespace_imports);
result->set(kRegularImportsIndex, *regular_imports);
+ result->set(kModuleRequestPositionsIndex, *module_request_positions);
return result;
}
@@ -928,20 +916,5 @@ FixedArray* ModuleInfo::RegularExportExportNames(int i) const {
i * kRegularExportLength + kRegularExportExportNamesOffset));
}
-Handle<ModuleInfoEntry> ModuleInfo::LookupRegularImport(
- Handle<ModuleInfo> info, Handle<String> local_name) {
- Isolate* isolate = info->GetIsolate();
- Handle<FixedArray> regular_imports(info->regular_imports(), isolate);
- for (int i = 0, n = regular_imports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
- if (String::cast(entry->local_name())->Equals(*local_name)) {
- return entry;
- }
- }
- UNREACHABLE();
- return Handle<ModuleInfoEntry>();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 75a374d5d8..e60ac99162 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -32,7 +32,7 @@ class Zone;
// routines.
class ScopeInfo : public FixedArray {
public:
- DECLARE_CAST(ScopeInfo)
+ DECL_CAST(ScopeInfo)
// Return the type of this scope.
ScopeType scope_type();
@@ -81,9 +81,6 @@ class ScopeInfo : public FixedArray {
// Is this scope the scope of a named function expression?
bool HasFunctionName();
- // Return if this has context allocated locals.
- bool HasHeapAllocatedLocals();
-
// Return if contexts are allocated for this scope.
bool HasContext();
@@ -152,9 +149,6 @@ class ScopeInfo : public FixedArray {
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
- // Lookup the name of a certain context slot by its index.
- String* ContextSlotName(int slot_index);
-
// Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
// otherwise returns a value < 0. The name must be an internalized string.
@@ -220,7 +214,7 @@ class ScopeInfo : public FixedArray {
inline void Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
inline int name() { \
if (length() > 0) { \
- return Smi::cast(get(k##name))->value(); \
+ return Smi::ToInt(get(k##name)); \
} else { \
return 0; \
} \
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
new file mode 100644
index 0000000000..7a639080c7
--- /dev/null
+++ b/deps/v8/src/objects/script-inl.h
@@ -0,0 +1,85 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SCRIPT_INL_H_
+#define V8_OBJECTS_SCRIPT_INL_H_
+
+#include "src/objects/script.h"
+
+#include "src/objects/string-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(Script)
+
+ACCESSORS(Script, source, Object, kSourceOffset)
+ACCESSORS(Script, name, Object, kNameOffset)
+SMI_ACCESSORS(Script, id, kIdOffset)
+SMI_ACCESSORS(Script, line_offset, kLineOffsetOffset)
+SMI_ACCESSORS(Script, column_offset, kColumnOffsetOffset)
+ACCESSORS(Script, context_data, Object, kContextOffset)
+ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
+SMI_ACCESSORS(Script, type, kTypeOffset)
+ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
+ACCESSORS_CHECKED(Script, eval_from_shared, Object, kEvalFromSharedOffset,
+ this->type() != TYPE_WASM)
+SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
+ this->type() != TYPE_WASM)
+ACCESSORS(Script, shared_function_infos, FixedArray, kSharedFunctionInfosOffset)
+SMI_ACCESSORS(Script, flags, kFlagsOffset)
+ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
+ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
+ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
+ this->type() == TYPE_WASM)
+
+Script::CompilationType Script::compilation_type() {
+ return BooleanBit::get(flags(), kCompilationTypeBit) ? COMPILATION_TYPE_EVAL
+ : COMPILATION_TYPE_HOST;
+}
+void Script::set_compilation_type(CompilationType type) {
+ set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
+ type == COMPILATION_TYPE_EVAL));
+}
+Script::CompilationState Script::compilation_state() {
+ return BooleanBit::get(flags(), kCompilationStateBit)
+ ? COMPILATION_STATE_COMPILED
+ : COMPILATION_STATE_INITIAL;
+}
+void Script::set_compilation_state(CompilationState state) {
+ set_flags(BooleanBit::set(flags(), kCompilationStateBit,
+ state == COMPILATION_STATE_COMPILED));
+}
+ScriptOriginOptions Script::origin_options() {
+ return ScriptOriginOptions((flags() & kOriginOptionsMask) >>
+ kOriginOptionsShift);
+}
+void Script::set_origin_options(ScriptOriginOptions origin_options) {
+ DCHECK(!(origin_options.Flags() & ~((1 << kOriginOptionsSize) - 1)));
+ set_flags((flags() & ~kOriginOptionsMask) |
+ (origin_options.Flags() << kOriginOptionsShift));
+}
+
+bool Script::HasValidSource() {
+ Object* src = this->source();
+ if (!src->IsString()) return true;
+ String* src_str = String::cast(src);
+ if (!StringShape(src_str).IsExternal()) return true;
+ if (src_str->IsOneByteRepresentation()) {
+ return ExternalOneByteString::cast(src)->resource() != NULL;
+ } else if (src_str->IsTwoByteRepresentation()) {
+ return ExternalTwoByteString::cast(src)->resource() != NULL;
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SCRIPT_INL_H_
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
new file mode 100644
index 0000000000..fc9385d609
--- /dev/null
+++ b/deps/v8/src/objects/script.h
@@ -0,0 +1,217 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SCRIPT_H_
+#define V8_OBJECTS_SCRIPT_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Script describes a script which has been added to the VM.
+class Script : public Struct {
+ public:
+ // Script types.
+ enum Type {
+ TYPE_NATIVE = 0,
+ TYPE_EXTENSION = 1,
+ TYPE_NORMAL = 2,
+ TYPE_WASM = 3,
+ TYPE_INSPECTOR = 4
+ };
+
+ // Script compilation types.
+ enum CompilationType { COMPILATION_TYPE_HOST = 0, COMPILATION_TYPE_EVAL = 1 };
+
+ // Script compilation state.
+ enum CompilationState {
+ COMPILATION_STATE_INITIAL = 0,
+ COMPILATION_STATE_COMPILED = 1
+ };
+
+ // [source]: the script source.
+ DECL_ACCESSORS(source, Object)
+
+ // [name]: the script name.
+ DECL_ACCESSORS(name, Object)
+
+ // [id]: the script id.
+ DECL_INT_ACCESSORS(id)
+
+ // [line_offset]: script line offset in resource from where it was extracted.
+ DECL_INT_ACCESSORS(line_offset)
+
+ // [column_offset]: script column offset in resource from where it was
+ // extracted.
+ DECL_INT_ACCESSORS(column_offset)
+
+ // [context_data]: context data for the context this script was compiled in.
+ DECL_ACCESSORS(context_data, Object)
+
+ // [wrapper]: the wrapper cache. This is either undefined or a WeakCell.
+ DECL_ACCESSORS(wrapper, HeapObject)
+
+ // [type]: the script type.
+ DECL_INT_ACCESSORS(type)
+
+ // [line_ends]: FixedArray of line ends positions.
+ DECL_ACCESSORS(line_ends, Object)
+
+ // [eval_from_shared]: for eval scripts the shared function info for the
+ // function from which eval was called.
+ DECL_ACCESSORS(eval_from_shared, Object)
+
+ // [eval_from_position]: the source position in the code for the function
+  // from which eval was called, as a positive integer, or the code offset in
+  // the code from which eval was called, as a negative integer.
+ DECL_INT_ACCESSORS(eval_from_position)
+
+ // [shared_function_infos]: weak fixed array containing all shared
+ // function infos created from this script.
+ DECL_ACCESSORS(shared_function_infos, FixedArray)
+
+ // [flags]: Holds an exciting bitfield.
+ DECL_INT_ACCESSORS(flags)
+
+ // [source_url]: sourceURL from magic comment
+ DECL_ACCESSORS(source_url, Object)
+
+ // [source_mapping_url]: sourceMappingURL magic comment
+ DECL_ACCESSORS(source_mapping_url, Object)
+
+ // [wasm_compiled_module]: the compiled wasm module this script belongs to.
+ // This must only be called if the type of this script is TYPE_WASM.
+ DECL_ACCESSORS(wasm_compiled_module, Object)
+
+  // [compilation_type]: how the script was compiled. Encoded in the
+ // 'flags' field.
+ inline CompilationType compilation_type();
+ inline void set_compilation_type(CompilationType type);
+
+ // [compilation_state]: determines whether the script has already been
+ // compiled. Encoded in the 'flags' field.
+ inline CompilationState compilation_state();
+ inline void set_compilation_state(CompilationState state);
+
+ // [origin_options]: optional attributes set by the embedder via ScriptOrigin,
+ // and used by the embedder to make decisions about the script. V8 just passes
+ // this through. Encoded in the 'flags' field.
+ inline v8::ScriptOriginOptions origin_options();
+ inline void set_origin_options(ScriptOriginOptions origin_options);
+
+ DECL_CAST(Script)
+
+ // If script source is an external string, check that the underlying
+ // resource is accessible. Otherwise, always return true.
+ inline bool HasValidSource();
+
+ Object* GetNameOrSourceURL();
+
+ // Set eval origin for stack trace formatting.
+ static void SetEvalOrigin(Handle<Script> script,
+ Handle<SharedFunctionInfo> outer,
+ int eval_position);
+ // Retrieve source position from where eval was called.
+ int GetEvalPosition();
+
+ // Init line_ends array with source code positions of line ends.
+ static void InitLineEnds(Handle<Script> script);
+
+ // Carries information about a source position.
+ struct PositionInfo {
+ PositionInfo() : line(-1), column(-1), line_start(-1), line_end(-1) {}
+
+ int line; // Zero-based line number.
+ int column; // Zero-based column number.
+ int line_start; // Position of first character in line.
+ int line_end; // Position of final linebreak character in line.
+ };
+
+ // Specifies whether to add offsets to position infos.
+ enum OffsetFlag { NO_OFFSET = 0, WITH_OFFSET = 1 };
+
+ // Retrieves information about the given position, optionally with an offset.
+ // Returns false on failure, and otherwise writes into the given info object
+ // on success.
+  // The static method is preferable for handlified callsites because it
+ // initializes the line ends array, avoiding expensive recomputations.
+  // The non-static version does not allocate and is safe for unhandlified
+  // callsites.
+ static bool GetPositionInfo(Handle<Script> script, int position,
+ PositionInfo* info, OffsetFlag offset_flag);
+ bool GetPositionInfo(int position, PositionInfo* info,
+ OffsetFlag offset_flag) const;
+
+ bool IsUserJavaScript();
+
+ // Wrappers for GetPositionInfo
+ static int GetColumnNumber(Handle<Script> script, int code_offset);
+ int GetColumnNumber(int code_pos) const;
+ static int GetLineNumber(Handle<Script> script, int code_offset);
+ int GetLineNumber(int code_pos) const;
+
+ // Get the JS object wrapping the given script; create it if none exists.
+ static Handle<JSObject> GetWrapper(Handle<Script> script);
+
+ // Look through the list of existing shared function infos to find one
+ // that matches the function literal. Return empty handle if not found.
+ MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
+ Isolate* isolate, const FunctionLiteral* fun);
+
+ // Iterate over all script objects on the heap.
+ class Iterator {
+ public:
+ explicit Iterator(Isolate* isolate);
+ Script* Next();
+
+ private:
+ WeakFixedArray::Iterator iterator_;
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
+ // Dispatched behavior.
+ DECL_PRINTER(Script)
+ DECL_VERIFIER(Script)
+
+ static const int kSourceOffset = HeapObject::kHeaderSize;
+ static const int kNameOffset = kSourceOffset + kPointerSize;
+ static const int kLineOffsetOffset = kNameOffset + kPointerSize;
+ static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
+ static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
+ static const int kWrapperOffset = kContextOffset + kPointerSize;
+ static const int kTypeOffset = kWrapperOffset + kPointerSize;
+ static const int kLineEndsOffset = kTypeOffset + kPointerSize;
+ static const int kIdOffset = kLineEndsOffset + kPointerSize;
+ static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
+ static const int kEvalFromPositionOffset =
+ kEvalFromSharedOffset + kPointerSize;
+ static const int kSharedFunctionInfosOffset =
+ kEvalFromPositionOffset + kPointerSize;
+ static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
+ static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
+ static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
+ static const int kSize = kSourceMappingUrlOffset + kPointerSize;
+
+ private:
+ // Bit positions in the flags field.
+ static const int kCompilationTypeBit = 0;
+ static const int kCompilationStateBit = 1;
+ static const int kOriginOptionsShift = 2;
+ static const int kOriginOptionsSize = 4;
+ static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
+ << kOriginOptionsShift;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SCRIPT_H_
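A standalone sketch of how the Script flags word above is packed: one boolean bit each for compilation type and compilation state, plus a four-bit origin-options field shifted above them. BooleanBit is a local stand-in for V8's helper; the bit positions copy the private constants declared in this header.

#include <cassert>

namespace sketch {

constexpr int kCompilationTypeBit = 0;
constexpr int kCompilationStateBit = 1;
constexpr int kOriginOptionsShift = 2;
constexpr int kOriginOptionsSize = 4;
constexpr int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
                                   << kOriginOptionsShift;

// Local stand-in for V8's BooleanBit helper.
struct BooleanBit {
  static bool get(int flags, int bit) { return (flags >> bit) & 1; }
  static int set(int flags, int bit, bool value) {
    return value ? (flags | (1 << bit)) : (flags & ~(1 << bit));
  }
};

}  // namespace sketch

int main() {
  using namespace sketch;
  int flags = 0;
  flags = BooleanBit::set(flags, kCompilationTypeBit, true);  // an eval script
  flags = (flags & ~kOriginOptionsMask) | (0b0101 << kOriginOptionsShift);
  assert(BooleanBit::get(flags, kCompilationTypeBit));
  assert(!BooleanBit::get(flags, kCompilationStateBit));      // not compiled yet
  assert(((flags & kOriginOptionsMask) >> kOriginOptionsShift) == 0b0101);
  return 0;
}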
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
new file mode 100644
index 0000000000..028d3cf086
--- /dev/null
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -0,0 +1,416 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
+#define V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
+
+#include "src/heap/heap-inl.h"
+#include "src/objects/shared-function-info.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(PreParsedScopeData)
+ACCESSORS(PreParsedScopeData, scope_data, PodArray<uint32_t>, kScopeDataOffset)
+ACCESSORS(PreParsedScopeData, child_data, FixedArray, kChildDataOffset)
+
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+CAST_ACCESSOR(SharedFunctionInfo)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+
+ACCESSORS(SharedFunctionInfo, raw_name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
+ kFeedbackMetadataOffset)
+ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
+ kInstanceClassNameOffset)
+ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
+ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
+ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
+ACCESSORS(SharedFunctionInfo, function_identifier, Object,
+ kFunctionIdentifierOffset)
+ACCESSORS(SharedFunctionInfo, preparsed_scope_data, Object,
+ kPreParsedScopeDataOffset)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+ is_named_expression,
+ SharedFunctionInfo::IsNamedExpressionBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
+ SharedFunctionInfo::IsTopLevelBit)
+
+INT_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
+#if V8_SFI_HAS_UNIQUE_ID
+INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
+#endif
+INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
+INT_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
+ kFormalParameterCountOffset)
+INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
+INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
+INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+ kStartPositionAndTypeOffset)
+INT_ACCESSORS(SharedFunctionInfo, function_token_position,
+ kFunctionTokenPositionOffset)
+INT_ACCESSORS(SharedFunctionInfo, compiler_hints, kCompilerHintsOffset)
+INT_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
+INT_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
+INT_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
+INT_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
+
+bool SharedFunctionInfo::has_shared_name() const {
+ return raw_name() != kNoSharedNameSentinel;
+}
+
+String* SharedFunctionInfo::name() const {
+ if (!has_shared_name()) return GetHeap()->empty_string();
+ DCHECK(raw_name()->IsString());
+ return String::cast(raw_name());
+}
+
+void SharedFunctionInfo::set_name(String* name) {
+ set_raw_name(name);
+ UpdateFunctionMapIndex();
+}
+
+AbstractCode* SharedFunctionInfo::abstract_code() {
+ if (HasBytecodeArray()) {
+ return AbstractCode::cast(bytecode_array());
+ } else {
+ return AbstractCode::cast(code());
+ }
+}
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
+ SharedFunctionInfo::AllowLazyCompilationBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_arguments,
+ SharedFunctionInfo::UsesArgumentsBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
+ has_duplicate_parameters,
+ SharedFunctionInfo::HasDuplicateParametersBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function,
+ SharedFunctionInfo::IsAsmFunctionBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
+ SharedFunctionInfo::IsDeclarationBit)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, native,
+ SharedFunctionInfo::IsNativeBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline,
+ SharedFunctionInfo::ForceInlineBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
+ SharedFunctionInfo::IsAsmWasmBrokenBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, optimization_disabled,
+ SharedFunctionInfo::OptimizationDisabledBit)
+
+LanguageMode SharedFunctionInfo::language_mode() {
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ return construct_language_mode(IsStrictBit::decode(compiler_hints()));
+}
+
+void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ // We only allow language mode transitions that set the same language mode
+ // again or go up in the chain:
+ DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
+ int hints = compiler_hints();
+ hints = IsStrictBit::update(hints, is_strict(language_mode));
+ set_compiler_hints(hints);
+ UpdateFunctionMapIndex();
+}
+
+FunctionKind SharedFunctionInfo::kind() const {
+ return FunctionKindBits::decode(compiler_hints());
+}
+
+void SharedFunctionInfo::set_kind(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ int hints = compiler_hints();
+ hints = FunctionKindBits::update(hints, kind);
+ set_compiler_hints(hints);
+ UpdateFunctionMapIndex();
+}
+
+bool SharedFunctionInfo::needs_home_object() const {
+ return NeedsHomeObjectBit::decode(compiler_hints());
+}
+
+void SharedFunctionInfo::set_needs_home_object(bool value) {
+ int hints = compiler_hints();
+ hints = NeedsHomeObjectBit::update(hints, value);
+ set_compiler_hints(hints);
+ UpdateFunctionMapIndex();
+}
+
+int SharedFunctionInfo::function_map_index() const {
+ // Note: Must be kept in sync with the FastNewClosure builtin.
+ int index = Context::FIRST_FUNCTION_MAP_INDEX +
+ FunctionMapIndexBits::decode(compiler_hints());
+ DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
+ return index;
+}
+
+void SharedFunctionInfo::set_function_map_index(int index) {
+ STATIC_ASSERT(Context::LAST_FUNCTION_MAP_INDEX <=
+ Context::FIRST_FUNCTION_MAP_INDEX + FunctionMapIndexBits::kMax);
+ DCHECK_LE(Context::FIRST_FUNCTION_MAP_INDEX, index);
+ DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
+ index -= Context::FIRST_FUNCTION_MAP_INDEX;
+ set_compiler_hints(FunctionMapIndexBits::update(compiler_hints(), index));
+}
+
+void SharedFunctionInfo::UpdateFunctionMapIndex() {
+ int map_index = Context::FunctionMapIndex(
+ language_mode(), kind(), has_shared_name(), needs_home_object());
+ set_function_map_index(map_index);
+}
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
+ name_should_print_as_anonymous,
+ SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, is_anonymous_expression,
+ SharedFunctionInfo::IsAnonymousExpressionBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, deserialized,
+ SharedFunctionInfo::IsDeserializedBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, has_no_side_effect,
+ SharedFunctionInfo::HasNoSideEffectBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
+ computed_has_no_side_effect,
+ SharedFunctionInfo::ComputedHasNoSideEffectBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, debug_is_blackboxed,
+ SharedFunctionInfo::DebugIsBlackboxedBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
+ computed_debug_is_blackboxed,
+ SharedFunctionInfo::ComputedDebugIsBlackboxedBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
+ has_reported_binary_coverage,
+ SharedFunctionInfo::HasReportedBinaryCoverageBit)
+
+void SharedFunctionInfo::DontAdaptArguments() {
+ DCHECK(code()->kind() == Code::BUILTIN || code()->kind() == Code::STUB);
+ set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
+}
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type, start_position,
+ SharedFunctionInfo::StartPositionBits)
+
+Code* SharedFunctionInfo::code() const {
+ return Code::cast(READ_FIELD(this, kCodeOffset));
+}
+
+void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
+ DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
+ // If the SharedFunctionInfo has bytecode we should never mark it for lazy
+ // compile, since the bytecode is never flushed.
+ DCHECK(value != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy) ||
+ !HasBytecodeArray());
+ WRITE_FIELD(this, kCodeOffset, value);
+ CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
+}
+
+void SharedFunctionInfo::ReplaceCode(Code* value) {
+#ifdef DEBUG
+ Code::VerifyRecompiledCode(code(), value);
+#endif // DEBUG
+
+ set_code(value);
+}
+
+bool SharedFunctionInfo::IsInterpreted() const {
+ return code()->is_interpreter_trampoline_builtin();
+}
+
+bool SharedFunctionInfo::HasBaselineCode() const {
+ return code()->kind() == Code::FUNCTION;
+}
+
+ScopeInfo* SharedFunctionInfo::scope_info() const {
+ return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
+}
+
+void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
+ WriteBarrierMode mode) {
+ WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset,
+ reinterpret_cast<Object*>(value), mode);
+}
+
+ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
+ kOuterScopeInfoOffset)
+
+bool SharedFunctionInfo::is_compiled() const {
+ Builtins* builtins = GetIsolate()->builtins();
+ DCHECK(code() != builtins->builtin(Builtins::kCheckOptimizationMarker));
+ return code() != builtins->builtin(Builtins::kCompileLazy);
+}
+
+int SharedFunctionInfo::GetLength() const {
+ DCHECK(is_compiled());
+ DCHECK(HasLength());
+ return length();
+}
+
+bool SharedFunctionInfo::HasLength() const {
+ DCHECK_IMPLIES(length() < 0, length() == kInvalidLength);
+ return length() != kInvalidLength;
+}
+
+bool SharedFunctionInfo::has_simple_parameters() {
+ return scope_info()->HasSimpleParameters();
+}
+
+bool SharedFunctionInfo::HasDebugInfo() const {
+ bool has_debug_info = !debug_info()->IsSmi();
+ DCHECK_EQ(debug_info()->IsStruct(), has_debug_info);
+ return has_debug_info;
+}
+
+bool SharedFunctionInfo::HasDebugCode() const {
+ if (HasBaselineCode()) return code()->has_debug_break_slots();
+ return HasBytecodeArray();
+}
+
+bool SharedFunctionInfo::IsApiFunction() {
+ return function_data()->IsFunctionTemplateInfo();
+}
+
+FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
+ DCHECK(IsApiFunction());
+ return FunctionTemplateInfo::cast(function_data());
+}
+
+void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
+ DCHECK(function_data()->IsUndefined(GetIsolate()));
+ set_function_data(data);
+}
+
+bool SharedFunctionInfo::HasBytecodeArray() const {
+ return function_data()->IsBytecodeArray();
+}
+
+BytecodeArray* SharedFunctionInfo::bytecode_array() const {
+ DCHECK(HasBytecodeArray());
+ return BytecodeArray::cast(function_data());
+}
+
+void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
+ DCHECK(function_data()->IsUndefined(GetIsolate()));
+ set_function_data(bytecode);
+}
+
+void SharedFunctionInfo::ClearBytecodeArray() {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasBytecodeArray());
+ set_function_data(GetHeap()->undefined_value());
+}
+
+bool SharedFunctionInfo::HasAsmWasmData() const {
+ return function_data()->IsFixedArray();
+}
+
+FixedArray* SharedFunctionInfo::asm_wasm_data() const {
+ DCHECK(HasAsmWasmData());
+ return FixedArray::cast(function_data());
+}
+
+void SharedFunctionInfo::set_asm_wasm_data(FixedArray* data) {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
+ set_function_data(data);
+}
+
+void SharedFunctionInfo::ClearAsmWasmData() {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
+ set_function_data(GetHeap()->undefined_value());
+}
+
+bool SharedFunctionInfo::HasBuiltinFunctionId() {
+ return function_identifier()->IsSmi();
+}
+
+BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
+ DCHECK(HasBuiltinFunctionId());
+ return static_cast<BuiltinFunctionId>(Smi::ToInt(function_identifier()));
+}
+
+void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
+ set_function_identifier(Smi::FromInt(id));
+}
+
+bool SharedFunctionInfo::HasInferredName() {
+ return function_identifier()->IsString();
+}
+
+String* SharedFunctionInfo::inferred_name() {
+ if (HasInferredName()) {
+ return String::cast(function_identifier());
+ }
+ Isolate* isolate = GetIsolate();
+ DCHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId());
+ return isolate->heap()->empty_string();
+}
+
+void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
+ DCHECK(function_identifier()->IsUndefined(GetIsolate()) || HasInferredName());
+ set_function_identifier(inferred_name);
+}
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, counters, ic_age,
+ SharedFunctionInfo::ICAgeBits)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, counters, deopt_count,
+ SharedFunctionInfo::DeoptCountBits)
+
+void SharedFunctionInfo::increment_deopt_count() {
+ int value = counters();
+ int deopt_count = DeoptCountBits::decode(value);
+ // Saturate the deopt count when incrementing, rather than overflowing.
+ if (deopt_count < DeoptCountBits::kMax) {
+ set_counters(DeoptCountBits::update(value, deopt_count + 1));
+ }
+}
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, counters, opt_reenable_tries,
+ SharedFunctionInfo::OptReenableTriesBits)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason, opt_count,
+ SharedFunctionInfo::OptCountBits)
+
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
+ disable_optimization_reason,
+ SharedFunctionInfo::DisabledOptimizationReasonBits)
+
+void SharedFunctionInfo::TryReenableOptimization() {
+ int tries = opt_reenable_tries();
+ set_opt_reenable_tries((tries + 1) & OptReenableTriesBits::kMax);
+ // We reenable optimization whenever the number of tries is a large
+ // enough power of 2.
+ if (tries >= 16 && (((tries - 1) & tries) == 0)) {
+ set_optimization_disabled(false);
+ set_deopt_count(0);
+ }
+}
+
+bool SharedFunctionInfo::IsUserJavaScript() {
+ Object* script_obj = script();
+ if (script_obj->IsUndefined(GetIsolate())) return false;
+ Script* script = Script::cast(script_obj);
+ return script->IsUserJavaScript();
+}
+
+bool SharedFunctionInfo::IsSubjectToDebugging() {
+ return IsUserJavaScript() && !HasAsmWasmData();
+}
+
+bool SharedFunctionInfo::HasPreParsedScopeData() const {
+ return preparsed_scope_data()->IsPreParsedScopeData();
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
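A minimal, standalone stand-in for the BitField pattern behind the BIT_FIELD_ACCESSORS uses above: each named field decodes and updates its own slice of a single integer word such as compiler_hints. The BitField template and the example layout here are simplified assumptions, not V8's actual definitions.

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField: a typed slice of a 32-bit word.
template <typename T, int kShift, int kBits>
struct BitField {
  using FieldType = T;
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> kShift);
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
};

// Hypothetical layout: one boolean hint plus a 3-bit counter share one word.
using AllowLazyCompilationBit = BitField<bool, 0, 1>;
using DeoptCountBits = BitField<int, 1, 3>;

int main() {
  uint32_t hints = 0;
  hints = AllowLazyCompilationBit::update(hints, true);
  hints = DeoptCountBits::update(hints, 5);
  assert(AllowLazyCompilationBit::decode(hints));
  assert(DeoptCountBits::decode(hints) == 5);
  return 0;
}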
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
new file mode 100644
index 0000000000..03cae0aede
--- /dev/null
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -0,0 +1,592 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_H_
+#define V8_OBJECTS_SHARED_FUNCTION_INFO_H_
+
+#include "src/objects.h"
+#include "src/objects/script.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class CoverageInfo;
+class DebugInfo;
+
+class PreParsedScopeData : public Struct {
+ public:
+ DECL_ACCESSORS(scope_data, PodArray<uint32_t>)
+ DECL_ACCESSORS(child_data, FixedArray)
+
+ static const int kScopeDataOffset = Struct::kHeaderSize;
+ static const int kChildDataOffset = kScopeDataOffset + kPointerSize;
+ static const int kSize = kChildDataOffset + kPointerSize;
+
+ DECL_CAST(PreParsedScopeData)
+ DECL_PRINTER(PreParsedScopeData)
+ DECL_VERIFIER(PreParsedScopeData)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PreParsedScopeData);
+};
+
+// SharedFunctionInfo describes the JSFunction information that can be
+// shared by multiple instances of the function.
+class SharedFunctionInfo : public HeapObject {
+ public:
+ static constexpr Object* const kNoSharedNameSentinel = Smi::kZero;
+
+ // [name]: Returns shared name if it exists or an empty string otherwise.
+ inline String* name() const;
+ inline void set_name(String* name);
+
+ // [code]: Function code.
+ DECL_ACCESSORS(code, Code)
+
+ // Get the abstract code associated with the function, which will either be
+ // a Code object or a BytecodeArray.
+ inline AbstractCode* abstract_code();
+
+ // Tells whether or not this shared function info is interpreted.
+ //
+ // Note: function->IsInterpreted() does not necessarily return the same value
+ // as function->shared()->IsInterpreted() because the closure might have been
+ // optimized.
+ inline bool IsInterpreted() const;
+
+ inline void ReplaceCode(Code* code);
+ inline bool HasBaselineCode() const;
+
+ // Set up the link between shared function info and the script. The shared
+ // function info is added to the list on the script.
+ V8_EXPORT_PRIVATE static void SetScript(
+ Handle<SharedFunctionInfo> shared, Handle<Object> script_object,
+ bool reset_preparsed_scope_data = true);
+
+ // Layout description of the optimized code map.
+ static const int kEntriesStart = 0;
+ static const int kContextOffset = 0;
+ static const int kCachedCodeOffset = 1;
+ static const int kEntryLength = 2;
+ static const int kInitialLength = kEntriesStart + kEntryLength;
+
+ static const int kNotFound = -1;
+ static const int kInvalidLength = -1;
+
+ // Helpers for assembly code that does a backwards walk of the optimized code
+ // map.
+ static const int kOffsetToPreviousContext =
+ FixedArray::kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
+ static const int kOffsetToPreviousCachedCode =
+ FixedArray::kHeaderSize +
+ kPointerSize * (kCachedCodeOffset - kEntryLength);
+
+ // [scope_info]: Scope info.
+ DECL_ACCESSORS(scope_info, ScopeInfo)
+
+ // The outer scope info for the purpose of parsing this function, or the hole
+ // value if it isn't yet known.
+ DECL_ACCESSORS(outer_scope_info, HeapObject)
+
+ // [construct stub]: Code stub for constructing instances of this function.
+ DECL_ACCESSORS(construct_stub, Code)
+
+ // Sets the given code as the construct stub, and marks builtin code objects
+ // as a construct stub.
+ void SetConstructStub(Code* code);
+
+  // Returns whether this function has been compiled to native code yet.
+ inline bool is_compiled() const;
+
+ // [length]: The function length - usually the number of declared parameters.
+ // Use up to 2^30 parameters. The value is only reliable when the function has
+ // been compiled.
+ inline int GetLength() const;
+ inline bool HasLength() const;
+ inline void set_length(int value);
+
+ // [internal formal parameter count]: The declared number of parameters.
+ // For subclass constructors, also includes new.target.
+ // The size of function's frame is internal_formal_parameter_count + 1.
+ DECL_INT_ACCESSORS(internal_formal_parameter_count)
+
+ // Set the formal parameter count so the function code will be
+ // called without using argument adaptor frames.
+ inline void DontAdaptArguments();
+
+ // [expected_nof_properties]: Expected number of properties for the
+ // function. The value is only reliable when the function has been compiled.
+ DECL_INT_ACCESSORS(expected_nof_properties)
+
+ // [feedback_metadata] - describes ast node feedback from full-codegen and
+ // (increasingly) from crankshafted code where sufficient feedback isn't
+ // available.
+ DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
+
+ // [function_literal_id] - uniquely identifies the FunctionLiteral this
+ // SharedFunctionInfo represents within its script, or -1 if this
+ // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
+ DECL_INT_ACCESSORS(function_literal_id)
+
+#if V8_SFI_HAS_UNIQUE_ID
+ // [unique_id] - For --trace-maps purposes, an identifier that's persistent
+ // even if the GC moves this SharedFunctionInfo.
+ DECL_INT_ACCESSORS(unique_id)
+#endif
+
+ // [instance class name]: class name for instances.
+ DECL_ACCESSORS(instance_class_name, Object)
+
+ // [function data]: This field holds some additional data for the function.
+ // Currently it has one of:
+ // - a FunctionTemplateInfo used by API functions [IsApiFunction()].
+ // - a BytecodeArray for the interpreter [HasBytecodeArray()].
+ // - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
+ DECL_ACCESSORS(function_data, Object)
+
+ inline bool IsApiFunction();
+ inline FunctionTemplateInfo* get_api_func_data();
+ inline void set_api_func_data(FunctionTemplateInfo* data);
+ inline bool HasBytecodeArray() const;
+ inline BytecodeArray* bytecode_array() const;
+ inline void set_bytecode_array(BytecodeArray* bytecode);
+ inline void ClearBytecodeArray();
+ inline bool HasAsmWasmData() const;
+ inline FixedArray* asm_wasm_data() const;
+ inline void set_asm_wasm_data(FixedArray* data);
+ inline void ClearAsmWasmData();
+
+ // [function identifier]: This field holds an additional identifier for the
+ // function.
+ // - a Smi identifying a builtin function [HasBuiltinFunctionId()].
+ // - a String identifying the function's inferred name [HasInferredName()].
+ // The inferred_name is inferred from variable or property
+ // assignment of this function. It is used to facilitate debugging and
+ // profiling of JavaScript code written in OO style, where almost
+ // all functions are anonymous but are assigned to object
+ // properties.
+ DECL_ACCESSORS(function_identifier, Object)
+
+ inline bool HasBuiltinFunctionId();
+ inline BuiltinFunctionId builtin_function_id();
+ inline void set_builtin_function_id(BuiltinFunctionId id);
+ inline bool HasInferredName();
+ inline String* inferred_name();
+ inline void set_inferred_name(String* inferred_name);
+
+ // [script]: Script from which the function originates.
+ DECL_ACCESSORS(script, Object)
+
+ // [start_position_and_type]: Field used to store the source code
+ // position, whether or not the function is a function expression,
+ // and whether or not the function is a toplevel function. The two
+ // least significant bits indicate whether the function is an
+ // expression and whether it is a toplevel function; the rest
+ // contains the source code position.
+ DECL_INT_ACCESSORS(start_position_and_type)
+
+ // The function is subject to debugging if a debug info is attached.
+ inline bool HasDebugInfo() const;
+ DebugInfo* GetDebugInfo() const;
+
+ // Break infos are contained in DebugInfo; this is a convenience method
+ // to simplify access.
+ bool HasBreakInfo() const;
+
+ // Coverage infos are contained in DebugInfo; this is a convenience method
+ // to simplify access.
+ bool HasCoverageInfo() const;
+ CoverageInfo* GetCoverageInfo() const;
+
+ // A function has debug code if the compiled code has debug break slots.
+ inline bool HasDebugCode() const;
+
+ // [debug info]: Debug information.
+ DECL_ACCESSORS(debug_info, Object)
+
+ // PreParsedScopeData or null.
+ DECL_ACCESSORS(preparsed_scope_data, Object)
+
+ inline bool HasPreParsedScopeData() const;
+
+ // Bit field containing various information collected for debugging.
+ // This field is either stored on the kDebugInfo slot or inside the
+ // debug info struct.
+ int debugger_hints() const;
+ void set_debugger_hints(int value);
+
+ // Indicates that the function was created by the Function function.
+ // Though it's anonymous, toString should treat it as if it had the name
+ // "anonymous". We don't set the name itself so that the system does not
+ // see a binding for it.
+ DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
+
+ // Indicates that the function is either an anonymous expression
+ // or an arrow function (the name field can be set through the API,
+ // which does not change this flag).
+ DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
+
+ // Indicates that the shared function info is deserialized from cache.
+ DECL_BOOLEAN_ACCESSORS(deserialized)
+
+ // Indicates that the function cannot cause side-effects.
+ DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
+
+ // Indicates that |has_no_side_effect| has been computed and set.
+ DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
+
+ // Indicates that the function should be skipped during stepping.
+ DECL_BOOLEAN_ACCESSORS(debug_is_blackboxed)
+
+ // Indicates that |debug_is_blackboxed| has been computed and set.
+ DECL_BOOLEAN_ACCESSORS(computed_debug_is_blackboxed)
+
+ // Indicates that the function has been reported for binary code coverage.
+ DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
+
+ // The function's name if it is non-empty, otherwise the inferred name.
+ String* DebugName();
+
+ // The function cannot cause any side effects.
+ bool HasNoSideEffect();
+
+ // Used for flags such as --hydrogen-filter.
+ bool PassesFilter(const char* raw_filter);
+
+ // Position of the 'function' token in the script source.
+ DECL_INT_ACCESSORS(function_token_position)
+
+ // Position of this function in the script source.
+ DECL_INT_ACCESSORS(start_position)
+
+ // End position of this function in the script source.
+ DECL_INT_ACCESSORS(end_position)
+
+ // Returns true if the function has a shared name.
+ inline bool has_shared_name() const;
+
+ // Is this function a named function expression in the source code.
+ DECL_BOOLEAN_ACCESSORS(is_named_expression)
+
+ // Is this function a top-level function (scripts, evals).
+ DECL_BOOLEAN_ACCESSORS(is_toplevel)
+
+ // Bit field containing various information collected by the compiler to
+ // drive optimization.
+ DECL_INT_ACCESSORS(compiler_hints)
+
+ DECL_INT_ACCESSORS(ast_node_count)
+
+ DECL_INT_ACCESSORS(profiler_ticks)
+
+ // Inline cache age is used to infer whether the function survived a context
+ // disposal or not. In the former case we reset the opt_count.
+ DECL_INT_ACCESSORS(ic_age)
+
+ // Indicates if this function can be lazy compiled.
+ DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
+
+ // Indicates whether optimizations have been disabled for this
+ // shared function info. If a function is repeatedly optimized or if
+ // we cannot optimize the function we disable optimization to avoid
+ // spending time attempting to optimize it again.
+ DECL_BOOLEAN_ACCESSORS(optimization_disabled)
+
+ // Indicates the language mode.
+ inline LanguageMode language_mode();
+ inline void set_language_mode(LanguageMode language_mode);
+
+ // False if the function definitely does not allocate an arguments object.
+ DECL_BOOLEAN_ACCESSORS(uses_arguments)
+
+ // True if the function has any duplicated parameter names.
+ DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
+
+ // Indicates whether the function is a native function.
+ // These need special treatment in .call and .apply since
+ // null passed as the receiver should not be translated to the
+ // global object.
+ DECL_BOOLEAN_ACCESSORS(native)
+
+ // Indicate that this function should always be inlined in optimized code.
+ DECL_BOOLEAN_ACCESSORS(force_inline)
+
+ // Indicates that this function is an asm function.
+ DECL_BOOLEAN_ACCESSORS(asm_function)
+
+ // Whether this function was created from a FunctionDeclaration.
+ DECL_BOOLEAN_ACCESSORS(is_declaration)
+
+ // Indicates that asm->wasm conversion failed and should not be re-attempted.
+ DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
+
+ inline FunctionKind kind() const;
+
+ // Defines the index in a native context of the closure's map instantiated
+ // using this shared function info.
+ DECL_INT_ACCESSORS(function_map_index)
+
+ // Recalculates the |map_index| value after modifications of this shared info.
+ inline void UpdateFunctionMapIndex();
+
+ // Disable (further) attempted optimization of all functions sharing this
+ // shared function info.
+ void DisableOptimization(BailoutReason reason);
+
+ // [source code]: Source code for the function.
+ bool HasSourceCode() const;
+ Handle<Object> GetSourceCode();
+ Handle<Object> GetSourceCodeHarmony();
+
+ // Number of times the function was optimized.
+ DECL_INT_ACCESSORS(opt_count)
+
+ // Number of times the function was deoptimized.
+ DECL_INT_ACCESSORS(deopt_count)
+ inline void increment_deopt_count();
+
+ // Number of times we tried to re-enable optimization after it
+ // was disabled due to a high number of deoptimizations.
+ DECL_INT_ACCESSORS(opt_reenable_tries)
+
+ inline void TryReenableOptimization();
+
+ // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
+ inline void set_counters(int value);
+ inline int counters() const;
+
+ // Stores opt_count and bailout_reason as bit-fields.
+ DECL_INT_ACCESSORS(opt_count_and_bailout_reason)
+
+ inline BailoutReason disable_optimization_reason() const;
+ inline void set_disable_optimization_reason(BailoutReason reason);
+
+ // Tells whether this function should be subject to debugging.
+ inline bool IsSubjectToDebugging();
+
+ // Whether this function is defined in user-provided JavaScript code.
+ inline bool IsUserJavaScript();
+
+ // Check whether or not this function is inlineable.
+ bool IsInlineable();
+
+ // Source size of this function.
+ int SourceSize();
+
+ // Returns `false` if formal parameters include rest parameters, optional
+ // parameters, or destructuring parameters.
+ // TODO(caitp): make this a flag set during parsing
+ inline bool has_simple_parameters();
+
+ // Initialize a SharedFunctionInfo from a parsed function literal.
+ static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
+ FunctionLiteral* lit);
+
+ // Sets the expected number of properties based on estimate from parser.
+ void SetExpectedNofPropertiesFromEstimate(FunctionLiteral* literal);
+
+ // Dispatched behavior.
+ DECL_PRINTER(SharedFunctionInfo)
+ DECL_VERIFIER(SharedFunctionInfo)
+
+ void ResetForNewContext(int new_ic_age);
+
+ // Iterate over all shared function infos in a given script.
+ class ScriptIterator {
+ public:
+ explicit ScriptIterator(Handle<Script> script);
+ ScriptIterator(Isolate* isolate, Handle<FixedArray> shared_function_infos);
+ SharedFunctionInfo* Next();
+
+ // Reset the iterator to run on |script|.
+ void Reset(Handle<Script> script);
+
+ private:
+ Isolate* isolate_;
+ Handle<FixedArray> shared_function_infos_;
+ int index_;
+ DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
+ };
+
+ // Iterate over all shared function infos on the heap.
+ class GlobalIterator {
+ public:
+ explicit GlobalIterator(Isolate* isolate);
+ SharedFunctionInfo* Next();
+
+ private:
+ Script::Iterator script_iterator_;
+ WeakFixedArray::Iterator noscript_sfi_iterator_;
+ SharedFunctionInfo::ScriptIterator sfi_iterator_;
+ DisallowHeapAllocation no_gc_;
+ DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
+ };
+
+ DECL_CAST(SharedFunctionInfo)
+
+ // Constants.
+ static const int kDontAdaptArgumentsSentinel = -1;
+
+#if V8_SFI_HAS_UNIQUE_ID
+ static const int kUniqueIdFieldSize = kInt32Size;
+#else
+ // Just to not break postmortem support with conditional offsets
+ static const int kUniqueIdFieldSize = 0;
+#endif
+
+// Layout description.
+#define SHARED_FUNCTION_INFO_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kCodeOffset, kPointerSize) \
+ V(kNameOffset, kPointerSize) \
+ V(kScopeInfoOffset, kPointerSize) \
+ V(kOuterScopeInfoOffset, kPointerSize) \
+ V(kConstructStubOffset, kPointerSize) \
+ V(kInstanceClassNameOffset, kPointerSize) \
+ V(kFunctionDataOffset, kPointerSize) \
+ V(kScriptOffset, kPointerSize) \
+ V(kDebugInfoOffset, kPointerSize) \
+ V(kFunctionIdentifierOffset, kPointerSize) \
+ V(kFeedbackMetadataOffset, kPointerSize) \
+ V(kPreParsedScopeDataOffset, kPointerSize) \
+ V(kEndOfPointerFieldsOffset, 0) \
+ /* Raw data fields. */ \
+ V(kFunctionLiteralIdOffset, kInt32Size) \
+ V(kUniqueIdOffset, kUniqueIdFieldSize) \
+ V(kLengthOffset, kInt32Size) \
+ V(kFormalParameterCountOffset, kInt32Size) \
+ V(kExpectedNofPropertiesOffset, kInt32Size) \
+ V(kStartPositionAndTypeOffset, kInt32Size) \
+ V(kEndPositionOffset, kInt32Size) \
+ V(kFunctionTokenPositionOffset, kInt32Size) \
+ V(kCompilerHintsOffset, kInt32Size) \
+ V(kOptCountAndBailoutReasonOffset, kInt32Size) \
+ V(kCountersOffset, kInt32Size) \
+ V(kAstNodeCountOffset, kInt32Size) \
+ V(kProfilerTicksOffset, kInt32Size) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ SHARED_FUNCTION_INFO_FIELDS)
+
+ static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
+
+ typedef FixedBodyDescriptor<kCodeOffset, kEndOfPointerFieldsOffset, kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+// Bit fields in |start_position_and_type|.
+#define START_POSITION_AND_TYPE_BIT_FIELDS(V, _) \
+ V(IsNamedExpressionBit, bool, 1, _) \
+ V(IsTopLevelBit, bool, 1, _) \
+ V(StartPositionBits, int, 30, _)
+
+ DEFINE_BIT_FIELDS(START_POSITION_AND_TYPE_BIT_FIELDS)
+#undef START_POSITION_AND_TYPE_BIT_FIELDS
+
+// Bit positions in |compiler_hints|.
+#define COMPILER_HINTS_BIT_FIELDS(V, _) \
+ V(IsNativeBit, bool, 1, _) \
+ V(IsStrictBit, bool, 1, _) \
+ V(FunctionKindBits, FunctionKind, 10, _) \
+ V(HasDuplicateParametersBit, bool, 1, _) \
+ V(AllowLazyCompilationBit, bool, 1, _) \
+ V(OptimizationDisabledBit, bool, 1, _) \
+ V(UsesArgumentsBit, bool, 1, _) \
+ V(NeedsHomeObjectBit, bool, 1, _) \
+ V(ForceInlineBit, bool, 1, _) \
+ V(IsAsmFunctionBit, bool, 1, _) \
+ V(IsDeclarationBit, bool, 1, _) \
+ V(IsAsmWasmBrokenBit, bool, 1, _) \
+ V(FunctionMapIndexBits, int, 5, _) \
+ /* Bits 26-31 are unused. */
+
+ DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
+#undef COMPILER_HINTS_BIT_FIELDS
+
+ // Masks for checking if certain FunctionKind bits are set without fully
+ // decoding the FunctionKind bit field.
+ static const int kClassConstructorMask = FunctionKind::kClassConstructor
+ << FunctionKindBits::kShift;
+ static const int kDerivedConstructorMask = FunctionKind::kDerivedConstructor
+ << FunctionKindBits::kShift;
+
+// Bit positions in |debugger_hints|.
+#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
+ V(IsAnonymousExpressionBit, bool, 1, _) \
+ V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
+ V(IsDeserializedBit, bool, 1, _) \
+ V(HasNoSideEffectBit, bool, 1, _) \
+ V(ComputedHasNoSideEffectBit, bool, 1, _) \
+ V(DebugIsBlackboxedBit, bool, 1, _) \
+ V(ComputedDebugIsBlackboxedBit, bool, 1, _) \
+ V(HasReportedBinaryCoverageBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(DEBUGGER_HINTS_BIT_FIELDS)
+#undef DEBUGGER_HINTS_BIT_FIELDS
+
+// Bit fields in |counters|.
+#define COUNTERS_BIT_FIELDS(V, _) \
+ V(DeoptCountBits, int, 4, _) \
+ V(OptReenableTriesBits, int, 18, _) \
+ V(ICAgeBits, int, 8, _)
+
+ DEFINE_BIT_FIELDS(COUNTERS_BIT_FIELDS)
+#undef COUNTERS_BIT_FIELDS
+
+// Bit fields in |opt_count_and_bailout_reason|.
+#define OPT_COUNT_AND_BAILOUT_REASON_BIT_FIELDS(V, _) \
+ V(OptCountBits, int, 22, _) \
+ V(DisabledOptimizationReasonBits, BailoutReason, 8, _)
+
+ DEFINE_BIT_FIELDS(OPT_COUNT_AND_BAILOUT_REASON_BIT_FIELDS)
+#undef OPT_COUNT_AND_BAILOUT_REASON_BIT_FIELDS
+
+ private:
+ // [raw_name]: Function name string or kNoSharedNameSentinel.
+ DECL_ACCESSORS(raw_name, Object)
+
+ inline void set_kind(FunctionKind kind);
+
+ // Indicates that this function uses a super property (or an eval that may
+ // use a super property).
+ // This is needed to set up the [[HomeObject]] on the function instance.
+ DECL_BOOLEAN_ACCESSORS(needs_home_object)
+
+ friend class Factory;
+ friend class V8HeapExplorer;
+ FRIEND_TEST(PreParserTest, LazyFunctionLength);
+
+ inline int length() const;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
+};
+
+// Result of searching in an optimized code map of a SharedFunctionInfo. Note
+// that both {code} and {vector} can be NULL to pass search result status.
+struct CodeAndVector {
+ Code* code; // Cached optimized code.
+ FeedbackVector* vector; // Cached feedback vector.
+};
+
+// Printing support.
+struct SourceCodeOf {
+ explicit SourceCodeOf(SharedFunctionInfo* v, int max = -1)
+ : value(v), max_length(max) {}
+ const SharedFunctionInfo* value;
+ int max_length;
+};
+
+std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SHARED_FUNCTION_INFO_H_
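Note on the packed hint fields above: compiler_hints, debugger_hints, counters and opt_count_and_bailout_reason each pack several small values into one int32 slot via DEFINE_BIT_FIELDS. The following is a minimal, self-contained sketch of that encode/decode pattern; the BitFieldSketch helper and the shift/width values are made up for illustration and are not V8's actual BitField implementation or field layout.

#include <cstdint>

// Minimal stand-in for the encode/decode helpers generated by DEFINE_BIT_FIELDS.
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t hints) {
    return static_cast<T>((hints & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t hints, T value) {
    return (hints & ~kMask) | encode(value);
  }
};

// Hypothetical positions loosely mirroring the first COMPILER_HINTS_BIT_FIELDS entries.
using IsNativeBit = BitFieldSketch<bool, 0, 1>;
using IsStrictBit = BitFieldSketch<bool, 1, 1>;
using FunctionKindBits = BitFieldSketch<int, 2, 10>;

int main() {
  uint32_t hints = 0;
  hints = IsStrictBit::update(hints, true);    // set a boolean hint
  hints = FunctionKindBits::update(hints, 7);  // store a small enum-like value
  return (IsStrictBit::decode(hints) && FunctionKindBits::decode(hints) == 7) ? 0 : 1;
}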
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
new file mode 100644
index 0000000000..c83ecffa46
--- /dev/null
+++ b/deps/v8/src/objects/string-inl.h
@@ -0,0 +1,742 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STRING_INL_H_
+#define V8_OBJECTS_STRING_INL_H_
+
+#include "src/objects/string.h"
+
+#include "src/conversions-inl.h"
+#include "src/objects/name-inl.h"
+#include "src/string-hasher-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+SMI_ACCESSORS(String, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
+
+CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(ExternalOneByteString)
+CAST_ACCESSOR(ExternalString)
+CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(SeqOneByteString)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(ThinString)
+
+StringShape::StringShape(const String* str)
+ : type_(str->map()->instance_type()) {
+ set_valid();
+ DCHECK((type_ & kIsNotStringMask) == kStringTag);
+}
+
+StringShape::StringShape(Map* map) : type_(map->instance_type()) {
+ set_valid();
+ DCHECK((type_ & kIsNotStringMask) == kStringTag);
+}
+
+StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
+ set_valid();
+ DCHECK((type_ & kIsNotStringMask) == kStringTag);
+}
+
+bool StringShape::IsInternalized() {
+ DCHECK(valid());
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
+}
+
+bool StringShape::HasOnlyOneByteChars() {
+ return (type_ & kStringEncodingMask) == kOneByteStringTag ||
+ (type_ & kOneByteDataHintMask) == kOneByteDataHintTag;
+}
+
+bool StringShape::IsCons() {
+ return (type_ & kStringRepresentationMask) == kConsStringTag;
+}
+
+bool StringShape::IsThin() {
+ return (type_ & kStringRepresentationMask) == kThinStringTag;
+}
+
+bool StringShape::IsSliced() {
+ return (type_ & kStringRepresentationMask) == kSlicedStringTag;
+}
+
+bool StringShape::IsIndirect() {
+ return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
+}
+
+bool StringShape::IsExternal() {
+ return (type_ & kStringRepresentationMask) == kExternalStringTag;
+}
+
+bool StringShape::IsSequential() {
+ return (type_ & kStringRepresentationMask) == kSeqStringTag;
+}
+
+StringRepresentationTag StringShape::representation_tag() {
+ uint32_t tag = (type_ & kStringRepresentationMask);
+ return static_cast<StringRepresentationTag>(tag);
+}
+
+uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }
+
+uint32_t StringShape::full_representation_tag() {
+ return (type_ & (kStringRepresentationMask | kStringEncodingMask));
+}
+
+STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
+ Internals::kFullStringRepresentationMask);
+
+STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
+ Internals::kStringEncodingMask);
+
+bool StringShape::IsSequentialOneByte() {
+ return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
+}
+
+bool StringShape::IsSequentialTwoByte() {
+ return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
+}
+
+bool StringShape::IsExternalOneByte() {
+ return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
+}
+
+STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
+ Internals::kExternalOneByteRepresentationTag);
+
+STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
+
+bool StringShape::IsExternalTwoByte() {
+ return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
+}
+
+STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
+ Internals::kExternalTwoByteRepresentationTag);
+
+STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
+
+bool String::IsOneByteRepresentation() const {
+ uint32_t type = map()->instance_type();
+ return (type & kStringEncodingMask) == kOneByteStringTag;
+}
+
+bool String::IsTwoByteRepresentation() const {
+ uint32_t type = map()->instance_type();
+ return (type & kStringEncodingMask) == kTwoByteStringTag;
+}
+
+bool String::IsOneByteRepresentationUnderneath() {
+ uint32_t type = map()->instance_type();
+ STATIC_ASSERT(kIsIndirectStringTag != 0);
+ STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+ DCHECK(IsFlat());
+ switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+ case kOneByteStringTag:
+ return true;
+ case kTwoByteStringTag:
+ return false;
+ default: // Cons or sliced string. Need to go deeper.
+ return GetUnderlying()->IsOneByteRepresentation();
+ }
+}
+
+bool String::IsTwoByteRepresentationUnderneath() {
+ uint32_t type = map()->instance_type();
+ STATIC_ASSERT(kIsIndirectStringTag != 0);
+ STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+ DCHECK(IsFlat());
+ switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+ case kOneByteStringTag:
+ return false;
+ case kTwoByteStringTag:
+ return true;
+ default: // Cons or sliced string. Need to go deeper.
+ return GetUnderlying()->IsTwoByteRepresentation();
+ }
+}
+
+bool String::HasOnlyOneByteChars() {
+ uint32_t type = map()->instance_type();
+ return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
+ IsOneByteRepresentation();
+}
+
+uc32 FlatStringReader::Get(int index) {
+ if (is_one_byte_) {
+ return Get<uint8_t>(index);
+ } else {
+ return Get<uc16>(index);
+ }
+}
+
+template <typename Char>
+Char FlatStringReader::Get(int index) {
+ DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
+ DCHECK(0 <= index && index <= length_);
+ if (sizeof(Char) == 1) {
+ return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
+ } else {
+ return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
+ }
+}
+
+template <typename Char>
+class SequentialStringKey : public StringTableKey {
+ public:
+ explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
+ : StringTableKey(StringHasher::HashSequentialString<Char>(
+ string.start(), string.length(), seed)),
+ string_(string) {}
+
+ Vector<const Char> string_;
+};
+
+class OneByteStringKey : public SequentialStringKey<uint8_t> {
+ public:
+ OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+ : SequentialStringKey<uint8_t>(str, seed) {}
+
+ bool IsMatch(Object* string) override {
+ return String::cast(string)->IsOneByteEqualTo(string_);
+ }
+
+ Handle<String> AsHandle(Isolate* isolate) override;
+};
+
+class SeqOneByteSubStringKey : public StringTableKey {
+ public:
+// VS 2017 on official builds gives this spurious warning:
+// warning C4789: buffer 'key' of size 16 bytes will be overrun; 4 bytes will
+// be written starting at offset 16
+// https://bugs.chromium.org/p/v8/issues/detail?id=6068
+#if defined(V8_CC_MSVC)
+#pragma warning(push)
+#pragma warning(disable : 4789)
+#endif
+ SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
+ : StringTableKey(StringHasher::HashSequentialString(
+ string->GetChars() + from, length, string->GetHeap()->HashSeed())),
+ string_(string),
+ from_(from),
+ length_(length) {
+ DCHECK_LE(0, length_);
+ DCHECK_LE(from_ + length_, string_->length());
+ DCHECK(string_->IsSeqOneByteString());
+ }
+#if defined(V8_CC_MSVC)
+#pragma warning(pop)
+#endif
+
+ bool IsMatch(Object* string) override;
+ Handle<String> AsHandle(Isolate* isolate) override;
+
+ private:
+ Handle<SeqOneByteString> string_;
+ int from_;
+ int length_;
+};
+
+class TwoByteStringKey : public SequentialStringKey<uc16> {
+ public:
+ explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+ : SequentialStringKey<uc16>(str, seed) {}
+
+ bool IsMatch(Object* string) override {
+ return String::cast(string)->IsTwoByteEqualTo(string_);
+ }
+
+ Handle<String> AsHandle(Isolate* isolate) override;
+};
+
+// Utf8StringKey carries a vector of chars as key.
+class Utf8StringKey : public StringTableKey {
+ public:
+ explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
+ : StringTableKey(StringHasher::ComputeUtf8Hash(string, seed, &chars_)),
+ string_(string) {}
+
+ bool IsMatch(Object* string) override {
+ return String::cast(string)->IsUtf8EqualTo(string_);
+ }
+
+ Handle<String> AsHandle(Isolate* isolate) override {
+ return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
+ HashField());
+ }
+
+ private:
+ Vector<const char> string_;
+ int chars_; // Caches the number of characters when computing the hash code.
+};
+
+bool String::Equals(String* other) {
+ if (other == this) return true;
+ if (this->IsInternalizedString() && other->IsInternalizedString()) {
+ return false;
+ }
+ return SlowEquals(other);
+}
+
+bool String::Equals(Handle<String> one, Handle<String> two) {
+ if (one.is_identical_to(two)) return true;
+ if (one->IsInternalizedString() && two->IsInternalizedString()) {
+ return false;
+ }
+ return SlowEquals(one, two);
+}
+
+Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
+ if (string->IsConsString()) {
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ if (cons->IsFlat()) {
+ string = handle(cons->first());
+ } else {
+ return SlowFlatten(cons, pretenure);
+ }
+ }
+ if (string->IsThinString()) {
+ string = handle(Handle<ThinString>::cast(string)->actual());
+ DCHECK(!string->IsConsString());
+ }
+ return string;
+}
+
+uint16_t String::Get(int index) {
+ DCHECK(index >= 0 && index < length());
+ switch (StringShape(this).full_representation_tag()) {
+ case kSeqStringTag | kOneByteStringTag:
+ return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
+ case kSeqStringTag | kTwoByteStringTag:
+ return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
+ case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kTwoByteStringTag:
+ return ConsString::cast(this)->ConsStringGet(index);
+ case kExternalStringTag | kOneByteStringTag:
+ return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
+ case kExternalStringTag | kTwoByteStringTag:
+ return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
+ case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kTwoByteStringTag:
+ return SlicedString::cast(this)->SlicedStringGet(index);
+ case kThinStringTag | kOneByteStringTag:
+ case kThinStringTag | kTwoByteStringTag:
+ return ThinString::cast(this)->ThinStringGet(index);
+ default:
+ break;
+ }
+
+ UNREACHABLE();
+}
+
+void String::Set(int index, uint16_t value) {
+ DCHECK(index >= 0 && index < length());
+ DCHECK(StringShape(this).IsSequential());
+
+ return this->IsOneByteRepresentation()
+ ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
+ : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
+}
+
+bool String::IsFlat() {
+ if (!StringShape(this).IsCons()) return true;
+ return ConsString::cast(this)->second()->length() == 0;
+}
+
+String* String::GetUnderlying() {
+ // Giving direct access to the underlying string only makes sense if the
+ // wrapping string is already flattened.
+ DCHECK(this->IsFlat());
+ DCHECK(StringShape(this).IsIndirect());
+ STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
+ STATIC_ASSERT(ConsString::kFirstOffset == ThinString::kActualOffset);
+ const int kUnderlyingOffset = SlicedString::kParentOffset;
+ return String::cast(READ_FIELD(this, kUnderlyingOffset));
+}
+
+template <class Visitor>
+ConsString* String::VisitFlat(Visitor* visitor, String* string,
+ const int offset) {
+ int slice_offset = offset;
+ const int length = string->length();
+ DCHECK(offset <= length);
+ while (true) {
+ int32_t type = string->map()->instance_type();
+ switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ case kSeqStringTag | kOneByteStringTag:
+ visitor->VisitOneByteString(
+ SeqOneByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return NULL;
+
+ case kSeqStringTag | kTwoByteStringTag:
+ visitor->VisitTwoByteString(
+ SeqTwoByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return NULL;
+
+ case kExternalStringTag | kOneByteStringTag:
+ visitor->VisitOneByteString(
+ ExternalOneByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return NULL;
+
+ case kExternalStringTag | kTwoByteStringTag:
+ visitor->VisitTwoByteString(
+ ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return NULL;
+
+ case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kTwoByteStringTag: {
+ SlicedString* slicedString = SlicedString::cast(string);
+ slice_offset += slicedString->offset();
+ string = slicedString->parent();
+ continue;
+ }
+
+ case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kTwoByteStringTag:
+ return ConsString::cast(string);
+
+ case kThinStringTag | kOneByteStringTag:
+ case kThinStringTag | kTwoByteStringTag:
+ string = ThinString::cast(string)->actual();
+ continue;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <>
+inline Vector<const uint8_t> String::GetCharVector() {
+ String::FlatContent flat = GetFlatContent();
+ DCHECK(flat.IsOneByte());
+ return flat.ToOneByteVector();
+}
+
+template <>
+inline Vector<const uc16> String::GetCharVector() {
+ String::FlatContent flat = GetFlatContent();
+ DCHECK(flat.IsTwoByte());
+ return flat.ToUC16Vector();
+}
+
+uint32_t String::ToValidIndex(Object* number) {
+ uint32_t index = PositiveNumberToUint32(number);
+ uint32_t length_value = static_cast<uint32_t>(length());
+ if (index > length_value) return length_value;
+ return index;
+}
+
+uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
+ DCHECK(index >= 0 && index < length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
+}
+
+void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
+ DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
+ static_cast<byte>(value));
+}
+
+Address SeqOneByteString::GetCharsAddress() {
+ return FIELD_ADDR(this, kHeaderSize);
+}
+
+uint8_t* SeqOneByteString::GetChars() {
+ return reinterpret_cast<uint8_t*>(GetCharsAddress());
+}
+
+Address SeqTwoByteString::GetCharsAddress() {
+ return FIELD_ADDR(this, kHeaderSize);
+}
+
+uc16* SeqTwoByteString::GetChars() {
+ return reinterpret_cast<uc16*>(FIELD_ADDR(this, kHeaderSize));
+}
+
+uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
+ DCHECK(index >= 0 && index < length());
+ return READ_UINT16_FIELD(this, kHeaderSize + index * kShortSize);
+}
+
+void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
+ DCHECK(index >= 0 && index < length());
+ WRITE_UINT16_FIELD(this, kHeaderSize + index * kShortSize, value);
+}
+
+int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
+ return SizeFor(length());
+}
+
+int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
+ return SizeFor(length());
+}
+
+String* SlicedString::parent() {
+ return String::cast(READ_FIELD(this, kParentOffset));
+}
+
+void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
+ DCHECK(parent->IsSeqString() || parent->IsExternalString());
+ WRITE_FIELD(this, kParentOffset, parent);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
+}
+
+SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
+
+String* ConsString::first() {
+ return String::cast(READ_FIELD(this, kFirstOffset));
+}
+
+Object* ConsString::unchecked_first() { return READ_FIELD(this, kFirstOffset); }
+
+void ConsString::set_first(String* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kFirstOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
+}
+
+String* ConsString::second() {
+ return String::cast(READ_FIELD(this, kSecondOffset));
+}
+
+Object* ConsString::unchecked_second() {
+ return READ_FIELD(this, kSecondOffset);
+}
+
+void ConsString::set_second(String* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kSecondOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
+}
+
+ACCESSORS(ThinString, actual, String, kActualOffset);
+
+bool ExternalString::is_short() {
+ InstanceType type = map()->instance_type();
+ return (type & kShortExternalStringMask) == kShortExternalStringTag;
+}
+
+const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
+ return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+}
+
+void ExternalOneByteString::update_data_cache() {
+ if (is_short()) return;
+ const char** data_field =
+ reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+ *data_field = resource()->data();
+}
+
+void ExternalOneByteString::set_resource(
+ const ExternalOneByteString::Resource* resource) {
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
+ *reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
+ resource;
+ if (resource != NULL) update_data_cache();
+}
+
+const uint8_t* ExternalOneByteString::GetChars() {
+ return reinterpret_cast<const uint8_t*>(resource()->data());
+}
+
+uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
+ DCHECK(index >= 0 && index < length());
+ return GetChars()[index];
+}
+
+const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+ return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
+}
+
+void ExternalTwoByteString::update_data_cache() {
+ if (is_short()) return;
+ const uint16_t** data_field =
+ reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
+ *data_field = resource()->data();
+}
+
+void ExternalTwoByteString::set_resource(
+ const ExternalTwoByteString::Resource* resource) {
+ *reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
+ resource;
+ if (resource != NULL) update_data_cache();
+}
+
+const uint16_t* ExternalTwoByteString::GetChars() { return resource()->data(); }
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+ DCHECK(index >= 0 && index < length());
+ return GetChars()[index];
+}
+
+const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
+ unsigned start) {
+ return GetChars() + start;
+}
+
+int ConsStringIterator::OffsetForDepth(int depth) { return depth & kDepthMask; }
+
+void ConsStringIterator::PushLeft(ConsString* string) {
+ frames_[depth_++ & kDepthMask] = string;
+}
+
+void ConsStringIterator::PushRight(ConsString* string) {
+ // In-place update.
+ frames_[(depth_ - 1) & kDepthMask] = string;
+}
+
+void ConsStringIterator::AdjustMaximumDepth() {
+ if (depth_ > maximum_depth_) maximum_depth_ = depth_;
+}
+
+void ConsStringIterator::Pop() {
+ DCHECK(depth_ > 0);
+ DCHECK(depth_ <= maximum_depth_);
+ depth_--;
+}
+
+uint16_t StringCharacterStream::GetNext() {
+ DCHECK(buffer8_ != NULL && end_ != NULL);
+ // Advance cursor if needed.
+ if (buffer8_ == end_) HasMore();
+ DCHECK(buffer8_ < end_);
+ return is_one_byte_ ? *buffer8_++ : *buffer16_++;
+}
+
+StringCharacterStream::StringCharacterStream(String* string, int offset)
+ : is_one_byte_(false) {
+ Reset(string, offset);
+}
+
+void StringCharacterStream::Reset(String* string, int offset) {
+ buffer8_ = NULL;
+ end_ = NULL;
+ ConsString* cons_string = String::VisitFlat(this, string, offset);
+ iter_.Reset(cons_string, offset);
+ if (cons_string != NULL) {
+ string = iter_.Next(&offset);
+ if (string != NULL) String::VisitFlat(this, string, offset);
+ }
+}
+
+bool StringCharacterStream::HasMore() {
+ if (buffer8_ != end_) return true;
+ int offset;
+ String* string = iter_.Next(&offset);
+ DCHECK_EQ(offset, 0);
+ if (string == NULL) return false;
+ String::VisitFlat(this, string);
+ DCHECK(buffer8_ != end_);
+ return true;
+}
+
+void StringCharacterStream::VisitOneByteString(const uint8_t* chars,
+ int length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ end_ = chars + length;
+}
+
+void StringCharacterStream::VisitTwoByteString(const uint16_t* chars,
+ int length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ end_ = reinterpret_cast<const uint8_t*>(chars + length);
+}
+
+bool String::AsArrayIndex(uint32_t* index) {
+ uint32_t field = hash_field();
+ if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) {
+ return false;
+ }
+ return SlowAsArrayIndex(index);
+}
+
+void String::SetForwardedInternalizedString(String* canonical) {
+ DCHECK(IsInternalizedString());
+ DCHECK(HasHashCode());
+ if (canonical == this) return; // No need to forward.
+ DCHECK(SlowEquals(canonical));
+ DCHECK(canonical->IsInternalizedString());
+ DCHECK(canonical->HasHashCode());
+ WRITE_FIELD(this, kHashFieldSlot, canonical);
+ // Setting the hash field to a tagged value sets the LSB, causing the hash
+ // code to be interpreted as uninitialized. We use this fact to recognize
+ // that we have a forwarded string.
+ DCHECK(!HasHashCode());
+}
+
+String* String::GetForwardedInternalizedString() {
+ DCHECK(IsInternalizedString());
+ if (HasHashCode()) return this;
+ String* canonical = String::cast(READ_FIELD(this, kHashFieldSlot));
+ DCHECK(canonical->IsInternalizedString());
+ DCHECK(SlowEquals(canonical));
+ DCHECK(canonical->HasHashCode());
+ return canonical;
+}
+
+String::SubStringRange::SubStringRange(String* string, int first, int length)
+ : string_(string),
+ first_(first),
+ length_(length == -1 ? string->length() : length) {}
+
+class String::SubStringRange::iterator final {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef uc16 value_type;
+ typedef uc16* pointer;
+ typedef uc16& reference;
+
+ iterator(const iterator& other)
+ : content_(other.content_), offset_(other.offset_) {}
+
+ uc16 operator*() { return content_.Get(offset_); }
+ bool operator==(const iterator& other) const {
+ return content_.UsesSameString(other.content_) && offset_ == other.offset_;
+ }
+ bool operator!=(const iterator& other) const {
+ return !content_.UsesSameString(other.content_) || offset_ != other.offset_;
+ }
+ iterator& operator++() {
+ ++offset_;
+ return *this;
+ }
+ iterator operator++(int);
+
+ private:
+ friend class String;
+ iterator(String* from, int offset)
+ : content_(from->GetFlatContent()), offset_(offset) {}
+ String::FlatContent content_;
+ int offset_;
+};
+
+String::SubStringRange::iterator String::SubStringRange::begin() {
+ return String::SubStringRange::iterator(string_, first_);
+}
+
+String::SubStringRange::iterator String::SubStringRange::end() {
+ return String::SubStringRange::iterator(string_, first_ + length_);
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STRING_INL_H_
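String::VisitFlat and StringCharacterStream above hand the flat character data to a visitor through VisitOneByteString / VisitTwoByteString callbacks; a non-null ConsString return value means the string was not flat at that point and the caller must descend further, as StringCharacterStream::Reset does. Below is a minimal sketch of such a visitor, assuming only the callback signatures shown in this header; the counting logic is illustrative, not V8 code.

#include <cstdint>

// Toy visitor that just counts the characters it is handed.
class LengthCountingVisitor {
 public:
  void VisitOneByteString(const uint8_t* chars, int length) {
    (void)chars;  // A real visitor would consume the one-byte data here.
    total_ += length;
  }
  void VisitTwoByteString(const uint16_t* chars, int length) {
    (void)chars;  // A real visitor would consume the two-byte data here.
    total_ += length;
  }
  int total() const { return total_; }

 private:
  int total_ = 0;
};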
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index a827895e6a..88a86dfcdf 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -13,19 +13,34 @@
namespace v8 {
namespace internal {
-class StringTableShape : public BaseShape<HashTableKey*> {
+class StringTableKey : public HashTableKey {
public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
+ explicit inline StringTableKey(uint32_t hash_field);
+
+ virtual Handle<String> AsHandle(Isolate* isolate) = 0;
+ uint32_t HashField() const {
+ DCHECK_NE(0, hash_field_);
+ return hash_field_;
}
- static inline uint32_t Hash(HashTableKey* key) { return key->Hash(); }
+ protected:
+ inline void set_hash_field(uint32_t hash_field);
+
+ private:
+ uint32_t hash_field_ = 0;
+};
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
+class StringTableShape : public BaseShape<StringTableKey*> {
+ public:
+ static inline bool IsMatch(Key key, Object* value) {
+ return key->IsMatch(value);
}
- static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
+ static inline uint32_t Hash(Isolate* isolate, Key key) { return key->Hash(); }
+
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+
+ static inline Handle<Object> AsHandle(Isolate* isolate, Key key);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
@@ -37,32 +52,24 @@ class SeqOneByteString;
//
// No special elements in the prefix and the element size is 1
// because only the string itself (the key) needs to be stored.
-class StringTable
- : public HashTable<StringTable, StringTableShape, HashTableKey*> {
+class StringTable : public HashTable<StringTable, StringTableShape> {
public:
// Find string in the string table. If it is not there yet, it is
// added. The return value is the string found.
V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
Handle<String> key);
- static Handle<String> LookupKey(Isolate* isolate, HashTableKey* key);
- static String* LookupKeyIfExists(Isolate* isolate, HashTableKey* key);
-
- // Tries to internalize given string and returns string handle on success
- // or an empty handle otherwise.
- MUST_USE_RESULT static MaybeHandle<String> InternalizeStringIfExists(
- Isolate* isolate, Handle<String> string);
+ static Handle<String> LookupKey(Isolate* isolate, StringTableKey* key);
+ static String* LookupKeyIfExists(Isolate* isolate, StringTableKey* key);
// Looks up a string that is equal to the given string and returns
// string handle if it is found, or an empty handle otherwise.
- MUST_USE_RESULT static MaybeHandle<String> LookupStringIfExists(
- Isolate* isolate, Handle<String> str);
MUST_USE_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
Isolate* isolate, uint16_t c1, uint16_t c2);
static Object* LookupStringIfExists_NoAllocate(String* string);
static void EnsureCapacityForDeserialization(Isolate* isolate, int expected);
- DECLARE_CAST(StringTable)
+ DECL_CAST(StringTable)
private:
template <bool seq_one_byte>
@@ -74,21 +81,21 @@ class StringTable
class StringSetShape : public BaseShape<String*> {
public:
static inline bool IsMatch(String* key, Object* value);
- static inline uint32_t Hash(String* key);
- static inline uint32_t HashForObject(String* key, Object* object);
+ static inline uint32_t Hash(Isolate* isolate, String* key);
+ static inline uint32_t HashForObject(Isolate* isolate, Object* object);
static const int kPrefixSize = 0;
static const int kEntrySize = 1;
};
-class StringSet : public HashTable<StringSet, StringSetShape, String*> {
+class StringSet : public HashTable<StringSet, StringSetShape> {
public:
static Handle<StringSet> New(Isolate* isolate);
static Handle<StringSet> Add(Handle<StringSet> blacklist,
Handle<String> name);
bool Has(Handle<String> name);
- DECLARE_CAST(StringSet)
+ DECL_CAST(StringSet)
};
} // namespace internal
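The reworked StringTableShape keys lookups on StringTableKey objects, which carry their own hash and matching logic (see the OneByteStringKey and Utf8StringKey subclasses in string-inl.h above). The following is a simplified model of that key-driven lookup against a toy bucketed table, not V8's HashTable; the hash function and storage are purely illustrative.

#include <cstdint>
#include <string>
#include <vector>

// The key object supplies both the hash (bucket choice) and the match predicate.
struct KeySketch {
  virtual ~KeySketch() = default;
  virtual uint32_t Hash() const = 0;
  virtual bool IsMatch(const std::string& entry) const = 0;
};

struct Utf8KeySketch : KeySketch {
  explicit Utf8KeySketch(std::string s) : str(std::move(s)) {}
  uint32_t Hash() const override {
    uint32_t h = 2166136261u;  // FNV-1a, illustrative only.
    for (unsigned char c : str) h = (h ^ c) * 16777619u;
    return h;
  }
  bool IsMatch(const std::string& entry) const override { return entry == str; }
  std::string str;
};

// The key picks the bucket, then IsMatch finds the entry within it.
const std::string* Lookup(const std::vector<std::vector<std::string>>& buckets,
                          const KeySketch& key) {
  const auto& bucket = buckets[key.Hash() % buckets.size()];
  for (const auto& entry : bucket) {
    if (key.IsMatch(entry)) return &entry;
  }
  return nullptr;
}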
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
new file mode 100644
index 0000000000..04dbe9bbb7
--- /dev/null
+++ b/deps/v8/src/objects/string.h
@@ -0,0 +1,877 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_STRING_H_
+#define V8_OBJECTS_STRING_H_
+
+#include "src/base/bits.h"
+#include "src/objects/name.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+enum AllowNullsFlag { ALLOW_NULLS, DISALLOW_NULLS };
+enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
+
+// The characteristics of a string are stored in its map. Retrieving these
+// few bits of information is moderately expensive, involving two memory
+// loads where the second is dependent on the first. To improve efficiency
+// the shape of the string is given its own class so that it can be retrieved
+// once and used for several string operations. A StringShape is small enough
+// to be passed by value and is immutable, but be aware that flattening a
+// string can potentially alter its shape. Also be aware that a GC caused by
+// something else can alter the shape of a string due to ConsString
+// shortcutting. Keeping these restrictions in mind has proven to be error-
+// prone and so we no longer put StringShapes in variables unless there is a
+// concrete performance benefit at that particular point in the code.
+class StringShape BASE_EMBEDDED {
+ public:
+ inline explicit StringShape(const String* s);
+ inline explicit StringShape(Map* s);
+ inline explicit StringShape(InstanceType t);
+ inline bool IsSequential();
+ inline bool IsExternal();
+ inline bool IsCons();
+ inline bool IsSliced();
+ inline bool IsThin();
+ inline bool IsIndirect();
+ inline bool IsExternalOneByte();
+ inline bool IsExternalTwoByte();
+ inline bool IsSequentialOneByte();
+ inline bool IsSequentialTwoByte();
+ inline bool IsInternalized();
+ inline StringRepresentationTag representation_tag();
+ inline uint32_t encoding_tag();
+ inline uint32_t full_representation_tag();
+ inline bool HasOnlyOneByteChars();
+#ifdef DEBUG
+ inline uint32_t type() { return type_; }
+ inline void invalidate() { valid_ = false; }
+ inline bool valid() { return valid_; }
+#else
+ inline void invalidate() {}
+#endif
+
+ private:
+ uint32_t type_;
+#ifdef DEBUG
+ inline void set_valid() { valid_ = true; }
+ bool valid_;
+#else
+ inline void set_valid() {}
+#endif
+};
+
+// The String abstract class captures JavaScript string values:
+//
+// Ecma-262:
+// 4.3.16 String Value
+// A string value is a member of the type String and is a finite
+// ordered sequence of zero or more 16-bit unsigned integer values.
+//
+// All string values have a length field.
+class String : public Name {
+ public:
+ enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
+
+ class SubStringRange {
+ public:
+ explicit inline SubStringRange(String* string, int first = 0,
+ int length = -1);
+ class iterator;
+ inline iterator begin();
+ inline iterator end();
+
+ private:
+ String* string_;
+ int first_;
+ int length_;
+ };
+
+ // Representation of the flat content of a String.
+ // A non-flat string doesn't have flat content.
+ // A flat string has content that's encoded as a sequence of either
+ // one-byte chars or two-byte UC16.
+ // Returned by String::GetFlatContent().
+ class FlatContent {
+ public:
+ // Returns true if the string is flat and this structure contains content.
+ bool IsFlat() const { return state_ != NON_FLAT; }
+ // Returns true if the structure contains one-byte content.
+ bool IsOneByte() const { return state_ == ONE_BYTE; }
+ // Returns true if the structure contains two-byte content.
+ bool IsTwoByte() const { return state_ == TWO_BYTE; }
+
+ // Return the one-byte content of the string. Only use if IsOneByte()
+ // returns true.
+ Vector<const uint8_t> ToOneByteVector() const {
+ DCHECK_EQ(ONE_BYTE, state_);
+ return Vector<const uint8_t>(onebyte_start, length_);
+ }
+ // Return the two-byte content of the string. Only use if IsTwoByte()
+ // returns true.
+ Vector<const uc16> ToUC16Vector() const {
+ DCHECK_EQ(TWO_BYTE, state_);
+ return Vector<const uc16>(twobyte_start, length_);
+ }
+
+ uc16 Get(int i) const {
+ DCHECK(i < length_);
+ DCHECK(state_ != NON_FLAT);
+ if (state_ == ONE_BYTE) return onebyte_start[i];
+ return twobyte_start[i];
+ }
+
+ bool UsesSameString(const FlatContent& other) const {
+ return onebyte_start == other.onebyte_start;
+ }
+
+ private:
+ enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
+
+ // Constructors only used by String::GetFlatContent().
+ explicit FlatContent(const uint8_t* start, int length)
+ : onebyte_start(start), length_(length), state_(ONE_BYTE) {}
+ explicit FlatContent(const uc16* start, int length)
+ : twobyte_start(start), length_(length), state_(TWO_BYTE) {}
+ FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) {}
+
+ union {
+ const uint8_t* onebyte_start;
+ const uc16* twobyte_start;
+ };
+ int length_;
+ State state_;
+
+ friend class String;
+ friend class IterableSubString;
+ };
+
+ template <typename Char>
+ INLINE(Vector<const Char> GetCharVector());
+
+ // Get and set the length of the string.
+ inline int length() const;
+ inline void set_length(int value);
+
+ // Get and set the length of the string using acquire loads and release
+ // stores.
+ inline int synchronized_length() const;
+ inline void synchronized_set_length(int value);
+
+ // Returns whether this string has only one-byte chars, i.e. all of them can
+ // be one-byte encoded. This might be the case even if the string is
+ // two-byte. Such strings may appear when the embedder prefers
+ // two-byte external representations even for one-byte data.
+ inline bool IsOneByteRepresentation() const;
+ inline bool IsTwoByteRepresentation() const;
+
+ // Cons and slices have an encoding flag that may not represent the actual
+ // encoding of the underlying string. This is taken into account here.
+ // Requires: this->IsFlat()
+ inline bool IsOneByteRepresentationUnderneath();
+ inline bool IsTwoByteRepresentationUnderneath();
+
+ // NOTE: this should be considered only a hint. False negatives are
+ // possible.
+ inline bool HasOnlyOneByteChars();
+
+ // Get and set individual two byte chars in the string.
+ inline void Set(int index, uint16_t value);
+ // Get individual two byte char in the string. Repeated calls
+ // to this method are not efficient unless the string is flat.
+ INLINE(uint16_t Get(int index));
+
+ // ES6 section 7.1.3.1 ToNumber Applied to the String Type
+ static Handle<Object> ToNumber(Handle<String> subject);
+
+ // Flattens the string. Checks first inline to see if it is
+ // necessary. Does nothing if the string is not a cons string.
+ // Flattening allocates a sequential string with the same data as
+ // the given string and mutates the cons string to a degenerate
+ // form, where the first component is the new sequential string and
+ // the second component is the empty string. If allocation fails,
+ // this function returns a failure. If flattening succeeds, this
+ // function returns the sequential string that is now the first
+ // component of the cons string.
+ //
+ // Degenerate cons strings are handled specially by the garbage
+ // collector (see IsShortcutCandidate).
+
+ static inline Handle<String> Flatten(Handle<String> string,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Tries to return the content of a flat string as a structure holding either
+ // a flat vector of char or of uc16.
+ // If the string isn't flat, and therefore doesn't have flat content, the
+ // returned structure will report so, and can't provide a vector of either
+ // kind.
+ FlatContent GetFlatContent();
+
+ // Returns the parent of a sliced string or first part of a flat cons string.
+ // Requires: StringShape(this).IsIndirect() && this->IsFlat()
+ inline String* GetUnderlying();
+
+ // String relational comparison, implemented according to ES6 section 7.2.11
+ // Abstract Relational Comparison (step 5): The comparison of Strings uses a
+ // simple lexicographic ordering on sequences of code unit values. There is no
+ // attempt to use the more complex, semantically oriented definitions of
+ // character or string equality and collating order defined in the Unicode
+ // specification. Therefore String values that are canonically equal according
+ // to the Unicode standard could test as unequal. In effect this algorithm
+ // assumes that both Strings are already in normalized form. Also, note that
+ // for strings containing supplementary characters, lexicographic ordering on
+ // sequences of UTF-16 code unit values differs from that on sequences of code
+ // point values.
+ MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
+ Handle<String> y);
+
+ // Perform ES6 21.1.3.8, including checking arguments.
+ static Object* IndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position);
+ // Perform string match of pattern on subject, starting at start index.
+ // Caller must ensure that 0 <= start_index <= sub->length(), as this does not
+ // check any arguments.
+ static int IndexOf(Isolate* isolate, Handle<String> receiver,
+ Handle<String> search, int start_index);
+
+ static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position);
+
+ // Encapsulates logic related to a match and its capture groups as required
+ // by GetSubstitution.
+ class Match {
+ public:
+ virtual Handle<String> GetMatch() = 0;
+ virtual Handle<String> GetPrefix() = 0;
+ virtual Handle<String> GetSuffix() = 0;
+
+ // A named capture can be invalid (if it is not specified in the pattern),
+ // unmatched (specified but not matched in the current string), and matched.
+ enum CaptureState { INVALID, UNMATCHED, MATCHED };
+
+ virtual int CaptureCount() = 0;
+ virtual bool HasNamedCaptures() = 0;
+ virtual MaybeHandle<String> GetCapture(int i, bool* capture_exists) = 0;
+ virtual MaybeHandle<String> GetNamedCapture(Handle<String> name,
+ CaptureState* state) = 0;
+
+ virtual ~Match() {}
+ };
+
+ // ES#sec-getsubstitution
+ // GetSubstitution(matched, str, position, captures, replacement)
+ // Expand the $-expressions in the string and return a new string with
+ // the result.
+ // A {start_index} can be passed to specify where to start scanning the
+ // replacement string.
+ MUST_USE_RESULT static MaybeHandle<String> GetSubstitution(
+ Isolate* isolate, Match* match, Handle<String> replacement,
+ int start_index = 0);
+
+ // String equality operations.
+ inline bool Equals(String* other);
+ inline static bool Equals(Handle<String> one, Handle<String> two);
+ bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
+
+ // Dispatches to Is{One,Two}ByteEqualTo.
+ template <typename Char>
+ bool IsEqualTo(Vector<const Char> str);
+
+ bool IsOneByteEqualTo(Vector<const uint8_t> str);
+ bool IsTwoByteEqualTo(Vector<const uc16> str);
+
+ // Return a UTF8 representation of the string. The string is null
+ // terminated but may optionally contain nulls. The length is returned
+ // in length_output if length_output is not a null pointer. The string
+ // should be nearly flat; otherwise the performance of this method may
+ // be very slow (quadratic in the length). Setting robustness_flag to
+ // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust. This means it
+ // handles unexpected data without causing assert failures and it does not
+ // do any heap allocations. This is useful when printing stack traces.
+ std::unique_ptr<char[]> ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robustness_flag, int offset,
+ int length, int* length_output = 0);
+ std::unique_ptr<char[]> ToCString(
+ AllowNullsFlag allow_nulls = DISALLOW_NULLS,
+ RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
+ int* length_output = 0);
+
+ bool ComputeArrayIndex(uint32_t* index);
+
+ // Externalization.
+ bool MakeExternal(v8::String::ExternalStringResource* resource);
+ bool MakeExternal(v8::String::ExternalOneByteStringResource* resource);
+
+ // Conversion.
+ inline bool AsArrayIndex(uint32_t* index);
+ uint32_t inline ToValidIndex(Object* number);
+
+ // Trimming.
+ enum TrimMode { kTrim, kTrimLeft, kTrimRight };
+ static Handle<String> Trim(Handle<String> string, TrimMode mode);
+
+ DECL_CAST(String)
+
+ void PrintOn(FILE* out);
+
+ // For use during stack traces. Performs a rudimentary sanity check.
+ bool LooksValid();
+
+ // Dispatched behavior.
+ void StringShortPrint(StringStream* accumulator, bool show_details = true);
+ void PrintUC16(std::ostream& os, int start = 0, int end = -1); // NOLINT
+#if defined(DEBUG) || defined(OBJECT_PRINT)
+ char* ToAsciiArray();
+#endif
+ DECL_PRINTER(String)
+ DECL_VERIFIER(String)
+
+ inline bool IsFlat();
+
+ // Layout description.
+ static const int kLengthOffset = Name::kSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ // Max char codes.
+ static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
+ static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
+ static const int kMaxUtf16CodeUnit = 0xffff;
+ static const uint32_t kMaxUtf16CodeUnitU = kMaxUtf16CodeUnit;
+ static const uc32 kMaxCodePoint = 0x10ffff;
+
+ // Maximal string length.
+ static const int kMaxLength = (1 << 28) - 16;
+
+ // Max length for computing hash. For strings longer than this limit the
+ // string length is used as the hash value.
+ static const int kMaxHashCalcLength = 16383;
+
+ // Limit for truncation in short printing.
+ static const int kMaxShortPrintLength = 1024;
+
+ // Support for regular expressions.
+ const uc16* GetTwoByteData(unsigned start);
+
+ // Helper function for flattening strings.
+ template <typename sinkchar>
+ static void WriteToFlat(String* source, sinkchar* sink, int from, int to);
+
+ // The return value is the index of the first non-one-byte character, except
+ // that it may point at the start of the aligned word containing that
+ // character rather than at the character itself. If the return value is
+ // >= the passed length, the entire string was one-byte.
+ static inline int NonAsciiStart(const char* chars, int length) {
+ const char* start = chars;
+ const char* limit = chars + length;
+
+ if (length >= kIntptrSize) {
+ // Check unaligned bytes.
+ while (!IsAligned(reinterpret_cast<intptr_t>(chars), sizeof(uintptr_t))) {
+ if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ return static_cast<int>(chars - start);
+ }
+ ++chars;
+ }
+ // Check aligned words.
+ DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+ while (chars + sizeof(uintptr_t) <= limit) {
+ if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
+ return static_cast<int>(chars - start);
+ }
+ chars += sizeof(uintptr_t);
+ }
+ }
+ // Check remaining unaligned bytes.
+ while (chars < limit) {
+ if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ return static_cast<int>(chars - start);
+ }
+ ++chars;
+ }
+
+ return static_cast<int>(chars - start);
+ }
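As a rough stand-alone illustration of the word-at-a-time check above: the mask computed as kUintptrAllBitsSet / 0xFF * 0x80 has the high bit of every byte lane set, so ANDing it with a word is non-zero exactly when some byte lies outside the one-byte range.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // On a 64-bit target this evaluates to 0x8080808080808080.
      const uint64_t non_one_byte_mask = UINT64_MAX / 0xFF * 0x80;
      // A word of pure ASCII ("abcdefgh") has no high bits set...
      const uint64_t ascii_word = 0x6162636465666768;
      // ...while a word containing 0xC3 (start of a UTF-8 sequence) does.
      const uint64_t non_ascii_word = 0x61626364C3A66768;
      std::printf("%d %d\n",
                  static_cast<int>((ascii_word & non_one_byte_mask) != 0),
                  static_cast<int>((non_ascii_word & non_one_byte_mask) != 0));
      return 0;  // prints "0 1"
    }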
+
+ static inline bool IsAscii(const char* chars, int length) {
+ return NonAsciiStart(chars, length) >= length;
+ }
+
+ static inline bool IsAscii(const uint8_t* chars, int length) {
+ return NonAsciiStart(reinterpret_cast<const char*>(chars), length) >=
+ length;
+ }
+
+ static inline int NonOneByteStart(const uc16* chars, int length) {
+ const uc16* limit = chars + length;
+ const uc16* start = chars;
+ while (chars < limit) {
+ if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
+ ++chars;
+ }
+ return static_cast<int>(chars - start);
+ }
+
+ static inline bool IsOneByte(const uc16* chars, int length) {
+ return NonOneByteStart(chars, length) >= length;
+ }
+
+ template <class Visitor>
+ static inline ConsString* VisitFlat(Visitor* visitor, String* string,
+ int offset = 0);
+
+ static Handle<FixedArray> CalculateLineEnds(Handle<String> string,
+ bool include_ending_line);
+
+ // Use the hash field to forward to the canonical internalized string
+ // when deserializing an internalized string.
+ inline void SetForwardedInternalizedString(String* string);
+ inline String* GetForwardedInternalizedString();
+
+ private:
+ friend class Name;
+ friend class StringTableInsertionKey;
+ friend class InternalizedStringKey;
+
+ static Handle<String> SlowFlatten(Handle<ConsString> cons,
+ PretenureFlag tenure);
+
+ // Slow case of String::Equals. This implementation works on any strings
+ // but it is most efficient on strings that are almost flat.
+ bool SlowEquals(String* other);
+
+ static bool SlowEquals(Handle<String> one, Handle<String> two);
+
+ // Slow case of AsArrayIndex.
+ V8_EXPORT_PRIVATE bool SlowAsArrayIndex(uint32_t* index);
+
+ // Compute and set the hash code.
+ uint32_t ComputeAndSetHash();
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(String);
+};
+
+// The SeqString abstract class captures sequential string values.
+class SeqString : public String {
+ public:
+ DECL_CAST(SeqString)
+
+ // Layout description.
+ static const int kHeaderSize = String::kSize;
+
+ // Truncate the string in-place if possible and return the result.
+ // In case of new_length == 0, the empty string is returned without
+ // truncating the original string.
+ MUST_USE_RESULT static Handle<String> Truncate(Handle<SeqString> string,
+ int new_length);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
+};
+
+// The OneByteString class captures sequential one-byte string objects.
+// Each character in the OneByteString is a one-byte character.
+class SeqOneByteString : public SeqString {
+ public:
+ static const bool kHasOneByteEncoding = true;
+
+ // Dispatched behavior.
+ inline uint16_t SeqOneByteStringGet(int index);
+ inline void SeqOneByteStringSet(int index, uint16_t value);
+
+ // Get the address of the characters in this string.
+ inline Address GetCharsAddress();
+
+ inline uint8_t* GetChars();
+
+ DECL_CAST(SeqOneByteString)
+
+ // Garbage collection support. This method is called by the
+ // garbage collector to compute the actual size of a OneByteString
+ // instance.
+ inline int SeqOneByteStringSize(InstanceType instance_type);
+
+ // Computes the size for a OneByteString instance of a given length.
+ static int SizeFor(int length) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
+ }
+
+ // Maximal memory usage for a single sequential one-byte string.
+ static const int kMaxSize = 512 * MB - 1;
+ STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
+};
+
+// The TwoByteString class captures sequential Unicode string objects.
+// Each character in the TwoByteString is a two-byte uint16_t.
+class SeqTwoByteString : public SeqString {
+ public:
+ static const bool kHasOneByteEncoding = false;
+
+ // Dispatched behavior.
+ inline uint16_t SeqTwoByteStringGet(int index);
+ inline void SeqTwoByteStringSet(int index, uint16_t value);
+
+ // Get the address of the characters in this string.
+ inline Address GetCharsAddress();
+
+ inline uc16* GetChars();
+
+ // For regexp code.
+ const uint16_t* SeqTwoByteStringGetData(unsigned start);
+
+ DECL_CAST(SeqTwoByteString)
+
+ // Garbage collection support. This method is called by the
+ // garbage collector to compute the actual size of a TwoByteString
+ // instance.
+ inline int SeqTwoByteStringSize(InstanceType instance_type);
+
+ // Computes the size for a TwoByteString instance of a given length.
+ static int SizeFor(int length) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
+ }
+
+ // Maximal memory usage for a single sequential two-byte string.
+ static const int kMaxSize = 512 * MB - 1;
+ STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize) / sizeof(uint16_t)) >=
+ String::kMaxLength);
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
+};
+
+// The ConsString class describes string values built by using the
+// addition operator on strings. A ConsString is a pair where the
+// first and second components are pointers to other string values.
+// One or both components of a ConsString can be pointers to other
+// ConsStrings, creating a binary tree of ConsStrings where the leaves
+// are non-ConsString string values. The string value represented by
+// a ConsString can be obtained by concatenating the leaf string
+// values in a left-to-right depth-first traversal of the tree.
+class ConsString : public String {
+ public:
+ // First string of the cons cell.
+ inline String* first();
+ // Doesn't check that the result is a string, even in debug mode. This is
+ // useful during GC where the mark bits confuse the checks.
+ inline Object* unchecked_first();
+ inline void set_first(String* first,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // Second string of the cons cell.
+ inline String* second();
+ // Doesn't check that the result is a string, even in debug mode. This is
+ // useful during GC where the mark bits confuse the checks.
+ inline Object* unchecked_second();
+ inline void set_second(String* second,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ // Dispatched behavior.
+ V8_EXPORT_PRIVATE uint16_t ConsStringGet(int index);
+
+ DECL_CAST(ConsString)
+
+ // Layout description.
+ static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
+ static const int kSecondOffset = kFirstOffset + kPointerSize;
+ static const int kSize = kSecondOffset + kPointerSize;
+
+ // Minimum length for a cons string.
+ static const int kMinLength = 13;
+
+ typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ DECL_VERIFIER(ConsString)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
+};
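As a rough sketch of the left-to-right depth-first concatenation described in the ConsString comment above, over an invented ToyCons node rather than V8's ConsString:

    #include <memory>
    #include <string>

    // Toy cons node: either a leaf holding text, or an inner node whose value
    // is first's value followed by second's value.
    struct ToyCons {
      std::string leaf;  // used only when first == nullptr
      std::unique_ptr<ToyCons> first;
      std::unique_ptr<ToyCons> second;
    };

    // Left-to-right depth-first concatenation of the leaves.
    void Flatten(const ToyCons* node, std::string* out) {
      if (!node->first) {
        *out += node->leaf;
        return;
      }
      Flatten(node->first.get(), out);
      Flatten(node->second.get(), out);
    }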
+
+// The ThinString class describes string objects that are just references
+// to another string object. They are used for in-place internalization when
+// the original string cannot actually be internalized in-place: in these
+// cases, the original string is converted to a ThinString pointing at its
+// internalized version (which is allocated as a new object).
+// In terms of memory layout and most algorithms operating on strings,
+// ThinStrings can be thought of as "one-part cons strings".
+class ThinString : public String {
+ public:
+ // Actual string that this ThinString refers to.
+ inline String* actual() const;
+ inline void set_actual(String* s,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
+
+ DECL_CAST(ThinString)
+ DECL_VERIFIER(ThinString)
+
+ // Layout description.
+ static const int kActualOffset = String::kSize;
+ static const int kSize = kActualOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kActualOffset, kSize, kSize> BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThinString);
+};
+
+// The SlicedString class describes strings that are substrings of another
+// sequential string. The motivation is to save time and memory when creating
+// a substring. A SlicedString is described by a pointer to its parent,
+// the offset from the start of the parent string, and the length. Using
+// a SlicedString therefore requires unpacking the parent string and
+// adding the offset to the start address. Substrings of a SlicedString
+// are not nested, since the double indirection is simplified when creating
+// such a substring.
+// Currently missing features are:
+// - handling externalized parent strings
+// - external strings as parent
+// - truncating a sliced string to enable an otherwise unneeded parent to be GC'ed.
+class SlicedString : public String {
+ public:
+ inline String* parent();
+ inline void set_parent(String* parent,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline int offset() const;
+ inline void set_offset(int offset);
+
+ // Dispatched behavior.
+ V8_EXPORT_PRIVATE uint16_t SlicedStringGet(int index);
+
+ DECL_CAST(SlicedString)
+
+ // Layout description.
+ static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
+ static const int kOffsetOffset = kParentOffset + kPointerSize;
+ static const int kSize = kOffsetOffset + kPointerSize;
+
+ // Minimum length for a sliced string.
+ static const int kMinLength = 13;
+
+ typedef FixedBodyDescriptor<kParentOffset, kOffsetOffset + kPointerSize,
+ kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ DECL_VERIFIER(SlicedString)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
+};
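As a rough sketch of the no-nested-slices rule: a substring of a slice adds the offsets up front and points at the original parent, so reads keep a single indirection. ToySlice and MakeSubstring are invented purely for illustration.

    // A slice refers to a range of a backing (sequential) string.
    struct ToySlice {
      const ToySlice* parent;  // nullptr for a backing string
      int offset;
      int length;
    };

    ToySlice MakeSubstring(const ToySlice& s, int from, int length) {
      if (s.parent != nullptr) {
        // Collapse the double indirection at creation time.
        return ToySlice{s.parent, s.offset + from, length};
      }
      return ToySlice{&s, from, length};
    }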
+
+// The ExternalString class describes string values that are backed by
+// a string resource that lies outside the V8 heap. ExternalStrings
+// consist of the length field common to all strings and a pointer to the
+// external resource. It is important to ensure (externally) that the
+// resource is not deallocated while the ExternalString is live in the
+// V8 heap.
+//
+// The API expects that all ExternalStrings are created through the
+// API. Therefore, ExternalStrings should not be used internally.
+class ExternalString : public String {
+ public:
+ DECL_CAST(ExternalString)
+
+ // Layout description.
+ static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
+ static const int kShortSize = kResourceOffset + kPointerSize;
+ static const int kResourceDataOffset = kResourceOffset + kPointerSize;
+ static const int kSize = kResourceDataOffset + kPointerSize;
+
+ // Return whether the external string is short (data pointer is not cached).
+ inline bool is_short();
+
+ STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
+};
+
+// The ExternalOneByteString class is an external string backed by a
+// one-byte string.
+class ExternalOneByteString : public ExternalString {
+ public:
+ static const bool kHasOneByteEncoding = true;
+
+ typedef v8::String::ExternalOneByteStringResource Resource;
+
+ // The underlying resource.
+ inline const Resource* resource();
+ inline void set_resource(const Resource* buffer);
+
+ // Update the pointer cache to the external character array.
+ // The cached pointer is always valid, as the external character array does
+ // not move during lifetime. Deserialization is the only exception, after
+ // which the pointer cache has to be refreshed.
+ inline void update_data_cache();
+
+ inline const uint8_t* GetChars();
+
+ // Dispatched behavior.
+ inline uint16_t ExternalOneByteStringGet(int index);
+
+ DECL_CAST(ExternalOneByteString)
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
+};
+
+// The ExternalTwoByteString class is an external string backed by a UTF-16
+// encoded string.
+class ExternalTwoByteString : public ExternalString {
+ public:
+ static const bool kHasOneByteEncoding = false;
+
+ typedef v8::String::ExternalStringResource Resource;
+
+ // The underlying string resource.
+ inline const Resource* resource();
+ inline void set_resource(const Resource* buffer);
+
+ // Update the pointer cache to the external character array.
+ // The cached pointer is always valid, as the external character array does
+ // not move during lifetime. Deserialization is the only exception, after
+ // which the pointer cache has to be refreshed.
+ inline void update_data_cache();
+
+ inline const uint16_t* GetChars();
+
+ // Dispatched behavior.
+ inline uint16_t ExternalTwoByteStringGet(int index);
+
+ // For regexp code.
+ inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+
+ DECL_CAST(ExternalTwoByteString)
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
+};
+
+// A flat string reader provides random access to the contents of a
+// string independent of the character width of the string. The handle
+// must be valid as long as the reader is being used.
+class FlatStringReader : public Relocatable {
+ public:
+ FlatStringReader(Isolate* isolate, Handle<String> str);
+ FlatStringReader(Isolate* isolate, Vector<const char> input);
+ void PostGarbageCollection();
+ inline uc32 Get(int index);
+ template <typename Char>
+ inline Char Get(int index);
+ int length() { return length_; }
+
+ private:
+ String** str_;
+ bool is_one_byte_;
+ int length_;
+ const void* start_;
+};
+
+// This maintains an off-stack representation of the stack frames required
+// to traverse a ConsString, allowing an entirely iterative and restartable
+// traversal of the entire string.
+class ConsStringIterator {
+ public:
+ inline ConsStringIterator() {}
+ inline explicit ConsStringIterator(ConsString* cons_string, int offset = 0) {
+ Reset(cons_string, offset);
+ }
+ inline void Reset(ConsString* cons_string, int offset = 0) {
+ depth_ = 0;
+ // Next will always return NULL.
+ if (cons_string == NULL) return;
+ Initialize(cons_string, offset);
+ }
+ // Returns NULL when complete.
+ inline String* Next(int* offset_out) {
+ *offset_out = 0;
+ if (depth_ == 0) return NULL;
+ return Continue(offset_out);
+ }
+
+ private:
+ static const int kStackSize = 32;
+ // Use a mask instead of doing modulo operations for stack wrapping.
+ static const int kDepthMask = kStackSize - 1;
+ static_assert(base::bits::IsPowerOfTwo(kStackSize),
+ "kStackSize must be power of two");
+ static inline int OffsetForDepth(int depth);
+
+ inline void PushLeft(ConsString* string);
+ inline void PushRight(ConsString* string);
+ inline void AdjustMaximumDepth();
+ inline void Pop();
+ inline bool StackBlown() { return maximum_depth_ - depth_ == kStackSize; }
+ void Initialize(ConsString* cons_string, int offset);
+ String* Continue(int* offset_out);
+ String* NextLeaf(bool* blew_stack);
+ String* Search(int* offset_out);
+
+ // Stack must always contain only frames for which right traversal
+ // has not yet been performed.
+ ConsString* frames_[kStackSize];
+ ConsString* root_;
+ int depth_;
+ int maximum_depth_;
+ int consumed_;
+ DISALLOW_COPY_AND_ASSIGN(ConsStringIterator);
+};
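A hedged sketch of the typical driving loop for this iterator, based only on the interface declared above; Visit is a placeholder supplied by the caller, and the fragment is assumed to live alongside these declarations in namespace v8::internal.

    void Visit(String* leaf, int start_offset);  // placeholder, defined elsewhere

    void VisitConsLeaves(ConsString* cons, int offset) {
      ConsStringIterator iter(cons, offset);
      int leaf_offset;
      // Next() returns NULL once the whole tree has been consumed.
      while (String* leaf = iter.Next(&leaf_offset)) {
        // Each leaf is a non-ConsString segment; leaf_offset is the position
        // within it at which consumption should start (non-zero only for the
        // first segment returned).
        Visit(leaf, leaf_offset);
      }
    }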
+
+class StringCharacterStream {
+ public:
+ inline explicit StringCharacterStream(String* string, int offset = 0);
+ inline uint16_t GetNext();
+ inline bool HasMore();
+ inline void Reset(String* string, int offset = 0);
+ inline void VisitOneByteString(const uint8_t* chars, int length);
+ inline void VisitTwoByteString(const uint16_t* chars, int length);
+
+ private:
+ ConsStringIterator iter_;
+ bool is_one_byte_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+ const uint8_t* end_;
+ DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_STRING_H_
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index a0a548b607..7ab965c473 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -4,6 +4,7 @@
#include "src/ostreams.h"
#include "src/objects.h"
+#include "src/objects/string.h"
#if V8_OS_WIN
#if _MSC_VER < 1900
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 5f136aae42..3230680a05 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -7,3 +7,5 @@ neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
+
+# COMPONENT: Blink>JavaScript>Language
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.cc b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
index 7a1bc1ef9e..33ebbbd910 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.cc
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -17,9 +17,8 @@ namespace {
class Rewriter final : public AstTraversalVisitor<Rewriter> {
public:
- Rewriter(uintptr_t stack_limit, Expression* initializer, Scope* param_scope)
- : AstTraversalVisitor(stack_limit, initializer),
- param_scope_(param_scope) {}
+ Rewriter(uintptr_t stack_limit, Expression* initializer, Scope* scope)
+ : AstTraversalVisitor(stack_limit, initializer), scope_(scope) {}
private:
  // This is required so that the overridden Visit* methods can be
@@ -34,11 +33,11 @@ class Rewriter final : public AstTraversalVisitor<Rewriter> {
void VisitTryCatchStatement(TryCatchStatement* stmt);
void VisitWithStatement(WithStatement* stmt);
- Scope* param_scope_;
+ Scope* scope_;
};
void Rewriter::VisitFunctionLiteral(FunctionLiteral* function_literal) {
- function_literal->scope()->ReplaceOuterScope(param_scope_);
+ function_literal->scope()->ReplaceOuterScope(scope_);
}
@@ -63,20 +62,20 @@ void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
if (!proxy->is_resolved()) {
- if (param_scope_->outer_scope()->RemoveUnresolved(proxy)) {
- param_scope_->AddUnresolved(proxy);
+ if (scope_->outer_scope()->RemoveUnresolved(proxy)) {
+ scope_->AddUnresolved(proxy);
}
} else {
// Ensure that temporaries we find are already in the correct scope.
DCHECK(proxy->var()->mode() != TEMPORARY ||
- proxy->var()->scope() == param_scope_->GetClosureScope());
+ proxy->var()->scope() == scope_->GetClosureScope());
}
}
void Rewriter::VisitBlock(Block* stmt) {
if (stmt->scope() != nullptr)
- stmt->scope()->ReplaceOuterScope(param_scope_);
+ stmt->scope()->ReplaceOuterScope(scope_);
else
VisitStatements(stmt->statements());
}
@@ -84,28 +83,33 @@ void Rewriter::VisitBlock(Block* stmt) {
void Rewriter::VisitTryCatchStatement(TryCatchStatement* stmt) {
Visit(stmt->try_block());
- stmt->scope()->ReplaceOuterScope(param_scope_);
+ stmt->scope()->ReplaceOuterScope(scope_);
}
void Rewriter::VisitWithStatement(WithStatement* stmt) {
Visit(stmt->expression());
- stmt->scope()->ReplaceOuterScope(param_scope_);
+ stmt->scope()->ReplaceOuterScope(scope_);
}
} // anonymous namespace
-void ReparentParameterExpressionScope(uintptr_t stack_limit, Expression* expr,
- Scope* param_scope) {
- // The only case that uses this code is block scopes for parameters containing
- // sloppy eval.
- DCHECK(param_scope->is_block_scope());
- DCHECK(param_scope->is_declaration_scope());
- DCHECK(param_scope->calls_sloppy_eval());
- DCHECK(param_scope->outer_scope()->is_function_scope());
-
- Rewriter rewriter(stack_limit, expr, param_scope);
+void ReparentExpressionScope(uintptr_t stack_limit, Expression* expr,
+ Scope* scope) {
+ // Both uses of this function should pass in a block scope.
+ DCHECK(scope->is_block_scope());
+ // These hold for the sloppy parameters-with-eval case...
+ DCHECK_IMPLIES(scope->is_declaration_scope(), scope->calls_sloppy_eval());
+ DCHECK_IMPLIES(scope->is_declaration_scope(),
+ scope->outer_scope()->is_function_scope());
+ // ...whereas these hold for lexical declarations in for-in/of loops.
+ DCHECK_IMPLIES(!scope->is_declaration_scope(),
+ scope->outer_scope()->is_block_scope());
+ DCHECK_IMPLIES(!scope->is_declaration_scope(),
+ scope->outer_scope()->is_hidden());
+
+ Rewriter rewriter(stack_limit, expr, scope);
rewriter.Run();
}
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.h b/deps/v8/src/parsing/parameter-initializer-rewriter.h
index 5e409b4fbc..79da889b25 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.h
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.h
@@ -5,7 +5,7 @@
#ifndef V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
#define V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
-#include "src/ast/ast-types.h"
+#include <stdint.h>
namespace v8 {
namespace internal {
@@ -16,12 +16,13 @@ class Scope;
// When an extra declaration scope needs to be inserted to account for
// a sloppy eval in a default parameter or function body, the expressions
 // need to be in that new inner scope which was added after initial
-// parsing.
+// parsing. We do the same rewriting for initializers of destructured
+// lexical declarations in for-in/of loops.
//
-// param_scope is the new inner scope, and its outer_scope() is assumed
-// to be the function scope which was used during the initial parse.
-void ReparentParameterExpressionScope(uintptr_t stack_limit, Expression* expr,
- Scope* param_scope);
+// scope is the new inner scope, and its outer_scope() is assumed
+// to be the scope which was used during the initial parse.
+void ReparentExpressionScope(uintptr_t stack_limit, Expression* expr,
+ Scope* scope);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 12329307ac..6b34811811 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -5,6 +5,7 @@
#include "src/parsing/parse-info.h"
#include "src/api.h"
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/heap/heap-inl.h"
@@ -39,6 +40,7 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
ast_string_constants_(nullptr),
function_name_(nullptr),
runtime_call_stats_(nullptr),
+ source_range_map_(nullptr),
literal_(nullptr),
deferred_handles_(nullptr) {}
@@ -135,7 +137,7 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
bool ParseInfo::is_declaration() const {
- return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDeclaration)) != 0;
+ return SharedFunctionInfo::IsDeclarationBit::decode(compiler_hints_);
}
FunctionKind ParseInfo::function_kind() const {
@@ -158,10 +160,11 @@ void ParseInfo::InitFromIsolate(Isolate* isolate) {
set_hash_seed(isolate->heap()->HashSeed());
set_stack_limit(isolate->stack_guard()->real_climit());
set_unicode_cache(isolate->unicode_cache());
- set_tail_call_elimination_enabled(
- isolate->is_tail_call_elimination_enabled());
set_runtime_call_stats(isolate->counters()->runtime_call_stats());
set_ast_string_constants(isolate->ast_string_constants());
+ if (FLAG_block_coverage && isolate->is_block_code_coverage()) {
+ set_source_range_map(new (zone()) SourceRangeMap(zone()));
+ }
}
void ParseInfo::UpdateStatisticsAfterBackgroundParse(Isolate* isolate) {
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 5d8bb9c8eb..3eed5bca3c 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -31,6 +31,7 @@ class FunctionLiteral;
class RuntimeCallStats;
class ScriptData;
class SharedFunctionInfo;
+class SourceRangeMap;
class UnicodeCache;
class Utf16CharacterStream;
class Zone;
@@ -79,8 +80,6 @@ class V8_EXPORT_PRIVATE ParseInfo : public CompileJobFinishCallback {
set_is_named_expression)
FLAG_ACCESSOR(kDebug, is_debug, set_is_debug)
FLAG_ACCESSOR(kSerializing, will_serialize, set_will_serialize)
- FLAG_ACCESSOR(kTailCallEliminationEnabled, is_tail_call_elimination_enabled,
- set_tail_call_elimination_enabled)
#undef FLAG_ACCESSOR
@@ -119,7 +118,9 @@ class V8_EXPORT_PRIVATE ParseInfo : public CompileJobFinishCallback {
ScriptData** cached_data() const { return cached_data_; }
void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
- PreParsedScopeData* preparsed_scope_data() { return &preparsed_scope_data_; }
+ ConsumedPreParsedScopeData* consumed_preparsed_scope_data() {
+ return &consumed_preparsed_scope_data_;
+ }
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
@@ -205,6 +206,11 @@ class V8_EXPORT_PRIVATE ParseInfo : public CompileJobFinishCallback {
runtime_call_stats_ = runtime_call_stats;
}
+ SourceRangeMap* source_range_map() const { return source_range_map_; }
+ void set_source_range_map(SourceRangeMap* source_range_map) {
+ source_range_map_ = source_range_map;
+ }
+
// Getters for individual compiler hints.
bool is_declaration() const;
FunctionKind function_kind() const;
@@ -272,8 +278,7 @@ class V8_EXPORT_PRIVATE ParseInfo : public CompileJobFinishCallback {
kIsNamedExpression = 1 << 8,
kDebug = 1 << 9,
kSerializing = 1 << 10,
- kTailCallEliminationEnabled = 1 << 11,
- kAstValueFactoryOwned = 1 << 12,
+ kAstValueFactoryOwned = 1 << 11,
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -303,11 +308,12 @@ class V8_EXPORT_PRIVATE ParseInfo : public CompileJobFinishCallback {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
ScriptData** cached_data_; // used if available, populated if requested.
- PreParsedScopeData preparsed_scope_data_;
+ ConsumedPreParsedScopeData consumed_preparsed_scope_data_;
AstValueFactory* ast_value_factory_; // used if available, otherwise new.
const class AstStringConstants* ast_string_constants_;
const AstRawString* function_name_;
RuntimeCallStats* runtime_call_stats_;
+ SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 2d01398980..104e36ea93 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -5,6 +5,7 @@
#ifndef V8_PARSING_PARSER_BASE_H
#define V8_PARSING_PARSER_BASE_H
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
@@ -20,8 +21,6 @@
namespace v8 {
namespace internal {
-class PreParsedScopeData;
-
enum FunctionNameValidity {
kFunctionNameIsStrictReserved,
kSkipFunctionNameCheck,
@@ -83,6 +82,57 @@ struct FormalParametersBase {
int arity = 0;
};
+// Stack-allocated scope to collect source ranges from the parser.
+class SourceRangeScope final {
+ public:
+ enum PositionKind {
+ POSITION_BEG,
+ POSITION_END,
+ PEEK_POSITION_BEG,
+ PEEK_POSITION_END,
+ };
+
+ SourceRangeScope(Scanner* scanner, SourceRange* range,
+ PositionKind pre_kind = PEEK_POSITION_BEG,
+ PositionKind post_kind = POSITION_END)
+ : scanner_(scanner), range_(range), post_kind_(post_kind) {
+ range_->start = GetPosition(pre_kind);
+ DCHECK_NE(range_->start, kNoSourcePosition);
+ }
+
+ ~SourceRangeScope() { Finalize(); }
+
+ const SourceRange& Finalize() {
+ if (is_finalized_) return *range_;
+ is_finalized_ = true;
+ range_->end = GetPosition(post_kind_);
+ DCHECK_NE(range_->end, kNoSourcePosition);
+ return *range_;
+ }
+
+ private:
+ int32_t GetPosition(PositionKind kind) {
+ switch (kind) {
+ case POSITION_BEG:
+ return scanner_->location().beg_pos;
+ case POSITION_END:
+ return scanner_->location().end_pos;
+ case PEEK_POSITION_BEG:
+ return scanner_->peek_location().beg_pos;
+ case PEEK_POSITION_END:
+ return scanner_->peek_location().end_pos;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ Scanner* scanner_;
+ SourceRange* range_;
+ PositionKind post_kind_;
+ bool is_finalized_ = false;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SourceRangeScope);
+};
// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
@@ -201,7 +251,6 @@ class ParserBase {
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
RuntimeCallStats* runtime_call_stats,
- PreParsedScopeData* preparsed_scope_data,
bool parsing_on_main_thread = true)
: scope_(nullptr),
original_scope_(nullptr),
@@ -214,7 +263,6 @@ class ParserBase {
parsing_on_main_thread_(parsing_on_main_thread),
parsing_module_(false),
stack_limit_(stack_limit),
- preparsed_scope_data_(preparsed_scope_data),
zone_(zone),
classifier_(nullptr),
scanner_(scanner),
@@ -222,11 +270,9 @@ class ParserBase {
default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
function_literal_id_(0),
allow_natives_(false),
- allow_tailcalls_(false),
allow_harmony_do_expressions_(false),
allow_harmony_function_sent_(false),
allow_harmony_restrictive_generators_(false),
- allow_harmony_trailing_commas_(false),
allow_harmony_class_fields_(false),
allow_harmony_object_rest_spread_(false),
allow_harmony_dynamic_import_(false),
@@ -238,11 +284,9 @@ class ParserBase {
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
ALLOW_ACCESSORS(natives);
- ALLOW_ACCESSORS(tailcalls);
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_sent);
ALLOW_ACCESSORS(harmony_restrictive_generators);
- ALLOW_ACCESSORS(harmony_trailing_commas);
ALLOW_ACCESSORS(harmony_class_fields);
ALLOW_ACCESSORS(harmony_object_rest_spread);
ALLOW_ACCESSORS(harmony_dynamic_import);
@@ -271,6 +315,10 @@ class ParserBase {
void ResetFunctionLiteralId() { function_literal_id_ = 0; }
+ // The Zone where the parsing outputs are stored.
+ Zone* main_zone() const { return ast_value_factory()->zone(); }
+
+ // The current Zone, which might be the main zone or a temporary Zone.
Zone* zone() const { return zone_; }
protected:
@@ -326,56 +374,6 @@ class ParserBase {
Scope* scope;
};
- class TailCallExpressionList {
- public:
- explicit TailCallExpressionList(Zone* zone)
- : zone_(zone), expressions_(0, zone), has_explicit_tail_calls_(false) {}
-
- const ZoneList<ExpressionT>& expressions() const { return expressions_; }
- const Scanner::Location& location() const { return loc_; }
-
- bool has_explicit_tail_calls() const { return has_explicit_tail_calls_; }
-
- void Swap(TailCallExpressionList& other) {
- expressions_.Swap(&other.expressions_);
- std::swap(loc_, other.loc_);
- std::swap(has_explicit_tail_calls_, other.has_explicit_tail_calls_);
- }
-
- void AddImplicitTailCall(ExpressionT expr) {
- expressions_.Add(expr, zone_);
- }
-
- void Append(const TailCallExpressionList& other) {
- if (!has_explicit_tail_calls()) {
- loc_ = other.loc_;
- has_explicit_tail_calls_ = other.has_explicit_tail_calls_;
- }
- expressions_.AddAll(other.expressions_, zone_);
- }
-
- private:
- Zone* zone_;
- ZoneList<ExpressionT> expressions_;
- Scanner::Location loc_;
- bool has_explicit_tail_calls_;
- };
-
- // Defines whether tail call expressions are allowed or not.
- enum class ReturnExprContext {
- // We are inside return statement which is allowed to contain tail call
- // expressions. Tail call expressions are allowed.
- kInsideValidReturnStatement,
-
- // We are inside a block in which tail call expressions are allowed but
- // not yet inside a return statement.
- kInsideValidBlock,
-
- // Tail call expressions are not allowed in the following blocks.
- kInsideTryBlock,
- kInsideForInOfBody,
- };
-
class FunctionState final : public BlockState {
public:
FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
@@ -390,14 +388,6 @@ class ParserBase {
FunctionKind kind() const { return scope()->function_kind(); }
FunctionState* outer() const { return outer_function_state_; }
- typename Types::Variable* generator_object_variable() const {
- return scope()->generator_object_var();
- }
-
- typename Types::Variable* promise_variable() const {
- return scope()->promise_var();
- }
-
void RewindDestructuringAssignments(int pos) {
destructuring_assignments_to_rewrite_.Rewind(pos);
}
@@ -414,27 +404,10 @@ class ParserBase {
return destructuring_assignments_to_rewrite_;
}
- TailCallExpressionList& tail_call_expressions() {
- return tail_call_expressions_;
- }
- void AddImplicitTailCallExpression(ExpressionT expression) {
- if (return_expr_context() ==
- ReturnExprContext::kInsideValidReturnStatement) {
- tail_call_expressions_.AddImplicitTailCall(expression);
- }
- }
-
ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
return &reported_errors_;
}
- ReturnExprContext return_expr_context() const {
- return return_expr_context_;
- }
- void set_return_expr_context(ReturnExprContext context) {
- return_expr_context_ = context;
- }
-
ZoneList<ExpressionT>* non_patterns_to_rewrite() {
return &non_patterns_to_rewrite_;
}
@@ -495,8 +468,6 @@ class ParserBase {
DeclarationScope* scope_;
ZoneList<DestructuringAssignment> destructuring_assignments_to_rewrite_;
- TailCallExpressionList tail_call_expressions_;
- ReturnExprContext return_expr_context_;
ZoneList<ExpressionT> non_patterns_to_rewrite_;
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
@@ -515,50 +486,8 @@ class ParserBase {
friend Impl;
};
- // This scope sets current ReturnExprContext to given value.
- class ReturnExprScope {
- public:
- explicit ReturnExprScope(FunctionState* function_state,
- ReturnExprContext return_expr_context)
- : function_state_(function_state),
- sav_return_expr_context_(function_state->return_expr_context()) {
- // Don't update context if we are requested to enable tail call
- // expressions but current block does not allow them.
- if (return_expr_context !=
- ReturnExprContext::kInsideValidReturnStatement ||
- sav_return_expr_context_ == ReturnExprContext::kInsideValidBlock) {
- function_state->set_return_expr_context(return_expr_context);
- }
- }
- ~ReturnExprScope() {
- function_state_->set_return_expr_context(sav_return_expr_context_);
- }
-
- private:
- FunctionState* function_state_;
- ReturnExprContext sav_return_expr_context_;
- };
-
- // Collects all return expressions at tail call position in this scope
- // to a separate list.
- class CollectExpressionsInTailPositionToListScope {
- public:
- CollectExpressionsInTailPositionToListScope(FunctionState* function_state,
- TailCallExpressionList* list)
- : function_state_(function_state), list_(list) {
- function_state->tail_call_expressions().Swap(*list_);
- }
- ~CollectExpressionsInTailPositionToListScope() {
- function_state_->tail_call_expressions().Swap(*list_);
- }
-
- private:
- FunctionState* function_state_;
- TailCallExpressionList* list_;
- };
-
struct DeclarationDescriptor {
- enum Kind { NORMAL, PARAMETER };
+ enum Kind { NORMAL, PARAMETER, LEXICAL_FOR_EACH };
Scope* scope;
VariableMode mode;
int declaration_pos;
@@ -576,6 +505,7 @@ class ParserBase {
ExpressionT pattern;
int initializer_position;
+ int value_beg_position = kNoSourcePosition;
ExpressionT initializer;
};
@@ -598,15 +528,13 @@ class ParserBase {
scope(nullptr),
init_block(parser->impl()->NullBlock()),
inner_block(parser->impl()->NullBlock()),
- bound_names(1, parser->zone()),
- tail_call_expressions(parser->zone()) {}
+ bound_names(1, parser->zone()) {}
IdentifierT name;
ExpressionT pattern;
Scope* scope;
BlockT init_block;
BlockT inner_block;
ZoneList<const AstRawString*> bound_names;
- TailCallExpressionList tail_call_expressions;
};
struct ForInfo {
@@ -874,6 +802,7 @@ class ParserBase {
void CheckFunctionName(LanguageMode language_mode, IdentifierT function_name,
FunctionNameValidity function_name_validity,
const Scanner::Location& function_name_loc, bool* ok) {
+ if (impl()->IsEmptyIdentifier(function_name)) return;
if (function_name_validity == kSkipFunctionNameCheck) return;
// The function name needs to be checked in strict mode.
if (is_sloppy(language_mode)) return;
@@ -1386,29 +1315,11 @@ class ParserBase {
// Convenience method which determines the type of return statement to emit
// depending on the current function type.
- inline StatementT BuildReturnStatement(ExpressionT expr, int pos) {
- if (is_generator() && !is_async_generator()) {
- expr = impl()->BuildIteratorResult(expr, true);
- }
-
- if (is_async_function()) {
- return factory()->NewAsyncReturnStatement(expr, pos);
- }
- return factory()->NewReturnStatement(expr, pos);
- }
-
- inline SuspendExpressionT BuildSuspend(ExpressionT generator,
- ExpressionT expr, int pos,
- Suspend::OnException on_exception,
- SuspendFlags suspend_type) {
- DCHECK_EQ(0,
- static_cast<int>(suspend_type & ~SuspendFlags::kSuspendTypeMask));
- if (V8_UNLIKELY(is_async_generator())) {
- suspend_type = static_cast<SuspendFlags>(suspend_type |
- SuspendFlags::kAsyncGenerator);
- }
- return factory()->NewSuspend(generator, expr, pos, on_exception,
- suspend_type);
+ inline StatementT BuildReturnStatement(ExpressionT expr, int pos,
+ int end_pos = kNoSourcePosition) {
+ return is_async_function()
+ ? factory()->NewAsyncReturnStatement(expr, pos, end_pos)
+ : factory()->NewReturnStatement(expr, pos, end_pos);
}
// Validation per ES6 object literals.
@@ -1523,7 +1434,6 @@ class ParserBase {
bool parsing_on_main_thread_;
bool parsing_module_;
uintptr_t stack_limit_;
- PreParsedScopeData* preparsed_scope_data_;
// Parser base's private field members.
@@ -1539,11 +1449,9 @@ class ParserBase {
int function_literal_id_;
bool allow_natives_;
- bool allow_tailcalls_;
bool allow_harmony_do_expressions_;
bool allow_harmony_function_sent_;
bool allow_harmony_restrictive_generators_;
- bool allow_harmony_trailing_commas_;
bool allow_harmony_class_fields_;
bool allow_harmony_object_rest_spread_;
bool allow_harmony_dynamic_import_;
@@ -1563,8 +1471,6 @@ ParserBase<Impl>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_(scope),
destructuring_assignments_to_rewrite_(16, scope->zone()),
- tail_call_expressions_(scope->zone()),
- return_expr_context_(ReturnExprContext::kInsideValidBlock),
non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
next_function_is_likely_called_(false),
@@ -1830,6 +1736,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::ASYNC:
if (!scanner()->HasAnyLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
+ BindingPatternUnexpectedToken();
Consume(Token::ASYNC);
return ParseAsyncFunctionLiteral(CHECK_OK);
}
@@ -2001,8 +1908,7 @@ ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
scanner()->location(), MessageTemplate::kParamAfterRest);
}
- if (allow_harmony_trailing_commas() && peek() == Token::RPAREN &&
- PeekAhead() == Token::ARROW) {
+ if (peek() == Token::RPAREN && PeekAhead() == Token::ARROW) {
// a trailing comma is allowed at the end of an arrow parameter list
break;
}
@@ -2205,19 +2111,23 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
}
case Token::ELLIPSIS:
- if (allow_harmony_object_rest_spread()) {
+ if (allow_harmony_object_rest_spread() && !*is_generator && !*is_async &&
+ !*is_get && !*is_set) {
*name = impl()->EmptyIdentifier();
Consume(Token::ELLIPSIS);
expression = ParseAssignmentExpression(true, CHECK_OK);
*kind = PropertyKind::kSpreadProperty;
- if (expression->IsAssignment()) {
- classifier()->RecordPatternError(
+ if (!impl()->IsIdentifier(expression)) {
+ classifier()->RecordBindingPatternError(
scanner()->location(),
- MessageTemplate::kInvalidDestructuringTarget);
- } else {
- CheckDestructuringElement(expression, pos,
- scanner()->location().end_pos);
+ MessageTemplate::kInvalidRestBindingPattern);
+ }
+
+ if (!expression->IsValidReferenceExpression()) {
+ classifier()->RecordAssignmentPatternError(
+ scanner()->location(),
+ MessageTemplate::kInvalidRestAssignmentPattern);
}
if (peek() != Token::RBRACE) {
@@ -2226,6 +2136,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
}
return expression;
}
+ // Fall-through.
default:
*name = ParseIdentifierName(CHECK_OK);
@@ -2313,9 +2224,12 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
has_initializer, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
ExpectSemicolon(CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
*property_kind = ClassLiteralProperty::FIELD;
- return factory()->NewClassLiteralProperty(
+ ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
name_expression, function_literal, *property_kind, *is_static,
*is_computed_name);
+ impl()->SetFunctionNameFromPropertyName(result, name);
+ return result;
+
} else {
ReportUnexpectedToken(Next());
*ok = false;
@@ -2355,9 +2269,11 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
*property_kind = ClassLiteralProperty::METHOD;
- return factory()->NewClassLiteralProperty(name_expression, value,
- *property_kind, *is_static,
- *is_computed_name);
+ ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
+ name_expression, value, *property_kind, *is_static,
+ *is_computed_name);
+ impl()->SetFunctionNameFromPropertyName(result, name);
+ return result;
}
case PropertyKind::kAccessorProperty: {
@@ -2384,15 +2300,16 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionLiteral::kAccessorOrMethod, language_mode(),
CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
- if (!*is_computed_name) {
- impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
- }
-
*property_kind =
is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER;
- return factory()->NewClassLiteralProperty(name_expression, value,
- *property_kind, *is_static,
- *is_computed_name);
+ ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
+ name_expression, value, *property_kind, *is_static,
+ *is_computed_name);
+ const AstRawString* prefix =
+ is_get ? ast_value_factory()->get_space_string()
+ : ast_value_factory()->set_space_string();
+ impl()->SetFunctionNameFromPropertyName(result, name, prefix);
+ return result;
}
case PropertyKind::kSpreadProperty:
ReportUnexpectedTokenAt(
@@ -2402,7 +2319,6 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
return impl()->EmptyClassLiteralProperty();
}
UNREACHABLE();
- return impl()->EmptyClassLiteralProperty();
}
template <typename Impl>
@@ -2487,11 +2403,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value, *is_computed_name);
-
- if (!*is_computed_name) {
- impl()->SetFunctionNameFromPropertyName(result, name);
- }
-
+ impl()->SetFunctionNameFromPropertyName(result, name);
return result;
}
@@ -2558,8 +2470,10 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
value = lhs;
}
- return factory()->NewObjectLiteralProperty(
+ ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED, false);
+ impl()->SetFunctionNameFromPropertyName(result, name);
+ return result;
}
case PropertyKind::kMethodProperty: {
@@ -2581,9 +2495,11 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteral::kAccessorOrMethod, language_mode(),
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- return factory()->NewObjectLiteralProperty(
+ ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED,
*is_computed_name);
+ impl()->SetFunctionNameFromPropertyName(result, name);
+ return result;
}
case PropertyKind::kAccessorProperty: {
@@ -2611,14 +2527,16 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
FunctionLiteral::kAccessorOrMethod, language_mode(),
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- if (!*is_computed_name) {
- impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
- }
-
- return factory()->NewObjectLiteralProperty(
- name_expression, value, is_get ? ObjectLiteralProperty::GETTER
- : ObjectLiteralProperty::SETTER,
+ ObjectLiteralPropertyT result = factory()->NewObjectLiteralProperty(
+ name_expression, value,
+ is_get ? ObjectLiteralProperty::GETTER
+ : ObjectLiteralProperty::SETTER,
*is_computed_name);
+ const AstRawString* prefix =
+ is_get ? ast_value_factory()->get_space_string()
+ : ast_value_factory()->set_space_string();
+ impl()->SetFunctionNameFromPropertyName(result, name, prefix);
+ return result;
}
case PropertyKind::kClassField:
@@ -2628,7 +2546,6 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
return impl()->EmptyObjectLiteralProperty();
}
UNREACHABLE();
- return impl()->EmptyObjectLiteralProperty();
}
template <typename Impl>
@@ -2740,7 +2657,7 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
classifier()->RecordAsyncArrowFormalParametersError(
scanner()->location(), MessageTemplate::kParamAfterRest);
}
- if (allow_harmony_trailing_commas() && peek() == Token::RPAREN) {
+ if (peek() == Token::RPAREN) {
// allow trailing comma
done = true;
}
@@ -2972,8 +2889,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
classifier()->RecordFormalParameterInitializerError(
scanner()->peek_location(), MessageTemplate::kYieldInParameter);
Expect(Token::YIELD, CHECK_OK);
- ExpressionT generator_object =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
// The following initialization is necessary.
ExpressionT expression = impl()->EmptyExpression();
bool delegating = false; // yield*
@@ -3001,20 +2916,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
}
if (delegating) {
- return impl()->RewriteYieldStar(generator_object, expression, pos);
- }
-
- if (!is_async_generator()) {
- // Async generator yield is rewritten in Ignition, and doesn't require
- // producing an Iterator Result.
- expression = impl()->BuildIteratorResult(expression, false);
+ return impl()->RewriteYieldStar(expression, pos);
}
// Hackily disambiguate o from o.next and o [Symbol.iterator]().
// TODO(verwaest): Come up with a better solution.
ExpressionT yield =
- BuildSuspend(generator_object, expression, pos,
- Suspend::kOnExceptionThrow, SuspendFlags::kYield);
+ factory()->NewYield(expression, pos, Suspend::kOnExceptionThrow);
return yield;
}
@@ -3027,6 +2935,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+ SourceRange then_range, else_range;
int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
ExpressionT expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
@@ -3038,6 +2947,7 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
ExpressionT left;
{
+ SourceRangeScope range_scope(scanner(), &then_range);
ExpressionClassifier classifier(this);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
@@ -3049,12 +2959,15 @@ ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
Expect(Token::COLON, CHECK_OK);
ExpressionT right;
{
+ SourceRangeScope range_scope(scanner(), &else_range);
ExpressionClassifier classifier(this);
right = ParseAssignmentExpression(accept_IN, CHECK_OK);
AccumulateNonBindingPatternErrors();
}
impl()->RewriteNonPattern(CHECK_OK);
- return factory()->NewConditional(expression, left, right, pos);
+ ExpressionT expr = factory()->NewConditional(expression, left, right, pos);
+ impl()->RecordConditionalSourceRange(expr, then_range, else_range);
+ return expr;
}
@@ -3185,7 +3098,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
ExpressionT value = ParseUnaryExpression(CHECK_OK);
- return impl()->RewriteAwaitExpression(value, await_pos);
+ return factory()->NewAwait(value, await_pos);
} else {
return ParsePostfixExpression(ok);
}
@@ -3613,7 +3526,7 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
- int pos = position();
+ int pos = peek_position();
IdentifierT name = ParseIdentifierName(CHECK_OK);
expression = factory()->NewProperty(
expression, factory()->NewStringLiteral(name, pos), pos);
@@ -3729,14 +3642,15 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
break;
}
if (!Check(Token::COMMA)) break;
- if (allow_harmony_trailing_commas() && peek() == Token::RPAREN) {
+ if (peek() == Token::RPAREN) {
// allow the trailing comma
break;
}
}
}
- impl()->DeclareFormalParameters(parameters->scope, parameters->params);
+ impl()->DeclareFormalParameters(parameters->scope, parameters->params,
+ parameters->is_simple);
}
template <typename Impl>
@@ -3810,7 +3724,10 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
ExpressionT value = impl()->EmptyExpression();
int initializer_position = kNoSourcePosition;
+ int value_beg_position = kNoSourcePosition;
if (Check(Token::ASSIGN)) {
+ value_beg_position = peek_position();
+
ExpressionClassifier classifier(this);
value = ParseAssignmentExpression(var_context != kForStatement,
CHECK_OK_CUSTOM(NullBlock));
@@ -3858,6 +3775,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseVariableDeclarations(
typename DeclarationParsingResult::Declaration decl(
pattern, initializer_position, value);
+ decl.value_beg_position = value_beg_position;
if (var_context == kForStatement) {
// Save the declaration for further handling in ParseForStatement.
parsing_result->declarations.Add(decl);
@@ -4093,7 +4011,9 @@ void ParserBase<Impl>::ParseFunctionBody(
{
BlockState block_state(&scope_, inner_scope);
- if (IsGeneratorFunction(kind)) {
+ if (IsAsyncGeneratorFunction(kind)) {
+ impl()->ParseAndRewriteAsyncGeneratorFunctionBody(pos, kind, body, ok);
+ } else if (IsGeneratorFunction(kind)) {
impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, body, ok);
} else if (IsAsyncFunction(kind)) {
const bool accept_IN = true;
@@ -4158,7 +4078,6 @@ void ParserBase<Impl>::ParseFunctionBody(
impl()->CreateFunctionNameAssignment(function_name, pos, function_type,
function_scope, result,
kFunctionNameAssignmentIndex);
- impl()->MarkCollectedTailCallExpressions();
}
template <typename Impl>
@@ -4204,14 +4123,10 @@ bool ParserBase<Impl>::IsNextLetKeyword() {
// for those semantics to apply. This ensures that ASI is
// not honored when a LineTerminator separates the
// tokens.
+ case Token::YIELD:
+ case Token::AWAIT:
case Token::ASYNC:
return true;
- case Token::AWAIT:
- // In an async function, allow ASI between `let` and `yield`
- return !is_async_function() || !scanner_->HasAnyLineTerminatorAfterNext();
- case Token::YIELD:
- // In an generator, allow ASI between `let` and `yield`
- return !is_generator() || !scanner_->HasAnyLineTerminatorAfterNext();
case Token::FUTURE_STRICT_RESERVED_WORD:
return is_sloppy(language_mode());
default:
@@ -4275,6 +4190,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
bool should_be_used_once_hint = false;
bool has_braces = true;
+ ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr;
{
FunctionState function_state(&function_state_, &scope_,
formal_parameters.scope);
@@ -4293,10 +4209,12 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// parameters.
int dummy_num_parameters = -1;
DCHECK((kind & FunctionKind::kArrowFunction) != 0);
- LazyParsingResult result =
- impl()->SkipFunction(kind, formal_parameters.scope,
- &dummy_num_parameters, false, false, CHECK_OK);
+ LazyParsingResult result = impl()->SkipFunction(
+ nullptr, kind, FunctionLiteral::kAnonymousExpression,
+ formal_parameters.scope, &dummy_num_parameters,
+ &produced_preparsed_scope_data, false, false, CHECK_OK);
DCHECK_NE(result, kLazyParsingAborted);
+ DCHECK_NULL(produced_preparsed_scope_data);
USE(result);
formal_parameters.scope->ResetAfterPreparsing(ast_value_factory_,
false);
@@ -4314,10 +4232,6 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// Single-expression body
has_braces = false;
int pos = position();
- DCHECK(ReturnExprContext::kInsideValidBlock ==
- function_state_->return_expr_context());
- ReturnExprScope allow_tail_calls(
- function_state_, ReturnExprContext::kInsideValidReturnStatement);
body = impl()->NewStatementList(1);
impl()->AddParameterInitializationBlock(
formal_parameters, body, kind == kAsyncArrowFunction, CHECK_OK);
@@ -4332,13 +4246,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
impl()->RewriteNonPattern(CHECK_OK);
body->Add(BuildReturnStatement(expression, expression->position()),
zone());
- if (allow_tailcalls() && !is_sloppy(language_mode())) {
- // ES6 14.6.1 Static Semantics: IsInTailPosition
- impl()->MarkTailPosition(expression);
- }
}
expected_property_count = function_state.expected_property_count();
- impl()->MarkCollectedTailCallExpressions();
}
formal_parameters.scope->set_end_position(scanner()->location().end_pos);
@@ -4382,7 +4291,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, eager_compile_hint,
formal_parameters.scope->start_position(), has_braces,
- function_literal_id);
+ function_literal_id, produced_preparsed_scope_data);
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
@@ -4516,7 +4425,7 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
bool is_generator = allow_harmony_async_iteration() && Check(Token::MUL);
const bool kIsAsync = true;
- static const FunctionKind kind = FunctionKindFor(is_generator, kIsAsync);
+ const FunctionKind kind = FunctionKindFor(is_generator, kIsAsync);
if (impl()->ParsingDynamicFunctionDeclaration()) {
// We don't want dynamic functions to actually declare their name
@@ -4969,7 +4878,6 @@ ParserBase<Impl>::ParseStatementAsUnlabelled(
return ParseTryStatement(ok);
default:
UNREACHABLE();
- return impl()->NullStatement();
}
}
@@ -4997,7 +4905,9 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
}
Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullBlock));
- scope()->set_end_position(scanner()->location().end_pos);
+ int end_pos = scanner()->location().end_pos;
+ scope()->set_end_position(end_pos);
+ impl()->RecordBlockSourceRange(body, end_pos);
body->set_scope(scope()->FinalizeBlockScope());
}
return body;
@@ -5089,8 +4999,10 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
Token::Value next_next = PeekAhead();
// "let" followed by either "[", "{" or an identifier means a lexical
// declaration, which should not appear here.
- if (next_next != Token::LBRACK && next_next != Token::LBRACE &&
- next_next != Token::IDENTIFIER) {
+ // However, ASI may apply if a line break precedes the identifier or brace.
+ if (next_next != Token::LBRACK &&
+ ((next_next != Token::LBRACE && next_next != Token::IDENTIFIER) ||
+ scanner_->HasAnyLineTerminatorAfterNext())) {
break;
}
impl()->ReportMessageAt(scanner()->peek_location(),
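The condition above decides whether a statement-position `let` starts a lexical declaration: `let [` always does, while `let {` or `let identifier` only counts as a declaration when no line terminator separates the two tokens, since a line break leaves room for ASI. Below is only a minimal, self-contained sketch of that predicate, using an invented Tok enum and a plain bool in place of the real Token values and scanner query:

#include <cassert>

// Invented stand-ins for illustration; V8's Token and Scanner types differ.
enum class Tok { kLeftBracket, kLeftBrace, kIdentifier, kOther };

// Mirrors the check above: after `let`, a `[` always begins a lexical
// declaration; `{` or an identifier only does so when there is no line
// terminator between `let` and that token.
bool StartsLexicalDeclaration(Tok next_next, bool newline_after_next) {
  if (next_next == Tok::kLeftBracket) return true;
  if (next_next == Tok::kLeftBrace || next_next == Tok::kIdentifier) {
    return !newline_after_next;
  }
  return false;
}

int main() {
  assert(StartsLexicalDeclaration(Tok::kLeftBracket, true));   // let \n [a] = b
  assert(StartsLexicalDeclaration(Tok::kIdentifier, false));   // let a = b
  assert(!StartsLexicalDeclaration(Tok::kIdentifier, true));   // let \n a (ASI)
  assert(!StartsLexicalDeclaration(Tok::kOther, false));       // let + 1
}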
@@ -5116,7 +5028,7 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
allow_function == kAllowLabelledFunctionStatement) {
return ParseFunctionDeclaration(ok);
}
- return ParseStatement(labels, ok);
+ return ParseStatement(labels, allow_function, ok);
}
// If we have an extension, we allow a native function declaration.
@@ -5144,15 +5056,25 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseIfStatement(
Expect(Token::LPAREN, CHECK_OK);
ExpressionT condition = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- StatementT then_statement = ParseScopedStatement(labels, CHECK_OK);
+
+ SourceRange then_range, else_range;
+ StatementT then_statement = impl()->NullStatement();
+ {
+ SourceRangeScope range_scope(scanner(), &then_range);
+ then_statement = ParseScopedStatement(labels, CHECK_OK);
+ }
+
StatementT else_statement = impl()->NullStatement();
if (Check(Token::ELSE)) {
+ SourceRangeScope range_scope(scanner(), &else_range);
else_statement = ParseScopedStatement(labels, CHECK_OK);
} else {
else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
}
- return factory()->NewIfStatement(condition, then_statement, else_statement,
- pos);
+ StatementT stmt =
+ factory()->NewIfStatement(condition, then_statement, else_statement, pos);
+ impl()->RecordIfStatementSourceRange(stmt, then_range, else_range);
+ return stmt;
}
template <typename Impl>
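The if-statement hunk above is typical of how the new source-range plumbing is threaded through the statement parsers: a SourceRangeScope samples the scanner position around a sub-statement, and the finished range is handed to one of the Record*SourceRange hooks on the Impl. What follows is only a minimal, self-contained sketch of that RAII idea using made-up stand-in types (SourceRange, FakeScanner); V8's actual classes have different interfaces:

#include <cassert>

// Simplified stand-ins for illustration only; not V8's real types.
struct SourceRange { int start = -1; int end = -1; };

struct FakeScanner {
  int pos = 0;                        // pretend character position
  int peek_begin() const { return pos; }
  int last_end() const { return pos; }
};

// RAII helper: remembers where a sub-statement started and, on Finalize()
// (or destruction), records where it ended.
class SourceRangeScope {
 public:
  SourceRangeScope(FakeScanner* scanner, SourceRange* range)
      : scanner_(scanner), range_(range) {
    range_->start = scanner_->peek_begin();
  }
  const SourceRange& Finalize() {
    if (range_->end < 0) range_->end = scanner_->last_end();
    return *range_;
  }
  ~SourceRangeScope() { Finalize(); }

 private:
  FakeScanner* scanner_;
  SourceRange* range_;
};

int main() {
  FakeScanner scanner;
  SourceRange then_range;
  {
    SourceRangeScope scope(&scanner, &then_range);  // start of the then-branch
    scanner.pos = 42;                               // ...parsing advances...
  }                                                 // destructor records the end
  assert(then_range.start == 0 && then_range.end == 42);
}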
@@ -5187,7 +5109,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
return impl()->NullStatement();
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewContinueStatement(target, pos);
+ StatementT stmt = factory()->NewContinueStatement(target, pos);
+ impl()->RecordJumpStatementSourceRange(stmt, scanner_->location().end_pos);
+ return stmt;
}
template <typename Impl>
@@ -5225,7 +5149,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
return impl()->NullStatement();
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewBreakStatement(target, pos);
+ StatementT stmt = factory()->NewBreakStatement(target, pos);
+ impl()->RecordJumpStatementSourceRange(stmt, scanner_->location().end_pos);
+ return stmt;
}
template <typename Impl>
@@ -5261,25 +5187,15 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
return_value = impl()->GetLiteralUndefined(position());
}
} else {
- if (IsDerivedConstructor(function_state_->kind())) {
- // Because of the return code rewriting that happens in case of a subclass
- // constructor we don't want to accept tail calls, therefore we don't set
- // ReturnExprScope to kInsideValidReturnStatement here.
- return_value = ParseExpression(true, CHECK_OK);
- } else {
- ReturnExprScope maybe_allow_tail_calls(
- function_state_, ReturnExprContext::kInsideValidReturnStatement);
- return_value = ParseExpression(true, CHECK_OK);
-
- if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
- // ES6 14.6.1 Static Semantics: IsInTailPosition
- function_state_->AddImplicitTailCallExpression(return_value);
- }
- }
+ return_value = ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
return_value = impl()->RewriteReturn(return_value, loc.beg_pos);
- return BuildReturnStatement(return_value, loc.beg_pos);
+ int continuation_pos = scanner_->location().end_pos;
+ StatementT stmt =
+ BuildReturnStatement(return_value, loc.beg_pos, continuation_pos);
+ impl()->RecordJumpStatementSourceRange(stmt, scanner_->location().end_pos);
+ return stmt;
}
template <typename Impl>
@@ -5321,8 +5237,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
auto loop = factory()->NewDoWhileStatement(labels, peek_position());
typename Types::Target target(this, loop);
+ SourceRange body_range;
+ StatementT body = impl()->NullStatement();
+
Expect(Token::DO, CHECK_OK);
- StatementT body = ParseStatement(nullptr, CHECK_OK);
+ {
+ SourceRangeScope range_scope(scanner(), &body_range);
+ body = ParseStatement(nullptr, CHECK_OK);
+ }
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -5336,6 +5258,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
Check(Token::SEMICOLON);
loop->Initialize(cond, body);
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
+
return loop;
}
@@ -5348,13 +5272,21 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
auto loop = factory()->NewWhileStatement(labels, peek_position());
typename Types::Target target(this, loop);
+ SourceRange body_range;
+ StatementT body = impl()->NullStatement();
+
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
ExpressionT cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- StatementT body = ParseStatement(nullptr, CHECK_OK);
+ {
+ SourceRangeScope range_scope(scanner(), &body_range);
+ body = ParseStatement(nullptr, CHECK_OK);
+ }
loop->Initialize(cond, body);
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
+
return loop;
}
@@ -5374,7 +5306,10 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
ExpressionT exception = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return impl()->NewThrowStatement(exception, pos);
+ StatementT stmt = impl()->NewThrowStatement(exception, pos);
+ impl()->RecordThrowSourceRange(stmt, scanner_->location().end_pos);
+
+ return stmt;
}
template <typename Impl>
@@ -5407,6 +5342,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
while (peek() != Token::RBRACE) {
// An empty label indicates the default case.
ExpressionT label = impl()->EmptyExpression();
+ SourceRange clause_range;
+ SourceRangeScope range_scope(scanner(), &clause_range);
if (Check(Token::CASE)) {
label = ParseExpression(true, CHECK_OK);
} else {
@@ -5427,11 +5364,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseSwitchStatement(
statements->Add(stat, zone());
}
auto clause = factory()->NewCaseClause(label, statements, clause_pos);
+ impl()->RecordCaseClauseSourceRange(clause, range_scope.Finalize());
cases->Add(clause, zone());
}
Expect(Token::RBRACE, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
+ int end_position = scanner()->location().end_pos;
+ scope()->set_end_position(end_position);
+ impl()->RecordSwitchStatementSourceRange(switch_statement, end_position);
return impl()->RewriteSwitchStatement(tag, switch_statement, cases,
scope()->FinalizeBlockScope());
}
@@ -5454,12 +5394,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
Expect(Token::TRY, CHECK_OK);
int pos = position();
- BlockT try_block = impl()->NullBlock();
- {
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideTryBlock);
- try_block = ParseBlock(nullptr, CHECK_OK);
- }
+ BlockT try_block = ParseBlock(nullptr, CHECK_OK);
CatchInfo catch_info(this);
@@ -5469,6 +5404,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
return impl()->NullStatement();
}
+ SourceRange catch_range, finally_range;
+
BlockT catch_block = impl()->NullBlock();
if (Check(Token::CATCH)) {
Expect(Token::LPAREN, CHECK_OK);
@@ -5476,9 +5413,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
catch_info.scope->set_start_position(scanner()->location().beg_pos);
{
- CollectExpressionsInTailPositionToListScope
- collect_tail_call_expressions_scope(
- function_state_, &catch_info.tail_call_expressions);
BlockState catch_block_state(&scope_, catch_info.scope);
catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
@@ -5509,6 +5443,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
catch_block->statements()->Add(catch_info.init_block, zone());
}
+ SourceRangeScope range_scope(scanner(), &catch_range);
catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
catch_block->statements()->Add(catch_info.inner_block, zone());
impl()->ValidateCatchBlock(catch_info, CHECK_OK);
@@ -5523,11 +5458,13 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
BlockT finally_block = impl()->NullBlock();
DCHECK(peek() == Token::FINALLY || !impl()->IsNullStatement(catch_block));
if (Check(Token::FINALLY)) {
+ SourceRangeScope range_scope(scanner(), &finally_range);
finally_block = ParseBlock(nullptr, CHECK_OK);
}
- return impl()->RewriteTryStatement(try_block, catch_block, finally_block,
- catch_info, pos);
+ return impl()->RewriteTryStatement(try_block, catch_block, catch_range,
+ finally_block, finally_range, catch_info,
+ pos);
}
template <typename Impl>
@@ -5628,8 +5565,6 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
typename Types::Target target(this, loop);
- int each_keyword_pos = scanner()->location().beg_pos;
-
ExpressionT enumerable = impl()->EmptyExpression();
if (for_info->mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
@@ -5643,20 +5578,22 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
StatementT final_loop = impl()->NullStatement();
{
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
BlockState block_state(zone(), &scope_);
scope()->set_start_position(scanner()->location().beg_pos);
+ SourceRange body_range;
+ SourceRangeScope range_scope(scanner(), &body_range);
+
StatementT body = ParseStatement(nullptr, CHECK_OK);
+ impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
BlockT body_block = impl()->NullBlock();
ExpressionT each_variable = impl()->EmptyExpression();
impl()->DesugarBindingInForEachStatement(for_info, &body_block,
&each_variable, CHECK_OK);
body_block->statements()->Add(body, zone());
- final_loop = impl()->InitializeForEachStatement(
- loop, each_variable, enumerable, body_block, each_keyword_pos);
+ final_loop = impl()->InitializeForEachStatement(loop, each_variable,
+ enumerable, body_block);
scope()->set_end_position(scanner()->location().end_pos);
body_block->set_scope(scope()->FinalizeBlockScope());
@@ -5693,8 +5630,6 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
typename Types::Target target(this, loop);
- int each_keyword_pos = scanner()->location().beg_pos;
-
ExpressionT enumerable = impl()->EmptyExpression();
if (for_info->mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
@@ -5708,15 +5643,17 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
Scope* for_scope = scope();
{
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
BlockState block_state(zone(), &scope_);
scope()->set_start_position(scanner()->location().beg_pos);
+ SourceRange body_range;
+ SourceRangeScope range_scope(scanner(), &body_range);
+
StatementT body = ParseStatement(nullptr, CHECK_OK);
scope()->set_end_position(scanner()->location().end_pos);
- StatementT final_loop = impl()->InitializeForEachStatement(
- loop, expression, enumerable, body, each_keyword_pos);
+ StatementT final_loop =
+ impl()->InitializeForEachStatement(loop, expression, enumerable, body);
+ impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
for_scope = for_scope->FinalizeBlockScope();
USE(for_scope);
@@ -5742,6 +5679,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
StatementT next = impl()->NullStatement();
StatementT body = impl()->NullStatement();
+ SourceRange body_range;
+
// If there are let bindings, then condition and the next statement of the
// for loop must be parsed in a new scope.
Scope* inner_scope = scope();
@@ -5763,16 +5702,18 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
}
Expect(Token::RPAREN, CHECK_OK);
+ SourceRangeScope range_scope(scanner(), &body_range);
body = ParseStatement(nullptr, CHECK_OK);
}
scope()->set_end_position(scanner()->location().end_pos);
inner_scope->set_end_position(scanner()->location().end_pos);
if (bound_names_are_lexical && for_info->bound_names.length() > 0 &&
- (is_resumable() || function_state_->contains_function_or_eval())) {
+ function_state_->contains_function_or_eval()) {
scope()->set_is_hidden();
return impl()->DesugarLexicalBindingsInForStatement(
- loop, init, cond, next, body, inner_scope, *for_info, CHECK_OK);
+ loop, init, cond, next, body, body_range, inner_scope, *for_info,
+ CHECK_OK);
}
Scope* for_scope = scope()->FinalizeBlockScope();
@@ -5803,10 +5744,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
block->statements()->Add(loop, zone());
block->set_scope(for_scope);
loop->Initialize(init, cond, next, body);
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
return block;
}
loop->Initialize(init, cond, next, body);
+ impl()->RecordIterationStatementSourceRange(loop, body_range);
return loop;
}
@@ -5909,13 +5852,15 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
StatementT final_loop = impl()->NullStatement();
Scope* for_scope = scope();
{
- ReturnExprScope no_tail_calls(function_state_,
- ReturnExprContext::kInsideForInOfBody);
BlockState block_state(zone(), &scope_);
scope()->set_start_position(scanner()->location().beg_pos);
+ SourceRange body_range;
+ SourceRangeScope range_scope(scanner(), &body_range);
+
StatementT body = ParseStatement(nullptr, CHECK_OK);
scope()->set_end_position(scanner()->location().end_pos);
+ impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
if (has_declarations) {
BlockT body_block = impl()->NullBlock();
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index a4f40dda53..3da659ebed 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -170,7 +170,6 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
int end_pos) {
int expected_property_count = -1;
const int parameter_count = 0;
- if (name == nullptr) name = ast_value_factory()->empty_string();
FunctionKind kind = call_super ? FunctionKind::kDefaultDerivedConstructor
: FunctionKind::kDefaultBaseConstructor;
@@ -352,19 +351,6 @@ Expression* Parser::BuildUnaryExpression(Expression* expression,
return factory()->NewUnaryOperation(op, expression, pos);
}
-Expression* Parser::BuildIteratorResult(Expression* value, bool done) {
- int pos = kNoSourcePosition;
-
- if (value == nullptr) value = factory()->NewUndefinedLiteral(pos);
-
- auto args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(value, zone());
- args->Add(factory()->NewBooleanLiteral(done, pos), zone());
-
- return factory()->NewCallRuntime(Runtime::kInlineCreateIterResultObject, args,
- pos);
-}
-
Expression* Parser::NewThrowError(Runtime::FunctionId id,
MessageTemplate::Template message,
const AstRawString* arg, int pos) {
@@ -406,8 +392,8 @@ Expression* Parser::NewTargetExpression(int pos) {
Expression* Parser::FunctionSentExpression(int pos) {
// We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
- VariableProxy* generator =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
+ VariableProxy* generator = factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var());
args->Add(generator, zone());
return factory()->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
args, pos);
@@ -435,10 +421,6 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
return NULL;
}
-void Parser::MarkTailPosition(Expression* expression) {
- expression->MarkTail();
-}
-
Expression* Parser::NewV8Intrinsic(const AstRawString* name,
ZoneList<Expression*>* args, int pos,
bool* ok) {
@@ -497,17 +479,18 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->ast_value_factory(),
- info->runtime_call_stats(),
- info->preparsed_scope_data(), true),
+ info->runtime_call_stats(), true),
scanner_(info->unicode_cache()),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ source_range_map_(info->source_range_map()),
target_stack_(nullptr),
compile_options_(info->compile_options()),
cached_parse_data_(nullptr),
total_preparse_skipped_(0),
temp_zoned_(false),
log_(nullptr),
+ consumed_preparsed_scope_data_(info->consumed_preparsed_scope_data()),
parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
@@ -534,12 +517,9 @@ Parser::Parser(ParseInfo* info)
allow_lazy_ = FLAG_lazy && info->allow_lazy_parsing() && !info->is_native() &&
info->extension() == nullptr && can_compile_lazily;
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
- set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
- info->is_tail_call_elimination_enabled());
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
- set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
set_allow_harmony_class_fields(FLAG_harmony_class_fields);
set_allow_harmony_object_rest_spread(FLAG_harmony_object_rest_spread);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
@@ -696,7 +676,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (parsing_module_) {
// Declare the special module parameter.
auto name = ast_value_factory()->empty_string();
- bool is_duplicate;
+ bool is_duplicate = false;
bool is_rest = false;
bool is_optional = false;
auto var =
@@ -706,6 +686,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
var->AllocateTo(VariableLocation::PARAMETER, 0);
PrepareGeneratorVariables();
+ scope->ForceContextAllocation();
Expression* initial_yield =
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
body->Add(
@@ -796,7 +777,7 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info) {
{
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
source, shared_info->start_position(), shared_info->end_position()));
- Handle<String> name(String::cast(shared_info->name()));
+ Handle<String> name(shared_info->name());
scanner_.Initialize(stream.get(), info->is_module());
info->set_function_name(ast_value_factory()->GetString(name));
result = DoParseFunction(info);
@@ -914,7 +895,10 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info) {
} else {
// BindingIdentifier
ParseFormalParameter(&formals, &ok);
- if (ok) DeclareFormalParameters(formals.scope, formals.params);
+ if (ok) {
+ DeclareFormalParameters(formals.scope, formals.params,
+ formals.is_simple);
+ }
}
}
@@ -1159,9 +1143,10 @@ void Parser::ParseImportDeclaration(bool* ok) {
// 'import' ModuleSpecifier ';'
if (tok == Token::STRING) {
+ Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
ExpectSemicolon(CHECK_OK_VOID);
- module()->AddEmptyImport(module_specifier);
+ module()->AddEmptyImport(module_specifier, specifier_loc);
return;
}
@@ -1205,6 +1190,7 @@ void Parser::ParseImportDeclaration(bool* ok) {
}
ExpectContextualKeyword(Token::FROM, CHECK_OK_VOID);
+ Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
ExpectSemicolon(CHECK_OK_VOID);
@@ -1218,23 +1204,25 @@ void Parser::ParseImportDeclaration(bool* ok) {
if (module_namespace_binding != nullptr) {
module()->AddStarImport(module_namespace_binding, module_specifier,
- module_namespace_binding_loc, zone());
+ module_namespace_binding_loc, specifier_loc,
+ zone());
}
if (import_default_binding != nullptr) {
module()->AddImport(ast_value_factory()->default_string(),
import_default_binding, module_specifier,
- import_default_binding_loc, zone());
+ import_default_binding_loc, specifier_loc, zone());
}
if (named_imports != nullptr) {
if (named_imports->length() == 0) {
- module()->AddEmptyImport(module_specifier);
+ module()->AddEmptyImport(module_specifier, specifier_loc);
} else {
for (int i = 0; i < named_imports->length(); ++i) {
const NamedImport* import = named_imports->at(i);
module()->AddImport(import->import_name, import->local_name,
- module_specifier, import->location, zone());
+ module_specifier, import->location, specifier_loc,
+ zone());
}
}
}
@@ -1328,9 +1316,10 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
Consume(Token::MUL);
loc = scanner()->location();
ExpectContextualKeyword(Token::FROM, CHECK_OK);
+ Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
- module()->AddStarExport(module_specifier, loc, zone());
+ module()->AddStarExport(module_specifier, loc, specifier_loc, zone());
return factory()->NewEmptyStatement(pos);
}
@@ -1353,7 +1342,9 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
ParseExportClause(&export_names, &export_locations, &original_names,
&reserved_loc, CHECK_OK);
const AstRawString* module_specifier = nullptr;
+ Scanner::Location specifier_loc;
if (CheckContextualKeyword(Token::FROM)) {
+ specifier_loc = scanner()->peek_location();
module_specifier = ParseModuleSpecifier(CHECK_OK);
} else if (reserved_loc.IsValid()) {
// No FromClause, so reserved words are invalid in ExportClause.
@@ -1371,11 +1362,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
export_locations[i], zone());
}
} else if (length == 0) {
- module()->AddEmptyImport(module_specifier);
+ module()->AddEmptyImport(module_specifier, specifier_loc);
} else {
for (int i = 0; i < length; ++i) {
module()->AddExport(original_names[i], export_names[i],
- module_specifier, export_locations[i], zone());
+ module_specifier, export_locations[i],
+ specifier_loc, zone());
}
}
return factory()->NewEmptyStatement(pos);
@@ -1739,7 +1731,9 @@ void Parser::ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {
}
Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
+ const SourceRange& catch_range,
Block* finally_block,
+ const SourceRange& finally_range,
const CatchInfo& catch_info, int pos) {
// Simplify the AST nodes by converting:
// 'try B0 catch B1 finally B2'
@@ -1752,6 +1746,7 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
TryCatchStatement* statement;
statement = factory()->NewTryCatchStatement(try_block, catch_info.scope,
catch_block, kNoSourcePosition);
+ RecordTryCatchStatementSourceRange(statement, catch_range);
try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
try_block->statements()->Add(statement, zone());
@@ -1759,29 +1754,33 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
}
if (catch_block != nullptr) {
- // For a try-catch construct append return expressions from the catch block
- // to the list of return expressions.
- function_state_->tail_call_expressions().Append(
- catch_info.tail_call_expressions);
-
DCHECK_NULL(finally_block);
DCHECK_NOT_NULL(catch_info.scope);
- return factory()->NewTryCatchStatement(try_block, catch_info.scope,
- catch_block, pos);
+ TryCatchStatement* stmt = factory()->NewTryCatchStatement(
+ try_block, catch_info.scope, catch_block, pos);
+ RecordTryCatchStatementSourceRange(stmt, catch_range);
+ return stmt;
} else {
DCHECK_NOT_NULL(finally_block);
- return factory()->NewTryFinallyStatement(try_block, finally_block, pos);
+ TryFinallyStatement* stmt =
+ factory()->NewTryFinallyStatement(try_block, finally_block, pos);
+ RecordTryFinallyStatementSourceRange(stmt, finally_range);
+ return stmt;
}
}
void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
ZoneList<Statement*>* body,
bool* ok) {
- // For ES6 Generators, we produce:
- //
- // try { InitialYield; ...body...; return {value: undefined, done: true} }
- // finally { %_GeneratorClose(generator) }
- //
+ // For ES6 Generators, we just prepend the initial yield.
+ Expression* initial_yield = BuildInitialYield(pos, kind);
+ body->Add(factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+ zone());
+ ParseStatementList(body, Token::RBRACE, ok);
+}
+
+void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
+ int pos, FunctionKind kind, ZoneList<Statement*>* body, bool* ok) {
// For ES2017 Async Generators, we produce:
//
// try {
@@ -1802,6 +1801,7 @@ void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
// - BytecodeGenerator performs special handling for ReturnStatements in
// async generator functions, resolving the appropriate Promise with a
// "done" iterator result object containing a Promise-unwrapped value.
+ DCHECK(IsAsyncGeneratorFunction(kind));
Block* try_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
Expression* initial_yield = BuildInitialYield(pos, kind);
@@ -1811,49 +1811,45 @@ void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
ParseStatementList(try_block->statements(), Token::RBRACE, ok);
if (!*ok) return;
- if (IsAsyncGeneratorFunction(kind)) {
- // Don't create iterator result for async generators, as the resume methods
- // will create it.
- Statement* final_return = BuildReturnStatement(
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
- try_block->statements()->Add(final_return, zone());
+ // Don't create iterator result for async generators, as the resume methods
+ // will create it.
+ Statement* final_return = BuildReturnStatement(
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
+ try_block->statements()->Add(final_return, zone());
- // For AsyncGenerators, a top-level catch block will reject the Promise.
- Scope* catch_scope = NewHiddenCatchScopeWithParent(scope());
+ // For AsyncGenerators, a top-level catch block will reject the Promise.
+ Scope* catch_scope = NewHiddenCatchScopeWithParent(scope());
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewVariableProxy(
- function_state_->generator_object_variable()),
- zone());
- args->Add(factory()->NewVariableProxy(catch_scope->catch_variable()),
- zone());
+ ZoneList<Expression*>* reject_args =
+ new (zone()) ZoneList<Expression*>(2, zone());
+ reject_args->Add(factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var()),
+ zone());
+ reject_args->Add(factory()->NewVariableProxy(catch_scope->catch_variable()),
+ zone());
- Expression* call = factory()->NewCallRuntime(
- Runtime::kInlineAsyncGeneratorReject, args, kNoSourcePosition);
- Block* catch_block = IgnoreCompletion(
- factory()->NewReturnStatement(call, kNoSourcePosition));
+ Expression* reject_call = factory()->NewCallRuntime(
+ Runtime::kInlineAsyncGeneratorReject, reject_args, kNoSourcePosition);
+ Block* catch_block = IgnoreCompletion(
+ factory()->NewReturnStatement(reject_call, kNoSourcePosition));
- TryStatement* try_catch = factory()->NewTryCatchStatementForAsyncAwait(
- try_block, catch_scope, catch_block, kNoSourcePosition);
+ TryStatement* try_catch = factory()->NewTryCatchStatementForAsyncAwait(
+ try_block, catch_scope, catch_block, kNoSourcePosition);
- try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
- try_block->statements()->Add(try_catch, zone());
- } else {
- Statement* final_return = factory()->NewReturnStatement(
- BuildIteratorResult(nullptr, true), kNoSourcePosition);
- try_block->statements()->Add(final_return, zone());
- }
+ try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+ try_block->statements()->Add(try_catch, zone());
Block* finally_block =
factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
- VariableProxy* call_proxy =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
- args->Add(call_proxy, zone());
- Expression* call = factory()->NewCallRuntime(Runtime::kInlineGeneratorClose,
- args, kNoSourcePosition);
+ ZoneList<Expression*>* close_args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ VariableProxy* call_proxy = factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var());
+ close_args->Add(call_proxy, zone());
+ Expression* close_call = factory()->NewCallRuntime(
+ Runtime::kInlineGeneratorClose, close_args, kNoSourcePosition);
finally_block->statements()->Add(
- factory()->NewExpressionStatement(call, kNoSourcePosition), zone());
+ factory()->NewExpressionStatement(close_call, kNoSourcePosition), zone());
body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
kNoSourcePosition),
@@ -1899,9 +1895,9 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
ZoneList<Expression*>* next_arguments =
new (zone()) ZoneList<Expression*>(0, zone());
Expression* next_call =
- factory()->NewCall(next_property, next_arguments, pos);
+ factory()->NewCall(next_property, next_arguments, kNoSourcePosition);
if (type == IteratorType::kAsync) {
- next_call = RewriteAwaitExpression(next_call, pos);
+ next_call = factory()->NewAwait(next_call, pos);
}
Expression* result_proxy = factory()->NewVariableProxy(result);
Expression* left =
@@ -1931,13 +1927,12 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
Expression* subject,
- Statement* body,
- int each_keyword_pos) {
+ Statement* body) {
ForOfStatement* for_of = stmt->AsForOfStatement();
if (for_of != NULL) {
const bool finalize = true;
return InitializeForOfStatement(for_of, each, subject, body, finalize,
- IteratorType::kNormal, each_keyword_pos);
+ IteratorType::kNormal, each->position());
} else {
if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
Variable* temp = NewTemporary(ast_value_factory()->empty_string());
@@ -2004,10 +1999,9 @@ Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
// into
//
// {
-// <let x' be a temporary variable>
-// for (x' in/of e) {
-// let/const/var x;
-// x = x';
+// var temp;
+// for (temp in/of e) {
+// let/const/var x = temp;
// b;
// }
// let x; // for TDZ
@@ -2026,6 +2020,8 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
auto descriptor = for_info->parsing_result.descriptor;
descriptor.declaration_pos = kNoSourcePosition;
descriptor.initialization_pos = kNoSourcePosition;
+ descriptor.scope = scope();
+ descriptor.declaration_kind = DeclarationDescriptor::LEXICAL_FOR_EACH;
decl.initializer = factory()->NewVariableProxy(temp);
bool is_for_var_of =
@@ -2215,7 +2211,8 @@ Statement* Parser::InitializeForOfStatement(
Statement* Parser::DesugarLexicalBindingsInForStatement(
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
- Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok) {
+ Statement* body, const SourceRange& body_range, Scope* inner_scope,
+ const ForInfo& for_info, bool* ok) {
// ES6 13.7.4.8 specifies that on each loop iteration the let variables are
// copied into a new environment. Moreover, the "next" statement must be
// evaluated not in the environment of the just completed iteration but in
@@ -2446,6 +2443,8 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
outer_loop->Initialize(NULL, NULL, NULL, inner_block);
+ RecordIterationStatementSourceRange(outer_loop, body_range);
+
return outer_block;
}
@@ -2518,25 +2517,19 @@ void Parser::DeclareArrowFunctionFormalParameters(
return;
}
- ExpressionClassifier classifier(this);
- if (!parameters->is_simple) {
- this->classifier()->RecordNonSimpleParameter();
- }
- DeclareFormalParameters(parameters->scope, parameters->params);
- if (!this->classifier()
- ->is_valid_formal_parameter_list_without_duplicates()) {
- *duplicate_loc =
- this->classifier()->duplicate_formal_parameter_error().location;
+ bool has_duplicate = false;
+ DeclareFormalParameters(parameters->scope, parameters->params,
+ parameters->is_simple, &has_duplicate);
+ if (has_duplicate) {
+ *duplicate_loc = scanner()->location();
}
DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
}
void Parser::PrepareGeneratorVariables() {
- // For generators, allocating variables in contexts is currently a win because
- // it minimizes the work needed to suspend and resume an activation. The
- // code produced for generators relies on this forced context allocation (it
- // does not restore the frame's parameters upon resume).
- function_state_->scope()->ForceContextAllocation();
+ // The code produced for generators relies on forced context allocation of
+ // parameters (it does not restore the frame's parameters upon resume).
+ function_state_->scope()->ForceContextAllocationForParameters();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
@@ -2567,7 +2560,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// handle to decide whether to invoke function name inference.
bool should_infer_name = function_name == NULL;
- // We want a non-null handle as the function name.
+ // We want a non-null handle as the function name by default. We will handle
+ // the "function does not have a shared name" case later.
if (should_infer_name) {
function_name = ast_value_factory()->empty_string();
}
@@ -2623,7 +2617,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const bool is_lazy_top_level_function = is_lazy && is_top_level;
const bool is_lazy_inner_function = is_lazy && !is_top_level;
const bool is_eager_top_level_function = !is_lazy && is_top_level;
- const bool is_declaration = function_type == FunctionLiteral::kDeclaration;
+ const bool is_expression =
+ function_type == FunctionLiteral::kAnonymousExpression ||
+ function_type == FunctionLiteral::kNamedExpression;
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
@@ -2650,7 +2646,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const bool should_preparse_inner =
parse_lazily() && FLAG_lazy_inner_functions && is_lazy_inner_function &&
- (is_declaration || FLAG_aggressive_lazy_inner_functions);
+ (!is_expression || FLAG_aggressive_lazy_inner_functions);
bool should_use_parse_task =
FLAG_use_parse_tasks && parse_lazily() && compiler_dispatcher_ &&
@@ -2668,6 +2664,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int function_length = -1;
bool has_duplicate_parameters = false;
int function_literal_id = GetNextFunctionLiteralId();
+ ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr;
Expect(Token::LPAREN, CHECK_OK);
@@ -2680,7 +2677,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Only sets fields in compiler_hints that are currently used.
int compiler_hints = SharedFunctionInfo::FunctionKindBits::encode(kind);
if (function_type == FunctionLiteral::kDeclaration) {
- compiler_hints |= 1 << SharedFunctionInfo::kIsDeclaration;
+ compiler_hints |= SharedFunctionInfo::IsDeclarationBit::encode(true);
}
should_use_parse_task = compiler_dispatcher_->Enqueue(
source_, start_pos, source_->length(), language_mode,
@@ -2733,9 +2730,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
should_use_parse_task);
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
- LazyParsingResult result =
- SkipFunction(kind, scope, &num_parameters, is_lazy_inner_function,
- is_lazy_top_level_function, CHECK_OK);
+ LazyParsingResult result = SkipFunction(
+ function_name, kind, function_type, scope, &num_parameters,
+ &produced_preparsed_scope_data, is_lazy_inner_function,
+ is_lazy_top_level_function, CHECK_OK);
if (result == kLazyParsingAborted) {
DCHECK(is_lazy_top_level_function);
@@ -2754,8 +2752,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (should_preparse) {
- scope->AnalyzePartially(&previous_zone_ast_node_factory,
- preparsed_scope_data_);
+ scope->AnalyzePartially(&previous_zone_ast_node_factory);
} else {
body = ParseFunction(function_name, pos, kind, function_type, scope,
&num_parameters, &function_length,
@@ -2810,7 +2807,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
function_name, scope, body, expected_property_count, num_parameters,
function_length, duplicate_parameters, function_type, eager_compile_hint,
- pos, true, function_literal_id);
+ pos, true, function_literal_id, produced_preparsed_scope_data);
if (should_use_parse_task) {
literals_to_stitch_.emplace_back(function_literal);
}
@@ -2825,11 +2822,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
return function_literal;
}
-Parser::LazyParsingResult Parser::SkipFunction(FunctionKind kind,
- DeclarationScope* function_scope,
- int* num_parameters,
- bool is_inner_function,
- bool may_abort, bool* ok) {
+Parser::LazyParsingResult Parser::SkipFunction(
+ const AstRawString* function_name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ ProducedPreParsedScopeData** produced_preparsed_scope_data,
+ bool is_inner_function, bool may_abort, bool* ok) {
FunctionState function_state(&function_state_, &scope_, function_scope);
DCHECK_NE(kNoSourcePosition, function_scope->start_position());
@@ -2866,26 +2864,30 @@ Parser::LazyParsingResult Parser::SkipFunction(FunctionKind kind,
}
// FIXME(marja): There are 3 ways to skip functions now. Unify them.
- if (preparsed_scope_data_->Consuming()) {
+ DCHECK_NOT_NULL(consumed_preparsed_scope_data_);
+ if (consumed_preparsed_scope_data_->HasData()) {
DCHECK(FLAG_experimental_preparser_scope_analysis);
- const PreParseData::FunctionData& data =
- preparsed_scope_data_->FindSkippableFunction(
- function_scope->start_position());
- if (data.is_valid()) {
- function_scope->set_is_skipped_function(true);
- function_scope->outer_scope()->SetMustUsePreParsedScopeData();
-
- function_scope->set_end_position(data.end);
- scanner()->SeekForward(data.end - 1);
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- *num_parameters = data.num_parameters;
- SetLanguageMode(function_scope, data.language_mode);
- if (data.uses_super_property) {
- function_scope->RecordSuperPropertyUsage();
- }
- SkipFunctionLiterals(data.num_inner_functions);
- return kLazyParsingComplete;
+ int end_position;
+ LanguageMode language_mode;
+ int num_inner_functions;
+ bool uses_super_property;
+ *produced_preparsed_scope_data =
+ consumed_preparsed_scope_data_->GetDataForSkippableFunction(
+ main_zone(), function_scope->start_position(), &end_position,
+ num_parameters, &num_inner_functions, &uses_super_property,
+ &language_mode);
+
+ function_scope->outer_scope()->SetMustUsePreParsedScopeData();
+ function_scope->set_is_skipped_function(true);
+ function_scope->set_end_position(end_position);
+ scanner()->SeekForward(end_position - 1);
+ Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
+ SetLanguageMode(function_scope, language_mode);
+ if (uses_super_property) {
+ function_scope->RecordSuperPropertyUsage();
}
+ SkipFunctionLiterals(num_inner_functions);
+ return kLazyParsingComplete;
}
// With no cached data, we partially parse the function, without building an
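The rewritten skip path above consumes the recorded preparse data instead of querying PreParseData: the stored end position lets the parser seek the scanner straight past the function body, and the remaining fields (parameter count, language mode, super-property usage, inner function count) restore what a full parse would have computed. Below is only a minimal sketch of that seek-and-restore idea under invented stand-in types (SkippableFunctionData, FakeScanner), not V8's real classes:

#include <cassert>
#include <map>

// Hypothetical stand-in: facts a preparse pass might record per function,
// keyed by the function's start position in the source.
struct SkippableFunctionData {
  int end_position;
  int num_parameters;
  int num_inner_functions;
  bool uses_super_property;
};

struct FakeScanner {
  int pos = 0;
  void SeekForward(int target) { pos = target; }  // jump past the body
};

// If data was recorded for the function starting at `start`, skip its body by
// seeking the scanner to just before the closing brace and report the recorded
// parameter count; otherwise the caller has to preparse the body for real.
// (The real code also restores language mode, super-property usage, and the
// number of inner function literals to skip.)
bool TrySkipFunction(const std::map<int, SkippableFunctionData>& recorded,
                     int start, FakeScanner* scanner, int* num_parameters) {
  auto it = recorded.find(start);
  if (it == recorded.end()) return false;
  const SkippableFunctionData& data = it->second;
  scanner->SeekForward(data.end_position - 1);  // expect '}' next
  *num_parameters = data.num_parameters;
  return true;
}

int main() {
  std::map<int, SkippableFunctionData> recorded = {
      {10, {57, 2, 1, false}}};  // function literal starting at offset 10
  FakeScanner scanner;
  int num_parameters = -1;
  assert(TrySkipFunction(recorded, 10, &scanner, &num_parameters));
  assert(scanner.pos == 56 && num_parameters == 2);
  assert(!TrySkipFunction(recorded, 99, &scanner, &num_parameters));
}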
@@ -2897,8 +2899,8 @@ Parser::LazyParsingResult Parser::SkipFunction(FunctionKind kind,
DCHECK(!is_inner_function || !may_abort);
PreParser::PreParseResult result = reusable_preparser()->PreParseFunction(
- kind, function_scope, parsing_module_, is_inner_function, may_abort,
- use_counts_);
+ function_name, kind, function_type, function_scope, parsing_module_,
+ is_inner_function, may_abort, use_counts_, produced_preparsed_scope_data);
// Return immediately if pre-parser decided to abort parsing.
if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
@@ -2929,10 +2931,22 @@ Parser::LazyParsingResult Parser::SkipFunction(FunctionKind kind,
return kLazyParsingComplete;
}
-
-Statement* Parser::BuildAssertIsCoercible(Variable* var) {
+Statement* Parser::BuildAssertIsCoercible(Variable* var,
+ ObjectLiteral* pattern) {
// if (var === null || var === undefined)
// throw /* type error kNonCoercible) */;
+ auto source_position = pattern->position();
+ const AstRawString* property = ast_value_factory()->empty_string();
+ MessageTemplate::Template msg = MessageTemplate::kNonCoercible;
+ for (ObjectLiteralProperty* literal_property : *pattern->properties()) {
+ Expression* key = literal_property->key();
+ if (key->IsPropertyName()) {
+ property = key->AsLiteral()->AsRawPropertyName();
+ msg = MessageTemplate::kNonCoercibleWithProperty;
+ source_position = key->position();
+ break;
+ }
+ }
Expression* condition = factory()->NewBinaryOperation(
Token::OR,
@@ -2944,8 +2958,7 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
factory()->NewNullLiteral(kNoSourcePosition), kNoSourcePosition),
kNoSourcePosition);
Expression* throw_type_error =
- NewThrowTypeError(MessageTemplate::kNonCoercible,
- ast_value_factory()->empty_string(), kNoSourcePosition);
+ NewThrowTypeError(msg, property, source_position);
IfStatement* if_statement = factory()->NewIfStatement(
condition,
factory()->NewExpressionStatement(throw_type_error, kNoSourcePosition),
@@ -2953,7 +2966,6 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
return if_statement;
}
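BuildAssertIsCoercible above now scans the destructuring pattern for its first literal property name, so the thrown TypeError can use kNonCoercibleWithProperty and point at that key's position instead of the generic message. The following is only a compact, self-contained sketch of the same selection logic over an invented PatternProperty stand-in rather than V8's ObjectLiteral AST:

#include <cassert>
#include <string>
#include <vector>

// Hypothetical stand-in for an object-pattern property; computed keys have no
// usable literal name, so their name field stays empty here.
struct PatternProperty {
  std::string name;
  int key_position;
};

// Mirrors the loop above: pick the first literal property name so the
// "not coercible" error can say which property was being destructured.
std::string PickPropertyForError(const std::vector<PatternProperty>& pattern,
                                 int* error_position, int pattern_position) {
  *error_position = pattern_position;
  for (const PatternProperty& p : pattern) {
    if (!p.name.empty()) {
      *error_position = p.key_position;
      return p.name;  // specific message mentioning this property
    }
  }
  return std::string();  // fall back to the generic "not coercible" message
}

int main() {
  int pos = -1;
  std::vector<PatternProperty> pattern = {{"", 4}, {"foo", 9}};
  assert(PickPropertyForError(pattern, &pos, 0) == "foo" && pos == 9);
  assert(PickPropertyForError({}, &pos, 0).empty() && pos == 0);
}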
-
class InitializerRewriter final
: public AstTraversalVisitor<InitializerRewriter> {
public:
@@ -3040,8 +3052,7 @@ Block* Parser::BuildParameterInitializationBlock(
// rewrite inner initializers of the pattern to param_scope
descriptor.scope = param_scope;
// Rewrite the outer initializer to point to param_scope
- ReparentParameterExpressionScope(stack_limit(), initial_value,
- param_scope);
+ ReparentExpressionScope(stack_limit(), initial_value, param_scope);
}
BlockState block_state(&scope_, param_scope);
@@ -3161,36 +3172,22 @@ Variable* Parser::PromiseVariable() {
// Based on the various compilation paths, there are many different code
// paths which may be the first to access the Promise temporary. Whichever
// comes first should create it and stash it in the FunctionState.
- Variable* promise = function_state_->promise_variable();
- if (function_state_->promise_variable() == nullptr) {
+ Variable* promise = function_state_->scope()->promise_var();
+ if (promise == nullptr) {
promise = function_state_->scope()->DeclarePromiseVar(
ast_value_factory()->empty_string());
}
return promise;
}
-Variable* Parser::AsyncGeneratorAwaitVariable() {
- Variable* result = function_state_->scope()->async_generator_await_var();
- if (result == nullptr) {
- result = function_state_->scope()->DeclareAsyncGeneratorAwaitVar(
- ast_value_factory()->empty_string());
- }
- return result;
-}
-
Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
- // We access the generator object twice: once for the {generator}
- // member of the Suspend AST node, and once for the result of
- // the initial yield.
- Expression* yield_result =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
- Expression* generator_object =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
+ Expression* yield_result = factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var());
// The position of the yield is important for reporting the exception
// caused by calling the .throw method on a generator suspended at the
// initial yield (i.e. right after generator instantiation).
- return BuildSuspend(generator_object, yield_result, scope()->start_position(),
- Suspend::kOnExceptionThrow, SuspendFlags::kYield);
+ return factory()->NewYield(yield_result, scope()->start_position(),
+ Suspend::kOnExceptionThrow);
}
ZoneList<Statement*>* Parser::ParseFunction(
@@ -3296,7 +3293,7 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
DCHECK_NOT_NULL(class_info->constructor);
class_info->constructor->set_raw_name(
class_name != nullptr ? ast_value_factory()->NewConsString(class_name)
- : ast_value_factory()->empty_cons_string());
+ : nullptr);
return;
}
@@ -3532,7 +3529,7 @@ void Parser::ParseOnBackground(ParseInfo* info) {
info->set_literal(result);
// We cannot internalize on a background thread; a foreground task will take
- // care of calling Parser::Internalize just before compilation.
+ // care of calling AstValueFactory::Internalize just before compilation.
if (produce_cached_parse_data()) {
if (result != NULL) *info->cached_data() = logger.GetScriptData();
@@ -3740,10 +3737,8 @@ ZoneList<Expression*>* Parser::PrepareSpreadArguments(
Expression* Parser::SpreadCall(Expression* function,
ZoneList<Expression*>* args, int pos,
Call::PossiblyEval is_possibly_eval) {
- // Handle these cases in BytecodeGenerator.
- // [Call,New]WithSpread bytecodes aren't used with tailcalls - see
- // https://crbug.com/v8/5867
- if (!allow_tailcalls() && OnlyLastArgIsSpread(args)) {
+ // Handle this case in BytecodeGenerator.
+ if (OnlyLastArgIsSpread(args)) {
return factory()->NewCall(function, args, pos);
}
@@ -3824,14 +3819,6 @@ void Parser::SetAsmModule() {
scope()->AsDeclarationScope()->set_asm_module();
}
-void Parser::MarkCollectedTailCallExpressions() {
- const ZoneList<Expression*>& tail_call_expressions =
- function_state_->tail_call_expressions().expressions();
- for (int i = 0; i < tail_call_expressions.length(); ++i) {
- MarkTailPosition(tail_call_expressions[i]);
- }
-}
-
Expression* Parser::ExpressionListToExpression(ZoneList<Expression*>* args) {
Expression* expr = args->at(0);
for (int i = 1; i < args->length(); ++i) {
@@ -3847,7 +3834,7 @@ void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
FunctionKind kind, int pos) {
// When parsing an async arrow function, we get here without having called
// PrepareGeneratorVariables yet, so do it now.
- if (function_state_->generator_object_variable() == nullptr) {
+ if (function_state_->scope()->generator_object_var() == nullptr) {
PrepareGeneratorVariables();
}
}
@@ -3871,102 +3858,6 @@ void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
body->Add(block, zone());
}
-Expression* Parser::RewriteAwaitExpression(Expression* value, int await_pos) {
- // In an Async Function:
- // yield do {
- // tmp = <operand>;
- // %AsyncFunctionAwait(.generator_object, tmp, .promise);
- // .promise
- // }
- //
- // In an Async Generator:
- // yield do {
- // tmp = <operand>;
- // %AsyncGeneratorAwait(.generator_object, tmp)
- // .await_result_var
- // }
- //
- // The value of the expression is returned to the caller of the async
- // function for the first yield statement; for this, .promise is the
- // appropriate return value, being a Promise that will be fulfilled or
- // rejected with the appropriate value by the desugaring. Subsequent yield
- // occurrences will return to the AsyncFunctionNext call within the
- // implementation of the intermediate throwaway Promise's then handler.
- // This handler has nothing useful to do with the value, as the Promise is
- // ignored. If we yielded the value of the throwawayPromise that
- // AsyncFunctionAwait creates as an intermediate, it would create a memory
- // leak; we must return .promise instead;
- // The operand needs to be evaluated on a separate statement in order to get
- // a break location, and the .promise needs to be read earlier so that it
- // doesn't insert a false location.
- // TODO(littledan): investigate why this ordering is needed in more detail.
- //
- // In the case of Async Generators, `.await_result_var` is not actually used
- // for anything, but exists because of the current requirement that
- // Do Expressions have a result variable.
- Variable* generator_object_variable =
- function_state_->generator_object_variable();
- DCHECK_NOT_NULL(generator_object_variable);
-
- const int nopos = kNoSourcePosition;
-
- Block* do_block = factory()->NewBlock(nullptr, 2, false, nopos);
-
- // Wrap value evaluation to provide a break location.
- Variable* temp_var = NewTemporary(ast_value_factory()->empty_string());
- Expression* value_assignment = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(temp_var), value, nopos);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(value_assignment, value->position()),
- zone());
-
- Expression* generator_object =
- factory()->NewVariableProxy(generator_object_variable);
-
- if (is_async_generator()) {
- // AsyncGeneratorAwaitCaught will be rewritten to
- // AsyncGeneratorAwaitUncaught by AstNumberingVisitor if there is no local
- // enclosing try/catch block (not counting the one implicitly added in
- // ParseAndRewriteGeneratorFunctionBody)
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(generator_object, zone());
- args->Add(factory()->NewVariableProxy(temp_var), zone());
-
- Expression* await = factory()->NewCallRuntime(
- Context::ASYNC_GENERATOR_AWAIT_CAUGHT, args, nopos);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(await, await_pos), zone());
-
- // Wrap await to provide a break location between value evaluation and
- // yield.
- Expression* do_expr = factory()->NewDoExpression(
- do_block, AsyncGeneratorAwaitVariable(), nopos);
- return BuildSuspend(generator_object, do_expr, nopos,
- Suspend::kOnExceptionRethrow, SuspendFlags::kAwait);
- }
-
- // The parser emits calls to AsyncFunctionAwaitCaught, but the
- // AstNumberingVisitor will rewrite this to AsyncFunctionAwaitUncaught if
- // there is no local enclosing try/catch block.
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
- args->Add(generator_object, zone());
- args->Add(factory()->NewVariableProxy(temp_var), zone());
- args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
-
- Expression* await = factory()->NewCallRuntime(
- Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, args, nopos);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(await, await_pos), zone());
-
- // Wrap await to provide a break location between value evaluation and yield.
- Expression* do_expr =
- factory()->NewDoExpression(do_block, PromiseVariable(), nopos);
-
- return factory()->NewSuspend(generator_object, do_expr, nopos,
- Suspend::kOnExceptionRethrow,
- SuspendFlags::kAwait);
-}
-
class NonPatternRewriter : public AstExpressionRewriter {
public:
NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
@@ -4090,7 +3981,6 @@ Expression* Parser::RewriteAssignExponentiation(Expression* left,
pos);
}
UNREACHABLE();
- return nullptr;
}
Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
@@ -4192,32 +4082,41 @@ void Parser::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
function_state_->AddNonPatternForRewriting(expr, ok);
}
-void Parser::AddAccessorPrefixToFunctionName(bool is_get,
- FunctionLiteral* function,
- const AstRawString* name) {
- DCHECK_NOT_NULL(name);
- const AstRawString* prefix = is_get ? ast_value_factory()->get_space_string()
- : ast_value_factory()->set_space_string();
- function->set_raw_name(ast_value_factory()->NewConsString(prefix, name));
+void Parser::SetFunctionNameFromPropertyName(LiteralProperty* property,
+ const AstRawString* name,
+ const AstRawString* prefix) {
+ // Ensure that the function we are going to create has a shared name iff
+ // we are not going to set it later.
+ if (property->NeedsSetFunctionName()) {
+ name = nullptr;
+ prefix = nullptr;
+ } else {
+ // If the property value is an anonymous function, an anonymous class, a
+ // concise method, or an accessor function which doesn't require the name
+ // to be set, then the shared name must be provided.
+ DCHECK_IMPLIES(property->value()->IsAnonymousFunctionDefinition() ||
+ property->value()->IsConciseMethodDefinition() ||
+ property->value()->IsAccessorFunctionDefinition(),
+ name != nullptr);
+ }
+
+ Expression* value = property->value();
+ SetFunctionName(value, name, prefix);
}
void Parser::SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
- const AstRawString* name) {
- DCHECK(property->kind() != ObjectLiteralProperty::GETTER);
- DCHECK(property->kind() != ObjectLiteralProperty::SETTER);
-
- // Computed name setting must happen at runtime.
- DCHECK(!property->is_computed_name());
-
+ const AstRawString* name,
+ const AstRawString* prefix) {
// Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
// of an object literal.
+ // See ES #sec-__proto__-property-names-in-object-initializers.
if (property->IsPrototype()) return;
- Expression* value = property->value();
-
- DCHECK(!value->IsAnonymousFunctionDefinition() ||
+ DCHECK(!property->value()->IsAnonymousFunctionDefinition() ||
property->kind() == ObjectLiteralProperty::COMPUTED);
- SetFunctionName(value, name);
+
+ SetFunctionNameFromPropertyName(static_cast<LiteralProperty*>(property), name,
+ prefix);
}
void Parser::SetFunctionNameFromIdentifierRef(Expression* value,
@@ -4226,19 +4125,32 @@ void Parser::SetFunctionNameFromIdentifierRef(Expression* value,
SetFunctionName(value, identifier->AsVariableProxy()->raw_name());
}
-void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
- DCHECK_NOT_NULL(name);
- if (!value->IsAnonymousFunctionDefinition()) return;
+void Parser::SetFunctionName(Expression* value, const AstRawString* name,
+ const AstRawString* prefix) {
+ if (!value->IsAnonymousFunctionDefinition() &&
+ !value->IsConciseMethodDefinition() &&
+ !value->IsAccessorFunctionDefinition()) {
+ return;
+ }
auto function = value->AsFunctionLiteral();
if (value->IsClassLiteral()) {
function = value->AsClassLiteral()->constructor();
}
if (function != nullptr) {
- function->set_raw_name(ast_value_factory()->NewConsString(name));
+ AstConsString* cons_name = nullptr;
+ if (name != nullptr) {
+ if (prefix != nullptr) {
+ cons_name = ast_value_factory()->NewConsString(prefix, name);
+ } else {
+ cons_name = ast_value_factory()->NewConsString(name);
+ }
+ } else {
+ DCHECK_NULL(prefix);
+ }
+ function->set_raw_name(cons_name);
}
}
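SetFunctionName above now also applies to concise methods and accessors and accepts an optional prefix (the accessor path passes the "get " / "set " strings that AddAccessorPrefixToFunctionName used to add), and it only builds a name when one was actually supplied. Here is a small sketch of that assembly rule with plain std::string and const char* stand-ins; the real code produces AstConsString values via the AstValueFactory:

#include <cassert>
#include <string>

// Assemble the name an otherwise-anonymous function should carry:
//   name == nullptr   -> no name is set at all
//   prefix == nullptr -> just the name (e.g. "foo")
//   both present      -> prefix + name (e.g. "get foo")
// A null name with a non-null prefix is not allowed, mirroring the DCHECK
// in the hunk above.
std::string AssembleFunctionName(const char* name, const char* prefix) {
  if (name == nullptr) {
    assert(prefix == nullptr);  // names are only prefixed when present
    return std::string();       // leave the function unnamed
  }
  if (prefix == nullptr) return std::string(name);
  return std::string(prefix) + name;
}

int main() {
  assert(AssembleFunctionName("foo", nullptr) == "foo");     // const foo = () => {}
  assert(AssembleFunctionName("foo", "get ") == "get foo");  // { get foo() {} }
  assert(AssembleFunctionName(nullptr, nullptr).empty());    // computed names, etc.
}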
-
// Desugaring of yield*
// ====================
//
@@ -4283,17 +4195,9 @@ void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
//
// // From the generator to its user:
// // Forward output, receive new input, and determine resume mode.
-// mode = kReturn;
-// try {
-// try {
-// RawYield(output); // See explanation above.
-// mode = kNext;
-// } catch (error) {
-// mode = kThrow;
-// }
-// } finally {
-// input = function.sent;
-// continue;
+// RawYield(output); // See explanation above.
+// mode = %GeneratorGetResumeMode();
+// input = function.sent;
// }
// }
//
@@ -4318,12 +4222,15 @@ void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
// output = %_Call(iteratorReturn, iterator, input);
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
-Expression* Parser::RewriteYieldStar(Expression* generator,
- Expression* iterable, int pos) {
+Expression* Parser::RewriteYieldStar(Expression* iterable, int pos) {
const int nopos = kNoSourcePosition;
IteratorType type =
is_async_generator() ? IteratorType::kAsync : IteratorType::kNormal;
+ if (type == IteratorType::kNormal) {
+ return factory()->NewYieldStar(iterable, pos);
+ }
+
// Forward definition for break/continue statements.
WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
@@ -4385,7 +4292,7 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
args->Add(input_proxy, zone());
Expression* call = factory()->NewCall(next_property, args, nopos);
if (type == IteratorType::kAsync) {
- call = RewriteAwaitExpression(call, nopos);
+ call = factory()->NewAwait(call, nopos);
}
Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* assignment =
@@ -4466,7 +4373,7 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
- call = RewriteAwaitExpression(call, nopos);
+ call = factory()->NewAwait(call, nopos);
}
Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(var_output), call, nopos);
@@ -4510,48 +4417,30 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
property, break_loop, factory()->NewEmptyStatement(nopos), nopos);
}
-
- // mode = kReturn;
- Statement* set_mode_return;
- {
- Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
- Expression* kreturn =
- factory()->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, mode_proxy, kreturn, nopos);
- set_mode_return = factory()->NewExpressionStatement(assignment, nopos);
- }
-
// Yield(output);
Statement* yield_output;
{
Expression* output_proxy = factory()->NewVariableProxy(var_output);
Suspend* yield =
- BuildSuspend(generator, output_proxy, nopos, Suspend::kOnExceptionThrow,
- SuspendFlags::kYieldStar);
+ factory()->NewYield(output_proxy, nopos, Suspend::kNoControl);
yield_output = factory()->NewExpressionStatement(yield, nopos);
}
- // mode = kNext;
- Statement* set_mode_next;
+ // mode = %GeneratorGetResumeMode();
+ Statement* get_mode;
{
Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
- Expression* knext =
- factory()->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
- Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
- set_mode_next = factory()->NewExpressionStatement(assignment, nopos);
- }
- // mode = kThrow;
- Statement* set_mode_throw;
- {
- Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
- Expression* kthrow =
- factory()->NewSmiLiteral(JSGeneratorObject::kThrow, nopos);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ VariableProxy* generator = factory()->NewVariableProxy(
+ function_state_->scope()->generator_object_var());
+ args->Add(generator, zone());
+ Expression* mode = factory()->NewCallRuntime(
+ Runtime::kInlineGeneratorGetResumeMode, args, pos);
+
Expression* assignment =
- factory()->NewAssignment(Token::ASSIGN, mode_proxy, kthrow, nopos);
- set_mode_throw = factory()->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, mode_proxy, mode, nopos);
+ get_mode = factory()->NewExpressionStatement(assignment, nopos);
}
// input = function.sent;
@@ -4597,36 +4486,6 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
// Now put things together.
- // try { ... } catch(e) { ... }
- Statement* try_catch;
- {
- Block* try_block = factory()->NewBlock(nullptr, 2, false, nopos);
- try_block->statements()->Add(yield_output, zone());
- try_block->statements()->Add(set_mode_next, zone());
-
- Block* catch_block = factory()->NewBlock(nullptr, 1, false, nopos);
- catch_block->statements()->Add(set_mode_throw, zone());
-
- Scope* catch_scope = NewHiddenCatchScopeWithParent(scope());
-
- try_catch = factory()->NewTryCatchStatementForDesugaring(
- try_block, catch_scope, catch_block, nopos);
- }
-
- // try { ... } finally { ... }
- Statement* try_finally;
- {
- Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(try_catch, zone());
-
- Block* finally = factory()->NewBlock(nullptr, 2, false, nopos);
- finally->statements()->Add(get_input, zone());
- finally->statements()->Add(factory()->NewContinueStatement(loop, nopos),
- zone());
-
- try_finally = factory()->NewTryFinallyStatement(try_block, finally, nopos);
- }
-
// switch (mode) { ... }
SwitchStatement* switch_mode = factory()->NewSwitchStatement(nullptr, nopos);
{
@@ -4666,7 +4525,6 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Block* loop_body = factory()->NewBlock(nullptr, 5, false, nopos);
loop_body->statements()->Add(switch_mode, zone());
loop_body->statements()->Add(if_done, zone());
- loop_body->statements()->Add(set_mode_return, zone());
if (is_async_generator()) {
// AsyncGeneratorYield does not yield the original iterator result,
@@ -4683,7 +4541,9 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
factory()->NewExpressionStatement(assign, nopos), zone());
}
- loop_body->statements()->Add(try_finally, zone());
+ loop_body->statements()->Add(yield_output, zone());
+ loop_body->statements()->Add(get_input, zone());
+ loop_body->statements()->Add(get_mode, zone());
loop->Initialize(factory()->NewBooleanLiteral(true, nopos), loop_body);
}
@@ -4794,7 +4654,7 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
- call = RewriteAwaitExpression(call, nopos);
+ call = factory()->NewAwait(call, nopos);
}
Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* assignment =
@@ -5015,7 +4875,7 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
- call = RewriteAwaitExpression(call, nopos);
+ call = factory()->NewAwait(call, nopos);
}
Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
@@ -5043,7 +4903,7 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
- call = RewriteAwaitExpression(call, nopos);
+ call = factory()->NewAwait(call, nopos);
}
Expression* output_proxy = factory()->NewVariableProxy(var_output);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index c51c0eff01..5ed5155c8a 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -5,6 +5,7 @@
#ifndef V8_PARSING_PARSER_H_
#define V8_PARSING_PARSER_H_
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/compiler-specific.h"
@@ -23,6 +24,7 @@ class ScriptCompiler;
namespace internal {
+class ConsumedPreParsedScopeData;
class ParseInfo;
class ScriptData;
class ParserTarget;
@@ -156,8 +158,6 @@ struct ParserTypes<Parser> {
typedef ParserBase<Parser> Base;
typedef Parser Impl;
- typedef v8::internal::Variable Variable;
-
// Return types for traversing functions.
typedef const AstRawString* Identifier;
typedef v8::internal::Expression* Expression;
@@ -290,13 +290,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
reusable_preparser_ =
new PreParser(zone(), &scanner_, stack_limit_, ast_value_factory(),
&pending_error_handler_, runtime_call_stats_,
- preparsed_scope_data_, parsing_on_main_thread_);
+ parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(tailcalls);
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_sent);
- SET_ALLOW(harmony_trailing_commas);
SET_ALLOW(harmony_class_fields);
SET_ALLOW(harmony_object_rest_spread);
SET_ALLOW(harmony_dynamic_import);
@@ -346,12 +344,16 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void RewriteCatchPattern(CatchInfo* catch_info, bool* ok);
void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok);
Statement* RewriteTryStatement(Block* try_block, Block* catch_block,
+ const SourceRange& catch_range,
Block* finally_block,
+ const SourceRange& finally_range,
const CatchInfo& catch_info, int pos);
-
void ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
ZoneList<Statement*>* body,
bool* ok);
+ void ParseAndRewriteAsyncGeneratorFunctionBody(int pos, FunctionKind kind,
+ ZoneList<Statement*>* body,
+ bool* ok);
void CreateFunctionNameAssignment(const AstRawString* function_name, int pos,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope,
@@ -458,6 +460,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
PatternContext context_;
Expression* pattern_;
int initializer_position_;
+ int value_beg_position_;
Block* block_;
const DeclarationDescriptor* descriptor_;
ZoneList<const AstRawString*>* names_;
@@ -481,7 +484,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Initialize the components of a for-in / for-of statement.
Statement* InitializeForEachStatement(ForEachStatement* stmt,
Expression* each, Expression* subject,
- Statement* body, int each_keyword_pos);
+ Statement* body);
Statement* InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
Expression* iterable, Statement* body,
bool finalize, IteratorType type,
@@ -495,7 +498,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Statement* DesugarLexicalBindingsInForStatement(
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
- Statement* body, Scope* inner_scope, const ForInfo& for_info, bool* ok);
+ Statement* body, const SourceRange& body_range, Scope* inner_scope,
+ const ForInfo& for_info, bool* ok);
Expression* RewriteDoExpression(Block* body, int pos, bool* ok);
@@ -543,7 +547,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
- Statement* BuildAssertIsCoercible(Variable* var);
+ Statement* BuildAssertIsCoercible(Variable* var, ObjectLiteral* pattern);
// Factory methods.
FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
@@ -553,10 +557,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// by parsing the function with PreParser. Consumes the ending }.
// If may_abort == true, the (pre-)parser may decide to abort skipping
// in order to force the function to be eagerly parsed, after all.
- LazyParsingResult SkipFunction(FunctionKind kind,
- DeclarationScope* function_scope,
- int* num_parameters, bool is_inner_function,
- bool may_abort, bool* ok);
+ LazyParsingResult SkipFunction(
+ const AstRawString* function_name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ ProducedPreParsedScopeData** produced_preparsed_scope_data,
+ bool is_inner_function, bool may_abort, bool* ok);
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
@@ -628,9 +634,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void SetLanguageMode(Scope* scope, LanguageMode mode);
void SetAsmModule();
- V8_INLINE void MarkCollectedTailCallExpressions();
- V8_INLINE void MarkTailPosition(Expression* expression);
-
// Rewrite all DestructuringAssignments in the current FunctionState.
V8_INLINE void RewriteDestructuringAssignments();
@@ -680,20 +683,19 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
IteratorType type);
Statement* CheckCallable(Variable* var, Expression* error, int pos);
- V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
V8_INLINE void PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
FunctionKind kind, int pos);
V8_INLINE void RewriteAsyncFunctionBody(ZoneList<Statement*>* body,
Block* block,
Expression* return_value, bool* ok);
- Expression* RewriteYieldStar(Expression* generator, Expression* expression,
- int pos);
+ Expression* RewriteYieldStar(Expression* expression, int pos);
void AddArrowFunctionFormalParameters(ParserFormalParameters* parameters,
Expression* params, int end_pos,
bool* ok);
- void SetFunctionName(Expression* value, const AstRawString* name);
+ void SetFunctionName(Expression* value, const AstRawString* name,
+ const AstRawString* prefix = nullptr);
// Helper functions for recursive descent.
V8_INLINE bool IsEval(const AstRawString* identifier) const {
@@ -874,8 +876,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
int pos);
- Expression* BuildIteratorResult(Expression* value, bool done);
-
// Generate AST node that throws a ReferenceError with the given type.
V8_INLINE Expression* NewThrowReferenceError(
MessageTemplate::Template message, int pos) {
@@ -1083,11 +1083,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
- const ThreadedList<ParserFormalParameters::Parameter>& parameters) {
- bool is_simple = classifier()->is_simple_parameter_list();
+ const ThreadedList<ParserFormalParameters::Parameter>& parameters,
+ bool is_simple, bool* has_duplicate = nullptr) {
if (!is_simple) scope->SetHasNonSimpleParameters();
for (auto parameter : parameters) {
- bool is_duplicate = false;
bool is_optional = parameter->initializer != nullptr;
// If the parameter list is simple, declare the parameters normally with
// their names. If the parameter list is not simple, declare a temporary
@@ -1096,12 +1095,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
scope->DeclareParameter(
is_simple ? parameter->name : ast_value_factory()->empty_string(),
is_simple ? VAR : TEMPORARY, is_optional, parameter->is_rest,
- &is_duplicate, ast_value_factory(), parameter->position);
- if (is_duplicate &&
- classifier()->is_valid_formal_parameter_list_without_duplicates()) {
- classifier()->RecordDuplicateFormalParameterError(
- scanner()->location());
- }
+ has_duplicate, ast_value_factory(), parameter->position);
}
}
@@ -1118,11 +1112,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
- void AddAccessorPrefixToFunctionName(bool is_get, FunctionLiteral* function,
- const AstRawString* name);
-
+ void SetFunctionNameFromPropertyName(LiteralProperty* property,
+ const AstRawString* name,
+ const AstRawString* prefix = nullptr);
void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
- const AstRawString* name);
+ const AstRawString* name,
+ const AstRawString* prefix = nullptr);
void SetFunctionNameFromIdentifierRef(Expression* value,
Expression* identifier);
@@ -1146,6 +1141,84 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return parameters_end_pos_ != kNoSourcePosition;
}
+ V8_INLINE void RecordBlockSourceRange(Block* node,
+ int32_t continuation_position) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node, new (zone()) BlockSourceRanges(continuation_position));
+ }
+
+ V8_INLINE void RecordCaseClauseSourceRange(CaseClause* node,
+ const SourceRange& body_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(node,
+ new (zone()) CaseClauseSourceRanges(body_range));
+ }
+
+ V8_INLINE void RecordConditionalSourceRange(Expression* node,
+ const SourceRange& then_range,
+ const SourceRange& else_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node->AsConditional(),
+ new (zone()) ConditionalSourceRanges(then_range, else_range));
+ }
+
+ V8_INLINE void RecordJumpStatementSourceRange(Statement* node,
+ int32_t continuation_position) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ static_cast<JumpStatement*>(node),
+ new (zone()) JumpStatementSourceRanges(continuation_position));
+ }
+
+ V8_INLINE void RecordIfStatementSourceRange(Statement* node,
+ const SourceRange& then_range,
+ const SourceRange& else_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node->AsIfStatement(),
+ new (zone()) IfStatementSourceRanges(then_range, else_range));
+ }
+
+ V8_INLINE void RecordIterationStatementSourceRange(
+ IterationStatement* node, const SourceRange& body_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node, new (zone()) IterationStatementSourceRanges(body_range));
+ }
+
+ V8_INLINE void RecordSwitchStatementSourceRange(
+ Statement* node, int32_t continuation_position) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node->AsSwitchStatement(),
+ new (zone()) SwitchStatementSourceRanges(continuation_position));
+ }
+
+ V8_INLINE void RecordThrowSourceRange(Statement* node,
+ int32_t continuation_position) {
+ if (source_range_map_ == nullptr) return;
+ ExpressionStatement* expr_stmt = static_cast<ExpressionStatement*>(node);
+ Throw* throw_expr = expr_stmt->expression()->AsThrow();
+ source_range_map_->Insert(
+ throw_expr, new (zone()) ThrowSourceRanges(continuation_position));
+ }
+
+ V8_INLINE void RecordTryCatchStatementSourceRange(
+ TryCatchStatement* node, const SourceRange& body_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node, new (zone()) TryCatchStatementSourceRanges(body_range));
+ }
+
+ V8_INLINE void RecordTryFinallyStatementSourceRange(
+ TryFinallyStatement* node, const SourceRange& body_range) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(
+ node, new (zone()) TryFinallyStatementSourceRanges(body_range));
+ }
+
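All of the Record*SourceRange helpers above share one shape: return early when range collection is off (source_range_map_ == nullptr), otherwise zone-allocate a small record and key it by AST node. A hedged standalone sketch of that guard-and-insert pattern, with std::unordered_map and plain structs standing in for SourceRangeMap and the zone-allocated range objects:

#include <unordered_map>

struct SourceRange { int start; int end; };  // stand-in for v8::internal::SourceRange
struct AstNode {};                           // stand-in for an AST node

// Stand-in for SourceRangeMap: node -> collected range. In the patch the map
// is zone-allocated and only exists when range collection was requested.
using RangeMap = std::unordered_map<const AstNode*, SourceRange>;

class RangeRecorder {
 public:
  explicit RangeRecorder(RangeMap* map) : map_(map) {}

  // Mirrors Parser::RecordIterationStatementSourceRange and friends:
  // a no-op unless a map was installed, otherwise one record per node.
  void RecordBodyRange(const AstNode* node, const SourceRange& body_range) {
    if (map_ == nullptr) return;  // range collection disabled
    (*map_)[node] = body_range;
  }

 private:
  RangeMap* map_;  // may be nullptr
};

int main() {
  RangeMap map;
  AstNode loop;
  RangeRecorder enabled(&map), disabled(nullptr);
  enabled.RecordBodyRange(&loop, {10, 42});
  disabled.RecordBodyRange(&loop, {10, 42});  // silently ignored
  return map.size() == 1 ? 0 : 1;
}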
// Parser's private field members.
friend class DiscardableZoneScope; // Uses reusable_preparser_.
// FIXME(marja): Make reusable_preparser_ always use its own temp Zone (call
@@ -1160,6 +1233,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
CompilerDispatcher* compiler_dispatcher_ = nullptr;
ParseInfo* main_parse_info_ = nullptr;
+ SourceRangeMap* source_range_map_ = nullptr;
+
friend class ParserTarget;
friend class ParserTargetScope;
ParserTarget* target_stack_; // for break, continue statements
@@ -1176,6 +1251,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool allow_lazy_;
bool temp_zoned_;
ParserLogger* log_;
+ ConsumedPreParsedScopeData* consumed_preparsed_scope_data_;
// If not kNoSourcePosition, indicates that the first function literal
// encountered is a dynamic function, see CreateDynamicFunction(). This field
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 1565156f18..0d95144f9d 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -26,6 +26,7 @@ void Parser::PatternRewriter::DeclareAndInitializeVariables(
rewriter.context_ = BINDING;
rewriter.pattern_ = declaration->pattern;
rewriter.initializer_position_ = declaration->initializer_position;
+ rewriter.value_beg_position_ = declaration->value_beg_position;
rewriter.block_ = block;
rewriter.descriptor_ = declaration_descriptor;
rewriter.names_ = names;
@@ -190,58 +191,24 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// 'with' statement or 'catch' block). Global var declarations
// also need special treatment.
- if (descriptor_->mode == VAR && var_init_scope->is_script_scope()) {
- // Global variable declarations must be compiled in a specific
- // way. When the script containing the global variable declaration
- // is entered, the global variable must be declared, so that if it
- // doesn't exist (on the global object itself, see ES5 errata) it
- // gets created with an initial undefined value. This is handled
- // by the declarations part of the function representing the
- // top-level global code; see Runtime::DeclareGlobalVariable. If
- // it already exists (in the object or in a prototype), it is
- // *not* touched until the variable declaration statement is
- // executed.
- //
- // Executing the variable declaration statement will always
- // guarantee to give the global object an own property.
- // This way, global variable declarations can shadow
- // properties in the prototype chain, but only after the variable
- // declaration statement has been executed. This is important in
- // browsers where the global object (window) has lots of
- // properties defined in prototype objects.
-
- ZoneList<Expression*>* arguments =
- new (zone()) ZoneList<Expression*>(3, zone());
- arguments->Add(
- factory()->NewStringLiteral(name, descriptor_->declaration_pos),
- zone());
- arguments->Add(factory()->NewNumberLiteral(var_init_scope->language_mode(),
- kNoSourcePosition),
- zone());
- arguments->Add(value, zone());
-
- CallRuntime* initialize = factory()->NewCallRuntime(
- Runtime::kInitializeVarGlobal, arguments, value->position());
- block_->statements()->Add(
- factory()->NewExpressionStatement(initialize, initialize->position()),
- zone());
+ // For 'let' and 'const' declared variables the initialization always
+ // assigns to the declared variable.
+ // But for var declarations we need to do a new lookup.
+ if (descriptor_->mode == VAR) {
+ proxy = var_init_scope->NewUnresolved(factory(), name);
} else {
- // For 'let' and 'const' declared variables the initialization always
- // assigns to the declared variable.
- // But for var declarations we need to do a new lookup.
- if (descriptor_->mode == VAR) {
- proxy = var_init_scope->NewUnresolved(factory(), name);
- } else {
- DCHECK_NOT_NULL(proxy);
- DCHECK_NOT_NULL(proxy->var());
- }
- // Add break location for destructured sub-pattern.
- int pos = IsSubPattern() ? pattern->position() : value->position();
- Assignment* assignment =
- factory()->NewAssignment(Token::INIT, proxy, value, pos);
- block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, pos), zone());
+ DCHECK_NOT_NULL(proxy);
+ DCHECK_NOT_NULL(proxy->var());
}
+ // Add break location for destructured sub-pattern.
+ int pos = value_beg_position_;
+ if (pos == kNoSourcePosition) {
+ pos = IsSubPattern() ? pattern->position() : value->position();
+ }
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
+ block_->statements()->Add(factory()->NewExpressionStatement(assignment, pos),
+ zone());
}
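The break-location choice above is easy to misread: the newly threaded value_beg_position_ wins whenever the declaration recorded one, and only otherwise does the rewriter fall back to the sub-pattern or initializer position. A small standalone restatement of just that selection (positions passed as plain ints; kNoSourcePosition assumed to be the usual -1 sentinel):

// Assumed sentinel, matching the parser's kNoSourcePosition convention.
constexpr int kNoSourcePosition = -1;

// Mirrors the position selection for the INIT assignment's break location:
// prefer the recorded start of the initializer value; otherwise fall back to
// the sub-pattern position (nested patterns) or the value position (top level).
int BreakPositionForInit(int value_beg_position, bool is_sub_pattern,
                         int pattern_position, int value_position) {
  if (value_beg_position != kNoSourcePosition) return value_beg_position;
  return is_sub_pattern ? pattern_position : value_position;
}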
@@ -322,11 +289,13 @@ void Parser::PatternRewriter::VisitRewritableExpression(
set_context(old_context);
}
+// When an extra declaration scope needs to be inserted to account for
+// a sloppy eval in a default parameter or function body, the expressions
+// need to be in that new inner scope which was added after initial
+// parsing.
bool Parser::PatternRewriter::DeclaresParameterContainingSloppyEval() const {
- // Need to check for a binding context to make sure we have a descriptor.
- if (IsBindingContext() &&
- // Only relevant for parameters.
- descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
+ DCHECK(IsBindingContext());
+ if (descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
// And only when scope is a block scope;
// without eval, it is a function scope.
scope()->is_block_scope()) {
@@ -339,13 +308,12 @@ bool Parser::PatternRewriter::DeclaresParameterContainingSloppyEval() const {
return false;
}
-// When an extra declaration scope needs to be inserted to account for
-// a sloppy eval in a default parameter or function body, the expressions
-// needs to be in that new inner scope which was added after initial
-// parsing.
void Parser::PatternRewriter::RewriteParameterScopes(Expression* expr) {
- if (DeclaresParameterContainingSloppyEval()) {
- ReparentParameterExpressionScope(parser_->stack_limit(), expr, scope());
+ if (!IsBindingContext()) return;
+ if (DeclaresParameterContainingSloppyEval() ||
+ descriptor_->declaration_kind ==
+ DeclarationDescriptor::LEXICAL_FOR_EACH) {
+ ReparentExpressionScope(parser_->stack_limit(), expr, scope());
}
}
@@ -364,7 +332,8 @@ void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
rest_runtime_callargs->Add(factory()->NewVariableProxy(temp), zone());
}
- block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
+ block_->statements()->Add(parser_->BuildAssertIsCoercible(temp, pattern),
+ zone());
for (ObjectLiteralProperty* property : *pattern->properties()) {
PatternContext context = SetInitializerContextIfNeeded(property->value());
@@ -430,9 +399,9 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
DCHECK(block_->ignore_completion_value());
auto temp = *temp_var = CreateTempVar(current_value_);
- auto iterator = CreateTempVar(
- factory()->NewGetIterator(factory()->NewVariableProxy(temp),
- IteratorType::kNormal, kNoSourcePosition));
+ auto iterator = CreateTempVar(factory()->NewGetIterator(
+ factory()->NewVariableProxy(temp), current_value_, IteratorType::kNormal,
+ current_value_->position()));
auto done =
CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
auto result = CreateTempVar();
@@ -762,7 +731,9 @@ NOT_A_PATTERN(UnaryOperation)
NOT_A_PATTERN(VariableDeclaration)
NOT_A_PATTERN(WhileStatement)
NOT_A_PATTERN(WithStatement)
-NOT_A_PATTERN(Suspend)
+NOT_A_PATTERN(Yield)
+NOT_A_PATTERN(YieldStar)
+NOT_A_PATTERN(Await)
#undef NOT_A_PATTERN
} // namespace internal
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index c8ea3de22a..59f46d2d92 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -8,250 +8,314 @@
#include "src/ast/variables.h"
#include "src/handles.h"
#include "src/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/parsing/preparser.h"
namespace v8 {
namespace internal {
namespace {
+class ScopeCallsEvalField : public BitField<bool, 0, 1> {};
+class InnerScopeCallsEvalField
+ : public BitField<bool, ScopeCallsEvalField::kNext, 1> {};
+
class VariableIsUsedField : public BitField16<bool, 0, 1> {};
class VariableMaybeAssignedField
: public BitField16<bool, VariableIsUsedField::kNext, 1> {};
class VariableContextAllocatedField
: public BitField16<bool, VariableMaybeAssignedField::kNext, 1> {};
-const int kFunctionDataSize = 8;
+const int kMagicValue = 0xc0de0de;
+
+enum SkippableFunctionDataOffsets {
+ kStartPosition,
+ kEndPosition,
+ kNumParameters,
+ kNumInnerFunctions,
+ kLanguageAndSuper,
+ kSize
+};
+
+STATIC_ASSERT(LANGUAGE_END == 2);
+class LanguageField : public BitField<int, 0, 1> {};
+class UsesSuperField : public BitField<bool, LanguageField::kNext, 1> {};
} // namespace
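The bit fields declared in this anonymous namespace pack several booleans and a language mode into single uint32_t slots of the backing store; STATIC_ASSERT(LANGUAGE_END == 2) is what makes a one-bit language field sufficient. A standalone sketch of the same packing with explicit masks (field layout mirrors ScopeCallsEvalField/InnerScopeCallsEvalField and LanguageField/UsesSuperField, not the actual BitField templates):

#include <cassert>
#include <cstdint>

// One-bit fields laid out as in the BitField declarations above.
constexpr uint32_t kScopeCallsEvalBit = 1u << 0;
constexpr uint32_t kInnerScopeCallsEvalBit = 1u << 1;

constexpr uint32_t kLanguageModeBit = 1u << 0;  // sloppy = 0, strict = 1
constexpr uint32_t kUsesSuperBit = 1u << 1;

uint32_t EncodeEval(bool calls_eval, bool inner_scope_calls_eval) {
  return (calls_eval ? kScopeCallsEvalBit : 0u) |
         (inner_scope_calls_eval ? kInnerScopeCallsEvalBit : 0u);
}

uint32_t EncodeLanguageAndSuper(bool is_strict, bool uses_super_property) {
  return (is_strict ? kLanguageModeBit : 0u) |
         (uses_super_property ? kUsesSuperBit : 0u);
}

int main() {
  uint32_t eval = EncodeEval(false, true);
  assert((eval & kScopeCallsEvalBit) == 0 && (eval & kInnerScopeCallsEvalBit) != 0);
  uint32_t ls = EncodeLanguageAndSuper(true, false);
  assert((ls & kLanguageModeBit) != 0 && (ls & kUsesSuperBit) == 0);
}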
/*
- Internal data format for the backing store:
+ Internal data format for the backing store of ProducedPreparsedScopeData:
+ (Skippable function data:)
+ ------------------------------------
+ | data for inner function 1 |
+ | ... |
+ ------------------------------------
+ | data for inner function n |
+ | ... |
+ ------------------------------------
+ (Scope allocation data:)
+ ------------------------------------
+ magic value
+ ------------------------------------
+ scope positions
------------------------------------
| scope type << only in debug |
- | inner_scope_calls_eval_ |
- | data end index |
+ | eval |
| ---------------------- |
| | data for variables | |
| | ... | |
| ---------------------- |
------------------------------------
------------------------------------
- | data for inner scope_1 |
+ | data for inner scope 1 | << but not for function scopes
| ... |
------------------------------------
...
------------------------------------
- | data for inner scope_n |
+ | data for inner scope m |
| ... |
------------------------------------
- << data end index points here
- */
-void PreParsedScopeData::SaveData(Scope* scope) {
- DCHECK(!has_data_);
- DCHECK_NE(scope->end_position(), kNoSourcePosition);
- // We're not trying to save data for default constructors because the
- // PreParser doesn't construct them.
- DCHECK_IMPLIES(scope->scope_type() == ScopeType::FUNCTION_SCOPE,
- (scope->AsDeclarationScope()->function_kind() &
- kDefaultConstructor) == 0);
+ Data format for PreParsedScopeData (on the heap):
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE &&
- !scope->AsDeclarationScope()->is_arrow_scope()) {
- // This cast is OK since we're not going to have more than 2^32 elements in
- // the data. FIXME(marja): Implement limits for the data size.
- function_data_positions_[scope->start_position()] =
- static_cast<uint32_t>(backing_store_.size());
- }
+ PreParsedScopeData::scope_data:
- if (!ScopeNeedsData(scope)) {
- return;
- }
+ ------------------------------------
+ | scope_data_start |
+ ------------------------------------
+ | Skippable function data |
+ | (see above) |
+ | ... |
+ ------------------------------------
+ ------------------------------------
+ | Scope allocation data | << scope_data_start points here
+ | (see above) |
+ | ... |
+ ------------------------------------
-#ifdef DEBUG
- backing_store_.push_back(scope->scope_type());
-#endif
- backing_store_.push_back(scope->inner_scope_calls_eval());
- // Reserve space for the data end index (which we don't know yet). The end
- // index is needed for skipping over data for a function scope when we skip
- // parsing of the corresponding function.
- size_t data_end_index = backing_store_.size();
- backing_store_.push_back(0);
+ PreParsedScopeData::child_data is an array of PreParsedScopeData objects, one
+ for each skippable inner function.
- if (!scope->is_hidden()) {
- for (Variable* var : *scope->locals()) {
- if (IsDeclaredVariableMode(var->mode())) {
- SaveDataForVariable(var);
- }
+
+ ConsumedPreParsedScopeData wraps a PreParsedScopeData and reads data from it.
+
+ */
+
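A hedged sketch of the serialized layout described in the comment above, reading a plain std::vector<uint32_t> in the order the producer writes it: slot 0 holds scope_data_start, then one fixed-width record per skippable inner function, then the magic value and the outer scope's start/end positions ahead of the per-variable data. The record width follows SkippableFunctionDataOffsets and the magic constant is the patch's 0xc0de0de; everything else here is illustrative only:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uint32_t kMagicValue = 0xc0de0de;
constexpr std::size_t kSkippableRecordSize = 5;  // start, end, #params, #inner, lang+super

struct SkippableFunction {
  uint32_t start_position, end_position, num_parameters, num_inner_functions,
      language_and_super;
};

// Walks the flat array: skippable-function records first, then the scope
// allocation data, whose start is given by slot 0 and marked by kMagicValue.
std::vector<SkippableFunction> ReadSkippableFunctions(
    const std::vector<uint32_t>& data) {
  std::vector<SkippableFunction> out;
  if (data.size() < 2) return out;  // nothing was serialized
  std::size_t scope_data_start = static_cast<std::size_t>(data[0]) + 1;
  assert(scope_data_start < data.size());
  assert(data[scope_data_start] == kMagicValue);  // producer/consumer agree
  for (std::size_t i = 1; i + kSkippableRecordSize <= scope_data_start;
       i += kSkippableRecordSize) {
    out.push_back({data[i], data[i + 1], data[i + 2], data[i + 3], data[i + 4]});
  }
  return out;
}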
+ProducedPreParsedScopeData::DataGatheringScope::DataGatheringScope(
+ DeclarationScope* function_scope, PreParser* preparser)
+ : function_scope_(function_scope),
+ preparser_(preparser),
+ parent_data_(preparser->produced_preparsed_scope_data()) {
+ if (FLAG_experimental_preparser_scope_analysis) {
+ Zone* main_zone = preparser->main_zone();
+ auto* new_data = new (main_zone) ProducedPreParsedScopeData(main_zone);
+ if (parent_data_ != nullptr) {
+ parent_data_->data_for_inner_functions_.push_back(new_data);
}
+ preparser->set_produced_preparsed_scope_data(new_data);
+ function_scope->set_produced_preparsed_scope_data(new_data);
}
+}
- SaveDataForInnerScopes(scope);
-
- // FIXME(marja): see above.
- backing_store_[data_end_index] = static_cast<uint32_t>(backing_store_.size());
+ProducedPreParsedScopeData::DataGatheringScope::~DataGatheringScope() {
+ if (FLAG_experimental_preparser_scope_analysis) {
+ preparser_->set_produced_preparsed_scope_data(parent_data_);
+ }
}
-void PreParsedScopeData::AddSkippableFunction(
- int start_position, const PreParseData::FunctionData& function_data) {
- AddFunction(start_position, function_data);
- skippable_functions_.insert(start_position);
+void ProducedPreParsedScopeData::DataGatheringScope::MarkFunctionAsSkippable(
+ int end_position, int num_inner_functions) {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ DCHECK_NOT_NULL(parent_data_);
+ parent_data_->AddSkippableFunction(
+ function_scope_->start_position(), end_position,
+ function_scope_->num_parameters(), num_inner_functions,
+ function_scope_->language_mode(), function_scope_->uses_super_property());
}
-void PreParsedScopeData::AddFunction(
- int start_position, const PreParseData::FunctionData& function_data) {
- DCHECK(function_data.is_valid());
- function_index_.AddFunctionData(start_position, function_data);
+void ProducedPreParsedScopeData::AddSkippableFunction(
+ int start_position, int end_position, int num_parameters,
+ int num_inner_functions, LanguageMode language_mode,
+ bool uses_super_property) {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ DCHECK_EQ(scope_data_start_, -1);
+ DCHECK(previously_produced_preparsed_scope_data_.is_null());
+
+ size_t current_size = backing_store_.size();
+ backing_store_.resize(current_size + SkippableFunctionDataOffsets::kSize);
+ backing_store_[current_size + SkippableFunctionDataOffsets::kStartPosition] =
+ start_position;
+ backing_store_[current_size + SkippableFunctionDataOffsets::kEndPosition] =
+ end_position;
+ backing_store_[current_size + SkippableFunctionDataOffsets::kNumParameters] =
+ num_parameters;
+ backing_store_[current_size +
+ SkippableFunctionDataOffsets::kNumInnerFunctions] =
+ num_inner_functions;
+
+ uint32_t language_and_super = LanguageField::encode(language_mode) |
+ UsesSuperField::encode(uses_super_property);
+
+ backing_store_[current_size +
+ SkippableFunctionDataOffsets::kLanguageAndSuper] =
+ language_and_super;
}
-void PreParsedScopeData::RestoreData(DeclarationScope* scope) const {
- uint32_t index = 0;
+void ProducedPreParsedScopeData::SaveScopeAllocationData(
+ DeclarationScope* scope) {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ DCHECK(previously_produced_preparsed_scope_data_.is_null());
+ DCHECK_EQ(scope_data_start_, -1);
+ DCHECK_EQ(backing_store_.size() % SkippableFunctionDataOffsets::kSize, 0);
- DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
+ scope_data_start_ = static_cast<int>(backing_store_.size());
+
+ // If there are no skippable inner functions, we don't need to save anything.
+ if (backing_store_.size() == 0) {
+ return;
+ }
- bool success = FindFunctionData(scope->start_position(), &index);
- DCHECK(success);
- USE(success);
+ // For sanity checks.
+ backing_store_.push_back(kMagicValue);
+ backing_store_.push_back(scope->start_position());
+ backing_store_.push_back(scope->end_position());
- RestoreData(scope, &index);
+ // For a data integrity check, write a value between data about skipped inner
+ // funcs and data about variables.
+ SaveDataForScope(scope);
}
-void PreParsedScopeData::RestoreData(Scope* scope, uint32_t* index_ptr) const {
- // It's possible that scope is not present in the data at all (since PreParser
- // doesn't create the corresponding scope). In this case, the Scope won't
- // contain any variables for which we need the data.
- if (!ScopeNeedsData(scope) && !IsSkippedFunctionScope(scope)) {
- return;
+MaybeHandle<PreParsedScopeData> ProducedPreParsedScopeData::Serialize(
+ Isolate* isolate) const {
+ if (!previously_produced_preparsed_scope_data_.is_null()) {
+ DCHECK_EQ(backing_store_.size(), 0);
+ DCHECK_EQ(data_for_inner_functions_.size(), 0);
+ return previously_produced_preparsed_scope_data_;
+ }
+ // FIXME(marja): save space by using a byte array and converting
+ // function data to bytes.
+ size_t length = backing_store_.size();
+ if (length == 0) {
+ return MaybeHandle<PreParsedScopeData>();
}
- uint32_t& index = *index_ptr;
+ Handle<PodArray<uint32_t>> data_array =
+ PodArray<uint32_t>::New(isolate, static_cast<int>(length + 1), TENURED);
-#ifdef DEBUG
- // Data integrity check.
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE &&
- !scope->AsDeclarationScope()->is_arrow_scope()) {
- const PreParseData::FunctionData& data =
- function_index_.GetFunctionData(scope->start_position());
- DCHECK(data.is_valid());
- DCHECK_EQ(data.end, scope->end_position());
- // FIXME(marja): unify num_parameters too and DCHECK here.
- DCHECK_EQ(data.language_mode, scope->language_mode());
- DCHECK_EQ(data.uses_super_property,
- scope->AsDeclarationScope()->uses_super_property());
- uint32_t index_from_data = 0;
- FindFunctionData(scope->start_position(), &index_from_data);
- DCHECK_EQ(index_from_data, index);
+ DCHECK_GE(scope_data_start_, 0);
+ data_array->set(0, scope_data_start_);
+ {
+ int i = 1;
+ for (const auto& item : backing_store_) {
+ data_array->set(i++, item);
+ }
}
-#endif
- if (IsSkippedFunctionScope(scope)) {
- // This scope is a function scope representing a function we want to
- // skip. So just skip over its data.
- DCHECK(!scope->must_use_preparsed_scope_data());
- // Check that we're moving forward (not backward) in the data.
- DCHECK_GT(backing_store_[index + 2], index);
- index = backing_store_[index + 2];
- return;
+ Handle<PreParsedScopeData> data = isolate->factory()->NewPreParsedScopeData();
+
+ int child_data_length = static_cast<int>(data_for_inner_functions_.size());
+ if (child_data_length == 0) {
+ data->set_child_data(*(isolate->factory()->empty_fixed_array()));
+ } else {
+ Handle<FixedArray> child_array =
+ isolate->factory()->NewFixedArray(child_data_length, TENURED);
+ int i = 0;
+ for (const auto& item : data_for_inner_functions_) {
+ MaybeHandle<PreParsedScopeData> maybe_child_data =
+ item->Serialize(isolate);
+ if (maybe_child_data.is_null()) {
+ child_array->set(i++, *(isolate->factory()->null_value()));
+ } else {
+ Handle<PreParsedScopeData> child_data =
+ maybe_child_data.ToHandleChecked();
+ child_array->set(i++, *child_data);
+ }
+ }
+ data->set_child_data(*child_array);
}
- DCHECK_GE(backing_store_.size(), index + 3);
- DCHECK_EQ(backing_store_[index++], scope->scope_type());
+ data->set_scope_data(*data_array);
+ return data;
+}
- if (backing_store_[index++]) {
- scope->RecordEvalCall();
+bool ProducedPreParsedScopeData::ScopeNeedsData(Scope* scope) {
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ // Default constructors don't need data (they cannot contain inner functions
+ // defined by the user). Other functions do.
+ return !IsDefaultConstructor(scope->AsDeclarationScope()->function_kind());
}
- uint32_t data_end_index = backing_store_[index++];
- USE(data_end_index);
-
if (!scope->is_hidden()) {
for (Variable* var : *scope->locals()) {
- if (var->mode() == VAR || var->mode() == LET || var->mode() == CONST) {
- RestoreDataForVariable(var, index_ptr);
+ if (IsDeclaredVariableMode(var->mode())) {
+ return true;
}
}
}
-
- RestoreDataForInnerScopes(scope, index_ptr);
-
- DCHECK_EQ(data_end_index, index);
-}
-
-Handle<PodArray<uint32_t>> PreParsedScopeData::Serialize(
- Isolate* isolate) const {
- // FIXME(marja): save space by using a byte array and converting
- // function_index_ to bytes.
- size_t length =
- function_index_.size() * kFunctionDataSize + backing_store_.size() + 1;
- Handle<PodArray<uint32_t>> array =
- PodArray<uint32_t>::New(isolate, static_cast<int>(length), TENURED);
-
- array->set(0, static_cast<uint32_t>(function_index_.size()));
- int i = 1;
- for (const auto& item : function_index_) {
- const auto& it = function_data_positions_.find(item.first);
- DCHECK(it != function_data_positions_.end());
- const PreParseData::FunctionData& function_data = item.second;
- array->set(i++, item.first); // start position
- array->set(i++, it->second); // position in data
- array->set(i++, function_data.end);
- array->set(i++, function_data.num_parameters);
- array->set(i++, function_data.num_inner_functions);
- array->set(i++, function_data.language_mode);
- array->set(i++, function_data.uses_super_property);
- array->set(i++, skippable_functions_.find(item.first) !=
- skippable_functions_.end());
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ if (ScopeNeedsData(inner)) {
+ return true;
+ }
}
+ return false;
+}
- for (size_t j = 0; j < backing_store_.size(); ++j) {
- array->set(i++, static_cast<uint32_t>(backing_store_[j]));
+bool ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(Scope* scope) {
+ // Lazy non-arrow function scopes are skippable. Lazy functions are exactly
+ // those Scopes which have their own ProducedPreParsedScopeData object. This
+ // logic ensures that the scope allocation data is consistent with the
+ // skippable function data (both agree on where the lazy function boundaries
+ // are).
+ if (scope->scope_type() != ScopeType::FUNCTION_SCOPE) {
+ return false;
}
- DCHECK_EQ(array->length(), length);
- return array;
+ DeclarationScope* declaration_scope = scope->AsDeclarationScope();
+ return !declaration_scope->is_arrow_scope() &&
+ declaration_scope->produced_preparsed_scope_data() != nullptr;
}
-void PreParsedScopeData::Deserialize(PodArray<uint32_t>* array) {
- has_data_ = true;
- DCHECK_NOT_NULL(array);
- if (array->length() == 0) {
- return;
- }
- int function_count = array->get(0);
- CHECK(array->length() > function_count * kFunctionDataSize);
- if (function_count == 0) {
+void ProducedPreParsedScopeData::SaveDataForScope(Scope* scope) {
+ DCHECK_NE(scope->end_position(), kNoSourcePosition);
+
+ // We're not trying to save data for default constructors because the
+ // PreParser doesn't construct them.
+ DCHECK_IMPLIES(scope->scope_type() == ScopeType::FUNCTION_SCOPE,
+ (scope->AsDeclarationScope()->function_kind() &
+ kDefaultConstructor) == 0);
+
+ if (!ScopeNeedsData(scope)) {
return;
}
- int i = 1;
- for (; i < function_count * kFunctionDataSize + 1; i += kFunctionDataSize) {
- int start = array->get(i);
- function_data_positions_[start] = array->get(i + 1);
- function_index_.AddFunctionData(
- start, PreParseData::FunctionData(
- array->get(i + 2), array->get(i + 3), array->get(i + 4),
- LanguageMode(array->get(i + 5)), array->get(i + 6)));
- if (array->get(i + 7)) {
- skippable_functions_.insert(start);
+
+#ifdef DEBUG
+ backing_store_.push_back(scope->scope_type());
+#endif
+
+ uint32_t eval =
+ ScopeCallsEvalField::encode(scope->calls_eval()) |
+ InnerScopeCallsEvalField::encode(scope->inner_scope_calls_eval());
+ backing_store_.push_back(eval);
+
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ Variable* function = scope->AsDeclarationScope()->function_var();
+ if (function != nullptr) {
+ SaveDataForVariable(function);
}
}
- CHECK_EQ(function_index_.size(), function_count);
- backing_store_.reserve(array->length() - i);
- for (; i < array->length(); ++i) {
- backing_store_.push_back(array->get(i));
+ for (Variable* var : *scope->locals()) {
+ if (IsDeclaredVariableMode(var->mode())) {
+ SaveDataForVariable(var);
+ }
}
-}
-PreParseData::FunctionData PreParsedScopeData::FindSkippableFunction(
- int start_pos) const {
- if (skippable_functions_.find(start_pos) == skippable_functions_.end()) {
- return PreParseData::FunctionData();
- }
- return function_index_.GetFunctionData(start_pos);
+ SaveDataForInnerScopes(scope);
}
-void PreParsedScopeData::SaveDataForVariable(Variable* var) {
+void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
#ifdef DEBUG
// Store the variable name in debug mode; this way we can check that we
// restore data to the correct variable.
@@ -271,91 +335,190 @@ void PreParsedScopeData::SaveDataForVariable(Variable* var) {
backing_store_.push_back(variable_data);
}
-void PreParsedScopeData::RestoreDataForVariable(Variable* var,
- uint32_t* index_ptr) const {
- uint32_t& index = *index_ptr;
-#ifdef DEBUG
- const AstRawString* name = var->raw_name();
- DCHECK_GT(backing_store_.size(), index + name->length());
- DCHECK_EQ(backing_store_[index++], static_cast<uint32_t>(name->length()));
- for (int i = 0; i < name->length(); ++i) {
- DCHECK_EQ(backing_store_[index++], name->raw_data()[i]);
- }
-#endif
- DCHECK_GT(backing_store_.size(), index);
- byte variable_data = backing_store_[index++];
- if (VariableIsUsedField::decode(variable_data)) {
- var->set_is_used();
- }
- if (VariableMaybeAssignedField::decode(variable_data)) {
- var->set_maybe_assigned();
- }
- if (VariableContextAllocatedField::decode(variable_data)) {
- var->ForceContextAllocation();
- }
-}
-
-void PreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
+void ProducedPreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
// Inner scopes are stored in the reverse order, but we'd like to write the
// data in the logical order. There might be many inner scopes, so we don't
// want to recurse here.
std::vector<Scope*> scopes;
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
+ if (ScopeIsSkippableFunctionScope(inner)) {
+ // Don't save data about function scopes, since they'll have their own
+ // ProducedPreParsedScopeData where their data is saved.
+ DCHECK(inner->AsDeclarationScope()->produced_preparsed_scope_data() !=
+ nullptr);
+ continue;
+ }
scopes.push_back(inner);
}
- for (int i = static_cast<int>(scopes.size()) - 1; i >= 0; --i) {
- SaveData(scopes[i]);
+ for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
+ SaveDataForScope(*it);
}
}
-void PreParsedScopeData::RestoreDataForInnerScopes(Scope* scope,
- uint32_t* index_ptr) const {
- std::vector<Scope*> scopes;
- for (Scope* inner = scope->inner_scope(); inner != nullptr;
- inner = inner->sibling()) {
- scopes.push_back(inner);
- }
- for (int i = static_cast<int>(scopes.size()) - 1; i >= 0; --i) {
- RestoreData(scopes[i], index_ptr);
- }
+void ConsumedPreParsedScopeData::SetData(Handle<PreParsedScopeData> data) {
+ DCHECK(data->IsPreParsedScopeData());
+ data_ = data;
+#ifdef DEBUG
+ DisallowHeapAllocation no_gc;
+ PodArray<uint32_t>* scope_data = data_->scope_data();
+ DCHECK_GT(scope_data->length(), 2);
+ DCHECK_EQ(scope_data->get(scope_data->get(0) + 1), kMagicValue);
+#endif
}
-bool PreParsedScopeData::FindFunctionData(int start_pos,
- uint32_t* index) const {
- auto it = function_data_positions_.find(start_pos);
- if (it == function_data_positions_.end()) {
- return false;
+ProducedPreParsedScopeData*
+ConsumedPreParsedScopeData::GetDataForSkippableFunction(
+ Zone* zone, int start_position, int* end_position, int* num_parameters,
+ int* num_inner_functions, bool* uses_super_property,
+ LanguageMode* language_mode) {
+ DisallowHeapAllocation no_gc;
+ PodArray<uint32_t>* scope_data = data_->scope_data();
+
+ // The skippable function *must* be the next function in the data. Use the
+ // start position as a sanity check.
+ CHECK_GE(scope_data->length(), index_ + SkippableFunctionDataOffsets::kSize);
+ int start_position_from_data =
+ scope_data->get(index_ + SkippableFunctionDataOffsets::kStartPosition);
+ CHECK_EQ(start_position, start_position_from_data);
+
+ *end_position =
+ scope_data->get(index_ + SkippableFunctionDataOffsets::kEndPosition);
+ DCHECK_GT(*end_position, start_position);
+ *num_parameters =
+ scope_data->get(index_ + SkippableFunctionDataOffsets::kNumParameters);
+ *num_inner_functions = scope_data->get(
+ index_ + SkippableFunctionDataOffsets::kNumInnerFunctions);
+
+ int language_and_super =
+ scope_data->get(index_ + SkippableFunctionDataOffsets::kLanguageAndSuper);
+ *language_mode = LanguageMode(LanguageField::decode(language_and_super));
+ *uses_super_property = UsesSuperField::decode(language_and_super);
+
+ index_ += SkippableFunctionDataOffsets::kSize;
+
+ // Retrieve the corresponding PreParsedScopeData and associate it to the
+  // skipped function. If the skipped function contains inner functions, those
+ // can be skipped when the skipped function is eagerly parsed.
+ FixedArray* children = data_->child_data();
+ CHECK_GT(children->length(), child_index_);
+ Object* child_data = children->get(child_index_++);
+ if (!child_data->IsPreParsedScopeData()) {
+ return nullptr;
}
- *index = it->second;
- return true;
+ Handle<PreParsedScopeData> child_data_handle(
+ PreParsedScopeData::cast(child_data));
+ return new (zone) ProducedPreParsedScopeData(child_data_handle, zone);
}
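GetDataForSkippableFunction relies on a strict consumption discipline: records are read in order, the caller's start position must match the next record, and the cursor advances by exactly one record width. A standalone sketch of that discipline with a plain vector and exceptions in place of PodArray and CHECK (illustrative, not the V8 types):

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <utility>
#include <vector>

struct SkippableFunctionRecord {
  uint32_t start_position, end_position, num_parameters, num_inner_functions,
      language_and_super;
};

class SkippableFunctionReader {
 public:
  explicit SkippableFunctionReader(std::vector<uint32_t> data)
      : data_(std::move(data)), index_(1) {}  // slot 0 is the header

  // The next skippable function *must* start at expected_start_position;
  // anything else means producer and consumer disagree about the layout.
  SkippableFunctionRecord Next(uint32_t expected_start_position) {
    if (index_ + 5 > data_.size()) throw std::runtime_error("out of data");
    if (data_[index_] != expected_start_position)
      throw std::runtime_error("start position mismatch");
    SkippableFunctionRecord r{data_[index_], data_[index_ + 1],
                              data_[index_ + 2], data_[index_ + 3],
                              data_[index_ + 4]};
    index_ += 5;  // advance past exactly one record
    return r;
  }

 private:
  std::vector<uint32_t> data_;
  std::size_t index_;
};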
-bool PreParsedScopeData::ScopeNeedsData(Scope* scope) {
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
- // Default constructors don't need data (they cannot contain inner functions
- // defined by the user). Other functions do.
- return !IsDefaultConstructor(scope->AsDeclarationScope()->function_kind());
+void ConsumedPreParsedScopeData::RestoreScopeAllocationData(
+ DeclarationScope* scope) {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
+ DCHECK(!data_.is_null());
+
+ DisallowHeapAllocation no_gc;
+ PodArray<uint32_t>* scope_data = data_->scope_data();
+ int magic_value_from_data = scope_data->get(index_++);
+ // Check that we've consumed all inner function data.
+ CHECK_EQ(magic_value_from_data, kMagicValue);
+
+ int start_position_from_data = scope_data->get(index_++);
+ int end_position_from_data = scope_data->get(index_++);
+ CHECK_EQ(start_position_from_data, scope->start_position());
+ CHECK_EQ(end_position_from_data, scope->end_position());
+
+ RestoreData(scope, scope_data);
+
+ // Check that we consumed all scope data.
+ DCHECK_EQ(index_, scope_data->length());
+}
+
+void ConsumedPreParsedScopeData::SkipFunctionDataForTesting() {
+ DCHECK_EQ(index_, 1);
+ DisallowHeapAllocation no_gc;
+ PodArray<uint32_t>* scope_data = data_->scope_data();
+ DCHECK_GT(scope_data->length(), 2);
+ index_ = scope_data->get(0) + 1;
+ DCHECK_EQ(scope_data->get(index_), kMagicValue);
+}
+
+void ConsumedPreParsedScopeData::RestoreData(Scope* scope,
+ PodArray<uint32_t>* scope_data) {
+ if (scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->is_skipped_function()) {
+ return;
}
- if (!scope->is_hidden()) {
- for (Variable* var : *scope->locals()) {
- if (var->mode() == VAR || var->mode() == LET || var->mode() == CONST) {
- return true;
- }
+
+ // It's possible that scope is not present in the data at all (since PreParser
+ // doesn't create the corresponding scope). In this case, the Scope won't
+ // contain any variables for which we need the data.
+ if (!ProducedPreParsedScopeData::ScopeNeedsData(scope)) {
+ return;
+ }
+
+ // scope_type is stored only in debug mode.
+ CHECK_GE(scope_data->length(), index_ + 1);
+ DCHECK_GE(scope_data->length(), index_ + 2);
+ DCHECK_EQ(scope_data->get(index_++), scope->scope_type());
+
+ uint32_t eval = scope_data->get(index_++);
+ if (ScopeCallsEvalField::decode(eval)) {
+ scope->RecordEvalCall();
+ }
+ if (InnerScopeCallsEvalField::decode(eval)) {
+ scope->RecordInnerScopeEvalCall();
+ }
+
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ Variable* function = scope->AsDeclarationScope()->function_var();
+ if (function != nullptr) {
+ RestoreDataForVariable(function, scope_data);
}
}
- for (Scope* inner = scope->inner_scope(); inner != nullptr;
- inner = inner->sibling()) {
- if (ScopeNeedsData(inner)) {
- return true;
+
+ for (Variable* var : *scope->locals()) {
+ if (IsDeclaredVariableMode(var->mode())) {
+ RestoreDataForVariable(var, scope_data);
}
}
- return false;
+
+ RestoreDataForInnerScopes(scope, scope_data);
}
-bool PreParsedScopeData::IsSkippedFunctionScope(Scope* scope) {
- return scope->is_declaration_scope() &&
- scope->AsDeclarationScope()->is_skipped_function();
+void ConsumedPreParsedScopeData::RestoreDataForVariable(
+ Variable* var, PodArray<uint32_t>* scope_data) {
+#ifdef DEBUG
+ const AstRawString* name = var->raw_name();
+ DCHECK_GT(scope_data->length(), index_ + name->length());
+ DCHECK_EQ(scope_data->get(index_++), static_cast<uint32_t>(name->length()));
+ for (int i = 0; i < name->length(); ++i) {
+ DCHECK_EQ(scope_data->get(index_++), name->raw_data()[i]);
+ }
+#endif
+ CHECK_GT(scope_data->length(), index_);
+ byte variable_data = scope_data->get(index_++);
+ if (VariableIsUsedField::decode(variable_data)) {
+ var->set_is_used();
+ }
+ if (VariableMaybeAssignedField::decode(variable_data)) {
+ var->set_maybe_assigned();
+ }
+ if (VariableContextAllocatedField::decode(variable_data)) {
+ var->ForceContextAllocation();
+ }
+}
+
+void ConsumedPreParsedScopeData::RestoreDataForInnerScopes(
+ Scope* scope, PodArray<uint32_t>* scope_data) {
+ std::vector<Scope*> scopes;
+ for (Scope* inner = scope->inner_scope(); inner != nullptr;
+ inner = inner->sibling()) {
+ scopes.push_back(inner);
+ }
+ for (auto it = scopes.rbegin(); it != scopes.rend(); ++it) {
+ RestoreData(*it, scope_data);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index 5d4fc3a3a0..5ab25b9cf4 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -10,8 +10,10 @@
#include <vector>
#include "src/globals.h"
-#include "src/objects.h"
+#include "src/handles.h"
+#include "src/objects/shared-function-info.h"
#include "src/parsing/preparse-data.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -19,6 +21,9 @@ namespace internal {
template <typename T>
class Handle;
+class PreParser;
+class PreParsedScopeData;
+
/*
Skipping inner functions.
@@ -53,77 +58,123 @@ class Handle;
For each Scope:
- inner_scope_calls_eval_.
- PreParsedScopeData implements storing and restoring the above mentioned data.
+ ProducedPreParsedScopeData implements storing the above mentioned data and
+ ConsumedPreParsedScopeData implements restoring it (= setting the context
+ allocation status of the variables in a Scope (and its subscopes) based on the
+ data).
*/
-class PreParsedScopeData {
+class ProducedPreParsedScopeData : public ZoneObject {
public:
- PreParsedScopeData() {}
- ~PreParsedScopeData() {}
+ // Create a ProducedPreParsedScopeData object which will collect data as we
+ // parse.
+ explicit ProducedPreParsedScopeData(Zone* zone)
+ : backing_store_(zone),
+ data_for_inner_functions_(zone),
+ scope_data_start_(-1) {}
+
+ // Create a ProducedPreParsedScopeData which is just a proxy for a previous
+ // produced PreParsedScopeData.
+ ProducedPreParsedScopeData(Handle<PreParsedScopeData> data, Zone* zone)
+ : backing_store_(zone),
+ data_for_inner_functions_(zone),
+ scope_data_start_(-1),
+ previously_produced_preparsed_scope_data_(data) {}
+
+ // For gathering the inner function data and splitting it up according to the
+ // laziness boundaries. Each lazy function gets its own
+ // ProducedPreParsedScopeData, and so do all lazy functions inside it.
+ class DataGatheringScope {
+ public:
+ DataGatheringScope(DeclarationScope* function_scope, PreParser* preparser);
+ ~DataGatheringScope();
+
+ void MarkFunctionAsSkippable(int end_position, int num_inner_functions);
+
+ private:
+ DeclarationScope* function_scope_;
+ PreParser* preparser_;
+ ProducedPreParsedScopeData* parent_data_;
+
+ DISALLOW_COPY_AND_ASSIGN(DataGatheringScope);
+ };
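DataGatheringScope is an RAII helper: the constructor (see the .cc changes above) installs a fresh ProducedPreParsedScopeData on the preparser and links it into the parent's data_for_inner_functions_, and the destructor restores the parent, so nesting follows the lexical nesting of lazy functions. A hedged, self-contained sketch of the same save-and-restore pattern with a minimal collector in place of the preparser:

#include <vector>

// Stand-in for ProducedPreParsedScopeData: each lazy function gets its own
// node, and inner lazy functions hang off their enclosing function's node.
struct FunctionData {
  std::vector<FunctionData*> inner;
};

struct Collector {  // stand-in for the PreParser's "current data" pointer
  FunctionData* current = nullptr;
};

class DataGatheringScope {  // mirrors the ctor/dtor pairing in the patch
 public:
  DataGatheringScope(Collector* collector, FunctionData* fresh)
      : collector_(collector), parent_(collector->current) {
    if (parent_ != nullptr) parent_->inner.push_back(fresh);
    collector_->current = fresh;  // gather data into the new node...
  }
  ~DataGatheringScope() { collector_->current = parent_; }  // ...then restore

 private:
  Collector* collector_;
  FunctionData* parent_;
};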
// Saves the information needed for allocating the Scope's (and its
// subscopes') variables.
- void SaveData(Scope* scope);
-
- // Save data for a function we might skip later. The data is used later for
- // creating a FunctionLiteral.
- void AddSkippableFunction(int start_position,
- const PreParseData::FunctionData& function_data);
-
- // Save variable allocation data for function which contains skippable
- // functions.
- void AddFunction(int start_position,
- const PreParseData::FunctionData& function_data);
-
- // FIXME(marja): We need different kinds of data for the two types of
- // functions. For a skippable function we need the end position + the data
- // needed for creating a FunctionLiteral. For a function which contains
- // skippable functions, we need the data affecting context allocation status
- // of the variables (but e.g., no end position). Currently we just save the
- // same data for both. Here we can save less data.
-
- // Restores the information needed for allocating the Scopes's (and its
- // subscopes') variables.
- void RestoreData(Scope* scope, uint32_t* index_ptr) const;
- void RestoreData(DeclarationScope* scope) const;
-
- Handle<PodArray<uint32_t>> Serialize(Isolate* isolate) const;
- void Deserialize(PodArray<uint32_t>* array);
+ void SaveScopeAllocationData(DeclarationScope* scope);
- bool Consuming() const { return has_data_; }
+ // If there is data (if the Scope contains skippable inner functions), move
+ // the data into the heap and return a Handle to it; otherwise return a null
+ // MaybeHandle.
+ MaybeHandle<PreParsedScopeData> Serialize(Isolate* isolate) const;
- bool Producing() const { return !has_data_; }
-
- PreParseData::FunctionData FindSkippableFunction(int start_pos) const;
+ static bool ScopeNeedsData(Scope* scope);
+ static bool ScopeIsSkippableFunctionScope(Scope* scope);
private:
- friend class ScopeTestHelper;
+ void AddSkippableFunction(int start_position, int end_position,
+ int num_parameters, int num_inner_functions,
+ LanguageMode language_mode,
+ bool uses_super_property);
+ void SaveDataForScope(Scope* scope);
void SaveDataForVariable(Variable* var);
- void RestoreDataForVariable(Variable* var, uint32_t* index_ptr) const;
void SaveDataForInnerScopes(Scope* scope);
- void RestoreDataForInnerScopes(Scope* scope, uint32_t* index_ptr) const;
- bool FindFunctionData(int start_pos, uint32_t* index) const;
-
- static bool ScopeNeedsData(Scope* scope);
- static bool IsSkippedFunctionScope(Scope* scope);
// TODO(marja): Make the backing store more efficient once we know exactly
// what data is needed.
- std::vector<uint32_t> backing_store_;
+ ZoneDeque<uint32_t> backing_store_;
+ ZoneDeque<ProducedPreParsedScopeData*> data_for_inner_functions_;
+ // The backing store contains data about inner functions and then data about
+ // this scope's (and its subscopes') variables. scope_data_start_ marks where
+ // the latter starts.
+ int scope_data_start_;
+
+ // ProducedPreParsedScopeData can also mask a Handle<PreParsedScopeData>
+ // which was produced already earlier. This happens for deeper lazy functions.
+ Handle<PreParsedScopeData> previously_produced_preparsed_scope_data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProducedPreParsedScopeData);
+};
- // Start pos -> FunctionData. Used for creating FunctionLiterals for skipped
- // functions (when they're actually skipped).
- PreParseData function_index_;
- // Start pos -> position in backing_store_.
- std::unordered_map<uint32_t, uint32_t> function_data_positions_;
- // Start positions of skippable functions.
- std::set<uint32_t> skippable_functions_;
+class ConsumedPreParsedScopeData {
+ public:
+ // Real data starts from index 1 (see data format description in the .cc
+ // file).
+ ConsumedPreParsedScopeData() : index_(1), child_index_(0) {}
+ ~ConsumedPreParsedScopeData() {}
+
+ void SetData(Handle<PreParsedScopeData> data);
+
+ bool HasData() const { return !data_.is_null(); }
+
+ ProducedPreParsedScopeData* GetDataForSkippableFunction(
+ Zone* zone, int start_position, int* end_position, int* num_parameters,
+ int* num_inner_functions, bool* uses_super_property,
+ LanguageMode* language_mode);
+
+ // Restores the information needed for allocating the Scope's (and its
+ // subscopes') variables.
+ void RestoreScopeAllocationData(DeclarationScope* scope);
+
+ // Skips the data about skippable functions and moves straight to the scope
+ // allocation data. Useful for tests which only want to verify the scope
+ // allocation data.
+ void SkipFunctionDataForTesting();
+
+ private:
+ void RestoreData(Scope* scope, PodArray<uint32_t>* scope_data);
+ void RestoreDataForVariable(Variable* var, PodArray<uint32_t>* scope_data);
+ void RestoreDataForInnerScopes(Scope* scope, PodArray<uint32_t>* scope_data);
- bool has_data_ = false;
+ Handle<PreParsedScopeData> data_;
+ // When consuming the data, these indexes point to the data we're going to
+ // consume next.
+ int index_;
+ int child_index_;
- DISALLOW_COPY_AND_ASSIGN(PreParsedScopeData);
+ DISALLOW_COPY_AND_ASSIGN(ConsumedPreParsedScopeData);
};
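// Illustrative round trip (assumed wiring; names other than the classes
// declared above are placeholders): the preparser fills a
// ProducedPreParsedScopeData via a DataGatheringScope, the data is serialized
// to the heap, and a later eager parse of the same function consumes it.
//
//   MaybeHandle<PreParsedScopeData> maybe_data = produced->Serialize(isolate);
//   Handle<PreParsedScopeData> data;
//   if (maybe_data.ToHandle(&data)) {
//     ConsumedPreParsedScopeData consumed;
//     consumed.SetData(data);
//     consumed.RestoreScopeAllocationData(function_scope);
//   }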
} // namespace internal
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index c408af88c9..37f8d77851 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -131,8 +131,11 @@ PreParser::PreParseResult PreParser::PreParseProgram(bool is_module) {
}
PreParser::PreParseResult PreParser::PreParseFunction(
- FunctionKind kind, DeclarationScope* function_scope, bool parsing_module,
- bool is_inner_function, bool may_abort, int* use_counts) {
+ const AstRawString* function_name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, bool parsing_module,
+ bool is_inner_function, bool may_abort, int* use_counts,
+ ProducedPreParsedScopeData** produced_preparsed_scope_data) {
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
parsing_module_ = parsing_module;
use_counts_ = use_counts;
@@ -142,6 +145,17 @@ PreParser::PreParseResult PreParser::PreParseFunction(
function_scope->set_is_being_lazily_parsed(true);
#endif
+ // Start collecting data for a new function which might contain skippable
+ // functions.
+ std::unique_ptr<ProducedPreParsedScopeData::DataGatheringScope>
+ produced_preparsed_scope_data_scope;
+ if (FLAG_experimental_preparser_scope_analysis && !IsArrowFunction(kind)) {
+ track_unresolved_variables_ = true;
+ produced_preparsed_scope_data_scope.reset(
+ new ProducedPreParsedScopeData::DataGatheringScope(function_scope,
+ this));
+ }
+
// In the preparser, we use the function literal ids to count how many
// FunctionLiterals were encountered. The PreParser doesn't actually persist
// FunctionLiterals, so their IDs don't matter.
@@ -207,22 +221,12 @@ PreParser::PreParseResult PreParser::PreParseFunction(
}
if (!IsArrowFunction(kind) && track_unresolved_variables_) {
+ CreateFunctionNameAssignment(function_name, function_type, function_scope);
+
// Declare arguments after parsing the function since lexical 'arguments'
// masks the arguments object. Declare arguments before declaring the
// function var since the arguments object masks 'function arguments'.
function_scope->DeclareArguments(ast_value_factory());
-
- if (FLAG_experimental_preparser_scope_analysis &&
- preparsed_scope_data_ != nullptr) {
- // We're not going to skip this function, but it might contain skippable
- // functions inside it.
- preparsed_scope_data_->AddFunction(
- scope()->start_position(),
- PreParseData::FunctionData(
- scanner()->peek_location().end_pos, scope()->num_parameters(),
- GetLastFunctionLiteralId(), scope()->language_mode(),
- scope()->AsDeclarationScope()->uses_super_property()));
- }
}
use_counts_ = nullptr;
@@ -246,6 +250,8 @@ PreParser::PreParseResult PreParser::PreParseFunction(
ValidateFormalParameters(function_scope->language_mode(),
allow_duplicate_parameters,
CHECK_OK_VALUE(kPreParseSuccess));
+
+ *produced_preparsed_scope_data = produced_preparsed_scope_data_;
}
if (is_strict(function_scope->language_mode())) {
@@ -288,6 +294,20 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
+
+ // Start collecting data for a new function which might contain skippable
+ // functions.
+ std::unique_ptr<ProducedPreParsedScopeData::DataGatheringScope>
+ produced_preparsed_scope_data_scope;
+ if (!function_state_->next_function_is_likely_called() &&
+ produced_preparsed_scope_data_ != nullptr) {
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
+ DCHECK(track_unresolved_variables_);
+ produced_preparsed_scope_data_scope.reset(
+ new ProducedPreParsedScopeData::DataGatheringScope(function_scope,
+ this));
+ }
+
FunctionState function_state(&function_state_, &scope_, function_scope);
DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
@@ -333,14 +353,9 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
- if (FLAG_experimental_preparser_scope_analysis &&
- track_unresolved_variables_ && preparsed_scope_data_ != nullptr) {
- preparsed_scope_data_->AddSkippableFunction(
- start_position,
- PreParseData::FunctionData(
- end_position, scope()->num_parameters(),
- GetLastFunctionLiteralId() - func_id, scope()->language_mode(),
- scope()->AsDeclarationScope()->uses_super_property()));
+ if (produced_preparsed_scope_data_scope) {
+ produced_preparsed_scope_data_scope->MarkFunctionAsSkippable(
+ end_position, GetLastFunctionLiteralId() - func_id);
}
if (FLAG_trace_preparse) {
PrintF(" [%s]: %i-%i\n",
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 3bb85f0b20..63352153df 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -22,6 +22,8 @@ namespace internal {
// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
// used.
+class ProducedPreParsedScopeData;
+
class PreParserIdentifier {
public:
PreParserIdentifier() : type_(kUnknownIdentifier) {}
@@ -522,9 +524,11 @@ class PreParserStatement {
PreParserStatementList statements() { return PreParserStatementList(); }
void set_scope(Scope* scope) {}
- void Initialize(PreParserExpression cond, PreParserStatement body) {}
+ void Initialize(PreParserExpression cond, PreParserStatement body,
+ const SourceRange& body_range = {}) {}
void Initialize(PreParserStatement init, PreParserExpression cond,
- PreParserStatement next, PreParserStatement body) {}
+ PreParserStatement next, PreParserStatement body,
+ const SourceRange& body_range = {}) {}
private:
enum Type {
@@ -642,10 +646,11 @@ class PreParserFactory {
// default value inside an arrow function parameter list.
return PreParserExpression::Assignment(left.variables_);
}
- PreParserExpression NewSuspend(PreParserExpression generator_object,
- PreParserExpression expression, int pos,
- Suspend::OnException on_exception,
- SuspendFlags flags) {
+ PreParserExpression NewYield(PreParserExpression expression, int pos,
+ Suspend::OnAbruptResume on_abrupt_resume) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewAwait(PreParserExpression expression, int pos) {
return PreParserExpression::Default();
}
PreParserExpression NewConditional(PreParserExpression condition,
@@ -674,12 +679,14 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
- PreParserStatement NewReturnStatement(PreParserExpression expression,
- int pos) {
+ PreParserStatement NewReturnStatement(
+ PreParserExpression expression, int pos,
+ int continuation_pos = kNoSourcePosition) {
return PreParserStatement::Jump();
}
- PreParserStatement NewAsyncReturnStatement(PreParserExpression expression,
- int pos) {
+ PreParserStatement NewAsyncReturnStatement(
+ PreParserExpression expression, int pos,
+ int continuation_pos = kNoSourcePosition) {
return PreParserStatement::Jump();
}
PreParserExpression NewFunctionLiteral(
@@ -688,7 +695,9 @@ class PreParserFactory {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
- bool has_braces, int function_literal_id) {
+ bool has_braces, int function_literal_id,
+ ProducedPreParsedScopeData* produced_preparsed_scope_data = nullptr) {
+ DCHECK_NULL(produced_preparsed_scope_data);
return PreParserExpression::Default();
}
@@ -721,17 +730,22 @@ class PreParserFactory {
PreParserStatement NewIfStatement(PreParserExpression condition,
PreParserStatement then_statement,
- PreParserStatement else_statement,
- int pos) {
+ PreParserStatement else_statement, int pos,
+ SourceRange then_range = {},
+ SourceRange else_range = {}) {
// This must return a jump statement iff both clauses are jump statements.
return else_statement.IsJumpStatement() ? then_statement : else_statement;
}
- PreParserStatement NewBreakStatement(PreParserStatement target, int pos) {
+ PreParserStatement NewBreakStatement(
+ PreParserStatement target, int pos,
+ int continuation_pos = kNoSourcePosition) {
return PreParserStatement::Jump();
}
- PreParserStatement NewContinueStatement(PreParserStatement target, int pos) {
+ PreParserStatement NewContinueStatement(
+ PreParserStatement target, int pos,
+ int continuation_pos = kNoSourcePosition) {
return PreParserStatement::Jump();
}
@@ -840,9 +854,6 @@ struct ParserTypes<PreParser> {
typedef ParserBase<PreParser> Base;
typedef PreParser Impl;
- // PreParser doesn't need to store generator variables.
- typedef void Variable;
-
// Return types for traversing functions.
typedef PreParserIdentifier Identifier;
typedef PreParserExpression Expression;
@@ -899,14 +910,14 @@ class PreParser : public ParserBase<PreParser> {
AstValueFactory* ast_value_factory,
PendingCompilationErrorHandler* pending_error_handler,
RuntimeCallStats* runtime_call_stats,
- PreParsedScopeData* preparsed_scope_data = nullptr,
bool parsing_on_main_thread = true)
: ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
ast_value_factory, runtime_call_stats,
- preparsed_scope_data, parsing_on_main_thread),
+ parsing_on_main_thread),
use_counts_(nullptr),
track_unresolved_variables_(false),
- pending_error_handler_(pending_error_handler) {}
+ pending_error_handler_(pending_error_handler),
+ produced_preparsed_scope_data_(nullptr) {}
static bool IsPreParser() { return true; }
@@ -926,11 +937,21 @@ class PreParser : public ParserBase<PreParser> {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// final '}'.
- PreParseResult PreParseFunction(FunctionKind kind,
- DeclarationScope* function_scope,
- bool parsing_module,
- bool track_unresolved_variables,
- bool may_abort, int* use_counts);
+ PreParseResult PreParseFunction(
+ const AstRawString* function_name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, bool parsing_module,
+ bool track_unresolved_variables, bool may_abort, int* use_counts,
+ ProducedPreParsedScopeData** produced_preparser_scope_data);
+
+ ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
+ return produced_preparsed_scope_data_;
+ }
+
+ void set_produced_preparsed_scope_data(
+ ProducedPreParsedScopeData* produced_preparsed_scope_data) {
+ produced_preparsed_scope_data_ = produced_preparsed_scope_data;
+ }
private:
// These types form an algebra over syntactic categories that is just
@@ -948,13 +969,13 @@ class PreParser : public ParserBase<PreParser> {
bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
bool parse_lazily() const { return false; }
- V8_INLINE LazyParsingResult SkipFunction(FunctionKind kind,
- DeclarationScope* function_scope,
- int* num_parameters,
- bool is_inner_function,
- bool may_abort, bool* ok) {
+ V8_INLINE LazyParsingResult
+ SkipFunction(const AstRawString* name, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ ProducedPreParsedScopeData** produced_preparsed_scope_data,
+ bool is_inner_function, bool may_abort, bool* ok) {
UNREACHABLE();
- return kLazyParsingComplete;
}
Expression ParseFunctionLiteral(
Identifier name, Scanner::Location function_name_location,
@@ -1005,18 +1026,13 @@ class PreParser : public ParserBase<PreParser> {
return left;
}
- V8_INLINE PreParserExpression
- RewriteAwaitExpression(PreParserExpression value, int pos) {
- return value;
- }
V8_INLINE void PrepareAsyncFunctionBody(PreParserStatementList body,
FunctionKind kind, int pos) {}
V8_INLINE void RewriteAsyncFunctionBody(PreParserStatementList body,
PreParserStatement block,
PreParserExpression return_value,
bool* ok) {}
- V8_INLINE PreParserExpression RewriteYieldStar(PreParserExpression generator,
- PreParserExpression expression,
+ V8_INLINE PreParserExpression RewriteYieldStar(PreParserExpression expression,
int pos) {
return PreParserExpression::Default();
}
@@ -1055,11 +1071,16 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
if (track_unresolved_variables_) {
- if (catch_info->name.string_ != nullptr) {
- // Unlike in the parser, we need to declare the catch variable as LET
- // variable, so that it won't get hoisted out of the scope.
- catch_info->scope->DeclareVariableName(catch_info->name.string_, LET);
+ const AstRawString* catch_name = catch_info->name.string_;
+ if (catch_name == nullptr) {
+ catch_name = ast_value_factory()->dot_catch_string();
}
+ // Unlike in the parser, we need to declare the catch variable as a LET
+ // variable so that it won't get hoisted out of the scope. (The Parser uses
+ // DeclareLocal instead of DeclareVariable to prevent hoisting.) Another
+ // solution would have been to add DeclareLocalName just for this purpose.
+ catch_info->scope->DeclareVariableName(catch_name, LET);
+
if (catch_info->pattern.variables_ != nullptr) {
for (auto variable : *catch_info->pattern.variables_) {
scope()->DeclareVariableName(variable->raw_name(), LET);
@@ -1071,7 +1092,8 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {}
V8_INLINE PreParserStatement RewriteTryStatement(
PreParserStatement try_block, PreParserStatement catch_block,
- PreParserStatement finally_block, const CatchInfo& catch_info, int pos) {
+ const SourceRange& catch_range, PreParserStatement finally_block,
+ const SourceRange& finally_range, const CatchInfo& catch_info, int pos) {
return PreParserStatement::Default();
}
@@ -1079,11 +1101,32 @@ class PreParser : public ParserBase<PreParser> {
int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
ParseStatementList(body, Token::RBRACE, ok);
}
+ V8_INLINE void ParseAndRewriteAsyncGeneratorFunctionBody(
+ int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
+ ParseStatementList(body, Token::RBRACE, ok);
+ }
+ V8_INLINE void CreateFunctionNameAssignment(
+ const AstRawString* function_name,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope) {
+ if (track_unresolved_variables_ &&
+ function_type == FunctionLiteral::kNamedExpression) {
+ if (function_scope->LookupLocal(function_name) == nullptr) {
+ DCHECK_EQ(function_scope, scope());
+ Variable* fvar = function_scope->DeclareFunctionVar(function_name);
+ fvar->set_is_used();
+ }
+ }
+ }
+
V8_INLINE void CreateFunctionNameAssignment(
PreParserIdentifier function_name, int pos,
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, PreParserStatementList result,
- int index) {}
+ int index) {
+ CreateFunctionNameAssignment(function_name.string_, function_type,
+ function_scope);
+ }
V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
int pos, bool* ok) {
@@ -1292,11 +1335,6 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::Default();
}
- V8_INLINE PreParserExpression BuildIteratorResult(PreParserExpression value,
- bool done) {
- return PreParserExpression::Default();
- }
-
V8_INLINE PreParserStatement
BuildInitializationBlock(DeclarationParsingResult* parsing_result,
ZoneList<const AstRawString*>* names, bool* ok) {
@@ -1308,10 +1346,9 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Default();
}
- V8_INLINE PreParserStatement
- InitializeForEachStatement(PreParserStatement stmt, PreParserExpression each,
- PreParserExpression subject,
- PreParserStatement body, int each_keyword_pos) {
+ V8_INLINE PreParserStatement InitializeForEachStatement(
+ PreParserStatement stmt, PreParserExpression each,
+ PreParserExpression subject, PreParserStatement body) {
MarkExpressionAsAssigned(each);
return stmt;
}
@@ -1363,8 +1400,8 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE StatementT DesugarLexicalBindingsInForStatement(
PreParserStatement loop, PreParserStatement init,
PreParserExpression cond, PreParserStatement next,
- PreParserStatement body, Scope* inner_scope, const ForInfo& for_info,
- bool* ok) {
+ PreParserStatement body, const SourceRange& body_range,
+ Scope* inner_scope, const ForInfo& for_info, bool* ok) {
// See Parser::DesugarLexicalBindingsInForStatement.
if (track_unresolved_variables_) {
for (auto name : for_info.bound_names) {
@@ -1621,18 +1658,35 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
- const ThreadedList<PreParserFormalParameters::Parameter>& parameters) {
- bool is_simple = classifier()->is_simple_parameter_list();
+ const ThreadedList<PreParserFormalParameters::Parameter>& parameters,
+ bool is_simple) {
if (!is_simple) scope->SetHasNonSimpleParameters();
if (track_unresolved_variables_) {
DCHECK(FLAG_lazy_inner_functions);
for (auto parameter : parameters) {
+ DCHECK_IMPLIES(is_simple, parameter->variables_ != nullptr);
+ DCHECK_IMPLIES(is_simple, parameter->variables_->length() == 1);
+ // Make sure each parameter is added only once even if it's a
+ // destructuring parameter which contains multiple names.
+ bool add_parameter = true;
if (parameter->variables_ != nullptr) {
for (auto variable : (*parameter->variables_)) {
- scope->DeclareParameterName(
- variable->raw_name(), parameter->is_rest, ast_value_factory());
+ // We declare the parameter name for all names, but only create a
+ // parameter entry for the first one.
+ scope->DeclareParameterName(variable->raw_name(),
+ parameter->is_rest, ast_value_factory(),
+ true, add_parameter);
+ add_parameter = false;
}
}
+ if (add_parameter) {
+ // No names were declared; declare a dummy one here to increase the
+ // parameter count.
+ DCHECK(!is_simple);
+ scope->DeclareParameterName(ast_value_factory()->empty_string(),
+ parameter->is_rest, ast_value_factory(),
+ false, add_parameter);
+ }
}
}
}
@@ -1666,11 +1720,9 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::Default(args.variables_);
}
- V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
- PreParserExpression function,
- PreParserIdentifier name) {}
- V8_INLINE void SetFunctionNameFromPropertyName(PreParserExpression property,
- PreParserIdentifier name) {}
+ V8_INLINE void SetFunctionNameFromPropertyName(
+ PreParserExpression property, PreParserIdentifier name,
+ const AstRawString* prefix = nullptr) {}
V8_INLINE void SetFunctionNameFromIdentifierRef(
PreParserExpression value, PreParserExpression identifier) {}
@@ -1689,12 +1741,38 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE bool ParsingDynamicFunctionDeclaration() const { return false; }
+ V8_INLINE void RecordBlockSourceRange(PreParserStatement node,
+ int32_t continuation_position) {}
+ V8_INLINE void RecordCaseClauseSourceRange(PreParserStatement node,
+ const SourceRange& body_range) {}
+ V8_INLINE void RecordConditionalSourceRange(PreParserExpression node,
+ const SourceRange& then_range,
+ const SourceRange& else_range) {}
+ V8_INLINE void RecordIfStatementSourceRange(PreParserStatement node,
+ const SourceRange& then_range,
+ const SourceRange& else_range) {}
+ V8_INLINE void RecordJumpStatementSourceRange(PreParserStatement node,
+ int32_t continuation_position) {
+ }
+ V8_INLINE void RecordIterationStatementSourceRange(
+ PreParserStatement node, const SourceRange& body_range) {}
+ V8_INLINE void RecordSwitchStatementSourceRange(
+ PreParserStatement node, int32_t continuation_position) {}
+ V8_INLINE void RecordThrowSourceRange(PreParserStatement node,
+ int32_t continuation_position) {}
+ V8_INLINE void RecordTryCatchStatementSourceRange(
+ PreParserStatement node, const SourceRange& body_range) {}
+ V8_INLINE void RecordTryFinallyStatementSourceRange(
+ PreParserStatement node, const SourceRange& body_range) {}
+
// Preparser's private field members.
int* use_counts_;
bool track_unresolved_variables_;
PreParserLogger log_;
PendingCompilationErrorHandler* pending_error_handler_;
+
+ ProducedPreParsedScopeData* produced_preparsed_scope_data_;
};
PreParserExpression PreParser::SpreadCall(PreParserExpression function,
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 00eb29550a..cc2b528d4d 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -386,14 +386,6 @@ bool Rewriter::Rewrite(ParseInfo* info, Isolate* isolate) {
int pos = kNoSourcePosition;
Expression* result_value =
processor.factory()->NewVariableProxy(result, pos);
- if (scope->is_module_scope()) {
- auto args = new (info->zone()) ZoneList<Expression*>(2, info->zone());
- args->Add(result_value, info->zone());
- args->Add(processor.factory()->NewBooleanLiteral(true, pos),
- info->zone());
- result_value = processor.factory()->NewCallRuntime(
- Runtime::kInlineCreateIterResultObject, args, pos);
- }
Statement* result_statement =
processor.factory()->NewReturnStatement(result_value, pos);
body->Add(result_statement, info->zone());
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index e22308e8d5..1949814ea9 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -598,6 +598,7 @@ bool TwoByteExternalStreamingStream::ReadBlock() {
// Out of data? Return 0.
if (chunks_[chunk_no].byte_length == 0) {
+ buffer_pos_ = position;
buffer_cursor_ = buffer_start_;
buffer_end_ = buffer_start_;
return false;
@@ -700,6 +701,7 @@ bool TwoByteExternalBufferedStream::ReadBlock() {
// Out of data? Return 0.
if (chunks_[chunk_no].byte_length == 0) {
+ buffer_pos_ = position;
buffer_cursor_ = buffer_start_;
buffer_end_ = buffer_start_;
return false;
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 8dfb74c06a..a4d9d76da6 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -429,18 +429,6 @@ Token::Value Scanner::PeekAhead() {
}
-// TODO(yangguo): check whether this is actually necessary.
-static inline bool IsLittleEndianByteOrderMark(uc32 c) {
- // The Unicode value U+FFFE is guaranteed never to be assigned as a
- // Unicode character; this implies that in a Unicode context the
- // 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
- // character expressed in little-endian byte order (since it could
- // not be a U+FFFE character expressed in big-endian byte
- // order). Nevertheless, we check for it to be compatible with
- // Spidermonkey.
- return c == 0xFFFE;
-}
-
Token::Value Scanner::SkipWhiteSpace() {
int start_position = source_pos();
@@ -453,8 +441,7 @@ Token::Value Scanner::SkipWhiteSpace() {
// Remember if the latter is the case.
if (unicode_cache_->IsLineTerminator(c0_)) {
has_line_terminator_before_next_ = true;
- } else if (!unicode_cache_->IsWhiteSpace(c0_) &&
- !IsLittleEndianByteOrderMark(c0_)) {
+ } else if (!unicode_cache_->IsWhiteSpace(c0_)) {
break;
}
Advance();
@@ -983,10 +970,8 @@ bool Scanner::ScanEscape() {
// Skip escaped newlines.
if (!in_template_literal && c0_ != kEndOfInput &&
unicode_cache_->IsLineTerminator(c)) {
- // Allow CR+LF newlines in multiline string literals.
+ // Allow escaped CR+LF newlines in multiline string literals.
if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
- // Allow LF+CR newlines in multiline string literals.
- if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance<capture_raw>();
return true;
}
@@ -1047,7 +1032,7 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
// can be reported later (in strict mode).
// We don't report the error immediately, because the octal escape can
// occur before the "use strict" directive.
- if (c != '0' || i > 0) {
+ if (c != '0' || i > 0 || c0_ == '8' || c0_ == '9') {
octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
octal_message_ = MessageTemplate::kStrictOctalEscape;
}
@@ -1193,13 +1178,6 @@ Token::Value Scanner::ScanTemplateStart() {
return ScanTemplateSpan();
}
-
-Token::Value Scanner::ScanTemplateContinuation() {
- DCHECK_EQ(next_.token, Token::RBRACE);
- next_.location.beg_pos = source_pos() - 1; // We already consumed }
- return ScanTemplateSpan();
-}
-
Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
Handle<String> tmp;
if (source_url_.length() > 0) tmp = source_url_.Internalize(isolate);
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 03b3f316c2..cb6ad47d40 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -43,7 +43,7 @@ class Utf16CharacterStream {
inline uc32 Advance() {
if (V8_LIKELY(buffer_cursor_ < buffer_end_)) {
return static_cast<uc32>(*(buffer_cursor_++));
- } else if (ReadBlock()) {
+ } else if (ReadBlockChecked()) {
return static_cast<uc32>(*(buffer_cursor_++));
} else {
// Note: currently the following increment is necessary to avoid a
@@ -102,6 +102,21 @@ class Utf16CharacterStream {
buffer_pos_(buffer_pos) {}
Utf16CharacterStream() : Utf16CharacterStream(nullptr, nullptr, nullptr, 0) {}
+ bool ReadBlockChecked() {
+ size_t position = pos();
+ USE(position);
+ bool success = ReadBlock();
+
+ // Post-conditions: 1. We should always be at the right position.
+ // 2. The cursor should be inside the buffer.
+ // 3. We should have more characters available iff success.
+ DCHECK_EQ(pos(), position);
+ DCHECK_LE(buffer_cursor_, buffer_end_);
+ DCHECK_LE(buffer_start_, buffer_cursor_);
+ DCHECK_EQ(success, buffer_cursor_ < buffer_end_);
+ return success;
+ }
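+
+ // Note: ReadBlock() implementations must keep pos() consistent even when no
+ // more data is available; e.g. TwoByteExternalStreamingStream::ReadBlock()
+ // now updates buffer_pos_ before returning false so that the post-condition
+ // DCHECK_EQ(pos(), position) above holds.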
+
void ReadBlockAt(size_t new_pos) {
// The callers of this method (Back/Back2/Seek) should handle the easy
// case (seeking within the current buffer), and we should only get here
@@ -113,14 +128,8 @@ class Utf16CharacterStream {
// Change pos() to point to new_pos.
buffer_pos_ = new_pos;
buffer_cursor_ = buffer_start_;
- bool success = ReadBlock();
- USE(success);
-
- // Post-conditions: 1, on success, we should be at the right position.
- // 2, success == we should have more characters available.
- DCHECK_IMPLIES(success, pos() == new_pos);
- DCHECK_EQ(success, buffer_cursor_ < buffer_end_);
- DCHECK_EQ(success, buffer_start_ < buffer_end_);
+ DCHECK_EQ(pos(), new_pos);
+ ReadBlockChecked();
}
// Read more data, and update buffer_*_ to point to it.
@@ -333,7 +342,11 @@ class Scanner {
// Scans the input as a template literal
Token::Value ScanTemplateStart();
- Token::Value ScanTemplateContinuation();
+ Token::Value ScanTemplateContinuation() {
+ DCHECK_EQ(next_.token, Token::RBRACE);
+ next_.location.beg_pos = source_pos() - 1; // We already consumed }
+ return ScanTemplateSpan();
+ }
Handle<String> SourceUrl(Isolate* isolate) const;
Handle<String> SourceMappingUrl(Isolate* isolate) const;
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 46ae35b180..c3405b2239 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -83,7 +83,6 @@ namespace internal {
T(SHL, "<<", 11) \
T(SAR, ">>", 11) \
T(SHR, ">>>", 11) \
- T(ROR, "rotate right", 11) /* only used by Crankshaft */ \
T(ADD, "+", 12) \
T(SUB, "-", 12) \
T(MUL, "*", 13) \
@@ -239,7 +238,6 @@ class Token {
return false;
}
UNREACHABLE();
- return false;
}
static bool IsAssignmentOp(Value tok) {
@@ -249,7 +247,7 @@ class Token {
static bool IsBinaryOp(Value op) { return COMMA <= op && op <= EXP; }
static bool IsTruncatingBinaryOp(Value op) {
- return BIT_OR <= op && op <= ROR;
+ return BIT_OR <= op && op <= SHR;
}
static bool IsCompareOp(Value op) {
@@ -286,7 +284,6 @@ class Token {
case GTE: return LT;
default:
UNREACHABLE();
- return op;
}
}
@@ -303,7 +300,6 @@ class Token {
case GTE: return LTE;
default:
UNREACHABLE();
- return op;
}
}
@@ -319,7 +315,6 @@ class Token {
case Token::GTE: return (op1 >= op2);
default:
UNREACHABLE();
- return false;
}
}
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 30a068a466..c187c2517d 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -125,7 +125,6 @@ Address RelocInfo::constant_pool_entry_address() {
pc_, constant_pool, access, ConstantPoolEntry::INTPTR);
}
UNREACHABLE();
- return NULL;
}
@@ -368,19 +367,19 @@ void RelocInfo::Visit(Heap* heap) {
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
- imm_ = immediate;
+ value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
- imm_ = reinterpret_cast<intptr_t>(f.address());
+ value_.immediate = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
- imm_ = reinterpret_cast<intptr_t>(value);
+ value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR;
}
@@ -466,7 +465,6 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
UNREACHABLE();
- return NULL;
}
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index d3c57a479b..3a58578524 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -40,6 +40,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
@@ -154,7 +155,7 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() {
- if (FLAG_enable_embedded_constant_pool) {
+ if (FLAG_enable_embedded_constant_pool && host_ != NULL) {
Address constant_pool = host_->constant_pool();
return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
}
@@ -198,21 +199,27 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors
-Operand::Operand(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- imm_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = kRelocInfo_NONEPTR;
- }
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
}
+Operand Operand::EmbeddedCode(CodeStub* stub) {
+ Operand result(0, RelocInfo::CODE_TARGET);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(stub);
+ return result;
+}
MemOperand::MemOperand(Register rn, int32_t offset) {
ra_ = rn;
@@ -227,13 +234,32 @@ MemOperand::MemOperand(Register ra, Register rb) {
offset_ = 0;
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ Address constant_pool = NULL;
+ set_target_address_at(nullptr, pc, constant_pool,
+ reinterpret_cast<Address>(object.location()),
+ SKIP_ICACHE_FLUSH);
+ }
+}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -246,16 +272,15 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
optimizable_cmpi_pos_ = -1;
trampoline_emitted_ = FLAG_force_long_branches;
tracked_branch_count_ = 0;
- ClearRecordedAstId();
relocations_.reserve(128);
}
-
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
// Emit constant pool if necessary.
int constant_pool_offset = EmitConstantPool();
EmitRelocations();
+ AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
@@ -271,7 +296,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
@@ -743,12 +768,12 @@ void Assembler::b(int branch_offset, LKBit lk) {
void Assembler::xori(Register dst, Register src, const Operand& imm) {
- d_form(XORI, src, dst, imm.imm_, false);
+ d_form(XORI, src, dst, imm.immediate(), false);
}
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
- d_form(XORIS, rs, ra, imm.imm_, false);
+ d_form(XORIS, rs, ra, imm.immediate(), false);
}
@@ -782,28 +807,28 @@ void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
- DCHECK((32 > val.imm_) && (val.imm_ >= 0));
- rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
+ DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
+ rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
- DCHECK((32 > val.imm_) && (val.imm_ >= 0));
- rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
+ DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
+ rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
RCBit rc) {
- DCHECK((32 > val.imm_) && (val.imm_ >= 0));
- rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
+ DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
+ rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
RCBit rc) {
- DCHECK((32 > val.imm_) && (val.imm_ >= 0));
- rlwinm(dst, src, 0, val.imm_, 31, rc);
+ DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
+ rlwinm(dst, src, 0, val.immediate(), 31, rc);
}
@@ -823,7 +848,7 @@ void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
void Assembler::subi(Register dst, Register src, const Operand& imm) {
- addi(dst, src, Operand(-(imm.imm_)));
+ addi(dst, src, Operand(-(imm.immediate())));
}
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
@@ -858,7 +883,7 @@ void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
}
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
- d_form(SUBFIC, dst, src, imm.imm_, true);
+ d_form(SUBFIC, dst, src, imm.immediate(), true);
}
@@ -903,43 +928,43 @@ void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
- d_form(ADDI, dst, src, imm.imm_, true);
+ d_form(ADDI, dst, src, imm.immediate(), true);
}
void Assembler::addis(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use lis instead to show intent
- d_form(ADDIS, dst, src, imm.imm_, true);
+ d_form(ADDIS, dst, src, imm.immediate(), true);
}
void Assembler::addic(Register dst, Register src, const Operand& imm) {
- d_form(ADDIC, dst, src, imm.imm_, true);
+ d_form(ADDIC, dst, src, imm.immediate(), true);
}
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
- d_form(ANDIx, rs, ra, imm.imm_, false);
+ d_form(ANDIx, rs, ra, imm.immediate(), false);
}
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
- d_form(ANDISx, rs, ra, imm.imm_, false);
+ d_form(ANDISx, rs, ra, imm.immediate(), false);
}
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
- d_form(ORI, rs, ra, imm.imm_, false);
+ d_form(ORI, rs, ra, imm.immediate(), false);
}
void Assembler::oris(Register dst, Register src, const Operand& imm) {
- d_form(ORIS, src, dst, imm.imm_, false);
+ d_form(ORIS, src, dst, imm.immediate(), false);
}
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
- intptr_t imm16 = src2.imm_;
+ intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
int L = 1;
#else
@@ -953,7 +978,7 @@ void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
- uintptr_t uimm16 = src2.imm_;
+ uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
int L = 1;
#else
@@ -967,7 +992,7 @@ void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
- intptr_t imm16 = src2.imm_;
+ intptr_t imm16 = src2.immediate();
int L = 0;
int pos = pc_offset();
DCHECK(is_int16(imm16));
@@ -985,7 +1010,7 @@ void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
- uintptr_t uimm16 = src2.imm_;
+ uintptr_t uimm16 = src2.immediate();
int L = 0;
DCHECK(is_uint16(uimm16));
DCHECK(cr.code() >= 0 && cr.code() <= 7);
@@ -1002,12 +1027,12 @@ void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
- d_form(ADDI, dst, r0, imm.imm_, true);
+ d_form(ADDI, dst, r0, imm.immediate(), true);
}
void Assembler::lis(Register dst, const Operand& imm) {
- d_form(ADDIS, dst, r0, imm.imm_, true);
+ d_form(ADDIS, dst, r0, imm.immediate(), true);
}
@@ -1148,28 +1173,28 @@ void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
- DCHECK((64 > val.imm_) && (val.imm_ >= 0));
- rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
+ DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
+ rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
- DCHECK((64 > val.imm_) && (val.imm_ >= 0));
- rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
+ DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
+ rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
RCBit rc) {
- DCHECK((64 > val.imm_) && (val.imm_ >= 0));
- rldicr(dst, src, 0, 63 - val.imm_, rc);
+ DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
+ rldicr(dst, src, 0, 63 - val.immediate(), rc);
}
void Assembler::clrldi(Register dst, Register src, const Operand& val,
RCBit rc) {
- DCHECK((64 > val.imm_) && (val.imm_ >= 0));
- rldicl(dst, src, 0, val.imm_, rc);
+ DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
+ rldicl(dst, src, 0, val.immediate(), rc);
}
@@ -1258,7 +1283,6 @@ bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
// immediate sequence.
return false;
}
-
intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
@@ -1304,14 +1328,21 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
- intptr_t value = src.immediate();
+ intptr_t value;
+ if (src.IsHeapObjectRequest()) {
+ RequestHeapObject(src.heap_object_request());
+ value = 0;
+ } else {
+ value = src.immediate();
+ }
bool relocatable = src.must_output_reloc_info(this);
bool canOptimize;
canOptimize =
!(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
- if (use_constant_pool_for_mov(dst, src, canOptimize)) {
+ if (!src.IsHeapObjectRequest() &&
+ use_constant_pool_for_mov(dst, src, canOptimize)) {
DCHECK(is_constant_pool_available());
if (relocatable) {
RecordRelocInfo(src.rmode_);
@@ -1987,9 +2018,7 @@ void Assembler::GrowBuffer(int needed) {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -2057,10 +2086,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!emit_debug_code())) {
return;
}
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- data = RecordedAstId().ToInt();
- ClearRecordedAstId();
- }
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
relocations_.push_back(rinfo);
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 5eebdbbd17..38e6f2bb46 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -44,6 +44,7 @@
#include <vector>
#include "src/assembler.h"
+#include "src/double.h"
#include "src/ppc/constants-ppc.h"
#if V8_HOST_ARCH_PPC && \
@@ -303,27 +304,51 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
INLINE(explicit Operand(const ExternalReference& f));
- explicit Operand(Handle<Object> handle);
+ explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
// rm
INLINE(explicit Operand(Register rm));
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedCode(CodeStub* stub);
+
// Return true if this is a register operand.
INLINE(bool is_reg() const);
bool must_output_reloc_info(const Assembler* assembler) const;
inline intptr_t immediate() const {
- DCHECK(!rm_.is_valid());
- return imm_;
+ DCHECK(IsImmediate());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+ }
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
}
Register rm() const { return rm_; }
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
private:
Register rm_;
- intptr_t imm_; // valid if rm_ == no_reg
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ intptr_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
+
RelocInfo::Mode rmode_;
friend class Assembler;
@@ -405,7 +430,7 @@ class Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -1263,22 +1288,6 @@ class Assembler : public AssemblerBase {
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- // Causes compiler to fail
- // DCHECK(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- TypeFeedbackId RecordedAstId() {
- // Causes compiler to fail
- // DCHECK(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1372,11 +1381,6 @@ class Assembler : public AssemblerBase {
void EmitRelocations();
protected:
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode instruction(s) at pos and return backchain to previous
@@ -1395,7 +1399,7 @@ class Assembler : public AssemblerBase {
is_constant_pool_entry_sharing_blocked());
return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
}
- ConstantPoolEntry::Access ConstantPoolAddEntry(double value) {
+ ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
return constant_pool_builder_.AddEntry(pc_offset(), value);
}
@@ -1544,6 +1548,19 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
+
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void RequestHeapObject(HeapObjectRequest request);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
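+
+ // Illustrative flow (assumed call sites; r3 and 1.5 are placeholders): an
+ // embedded heap number is requested while assembling and only materialized
+ // once GetCode() runs with an Isolate.
+ //
+ //   Assembler masm(isolate_data, buffer, buffer_size);
+ //   masm.mov(r3, Operand::EmbeddedNumber(1.5));  // records a request
+ //   CodeDesc desc;
+ //   masm.GetCode(isolate, &desc);  // allocates the HeapNumber and patches
+ //                                  // the dummy handle emitted by mov()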
};
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index beeb66b442..f259a393ae 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/double.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -41,29 +42,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- r3.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments
- for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetRegisterParameter(i));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register input_reg = source();
@@ -836,14 +814,11 @@ bool CEntryStub::NeedsImmovableCode() { return true; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -871,6 +846,8 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(isolate, 1, kDontSaveFPRegs);
stub.GetCode();
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ save_doubles.GetCode();
}
@@ -977,7 +954,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ mov(r6, Operand(pending_exception_address));
__ LoadP(r6, MemOperand(r6));
@@ -1007,15 +984,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set r3 to
// contain the current pending exception, don't clobber it.
@@ -1086,7 +1063,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Save callee-saved double registers.
__ MultiPushDoubles(kCalleeSavedDoubles);
// Set up the reserved register for 0.0.
- __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
+ __ LoadDoubleLiteral(kDoubleRegZero, Double(0.0), r0);
// Push a frame with special values setup to mark it as an entry frame.
// r3: code entry
@@ -1105,7 +1082,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ push(r0);
__ push(r0);
// Save copies of the top frame descriptor on the stack.
- __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(r8, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ LoadP(r0, MemOperand(r8));
__ push(r0);
@@ -1114,7 +1092,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ mov(r8, Operand(ExternalReference(js_entry_sp)));
__ LoadP(r9, MemOperand(r8));
__ cmpi(r9, Operand::Zero());
@@ -1138,8 +1116,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
+ __ mov(ip, Operand(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate())));
__ StoreP(r3, MemOperand(ip));
__ LoadRoot(r3, Heap::kExceptionRootIndex);
@@ -1196,7 +1174,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(r6);
- __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ StoreP(r6, MemOperand(ip));
// Reset the stack to the callee saved registers.
@@ -1591,37 +1570,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : left
- // -- r3 : right
- // -- lr : return address
- // -----------------------------------
-
- // Load r5 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(r5, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ TestIfSmi(r5, r0);
- __ Assert(ne, kExpectedAllocationSite, cr0);
- __ push(r5);
- __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
- __ cmp(r5, ip);
- __ pop(r5);
- __ Assert(eq, kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -2083,7 +2031,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
// Restore the properties.
__ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
@@ -2092,7 +2040,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ mflr(r0);
__ MultiPush(spill_mask);
- __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ mov(r4, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2302,9 +2250,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
@@ -2319,6 +2268,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
+#endif
// Get the value from the slot.
__ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -2365,20 +2315,21 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ LoadP(r4, MemOperand(fp, parameter_count_offset));
- if (function_mode() == JS_FUNCTION_STUB_MODE) {
- __ addi(r4, r4, Operand(1));
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ PredictableCodeSizeScope predictable(tasm,
+#if V8_TARGET_ARCH_PPC64
+ 14 * Assembler::kInstrSize);
+#else
+ 11 * Assembler::kInstrSize);
+#endif
+ tasm->mflr(r0);
+ tasm->Push(r0, ip);
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ tasm->Pop(r0, ip);
+ tasm->mtlr(r0);
}
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ slwi(r4, r4, Operand(kPointerSizeLog2));
- __ add(sp, sp, r4);
- __ Ret();
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -2426,7 +2377,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mr(r15, sp);
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
}
@@ -2503,24 +2454,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// r3 - number of arguments
// r4 - constructor?
// sp[0] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ andi(r0, r6, Operand(1));
- __ bne(&normal_sequence, cr0);
- }
-
- // look at the first argument
- __ LoadP(r8, MemOperand(sp, 0));
- __ cmpi(r8, Operand::Zero());
- __ beq(&normal_sequence);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2529,12 +2468,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ArraySingleArgumentConstructorStub stub_holey(
masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ // is the low bit set? If so, we are holey and that is good.
+ Label normal_sequence;
+ __ andi(r0, r6, Operand(1));
+ __ bne(&normal_sequence, cr0);
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ addi(r6, r6, Operand(1));
@@ -2549,10 +2488,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(
+ r5, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
- __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
- r0);
+ __ StoreP(
+ r7,
+ FieldMemOperand(r5, AllocationSite::kTransitionInfoOrBoilerplateOffset),
+ r0);
__ bind(&normal_sequence);
int last_index =
@@ -2581,7 +2523,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2594,7 +2536,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2661,7 +2603,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
__ beq(&no_info);
- __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(
+ r5, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(r6);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2735,20 +2678,20 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ cmpi(r6, Operand(FAST_ELEMENTS));
+ __ cmpi(r6, Operand(PACKED_ELEMENTS));
__ beq(&done);
- __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
+ __ cmpi(r6, Operand(HOLEY_ELEMENTS));
__ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
- __ cmpi(r6, Operand(FAST_ELEMENTS));
+ __ cmpi(r6, Operand(PACKED_ELEMENTS));
__ beq(&fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 1d4cdd0fcb..6c8ffe6898 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -39,7 +39,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index e1929dbf63..bab4efe0ac 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -11,6 +11,15 @@
#include "src/base/macros.h"
#include "src/globals.h"
+// UNIMPLEMENTED_ macro for PPC.
+#ifdef DEBUG
+#define UNIMPLEMENTED_PPC() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_PPC()
+#endif
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 142b398b43..f8cfe70c8f 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -85,24 +85,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler());
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(r3.code(), params);
- output_frame->SetRegister(r4.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -147,7 +129,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
}
- __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 3651f4c7ef..6933e302a4 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -166,7 +166,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
@@ -325,7 +324,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
index e86ec681ec..228ef1998f 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -25,12 +25,6 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
}
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- DCHECK(FLAG_enable_embedded_constant_pool);
- return kConstantPoolRegister;
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index cba9275d90..bb14f091b4 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -47,6 +47,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return r8; }
const Register StringCompareDescriptor::LeftRegister() { return r4; }
const Register StringCompareDescriptor::RightRegister() { return r3; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return r3; }
+
const Register ApiGetterDescriptor::HolderRegister() { return r3; }
const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
@@ -153,6 +155,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r5 : arguments list (FixedArray)
+ // r7 : arguments list length (untagged)
+ Register registers[] = {r4, r3, r5, r7};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@@ -162,6 +174,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r5 : the object to spread
+ Register registers[] = {r4, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r4 : the target to call
+ // r5 : the arguments list
+ Register registers[] = {r4, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : arguments list (FixedArray)
+ // r7 : arguments list length (untagged)
+ Register registers[] = {r4, r6, r3, r5, r7};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@@ -172,6 +212,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : the object to spread
+ Register registers[] = {r4, r6, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : the arguments list
+ Register registers[] = {r4, r6, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r3 : number of arguments
@@ -369,8 +428,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r3, // the value to pass to the generator
r4, // the JSGeneratorObject to resume
- r5, // the resume mode (tagged)
- r6 // SuspendFlags (tagged)
+ r5 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 8c5ea97eee..ed925001a0 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -12,6 +12,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/external-reference-table.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -22,30 +23,19 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
-}
+ : TurboAssembler(isolate, buffer, size, create_code_object) {}
-
-void MacroAssembler::Jump(Register target) {
+void TurboAssembler::Jump(Register target) {
mtctr(target);
bctr();
}
-
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
Jump(ip);
}
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, CRegister cr) {
Label skip;
@@ -60,27 +50,22 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
-
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
CRegister cr) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ppc code, never THUMB code
- AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+ Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond);
}
+int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }
-int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
-
-
-void MacroAssembler::Call(Register target) {
+void TurboAssembler::Call(Register target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -92,28 +77,24 @@ void MacroAssembler::Call(Register target) {
DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
-
void MacroAssembler::CallJSEntry(Register target) {
DCHECK(target.is(ip));
Call(target);
}
-
-int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}
-
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}
-
-void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(cond == al);
@@ -127,7 +108,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
#endif
// This can likely be optimized to make use of bc() with 24bit relative
//
- // RecordRelocInfo(x.rmode_, x.imm_);
+ // RecordRelocInfo(x.rmode_, x.immediate);
// bc( BA, .... offset, LKset);
//
@@ -138,78 +119,78 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
-
-int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id, Condition cond) {
- AllowDeferredHandleDereference using_raw_address;
- return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
+int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
+ return CallSize(code.address(), rmode, cond);
}
-
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id, Condition cond) {
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
+ Label start;
+ bind(&start);
+
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallSize(code, rmode, ast_id, cond);
- Label start;
- bind(&start);
+ int expected_size = CallSize(code, rmode, cond);
#endif
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
- AllowDeferredHandleDereference using_raw_address;
- Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+ Call(code.address(), rmode, cond);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
-
-void MacroAssembler::Drop(int count) {
+void TurboAssembler::Drop(int count) {
if (count > 0) {
Add(sp, sp, count * kPointerSize, r0);
}
}
-void MacroAssembler::Drop(Register count, Register scratch) {
+void TurboAssembler::Drop(Register count, Register scratch) {
ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
add(sp, sp, scratch);
}
-void MacroAssembler::Call(Label* target) { b(target, SetLK); }
+void TurboAssembler::Call(Label* target) { b(target, SetLK); }
-
-void MacroAssembler::Push(Handle<Object> handle) {
+void TurboAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
+void TurboAssembler::Push(Smi* smi) {
+ mov(r0, Operand(smi));
+ push(r0);
+}
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
+void MacroAssembler::PushObject(Handle<Object> handle) {
+ if (handle->IsHeapObject()) {
+ Push(Handle<HeapObject>::cast(handle));
+ } else {
+ Push(Smi::cast(*handle));
+ }
}
+void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
+ mov(dst, Operand(value));
+}
-void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+void TurboAssembler::Move(Register dst, Register src, Condition cond) {
DCHECK(cond == al);
if (!dst.is(src)) {
mr(dst, src);
}
}
-
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (!dst.is(src)) {
fmr(dst, src);
}
}
-
-void MacroAssembler::MultiPush(RegList regs, Register location) {
+void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
@@ -222,8 +203,7 @@ void MacroAssembler::MultiPush(RegList regs, Register location) {
}
}
-
-void MacroAssembler::MultiPop(RegList regs, Register location) {
+void TurboAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -235,8 +215,7 @@ void MacroAssembler::MultiPop(RegList regs, Register location) {
addi(location, location, Operand(stack_offset));
}
-
-void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
+void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
int16_t num_to_push = NumberOfBitsSet(dregs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -250,8 +229,7 @@ void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
-
-void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
+void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -264,22 +242,12 @@ void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
addi(location, location, Operand(stack_offset));
}
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond) {
DCHECK(cond == al);
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
-
-void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
- Condition cond) {
- DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
- DCHECK(cond == al);
- StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
-}
-
-
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
DCHECK(cond == eq || cond == ne);
@@ -568,7 +536,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
-void MacroAssembler::PushCommonFrame(Register marker_reg) {
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -608,7 +576,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
mtlr(r0);
}
-void MacroAssembler::PushStandardFrame(Register function_reg) {
+void TurboAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -631,7 +599,7 @@ void MacroAssembler::PushStandardFrame(Register function_reg) {
addi(fp, sp, Operand(fp_delta * kPointerSize));
}
-void MacroAssembler::RestoreFrameStateForTailCall() {
+void TurboAssembler::RestoreFrameStateForTailCall() {
if (FLAG_enable_embedded_constant_pool) {
LoadP(kConstantPoolRegister,
MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -709,66 +677,61 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
-
-void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
+void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
fsub(dst, src, kDoubleRegZero);
}
-void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
MovIntToDouble(dst, src, r0);
fcfid(dst, dst);
}
-void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
+void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
DoubleRegister dst) {
MovUnsignedIntToDouble(dst, src, r0);
fcfid(dst, dst);
}
-void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
MovIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
-void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
+void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
DoubleRegister dst) {
MovUnsignedIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
#if V8_TARGET_ARCH_PPC64
-void MacroAssembler::ConvertInt64ToDouble(Register src,
+void TurboAssembler::ConvertInt64ToDouble(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfid(double_dst, double_dst);
}
-
-void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
+void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfidus(double_dst, double_dst);
}
-
-void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
+void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfidu(double_dst, double_dst);
}
-
-void MacroAssembler::ConvertInt64ToFloat(Register src,
+void TurboAssembler::ConvertInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
fcfids(double_dst, double_dst);
}
#endif
-
-void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
+void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
const Register dst_hi,
#endif
@@ -791,7 +754,7 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
}
#if V8_TARGET_ARCH_PPC64
-void MacroAssembler::ConvertDoubleToUnsignedInt64(
+void TurboAssembler::ConvertDoubleToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
if (rounding_mode == kRoundToZero) {
@@ -807,7 +770,7 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
#endif
#if !V8_TARGET_ARCH_PPC64
-void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -832,7 +795,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
bind(&done);
}
-void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -854,7 +817,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
}
}
-void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -879,7 +842,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
bind(&done);
}
-void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -901,7 +864,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
}
}
-void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high, shift));
@@ -925,7 +888,7 @@ void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
bind(&done);
}
-void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
@@ -956,19 +919,17 @@ void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}
-
-void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
+void TurboAssembler::LoadConstantPoolPointerRegister(Register base,
int code_start_delta) {
add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
code_start_delta);
}
-
-void MacroAssembler::LoadConstantPoolPointerRegister() {
+void TurboAssembler::LoadConstantPoolPointerRegister() {
mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}
-void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
+void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -986,8 +947,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
-
-void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+void TurboAssembler::Prologue(bool code_pre_aging, Register base,
int prologue_offset) {
DCHECK(!base.is(no_reg));
{
@@ -1029,8 +989,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
+void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
// Push type explicitly so we can leverage the constant pool.
@@ -1049,8 +1008,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
}
}
-
-int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
// r3: preserved
// r4: preserved
@@ -1163,9 +1121,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ mov(r8, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
StoreP(fp, MemOperand(r8));
- mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ mov(r8,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
StoreP(cp, MemOperand(r8));
// Optionally save all volatile double registers.
@@ -1183,7 +1143,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// function.
const int frame_alignment = ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
}
li(r0, Operand::Zero());
@@ -1195,7 +1155,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-int MacroAssembler::ActivationFrameAlignment() {
+int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1228,16 +1188,19 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
li(r6, Operand::Zero());
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
StoreP(r6, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ mov(ip, Operand(ExternalReference(IsolateAddressId::kContextAddress,
+ isolate())));
LoadP(cp, MemOperand(ip));
}
#ifdef DEBUG
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
StoreP(r6, MemOperand(ip));
#endif
@@ -1252,17 +1215,15 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-
-void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d1);
}
-
-void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d1);
}
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
@@ -1499,9 +1460,6 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
LoadWordArith(expected_reg,
FieldMemOperand(
temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !defined(V8_TARGET_ARCH_PPC64)
- SmiUntag(expected_reg);
-#endif
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
@@ -1546,7 +1504,6 @@ void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
bne(fail, cr0);
}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1565,7 +1522,8 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
// Preserve r3-r7.
- mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ mov(r8,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
LoadP(r0, MemOperand(r8));
push(r0);
@@ -1579,7 +1537,8 @@ void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
- mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
StoreP(r4, MemOperand(ip));
}
@@ -1628,7 +1587,6 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1715,10 +1673,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
add(result_end, result, result_end);
}
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- StoreP(result_end, MemOperand(top_address));
- }
+ StoreP(result_end, MemOperand(top_address));
// Tag object.
addi(result, result, Operand(kHeapObjectTag));
@@ -1728,7 +1683,6 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1818,110 +1772,6 @@ void MacroAssembler::Allocate(Register object_size, Register result,
andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- StoreP(result_end, MemOperand(top_address));
- }
-
- // Tag object.
- addi(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, Register scratch,
- AllocationFlags flags) {
- // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
- // is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, ip));
- DCHECK(!AreAliased(result_end, result, scratch, ip));
- DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- Register top_address = scratch;
- mov(top_address, Operand(allocation_top));
- LoadP(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- andi(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned);
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(result_end, MemOperand(result));
- addi(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
- // Calculate new top using result. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
- add(result_end, result, result_end);
- } else {
- add(result_end, result, object_size);
- }
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- andi(r0, result_end, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, cr0);
- }
- StoreP(result_end, MemOperand(top_address));
-
- // Tag object.
- addi(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK(!AreAliased(result, scratch1, scratch2, ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
- LoadP(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_PPC64
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- andi(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned);
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(result_end, MemOperand(result));
- addi(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
- // Calculate new top using result.
- Add(result_end, result, object_size, r0);
-
- // The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
// Tag object.
@@ -1952,7 +1802,7 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
cmp(obj, r0);
}
-void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -1984,8 +1834,7 @@ void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}
-
-void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
intptr_t right,
Register overflow_dst,
Register scratch) {
@@ -2010,8 +1859,7 @@ void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
}
}
-
-void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
+void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
Register scratch) {
@@ -2108,19 +1956,27 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
- Condition cond) {
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
+void TurboAssembler::CallStubDelayed(CodeStub* stub) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+
+ // Block constant pool for the call instruction sequence.
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
+ mov(ip, Operand::EmbeddedCode(stub));
+ mtctr(ip);
+ bctrl();
+}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2197,7 +2053,49 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
fcmpu(double_scratch, double_input);
bind(&done);
}
+void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input) {
+ Label done;
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through then inline version didn't succeed - call stub instead.
+ mflr(r0);
+ push(r0);
+ // Put input on stack.
+ stfdu(double_input, MemOperand(sp, -kDoubleSize));
+
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
+
+ addi(sp, sp, Operand(kDoubleSize));
+ pop(r0);
+ mtlr(r0);
+
+ bind(&done);
+}
+
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister double_scratch = kScratchDoubleReg;
+#if !V8_TARGET_ARCH_PPC64
+ Register scratch = ip;
+#endif
+
+ ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+ scratch,
+#endif
+ result, double_scratch);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+ TestIfInt32(result, r0);
+#else
+ TestIfInt32(scratch, result, r0);
+#endif
+ beq(done);
+}
void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
Register input_high, Register scratch,
@@ -2338,6 +2236,23 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
rlwinm(dst, src, 0, 32 - num_least_bits, 31);
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r3, Operand(f->nargs));
+ mov(r4, Operand(ExternalReference(f, isolate())));
+ CallStubDelayed(new (zone) CEntryStub(nullptr,
+#if V8_TARGET_ARCH_PPC64
+ f->result_size,
+#else
+ 1,
+#endif
+ save_doubles));
+}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
@@ -2427,15 +2342,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-
-void MacroAssembler::Assert(Condition cond, BailoutReason reason,
+void TurboAssembler::Assert(Condition cond, BailoutReason reason,
CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
-
-
-void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
Label L;
b(cond, &L, cr);
Abort(reason);
@@ -2443,8 +2355,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -2460,9 +2371,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -2601,6 +2509,17 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAFixedArray, cr0);
+ push(object);
+ CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFixedArray);
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -2627,8 +2546,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
TestIfSmi(object, r0);
Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
@@ -2638,17 +2556,14 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
push(object);
LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label async, do_check;
- TestBitMask(flags, static_cast<int>(SuspendFlags::kGeneratorTypeMask), r0);
- bne(&async, cr0);
-
// Check if JSGeneratorObject
- CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE);
- b(&do_check);
+ Label do_check;
+ Register instance_type = object;
+ CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
+ beq(&do_check);
- bind(&async);
- // Check if JSAsyncGeneratorObject
- CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
+ cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
bind(&do_check);
// Restore generator object to register and perform assertion
@@ -2777,7 +2692,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset),
+ r0);
StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -2823,8 +2739,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 8;
-
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
@@ -2879,8 +2794,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
SmiUntag(index, index);
}
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -2893,7 +2807,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// -- preserving original value of sp.
mr(scratch, sp);
addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -2906,20 +2820,16 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
}
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
+void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
-void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
-
-
-void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
+void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
-
-void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2.is(d1)) {
DCHECK(!src1.is(d2));
@@ -2931,33 +2841,28 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunctionHelper(Register function,
+void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
@@ -3020,8 +2925,7 @@ void MacroAssembler::DecodeConstantPoolOffset(Register result,
bind(&done);
}
-
-void MacroAssembler::CheckPageFlag(
+void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
@@ -3029,7 +2933,8 @@ void MacroAssembler::CheckPageFlag(
ClearRightImm(scratch, object, Operand(kPageSizeBits));
LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- And(r0, scratch, Operand(mask), SetRC);
+ mov(r0, Operand(mask));
+ and_(r0, scratch, r0, SetRC);
if (cc == ne) {
bne(condition_met, cr0);
@@ -3151,11 +3056,9 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
}
}
+void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
-void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
-
-
-void MacroAssembler::ResetRoundingMode() {
+void TurboAssembler::ResetRoundingMode() {
mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
}
@@ -3167,7 +3070,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
Label done;
Label in_bounds;
- LoadDoubleLiteral(double_scratch, 0.0, result_reg);
+ LoadDoubleLiteral(double_scratch, Double(0.0), result_reg);
fcmpu(input_reg, double_scratch);
bgt(&above_zero);
@@ -3177,7 +3080,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// Double value is >= 255, return 255.
bind(&above_zero);
- LoadDoubleLiteral(double_scratch, 255.0, result_reg);
+ LoadDoubleLiteral(double_scratch, Double(255.0), result_reg);
fcmpu(input_reg, double_scratch);
ble(&in_bounds);
LoadIntLiteral(result_reg, 255);
@@ -3278,17 +3181,15 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// New MacroAssembler Interfaces added for PPC
//
////////////////////////////////////////////////////////////////////////////////
-void MacroAssembler::LoadIntLiteral(Register dst, int value) {
+void TurboAssembler::LoadIntLiteral(Register dst, int value) {
mov(dst, Operand(value));
}
-
-void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
mov(dst, Operand(smi));
}
-
-void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
+void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
Register scratch) {
if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
!(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
@@ -3304,7 +3205,7 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
// avoid gcc strict aliasing error using union cast
union {
- double dval;
+ uint64_t dval;
#if V8_TARGET_ARCH_PPC64
intptr_t ival;
#else
@@ -3312,7 +3213,7 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
#endif
} litVal;
- litVal.dval = value;
+ litVal.dval = value.AsUint64();
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
@@ -3337,8 +3238,7 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
+void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// sign-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
@@ -3363,8 +3263,7 @@ void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
Register scratch) {
// zero-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
@@ -3389,8 +3288,7 @@ void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
+void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
Register src_hi,
#endif
@@ -3416,7 +3314,7 @@ void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
#if V8_TARGET_ARCH_PPC64
-void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
+void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
Register src_hi,
Register src_lo,
Register scratch) {
@@ -3436,8 +3334,7 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
}
#endif
-
-void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
+void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
@@ -3456,8 +3353,7 @@ void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
+void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
@@ -3476,8 +3372,7 @@ void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprwz(dst, src);
@@ -3492,8 +3387,7 @@ void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprd(dst, src);
@@ -3509,8 +3403,7 @@ void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovDoubleToInt64(
+void TurboAssembler::MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
Register dst_hi,
#endif
@@ -3534,8 +3427,7 @@ void MacroAssembler::MovDoubleToInt64(
addi(sp, sp, Operand(kDoubleSize));
}
-
-void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
subi(sp, sp, Operand(kFloatSize));
stw(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
@@ -3543,8 +3435,7 @@ void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
addi(sp, sp, Operand(kFloatSize));
}
-
-void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
subi(sp, sp, Operand(kFloatSize));
stfs(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
@@ -3552,8 +3443,7 @@ void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
addi(sp, sp, Operand(kFloatSize));
}
-
-void MacroAssembler::Add(Register dst, Register src, intptr_t value,
+void TurboAssembler::Add(Register dst, Register src, intptr_t value,
Register scratch) {
if (is_int16(value)) {
addi(dst, src, Operand(value));
@@ -3575,8 +3465,7 @@ void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
}
}
-
-void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
+void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
if (is_uint16(value)) {
@@ -3587,8 +3476,7 @@ void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
}
}
-
-void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
+void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
if (is_int16(value)) {
@@ -3617,7 +3505,8 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
if (rb.is_reg()) {
and_(ra, rs, rb.rm(), rc);
} else {
- if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
+ if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
+ rc == SetRC) {
andi(ra, rs, rb);
} else {
// mov handles the relocation.
@@ -3633,7 +3522,8 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
if (rb.is_reg()) {
orx(ra, rs, rb.rm(), rc);
} else {
- if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
+ if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
+ rc == LeaveRC) {
ori(ra, rs, rb);
} else {
// mov handles the relocation.
@@ -3650,7 +3540,8 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
if (rb.is_reg()) {
xor_(ra, rs, rb.rm(), rc);
} else {
- if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
+ if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
+ rc == LeaveRC) {
xori(ra, rs, rb);
} else {
// mov handles the relocation.
@@ -3718,7 +3609,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
// Load a "pointer" sized value from the memory location
-void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -3745,7 +3636,7 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
}
}
-void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -3764,7 +3655,7 @@ void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
-void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+void TurboAssembler::StoreP(Register src, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -3796,7 +3687,7 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
}
}
-void MacroAssembler::StorePU(Register src, const MemOperand& mem,
+void TurboAssembler::StorePU(Register src, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -3994,8 +3885,7 @@ void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
}
}
-
-void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
+void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4021,7 +3911,7 @@ void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
}
}
-void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
+void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4034,8 +3924,8 @@ void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
}
}
-void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4047,7 +3937,7 @@ void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
}
}
-void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
+void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4060,8 +3950,8 @@ void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
}
}
-void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4073,7 +3963,7 @@ void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
}
}
-void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
+void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4086,8 +3976,8 @@ void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
}
}
-void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -4167,7 +4057,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
return candidate;
}
UNREACHABLE();
- return no_reg;
}
#ifdef DEBUG
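// Illustrative host-side sketch (not from the V8 sources): the MovIntToFloat
// and MovFloatToInt hunks above move raw bits between a GPR and an FPR by
// spilling to the stack and reloading, since there is no direct GPR<->FPR
// move on this path. The same 32-bit reinterpretation in portable C++:
#include <cstdint>
#include <cstring>

static inline float IntBitsToFloat(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof f);  // reuse the bit pattern as an IEEE single
  return f;
}

static inline uint32_t FloatToIntBits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}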
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 5f4c8ac9be..7577c762e8 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -7,6 +7,7 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/double.h"
#include "src/frames.h"
#include "src/globals.h"
@@ -104,85 +105,537 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#define Div divw
#endif
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public Assembler {
+class TurboAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
Isolate* isolate() const { return isolate_; }
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+ // Converts the integer (untagged smi) in |src| to a double, storing
+ // the result to |dst|
+ void ConvertIntToDouble(Register src, DoubleRegister dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a double, storing the result to |dst|
+ void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
+
+ // Converts the integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ void ConvertIntToFloat(Register src, DoubleRegister dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
+
+#if V8_TARGET_ARCH_PPC64
+ void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
+#endif
+
+ // Converts the double_input to an integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_PPC64
+ const Register dst_hi,
+#endif
+ const Register dst, const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+
+#if V8_TARGET_ARCH_PPC64
+ // Converts the double_input to an unsigned integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+#endif
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg = false);
+
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+
+ // Push a fixed frame, consisting of lr, fp, constant pool.
+ void PushCommonFrame(Register marker_reg = no_reg);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type, Register base = no_reg,
+ int prologue_offset = 0);
+ void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
+
+ // Push a standard frame, consisting of lr, fp, constant pool,
+ // context and JS function
+ void PushStandardFrame(Register function_reg);
+
+ // Restore caller's frame pointer and return address prior to being
+ // overwritten by tail call stack preparation.
+ void RestoreFrameStateForTailCall();
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(kRootRegister, Operand(roots_array_start));
+ }
+
+ // These exist to provide portability between 32-bit and 64-bit
+ void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+ void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
+
+ void LoadDouble(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
+
+ // load a literal signed int value <value> to GPR <dst>
+ void LoadIntLiteral(Register dst, int value);
+ // load an SMI value <value> to GPR <dst>
+ void LoadSmiLiteral(Register dst, Smi* smi);
+
+ void LoadSingle(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreDouble(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreSingle(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreSingleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void Cmpli(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ void Cmpwi(Register src1, const Operand& src2, Register scratch,
+ CRegister cr = cr7);
+ // Set new rounding mode RN to FPSCR
+ void SetRoundingMode(FPRoundingMode RN);
+
+ // reset rounding mode to default (kRoundToNearest)
+ void ResetRoundingMode();
+ void Add(Register dst, Register src, intptr_t value, Register scratch);
+
+ void Push(Register src) { push(src); }
+ // Push a handle.
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi* smi);
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ StorePU(src2, MemOperand(sp, -2 * kPointerSize));
+ StoreP(src1, MemOperand(sp, kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ StorePU(src3, MemOperand(sp, -3 * kPointerSize));
+ StoreP(src2, MemOperand(sp, kPointerSize));
+ StoreP(src1, MemOperand(sp, 2 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ StorePU(src4, MemOperand(sp, -4 * kPointerSize));
+ StoreP(src3, MemOperand(sp, kPointerSize));
+ StoreP(src2, MemOperand(sp, 2 * kPointerSize));
+ StoreP(src1, MemOperand(sp, 3 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ StorePU(src5, MemOperand(sp, -5 * kPointerSize));
+ StoreP(src4, MemOperand(sp, kPointerSize));
+ StoreP(src3, MemOperand(sp, 2 * kPointerSize));
+ StoreP(src2, MemOperand(sp, 3 * kPointerSize));
+ StoreP(src1, MemOperand(sp, 4 * kPointerSize));
+ }
+
+ void Pop(Register dst) { pop(dst); }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ LoadP(src2, MemOperand(sp, 0));
+ LoadP(src1, MemOperand(sp, kPointerSize));
+ addi(sp, sp, Operand(2 * kPointerSize));
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ LoadP(src3, MemOperand(sp, 0));
+ LoadP(src2, MemOperand(sp, kPointerSize));
+ LoadP(src1, MemOperand(sp, 2 * kPointerSize));
+ addi(sp, sp, Operand(3 * kPointerSize));
+ }
+
+ // Pop four registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Register src4) {
+ LoadP(src4, MemOperand(sp, 0));
+ LoadP(src3, MemOperand(sp, kPointerSize));
+ LoadP(src2, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src1, MemOperand(sp, 3 * kPointerSize));
+ addi(sp, sp, Operand(4 * kPointerSize));
+ }
+
+ // Pop five registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ LoadP(src5, MemOperand(sp, 0));
+ LoadP(src4, MemOperand(sp, kPointerSize));
+ LoadP(src3, MemOperand(sp, 2 * kPointerSize));
+ LoadP(src2, MemOperand(sp, 3 * kPointerSize));
+ LoadP(src1, MemOperand(sp, 4 * kPointerSize));
+ addi(sp, sp, Operand(5 * kPointerSize));
+ }
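// Illustrative sketch (a simple array model of the stack, not V8 code): the
// multi-register Push/Pop overloads above keep the leftmost register at the
// highest address, so Push(a, b, c) followed by Pop(a, b, c) round-trips.
#include <cstdint>

struct MiniStack {
  uintptr_t mem[16];
  int sp = 16;  // grows downward, like the machine stack pointer

  void Push3(uintptr_t a, uintptr_t b, uintptr_t c) {
    sp -= 3;
    mem[sp + 2] = a;  // leftmost value lands at the highest address
    mem[sp + 1] = b;
    mem[sp + 0] = c;
  }
  void Pop3(uintptr_t* a, uintptr_t* b, uintptr_t* c) {
    *c = mem[sp + 0];  // rightmost value is read from the lowest address
    *b = mem[sp + 1];
    *a = mem[sp + 2];
    sp += 3;
  }
};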
+ void MultiPush(RegList regs, Register location = sp);
+ void MultiPop(RegList regs, Register location = sp);
+
+ void MultiPushDoubles(RegList dregs, Register location = sp);
+ void MultiPopDoubles(RegList dregs, Register location = sp);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, Heap::RootListIndex index,
+ Condition cond = al);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored in
+ // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+ // are word sized. If double arguments are used, this function assumes that
+ // all double arguments are stored before core registers; otherwise the
+ // correct alignment of the double values is not guaranteed.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1);
+
+ // There are two ways of passing double arguments on ARM, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void MovFromFloatParameter(DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ inline bool AllowThisStubCall(CodeStub* stub);
+#if !V8_TARGET_ARCH_PPC64
+ void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+ void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register scratch, Register shift);
+ void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+#endif
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
- void JumpToJSEntry(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target);
- void CallJSEntry(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
- void Ret() { blr(); }
- void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
-
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
+ void Call(Label* target);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
void Drop(Register count, Register scratch = r0);
+ void Ret() { blr(); }
+ void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
void Ret(int drop) {
Drop(drop);
blr();
}
- void Call(Label* target);
+ // If the value is a NaN, canonicalize the value; otherwise, do nothing.
+ void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+ void CanonicalizeNaN(const DoubleRegister value) {
+ CanonicalizeNaN(value, value);
+ }
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
+ // Move values between integer and floating point registers.
+ void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
+ void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+ Register scratch);
+ void MovInt64ToDouble(DoubleRegister dst,
+#if !V8_TARGET_ARCH_PPC64
+ Register src_hi,
+#endif
+ Register src);
+#if V8_TARGET_ARCH_PPC64
+ void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
+ Register src_lo, Register scratch);
+#endif
+ void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
+ void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
+ void MovDoubleLowToInt(Register dst, DoubleRegister src);
+ void MovDoubleHighToInt(Register dst, DoubleRegister src);
+ void MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+ Register dst_hi,
+#endif
+ Register dst, DoubleRegister src);
+ void MovIntToFloat(DoubleRegister dst, Register src);
+ void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
- void Move(Register dst, Handle<Object> value);
+ void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- void MultiPush(RegList regs, Register location = sp);
- void MultiPop(RegList regs, Register location = sp);
+ void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
- void MultiPushDoubles(RegList dregs, Register location = sp);
- void MultiPopDoubles(RegList dregs, Register location = sp);
+ void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
+ ShiftRightArithImm(dst, src, kSmiShift, rc);
+ }
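// Illustrative sketch (PPC64 layout assumed): SmiUntag above is an arithmetic
// right shift by kSmiShift = kSmiTagSize + kSmiShiftSize, i.e. 32 on 64-bit
// targets and 1 on 32-bit targets. Host-side equivalent for the 64-bit case:
#include <cstdint>

static inline int64_t SmiUntagValue64(intptr_t tagged) {
  constexpr int kSmiShift64 = 32;  // kSmiTagSize (1) + kSmiShiftSize (31)
  return static_cast<int64_t>(tagged) >> kSmiShift64;
}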
+ // ---------------------------------------------------------------------------
+ // Bit testing/extraction
+ //
+ // Bit numbering is such that the least significant bit is bit 0
+ // (for consistency between 32/64-bit).
- // Load an object from the root table.
- void LoadRoot(Register destination, Heap::RootListIndex index,
- Condition cond = al);
- // Store an object to the root table.
- void StoreRoot(Register source, Heap::RootListIndex index,
- Condition cond = al);
+ // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
+ // and, if !test, shift them into the least significant bits of dst.
+ inline void ExtractBitRange(Register dst, Register src, int rangeStart,
+ int rangeEnd, RCBit rc = LeaveRC,
+ bool test = false) {
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+ int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
+ int width = rangeStart - rangeEnd + 1;
+ if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
+ // Prefer faster andi when applicable.
+ andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
+ } else {
+#if V8_TARGET_ARCH_PPC64
+ rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+#else
+ rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
+ rc);
+#endif
+ }
+ }
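// Illustrative sketch (not from the V8 sources): ExtractBitRange above copies
// bits [rangeEnd, rangeStart] of src into the low bits of dst; the rldicl /
// rlwinm rotate-and-mask encodings are equivalent to this portable form:
#include <cstdint>

static inline uintptr_t ExtractBitRangeValue(uintptr_t src, int range_start,
                                             int range_end) {
  const int width = range_start - range_end + 1;
  const uintptr_t mask =
      (width == static_cast<int>(sizeof(uintptr_t)) * 8)
          ? ~uintptr_t{0}
          : ((uintptr_t{1} << width) - 1);
  return (src >> range_end) & mask;
}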
+
+ inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
+ RCBit rc = LeaveRC, bool test = false) {
+ ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
+ }
+
+ // Extract consecutive bits (defined by mask) from src and place them
+ // into the least significant bits of dst.
+ inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
+ RCBit rc = LeaveRC, bool test = false) {
+ int start = kBitsPerPointer - 1;
+ int end;
+ uintptr_t bit = (1L << start);
+
+ while (bit && (mask & bit) == 0) {
+ start--;
+ bit >>= 1;
+ }
+ end = start;
+ bit >>= 1;
+
+ while (bit && (mask & bit)) {
+ end--;
+ bit >>= 1;
+ }
+
+ // 1-bits in mask must be contiguous
+ DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+
+ ExtractBitRange(dst, src, start, end, rc, test);
+ }
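// Illustrative sketch: ExtractBitMask above scans the mask from the most
// significant end to locate the contiguous run of 1-bits, then reuses
// ExtractBitRange. For mask 0x38 (0b111000) it finds rangeStart = 5 and
// rangeEnd = 3. The same scan, assuming a contiguous mask:
#include <cstdint>
#include <utility>

static inline std::pair<int, int> MaskToBitRange(uintptr_t mask) {
  int start = static_cast<int>(sizeof(uintptr_t)) * 8 - 1;
  uintptr_t bit = uintptr_t{1} << start;
  while (bit && (mask & bit) == 0) {
    start--;
    bit >>= 1;
  }
  int end = start;
  bit >>= 1;
  while (bit && (mask & bit)) {
    end--;
    bit >>= 1;
  }
  return {start, end};  // {rangeStart, rangeEnd}
}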
+
+ // Test single bit in value.
+ inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
+ ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
+ }
+
+ // Test consecutive bit range in value. Range is defined by mask.
+ inline void TestBitMask(Register value, uintptr_t mask,
+ Register scratch = r0) {
+ ExtractBitMask(scratch, value, mask, SetRC, true);
+ }
+ // Test consecutive bit range in value. Range is defined by
+ // rangeStart - rangeEnd.
+ inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
+ Register scratch = r0) {
+ ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
+ }
+
+ inline void TestIfSmi(Register value, Register scratch) {
+ TestBitRange(value, kSmiTagSize - 1, 0, scratch);
+ }
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ TestIfSmi(value, r0);
+ beq(smi_label, cr0); // branch if SMI
+ }
+#if V8_TARGET_ARCH_PPC64
+ inline void TestIfInt32(Register value, Register scratch,
+ CRegister cr = cr7) {
+ // High bits must be identical to fit into a 32-bit integer
+ extsw(scratch, value);
+ cmp(scratch, value, cr);
+ }
+#else
+ inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
+ CRegister cr = cr7) {
+ // High bits must be identical to fit into a 32-bit integer
+ srawi(scratch, lo_word, 31);
+ cmp(scratch, hi_word, cr);
+ }
+#endif
+
+ // Overflow handling functions.
+ // Usage: call the appropriate arithmetic function and then call one of the
+ // flow control functions with the corresponding label.
+
+ // Compute dst = left + right, setting condition codes. dst may be same as
+ // either left or right (or a unique register). left and right must not be
+ // the same register.
+ void AddAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = r0);
+ void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
+ Register overflow_dst, Register scratch = r0);
+
+ // Compute dst = left - right, setting condition codes. dst may be same as
+ // either left or right (or a unique register). left and right must not be
+ // the same register.
+ void SubAndCheckForOverflow(Register dst, Register left, Register right,
+ Register overflow_dst, Register scratch = r0);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+ void TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input);
+
+ // Call a code stub.
+ void CallStubDelayed(CodeStub* stub);
+
+ void LoadConstantPoolPointerRegister();
+ void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
+ void AbortConstantPoolBuilding() {
+#ifdef DEBUG
+ // Avoid DCHECK(!is_linked()) failure in ~Label()
+ bind(ConstantPoolPosition());
+#endif
+ }
+
+ private:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+ bool has_frame_ = false;
+ Isolate* const isolate_;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public TurboAssembler {
+ public:
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
+
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
// ---------------------------------------------------------------------------
// GC Support
@@ -200,9 +653,7 @@ class MacroAssembler : public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
- Label* condition_met);
-
+ void JumpToJSEntry(Register target);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
@@ -273,93 +724,9 @@ class MacroAssembler : public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
- void Push(Register src) { push(src); }
-
- // Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2) {
- StorePU(src2, MemOperand(sp, -2 * kPointerSize));
- StoreP(src1, MemOperand(sp, kPointerSize));
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3) {
- StorePU(src3, MemOperand(sp, -3 * kPointerSize));
- StoreP(src2, MemOperand(sp, kPointerSize));
- StoreP(src1, MemOperand(sp, 2 * kPointerSize));
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4) {
- StorePU(src4, MemOperand(sp, -4 * kPointerSize));
- StoreP(src3, MemOperand(sp, kPointerSize));
- StoreP(src2, MemOperand(sp, 2 * kPointerSize));
- StoreP(src1, MemOperand(sp, 3 * kPointerSize));
- }
-
- // Push five registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4,
- Register src5) {
- StorePU(src5, MemOperand(sp, -5 * kPointerSize));
- StoreP(src4, MemOperand(sp, kPointerSize));
- StoreP(src3, MemOperand(sp, 2 * kPointerSize));
- StoreP(src2, MemOperand(sp, 3 * kPointerSize));
- StoreP(src1, MemOperand(sp, 4 * kPointerSize));
- }
-
- void Pop(Register dst) { pop(dst); }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2) {
- LoadP(src2, MemOperand(sp, 0));
- LoadP(src1, MemOperand(sp, kPointerSize));
- addi(sp, sp, Operand(2 * kPointerSize));
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3) {
- LoadP(src3, MemOperand(sp, 0));
- LoadP(src2, MemOperand(sp, kPointerSize));
- LoadP(src1, MemOperand(sp, 2 * kPointerSize));
- addi(sp, sp, Operand(3 * kPointerSize));
- }
-
- // Pop four registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3, Register src4) {
- LoadP(src4, MemOperand(sp, 0));
- LoadP(src3, MemOperand(sp, kPointerSize));
- LoadP(src2, MemOperand(sp, 2 * kPointerSize));
- LoadP(src1, MemOperand(sp, 3 * kPointerSize));
- addi(sp, sp, Operand(4 * kPointerSize));
- }
-
- // Pop five registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3, Register src4,
- Register src5) {
- LoadP(src5, MemOperand(sp, 0));
- LoadP(src4, MemOperand(sp, kPointerSize));
- LoadP(src3, MemOperand(sp, 2 * kPointerSize));
- LoadP(src2, MemOperand(sp, 3 * kPointerSize));
- LoadP(src1, MemOperand(sp, 4 * kPointerSize));
- addi(sp, sp, Operand(5 * kPointerSize));
- }
-
- // Push a fixed frame, consisting of lr, fp, constant pool.
- void PushCommonFrame(Register marker_reg = no_reg);
-
- // Push a standard frame, consisting of lr, fp, constant pool,
- // context and JS function
- void PushStandardFrame(Register function_reg);
-
void PopCommonFrame(Register marker_reg = no_reg);
- // Restore caller's frame pointer and return address prior to being
- // overwritten by tail call stack preparation.
- void RestoreFrameStateForTailCall();
-
+ void PushObject(Handle<Object> handle);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -371,77 +738,18 @@ class MacroAssembler : public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ // Loads the constant pool pointer (kConstantPoolRegister).
+ void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+ Register code_target_address);
+
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
// Does not handle errors.
void FlushICache(Register address, size_t size, Register scratch);
- // If the value is a NaN, canonicalize the value else, do nothing.
- void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
- void CanonicalizeNaN(const DoubleRegister value) {
- CanonicalizeNaN(value, value);
- }
-
- // Converts the integer (untagged smi) in |src| to a double, storing
- // the result to |dst|
- void ConvertIntToDouble(Register src, DoubleRegister dst);
-
- // Converts the unsigned integer (untagged smi) in |src| to
- // a double, storing the result to |dst|
- void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
-
- // Converts the integer (untagged smi) in |src| to
- // a float, storing the result in |dst|
- void ConvertIntToFloat(Register src, DoubleRegister dst);
- // Converts the unsigned integer (untagged smi) in |src| to
- // a float, storing the result in |dst|
- void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
-#if V8_TARGET_ARCH_PPC64
- void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
- void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
- void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
- void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
-#endif
- // Converts the double_input to an integer. Note that, upon return,
- // the contents of double_dst will also hold the fixed point representation.
- void ConvertDoubleToInt64(const DoubleRegister double_input,
-#if !V8_TARGET_ARCH_PPC64
- const Register dst_hi,
-#endif
- const Register dst, const DoubleRegister double_dst,
- FPRoundingMode rounding_mode = kRoundToZero);
-
-#if V8_TARGET_ARCH_PPC64
- // Converts the double_input to an unsigned integer. Note that, upon return,
- // the contents of double_dst will also hold the fixed point representation.
- void ConvertDoubleToUnsignedInt64(
- const DoubleRegister double_input, const Register dst,
- const DoubleRegister double_dst,
- FPRoundingMode rounding_mode = kRoundToZero);
-#endif
-
-#if !V8_TARGET_ARCH_PPC64
- void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
- void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
- void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
- void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
- void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
- void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
-#endif
-
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type, Register base = no_reg,
- int prologue_offset = 0);
- void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
@@ -456,9 +764,6 @@ class MacroAssembler : public Assembler {
bool restore_context,
bool argument_count_is_length = false);
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
@@ -477,26 +782,11 @@ class MacroAssembler : public Assembler {
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- }
-
// ----------------------------------------------------------------
// new PPC macro-assembler interfaces that are slightly higher level
// than assembler-ppc and may generate variable length sequences
- // load a literal signed int value <value> to GPR <dst>
- void LoadIntLiteral(Register dst, int value);
-
- // load an SMI value <value> to GPR <dst>
- void LoadSmiLiteral(Register dst, Smi* smi);
-
// load a literal double value <value> to FPR <result>
- void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
-
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void LoadWordArith(Register dst, const MemOperand& mem,
Register scratch = no_reg);
@@ -514,59 +804,11 @@ class MacroAssembler : public Assembler {
Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
-
- void LoadDouble(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
-
- void LoadSingle(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
- void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
-
- void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
- void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
-
- void StoreSingle(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
- void StoreSingleU(DoubleRegister src, const MemOperand& mem,
- Register scratch = no_reg);
-
- // Move values between integer and floating point registers.
- void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
- void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
- Register scratch);
- void MovInt64ToDouble(DoubleRegister dst,
-#if !V8_TARGET_ARCH_PPC64
- Register src_hi,
-#endif
- Register src);
-#if V8_TARGET_ARCH_PPC64
- void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
- Register src_lo, Register scratch);
-#endif
- void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
- void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
- void MovDoubleLowToInt(Register dst, DoubleRegister src);
- void MovDoubleHighToInt(Register dst, DoubleRegister src);
- void MovDoubleToInt64(
-#if !V8_TARGET_ARCH_PPC64
- Register dst_hi,
-#endif
- Register dst, DoubleRegister src);
- void MovIntToFloat(DoubleRegister dst, Register src);
- void MovFloatToInt(Register dst, DoubleRegister src);
- void Add(Register dst, Register src, intptr_t value, Register scratch);
void Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
- void Cmpli(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
- void Cmpwi(Register src1, const Operand& src2, Register scratch,
- CRegister cr = cr7);
void Cmplwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
@@ -582,17 +824,7 @@ class MacroAssembler : public Assembler {
void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
RCBit rc = LeaveRC);
- // Set new rounding mode RN to FPSCR
- void SetRoundingMode(FPRoundingMode RN);
-
- // reset rounding mode to default (kRoundToNearest)
- void ResetRoundingMode();
- // These exist to provide portability between 32 and 64bit
- void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
- void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -602,9 +834,6 @@ class MacroAssembler : public Assembler {
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
@@ -701,15 +930,6 @@ class MacroAssembler : public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register scratch1,
- Register scratch2, AllocationFlags flags);
-
- void FastAllocate(Register object_size, Register result, Register result_end,
- Register scratch, AllocationFlags flags);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -891,20 +1111,6 @@ class MacroAssembler : public Assembler {
// Usage: call the appropriate arithmetic function and then call one of the
// flow control functions with the corresponding label.
- // Compute dst = left + right, setting condition codes. dst may be same as
- // either left or right (or a unique register). left and right must not be
- // the same register.
- void AddAndCheckForOverflow(Register dst, Register left, Register right,
- Register overflow_dst, Register scratch = r0);
- void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
- Register overflow_dst, Register scratch = r0);
-
- // Compute dst = left - right, setting condition codes. dst may be same as
- // either left or right (or a unique register). left and right must not be
- // the same register.
- void SubAndCheckForOverflow(Register dst, Register left, Register right,
- Register overflow_dst, Register scratch = r0);
-
void BranchOnOverflow(Label* label) { blt(label, cr0); }
void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
@@ -916,11 +1122,13 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+ void CallJSEntry(Register target);
// Call a code stub.
+ void CallStub(CodeStub* stub, Condition cond = al);
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
@@ -950,56 +1158,12 @@ class MacroAssembler : public Assembler {
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, non-register arguments must be stored in
- // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized. If double arguments are used, this function assumes that
- // all double arguments are stored before core registers; otherwise the
- // correct alignment of the double values is not guaranteed.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments, Register scratch);
-
- // There are two ways of passing double arguments on ARM, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void MovToFloatParameter(DoubleRegister src);
- void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
- void MovToFloatResult(DoubleRegister src);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
- void MovFromFloatParameter(DoubleRegister dst);
- void MovFromFloatResult(DoubleRegister dst);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
-
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
@@ -1014,27 +1178,6 @@ class MacroAssembler : public Assembler {
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
// ---------------------------------------------------------------------------
// Number utilities
@@ -1054,81 +1197,7 @@ class MacroAssembler : public Assembler {
Label* zero_and_neg,
Label* not_power_of_two);
- // ---------------------------------------------------------------------------
- // Bit testing/extraction
- //
- // Bit numbering is such that the least significant bit is bit 0
- // (for consistency between 32/64-bit).
- // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
- // and, if !test, shift them into the least significant bits of dst.
- inline void ExtractBitRange(Register dst, Register src, int rangeStart,
- int rangeEnd, RCBit rc = LeaveRC,
- bool test = false) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
- int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
- int width = rangeStart - rangeEnd + 1;
- if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
- // Prefer faster andi when applicable.
- andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
- } else {
-#if V8_TARGET_ARCH_PPC64
- rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
-#else
- rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
- rc);
-#endif
- }
- }
-
- inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
- RCBit rc = LeaveRC, bool test = false) {
- ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
- }
-
- // Extract consecutive bits (defined by mask) from src and place them
- // into the least significant bits of dst.
- inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
- RCBit rc = LeaveRC, bool test = false) {
- int start = kBitsPerPointer - 1;
- int end;
- uintptr_t bit = (1L << start);
-
- while (bit && (mask & bit) == 0) {
- start--;
- bit >>= 1;
- }
- end = start;
- bit >>= 1;
-
- while (bit && (mask & bit)) {
- end--;
- bit >>= 1;
- }
-
- // 1-bits in mask must be contiguous
- DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
-
- ExtractBitRange(dst, src, start, end, rc, test);
- }
-
- // Test single bit in value.
- inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
- ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
- }
-
- // Test consecutive bit range in value. Range is defined by
- // rangeStart - rangeEnd.
- inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
- Register scratch = r0) {
- ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
- }
-
- // Test consecutive bit range in value. Range is defined by mask.
- inline void TestBitMask(Register value, uintptr_t mask,
- Register scratch = r0) {
- ExtractBitMask(scratch, value, mask, SetRC, true);
- }
// ---------------------------------------------------------------------------
@@ -1167,11 +1236,6 @@ class MacroAssembler : public Assembler {
bne(not_smi_label, cr0);
}
- void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
-
- void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
- ShiftRightArithImm(dst, src, kSmiShift, rc);
- }
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
@@ -1242,9 +1306,6 @@ class MacroAssembler : public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- inline void TestIfSmi(Register value, Register scratch) {
- TestBitRange(value, kSmiTagSize - 1, 0, scratch);
- }
inline void TestIfPositiveSmi(Register value, Register scratch) {
#if V8_TARGET_ARCH_PPC64
@@ -1255,11 +1316,6 @@ class MacroAssembler : public Assembler {
#endif
}
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
- TestIfSmi(value, r0);
- beq(smi_label, cr0); // branch if SMI
- }
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value, r0);
@@ -1275,21 +1331,6 @@ class MacroAssembler : public Assembler {
void AssertSmi(Register object);
-#if V8_TARGET_ARCH_PPC64
- inline void TestIfInt32(Register value, Register scratch,
- CRegister cr = cr7) {
- // High bits must be identical to fit into an 32-bit integer
- extsw(scratch, value);
- cmp(scratch, value, cr);
- }
-#else
- inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
- CRegister cr = cr7) {
- // High bits must be identical to fit into an 32-bit integer
- srawi(scratch, lo_word, 31);
- cmp(scratch, hi_word, cr);
- }
-#endif
#if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
@@ -1303,15 +1344,18 @@ class MacroAssembler : public Assembler {
#define SmiWordOffset(offset) offset
#endif
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register suspend_flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1416,11 +1460,6 @@ class MacroAssembler : public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg = false);
- // Returns the pc offset at which the frame ends.
- int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -1440,28 +1479,9 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);
- // Loads the constant pool pointer (kConstantPoolRegister).
- void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
- Register code_target_address);
- void LoadConstantPoolPointerRegister();
- void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
-
- void AbortConstantPoolBuilding() {
-#ifdef DEBUG
- // Avoid DCHECK(!is_linked()) failure in ~Label()
- bind(ConstantPoolPosition());
-#endif
- }
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
- CRegister cr = cr7);
-
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
@@ -1487,11 +1507,6 @@ class MacroAssembler : public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- bool generating_stub_;
- bool has_frame_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
diff --git a/deps/v8/src/profiler/OWNERS b/deps/v8/src/profiler/OWNERS
index 87c96616bc..991d9bafa6 100644
--- a/deps/v8/src/profiler/OWNERS
+++ b/deps/v8/src/profiler/OWNERS
@@ -1 +1,3 @@
alph@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index e2ed7f7817..8d8a3c7e1d 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -252,8 +252,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
- return ComputeIntegerHash(static_cast<uint32_t>(id),
- v8::internal::kZeroHashSeed);
+ return ComputeIntegerHash(static_cast<uint32_t>(id));
}
diff --git a/deps/v8/src/profiler/circular-queue-inl.h b/deps/v8/src/profiler/circular-queue-inl.h
index 428945a2ee..4ba774174a 100644
--- a/deps/v8/src/profiler/circular-queue-inl.h
+++ b/deps/v8/src/profiler/circular-queue-inl.h
@@ -24,7 +24,7 @@ SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::Peek() {
- base::MemoryBarrier();
+ base::MemoryFence();
if (base::Acquire_Load(&dequeue_pos_->marker) == kFull) {
return &dequeue_pos_->record;
}
@@ -41,7 +41,7 @@ void SamplingCircularQueue<T, L>::Remove() {
template<typename T, unsigned L>
T* SamplingCircularQueue<T, L>::StartEnqueue() {
- base::MemoryBarrier();
+ base::MemoryFence();
if (base::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
return &enqueue_pos_->record;
}
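
Peek() and StartEnqueue() pair a fence with an acquire load of the slot marker before touching the record, the usual publish/consume handshake for a sampling queue. A self-contained sketch of the same idea using std::atomic instead of V8's base atomics (names and types are illustrative):

#include <atomic>
#include <cstdint>

// One slot of a sampling queue: the marker is written with release
// semantics after the record is filled, and read with acquire semantics
// before the record is consumed.
struct Slot {
  std::atomic<int> marker{0};  // 0 = kEmpty, 1 = kFull
  uint64_t record = 0;
};

// Producer side: fill the record, then publish it.
void Enqueue(Slot& s, uint64_t value) {
  if (s.marker.load(std::memory_order_acquire) != 0) return;  // still full
  s.record = value;
  s.marker.store(1, std::memory_order_release);
}

// Consumer side: only read the record once the marker says kFull.
bool Dequeue(Slot& s, uint64_t* out) {
  if (s.marker.load(std::memory_order_acquire) != 1) return false;
  *out = s.record;
  s.marker.store(0, std::memory_order_release);
  return true;
}
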
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 85f9d5e475..80d488f12c 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -92,7 +92,7 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
void ProfilerEventsProcessor::StopSynchronously() {
- if (!base::NoBarrier_AtomicExchange(&running_, 0)) return;
+ if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
Join();
}
@@ -143,7 +143,7 @@ ProfilerEventsProcessor::SampleProcessingResult
void ProfilerEventsProcessor::Run() {
- while (!!base::NoBarrier_Load(&running_)) {
+ while (!!base::Relaxed_Load(&running_)) {
base::TimeTicks nextSampleTime =
base::TimeTicks::HighResolutionNow() + period_;
base::TimeTicks now;
@@ -300,6 +300,7 @@ void CpuProfiler::CollectSample() {
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
if (profiles_->StartProfiling(title, record_samples)) {
+ TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
StartProcessorIfNotStarted();
}
}
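
The base::NoBarrier_* helpers are renamed to base::Relaxed_*, matching std::memory_order_relaxed: the running_ flag needs atomicity but no ordering, since thread start and Join() provide the synchronization. A minimal stand-alone sketch of the stop-flag pattern, assuming that mapping:

#include <atomic>
#include <thread>

// Illustrative processor thread with a relaxed running flag, mirroring the
// shape of ProfilerEventsProcessor::Run()/StopSynchronously(). Not V8 code.
class Processor {
 public:
  void Run() {
    while (running_.load(std::memory_order_relaxed)) {
      // ... sample and process events ...
    }
  }
  void StopSynchronously(std::thread& worker) {
    // exchange() tells us whether we were the ones to stop it.
    if (!running_.exchange(false, std::memory_order_relaxed)) return;
    worker.join();  // joining orders everything the worker did before us
  }

 private:
  std::atomic<bool> running_{true};
};
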
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index a6872e4986..5fd7fa14da 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -138,7 +138,7 @@ class ProfilerEventsProcessor : public base::Thread {
// Thread control.
virtual void Run();
void StopSynchronously();
- INLINE(bool running()) { return !!base::NoBarrier_Load(&running_); }
+ INLINE(bool running()) { return !!base::Relaxed_Load(&running_); }
void Enqueue(const CodeEventsContainer& event);
// Puts current stack into tick sample events buffer.
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index cdd80ffc44..6110d9422f 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -172,14 +172,18 @@ namespace { // Avoid littering the global namespace.
template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
- static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 28;
+ static constexpr int kExpectedHeapGraphEdgeSize = 12;
+ static constexpr int kExpectedHeapEntrySize = 28;
};
+constexpr int SnapshotSizeConstants<4>::kExpectedHeapGraphEdgeSize;
+constexpr int SnapshotSizeConstants<4>::kExpectedHeapEntrySize;
template <> struct SnapshotSizeConstants<8> {
- static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 40;
+ static constexpr int kExpectedHeapGraphEdgeSize = 24;
+ static constexpr int kExpectedHeapEntrySize = 40;
};
+constexpr int SnapshotSizeConstants<8>::kExpectedHeapGraphEdgeSize;
+constexpr int SnapshotSizeConstants<8>::kExpectedHeapEntrySize;
} // namespace
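
The added namespace-scope lines such as constexpr int SnapshotSizeConstants<4>::kExpectedHeapEntrySize; are the out-of-class definitions that a static constexpr data member still needs before C++17 whenever it is ODR-used, for example by being bound to a const reference inside an assertion macro. A minimal illustration of the rule:

// Pre-C++17: the in-class initializer declares the member, but any ODR-use
// (taking its address, binding a reference to it) also requires a
// definition at namespace scope.
struct Constants {
  static constexpr int kEntrySize = 28;
};

// Without this line, passing Constants::kEntrySize by const reference
// (as assertion macros often do) can fail to link before C++17.
constexpr int Constants::kEntrySize;

const int& entry_size_ref = Constants::kEntrySize;  // ODR-use
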
@@ -677,9 +681,9 @@ SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
static_cast<int>(strlen(label)),
heap_->HashSeed());
intptr_t element_count = info->GetElementCount();
- if (element_count != -1)
- id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
- v8::internal::kZeroHashSeed);
+ if (element_count != -1) {
+ id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count));
+ }
return id << 1;
}
@@ -773,7 +777,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
- const char* name = names_->GetName(String::cast(shared->name()));
+ const char* name = names_->GetName(shared->name());
return AddEntry(object, HeapEntry::kClosure, name);
} else if (object->IsJSBoundFunction()) {
return AddEntry(object, HeapEntry::kClosure, "native_bind");
@@ -813,7 +817,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
- String* name = String::cast(SharedFunctionInfo::cast(object)->name());
+ String* name = SharedFunctionInfo::cast(object)->name();
return AddEntry(object,
HeapEntry::kCode,
names_->GetName(name));
@@ -1138,10 +1142,12 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetInternalReference(view, entry, "buffer", view->buffer(),
JSArrayBufferView::kBufferOffset);
}
- TagObject(js_obj->properties(), "(object properties)");
- SetInternalReference(obj, entry,
- "properties", js_obj->properties(),
- JSObject::kPropertiesOffset);
+
+ TagObject(js_obj->raw_properties_or_hash(), "(object properties)");
+ SetInternalReference(obj, entry, "properties",
+ js_obj->raw_properties_or_hash(),
+ JSObject::kPropertiesOrHashOffset);
+
TagObject(js_obj->elements(), "(object elements)");
SetInternalReference(obj, entry,
"elements", js_obj->elements(),
@@ -1323,8 +1329,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
Code::Kind2String(shared->code()->kind())));
}
- SetInternalReference(obj, entry,
- "name", shared->name(),
+ SetInternalReference(obj, entry, "raw_name", shared->raw_name(),
SharedFunctionInfo::kNameOffset);
SetInternalReference(obj, entry,
"code", shared->code(),
@@ -1446,8 +1451,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
code->type_feedback_info(),
Code::kTypeFeedbackInfoOffset);
}
- SetInternalReference(code, entry, "gc_metadata", code->gc_metadata(),
- Code::kGCMetadataOffset);
}
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
@@ -1472,8 +1475,9 @@ void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite* site) {
- SetInternalReference(site, entry, "transition_info", site->transition_info(),
- AllocationSite::kTransitionInfoOffset);
+ SetInternalReference(site, entry, "transition_info",
+ site->transition_info_or_boilerplate(),
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
SetInternalReference(site, entry, "nested_site", site->nested_site(),
AllocationSite::kNestedSiteOffset);
TagObject(site->dependent_code(), "(dependent code)");
@@ -1575,17 +1579,17 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
}
} else if (js_obj->IsJSGlobalObject()) {
// We assume that global objects can only have slow properties.
- GlobalDictionary* dictionary = js_obj->global_dictionary();
+ GlobalDictionary* dictionary =
+ JSGlobalObject::cast(js_obj)->global_dictionary();
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(isolate, k)) {
- DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
+ if (dictionary->IsKey(isolate, dictionary->KeyAt(i))) {
+ PropertyCell* cell = dictionary->CellAt(i);
+ Name* name = cell->name();
Object* value = cell->value();
PropertyDetails details = cell->property_details();
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
- Name::cast(k), value);
+ SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, name,
+ value);
}
}
} else {
@@ -1624,11 +1628,11 @@ void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
Isolate* isolate = js_obj->GetIsolate();
- if (js_obj->HasFastObjectElements()) {
+ if (js_obj->HasObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
- int length = js_obj->IsJSArray() ?
- Smi::cast(JSArray::cast(js_obj)->length())->value() :
- elements->length();
+ int length = js_obj->IsJSArray()
+ ? Smi::ToInt(JSArray::cast(js_obj)->length())
+ : elements->length();
for (int i = 0; i < length; ++i) {
if (!elements->get(i)->IsTheHole(isolate)) {
SetElementReference(js_obj, entry, i, elements->get(i));
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 84a23e4c0d..897d4a3069 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -297,8 +297,7 @@ class HeapEntriesMap {
private:
static uint32_t Hash(HeapThing thing) {
return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
- v8::internal::kZeroHashSeed);
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
}
base::HashMap entries_;
@@ -505,8 +504,7 @@ class NativeObjectsExplorer {
void VisitSubtreeWrapper(Object** p, uint16_t class_id);
static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
- return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
- v8::internal::kZeroHashSeed);
+ return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()));
}
static bool RetainedInfosMatch(void* key1, void* key2) {
return key1 == key2 ||
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 742d368390..75377f0f2d 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -96,23 +96,18 @@ CodeEntry::~CodeEntry() {
uint32_t CodeEntry::GetHash() const {
- uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
+ uint32_t hash = ComputeIntegerHash(tag());
if (script_id_ != v8::UnboundScript::kNoScriptId) {
- hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
- v8::internal::kZeroHashSeed);
- hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
- v8::internal::kZeroHashSeed);
+ hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_));
+ hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_));
} else {
hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
- v8::internal::kZeroHashSeed);
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
- v8::internal::kZeroHashSeed);
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
- v8::internal::kZeroHashSeed);
- hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
+ hash ^= ComputeIntegerHash(line_number_);
}
return hash;
}
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 7eb323a14e..169b12da07 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -220,8 +220,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
while (it.HasNext() &&
Translation::BEGIN !=
(opcode = static_cast<Translation::Opcode>(it.Next()))) {
- if (opcode != Translation::JS_FRAME &&
- opcode != Translation::INTERPRETED_FRAME) {
+ if (opcode != Translation::INTERPRETED_FRAME) {
it.Skip(Translation::NumberOfOperandsFor(opcode));
continue;
}
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 538223715b..c00d80d6c5 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -242,19 +242,28 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
timer = timer->parent();
}
if (i == frames_limit) break;
- if (!it.frame()->is_interpreted()) {
- frames[i++] = it.frame()->pc();
- continue;
+ if (it.frame()->is_interpreted()) {
+ // For interpreted frames use the bytecode array pointer as the pc.
+ i::InterpretedFrame* frame =
+ static_cast<i::InterpretedFrame*>(it.frame());
+      // Since the sampler can interrupt execution at any point, the
+ // bytecode_array might be garbage, so don't actually dereference it. We
+ // avoid the frame->GetXXX functions since they call BytecodeArray::cast,
+ // which has a heap access in its DCHECK.
+ i::Object* bytecode_array = i::Memory::Object_at(
+ frame->fp() + i::InterpreterFrameConstants::kBytecodeArrayFromFp);
+ i::Object* bytecode_offset = i::Memory::Object_at(
+ frame->fp() + i::InterpreterFrameConstants::kBytecodeOffsetFromFp);
+
+ // If the bytecode array is a heap object and the bytecode offset is a
+ // Smi, use those, otherwise fall back to using the frame's pc.
+ if (HAS_HEAP_OBJECT_TAG(bytecode_array) && HAS_SMI_TAG(bytecode_offset)) {
+ frames[i++] = reinterpret_cast<i::Address>(bytecode_array) +
+ i::Internals::SmiValue(bytecode_offset);
+ continue;
+ }
}
- // For interpreted frames use the bytecode array pointer as the pc.
- i::InterpretedFrame* frame = static_cast<i::InterpretedFrame*>(it.frame());
- // Since the sampler can interrupt execution at any point the
- // bytecode_array might be garbage, so don't dereference it.
- i::Address bytecode_array =
- reinterpret_cast<i::Address>(frame->GetBytecodeArray()) -
- i::kHeapObjectTag;
- frames[i++] = bytecode_array + i::BytecodeArray::kHeaderSize +
- frame->GetBytecodeOffset();
+ frames[i++] = it.frame()->pc();
}
sample_info->frames_count = i;
return true;
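
The interpreted-frame path now reads the bytecode array and bytecode offset slots straight off the frame and only trusts them when the tag bits look right, because the sample can land while those slots are being updated. The check relies on V8's pointer tagging, where the low bit distinguishes Smis from heap object pointers; a self-contained sketch of that classification (the layout constants here are illustrative, 32-bit style):

#include <cassert>
#include <cstdint>

// Illustrative tagging scheme: Smi words end in 0, heap object pointers
// end in 1. The real Smi shift depends on the architecture; this sketch
// uses the 32-bit-style 1-bit shift.
constexpr uintptr_t kTagMask = 1;
constexpr uintptr_t kSmiTag = 0;
constexpr uintptr_t kHeapObjectTag = 1;

bool HasSmiTag(uintptr_t word) { return (word & kTagMask) == kSmiTag; }
bool HasHeapObjectTag(uintptr_t word) {
  return (word & kTagMask) == kHeapObjectTag;
}

int main() {
  uintptr_t smi_word = static_cast<uintptr_t>(7) << 1;  // encodes Smi 7
  uintptr_t fake_heap_ptr = 0x1000 | kHeapObjectTag;
  assert(HasSmiTag(smi_word) && !HasSmiTag(fake_heap_ptr));
  assert(HasHeapObjectTag(fake_heap_ptr));
  return 0;
}
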
diff --git a/deps/v8/src/profiler/unbound-queue-inl.h b/deps/v8/src/profiler/unbound-queue-inl.h
index 8c45d09861..9d45903e04 100644
--- a/deps/v8/src/profiler/unbound-queue-inl.h
+++ b/deps/v8/src/profiler/unbound-queue-inl.h
@@ -66,7 +66,7 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
template<typename Record>
bool UnboundQueue<Record>::IsEmpty() const {
- return base::NoBarrier_Load(&divider_) == base::NoBarrier_Load(&last_);
+ return base::Relaxed_Load(&divider_) == base::Relaxed_Load(&last_);
}
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 92bf0b497f..d007a0414c 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -231,11 +231,11 @@ enum class PropertyCellConstantType {
class PropertyDetails BASE_EMBEDDED {
public:
// Property details for dictionary mode properties/elements.
- PropertyDetails(PropertyKind kind, PropertyAttributes attributes, int index,
- PropertyCellType cell_type) {
+ PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyCellType cell_type, int dictionary_index = 0) {
value_ = KindField::encode(kind) | LocationField::encode(kField) |
AttributesField::encode(attributes) |
- DictionaryStorageField::encode(index) |
+ DictionaryStorageField::encode(dictionary_index) |
PropertyCellTypeField::encode(cell_type);
}
@@ -252,7 +252,7 @@ class PropertyDetails BASE_EMBEDDED {
static PropertyDetails Empty(
PropertyCellType cell_type = PropertyCellType::kNoCell) {
- return PropertyDetails(kData, NONE, 0, cell_type);
+ return PropertyDetails(kData, NONE, cell_type);
}
int pointer() const { return DescriptorPointer::decode(value_); }
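
PropertyDetails packs kind, location, attributes, dictionary index and cell type into one word via BitField-style encode() calls; the constructor change only reorders parameters so the dictionary index can default to 0. A standalone sketch of that packing idiom, with illustrative field widths rather than V8's real layout:

#include <cstdint>

// Minimal BitField helper in the style used by PropertyDetails.
template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum PropertyKind { kData = 0, kAccessor = 1 };

using KindField = BitField<PropertyKind, 0, 1>;
using AttributesField = BitField<int, 1, 3>;
using DictionaryIndexField = BitField<int, 4, 10>;

// Pack and unpack one details word.
constexpr uint32_t details = KindField::encode(kData) |
                             AttributesField::encode(/*NONE*/ 0) |
                             DictionaryIndexField::encode(0);
static_assert(KindField::decode(details) == kData, "fields round-trip");
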
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index ab183d9e9f..5c744a1bd7 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -8,7 +8,6 @@
#include <iosfwd>
#include "src/factory.h"
-#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 3d973dbf9c..e98cd977f0 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -27,8 +27,6 @@ class PrototypeIterator {
public:
enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
- const int kProxyPrototypeLimit = 100 * 1000;
-
PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL)
@@ -160,7 +158,7 @@ class PrototypeIterator {
    // Due to possible __proto__ recursion, limit the number of Proxies
// we visit to an arbitrarily chosen large number.
seen_proxies_++;
- if (seen_proxies_ > kProxyPrototypeLimit) {
+ if (seen_proxies_ > JSProxy::kMaxIterationLimit) {
isolate_->StackOverflow();
return false;
}
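
The iterator counts proxies per walk and reports a stack overflow once the count passes JSProxy::kMaxIterationLimit, so an adversarially long or cyclic proxy prototype chain cannot spin forever. The shape of that guard, as a hedged sketch with an illustrative limit:

struct Node {
  const Node* prototype = nullptr;
  bool is_proxy = false;
};

// Walks a prototype-like chain but gives up after visiting too many proxy
// links. kMaxProxyIterations is illustrative; V8 uses
// JSProxy::kMaxIterationLimit.
constexpr int kMaxProxyIterations = 100 * 1000;

bool WalkChain(const Node* start) {
  int seen_proxies = 0;
  for (const Node* n = start; n != nullptr; n = n->prototype) {
    if (n->is_proxy && ++seen_proxies > kMaxProxyIterations) {
      return false;  // V8 would report a stack overflow here
    }
  }
  return true;
}
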
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index c493afa8f0..7f916e12ea 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -2,3 +2,5 @@ set noparent
jgruber@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 6ce35fff09..11a6bade88 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -290,7 +290,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
} else {
DCHECK(mode_ == UC16);
int argument_count = 4;
- __ PrepareCallCFunction(argument_count, r2);
+ __ PrepareCallCFunction(argument_count);
// r0 - offset of start of capture
// r1 - length of capture
@@ -665,7 +665,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ jmp(&return_r0);
__ bind(&stack_limit_hit);
- CallCheckStackGuardState(r0);
+ CallCheckStackGuardState();
__ cmp(r0, Operand::Zero());
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &return_r0);
@@ -841,7 +841,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- CallCheckStackGuardState(r0);
+ CallCheckStackGuardState();
__ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
@@ -860,7 +860,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r0);
+ __ PrepareCallCFunction(num_arguments);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
@@ -886,7 +886,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(isolate(), &code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(masm_->isolate(),
@@ -1046,8 +1046,8 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
-void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- __ PrepareCallCFunction(3, scratch);
+void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
+ __ PrepareCallCFunction(3);
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index a522f53d4a..8b067e998b 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -140,7 +140,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
+ void CallCheckStackGuardState();
// The ebp-relative location of a regexp register.
MemOperand register_location(int register_index);
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index f740470ae3..e8887b2694 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -1078,7 +1078,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(isolate(), &code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(masm_->isolate(),
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index c279304777..35008b7b8a 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -932,7 +932,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(masm_->isolate(), &code_desc);
Handle<Code> code =
isolate()->factory()->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index f27f43aa5c..83dca70804 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -175,7 +175,6 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
switch (insn & BYTECODE_MASK) {
BYTECODE(BREAK)
UNREACHABLE();
- return RegExpImpl::RE_FAILURE;
BYTECODE(PUSH_CP)
if (--backtrack_stack_space < 0) {
return RegExpImpl::RE_EXCEPTION;
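
Trailing return statements after UNREACHABLE() are removed throughout this commit, which only compiles warning-free if the macro is treated as non-returning. Assuming that is the case, a small sketch of how a [[noreturn]] helper makes such returns dead code:

#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for V8's UNREACHABLE(): a [[noreturn]] helper lets
// the compiler prove that nothing needs to be returned after it.
[[noreturn]] void FatalUnreachable(const char* file, int line) {
  std::fprintf(stderr, "unreachable code at %s:%d\n", file, line);
  std::abort();
}
#define UNREACHABLE_SKETCH() FatalUnreachable(__FILE__, __LINE__)

int Classify(int kind) {
  switch (kind) {
    case 0: return 10;
    case 1: return 20;
    default:
      UNREACHABLE_SKETCH();  // no `return 0;` needed after this
  }
}
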
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 61cabd0b94..dc4727c38a 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -48,8 +48,6 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/regexp/x87/regexp-macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
@@ -201,7 +199,6 @@ MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
}
default:
UNREACHABLE();
- return MaybeHandle<Object>();
}
}
@@ -320,15 +317,6 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
#else // V8_INTERPRETED_REGEXP (RegExp native code)
if (compiled_code->IsCode()) return true;
#endif
- // We could potentially have marked this as flushable, but have kept
- // a saved version if we did not flush it yet.
- Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
- if (saved_code->IsCode()) {
- // Reinstate the code in the original place.
- re->SetDataAt(JSRegExp::code_index(is_one_byte), saved_code);
- DCHECK(compiled_code->IsSmi());
- return true;
- }
return CompileIrregexp(re, sample_subject, is_one_byte);
}
@@ -340,28 +328,14 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
Isolate* isolate = re->GetIsolate();
Zone zone(isolate->allocator(), ZONE_NAME);
PostponeInterruptsScope postpone(isolate);
- // If we had a compilation error the last time this is saved at the
- // saved code index.
+#ifdef DEBUG
Object* entry = re->DataAt(JSRegExp::code_index(is_one_byte));
- // When arriving here entry can only be a smi, either representing an
- // uncompiled regexp, a previous compilation error, or code that has
- // been flushed.
+  // When arriving here, entry can only be a smi representing an uncompiled
+ // regexp.
DCHECK(entry->IsSmi());
- int entry_value = Smi::cast(entry)->value();
- DCHECK(entry_value == JSRegExp::kUninitializedValue ||
- entry_value == JSRegExp::kCompilationErrorValue ||
- (entry_value < JSRegExp::kCodeAgeMask && entry_value >= 0));
-
- if (entry_value == JSRegExp::kCompilationErrorValue) {
- // A previous compilation failed and threw an error which we store in
- // the saved code index (we store the error message, not the actual
- // error). Recreate the error object and throw it.
- Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
- DCHECK(error_string->IsString());
- Handle<String> error_message(String::cast(error_string));
- ThrowRegExpException(re, error_message);
- return false;
- }
+ int entry_value = Smi::ToInt(entry);
+ DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
+#endif
JSRegExp::Flags flags = re->GetFlags();
@@ -419,12 +393,12 @@ void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
}
int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
- return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
+ return Smi::ToInt(re->get(JSRegExp::kIrregexpCaptureCountIndex));
}
int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
- return Smi::cast(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
+ return Smi::ToInt(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex));
}
@@ -526,7 +500,6 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
is_one_byte = subject->IsOneByteRepresentationUnderneath();
} while (true);
UNREACHABLE();
- return RE_EXCEPTION;
#else // V8_INTERPRETED_REGEXP
DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
@@ -903,7 +876,6 @@ int TextElement::length() const {
return 1;
}
UNREACHABLE();
- return 0;
}
@@ -6788,9 +6760,6 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_MIPS64
RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
-#elif V8_TARGET_ARCH_X87
- RegExpMacroAssemblerX87 macro_assembler(isolate, zone, mode,
- (data->capture_count + 1) * 2);
#else
#error "Unsupported architecture"
#endif
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 11590599f9..1a8f2c8d8e 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -898,7 +898,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(isolate(), &code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
LOG(masm_->isolate(),
@@ -1091,7 +1091,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// Align the stack pointer and save the original sp value on the stack.
__ mov(scratch, sp);
__ Subu(sp, sp, Operand(kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
__ sw(scratch, MemOperand(sp));
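
base::bits::IsPowerOfTwo32 is generalized to IsPowerOfTwo, and the surrounding code then relies on the standard trick that masking with the negated alignment rounds the stack pointer down to an aligned address, which is only sound when the alignment is a power of two. Both pieces as a small standalone sketch:

#include <cassert>
#include <cstdint>

// Power-of-two test that works for any unsigned width, in the spirit of
// base::bits::IsPowerOfTwo.
template <typename T>
constexpr bool IsPowerOfTwo(T value) {
  return value > 0 && (value & (value - 1)) == 0;
}

// Round an address down to `alignment` bytes; only valid when alignment is
// a power of two, which is exactly what the DCHECK above guards.
constexpr uintptr_t AlignDown(uintptr_t address, uintptr_t alignment) {
  return address & ~(alignment - 1);
}

int main() {
  static_assert(IsPowerOfTwo(8u), "8 is a power of two");
  assert(!IsPowerOfTwo(12u));
  assert(AlignDown(0x1007, 8) == 0x1000);
  return 0;
}
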
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 595d6fd4de..651e3007fe 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -936,7 +936,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(isolate(), &code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
LOG(masm_->isolate(),
@@ -1129,7 +1129,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// Align the stack pointer and save the original sp value on the stack.
__ mov(scratch, sp);
__ Dsubu(sp, sp, Operand(kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
__ Sd(scratch, MemOperand(sp));
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 8f03bcdee8..a1425b4372 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -932,7 +932,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(isolate(), &code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(masm_->isolate(),
@@ -1101,7 +1101,7 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// -- preserving original value of sp.
__ mr(scratch, sp);
__ addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
__ StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 7065ecd96c..c1ea0e7926 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_AST_H_
#include "src/objects.h"
+#include "src/objects/string.h"
#include "src/utils.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 20f023930f..8e9d3150c7 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -1927,7 +1927,6 @@ bool RegExpBuilder::AddQuantifierToAtom(
} else {
// Only call immediately after adding an atom or character!
UNREACHABLE();
- return false;
}
terms_.Add(new (zone()) RegExpQuantifier(min, max, quantifier_type, atom),
zone());
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 570a348f74..88ecb85dfa 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -152,7 +152,7 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
Object* last_index = JSRegExp::cast(recv)->LastIndex();
- return last_index->IsSmi() && Smi::cast(last_index)->value() >= 0;
+ return last_index->IsSmi() && Smi::ToInt(last_index) >= 0;
}
int RegExpUtils::AdvanceStringIndex(Isolate* isolate, Handle<String> string,
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index e2fe913b36..f31b217acf 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -928,7 +928,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_->GetCode(isolate(), &code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(masm_->isolate(),
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 8c51233e29..9b0352d863 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -1005,8 +1005,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
FixupCodeRelativePositions();
CodeDesc code_desc;
- masm_.GetCode(&code_desc);
Isolate* isolate = this->isolate();
+ masm_.GetCode(isolate, &code_desc);
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
masm_.CodeObject());
diff --git a/deps/v8/src/regexp/x87/OWNERS b/deps/v8/src/regexp/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/regexp/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
deleted file mode 100644
index 622a36e021..0000000000
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ /dev/null
@@ -1,1273 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/regexp/x87/regexp-macro-assembler-x87.h"
-
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/regexp/regexp-stack.h"
-#include "src/unicode.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - edx : Current character. Must be loaded using LoadCurrentCharacter
- * before using any of the dispatch methods. Temporarily stores the
- * index of capture start after a matching pass for a global regexp.
- * - edi : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - esi : end of input (points to byte after last character in input).
- * - ebp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - esp : Points to tip of C stack.
- * - ecx : Points to tip of backtrack stack
- *
- * The registers eax and ebx are free to use for computations.
- *
- * Each call to a public method should retain this convention.
- * The stack will have the following structure:
- * - Isolate* isolate (address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0
- * call through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
- * - capture array size (may fit multiple sets of matches)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
- * - start index (character index of start)
- * - String* input_string (location of a handle containing the string)
- * --- frame alignment (if applicable) ---
- * - return address
- * ebp-> - old ebp
- * - backup of caller esi
- * - backup of caller edi
- * - backup of caller ebx
- * - success counter (only for global regexps to count matches).
- * - Offset of location before start of input (effectively character
- * string start - 1). Used to initialize capture registers to a
- * non-position.
- * - register 0 ebp[-4] (only positions must be stored in the first
- * - register 1 ebp[-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * int num_capture_registers,
- * byte* stack_area_base,
- * bool direct_call = false,
- * Isolate* isolate);
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone,
- Mode mode,
- int registers_to_save)
- : NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
- CodeObjectRequired::kYes)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- DCHECK_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerX87::~RegExpMacroAssemblerX87() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerX87::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerX87::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(edi, Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerX87::AdvanceRegister(int reg, int by) {
- DCHECK(reg >= 0);
- DCHECK(reg < num_registers_);
- if (by != 0) {
- __ add(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerX87::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerX87::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerX87::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX87::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerX87::CheckAtStart(Label* on_at_start) {
- __ lea(eax, Operand(edi, -char_size()));
- __ cmp(eax, Operand(ebp, kStringStartMinusOne));
- BranchOrBacktrack(equal, on_at_start);
-}
-
-
-void RegExpMacroAssemblerX87::CheckNotAtStart(int cp_offset,
- Label* on_not_at_start) {
- __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
- __ cmp(eax, Operand(ebp, kStringStartMinusOne));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerX87::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmp(edi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
- Label fallthrough;
- __ mov(edx, register_location(start_reg)); // Index of start of capture
- __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, edx); // Length of capture.
-
- // At this point, the capture registers are either both set or both cleared.
- // If the capture length is zero, then the capture is either empty or cleared.
- // Fall through in both cases.
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- if (read_backward) {
- __ mov(eax, Operand(ebp, kStringStartMinusOne));
- __ add(eax, ebx);
- __ cmp(edi, eax);
- BranchOrBacktrack(less_equal, on_no_match);
- } else {
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
- }
-
- if (mode_ == LATIN1) {
- Label success;
- Label fail;
- Label loop_increment;
- // Save register contents to make the registers available below.
- __ push(edi);
- __ push(backtrack_stackpointer());
- // After this, the eax, ecx, and edi registers are available.
-
- __ add(edx, esi); // Start of capture
- __ add(edi, esi); // Start of text to match against capture.
- if (read_backward) {
- __ sub(edi, ebx); // Offset by length when matching backwards.
- }
- __ add(ebx, edi); // End of text to match against capture.
-
- Label loop;
- __ bind(&loop);
- __ movzx_b(eax, Operand(edi, 0));
- __ cmpb_al(Operand(edx, 0));
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ or_(eax, 0x20); // Convert match character to lower-case.
- __ lea(ecx, Operand(eax, -'a'));
- __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
- Label convert_capture;
- __ j(below_equal, &convert_capture); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ sub(ecx, Immediate(224 - 'a'));
- __ cmp(ecx, Immediate(254 - 224));
- __ j(above, &fail); // Weren't Latin-1 letters.
- __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
- __ j(equal, &fail);
- __ bind(&convert_capture);
- // Also convert capture character.
- __ movzx_b(ecx, Operand(edx, 0));
- __ or_(ecx, 0x20);
-
- __ cmp(eax, ecx);
- __ j(not_equal, &fail);
-
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ add(edx, Immediate(1));
- __ add(edi, Immediate(1));
- // Compare to end of match, and loop if not done.
- __ cmp(edi, ebx);
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore original values before failing.
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Restore original value before continuing.
- __ pop(backtrack_stackpointer());
- // Drop original value of character position.
- __ add(esp, Immediate(kPointerSize));
- // Compute new value of character position after the matched part.
- __ sub(edi, esi);
- if (read_backward) {
- // Subtract match length if we matched backward.
- __ add(edi, register_location(start_reg));
- __ sub(edi, register_location(start_reg + 1));
- }
- } else {
- DCHECK(mode_ == UC16);
- // Save registers before calling C function.
- __ push(esi);
- __ push(edi);
- __ push(backtrack_stackpointer());
- __ push(ebx);
-
- static const int argument_count = 4;
- __ PrepareCallCFunction(argument_count, ecx);
- // Put arguments into allocated stack area, last argument highest on stack.
- // Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
-// Isolate* isolate or 0 if unicode flag.
-
- // Set isolate.
-#ifdef V8_INTL_SUPPORT
- if (unicode) {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
- } else // NOLINT
-#endif // V8_INTL_SUPPORT
- {
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- }
- // Set byte_length.
- __ mov(Operand(esp, 2 * kPointerSize), ebx);
- // Set byte_offset2.
- // Found by adding negative string-end offset of current position (edi)
- // to end of string.
- __ add(edi, esi);
- if (read_backward) {
- __ sub(edi, ebx); // Offset by length when matching backwards.
- }
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- // Set byte_offset1.
- // Start of capture, where edx already holds string-end negative offset.
- __ add(edx, esi);
- __ mov(Operand(esp, 0 * kPointerSize), edx);
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
- __ CallCFunction(compare, argument_count);
- }
- // Pop original values before reacting on result value.
- __ pop(ebx);
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- __ pop(esi);
-
- // Check if function returned non-zero for success or zero for failure.
- __ or_(eax, eax);
- BranchOrBacktrack(zero, on_no_match);
- // On success, advance position by length of capture.
- if (read_backward) {
- __ sub(edi, ebx);
- } else {
- __ add(edi, ebx);
- }
- }
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX87::CheckNotBackReference(int start_reg,
- bool read_backward,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
- Label fail;
-
- // Find length of back-referenced capture.
- __ mov(edx, register_location(start_reg));
- __ mov(eax, register_location(start_reg + 1));
- __ sub(eax, edx); // Length to check.
-
- // At this point, the capture registers are either both set or both cleared.
- // If the capture length is zero, then the capture is either empty or cleared.
- // Fall through in both cases.
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- if (read_backward) {
- __ mov(ebx, Operand(ebp, kStringStartMinusOne));
- __ add(ebx, eax);
- __ cmp(edi, ebx);
- BranchOrBacktrack(less_equal, on_no_match);
- } else {
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
- }
-
- // Save register to make it available below.
- __ push(backtrack_stackpointer());
-
- // Compute pointers to match string and capture string
- __ add(edx, esi); // Start of capture.
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- if (read_backward) {
- __ sub(ebx, eax); // Offset by length when matching backwards.
- }
- __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
-
- Label loop;
- __ bind(&loop);
- if (mode_ == LATIN1) {
- __ movzx_b(eax, Operand(edx, 0));
- __ cmpb_al(Operand(ebx, 0));
- } else {
- DCHECK(mode_ == UC16);
- __ movzx_w(eax, Operand(edx, 0));
- __ cmpw_ax(Operand(ebx, 0));
- }
- __ j(not_equal, &fail);
- // Increment pointers into capture and match string.
- __ add(edx, Immediate(char_size()));
- __ add(ebx, Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmp(ebx, ecx);
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Move current character position to position after match.
- __ mov(edi, ecx);
- __ sub(edi, esi);
- if (read_backward) {
- // Subtract match length if we matched backward.
- __ add(edi, register_location(start_reg));
- __ sub(edi, register_location(start_reg + 1));
- }
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX87::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX87::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX87::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX87::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- DCHECK(minus < String::kMaxUtf16CodeUnit);
- __ lea(eax, Operand(current_character(), -minus));
- if (c == 0) {
- __ test(eax, Immediate(mask));
- } else {
- __ and_(eax, mask);
- __ cmp(eax, c);
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX87::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerX87::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(above, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerX87::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(eax, Immediate(table));
- Register index = current_character();
- if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
- __ mov(ebx, kTableSize - 1);
- __ and_(ebx, current_character());
- index = ebx;
- }
- __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize),
- Immediate(0));
- BranchOrBacktrack(not_equal, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerX87::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == LATIN1) {
- // One byte space characters are '\t'..'\r', ' ' and \u00a0.
- Label success;
- __ cmp(current_character(), ' ');
- __ j(equal, &success, Label::kNear);
- // Check range 0x09..0x0d
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- __ j(below_equal, &success, Label::kNear);
- // \u00a0 (NBSP).
- __ cmp(eax, 0x00a0 - '\t');
- BranchOrBacktrack(not_equal, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // The emitted code for generic character classes is good enough.
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
- __ cmp(eax, 0x2029 - 0x2028);
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'w': {
- if (mode_ != LATIN1) {
- // Table is 256 entries, so all Latin1 characters can be tested.
- __ cmp(current_character(), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != LATIN1) {
- // Table is 256 entries, so all Latin1 characters can be tested.
- __ cmp(current_character(), Immediate('z'));
- __ j(above, &done);
- }
- DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != LATIN1) {
- __ bind(&done);
- }
- return true;
- }
- // Non-standard classes (with no syntactic shorthand) used internally.
- case '*':
- // Match any character.
- return true;
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
- // The opposite of '.'.
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- if (mode_ == LATIN1) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- DCHECK_EQ(UC16, mode_);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
- __ cmp(eax, 1);
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerX87::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Move(eax, Immediate(FAILURE));
- }
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
- Label return_eax;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- __ push(ebp);
- __ mov(ebp, esp);
- // Save callee-save registers. Order here should correspond to order of
- // kBackup_ebx etc.
- __ push(esi);
- __ push(edi);
- __ push(ebx); // Callee-save on MacOS.
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "string start - 1" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_limit));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(ecx, num_registers_ * kPointerSize);
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_eax);
-
- __ bind(&stack_ok);
- // Load start index for later use.
- __ mov(ebx, Operand(ebp, kStartIndex));
-
- // Allocate space on stack for registers.
- __ sub(esp, Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ mov(esi, Operand(ebp, kInputEnd));
- // Load input position.
- __ mov(edi, Operand(ebp, kInputStart));
- // Set up edi to be negative offset from string end.
- __ sub(edi, esi);
-
- // Set eax to address of char before start of the string.
- // (effectively string position -1).
- __ neg(ebx);
- if (mode_ == UC16) {
- __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
- } else {
- __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ mov(Operand(ebp, kStringStartMinusOne), eax);
-
-#if V8_OS_WIN
- // Ensure that we write to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ mov(register_location(i), eax); // One write every page.
- }
-#endif // V8_OS_WIN
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ mov(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ mov(ecx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ mov(Operand(ebp, ecx, times_1, 0), eax);
- __ sub(ecx, Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(register_location(i), eax);
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ mov(ebx, Operand(ebp, kRegisterOutput));
- __ mov(ecx, Operand(ebp, kInputEnd));
- __ mov(edx, Operand(ebp, kStartIndex));
- __ sub(ecx, Operand(ebp, kInputStart));
- if (mode_ == UC16) {
- __ lea(ecx, Operand(ecx, edx, times_2, 0));
- } else {
- __ add(ecx, edx);
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(eax, register_location(i));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in edx for the zero-length check later.
- __ mov(edx, eax);
- }
- // Convert to index from start of string, not end.
- __ add(eax, ecx);
- if (mode_ == UC16) {
- __ sar(eax, 1); // Convert byte index to character index.
- }
- __ mov(Operand(ebx, i * kPointerSize), eax);
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ inc(Operand(ebp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ mov(ecx, Operand(ebp, kNumOutputRegisters));
- __ sub(ecx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(ecx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ mov(Operand(ebp, kNumOutputRegisters), ecx);
- // Advance the location for output.
- __ add(Operand(ebp, kRegisterOutput),
- Immediate(num_saved_registers_ * kPointerSize));
-
- // Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kStringStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // edx: capture start index
- __ cmp(edi, edx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // edi (offset from the end) is zero if we already reached the end.
- __ test(edi, edi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- Label advance;
- __ bind(&advance);
- if (mode_ == UC16) {
- __ add(edi, Immediate(2));
- } else {
- __ inc(edi);
- }
- if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
- }
- __ jmp(&load_char_start_regexp);
- } else {
- __ mov(eax, Immediate(SUCCESS));
- }
- }
-
- __ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ mov(eax, Operand(ebp, kSuccessfulCaptures));
- }
-
- __ bind(&return_eax);
- // Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kBackup_ebx));
- // Restore callee-save registers.
- __ pop(ebx);
- __ pop(edi);
- __ pop(esi);
- // Exit function frame, restore previous one.
- __ pop(ebp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(edi);
-
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &return_eax);
-
- __ pop(edi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload esi from frame.
- __ mov(esi, Operand(ebp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
- __ push(esi);
- __ push(edi);
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- __ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kPointerSize), eax);
- __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ or_(eax, eax);
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), eax);
- // Restore saved registers and continue.
- __ pop(edi);
- __ pop(esi);
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code =
- isolate()->factory()->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(masm_->isolate(),
- RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerX87::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerX87::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerX87::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerX87::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmp(edi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerX87::Implementation() {
- return kX87Implementation;
-}
-
-
-void RegExpMacroAssemblerX87::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- if (cp_offset >= 0) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- } else {
- CheckPosition(cp_offset, on_end_of_input);
- }
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerX87::PopCurrentPosition() {
- Pop(edi);
-}
-
-
-void RegExpMacroAssemblerX87::PopRegister(int register_index) {
- Pop(eax);
- __ mov(register_location(register_index), eax);
-}
-
-
-void RegExpMacroAssemblerX87::PushBacktrack(Label* label) {
- Push(Immediate::CodeRelativeOffset(label));
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX87::PushCurrentPosition() {
- Push(edi);
-}
-
-
-void RegExpMacroAssemblerX87::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ mov(eax, register_location(register_index));
- Push(eax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX87::ReadCurrentPositionFromRegister(int reg) {
- __ mov(edi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerX87::ReadStackPointerFromRegister(int reg) {
- __ mov(backtrack_stackpointer(), register_location(reg));
- __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-}
-
-void RegExpMacroAssemblerX87::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(edi, -by * char_size());
- __ j(greater_equal, &after_position, Label::kNear);
- __ mov(edi, -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerX87::SetRegister(int register_index, int to) {
- DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(register_location(register_index), Immediate(to));
-}
-
-
-bool RegExpMacroAssemblerX87::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerX87::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ mov(register_location(reg), edi);
- } else {
- __ lea(eax, Operand(edi, cp_offset * char_size()));
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) {
- DCHECK(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kStringStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerX87::WriteStackPointerToRegister(int reg) {
- __ mov(eax, backtrack_stackpointer());
- __ sub(eax, Operand(ebp, kStackHighEnd));
- __ mov(register_location(reg), eax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerX87::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(Operand(esp, 2 * kPointerSize), ebp);
- // Code* of self.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
- // Next address on the stack (will be address of return address).
- __ lea(eax, Operand(esp, -kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- ExternalReference check_stack_guard =
- ExternalReference::re_check_stack_guard_state(isolate());
- __ CallCFunction(check_stack_guard, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-template <typename T>
-static T* frame_entry_address(Address re_frame, int frame_offset) {
- return reinterpret_cast<T*>(re_frame + frame_offset);
-}
-
-
-int RegExpMacroAssemblerX87::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- return NativeRegExpMacroAssembler::CheckStackGuardState(
- frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<int>(re_frame, kStartIndex),
- frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
- frame_entry_address<String*>(re_frame, kInputString),
- frame_entry_address<const byte*>(re_frame, kInputStart),
- frame_entry_address<const byte*>(re_frame, kInputEnd));
-}
-
-
-Operand RegExpMacroAssemblerX87::register_location(int register_index) {
- DCHECK(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(ebp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerX87::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- if (cp_offset >= 0) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
- } else {
- __ lea(eax, Operand(edi, cp_offset * char_size()));
- __ cmp(eax, Operand(ebp, kStringStartMinusOne));
- BranchOrBacktrack(less_equal, on_outside_input);
- }
-}
-
-
-void RegExpMacroAssemblerX87::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_);
- return;
- }
- __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerX87::SafeCall(Label* to) {
- Label return_to;
- __ push(Immediate::CodeRelativeOffset(&return_to));
- __ jmp(to);
- __ bind(&return_to);
-}
-
-
-void RegExpMacroAssemblerX87::SafeReturn() {
- __ pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerX87::SafeCallTarget(Label* name) {
- __ bind(name);
-}
-
-
-void RegExpMacroAssemblerX87::Push(Register source) {
- DCHECK(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerX87::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerX87::Pop(Register target) {
- DCHECK(!target.is(backtrack_stackpointer()));
- __ mov(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ add(backtrack_stackpointer(), Immediate(kPointerSize));
-}
-
-
-void RegExpMacroAssemblerX87::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above, &no_preempt);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerX87::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(isolate());
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == LATIN1) {
- if (characters == 4) {
- __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else {
- DCHECK(characters == 1);
- __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
- }
- } else {
- DCHECK(mode_ == UC16);
- if (characters == 2) {
- __ mov(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- } else {
- DCHECK(characters == 1);
- __ movzx_w(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
deleted file mode 100644
index 2f689612b7..0000000000
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
-#define V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
-
-#include "src/macro-assembler.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/x87/assembler-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone, Mode mode,
- int registers_to_save);
- virtual ~RegExpMacroAssemblerX87();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, bool read_backward,
- Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward, bool unicode,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from ebp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
- // Parameters.
- static const int kInputString = kFrameAlign;
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
- // Below the frame pointer - local stack variables.
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kBackup_esi = kFramePointer - kPointerSize;
- static const int kBackup_edi = kBackup_esi - kPointerSize;
- static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return edx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return ecx; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (ecx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (ecx) and increments it by a word size.
- inline void Pop(Register target);
-
- Isolate* isolate() const { return masm_->isolate(); }
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-#endif // V8_INTERPRETED_REGEXP
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index af35fd3b03..70f044ebae 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -74,9 +74,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
#if V8_TARGET_ARCH_IA32
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
-#elif V8_TARGET_ARCH_X87
- kMaxAllocatableGeneralRegisterCount,
- compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index c59488444b..58b62db898 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -28,8 +28,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static const int kMaxFPRegisters = 32;
// Default RegisterConfigurations for the target architecture.
- // TODO(X87): This distinction in RegisterConfigurations is temporary
- // until x87 TF supports all of the registers that Crankshaft does.
+ // TODO(mstarzinger): Crankshaft is gone.
static const RegisterConfiguration* Crankshaft();
static const RegisterConfiguration* Turbofan();
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 56ab1b89e4..3220d29a66 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -35,6 +35,12 @@ STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+// The number of ticks required for optimizing a function increases with
+// the size of the bytecode. This is in addition to the
+// kProfilerTicksBeforeOptimization required for any function.
+static const int kCodeSizeAllowancePerTickIgnition =
+ 50 * interpreter::Interpreter::kCodeSizeMultiplier;
+
 // Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRCodeSizeAllowanceBase =
100 * FullCodeGenerator::kCodeSizeMultiplier;
@@ -57,7 +63,7 @@ static const int kMaxSizeEarlyOptIgnition =
// We aren't using the code size multiplier here because there is no
// "kMaxSizeOpt" with which we would need to normalize. This constant is
// only for optimization decisions coming into TurboFan from Ignition.
-static const int kMaxSizeOptIgnition = 80 * KB;
+static const int kMaxSizeOptIgnition = 60 * KB;
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
@@ -150,7 +156,7 @@ void RuntimeProfiler::Optimize(JSFunction* function,
OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
- function->AttemptConcurrentOptimization();
+ function->MarkForOptimization(ConcurrencyMode::kConcurrent);
}
void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
@@ -218,17 +224,14 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
} else if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
- function->IsOptimized())) {
+ function->HasOptimizedCode())) {
     // Attempt OSR if we are still running unoptimized code even though
     // the function has long been marked or even already been optimized.
- int ticks = shared_code->profiler_ticks();
+ int ticks = shared->profiler_ticks();
int64_t allowance =
kOSRCodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
- if (shared_code->CodeSize() > allowance &&
- ticks < Code::ProfilerTicksField::kMax) {
- shared_code->set_profiler_ticks(ticks + 1);
- } else {
+ if (shared_code->CodeSize() <= allowance) {
AttemptOnStackReplacement(frame);
}
return;
@@ -248,19 +251,17 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
if (shared->deopt_count() >= FLAG_max_deopt_count) {
// If optimization was disabled due to many deoptimizations,
// then check if the function is hot and try to reenable optimization.
- int ticks = shared_code->profiler_ticks();
+ int ticks = shared->profiler_ticks();
if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
- shared_code->set_profiler_ticks(0);
+ shared->set_profiler_ticks(0);
shared->TryReenableOptimization();
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
}
}
return;
}
if (frame->is_optimized()) return;
- int ticks = shared_code->profiler_ticks();
+ int ticks = shared->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
@@ -274,7 +275,6 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, OptimizationReason::kHotWithoutMuchTypeInfo);
} else {
- shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -292,11 +292,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
Optimize(function, OptimizationReason::kSmallFunction);
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
}
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
}
}
@@ -353,7 +349,7 @@ bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
- function->IsOptimized())) {
+ function->HasOptimizedCode())) {
     // Attempt OSR if we are still running interpreted code even though
     // the function has long been marked or even already been optimized.
int64_t allowance =
@@ -376,7 +372,10 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
return OptimizationReason::kDoNotOptimize;
}
- if (ticks >= kProfilerTicksBeforeOptimization) {
+ int ticks_for_optimization =
+ kProfilerTicksBeforeOptimization +
+ (shared->bytecode_array()->Size() / kCodeSizeAllowancePerTickIgnition);
+ if (ticks >= ticks_for_optimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
@@ -453,17 +452,12 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
MaybeOptimizeFullCodegen(function, frame, frame_count);
}
- // Update shared function info ticks after checking for whether functions
- // should be optimized to keep FCG (which updates ticks on code) and
- // Ignition (which updates ticks on shared function info) in sync.
- List<SharedFunctionInfo*> functions(4);
- frame->GetFunctions(&functions);
- for (int i = functions.length(); --i >= 0;) {
- SharedFunctionInfo* shared_function_info = functions[i];
- int ticks = shared_function_info->profiler_ticks();
- if (ticks < Smi::kMaxValue) {
- shared_function_info->set_profiler_ticks(ticks + 1);
- }
+ // TODO(leszeks): Move this increment to before the maybe optimize checks,
+ // and update the tests to assume the increment has already happened.
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+ if (ticks < Smi::kMaxValue) {
+ shared->set_profiler_ticks(ticks + 1);
}
}
any_ic_changed_ = false;
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 781065a371..cc73f59524 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -17,58 +17,6 @@
namespace v8 {
namespace internal {
-static void InstallCode(
- Isolate* isolate, Handle<JSObject> holder, const char* name,
- Handle<Code> code, int argc = -1,
- BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
- Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
- Handle<JSFunction> optimized =
- isolate->factory()->NewFunctionWithoutPrototype(key, code, true);
- if (argc < 0) {
- optimized->shared()->DontAdaptArguments();
- } else {
- optimized->shared()->set_internal_formal_parameter_count(argc);
- }
- if (id >= 0) {
- optimized->shared()->set_builtin_function_id(id);
- }
- optimized->shared()->set_language_mode(STRICT);
- optimized->shared()->set_native(true);
- JSObject::AddProperty(holder, key, optimized, NONE);
-}
-
-static void InstallBuiltin(
- Isolate* isolate, Handle<JSObject> holder, const char* name,
- Builtins::Name builtin_name, int argc = -1,
- BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
- InstallCode(isolate, holder, name,
- handle(isolate->builtins()->builtin(builtin_name), isolate), argc,
- id);
-}
-
-RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- Handle<JSObject> holder =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- InstallBuiltin(isolate, holder, "pop", Builtins::kFastArrayPop);
- InstallBuiltin(isolate, holder, "push", Builtins::kFastArrayPush);
- InstallBuiltin(isolate, holder, "shift", Builtins::kFastArrayShift);
- InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
- InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
- InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
- InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes);
- InstallBuiltin(isolate, holder, "indexOf", Builtins::kArrayIndexOf);
- InstallBuiltin(isolate, holder, "keys", Builtins::kArrayPrototypeKeys, 0,
- kArrayKeys);
- InstallBuiltin(isolate, holder, "values", Builtins::kArrayPrototypeValues, 0,
- kArrayValues);
- InstallBuiltin(isolate, holder, "entries", Builtins::kArrayPrototypeEntries,
- 0, kArrayEntries);
- return *holder;
-}
-
RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -99,6 +47,235 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
return *object;
}
+namespace {
+// As PrepareElementsForSort, but only on objects where elements is
+// a dictionary, and it will stay a dictionary. Collates undefined and
+// nonexistent elements below limit from position zero of the elements.
+Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
+ uint32_t limit) {
+ DCHECK(object->HasDictionaryElements());
+ Isolate* isolate = object->GetIsolate();
+ // Must stay in dictionary mode, either because of requires_slow_elements,
+ // or because we are not going to sort (and therefore compact) all of the
+ // elements.
+ Handle<SeededNumberDictionary> dict(object->element_dictionary(), isolate);
+ Handle<SeededNumberDictionary> new_dict =
+ SeededNumberDictionary::New(isolate, dict->NumberOfElements());
+
+ uint32_t pos = 0;
+ uint32_t undefs = 0;
+ uint32_t max_key = 0;
+ int capacity = dict->Capacity();
+ Handle<Smi> bailout(Smi::FromInt(-1), isolate);
+  // Adding entries to the new dictionary does not cause it to grow, as we
+  // have allocated one that is large enough for all entries.
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < capacity; i++) {
+ Object* k;
+ if (!dict->ToKey(isolate, i, &k)) continue;
+
+ DCHECK_LE(0, k->Number());
+ DCHECK_LE(k->Number(), kMaxUInt32);
+
+ HandleScope scope(isolate);
+ Handle<Object> value(dict->ValueAt(i), isolate);
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.kind() == kAccessor || details.IsReadOnly()) {
+ // Bail out and do the sorting of undefineds and array holes in JS.
+ // Also bail out if the element is not supposed to be moved.
+ return bailout;
+ }
+
+ uint32_t key = NumberToUint32(k);
+ if (key < limit) {
+ if (value->IsUndefined(isolate)) {
+ undefs++;
+ } else if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bailout.
+ return bailout;
+ } else {
+ Handle<Object> result =
+ SeededNumberDictionary::Add(new_dict, pos, value, details);
+ DCHECK(result.is_identical_to(new_dict));
+ USE(result);
+ pos++;
+ }
+ } else if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bailout.
+ return bailout;
+ } else {
+ Handle<Object> result =
+ SeededNumberDictionary::Add(new_dict, key, value, details);
+ DCHECK(result.is_identical_to(new_dict));
+ USE(result);
+ max_key = Max(max_key, key);
+ }
+ }
+
+ uint32_t result = pos;
+ PropertyDetails no_details = PropertyDetails::Empty();
+ while (undefs > 0) {
+ if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
+ // Adding an entry with the key beyond smi-range requires
+ // allocation. Bailout.
+ return bailout;
+ }
+ HandleScope scope(isolate);
+ Handle<Object> result = SeededNumberDictionary::Add(
+ new_dict, pos, isolate->factory()->undefined_value(), no_details);
+ DCHECK(result.is_identical_to(new_dict));
+ USE(result);
+ pos++;
+ undefs--;
+ }
+ max_key = Max(max_key, pos - 1);
+
+ object->set_elements(*new_dict);
+ new_dict->UpdateMaxNumberKey(max_key, object);
+ JSObject::ValidateElements(*object);
+
+ AllowHeapAllocation allocate_return_value;
+ return isolate->factory()->NewNumberFromUint(result);
+}
+
+// Collects all defined (non-hole) and non-undefined (array) elements at the
+// start of the elements array. If the object is in dictionary mode, it is
+// converted to fast elements mode. Undefined values are placed after
+// non-undefined values. Returns the number of non-undefined values.
+Handle<Object> PrepareElementsForSort(Handle<JSObject> object, uint32_t limit) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->HasSloppyArgumentsElements() || !object->map()->is_extensible()) {
+ return handle(Smi::FromInt(-1), isolate);
+ }
+
+ if (object->HasStringWrapperElements()) {
+ int len = String::cast(Handle<JSValue>::cast(object)->value())->length();
+ return handle(Smi::FromInt(len), isolate);
+ }
+
+ JSObject::ValidateElements(*object);
+ if (object->HasDictionaryElements()) {
+ // Convert to fast elements containing only the existing properties.
+ // Ordering is irrelevant, since we are going to sort anyway.
+ Handle<SeededNumberDictionary> dict(object->element_dictionary());
+ if (object->IsJSArray() || dict->requires_slow_elements() ||
+ dict->max_number_key() >= limit) {
+ return PrepareSlowElementsForSort(object, limit);
+ }
+ // Convert to fast elements.
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, HOLEY_ELEMENTS);
+
+ PretenureFlag tenure =
+ isolate->heap()->InNewSpace(*object) ? NOT_TENURED : TENURED;
+ Handle<FixedArray> fast_elements =
+ isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
+ dict->CopyValuesTo(*fast_elements);
+
+ JSObject::SetMapAndElements(object, new_map, fast_elements);
+ JSObject::ValidateElements(*object);
+ } else if (object->HasFixedTypedArrayElements()) {
+ // Typed arrays cannot have holes or undefined elements.
+ return handle(
+ Smi::FromInt(FixedArrayBase::cast(object->elements())->length()),
+ isolate);
+ } else if (!object->HasDoubleElements()) {
+ JSObject::EnsureWritableFastElements(object);
+ }
+ DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements());
+
+ // Collect holes at the end, undefined before that and the rest at the
+ // start, and return the number of non-hole, non-undefined values.
+
+ Handle<FixedArrayBase> elements_base(object->elements());
+ uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
+ if (limit > elements_length) {
+ limit = elements_length;
+ }
+ if (limit == 0) {
+ return handle(Smi::kZero, isolate);
+ }
+
+ uint32_t result = 0;
+ if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
+ FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
+ // Split elements into defined and the_hole, in that order.
+ unsigned int holes = limit;
+    // Assume most arrays contain no holes or undefined values, so minimize the
+ // number of stores of non-undefined, non-the-hole values.
+ for (unsigned int i = 0; i < holes; i++) {
+ if (elements->is_the_hole(i)) {
+ holes--;
+ } else {
+ continue;
+ }
+ // Position i needs to be filled.
+ while (holes > i) {
+ if (elements->is_the_hole(holes)) {
+ holes--;
+ } else {
+ elements->set(i, elements->get_scalar(holes));
+ break;
+ }
+ }
+ }
+ result = holes;
+ while (holes < limit) {
+ elements->set_the_hole(holes);
+ holes++;
+ }
+ } else {
+ FixedArray* elements = FixedArray::cast(*elements_base);
+ DisallowHeapAllocation no_gc;
+
+ // Split elements into defined, undefined and the_hole, in that order. Only
+ // count locations for undefined and the hole, and fill them afterwards.
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_gc);
+ unsigned int undefs = limit;
+ unsigned int holes = limit;
+    // Assume most arrays contain no holes or undefined values, so minimize the
+ // number of stores of non-undefined, non-the-hole values.
+ for (unsigned int i = 0; i < undefs; i++) {
+ Object* current = elements->get(i);
+ if (current->IsTheHole(isolate)) {
+ holes--;
+ undefs--;
+ } else if (current->IsUndefined(isolate)) {
+ undefs--;
+ } else {
+ continue;
+ }
+ // Position i needs to be filled.
+ while (undefs > i) {
+ current = elements->get(undefs);
+ if (current->IsTheHole(isolate)) {
+ holes--;
+ undefs--;
+ } else if (current->IsUndefined(isolate)) {
+ undefs--;
+ } else {
+ elements->set(i, current, write_barrier);
+ break;
+ }
+ }
+ }
+ result = undefs;
+ while (undefs < holes) {
+ elements->set_undefined(isolate, undefs);
+ undefs++;
+ }
+ while (holes < limit) {
+ elements->set_the_hole(isolate, holes);
+ holes++;
+ }
+ }
+
+ return isolate->factory()->NewNumberFromUint(result);
+}
+
+} // namespace
// Moves all own elements of an object, that are below a limit, to positions
// starting at zero. All undefined values are placed after non-undefined values,
@@ -112,8 +289,7 @@ RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
if (object->IsJSProxy()) return Smi::FromInt(-1);
- return *JSObject::PrepareElementsForSort(Handle<JSObject>::cast(object),
- limit);
+ return *PrepareElementsForSort(Handle<JSObject>::cast(object), limit);
}
@@ -123,8 +299,8 @@ RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
- JSObject::ValidateElements(from);
- JSObject::ValidateElements(to);
+ JSObject::ValidateElements(*from);
+ JSObject::ValidateElements(*to);
Handle<FixedArrayBase> new_elements(from->elements());
ElementsKind from_kind = from->GetElementsKind();
@@ -132,10 +308,10 @@ RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
JSObject::SetMapAndElements(to, new_map, new_elements);
to->set_length(from->length());
- JSObject::ResetElements(from);
+ from->initialize_elements();
from->set_length(Smi::kZero);
- JSObject::ValidateElements(to);
+ JSObject::ValidateElements(*to);
return *to;
}
@@ -236,13 +412,20 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
return *isolate->factory()->NewJSArrayWithElements(keys);
}
+RUNTIME_FUNCTION(Runtime_NewArray) {
+ HandleScope scope(isolate);
+ DCHECK_LE(3, args.length());
+ int const argc = args.length() - 3;
+ // TODO(bmeurer): Remove this Arguments nonsense.
+ Arguments argv(argc, args.arguments() - 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
+ // TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
+ Handle<AllocationSite> site = type_info->IsAllocationSite()
+ ? Handle<AllocationSite>::cast(type_info)
+ : Handle<AllocationSite>::null();
-namespace {
-
-Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target,
- Handle<AllocationSite> site,
- Arguments* caller_args) {
Factory* factory = isolate->factory();
// If called through new, new.target can be:
@@ -256,8 +439,8 @@ Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
bool holey = false;
bool can_use_type_feedback = !site.is_null();
bool can_inline_array_constructor = true;
- if (caller_args->length() == 1) {
- Handle<Object> argument_one = caller_args->at<Object>(0);
+ if (argv.length() == 1) {
+ Handle<Object> argument_one = argv.at<Object>(0);
if (argument_one->IsSmi()) {
int value = Handle<Smi>::cast(argument_one)->value();
if (value < 0 ||
@@ -283,7 +466,7 @@ Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
ElementsKind to_kind = can_use_type_feedback ? site->GetElementsKind()
: initial_map->elements_kind();
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ if (holey && !IsHoleyElementsKind(to_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
// Update the allocation site info to reflect the advice alteration.
if (!site.is_null()) site->SetElementsKind(to_kind);
@@ -299,7 +482,7 @@ Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
// If we don't care to track arrays of to_kind ElementsKind, then
// don't emit a memento for them.
Handle<AllocationSite> allocation_site;
- if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(to_kind)) {
allocation_site = site;
}
@@ -309,8 +492,8 @@ Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS);
ElementsKind old_kind = array->GetElementsKind();
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, ArrayConstructInitializeElements(array, caller_args));
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ ArrayConstructInitializeElements(array, &argv));
if (!site.is_null() &&
(old_kind != array->GetElementsKind() || !can_use_type_feedback ||
!can_inline_array_constructor)) {
@@ -323,24 +506,6 @@ Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
return *array;
}
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_NewArray) {
- HandleScope scope(isolate);
- DCHECK_LE(3, args.length());
- int const argc = args.length() - 3;
- // TODO(bmeurer): Remove this Arguments nonsense.
- Arguments argv(argc, args.arguments() - 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
- // TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
- Handle<AllocationSite> site = type_info->IsAllocationSite()
- ? Handle<AllocationSite>::cast(type_info)
- : Handle<AllocationSite>::null();
- return ArrayConstructorCommon(isolate, constructor, new_target, site, &argv);
-}
-
RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -467,7 +632,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
Object::ToInteger(isolate, from_index));
if (V8_LIKELY(from_index->IsSmi())) {
- int start_from = Smi::cast(*from_index)->value();
+ int start_from = Smi::ToInt(*from_index);
if (start_from < 0) {
index = std::max<int64_t>(len + start_from, 0);
} else {
@@ -610,9 +775,9 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, index_obj, &success);
DCHECK(success);
- if (!JSReceiver::HasProperty(&it).FromJust()) {
- continue;
- }
+ Maybe<bool> present = JSReceiver::HasProperty(&it);
+ MAYBE_RETURN(present, isolate->heap()->exception());
+ if (!present.FromJust()) continue;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_k,
Object::GetProperty(&it));
if (search_element->StrictEquals(*element_k)) {
@@ -641,33 +806,5 @@ RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
return *spread;
}
-RUNTIME_FUNCTION(Runtime_SpreadIterableFixed) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
-
- // The caller should check if proper iteration is necessary.
- Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, spread,
- Execution::Call(isolate, spread_iterable_function,
- isolate->factory()->undefined_value(), 1, &spread));
-
- // Create a new FixedArray and put the result of the spread into it.
- Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
- uint32_t spread_length;
- CHECK(spread_array->length()->ToArrayIndex(&spread_length));
-
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(spread_length);
- ElementsAccessor* accessor = spread_array->GetElementsAccessor();
- for (uint32_t i = 0; i < spread_length; i++) {
- DCHECK(accessor->HasElement(*spread_array, i));
- Handle<Object> element = accessor->Get(spread_array, i);
- result->set(i, *element);
- }
-
- return *result;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 62986c6661..68a7b413b5 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -298,7 +298,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
@@ -327,7 +326,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
// ES #sec-atomics.add
@@ -357,7 +355,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
// ES #sec-atomics.sub
@@ -387,7 +384,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
// ES #sec-atomics.and
@@ -417,7 +413,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
// ES #sec-atomics.or
@@ -447,7 +442,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
// ES #sec-atomics.xor
@@ -477,7 +471,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
}
UNREACHABLE();
- return isolate->heap()->undefined_value();
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index feb0120045..bd7f154e50 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -32,7 +32,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
- Handle<Object> name(constructor->shared()->name(), isolate);
+ Handle<String> name(constructor->shared()->name(), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNonCallable, name));
}
@@ -63,7 +63,7 @@ namespace {
Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
Handle<JSFunction> function) {
- Handle<Object> super_name;
+ Handle<String> super_name;
if (constructor->IsJSFunction()) {
super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->name(),
isolate);
@@ -74,12 +74,12 @@ Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
super_name = Object::NoSideEffectsToString(isolate, constructor);
}
// null constructor
- if (Handle<String>::cast(super_name)->length() == 0) {
+ if (super_name->length() == 0) {
super_name = isolate->factory()->null_string();
}
- Handle<Object> function_name(function->shared()->name(), isolate);
+ Handle<String> function_name(function->shared()->name(), isolate);
// anonymous class
- if (Handle<String>::cast(function_name)->length() == 0) {
+ if (function_name->length() == 0) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kNotSuperConstructorAnonymousClass,
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 0e311517e9..deea56fb87 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -75,68 +75,14 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
return isolate->heap()->undefined_value();
}
-
-RUNTIME_FUNCTION(Runtime_SetClear) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- JSSet::Clear(holder);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
- CONVERT_SMI_ARG_CHECKED(kind, 2)
- CHECK(kind == JSSetIterator::kKindValues ||
- kind == JSSetIterator::kKindEntries);
- Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
- holder->set_table(*table);
- holder->set_index(Smi::kZero);
- holder->set_kind(Smi::FromInt(kind));
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
-
- Handle<JSSetIterator> result = isolate->factory()->NewJSSetIterator();
- result->set_table(holder->table());
- result->set_index(Smi::FromInt(Smi::cast(holder->index())->value()));
- result->set_kind(Smi::FromInt(Smi::cast(holder->kind())->value()));
-
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSSetIterator, holder, 0);
- CONVERT_ARG_CHECKED(JSArray, value_array, 1);
- return holder->Next(value_array);
-}
-
-
-// The array returned contains the following information:
-// 0: HasMore flag
-// 1: Iteration index
-// 2: Iteration kind
-RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
- details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
- details->set(1, holder->index());
- details->set(2, holder->kind());
- return *isolate->factory()->NewJSArrayWithElements(details);
+ return *isolate->factory()->NewJSSetIterator(
+ handle(holder->map(), isolate),
+ handle(OrderedHashSet::cast(holder->table()), isolate),
+ Smi::ToInt(holder->index()));
}
@@ -159,16 +105,6 @@ RUNTIME_FUNCTION(Runtime_MapShrink) {
return isolate->heap()->undefined_value();
}
-
-RUNTIME_FUNCTION(Runtime_MapClear) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- JSMap::Clear(holder);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_MapGrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -179,54 +115,16 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
return isolate->heap()->undefined_value();
}
-
-RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
- CONVERT_SMI_ARG_CHECKED(kind, 2)
- CHECK(kind == JSMapIterator::kKindKeys ||
- kind == JSMapIterator::kKindValues ||
- kind == JSMapIterator::kKindEntries);
- Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
- holder->set_table(*table);
- holder->set_index(Smi::kZero);
- holder->set_kind(Smi::FromInt(kind));
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
-
- Handle<JSMapIterator> result = isolate->factory()->NewJSMapIterator();
- result->set_table(holder->table());
- result->set_index(Smi::FromInt(Smi::cast(holder->index())->value()));
- result->set_kind(Smi::FromInt(Smi::cast(holder->kind())->value()));
-
- return *result;
+ return *isolate->factory()->NewJSMapIterator(
+ handle(holder->map(), isolate),
+ handle(OrderedHashMap::cast(holder->table()), isolate),
+ Smi::ToInt(holder->index()));
}
-
-// The array returned contains the following information:
-// 0: HasMore flag
-// 1: Iteration index
-// 2: Iteration kind
-RUNTIME_FUNCTION(Runtime_MapIteratorDetails) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
- details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
- details->set(1, holder->index());
- details->set(2, holder->kind());
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -236,16 +134,6 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
return *JSWeakCollection::GetEntries(holder, max_entries);
}
-
-RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSMapIterator, holder, 0);
- CONVERT_ARG_CHECKED(JSArray, value_array, 1);
- return holder->Next(value_array);
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -255,37 +143,6 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
}
-RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_SMI_ARG_CHECKED(hash, 2)
- CHECK(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- CHECK(table->IsKey(isolate, *key));
- Handle<Object> lookup(table->Lookup(key, hash), isolate);
- return lookup->IsTheHole(isolate) ? isolate->heap()->undefined_value()
- : *lookup;
-}
-
-
-RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_SMI_ARG_CHECKED(hash, 2)
- CHECK(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- CHECK(table->IsKey(isolate, *key));
- Handle<Object> lookup(table->Lookup(key, hash), isolate);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole(isolate));
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -340,20 +197,6 @@ RUNTIME_FUNCTION(Runtime_IsJSSet) {
return isolate->heap()->ToBoolean(obj->IsJSSet());
}
-RUNTIME_FUNCTION(Runtime_IsJSMapIterator) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSMapIterator());
-}
-
-RUNTIME_FUNCTION(Runtime_IsJSSetIterator) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSSetIterator());
-}
-
RUNTIME_FUNCTION(Runtime_IsJSWeakMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 7b73967acc..6cd8541585 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -33,7 +33,9 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
#endif
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
+ return isolate->StackOverflow();
+ }
if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
return isolate->heap()->exception();
}
@@ -46,8 +48,10 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
- if (!Compiler::CompileOptimized(function, Compiler::CONCURRENT)) {
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
+ return isolate->StackOverflow();
+ }
+ if (!Compiler::CompileOptimized(function, ConcurrencyMode::kConcurrent)) {
return isolate->heap()->exception();
}
DCHECK(function->is_compiled());
@@ -60,8 +64,10 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
- if (!Compiler::CompileOptimized(function, Compiler::NOT_CONCURRENT)) {
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
+ return isolate->StackOverflow();
+ }
+ if (!Compiler::CompileOptimized(function, ConcurrencyMode::kNotConcurrent)) {
return isolate->heap()->exception();
}
DCHECK(function->is_compiled());
@@ -73,7 +79,8 @@ RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- DCHECK(function->is_compiled());
+ DCHECK(function->shared()->is_compiled());
+
function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "Runtime_EvictOptimizedCodeSlot");
return function->code();
@@ -88,9 +95,9 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
if (args[1]->IsJSReceiver()) {
stdlib = args.at<JSReceiver>(1);
}
- Handle<JSObject> foreign;
- if (args[2]->IsJSObject()) {
- foreign = args.at<JSObject>(2);
+ Handle<JSReceiver> foreign;
+ if (args[2]->IsJSReceiver()) {
+ foreign = args.at<JSReceiver>(2);
}
Handle<JSArrayBuffer> memory;
if (args[3]->IsJSArrayBuffer()) {
@@ -168,22 +175,13 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
Handle<Code> optimized_code = deoptimizer->compiled_code();
DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(optimized_code->is_turbofanned());
DCHECK(type == deoptimizer->bailout_type());
DCHECK_NULL(isolate->context());
- // TODO(turbofan): For Crankshaft we restore the context before objects are
- // being materialized, because it never de-materializes the context but it
- // requires a context to materialize arguments objects. This is specific to
- // Crankshaft and can be removed once only TurboFan goes through here.
- if (!optimized_code->is_turbofanned()) {
- JavaScriptFrameIterator top_it(isolate);
- JavaScriptFrame* top_frame = top_it.frame();
- isolate->set_context(Context::cast(top_frame->context()));
- } else {
- // TODO(turbofan): We currently need the native context to materialize
- // the arguments object, but only to get to its map.
- isolate->set_context(function->native_context());
- }
+ // TODO(turbofan): We currently need the native context to materialize
+ // the arguments object, but only to get to its map.
+ isolate->set_context(function->native_context());
// Make sure to materialize objects before causing any allocation.
JavaScriptFrameIterator it(isolate);
@@ -191,11 +189,9 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
delete deoptimizer;
// Ensure the context register is updated for materialized objects.
- if (optimized_code->is_turbofanned()) {
- JavaScriptFrameIterator top_it(isolate);
- JavaScriptFrame* top_frame = top_it.frame();
- isolate->set_context(Context::cast(top_frame->context()));
- }
+ JavaScriptFrameIterator top_it(isolate);
+ JavaScriptFrame* top_frame = top_it.frame();
+ isolate->set_context(Context::cast(top_frame->context()));
if (type == Deoptimizer::LAZY) {
return isolate->heap()->undefined_value();
@@ -218,12 +214,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
if (function->feedback_vector()->optimized_code() == *optimized_code) {
function->ClearOptimizedCodeSlot("notify deoptimized");
}
- // Remove the code from the osr optimized code cache.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(optimized_code->deoptimization_data());
- if (deopt_data->OsrAstId()->value() == BailoutId::None().ToInt()) {
- isolate->EvictOSROptimizedCode(*optimized_code, "notify deoptimized");
- }
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
// unconditionally if the code is not already marked for deoptimization.
@@ -275,7 +265,7 @@ BailoutId DetermineEntryAndDisarmOSRForBaseline(JavaScriptFrame* frame) {
// Return a BailoutId representing an AST id of the {IterationStatement}.
uint32_t pc_offset =
static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
- return caller_code->TranslatePcOffsetToAstId(pc_offset);
+ return caller_code->TranslatePcOffsetToBytecodeOffset(pc_offset);
}
BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
@@ -342,28 +332,23 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
DeoptimizationInputData::cast(result->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
- DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id);
+ DCHECK(BailoutId(data->OsrBytecodeOffset()->value()) == ast_id);
if (FLAG_trace_osr) {
PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
ast_id.ToInt(), data->OsrPcOffset()->value());
}
- if (result->is_turbofanned()) {
- // When we're waiting for concurrent optimization, set to compile on
- // the next call - otherwise we'd run unoptimized once more
- // and potentially compile for OSR another time as well.
- if (function->IsMarkedForConcurrentOptimization()) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - Re-marking ");
- function->PrintName();
- PrintF(" for non-concurrent optimization]\n");
- }
- function->ReplaceCode(
- isolate->builtins()->builtin(Builtins::kCompileOptimized));
+ DCHECK(result->is_turbofanned());
+ if (!function->HasOptimizedCode()) {
+ // If we're not already optimized, set to optimize non-concurrently on
+ // the next call, otherwise we'd run unoptimized once more and
+ // potentially compile for OSR again.
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - Re-marking ");
+ function->PrintName();
+ PrintF(" for non-concurrent optimization]\n");
}
- } else {
- // Crankshafted OSR code can be installed into the function.
- function->ReplaceCode(*result);
+ function->SetOptimizationMarker(OptimizationMarker::kCompileOptimized);
}
return *result;
}
@@ -390,33 +375,19 @@ RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
// First check if this is a real stack overflow.
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed()) {
- SealHandleScope shs(isolate);
+ if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
return isolate->StackOverflow();
}
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ // Only try to install optimized functions if the interrupt was InstallCode.
+ if (isolate->stack_guard()->CheckAndClearInstallCode()) {
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ }
+
return (function->IsOptimized()) ? function->code()
: function->shared()->code();
}
-
-bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context) {
- DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
- // Check with callback if set.
- AllowCodeGenerationFromStringsCallback callback =
- isolate->allow_code_gen_callback();
- if (callback == NULL) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState<EXTERNAL> state(isolate);
- return callback(v8::Utils::ToLocal(context));
- }
-}
-
static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
@@ -427,7 +398,8 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
- !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ !Compiler::CodeGenerationFromStringsAllowed(isolate, native_context,
+ source)) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
Handle<Object> error;
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index b65757b2de..b58dce22b6 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -17,6 +17,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -146,18 +147,19 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
Isolate* isolate, Handle<IteratorType> object) {
Factory* factory = isolate->factory();
Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
- CHECK(iterator->kind()->IsSmi());
const char* kind = NULL;
- switch (Smi::cast(iterator->kind())->value()) {
- case IteratorType::kKindKeys:
+ switch (iterator->map()->instance_type()) {
+ case JS_MAP_KEY_ITERATOR_TYPE:
kind = "keys";
break;
- case IteratorType::kKindValues:
- kind = "values";
- break;
- case IteratorType::kKindEntries:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
kind = "entries";
break;
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ kind = "values";
+ break;
default:
UNREACHABLE();
}
@@ -921,6 +923,11 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
// Check arguments.
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
+ // Only inspect suspended generator scopes.
+ if (!gen->is_suspended()) {
+ return Smi::kZero;
+ }
+
// Count the visible scopes.
int n = 0;
for (ScopeIterator it(isolate, gen); !it.Done(); it.Next()) {
@@ -942,6 +949,11 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+ // Only inspect suspended generator scopes.
+ if (!gen->is_suspended()) {
+ return isolate->heap()->undefined_value();
+ }
+
// Find the requested scope.
int n = 0;
ScopeIterator it(isolate, gen);
@@ -1046,28 +1058,15 @@ RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
}
-static bool IsPositionAlignmentCodeCorrect(int alignment) {
- return alignment == STATEMENT_ALIGNED || alignment == BREAK_POSITION_ALIGNED;
-}
-
-
RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(1, args.length());
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
-
- if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
- return isolate->ThrowIllegalOperation();
- }
- BreakPositionAlignment alignment =
- static_cast<BreakPositionAlignment>(statement_aligned_code);
Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
- Handle<Object> break_locations =
- Debug::GetSourceBreakLocations(shared, alignment);
+ Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
if (break_locations->IsUndefined(isolate)) {
return isolate->heap()->undefined_value();
}
@@ -1098,29 +1097,20 @@ RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
return Smi::FromInt(source_position);
}
-
// Changes the state of a break point in a script and returns source position
// where break point was set. NOTE: Regarding performance see the NOTE for
// GetScriptFromScriptData.
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
-// args[2]: number, breakpoint position alignment
-// args[3]: number: break point object
+// args[2]: number: break point object
RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(3, args.length());
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
CHECK(source_position >= 0);
- CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[2]);
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 3);
-
- if (!IsPositionAlignmentCodeCorrect(statement_aligned_code)) {
- return isolate->ThrowIllegalOperation();
- }
- BreakPositionAlignment alignment =
- static_cast<BreakPositionAlignment>(statement_aligned_code);
+ CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
// Get the script from the script wrapper.
CHECK(wrapper->value()->IsScript());
@@ -1128,7 +1118,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
// Set break point.
if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position, alignment)) {
+ &source_position)) {
return isolate->heap()->undefined_value();
}
@@ -1567,7 +1557,7 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (line == 0) return 0;
// If line == line_count, we return the first position beyond the last line.
if (line > line_count) return -1;
- return Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+ return Smi::ToInt(line_ends_array->get(line - 1)) + 1;
}
} // namespace
@@ -1802,8 +1792,8 @@ RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
}
const int start =
- (line == 0) ? 0 : Smi::cast(line_ends_array->get(line - 1))->value() + 1;
- const int end = Smi::cast(line_ends_array->get(line))->value();
+ (line == 0) ? 0 : Smi::ToInt(line_ends_array->get(line - 1)) + 1;
+ const int end = Smi::ToInt(line_ends_array->get(line));
Handle<String> source =
handle(String::cast(script_handle->source()), isolate);
@@ -1910,6 +1900,26 @@ RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
return NULL;
}
+namespace {
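+// Builds a {start, end, count} range object (with a null prototype) describing
+// a single coverage range.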
+Handle<JSObject> MakeRangeObject(Isolate* isolate, const CoverageBlock& range) {
+ Factory* factory = isolate->factory();
+
+ Handle<String> start_string = factory->InternalizeUtf8String("start");
+ Handle<String> end_string = factory->InternalizeUtf8String("end");
+ Handle<String> count_string = factory->InternalizeUtf8String("count");
+
+ Handle<JSObject> range_obj = factory->NewJSObjectWithNullProto();
+ JSObject::AddProperty(range_obj, start_string,
+ factory->NewNumberFromInt(range.start), NONE);
+ JSObject::AddProperty(range_obj, end_string,
+ factory->NewNumberFromInt(range.end), NONE);
+ JSObject::AddProperty(range_obj, count_string,
+ factory->NewNumberFromUint(range.count), NONE);
+
+ return range_obj;
+}
+} // namespace
+
RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -1927,34 +1937,36 @@ RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
// Prepare property keys.
Handle<FixedArray> scripts_array = factory->NewFixedArray(num_scripts);
Handle<String> script_string = factory->NewStringFromStaticChars("script");
- Handle<String> start_string = factory->NewStringFromStaticChars("start");
- Handle<String> end_string = factory->NewStringFromStaticChars("end");
- Handle<String> count_string = factory->NewStringFromStaticChars("count");
for (int i = 0; i < num_scripts; i++) {
const auto& script_data = coverage->at(i);
HandleScope inner_scope(isolate);
+
+ std::vector<CoverageBlock> ranges;
int num_functions = static_cast<int>(script_data.functions.size());
- Handle<FixedArray> functions_array = factory->NewFixedArray(num_functions);
for (int j = 0; j < num_functions; j++) {
const auto& function_data = script_data.functions[j];
- Handle<JSObject> range_obj = factory->NewJSObjectWithNullProto();
- JSObject::AddProperty(range_obj, start_string,
- factory->NewNumberFromInt(function_data.start),
- NONE);
- JSObject::AddProperty(range_obj, end_string,
- factory->NewNumberFromInt(function_data.end), NONE);
- JSObject::AddProperty(range_obj, count_string,
- factory->NewNumberFromUint(function_data.count),
- NONE);
- functions_array->set(j, *range_obj);
+ ranges.emplace_back(function_data.start, function_data.end,
+ function_data.count);
+ for (size_t k = 0; k < function_data.blocks.size(); k++) {
+ const auto& block_data = function_data.blocks[k];
+ ranges.emplace_back(block_data.start, block_data.end, block_data.count);
+ }
+ }
+
+ int num_ranges = static_cast<int>(ranges.size());
+ Handle<FixedArray> ranges_array = factory->NewFixedArray(num_ranges);
+ for (int j = 0; j < num_ranges; j++) {
+ Handle<JSObject> range_object = MakeRangeObject(isolate, ranges[j]);
+ ranges_array->set(j, *range_object);
}
+
Handle<JSArray> script_obj =
- factory->NewJSArrayWithElements(functions_array, FAST_ELEMENTS);
+ factory->NewJSArrayWithElements(ranges_array, PACKED_ELEMENTS);
Handle<JSObject> wrapper = Script::GetWrapper(script_data.script);
JSObject::AddProperty(script_obj, script_string, wrapper, NONE);
scripts_array->set(i, *script_obj);
}
- return *factory->NewJSArrayWithElements(scripts_array, FAST_ELEMENTS);
+ return *factory->NewJSArrayWithElements(scripts_array, PACKED_ELEMENTS);
}
RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
@@ -1965,5 +1977,35 @@ RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DebugToggleBlockCoverage) {
+ SealHandleScope shs(isolate);
+ CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
+ Coverage::SelectMode(isolate, enable ? debug::Coverage::kBlockCount
+ : debug::Coverage::kBestEffort);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_SMI_ARG_CHECKED(coverage_array_slot_index, 1);
+
+ DCHECK(FLAG_block_coverage);
+
+ // It's quite possible that a function contains IncBlockCounter bytecodes but
+ // no coverage info exists, e.g. when the best-effort coverage collection mode
+ // has been selected, since that mode deletes all coverage infos in order to
+ // avoid memory leaks.
+
+ SharedFunctionInfo* shared = function->shared();
+ if (shared->HasCoverageInfo()) {
+ CoverageInfo* coverage_info = shared->GetCoverageInfo();
+ coverage_info->IncrementBlockCount(coverage_array_slot_index);
+ }
+
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index c7100d1bf5..382f09c4d4 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -29,32 +29,6 @@ RUNTIME_FUNCTION(Runtime_FunctionGetName) {
}
}
-
-RUNTIME_FUNCTION(Runtime_FunctionSetName) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-
- name = String::Flatten(name);
- f->shared()->set_name(*name);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- CHECK(f->RemovePrototype());
- f->shared()->SetConstructStub(
- *isolate->builtins()->ConstructedNonConstructable());
-
- return isolate->heap()->undefined_value();
-}
-
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
HandleScope scope(isolate);
@@ -114,24 +88,12 @@ RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
return fun->native_context()->debug_context_id();
}
-RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- CONVERT_ARG_CHECKED(String, name, 1);
- fun->shared()->set_instance_class_name(name);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_SMI_ARG_CHECKED(length, 1);
- CHECK((length & 0xC0000000) == 0xC0000000 || (length & 0xC0000000) == 0x0);
fun->shared()->set_length(length);
return isolate->heap()->undefined_value();
}
@@ -172,13 +134,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
return isolate->heap()->exception();
}
- // Mark both, the source and the target, as un-flushable because the
- // shared unoptimized code makes them impossible to enqueue in a list.
- DCHECK(target_shared->code()->gc_metadata() == NULL);
- DCHECK(source_shared->code()->gc_metadata() == NULL);
- target_shared->set_dont_flush(true);
- source_shared->set_dont_flush(true);
-
// Set the code, scope info, formal parameter count, and the length
// of the target shared function info.
target_shared->ReplaceCode(source_shared->code());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 74b1fe90d2..9515d8e53a 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -36,13 +36,9 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
}
RUNTIME_FUNCTION(Runtime_GeneratorClose) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
- generator->set_continuation(JSGeneratorObject::kGeneratorClosed);
-
- return isolate->heap()->undefined_value();
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
}
RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
@@ -62,68 +58,33 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
}
RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
-
- return generator->context();
}
RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
-
- return generator->input_or_debug_pos();
-}
-
-RUNTIME_FUNCTION(Runtime_AsyncGeneratorGetAwaitInputOrDebugPos) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSAsyncGeneratorObject, generator, 0);
- return generator->await_input_or_debug_pos();
}
RUNTIME_FUNCTION(Runtime_AsyncGeneratorResolve) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
-
- return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_AsyncGeneratorReject) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
-
- return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
-
- return Smi::FromInt(generator->resume_mode());
}
RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 7348d5f007..3f3f2f8185 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -29,7 +29,6 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
return isolate->heap()->undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -42,7 +41,6 @@ RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
return *container;
}
-
RUNTIME_FUNCTION(Runtime_InstallToContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -51,7 +49,7 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
CHECK(isolate->bootstrapper()->IsActive());
Handle<Context> native_context = isolate->native_context();
Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
- int length = Smi::cast(array->length())->value();
+ int length = Smi::ToInt(array->length());
for (int i = 0; i < length; i += 2) {
CHECK(fixed_array->get(i)->IsString());
Handle<String> name(String::cast(fixed_array->get(i)));
@@ -67,21 +65,18 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
return isolate->heap()->undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_Throw) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
return isolate->Throw(args[0]);
}
-
RUNTIME_FUNCTION(Runtime_ReThrow) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
return isolate->ReThrow(args[0]);
}
-
RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
@@ -133,7 +128,6 @@ const char* ElementsKindToType(ElementsKind fixed_elements_kind) {
default:
UNREACHABLE();
- return "";
}
}
@@ -167,14 +161,12 @@ RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
return isolate->UnwindAndFindHandler();
}
-
RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
-
RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -183,7 +175,6 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
-
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -194,7 +185,6 @@ RUNTIME_FUNCTION(Runtime_NewTypeError) {
return *isolate->factory()->NewTypeError(message_template, arg0);
}
-
RUNTIME_FUNCTION(Runtime_NewReferenceError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -205,7 +195,6 @@ RUNTIME_FUNCTION(Runtime_NewReferenceError) {
return *isolate->factory()->NewReferenceError(message_template, arg0);
}
-
RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -261,6 +250,13 @@ RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
}
+RUNTIME_FUNCTION(Runtime_ThrowThrowMethodMissing) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kThrowMethodMissing));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -304,7 +300,6 @@ RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
}
-
RUNTIME_FUNCTION(Runtime_StackGuard) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -318,14 +313,12 @@ RUNTIME_FUNCTION(Runtime_StackGuard) {
return isolate->stack_guard()->HandleInterrupts();
}
-
RUNTIME_FUNCTION(Runtime_Interrupt) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
return isolate->stack_guard()->HandleInterrupts();
}
-
RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -336,7 +329,6 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
}
-
RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -372,13 +364,10 @@ RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
return *result;
}
-
RUNTIME_FUNCTION(Runtime_IS_VAR) {
UNREACHABLE(); // implemented as macro in the parser
- return NULL;
}
-
namespace {
bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
@@ -403,14 +392,15 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
return false;
}
-
-Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
+Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
+ CallPrinter::IteratorHint* hint) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
- std::unique_ptr<ParseInfo> info(new ParseInfo(location.shared()));
- if (parsing::ParseAny(info.get(), isolate)) {
+ ParseInfo info(location.shared());
+ if (parsing::ParseAny(&info, isolate)) {
CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
- Handle<String> str = printer.Print(info->literal(), location.start_pos());
+ Handle<String> str = printer.Print(info.literal(), location.start_pos());
+ *hint = printer.GetIteratorHint();
if (str->length() > 0) return str;
} else {
isolate->clear_pending_exception();
@@ -419,15 +409,44 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
return Object::TypeOf(isolate, object);
}
+void UpdateIteratorTemplate(CallPrinter::IteratorHint hint,
+ MessageTemplate::Template* id) {
+ if (hint == CallPrinter::IteratorHint::kNormal) {
+ *id = MessageTemplate::kNotIterable;
+ }
+
+ if (hint == CallPrinter::IteratorHint::kAsync) {
+ *id = MessageTemplate::kNotAsyncIterable;
+ }
+}
+
} // namespace
+MaybeHandle<Object> Runtime::ThrowIteratorError(Isolate* isolate,
+ Handle<Object> object) {
+ CallPrinter::IteratorHint hint = CallPrinter::kNone;
+ Handle<String> callsite = RenderCallSite(isolate, object, &hint);
+ MessageTemplate::Template id = MessageTemplate::kNonObjectPropertyLoad;
+
+ if (hint == CallPrinter::kNone) {
+ Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
+ THROW_NEW_ERROR(isolate, NewTypeError(id, iterator_symbol, callsite),
+ Object);
+ }
+
+ UpdateIteratorTemplate(hint, &id);
+ THROW_NEW_ERROR(isolate, NewTypeError(id, callsite), Object);
+}
+
RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- Handle<String> callsite = RenderCallSite(isolate, object);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
+ CallPrinter::IteratorHint hint = CallPrinter::kNone;
+ Handle<String> callsite = RenderCallSite(isolate, object, &hint);
+ MessageTemplate::Template id = MessageTemplate::kCalledNonCallable;
+ UpdateIteratorTemplate(hint, &id);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
}
RUNTIME_FUNCTION(Runtime_ThrowCalledOnNullOrUndefined) {
@@ -442,9 +461,10 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- Handle<String> callsite = RenderCallSite(isolate, object);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor, callsite));
+ CallPrinter::IteratorHint hint = CallPrinter::kNone;
+ Handle<String> callsite = RenderCallSite(isolate, object, &hint);
+ MessageTemplate::Template id = MessageTemplate::kNotConstructor;
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
}
RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
@@ -478,7 +498,6 @@ RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
isolate, object, ElementTypes::kAll));
}
-
RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -487,6 +506,15 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(
+ Runtime_IncrementUseCounterConstructorReturnNonUndefinedPrimitive) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kConstructorNonUndefinedPrimitiveReturn);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
if (args.length() == 0) {
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 5889a477c3..815e6d0fec 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -15,6 +15,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
+#include "src/string-builder.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index 623fe05fe8..343d560a8a 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -143,6 +143,11 @@ RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
icu::Locale default_locale;
+ // Translate ICU's fallback locale to a well-known locale.
+ if (strcmp(default_locale.getName(), "en_US_POSIX") == 0) {
+ return *factory->NewStringFromStaticChars("en-US");
+ }
+
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
UErrorCode status = U_ZERO_ERROR;
@@ -471,7 +476,7 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
return isolate->heap()->undefined_value();
}
}
- JSObject::ValidateElements(result);
+ JSObject::ValidateElements(*result);
return *result;
}
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 1a2b1f584e..4f761d5997 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -14,301 +14,521 @@
namespace v8 {
namespace internal {
-MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<FeedbackVector> vector,
- Handle<FixedArray> compile_time_value);
-
-MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
- Isolate* isolate, Handle<FeedbackVector> vector,
- Handle<BoilerplateDescription> boilerplate_description,
- bool use_fast_elements, bool has_null_prototype) {
- Handle<Context> native_context = isolate->native_context();
-
- // In case we have function literals, we want the object to be in
- // slow properties mode for now. We don't go in the map cache because
- // maps with constant functions can't be shared if the functions are
- // not the same (which is the common case).
- int number_of_properties = boilerplate_description->backing_store_size();
-
- // Ignoring number_of_properties for force dictionary map with __proto__:null.
- Handle<Map> map =
- has_null_prototype
- ? handle(native_context->slow_object_with_null_prototype_map(),
- isolate)
- : isolate->factory()->ObjectLiteralMapFromCache(native_context,
- number_of_properties);
-
- PretenureFlag pretenure_flag =
- isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
-
- Handle<JSObject> boilerplate =
- map->is_dictionary_map()
- ? isolate->factory()->NewSlowJSObjectFromMap(
- map, number_of_properties, pretenure_flag)
- : isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
-
- // Normalize the elements of the boilerplate to save space if needed.
- if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
-
- // Add the constant properties to the boilerplate.
- int length = boilerplate_description->size();
- // TODO(verwaest): Support tracking representations in the boilerplate.
- for (int index = 0; index < length; index++) {
- Handle<Object> key(boilerplate_description->name(index), isolate);
- Handle<Object> value(boilerplate_description->value(index), isolate);
- if (value->IsFixedArray()) {
- // The value contains the CompileTimeValue with the boilerplate properties
- // of a simple object or array literal.
- Handle<FixedArray> compile_time_value = Handle<FixedArray>::cast(value);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value,
- CreateLiteralBoilerplate(isolate, vector, compile_time_value),
- Object);
+namespace {
+
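+// A literal site in the feedback vector starts out as Smi::kZero
+// (uninitialized), is bumped to Smi 1 once the literal has been seen
+// (pre-initialized), and finally holds an AllocationSite with a boilerplate.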
+bool IsUninitializedLiteralSite(Handle<Object> literal_site) {
+ return *literal_site == Smi::kZero;
+}
+
+bool HasBoilerplate(Isolate* isolate, Handle<Object> literal_site) {
+ return !literal_site->IsSmi();
+}
+
+void PreInitializeLiteralSite(Handle<FeedbackVector> vector,
+ FeedbackSlot slot) {
+ vector->Set(slot, Smi::FromInt(1));
+}
+
+Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
+ Handle<FixedArray> compile_time_value,
+ PretenureFlag pretenure_flag);
+
+enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
+
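+// Walks the property and element graph of a boilerplate object. Depending on
+// the ContextObject this either updates the boilerplate in place (migrating
+// deprecated maps, creating AllocationSites) or produces a deep copy of it.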
+template <class ContextObject>
+class JSObjectWalkVisitor {
+ public:
+ JSObjectWalkVisitor(ContextObject* site_context, DeepCopyHints hints)
+ : site_context_(site_context), hints_(hints) {}
+
+ MUST_USE_RESULT MaybeHandle<JSObject> StructureWalk(Handle<JSObject> object);
+
+ protected:
+ MUST_USE_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty(
+ Handle<JSObject> object, Handle<JSObject> value) {
+ Handle<AllocationSite> current_site = site_context()->EnterNewScope();
+ MaybeHandle<JSObject> copy_of_value = StructureWalk(value);
+ site_context()->ExitScope(current_site, value);
+ return copy_of_value;
+ }
+
+ inline ContextObject* site_context() { return site_context_; }
+ inline Isolate* isolate() { return site_context()->isolate(); }
+
+ private:
+ ContextObject* site_context_;
+ const DeepCopyHints hints_;
+};
+
+template <class ContextObject>
+MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
+ Handle<JSObject> object) {
+ Isolate* isolate = this->isolate();
+ bool copying = ContextObject::kCopying;
+ bool shallow = hints_ == kObjectIsShallow;
+
+ if (!shallow) {
+ StackLimitCheck check(isolate);
+
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return MaybeHandle<JSObject>();
}
- MaybeHandle<Object> maybe_result;
- uint32_t element_index = 0;
- if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- if (value->IsUninitialized(isolate)) {
- value = handle(Smi::kZero, isolate);
+ }
+
+ if (object->map()->is_deprecated()) {
+ JSObject::MigrateInstance(object);
+ }
+
+ Handle<JSObject> copy;
+ if (copying) {
+ // JSFunction objects are not allowed to be in normal boilerplates at all.
+ DCHECK(!object->IsJSFunction());
+ Handle<AllocationSite> site_to_pass;
+ if (site_context()->ShouldCreateMemento(object)) {
+ site_to_pass = site_context()->current();
+ }
+ copy = isolate->factory()->CopyJSObjectWithAllocationSite(object,
+ site_to_pass);
+ } else {
+ copy = object;
+ }
+
+ DCHECK(copying || copy.is_identical_to(object));
+
+ if (shallow) return copy;
+
+ HandleScope scope(isolate);
+
+ // Deep copy own properties. Arrays only have 1 property "length".
+ if (!copy->IsJSArray()) {
+ if (copy->HasFastProperties()) {
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
+ int limit = copy->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ DCHECK_EQ(kField, descriptors->GetDetails(i).location());
+ DCHECK_EQ(kData, descriptors->GetDetails(i).kind());
+ FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
+ if (copy->IsUnboxedDoubleField(index)) continue;
+ Object* raw = copy->RawFastPropertyAt(index);
+ if (raw->IsJSObject()) {
+ Handle<JSObject> value(JSObject::cast(raw), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, VisitElementOrProperty(copy, value), JSObject);
+ if (copying) copy->FastPropertyAtPut(index, *value);
+ } else if (copying && raw->IsMutableHeapNumber()) {
+ DCHECK(descriptors->GetDetails(i).representation().IsDouble());
+ uint64_t double_value = HeapNumber::cast(raw)->value_as_bits();
+ Handle<HeapNumber> value = isolate->factory()->NewHeapNumber(MUTABLE);
+ value->set_value_as_bits(double_value);
+ copy->FastPropertyAtPut(index, *value);
+ }
}
- maybe_result = JSObject::SetOwnElementIgnoreAttributes(
- boilerplate, element_index, value, NONE);
} else {
- Handle<String> name = Handle<String>::cast(key);
- DCHECK(!name->AsArrayIndex(&element_index));
- maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name,
- value, NONE);
+ Handle<NameDictionary> dict(copy->property_dictionary());
+ for (int i = 0; i < dict->Capacity(); i++) {
+ Object* raw = dict->ValueAt(i);
+ if (!raw->IsJSObject()) continue;
+ DCHECK(dict->KeyAt(i)->IsName());
+ Handle<JSObject> value(JSObject::cast(raw), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, VisitElementOrProperty(copy, value), JSObject);
+ if (copying) dict->ValueAtPut(i, *value);
+ }
}
- RETURN_ON_EXCEPTION(isolate, maybe_result, Object);
+
+ // Assume non-arrays don't end up having elements.
+ if (copy->elements()->length() == 0) return copy;
}
- if (map->is_dictionary_map() && !has_null_prototype) {
- // TODO(cbruni): avoid making the boilerplate fast again, the clone stub
- // supports dict-mode objects directly.
- JSObject::MigrateSlowToFast(boilerplate,
- boilerplate->map()->unused_property_fields(),
- "FastLiteral");
+ // Deep copy own elements.
+ switch (copy->GetElementsKind()) {
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+ if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
+#ifdef DEBUG
+ for (int i = 0; i < elements->length(); i++) {
+ DCHECK(!elements->get(i)->IsJSObject());
+ }
+#endif
+ } else {
+ for (int i = 0; i < elements->length(); i++) {
+ Object* raw = elements->get(i);
+ if (!raw->IsJSObject()) continue;
+ Handle<JSObject> value(JSObject::cast(raw), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, VisitElementOrProperty(copy, value), JSObject);
+ if (copying) elements->set(i, *value);
+ }
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> element_dictionary(
+ copy->element_dictionary());
+ int capacity = element_dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* raw = element_dictionary->ValueAt(i);
+ if (!raw->IsJSObject()) continue;
+ Handle<JSObject> value(JSObject::cast(raw), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, VisitElementOrProperty(copy, value), JSObject);
+ if (copying) element_dictionary->ValueAtPut(i, *value);
+ }
+ break;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ UNREACHABLE();
+ break;
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ // Typed elements cannot be created using an object literal.
+ UNREACHABLE();
+ break;
+
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ case NO_ELEMENTS:
+ // No contained objects, nothing to do.
+ break;
}
- return boilerplate;
+
+ return copy;
}
-static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
- Isolate* isolate, Handle<FeedbackVector> vector,
- Handle<ConstantElementsPair> elements) {
- // Create the JSArray.
- Handle<JSFunction> constructor = isolate->array_function();
-
- PretenureFlag pretenure_flag =
- isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
-
- Handle<JSArray> object = Handle<JSArray>::cast(
- isolate->factory()->NewJSObject(constructor, pretenure_flag));
-
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(elements->elements_kind());
- Handle<FixedArrayBase> constant_elements_values(elements->constant_values());
-
- {
- DisallowHeapAllocation no_gc;
- DCHECK(IsFastElementsKind(constant_elements_kind));
- Context* native_context = isolate->context()->native_context();
- Object* map =
- native_context->get(Context::ArrayMapIndex(constant_elements_kind));
- object->set_map(Map::cast(map));
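+// Site context for DeepWalk when only deprecated-map migration is required;
+// it never copies objects and never creates AllocationSites.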
+class DeprecationUpdateContext {
+ public:
+ explicit DeprecationUpdateContext(Isolate* isolate) { isolate_ = isolate; }
+ Isolate* isolate() { return isolate_; }
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
+ inline void ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {}
+ Handle<AllocationSite> EnterNewScope() { return Handle<AllocationSite>(); }
+ Handle<AllocationSite> current() {
+ UNREACHABLE();
+ return Handle<AllocationSite>();
}
- Handle<FixedArrayBase> copied_elements_values;
- if (IsFastDoubleElementsKind(constant_elements_kind)) {
- copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
- Handle<FixedDoubleArray>::cast(constant_elements_values));
- } else {
- DCHECK(IsFastSmiOrObjectElementsKind(constant_elements_kind));
- const bool is_cow = (constant_elements_values->map() ==
- isolate->heap()->fixed_cow_array_map());
- if (is_cow) {
- copied_elements_values = constant_elements_values;
-#if DEBUG
- Handle<FixedArray> fixed_array_values =
- Handle<FixedArray>::cast(copied_elements_values);
- for (int i = 0; i < fixed_array_values->length(); i++) {
- DCHECK(!fixed_array_values->get(i)->IsFixedArray());
+ static const bool kCopying = false;
+
+ private:
+ Isolate* isolate_;
+};
+
+// AllocationSiteCreationContext aids in the creation of AllocationSites to
+// accompany object literals.
+class AllocationSiteCreationContext : public AllocationSiteContext {
+ public:
+ explicit AllocationSiteCreationContext(Isolate* isolate)
+ : AllocationSiteContext(isolate) {}
+
+ Handle<AllocationSite> EnterNewScope() {
+ Handle<AllocationSite> scope_site;
+ if (top().is_null()) {
+ // We are creating the top level AllocationSite as opposed to a nested
+ // AllocationSite.
+ InitializeTraversal(isolate()->factory()->NewAllocationSite());
+ scope_site = Handle<AllocationSite>(*top(), isolate());
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating top level AllocationSite %p\n",
+ static_cast<void*>(*scope_site));
}
-#endif
} else {
- Handle<FixedArray> fixed_array_values =
- Handle<FixedArray>::cast(constant_elements_values);
- Handle<FixedArray> fixed_array_values_copy =
- isolate->factory()->CopyFixedArray(fixed_array_values);
- copied_elements_values = fixed_array_values_copy;
- FOR_WITH_HANDLE_SCOPE(
- isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
- if (fixed_array_values->get(i)->IsFixedArray()) {
- // The value contains the CompileTimeValue with the
- // boilerplate description of a simple object or
- // array literal.
- Handle<FixedArray> compile_time_value(
- FixedArray::cast(fixed_array_values->get(i)));
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CreateLiteralBoilerplate(isolate, vector, compile_time_value),
- Object);
- fixed_array_values_copy->set(i, *result);
- }
- });
+ DCHECK(!current().is_null());
+ scope_site = isolate()->factory()->NewAllocationSite();
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
+ static_cast<void*>(*top()), static_cast<void*>(*current()),
+ static_cast<void*>(*scope_site));
+ }
+ current()->set_nested_site(*scope_site);
+ update_current_site(*scope_site);
+ }
+ DCHECK(!scope_site.is_null());
+ return scope_site;
+ }
+ void ExitScope(Handle<AllocationSite> scope_site, Handle<JSObject> object) {
+ if (object.is_null()) return;
+ scope_site->set_boilerplate(*object);
+ if (FLAG_trace_creation_allocation_sites) {
+ bool top_level =
+ !scope_site.is_null() && top().is_identical_to(scope_site);
+ if (top_level) {
+ PrintF("*** Setting AllocationSite %p transition_info %p\n",
+ static_cast<void*>(*scope_site), static_cast<void*>(*object));
+ } else {
+ PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
+ static_cast<void*>(*top()), static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ }
}
}
- object->set_elements(*copied_elements_values);
- object->set_length(Smi::FromInt(copied_elements_values->length()));
+ static const bool kCopying = false;
+};
+
+MaybeHandle<JSObject> DeepWalk(Handle<JSObject> object,
+ DeprecationUpdateContext* site_context) {
+ JSObjectWalkVisitor<DeprecationUpdateContext> v(site_context, kNoHints);
+ MaybeHandle<JSObject> result = v.StructureWalk(object);
+ Handle<JSObject> for_assert;
+ DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
+ return result;
+}
- JSObject::ValidateElements(object);
- return object;
+MaybeHandle<JSObject> DeepWalk(Handle<JSObject> object,
+ AllocationSiteCreationContext* site_context) {
+ JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, kNoHints);
+ MaybeHandle<JSObject> result = v.StructureWalk(object);
+ Handle<JSObject> for_assert;
+ DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
+ return result;
}
-MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<FeedbackVector> vector,
- Handle<FixedArray> compile_time_value) {
- Handle<HeapObject> elements =
- CompileTimeValue::GetElements(compile_time_value);
- int flags = CompileTimeValue::GetLiteralTypeFlags(compile_time_value);
- if (flags == CompileTimeValue::kArrayLiteralFlag) {
- Handle<ConstantElementsPair> elems =
- Handle<ConstantElementsPair>::cast(elements);
- return CreateArrayLiteralBoilerplate(isolate, vector, elems);
- }
- Handle<BoilerplateDescription> props =
- Handle<BoilerplateDescription>::cast(elements);
- bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
- return CreateObjectLiteralBoilerplate(isolate, vector, props,
- use_fast_elements, has_null_prototype);
+MaybeHandle<JSObject> DeepCopy(Handle<JSObject> object,
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints) {
+ JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, hints);
+ MaybeHandle<JSObject> copy = v.StructureWalk(object);
+ Handle<JSObject> for_assert;
+ DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
+ return copy;
}
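+// Creates the boilerplate object for an object literal from its
+// BoilerplateDescription.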
+struct ObjectBoilerplate {
+ static Handle<JSObject> Create(Isolate* isolate,
+ Handle<HeapObject> description, int flags,
+ PretenureFlag pretenure_flag) {
+ Handle<Context> native_context = isolate->native_context();
+ Handle<BoilerplateDescription> boilerplate_description =
+ Handle<BoilerplateDescription>::cast(description);
+ bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
+
+ // In case we have function literals, we want the object to be in
+ // slow properties mode for now. We don't go in the map cache because
+ // maps with constant functions can't be shared if the functions are
+ // not the same (which is the common case).
+ int number_of_properties = boilerplate_description->backing_store_size();
+
+ // Ignore number_of_properties and force a dictionary map when the literal
+ // has a null prototype (__proto__: null).
+ Handle<Map> map =
+ has_null_prototype
+ ? handle(native_context->slow_object_with_null_prototype_map(),
+ isolate)
+ : isolate->factory()->ObjectLiteralMapFromCache(
+ native_context, number_of_properties);
+
+ Handle<JSObject> boilerplate =
+ map->is_dictionary_map()
+ ? isolate->factory()->NewSlowJSObjectFromMap(
+ map, number_of_properties, pretenure_flag)
+ : isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
+
+ // Normalize the elements of the boilerplate to save space if needed.
+ if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
+
+ // Add the constant properties to the boilerplate.
+ int length = boilerplate_description->size();
+ // TODO(verwaest): Support tracking representations in the boilerplate.
+ for (int index = 0; index < length; index++) {
+ Handle<Object> key(boilerplate_description->name(index), isolate);
+ Handle<Object> value(boilerplate_description->value(index), isolate);
+ if (value->IsFixedArray()) {
+ // The value contains the CompileTimeValue with the boilerplate
+ // properties of a simple object or array literal.
+ Handle<FixedArray> compile_time_value = Handle<FixedArray>::cast(value);
+ value =
+ InnerCreateBoilerplate(isolate, compile_time_value, pretenure_flag);
+ }
+ uint32_t element_index = 0;
+ if (key->ToArrayIndex(&element_index)) {
+ // Array index (uint32).
+ if (value->IsUninitialized(isolate)) {
+ value = handle(Smi::kZero, isolate);
+ }
+ JSObject::SetOwnElementIgnoreAttributes(boilerplate, element_index,
+ value, NONE)
+ .Check();
+ } else {
+ Handle<String> name = Handle<String>::cast(key);
+ DCHECK(!name->AsArrayIndex(&element_index));
+ JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name, value, NONE)
+ .Check();
+ }
+ }
-RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
+ if (map->is_dictionary_map() && !has_null_prototype) {
+ // TODO(cbruni): avoid making the boilerplate fast again, the clone stub
+ // supports dict-mode objects directly.
+ JSObject::MigrateSlowToFast(boilerplate,
+ boilerplate->map()->unused_property_fields(),
+ "FastLiteral");
+ }
+ return boilerplate;
+ }
+};
+
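+// Creates the boilerplate JSArray for an array literal from its constant
+// elements description.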
+struct ArrayBoilerplate {
+ static Handle<JSObject> Create(Isolate* isolate,
+ Handle<HeapObject> description, int flags,
+ PretenureFlag pretenure_flag) {
+ Handle<ConstantElementsPair> elements =
+ Handle<ConstantElementsPair>::cast(description);
+ // Create the JSArray.
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(elements->elements_kind());
+
+ Handle<FixedArrayBase> constant_elements_values(
+ elements->constant_values());
+ Handle<FixedArrayBase> copied_elements_values;
+ if (IsDoubleElementsKind(constant_elements_kind)) {
+ copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
+ Handle<FixedDoubleArray>::cast(constant_elements_values));
+ } else {
+ DCHECK(IsSmiOrObjectElementsKind(constant_elements_kind));
+ const bool is_cow = (constant_elements_values->map() ==
+ isolate->heap()->fixed_cow_array_map());
+ if (is_cow) {
+ copied_elements_values = constant_elements_values;
+#if DEBUG
+ Handle<FixedArray> fixed_array_values =
+ Handle<FixedArray>::cast(copied_elements_values);
+ for (int i = 0; i < fixed_array_values->length(); i++) {
+ DCHECK(!fixed_array_values->get(i)->IsFixedArray());
+ }
+#endif
+ } else {
+ Handle<FixedArray> fixed_array_values =
+ Handle<FixedArray>::cast(constant_elements_values);
+ Handle<FixedArray> fixed_array_values_copy =
+ isolate->factory()->CopyFixedArray(fixed_array_values);
+ copied_elements_values = fixed_array_values_copy;
+ FOR_WITH_HANDLE_SCOPE(
+ isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
+ if (fixed_array_values->get(i)->IsFixedArray()) {
+ // The value contains the CompileTimeValue with the
+ // boilerplate description of a simple object or
+ // array literal.
+ Handle<FixedArray> compile_time_value(
+ FixedArray::cast(fixed_array_values->get(i)));
+ Handle<Object> result = InnerCreateBoilerplate(
+ isolate, compile_time_value, pretenure_flag);
+ fixed_array_values_copy->set(i, *result);
+ }
+ });
+ }
+ }
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(closure->feedback_vector()->Get(literal_slot),
- isolate);
- if (boilerplate->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
- closure->feedback_vector()->Set(literal_slot, *boilerplate);
+ return isolate->factory()->NewJSArrayWithElements(
+ copied_elements_values, constant_elements_kind,
+ copied_elements_values->length(), pretenure_flag);
}
- return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
-}
+};
+Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
+ Handle<FixedArray> compile_time_value,
+ PretenureFlag pretenure_flag) {
+ Handle<HeapObject> elements =
+ CompileTimeValue::GetElements(compile_time_value);
+ int flags = CompileTimeValue::GetLiteralTypeFlags(compile_time_value);
+ if (flags == CompileTimeValue::kArrayLiteralFlag) {
+ return ArrayBoilerplate::Create(isolate, elements, flags, pretenure_flag);
+ }
+ return ObjectBoilerplate::Create(isolate, elements, flags, pretenure_flag);
+}
-RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(BoilerplateDescription, boilerplate_description,
- 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
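+// Creates or reuses the boilerplate for a literal, caching it in the closure's
+// feedback vector at the given literal slot.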
+template <typename Boilerplate>
+MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
+ Handle<JSFunction> closure,
+ int literals_index,
+ Handle<HeapObject> description, int flags) {
Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
- bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
- bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
-
FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
CHECK(literals_slot.ToInt() < vector->slot_count());
-
- // Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(vector->Get(literals_slot), isolate);
- Handle<AllocationSite> site;
- Handle<JSObject> boilerplate;
- if (literal_site->IsUndefined(isolate)) {
- Handle<Object> raw_boilerplate;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, raw_boilerplate,
- CreateObjectLiteralBoilerplate(isolate, vector, boilerplate_description,
- use_fast_elements, has_null_prototype));
- boilerplate = Handle<JSObject>::cast(raw_boilerplate);
-
- AllocationSiteCreationContext creation_context(isolate);
- site = creation_context.EnterNewScope();
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::DeepWalk(boilerplate, &creation_context));
- creation_context.ExitScope(site, boilerplate);
-
- // Update the functions literal and return the boilerplate.
- vector->Set(literals_slot, *site);
- } else {
- site = Handle<AllocationSite>::cast(literal_site);
- boilerplate =
- Handle<JSObject>(JSObject::cast(site->transition_info()), isolate);
+ DeepCopyHints copy_hints =
+ (flags & AggregateLiteral::kIsShallow) ? kObjectIsShallow : kNoHints;
+ if (FLAG_track_double_fields && !FLAG_unbox_double_fields) {
+ // Make sure we properly clone mutable heap numbers on 32-bit platforms.
+ copy_hints = kNoHints;
}
- AllocationSiteUsageContext usage_context(isolate, site, enable_mementos);
- usage_context.EnterNewScope();
- MaybeHandle<Object> maybe_copy =
- JSObject::DeepCopy(boilerplate, &usage_context);
- usage_context.ExitScope(site, boilerplate);
- RETURN_RESULT_OR_FAILURE(isolate, maybe_copy);
-}
-
-MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
- Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot literals_slot,
- Handle<ConstantElementsPair> elements) {
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> literal_site(vector->Get(literals_slot), isolate);
Handle<AllocationSite> site;
- if (literal_site->IsUndefined(isolate)) {
- Handle<Object> boilerplate;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, boilerplate,
- CreateArrayLiteralBoilerplate(isolate, vector, elements),
- AllocationSite);
+ Handle<JSObject> boilerplate;
+ if (HasBoilerplate(isolate, literal_site)) {
+ site = Handle<AllocationSite>::cast(literal_site);
+ boilerplate = Handle<JSObject>(site->boilerplate(), isolate);
+ } else {
+ // Eagerly create AllocationSites for literals that contain an Array.
+ bool needs_initial_allocation_site =
+ (flags & AggregateLiteral::kNeedsInitialAllocationSite) != 0;
+ // TODO(cbruni): Even in the case where we need an initial allocation site
+ // we could still create the boilerplate lazily to save memory.
+ if (!needs_initial_allocation_site &&
+ IsUninitializedLiteralSite(literal_site)) {
+ PreInitializeLiteralSite(vector, literals_slot);
+ boilerplate =
+ Boilerplate::Create(isolate, description, flags, NOT_TENURED);
+ if (copy_hints == kNoHints) {
+ DeprecationUpdateContext update_context(isolate);
+ RETURN_ON_EXCEPTION(isolate, DeepWalk(boilerplate, &update_context),
+ JSObject);
+ }
+ return boilerplate;
+ } else {
+ PretenureFlag pretenure_flag =
+ isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
+ boilerplate =
+ Boilerplate::Create(isolate, description, flags, pretenure_flag);
+ }
+ // Install AllocationSite objects.
AllocationSiteCreationContext creation_context(isolate);
site = creation_context.EnterNewScope();
- if (JSObject::DeepWalk(Handle<JSObject>::cast(boilerplate),
- &creation_context).is_null()) {
- return Handle<AllocationSite>::null();
- }
- creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
+ RETURN_ON_EXCEPTION(isolate, DeepWalk(boilerplate, &creation_context),
+ JSObject);
+ creation_context.ExitScope(site, boilerplate);
vector->Set(literals_slot, *site);
- } else {
- site = Handle<AllocationSite>::cast(literal_site);
}
- return site;
-}
-
-static MaybeHandle<JSObject> CreateArrayLiteralImpl(
- Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot literals_slot,
- Handle<ConstantElementsPair> elements, int flags) {
- CHECK(literals_slot.ToInt() < vector->slot_count());
- Handle<AllocationSite> site;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, site,
- GetLiteralAllocationSite(isolate, vector, literals_slot, elements),
- JSObject);
+ STATIC_ASSERT(static_cast<int>(ObjectLiteral::kDisableMementos) ==
+ static_cast<int>(ArrayLiteral::kDisableMementos));
+ bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
- bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
- Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
+ // Copy the existing boilerplate.
AllocationSiteUsageContext usage_context(isolate, site, enable_mementos);
usage_context.EnterNewScope();
- JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0
- ? JSObject::kNoHints
- : JSObject::kObjectIsShallow;
MaybeHandle<JSObject> copy =
- JSObject::DeepCopy(boilerplate, &usage_context, hints);
+ DeepCopy(boilerplate, &usage_context, copy_hints);
usage_context.ExitScope(site, boilerplate);
return copy;
}
+} // namespace
+RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(BoilerplateDescription, description, 2);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, CreateLiteral<ObjectBoilerplate>(
+ isolate, closure, literals_index, description, flags));
+}
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
@@ -317,27 +537,35 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
-
- FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
- Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
RETURN_RESULT_OR_FAILURE(
- isolate,
- CreateArrayLiteralImpl(isolate, vector, literals_slot, elements, flags));
+ isolate, CreateLiteral<ArrayBoilerplate>(isolate, closure, literals_index,
+ elements, flags));
}
-
-RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
+RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
- FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
- RETURN_RESULT_OR_FAILURE(
- isolate, CreateArrayLiteralImpl(isolate, vector, literals_slot, elements,
- ArrayLiteral::kShallowElements));
+ FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> literal_site(vector->Get(literal_slot), isolate);
+ Handle<Object> boilerplate;
+ if (!HasBoilerplate(isolate, literal_site)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
+ if (IsUninitializedLiteralSite(literal_site)) {
+ PreInitializeLiteralSite(vector, literal_slot);
+ return *boilerplate;
+ }
+ vector->Set(literal_slot, *boilerplate);
+ }
+ return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index fa49df88da..9d57d01b7f 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -44,7 +44,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
for (int i = 0; i < found.length(); ++i) {
Handle<SharedFunctionInfo> shared = found[i];
SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
- Handle<String> name(String::cast(shared->name()));
+ Handle<String> name(shared->name(), isolate);
info_wrapper.SetProperties(name, shared->start_position(),
shared->end_position(), shared);
result->set(i, *info_wrapper.GetJSArray());
@@ -223,7 +223,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
CHECK(new_shared_array->length() == old_shared_array->length());
CHECK(old_shared_array->HasFastElements());
CHECK(new_shared_array->HasFastElements());
- int array_length = Smi::cast(old_shared_array->length())->value();
+ int array_length = Smi::ToInt(old_shared_array->length());
for (int i = 0; i < array_length; i++) {
Handle<Object> old_element;
Handle<Object> new_element;
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 3c896d8c56..af156ea7ce 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -17,32 +17,12 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, specifier, 1);
- Handle<JSPromise> promise = isolate->factory()->NewJSPromise();
-
- Handle<String> specifier_str;
- MaybeHandle<String> maybe_specifier = Object::ToString(isolate, specifier);
- if (!maybe_specifier.ToHandle(&specifier_str)) {
- DCHECK(isolate->has_pending_exception());
- Handle<Object> reason(isolate->pending_exception(), isolate);
- isolate->clear_pending_exception();
-
- Handle<Object> argv[] = {promise, reason,
- isolate->factory()->ToBoolean(false)};
-
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Execution::Call(isolate, isolate->promise_internal_reject(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv))
- return *promise;
- }
- DCHECK(!isolate->has_pending_exception());
-
Handle<Script> script(Script::cast(function->shared()->script()));
Handle<String> source_url(String::cast(script->name()));
- isolate->RunHostImportModuleDynamicallyCallback(source_url, specifier_str,
- promise);
- return *promise;
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ isolate->RunHostImportModuleDynamicallyCallback(source_url, specifier));
}
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index eef2e6616a..3e76e4efe2 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -67,11 +67,11 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
DisallowHeapAllocation no_allocation;
if (receiver->IsJSGlobalObject()) {
// Attempt dictionary lookup.
- GlobalDictionary* dictionary = receiver->global_dictionary();
+ GlobalDictionary* dictionary =
+ JSGlobalObject::cast(*receiver)->global_dictionary();
int entry = dictionary->FindEntry(key);
if (entry != GlobalDictionary::kNotFound) {
- DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
+ PropertyCell* cell = dictionary->CellAt(entry);
if (cell->property_details().kind() == kData) {
Object* value = cell->value();
if (!value->IsTheHole(isolate)) {
@@ -96,18 +96,17 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
// that subsequent accesses will also call the runtime. Proactively
// transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
// doubles for those future calls in the case that the elements would
- // become FAST_DOUBLE_ELEMENTS.
+ // become PACKED_DOUBLE_ELEMENTS.
Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- if (Smi::cast(*key_obj)->value() >= js_object->elements()->length()) {
- elements_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
+ if (IsDoubleElementsKind(elements_kind)) {
+ if (Smi::ToInt(*key_obj) >= js_object->elements()->length()) {
+ elements_kind = IsHoleyElementsKind(elements_kind) ? HOLEY_ELEMENTS
+ : PACKED_ELEMENTS;
JSObject::TransitionElementsKind(js_object, elements_kind);
}
} else {
- DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
!IsFastElementsKind(elements_kind));
}
}
@@ -306,11 +305,9 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
DCHECK(name->IsUniqueName());
Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
- int entry;
- PropertyDetails property_details(kData, NONE, 0, PropertyCellType::kNoCell);
- dictionary =
- NameDictionary::Add(dictionary, name, value, property_details, &entry);
- receiver->set_properties(*dictionary);
+ PropertyDetails property_details(kData, NONE, PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(dictionary, name, value, property_details);
+ receiver->SetProperties(*dictionary);
return *value;
}
@@ -387,6 +384,17 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ if (prototype->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(prototype);
+ if (!function->shared()->has_shared_name()) {
+ Handle<Map> function_map(function->map(), isolate);
+ if (!JSFunction::SetName(function, isolate->factory()->proto_string(),
+ isolate->factory()->empty_string())) {
+ return isolate->heap()->exception();
+ }
+ CHECK_EQ(*function_map, function->map());
+ }
+ }
MAYBE_RETURN(
JSReceiver::SetPrototype(obj, prototype, false, Object::THROW_ON_ERROR),
isolate->heap()->exception());
@@ -498,7 +506,7 @@ RUNTIME_FUNCTION(Runtime_AppendElement) {
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::AddDataElement(array, index, value, NONE));
- JSObject::ValidateElements(array);
+ JSObject::ValidateElements(*array);
return *array;
}
@@ -546,13 +554,11 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty) {
RUNTIME_FUNCTION(Runtime_ShrinkPropertyDictionary) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
- Handle<NameDictionary> new_properties =
- NameDictionary::Shrink(dictionary, key);
- receiver->set_properties(*new_properties);
+ Handle<NameDictionary> new_properties = NameDictionary::Shrink(dictionary);
+ receiver->SetProperties(*new_properties);
return Smi::kZero;
}
@@ -669,7 +675,8 @@ RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
CHECK(field_index.property_index() <
object->map()->GetInObjectProperties());
} else {
- CHECK(field_index.outobject_array_index() < object->properties()->length());
+ CHECK(field_index.outobject_array_index() <
+ object->property_dictionary()->length());
}
return *JSObject::FastPropertyAt(object, Representation::Double(),
field_index);
@@ -762,8 +769,16 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
if (flags & DataPropertyInLiteralFlag::kSetFunctionName) {
DCHECK(value->IsJSFunction());
- JSFunction::SetName(Handle<JSFunction>::cast(value), name,
- isolate->factory()->empty_string());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(value);
+ DCHECK(!function->shared()->has_shared_name());
+ Handle<Map> function_map(function->map(), isolate);
+ if (!JSFunction::SetName(function, name,
+ isolate->factory()->empty_string())) {
+ return isolate->heap()->exception();
+ }
+ // Class constructors do not reserve in-object space for name field.
+ CHECK_IMPLIES(!IsClassConstructor(function->shared()->kind()),
+ *function_map == function->map());
}
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -861,7 +876,11 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
if (String::cast(getter->shared()->name())->length() == 0) {
- JSFunction::SetName(getter, name, isolate->factory()->get_string());
+ Handle<Map> getter_map(getter->map(), isolate);
+ if (!JSFunction::SetName(getter, name, isolate->factory()->get_string())) {
+ return isolate->heap()->exception();
+ }
+ CHECK_EQ(*getter_map, getter->map());
}
RETURN_FAILURE_ON_EXCEPTION(
@@ -922,6 +941,64 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
return *target;
}
+namespace {
+
+inline void TrySetNative(Handle<Object> maybe_func) {
+ if (!maybe_func->IsJSFunction()) return;
+ JSFunction::cast(*maybe_func)->shared()->set_native(true);
+}
+
+inline void TrySetNativeAndLength(Handle<Object> maybe_func, int length) {
+ if (!maybe_func->IsJSFunction()) return;
+ SharedFunctionInfo* shared = JSFunction::cast(*maybe_func)->shared();
+ shared->set_native(true);
+ if (length >= 0) {
+ shared->set_length(length);
+ }
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_DefineMethodsInternal) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CHECK(isolate->bootstrapper()->IsActive());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, source_class, 1);
+ CONVERT_SMI_ARG_CHECKED(length, 2);
+
+ DCHECK(source_class->prototype()->IsJSObject());
+ Handle<JSObject> source(JSObject::cast(source_class->prototype()), isolate);
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(source, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString));
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
+ if (*key == isolate->heap()->constructor_string()) continue;
+
+ PropertyDescriptor descriptor;
+ Maybe<bool> did_get_descriptor =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, source, key, &descriptor);
+ CHECK(did_get_descriptor.FromJust());
+ if (descriptor.has_value()) {
+ TrySetNativeAndLength(descriptor.value(), length);
+ } else {
+ if (descriptor.has_get()) TrySetNative(descriptor.get());
+ if (descriptor.has_set()) TrySetNative(descriptor.set());
+ }
+
+ Maybe<bool> success = JSReceiver::DefineOwnProperty(
+ isolate, target, key, &descriptor, Object::DONT_THROW);
+ CHECK(success.FromJust());
+ }
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -931,7 +1008,11 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
if (String::cast(setter->shared()->name())->length() == 0) {
- JSFunction::SetName(setter, name, isolate->factory()->set_string());
+ Handle<Map> setter_map(setter->map(), isolate);
+ if (!JSFunction::SetName(setter, name, isolate->factory()->set_string())) {
+ return isolate->heap()->exception();
+ }
+ CHECK_EQ(*setter_map, setter->map());
}
RETURN_FAILURE_ON_EXCEPTION(
@@ -1052,10 +1133,11 @@ RUNTIME_FUNCTION(Runtime_Compare) {
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- Maybe<bool> result =
- JSReceiver::HasInPrototypeChain(isolate, object, prototype);
+ if (!object->IsJSReceiver()) return isolate->heap()->false_value();
+ Maybe<bool> result = JSReceiver::HasInPrototypeChain(
+ isolate, Handle<JSReceiver>::cast(object), prototype);
MAYBE_RETURN(result, isolate->heap()->exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -1070,26 +1152,6 @@ RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
return *isolate->factory()->NewJSIteratorResult(value, done->BooleanValue());
}
-RUNTIME_FUNCTION(Runtime_CreateKeyValueArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(2);
- elements->set(0, *key);
- elements->set(1, *value);
- return *isolate->factory()->NewJSArrayWithElements(elements, FAST_ELEMENTS,
- 2);
-}
-
-RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(object->IsAccessCheckNeeded());
-}
-
-
RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index de8231e2e9..05c3cf61ba 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -14,60 +14,6 @@ namespace v8 {
namespace internal {
-// ES6 9.5.13 [[Call]] (thisArgument, argumentsList)
-RUNTIME_FUNCTION(Runtime_JSProxyCall) {
- HandleScope scope(isolate);
- DCHECK_LE(2, args.length());
- // thisArgument == receiver
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, args.length() - 1);
- Handle<String> trap_name = isolate->factory()->apply_string();
- // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
- Handle<Object> handler(proxy->handler(), isolate);
- // 2. If handler is null, throw a TypeError exception.
- if (proxy->IsRevoked()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
- }
- // 3. Assert: Type(handler) is Object.
- DCHECK(handler->IsJSReceiver());
- // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
- // 5. Let trap be ? GetMethod(handler, "apply").
- Handle<Object> trap;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, trap,
- Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
- // 6. If trap is undefined, then
- int const arguments_length = args.length() - 2;
- if (trap->IsUndefined(isolate)) {
- // 6.a. Return Call(target, thisArgument, argumentsList).
- ScopedVector<Handle<Object>> argv(arguments_length);
- for (int i = 0; i < arguments_length; ++i) {
- argv[i] = args.at(i + 1);
- }
- RETURN_RESULT_OR_FAILURE(
- isolate, Execution::Call(isolate, target, receiver, arguments_length,
- argv.start()));
- }
- // 7. Let argArray be CreateArrayFromList(argumentsList).
- Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
- FAST_ELEMENTS, arguments_length, arguments_length);
- ElementsAccessor* accessor = arg_array->GetElementsAccessor();
- {
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < arguments_length; i++) {
- accessor->Set(arg_array, i, args[i + 1]);
- }
- }
- // 8. Return Call(trap, handler, Ā«target, thisArgument, argArrayĀ»).
- Handle<Object> trap_args[] = {target, receiver, arg_array};
- RETURN_RESULT_OR_FAILURE(
- isolate,
- Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
-}
-
-
// 9.5.14 [[Construct]] (argumentsList, newTarget)
RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
HandleScope scope(isolate);
@@ -108,7 +54,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
}
// 7. Let argArray be CreateArrayFromList(argumentsList).
Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
- FAST_ELEMENTS, arguments_length, arguments_length);
+ PACKED_ELEMENTS, arguments_length, arguments_length);
ElementsAccessor* accessor = arg_array->GetElementsAccessor();
{
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 8803deff0f..a5d61d8348 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -39,7 +39,7 @@ int LookupNamedCapture(std::function<bool(String*)> name_matches,
String* capture_name = String::cast(capture_name_map->get(name_ix));
if (!name_matches(capture_name)) continue;
- maybe_capture_index = Smi::cast(capture_name_map->get(index_ix))->value();
+ maybe_capture_index = Smi::ToInt(capture_name_map->get(index_ix));
break;
}
@@ -869,10 +869,10 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
int part_count = indices->length();
Handle<JSArray> result =
- isolate->factory()->NewJSArray(FAST_ELEMENTS, part_count, part_count,
+ isolate->factory()->NewJSArray(PACKED_ELEMENTS, part_count, part_count,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
- DCHECK(result->HasFastObjectElements());
+ DCHECK(result->HasObjectElements());
Handle<FixedArray> elements(FixedArray::cast(result->elements()));
@@ -890,7 +890,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
}
if (limit == 0xffffffffu) {
- if (result->HasFastObjectElements()) {
+ if (result->HasObjectElements()) {
RegExpResultsCache::Enter(isolate, subject, pattern, elements,
isolate->factory()->empty_fixed_array(),
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
@@ -1140,7 +1140,7 @@ Handle<JSObject> ConstructNamedCaptureGroupsObject(
const int index_ix = i * 2 + 1;
Handle<String> capture_name(String::cast(capture_map->get(name_ix)));
- const int capture_ix = Smi::cast(capture_map->get(index_ix))->value();
+ const int capture_ix = Smi::ToInt(capture_map->get(index_ix));
DCHECK(1 <= capture_ix && capture_ix <= capture_count);
Handle<Object> capture_value(f_get_capture(capture_ix), isolate);
@@ -1177,7 +1177,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
int capture_registers = (capture_count + 1) * 2;
int32_t* last_match = NewArray<int32_t>(capture_registers);
for (int i = 0; i < capture_registers; i++) {
- last_match[i] = Smi::cast(last_match_cache->get(i))->value();
+ last_match[i] = Smi::ToInt(last_match_cache->get(i));
}
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(cached_answer));
@@ -1197,7 +1197,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
if (global_cache.HasException()) return isolate->heap()->exception();
// Ensured in Runtime_RegExpExecMultiple.
- DCHECK(result_array->HasFastObjectElements());
+ DCHECK(result_array->HasObjectElements());
Handle<FixedArray> result_elements(
FixedArray::cast(result_array->elements()));
if (result_elements->length() < 16) {
@@ -1423,7 +1423,6 @@ MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
}
UNREACHABLE();
- return MaybeHandle<String>();
}
} // namespace
@@ -1437,7 +1436,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- CHECK(result_array->HasFastObjectElements());
+ CHECK(result_array->HasObjectElements());
subject = String::Flatten(subject);
CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 76e7c2b186..38545139df 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -139,7 +139,7 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
int length = declarations->length();
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 4, {
Handle<String> name(String::cast(declarations->get(i)), isolate);
- FeedbackSlot slot(Smi::cast(declarations->get(i + 1))->value());
+ FeedbackSlot slot(Smi::ToInt(declarations->get(i + 1)));
Handle<Object> possibly_literal_slot(declarations->get(i + 2), isolate);
Handle<Object> initial_value(declarations->get(i + 3), isolate);
@@ -153,7 +153,7 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
// Copy the function and update its context. Use it as value.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(initial_value);
- FeedbackSlot literals_slot(Smi::cast(*possibly_literal_slot)->value());
+ FeedbackSlot literals_slot(Smi::ToInt(*possibly_literal_slot));
Handle<Cell> literals(Cell::cast(feedback_vector->Get(literals_slot)),
isolate);
Handle<JSFunction> function =
@@ -211,18 +211,6 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobalsForInterpreter) {
return DeclareGlobals(isolate, declarations, flags, feedback_vector);
}
-RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-
- Handle<JSGlobalObject> global(isolate->global_object());
- RETURN_RESULT_OR_FAILURE(
- isolate, Object::SetProperty(global, name, value, language_mode));
-}
-
namespace {
Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
@@ -248,7 +236,9 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
VariableMode mode;
// Check for a conflict with a lexically scoped variable
- context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes, &init_flag,
+ const ContextLookupFlags lookup_flags = static_cast<ContextLookupFlags>(
+ FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT);
+ context_arg->Lookup(name, lookup_flags, &index, &attributes, &init_flag,
&mode);
if (attributes != ABSENT && IsLexicalVariableMode(mode)) {
// ES#sec-evaldeclarationinstantiation 5.a.i.1:
@@ -262,6 +252,7 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
Handle<Object> holder = context->Lookup(name, DONT_FOLLOW_CHAINS, &index,
&attributes, &init_flag, &mode);
+ DCHECK(holder.is_null() || !holder->IsModule());
DCHECK(!isolate->has_pending_exception());
Handle<JSObject> object;
@@ -572,9 +563,9 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
std::unique_ptr<Handle<Object>[]> arguments =
GetCallerArguments(isolate, &argument_count);
int num_elements = std::max(0, argument_count - start_index);
- Handle<JSObject> result =
- isolate->factory()->NewJSArray(FAST_ELEMENTS, num_elements, num_elements,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
+ Handle<JSObject> result = isolate->factory()->NewJSArray(
+ PACKED_ELEMENTS, num_elements, num_elements,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
{
DisallowHeapAllocation no_gc;
FixedArray* elements = FixedArray::cast(result->elements());
@@ -823,8 +814,9 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
return isolate->heap()->true_value();
}
- // If the slot was found in a context, it should be DONT_DELETE.
- if (holder->IsContext()) {
+ // If the slot was found in a context or in module imports and exports it
+ // should be DONT_DELETE.
+ if (holder->IsContext() || holder->IsModule()) {
return isolate->heap()->false_value();
}
@@ -853,6 +845,9 @@ MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
+ if (!holder.is_null() && holder->IsModule()) {
+ return Module::LoadVariable(Handle<Module>::cast(holder), index);
+ }
if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
// If the "property" we were looking for is a local variable, the
@@ -936,8 +931,9 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
namespace {
-MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
- LanguageMode language_mode) {
+MaybeHandle<Object> StoreLookupSlot(
+ Handle<String> name, Handle<Object> value, LanguageMode language_mode,
+ ContextLookupFlags context_lookup_flags = FOLLOW_CHAINS) {
Isolate* const isolate = name->GetIsolate();
Handle<Context> context(isolate->context(), isolate);
@@ -945,13 +941,22 @@ MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
PropertyAttributes attributes;
InitializationFlag flag;
VariableMode mode;
- Handle<Object> holder =
- context->Lookup(name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
+ Handle<Object> holder = context->Lookup(name, context_lookup_flags, &index,
+ &attributes, &flag, &mode);
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
+ } else if (holder->IsModule()) {
+ if ((attributes & READ_ONLY) == 0) {
+ Module::StoreVariable(Handle<Module>::cast(holder), index, value);
+ } else if (is_strict(language_mode)) {
+ // Setting read only property in strict mode.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrictCannotAssign, name),
+ Object);
+ }
+ return value;
}
-
// The property was found in a context slot.
if (index != Context::kNotFound) {
if (flag == kNeedsInitialization &&
@@ -1004,6 +1009,19 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, SLOPPY));
}
+// Store into a dynamic context for sloppy-mode block-scoped function hoisting
+// which leaks out of an eval. In particular, with-scopes are skipped to
+// reach the appropriate var-like declaration.
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ const ContextLookupFlags lookup_flags = static_cast<ContextLookupFlags>(
+ FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ StoreLookupSlot(name, value, SLOPPY, lookup_flags));
+}
RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 99fbf2d475..adc388196b 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -108,7 +108,6 @@ MaybeHandle<String> StringReplaceOneCharWithString(
}
}
-
RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -197,7 +196,6 @@ RUNTIME_FUNCTION(Runtime_SubString) {
return *isolate->factory()->NewSubString(string, start, end);
}
-
RUNTIME_FUNCTION(Runtime_StringAdd) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -208,6 +206,22 @@ RUNTIME_FUNCTION(Runtime_StringAdd) {
isolate->factory()->NewConsString(str1, str2));
}
+RUNTIME_FUNCTION(Runtime_StringConcat) {
+ HandleScope scope(isolate);
+ DCHECK_LE(2, args.length());
+ int const argc = args.length();
+ ScopedVector<Handle<Object>> argv(argc);
+
+ isolate->counters()->string_add_runtime()->Increment();
+ IncrementalStringBuilder builder(isolate);
+ for (int i = 0; i < argc; ++i) {
+ Handle<String> str = Handle<String>::cast(args.at(i));
+ if (str->length() != 0) {
+ builder.AppendString(str);
+ }
+ }
+ RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
+}
RUNTIME_FUNCTION(Runtime_InternalizeString) {
HandleScope handles(isolate);
@@ -216,7 +230,6 @@ RUNTIME_FUNCTION(Runtime_InternalizeString) {
return *isolate->factory()->InternalizeString(string);
}
-
RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -236,7 +249,6 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
return Smi::FromInt(subject->Get(i));
}
-
RUNTIME_FUNCTION(Runtime_StringCompare) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -254,10 +266,8 @@ RUNTIME_FUNCTION(Runtime_StringCompare) {
break;
}
UNREACHABLE();
- return Smi::kZero;
}
-
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -280,7 +290,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
JSObject::EnsureCanContainHeapObjectElements(array);
int special_length = special->length();
- if (!array->HasFastObjectElements()) {
+ if (!array->HasObjectElements()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
@@ -330,7 +340,6 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
}
}
-
RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -340,7 +349,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
}
CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
- CHECK(array->HasFastObjectElements());
+ CHECK(array->HasObjectElements());
CHECK(array_length >= 0);
Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
@@ -471,7 +480,6 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
DCHECK(cursor <= buffer.length());
}
-
RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -480,7 +488,7 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
 // elements_array is a fast-mode JSArray of alternating positions
// (increasing order) and strings.
- CHECK(elements_array->HasFastSmiOrObjectElements());
+ CHECK(elements_array->HasSmiOrObjectElements());
// array_length is length of original array (used to add separators);
// separator is string to put between elements. Assumed to be non-empty.
CHECK(array_length > 0);
@@ -556,7 +564,6 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
}
}
-
// Copies Latin1 characters to the given fixed array looking up
// one-char strings in the cache. Gives up on the first char that is
// not in the cache and fills the remainder with smi zeros. Returns
@@ -587,7 +594,6 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
return i;
}
-
// Converts a String to JSArray.
// For example, "foo" => ["f", "o", "o"].
RUNTIME_FUNCTION(Runtime_StringToArray) {
@@ -635,7 +641,6 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
return *isolate->factory()->NewJSArrayWithElements(elements);
}
-
RUNTIME_FUNCTION(Runtime_StringLessThan) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -651,7 +656,6 @@ RUNTIME_FUNCTION(Runtime_StringLessThan) {
break;
}
UNREACHABLE();
- return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
@@ -669,7 +673,6 @@ RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
break;
}
UNREACHABLE();
- return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
@@ -687,7 +690,6 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
break;
}
UNREACHABLE();
- return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
@@ -705,7 +707,6 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
break;
}
UNREACHABLE();
- return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringEqual) {
@@ -731,7 +732,6 @@ RUNTIME_FUNCTION(Runtime_FlattenString) {
return *String::Flatten(str);
}
-
RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
HandleScope handlescope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 6e1d09f6ad..10deb67216 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -71,61 +71,18 @@ void ThrowRangeException(v8::Isolate* isolate, const char* message) {
isolate->ThrowException(NewRangeException(isolate, message));
}
-void RejectPromiseWithRangeError(
- const v8::FunctionCallbackInfo<v8::Value>& args, const char* message) {
- v8::Isolate* isolate = args.GetIsolate();
- v8::HandleScope scope(isolate);
-
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::Local<v8::Promise::Resolver> resolver;
- if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
- v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(resolver->GetPromise());
-
- auto maybe = resolver->Reject(context, NewRangeException(isolate, message));
- CHECK(!maybe.IsNothing());
- return;
-}
-
bool WasmModuleOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (IsWasmCompileAllowed(args.GetIsolate(), args[0], false)) return false;
ThrowRangeException(args.GetIsolate(), "Sync compile not allowed");
return true;
}
-bool WasmCompileOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (IsWasmCompileAllowed(args.GetIsolate(), args[0], true)) return false;
- RejectPromiseWithRangeError(args, "Async compile not allowed");
- return true;
-}
-
bool WasmInstanceOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (IsWasmInstantiateAllowed(args.GetIsolate(), args[0], false)) return false;
ThrowRangeException(args.GetIsolate(), "Sync instantiate not allowed");
return true;
}
-bool WasmInstantiateOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (IsWasmInstantiateAllowed(args.GetIsolate(), args[0], true)) return false;
- RejectPromiseWithRangeError(args, "Async instantiate not allowed");
- return true;
-}
-
-bool GetWasmFromArray(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(args.Length() == 1);
- v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- v8::Local<v8::Value> module =
- v8::Local<v8::Object>::Cast(args[0])->Get(context, 0).ToLocalChecked();
-
- v8::Local<v8::Promise::Resolver> resolver =
- v8::Promise::Resolver::New(context).ToLocalChecked();
- args.GetReturnValue().Set(resolver->GetPromise());
- USE(resolver->Resolve(context, module));
- return true;
-}
-
-bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
-
} // namespace
namespace v8 {
@@ -165,14 +122,13 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared()->set_marked_for_tier_up(false);
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
- // TODO(turbofan): Deoptimization is not supported yet.
+ // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
if (function->code()->is_turbofanned() &&
- function->shared()->asm_function()) {
+ !function->shared()->HasBytecodeArray()) {
return isolate->heap()->undefined_value();
}
@@ -196,9 +152,9 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
- // TODO(turbofan): Deoptimization is not supported yet.
+ // TODO(turbofan): Deoptimization from AstGraphBuilder is not supported.
if (function->code()->is_turbofanned() &&
- function->shared()->asm_function()) {
+ !function->shared()->HasBytecodeArray()) {
return isolate->heap()->undefined_value();
}
@@ -277,22 +233,41 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
// If the function is already optimized, just return.
- if (function->IsOptimized()) return isolate->heap()->undefined_value();
+ if (function->IsOptimized()) {
+ return isolate->heap()->undefined_value();
+ }
- function->MarkForOptimization();
- if (FLAG_trace_opt) {
- PrintF("[manually marking ");
- function->ShortPrint();
- PrintF(" for optimization]\n");
+ // If the function has optimized code, ensure that we check for it and return.
+ if (function->HasOptimizedCode()) {
+ if (!function->IsInterpreted()) {
+ // For non I+TF path, install a shim which checks the optimization marker.
+ function->ReplaceCode(
+ isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
+ }
+ DCHECK(function->ChecksOptimizationMarker());
+ return isolate->heap()->undefined_value();
}
+ ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
isolate->concurrent_recompilation_enabled()) {
- function->AttemptConcurrentOptimization();
+ concurrency_mode = ConcurrencyMode::kConcurrent;
}
}
+ if (FLAG_trace_opt) {
+ PrintF("[manually marking ");
+ function->ShortPrint();
+ PrintF(" for %s optimization]\n",
+ concurrency_mode == ConcurrencyMode::kConcurrent ? "concurrent"
+ : "non-concurrent");
+ }
+
+ // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
+ JSFunction::EnsureLiterals(function);
+
+ function->MarkForOptimization(concurrency_mode);
return isolate->heap()->undefined_value();
}
@@ -315,6 +290,17 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
+ // Ensure that the function is marked for non-concurrent optimization, so that
+ // subsequent runs don't also optimize.
+ if (!function->HasOptimizedCode()) {
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - OptimizeOsr marking ");
+ function->ShortPrint();
+ PrintF(" for non-concurrent optimization]\n");
+ }
+ function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
+ }
+
// Make the profiler arm all back edges in unoptimized code.
if (it.frame()->type() == StackFrame::JAVA_SCRIPT ||
it.frame()->type() == StackFrame::INTERPRETED) {
@@ -473,16 +459,6 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_SetWasmCompileFromPromiseOverload) {
- isolate->set_wasm_compile_callback(GetWasmFromArray);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_ResetWasmOverloads) {
- isolate->set_wasm_compile_callback(NoExtension);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
// This only supports the case where the function being exported
// calls an intermediate function, and the intermediate function
@@ -492,9 +468,9 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
// It takes two parameters, the first one is the JSFunction,
// The second one is the type
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // If type is 0, it means that it is supposed to be a direct call into a WASM
- // function
- // If type is 1, it means that it is supposed to have wrappers
+ // If type is 0, it means that it is supposed to be a direct call into a wasm
+ // function.
+ // If type is 1, it means that it is supposed to have wrappers.
CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
Handle<Code> export_code = handle(function->code());
CHECK(export_code->kind() == Code::JS_TO_WASM_FUNCTION);
@@ -525,8 +501,8 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
}
}
CHECK(count == 1);
- // check the type of the imported exported function, it should be also a WASM
- // function in our case
+ // Check the type of the imported exported function, it should be also a wasm
+ // function in our case.
Handle<Code> imported_fct;
CHECK(type->value() == 0 || type->value() == 1);
@@ -556,7 +532,6 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
ctrl.AllowAnySizeForAsync = allow_async;
ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
- v8_isolate->SetWasmCompileCallback(WasmCompileOverride);
return isolate->heap()->undefined_value();
}
@@ -565,7 +540,6 @@ RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
CHECK(args.length() == 0);
v8_isolate->SetWasmInstanceCallback(WasmInstanceOverride);
- v8_isolate->SetWasmInstantiateCallback(WasmInstantiateOverride);
return isolate->heap()->undefined_value();
}
@@ -687,7 +661,6 @@ RUNTIME_FUNCTION(Runtime_Abort) {
isolate->PrintStack(stderr);
base::OS::Abort();
UNREACHABLE();
- return NULL;
}
@@ -699,7 +672,6 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
isolate->PrintStack(stderr);
base::OS::Abort();
UNREACHABLE();
- return NULL;
}
@@ -766,14 +738,6 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
return obj; // return TOS
}
-RUNTIME_FUNCTION(Runtime_TraceTailCall) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- PrintIndentation(isolate);
- PrintF("} -> tail call ->\n");
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -832,7 +796,8 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
}
namespace {
-bool DisallowCodegenFromStringsCallback(v8::Local<v8::Context> context) {
+bool DisallowCodegenFromStringsCallback(v8::Local<v8::Context> context,
+ v8::Local<v8::String> source) {
return false;
}
}
@@ -860,11 +825,11 @@ RUNTIME_FUNCTION(Runtime_IsWasmCode) {
return isolate->heap()->ToBoolean(obj->Has##Name()); \
}
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SmiOrObjectElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FixedTypedArrayElements)
@@ -891,16 +856,12 @@ RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
}
-#define CONVERT_ARG_HANDLE_CHECKED_2(Type, name, index) \
- CHECK(Type::Is##Type(args[index])); \
- Handle<Type> name = args.at<Type>(index);
-
// Take a compiled wasm module, serialize it and copy the buffer into an array
// buffer, which is then returned.
RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
Handle<WasmCompiledModule> orig(module_obj->compiled_module());
std::unique_ptr<ScriptData> data =
@@ -952,7 +913,7 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
HandleScope shs(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
wasm::testing::ValidateInstancesChain(isolate, module_obj,
instance_count->value());
@@ -962,7 +923,7 @@ RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
wasm::testing::ValidateModuleState(isolate, module_obj);
return isolate->heap()->ToBoolean(true);
}
@@ -970,7 +931,7 @@ RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
RUNTIME_FUNCTION(Runtime_ValidateWasmOrphanedInstance) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED_2(WasmInstanceObject, instance, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
wasm::testing::ValidateOrphanedInstance(isolate, instance);
return isolate->heap()->ToBoolean(true);
}
@@ -995,10 +956,7 @@ RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
- CHECK(WasmInstanceObject::IsWasmInstanceObject(*instance_obj));
- Handle<WasmInstanceObject> instance =
- Handle<WasmInstanceObject>::cast(instance_obj);
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
if (!instance->has_debug_info()) return 0;
uint64_t num = instance->debug_info()->NumInterpretedCalls();
return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num));
@@ -1007,11 +965,8 @@ RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(function_index, 1);
- CHECK(WasmInstanceObject::IsWasmInstanceObject(*instance_obj));
- Handle<WasmInstanceObject> instance =
- Handle<WasmInstanceObject>::cast(instance_obj);
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
WasmDebugInfo::RedirectToInterpreter(debug_info,
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index aa87c921eb..54b9050b6c 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -33,7 +33,9 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
isolate, NewTypeError(MessageTemplate::kNotTypedArray));
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(argument);
-
+ if (!array_buffer->is_neuterable()) {
+ return isolate->heap()->undefined_value();
+ }
if (array_buffer->backing_store() == NULL) {
CHECK(Smi::kZero == array_buffer->byte_length());
return isolate->heap()->undefined_value();
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index bb5360abe9..a9f112d975 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -180,12 +180,9 @@ RUNTIME_FUNCTION(Runtime_ClearThreadInWasm) {
RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
DCHECK_EQ(3, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[1]);
CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 2);
- CHECK(WasmInstanceObject::IsWasmInstanceObject(*instance_obj));
- Handle<WasmInstanceObject> instance =
- Handle<WasmInstanceObject>::cast(instance_obj);
// The arg buffer is the raw pointer to the caller's stack. It looks like a
// Smi (lowest bit not set, as checked by IsSmi), but is no valid Smi. We just
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 386b1a8108..fedaa098f8 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -36,27 +36,25 @@ namespace internal {
// A variable number of arguments is specified by a -1, additional restrictions
// are specified by inline comments
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(SpecialArrayFunctions, 0, 1) \
- F(TransitionElementsKind, 2, 1) \
- F(RemoveArrayHoles, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(NewArray, -1 /* >= 3 */, 1) \
- F(FunctionBind, -1, 1) \
- F(NormalizeElements, 1, 1) \
- F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
- F(IsArray, 1, 1) \
- F(ArrayIsArray, 1, 1) \
- F(FixedArrayGet, 2, 1) \
- F(FixedArraySet, 3, 1) \
- F(ArraySpeciesConstructor, 1, 1) \
- F(ArrayIncludes_Slow, 3, 1) \
- F(ArrayIndexOf, 3, 1) \
- F(SpreadIterablePrepare, 1, 1) \
- F(SpreadIterableFixed, 1, 1)
+#define FOR_EACH_INTRINSIC_ARRAY(F) \
+ F(TransitionElementsKind, 2, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(NewArray, -1 /* >= 3 */, 1) \
+ F(FunctionBind, -1, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(ArrayIsArray, 1, 1) \
+ F(FixedArrayGet, 2, 1) \
+ F(FixedArraySet, 3, 1) \
+ F(ArraySpeciesConstructor, 1, 1) \
+ F(ArrayIncludes_Slow, 3, 1) \
+ F(ArrayIndexOf, 3, 1) \
+ F(SpreadIterablePrepare, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
@@ -99,30 +97,18 @@ namespace internal {
F(SetInitialize, 1, 1) \
F(SetGrow, 1, 1) \
F(SetShrink, 1, 1) \
- F(SetClear, 1, 1) \
- F(SetIteratorInitialize, 3, 1) \
F(SetIteratorClone, 1, 1) \
- F(SetIteratorNext, 2, 1) \
- F(SetIteratorDetails, 1, 1) \
F(MapInitialize, 1, 1) \
F(MapShrink, 1, 1) \
- F(MapClear, 1, 1) \
F(MapGrow, 1, 1) \
- F(MapIteratorInitialize, 3, 1) \
F(MapIteratorClone, 1, 1) \
- F(MapIteratorDetails, 1, 1) \
F(GetWeakMapEntries, 2, 1) \
- F(MapIteratorNext, 2, 1) \
F(WeakCollectionInitialize, 1, 1) \
- F(WeakCollectionGet, 3, 1) \
- F(WeakCollectionHas, 3, 1) \
F(WeakCollectionDelete, 3, 1) \
F(WeakCollectionSet, 4, 1) \
F(GetWeakSetValues, 2, 1) \
F(IsJSMap, 1, 1) \
F(IsJSSet, 1, 1) \
- F(IsJSMapIterator, 1, 1) \
- F(IsJSSetIterator, 1, 1) \
F(IsJSWeakMap, 1, 1) \
F(IsJSWeakSet, 1, 1)
@@ -167,9 +153,9 @@ namespace internal {
F(SetScopeVariableValue, 6, 1) \
F(DebugPrintScopes, 0, 1) \
F(SetBreakPointsActive, 1, 1) \
- F(GetBreakLocations, 2, 1) \
+ F(GetBreakLocations, 1, 1) \
F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 4, 1) \
+ F(SetScriptBreakPoint, 3, 1) \
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
@@ -207,7 +193,9 @@ namespace internal {
F(DebugIsActive, 0, 1) \
F(DebugBreakInOptimizedCode, 0, 1) \
F(DebugCollectCoverage, 0, 1) \
- F(DebugTogglePreciseCoverage, 1, 1)
+ F(DebugTogglePreciseCoverage, 1, 1) \
+ F(DebugToggleBlockCoverage, 1, 1) \
+ F(IncBlockCounter, 2, 1)
#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
@@ -224,21 +212,18 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F)
#endif
-#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
- FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
- F(InterpreterNewClosure, 4, 1) \
+#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
+ F(InterpreterNewClosure, 4, 1) \
F(InterpreterAdvanceBytecodeOffset, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
- F(FunctionSetName, 2, 1) \
- F(FunctionRemovePrototype, 1, 1) \
F(FunctionGetScript, 1, 1) \
F(FunctionGetScriptId, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetContextData, 1, 1) \
- F(FunctionSetInstanceClassName, 2, 1) \
F(FunctionSetLength, 2, 1) \
F(FunctionSetPrototype, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
@@ -258,7 +243,6 @@ namespace internal {
F(GeneratorGetReceiver, 1, 1) \
F(GeneratorGetContext, 1, 1) \
F(GeneratorGetInputOrDebugPos, 1, 1) \
- F(AsyncGeneratorGetAwaitInputOrDebugPos, 1, 1) \
F(AsyncGeneratorResolve, 3, 1) \
F(AsyncGeneratorReject, 2, 1) \
F(GeneratorGetContinuation, 1, 1) \
@@ -296,61 +280,62 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTL(F)
#endif
-#define FOR_EACH_INTRINSIC_INTERNAL(F) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(AllocateSeqOneByteString, 1, 1) \
- F(AllocateSeqTwoByteString, 1, 1) \
- F(CheckIsBootstrapping, 0, 1) \
- F(CreateAsyncFromSyncIterator, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(ExportFromRuntime, 1, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(InstallToContext, 1, 1) \
- F(Interrupt, 0, 1) \
- F(IS_VAR, 1, 1) \
- F(NewReferenceError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewTypeError, 2, 1) \
- F(OrdinaryHasInstance, 2, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ReThrow, 1, 1) \
- F(RunMicrotasks, 0, 1) \
- F(StackGuard, 0, 1) \
- F(Throw, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCannotConvertToPrimitive, 0, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowCalledOnNullOrUndefined, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowGeneratorRunning, 0, 1) \
- F(ThrowIllegalInvocation, 0, 1) \
- F(ThrowIncompatibleMethodReceiver, 2, 1) \
- F(ThrowInvalidHint, 1, 1) \
- F(ThrowInvalidStringLength, 0, 1) \
- F(ThrowInvalidTypedArrayAlignment, 2, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
- F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
- F(ThrowNotConstructor, 1, 1) \
- F(ThrowRangeError, -1 /* >= 1 */, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
- F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowUndefinedOrNullToObject, 1, 1) \
- F(Typeof, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1) \
+#define FOR_EACH_INTRINSIC_INTERNAL(F) \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
+ F(CheckIsBootstrapping, 0, 1) \
+ F(CreateAsyncFromSyncIterator, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(ExportFromRuntime, 1, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(IncrementUseCounterConstructorReturnNonUndefinedPrimitive, 0, 1) \
+ F(InstallToContext, 1, 1) \
+ F(Interrupt, 0, 1) \
+ F(IS_VAR, 1, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, 2, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReThrow, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCannotConvertToPrimitive, 0, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowCalledOnNullOrUndefined, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowConstructorReturnedNonObject, 0, 1) \
+ F(ThrowGeneratorRunning, 0, 1) \
+ F(ThrowIllegalInvocation, 0, 1) \
+ F(ThrowIncompatibleMethodReceiver, 2, 1) \
+ F(ThrowInvalidHint, 1, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowInvalidTypedArrayAlignment, 2, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowThrowMethodMissing, 0, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
+ F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
+ F(ThrowNotConstructor, 1, 1) \
+ F(ThrowRangeError, -1 /* >= 1 */, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(ThrowUndefinedOrNullToObject, 1, 1) \
+ F(Typeof, 1, 1) \
+ F(UnwindAndFindExceptionHandler, 0, 1) \
F(AllowDynamicFunction, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
F(CreateRegExpLiteral, 4, 1) \
F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateArrayLiteralStubBailout, 3, 1)
+ F(CreateArrayLiteral, 4, 1)
#define FOR_EACH_INTRINSIC_LIVEEDIT(F) \
F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
@@ -403,7 +388,7 @@ namespace internal {
F(AddElement, 3, 1) \
F(AppendElement, 2, 1) \
F(DeleteProperty, 3, 1) \
- F(ShrinkPropertyDictionary, 2, 1) \
+ F(ShrinkPropertyDictionary, 1, 1) \
F(HasProperty, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
F(GetInterceptorInfo, 1, 1) \
@@ -427,6 +412,7 @@ namespace internal {
F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1) \
+ F(DefineMethodsInternal, 3, 1) \
F(ToObject, 1, 1) \
F(ToPrimitive, 1, 1) \
F(ToPrimitive_Number, 1, 1) \
@@ -440,8 +426,6 @@ namespace internal {
F(Compare, 3, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
- F(CreateKeyValueArray, 2, 1) \
- F(IsAccessCheckNeeded, 1, 1) \
F(CreateDataProperty, 3, 1) \
F(IterableToListCanBeElided, 1, 1)
@@ -484,7 +468,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
- F(JSProxyCall, -1 /* >= 2 */, 1) \
F(JSProxyConstruct, -1 /* >= 3 */, 1) \
F(JSProxyGetTarget, 1, 1) \
F(JSProxyGetHandler, 1, 1) \
@@ -504,30 +487,30 @@ namespace internal {
F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
F(StringSplit, 3, 1)
-#define FOR_EACH_INTRINSIC_SCOPES(F) \
- F(ThrowConstAssignError, 0, 1) \
- F(DeclareGlobals, 3, 1) \
- F(DeclareGlobalsForInterpreter, 3, 1) \
- F(InitializeVarGlobal, 3, 1) \
- F(DeclareEvalFunction, 2, 1) \
- F(DeclareEvalVar, 1, 1) \
- F(NewSloppyArguments_Generic, 1, 1) \
- F(NewStrictArguments, 1, 1) \
- F(NewRestParameter, 1, 1) \
- F(NewSloppyArguments, 3, 1) \
- F(NewArgumentsElements, 2, 1) \
- F(NewClosure, 3, 1) \
- F(NewClosure_Tenured, 3, 1) \
- F(NewScriptContext, 2, 1) \
- F(NewFunctionContext, 2, 1) \
- F(PushModuleContext, 3, 1) \
- F(PushWithContext, 3, 1) \
- F(PushCatchContext, 4, 1) \
- F(PushBlockContext, 2, 1) \
- F(DeleteLookupSlot, 1, 1) \
- F(LoadLookupSlot, 1, 1) \
- F(LoadLookupSlotInsideTypeof, 1, 1) \
- F(StoreLookupSlot_Sloppy, 2, 1) \
+#define FOR_EACH_INTRINSIC_SCOPES(F) \
+ F(ThrowConstAssignError, 0, 1) \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareGlobalsForInterpreter, 3, 1) \
+ F(DeclareEvalFunction, 2, 1) \
+ F(DeclareEvalVar, 1, 1) \
+ F(NewSloppyArguments_Generic, 1, 1) \
+ F(NewStrictArguments, 1, 1) \
+ F(NewRestParameter, 1, 1) \
+ F(NewSloppyArguments, 3, 1) \
+ F(NewArgumentsElements, 2, 1) \
+ F(NewClosure, 3, 1) \
+ F(NewClosure_Tenured, 3, 1) \
+ F(NewScriptContext, 2, 1) \
+ F(NewFunctionContext, 2, 1) \
+ F(PushModuleContext, 3, 1) \
+ F(PushWithContext, 3, 1) \
+ F(PushCatchContext, 4, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(DeleteLookupSlot, 1, 1) \
+ F(LoadLookupSlot, 1, 1) \
+ F(LoadLookupSlotInsideTypeof, 1, 1) \
+ F(StoreLookupSlot_Sloppy, 2, 1) \
+ F(StoreLookupSlot_SloppyHoisting, 2, 1) \
F(StoreLookupSlot_Strict, 2, 1)
#define FOR_EACH_INTRINSIC_STRINGS(F) \
@@ -538,6 +521,7 @@ namespace internal {
F(StringLastIndexOf, 2, 1) \
F(SubString, 3, 1) \
F(StringAdd, 2, 1) \
+ F(StringConcat, -1 /* >= 2 */, 1) \
F(InternalizeString, 1, 1) \
F(StringCharCodeAtRT, 2, 1) \
F(StringCompare, 2, 1) \
@@ -596,14 +580,13 @@ namespace internal {
F(DisassembleFunction, 1, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
- F(TraceTailCall, 0, 1) \
F(HaveSameMap, 2, 1) \
F(InNewSpace, 1, 1) \
- F(HasFastSmiElements, 1, 1) \
- F(HasFastObjectElements, 1, 1) \
- F(HasFastSmiOrObjectElements, 1, 1) \
- F(HasFastDoubleElements, 1, 1) \
- F(HasFastHoleyElements, 1, 1) \
+ F(HasSmiElements, 1, 1) \
+ F(HasObjectElements, 1, 1) \
+ F(HasSmiOrObjectElements, 1, 1) \
+ F(HasDoubleElements, 1, 1) \
+ F(HasHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
F(HasSloppyArgumentsElements, 1, 1) \
F(HasFixedTypedArrayElements, 1, 1) \
@@ -628,8 +611,6 @@ namespace internal {
F(ValidateWasmOrphanedInstance, 1, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
- F(SetWasmCompileFromPromiseOverload, 0, 1) \
- F(ResetWasmOverloads, 0, 1) \
F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
F(RedirectToWasmInterpreter, 2, 1)
@@ -676,8 +657,6 @@ namespace internal {
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
#define FOR_EACH_INTRINSIC_IC(F) \
- F(BinaryOpIC_Miss, 2, 1) \
- F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
F(CompareIC_Miss, 3, 1) \
F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
F(KeyedLoadIC_Miss, 4, 1) \
@@ -691,7 +670,6 @@ namespace internal {
F(StoreCallbackProperty, 6, 1) \
F(StoreIC_Miss, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1) \
- F(ToBooleanIC_Miss, 1, 1) \
F(Unreachable, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
@@ -803,17 +781,22 @@ class Runtime : public AllStatic {
MUST_USE_RESULT static MaybeHandle<JSArray> GetInternalProperties(
Isolate* isolate, Handle<Object>);
+
+ MUST_USE_RESULT static MaybeHandle<Object> ThrowIteratorError(
+ Isolate* isolate, Handle<Object> object);
};
class RuntimeState {
public:
+#ifndef V8_INTL_SUPPORT
unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
return &to_upper_mapping_;
}
unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
return &to_lower_mapping_;
}
+#endif
Runtime::Function* redirected_intrinsic_functions() {
return redirected_intrinsic_functions_.get();
@@ -826,8 +809,10 @@ class RuntimeState {
private:
RuntimeState() {}
+#ifndef V8_INTL_SUPPORT
unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+#endif
std::unique_ptr<Runtime::Function[]> redirected_intrinsic_functions_;
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 6024232f9b..1475254921 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -114,7 +114,6 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
- return NULL;
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
@@ -345,19 +344,19 @@ void RelocInfo::Visit(Heap* heap) {
// Operand constructors
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
- imm_ = immediate;
+ value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
- imm_ = reinterpret_cast<intptr_t>(f.address());
+ value_.immediate = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
- imm_ = reinterpret_cast<intptr_t>(value);
+ value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR;
}
@@ -372,18 +371,14 @@ void Assembler::CheckBuffer() {
}
}
-int32_t Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+int32_t Assembler::emit_code_target(Handle<Code> target,
+ RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID);
- } else {
- RecordRelocInfo(rmode);
- }
+ RecordRelocInfo(rmode);
int current = code_targets_.length();
- if (current > 0 && code_targets_.last().is_identical_to(target)) {
+ if (current > 0 && !target.is_null() &&
+ code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target.
current--;
} else {
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 35305fc074..31ca0592d3 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -47,9 +47,9 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/s390/assembler-s390-inl.h"
-
+#include "src/code-stubs.h"
#include "src/macro-assembler.h"
+#include "src/s390/assembler-s390-inl.h"
namespace v8 {
namespace internal {
@@ -307,19 +307,20 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand
// See assembler-s390-inl.h for inlined constructors
-Operand::Operand(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+Operand::Operand(Handle<HeapObject> handle) {
+ AllowHandleDereference using_location;
rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- imm_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = kRelocInfo_NONEPTR;
- }
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
}
MemOperand::MemOperand(Register rn, int32_t offset) {
@@ -334,23 +335,46 @@ MemOperand::MemOperand(Register rx, Register rb, int32_t offset) {
offset_ = offset;
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ Address pc = buffer_ + request.offset();
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ set_target_address_at(nullptr, pc, static_cast<Address>(NULL),
+ reinterpret_cast<Address>(object.location()),
+ SKIP_ICACHE_FLUSH);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ SixByteInstr instr =
+ Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
+ int index = instr & 0xFFFFFFFF;
+ code_targets_[index] = request.code_stub()->GetCode();
+ break;
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
code_targets_(100) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_bound_pos_ = 0;
- ClearRecordedAstId();
relocations_.reserve(128);
}
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitRelocations();
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -362,7 +386,7 @@ void Assembler::GetCode(CodeDesc* desc) {
}
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
nop(0);
}
@@ -651,9 +675,9 @@ void Assembler::nop(int type) {
void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
DCHECK(is_uint12(op));
- DCHECK(is_uint16(i2.imm_) || is_int16(i2.imm_));
+ DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate()));
emit4bytes((op & 0xFF0) * B20 | r1.code() * B20 | (op & 0xF) * B16 |
- (i2.imm_ & 0xFFFF));
+ (i2.immediate() & 0xFFFF));
}
// RI2 format: <insn> M1,I2
@@ -667,9 +691,9 @@ void Assembler::ri_form(Opcode op, Register r1, const Operand& i2) {
void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
DCHECK(is_uint12(op));
DCHECK(is_uint4(m1));
- DCHECK(op == BRC ? is_int16(i2.imm_) : is_uint16(i2.imm_));
+ DCHECK(op == BRC ? is_int16(i2.immediate()) : is_uint16(i2.immediate()));
emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
- (i2.imm_ & 0xFFFF));
+ (i2.immediate() & 0xFFFF));
}
// RIE-f format: <insn> R1,R2,I3,I4,I5
@@ -681,15 +705,15 @@ void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
const Operand& i3, const Operand& i4,
const Operand& i5) {
DCHECK(is_uint16(op));
- DCHECK(is_uint8(i3.imm_));
- DCHECK(is_uint8(i4.imm_));
- DCHECK(is_uint8(i5.imm_));
+ DCHECK(is_uint8(i3.immediate()));
+ DCHECK(is_uint8(i4.immediate()));
+ DCHECK(is_uint8(i5.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(r1.code())) * B36 |
(static_cast<uint64_t>(r2.code())) * B32 |
- (static_cast<uint64_t>(i3.imm_)) * B24 |
- (static_cast<uint64_t>(i4.imm_)) * B16 |
- (static_cast<uint64_t>(i5.imm_)) * B8 |
+ (static_cast<uint64_t>(i3.immediate())) * B24 |
+ (static_cast<uint64_t>(i4.immediate())) * B16 |
+ (static_cast<uint64_t>(i5.immediate())) * B8 |
(static_cast<uint64_t>(op & 0x00FF));
emit6bytes(code);
}
@@ -707,11 +731,11 @@ void Assembler::rie_f_form(Opcode op, Register r1, Register r2,
void Assembler::rie_form(Opcode op, Register r1, Register r3,
const Operand& i2) {
DCHECK(is_uint16(op));
- DCHECK(is_int16(i2.imm_));
+ DCHECK(is_int16(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(r1.code())) * B36 |
(static_cast<uint64_t>(r3.code())) * B32 |
- (static_cast<uint64_t>(i2.imm_ & 0xFFFF)) * B16 |
+ (static_cast<uint64_t>(i2.immediate() & 0xFFFF)) * B16 |
(static_cast<uint64_t>(op & 0x00FF));
emit6bytes(code);
}
@@ -768,8 +792,9 @@ void Assembler::rs_form(Opcode op, Register r1, Condition m3, Register b2,
void Assembler::rsi_form(Opcode op, Register r1, Register r3,
const Operand& i2) {
DCHECK(is_uint8(op));
- DCHECK(is_uint16(i2.imm_));
- emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 | (i2.imm_ & 0xFFFF));
+ DCHECK(is_uint16(i2.immediate()));
+ emit4bytes(op * B24 | r1.code() * B20 | r3.code() * B16 |
+ (i2.immediate() & 0xFFFF));
}
// RSL format: <insn> R1,R3,D2(B2)
@@ -921,13 +946,13 @@ void Assembler::ris_form(Opcode op, Register r1, Condition m3, Register b4,
Disp d4, const Operand& i2) {
DCHECK(is_uint12(d4));
DCHECK(is_uint16(op));
- DCHECK(is_uint8(i2.imm_));
+ DCHECK(is_uint8(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
(static_cast<uint64_t>(r1.code())) * B36 |
(static_cast<uint64_t>(m3)) * B32 |
(static_cast<uint64_t>(b4.code())) * B28 |
(static_cast<uint64_t>(d4)) * B16 |
- (static_cast<uint64_t>(i2.imm_)) << 8 |
+ (static_cast<uint64_t>(i2.immediate())) << 8 |
(static_cast<uint64_t>(op & 0x00FF));
emit6bytes(code);
}
@@ -962,7 +987,7 @@ void Assembler::s_form(Opcode op, Register b1, Disp d2) {
}
void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
- emit4bytes((op & 0x00FF) << 24 | i2.imm_ * B16 | b1.code() * B12 | d1);
+ emit4bytes((op & 0x00FF) << 24 | i2.immediate() * B16 | b1.code() * B12 | d1);
}
// SIY format: <insn> D1(B1),I2
@@ -981,9 +1006,9 @@ void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
DCHECK(is_uint20(d1) || is_int20(d1));
DCHECK(is_uint16(op));
- DCHECK(is_uint8(i2.imm_));
+ DCHECK(is_uint8(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
- (static_cast<uint64_t>(i2.imm_)) * B32 |
+ (static_cast<uint64_t>(i2.immediate())) * B32 |
(static_cast<uint64_t>(b1.code())) * B28 |
(static_cast<uint64_t>(d1 & 0x0FFF)) * B16 |
(static_cast<uint64_t>(d1 & 0x0FF000)) >> 4 |
@@ -1007,11 +1032,11 @@ void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
void Assembler::sil_form(Opcode op, Register b1, Disp d1, const Operand& i2) {
DCHECK(is_uint12(d1));
DCHECK(is_uint16(op));
- DCHECK(is_uint16(i2.imm_));
+ DCHECK(is_uint16(i2.immediate()));
uint64_t code = (static_cast<uint64_t>(op)) * B32 |
(static_cast<uint64_t>(b1.code())) * B28 |
(static_cast<uint64_t>(d1)) * B16 |
- (static_cast<uint64_t>(i2.imm_));
+ (static_cast<uint64_t>(i2.immediate()));
emit6bytes(code);
}
@@ -1125,10 +1150,10 @@ void Assembler::ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
DCHECK(is_uint12(d1));
DCHECK(is_uint8(op));
DCHECK(is_uint4(l1));
- DCHECK(is_uint4(i3.imm_));
+ DCHECK(is_uint4(i3.immediate()));
uint64_t code =
(static_cast<uint64_t>(op)) * B40 | (static_cast<uint64_t>(l1)) * B36 |
- (static_cast<uint64_t>(i3.imm_)) * B32 |
+ (static_cast<uint64_t>(i3.immediate())) * B32 |
(static_cast<uint64_t>(b1.code())) * B28 |
(static_cast<uint64_t>(d1)) * B16 |
(static_cast<uint64_t>(b2.code())) * B12 | (static_cast<uint64_t>(d2));
@@ -1426,7 +1451,7 @@ void Assembler::risbg(Register dst, Register src, const Operand& startBit,
bool zeroBits) {
// High tag the top bit of I4/EndBit to zero out any unselected bits
if (zeroBits)
- rie_f_form(RISBG, dst, src, startBit, Operand(endBit.imm_ | 0x80),
+ rie_f_form(RISBG, dst, src, startBit, Operand(endBit.immediate() | 0x80),
shiftAmt);
else
rie_f_form(RISBG, dst, src, startBit, endBit, shiftAmt);
@@ -1438,7 +1463,7 @@ void Assembler::risbgn(Register dst, Register src, const Operand& startBit,
bool zeroBits) {
// High tag the top bit of I4/EndBit to zero out any unselected bits
if (zeroBits)
- rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.imm_ | 0x80),
+ rie_f_form(RISBGN, dst, src, startBit, Operand(endBit.immediate() | 0x80),
shiftAmt);
else
rie_f_form(RISBGN, dst, src, startBit, endBit, shiftAmt);
@@ -1472,9 +1497,10 @@ void Assembler::ark(Register r1, Register r2, Register r3) {
// Add Storage-Imm (32)
void Assembler::asi(const MemOperand& opnd, const Operand& imm) {
- DCHECK(is_int8(imm.imm_));
+ DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
- siy_form(ASI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
+ siy_form(ASI, Operand(0xff & imm.immediate()), opnd.rb(),
+ 0xfffff & opnd.offset());
}
// -----------------------
@@ -1495,9 +1521,10 @@ void Assembler::agrk(Register r1, Register r2, Register r3) {
// Add Storage-Imm (64)
void Assembler::agsi(const MemOperand& opnd, const Operand& imm) {
- DCHECK(is_int8(imm.imm_));
+ DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
- siy_form(AGSI, Operand(0xff & imm.imm_), opnd.rb(), 0xfffff & opnd.offset());
+ siy_form(AGSI, Operand(0xff & imm.immediate()), opnd.rb(),
+ 0xfffff & opnd.offset());
}
// -------------------------------
@@ -1815,11 +1842,18 @@ void Assembler::srdl(Register r1, const Operand& opnd) {
rs_form(SRDL, r1, r0, r0, opnd.immediate());
}
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- int32_t target_index = emit_code_target(target, rmode, ast_id);
+ int32_t target_index = emit_code_target(target, rmode);
+ brasl(r14, Operand(target_index));
+}
+
+void Assembler::call(CodeStub* stub) {
+ EnsureSpace ensure_space(this);
+ RequestHeapObject(HeapObjectRequest(stub));
+ int32_t target_index =
+ emit_code_target(Handle<Code>(), RelocInfo::CODE_TARGET);
brasl(r14, Operand(target_index));
}
@@ -2104,9 +2138,7 @@ void Assembler::GrowBuffer(int needed) {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -2169,10 +2201,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!emit_debug_code())) {
return;
}
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- data = RecordedAstId().ToInt();
- ClearRecordedAstId();
- }
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
relocations_.push_back(rinfo);
}
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index bee7452aaa..54c0b2baf7 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -297,12 +297,14 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
INLINE(explicit Operand(const ExternalReference& f));
- explicit Operand(Handle<Object> handle);
+ explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
// rm
INLINE(explicit Operand(Register rm));
+ static Operand EmbeddedNumber(double value); // Smi or HeapNumber
+
// Return true if this is a register operand.
INLINE(bool is_reg() const);
@@ -310,18 +312,41 @@ class Operand BASE_EMBEDDED {
inline intptr_t immediate() const {
DCHECK(!rm_.is_valid());
- return imm_;
+ DCHECK(!is_heap_object_request());
+ return value_.immediate;
+ }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(is_heap_object_request());
+ return value_.heap_object_request;
}
inline void setBits(int n) {
- imm_ = (static_cast<uint32_t>(imm_) << (32 - n)) >> (32 - n);
+ value_.immediate =
+ (static_cast<uint32_t>(value_.immediate) << (32 - n)) >> (32 - n);
}
Register rm() const { return rm_; }
+ bool is_heap_object_request() const {
+ DCHECK_IMPLIES(is_heap_object_request_, !rm_.is_valid());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
Register rm_;
- intptr_t imm_; // valid if rm_ == no_reg
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ intptr_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
+
RelocInfo::Mode rmode_;
friend class Assembler;
@@ -405,7 +430,7 @@ class Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
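
GetCode now takes the Isolate explicitly so that deferred heap objects can be materialized when code is finalized. A minimal usage sketch under that assumption, mirroring the updated call site in codegen-s390.cc later in this diff; the helper name is illustrative and the V8 headers are assumed.

// Only the new two-argument GetCode signature shown above is the point here.
void FinalizeSketch(Isolate* isolate, MacroAssembler* masm) {
  CodeDesc desc;
  masm->GetCode(isolate, &desc);  // emits pending relocations and fills desc
}
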
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -838,8 +863,8 @@ class Assembler : public AssemblerBase {
basr(r14, r1);
}
- void call(Handle<Code> target, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void call(Handle<Code> target, RelocInfo::Mode rmode);
+ void call(CodeStub* stub);
void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation
@@ -1269,17 +1294,6 @@ class Assembler : public AssemblerBase {
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) { recorded_ast_id_ = ast_id; }
-
- TypeFeedbackId RecordedAstId() {
- // roohack - another issue??? DCHECK(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1346,13 +1360,9 @@ class Assembler : public AssemblerBase {
public:
byte* buffer_pos() const { return buffer_; }
+ void RequestHeapObject(HeapObjectRequest request);
protected:
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode instruction(s) at pos and return backchain to previous
@@ -1392,8 +1402,7 @@ class Assembler : public AssemblerBase {
inline void UntrackBranch();
inline int32_t emit_code_target(
- Handle<Code> target, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
+ Handle<Code> target, RelocInfo::Mode rmode);
// Helpers to emit binary encoding of 2/4/6 byte instructions.
inline void emit2bytes(uint16_t x);
@@ -1468,6 +1477,17 @@ class Assembler : public AssemblerBase {
void bind_to(Label* L, int pos);
void next(Label* L);
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
+
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
friend class CodePatcher;
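
The comment above describes a record-then-patch scheme for embedded heap objects. A self-contained, simplified sketch of the same idea follows; the types and the materialization step are illustrative assumptions, not the real HeapObjectRequest machinery.

#include <cstdint>
#include <forward_list>
#include <vector>

// Each request remembers the offset of a dummy slot emitted during assembly;
// a later pass writes the real (here: faked) value into that slot.
struct PendingRequest { size_t pc_offset; double heap_number; };

class SketchAssembler {
 public:
  void EmitNumber(double value) {
    pending_.push_front({buffer_.size(), value});
    buffer_.push_back(0);  // dummy slot, patched by InstallRequested()
  }
  void InstallRequested() {
    for (const PendingRequest& req : pending_) {
      buffer_[req.pc_offset] = FakeMaterialize(req.heap_number);
    }
  }

 private:
  // The real assembler would allocate a HeapNumber on the isolate's heap here.
  static intptr_t FakeMaterialize(double v) { return static_cast<intptr_t>(v); }
  std::forward_list<PendingRequest> pending_;
  std::vector<intptr_t> buffer_;
};
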
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index e47cb3e903..511f019118 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -40,28 +40,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
Register rhs);
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- r2.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments
- for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetRegisterParameter(i));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register input_reg = source();
@@ -800,14 +778,11 @@ bool CEntryStub::NeedsImmovableCode() { return true; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -830,6 +805,8 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(isolate, 1, kDontSaveFPRegs);
stub.GetCode();
+ CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+ save_doubles.GetCode();
}
void CEntryStub::Generate(MacroAssembler* masm) {
@@ -937,7 +914,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ mov(r1, Operand(pending_exception_address));
__ LoadP(r1, MemOperand(r1));
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -966,15 +943,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set r3 to
// contain the current pending exception, don't clobber it.
@@ -1076,7 +1053,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
__ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
// Save copies of the top frame descriptor on the stack.
- __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(r7, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ LoadP(r7, MemOperand(r7));
__ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize));
// Set up frame pointer for the frame to be pushed.
@@ -1087,7 +1065,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ mov(r7, Operand(ExternalReference(js_entry_sp)));
__ LoadAndTestP(r8, MemOperand(r7));
__ bne(&non_outermost_js, Label::kNear);
@@ -1111,8 +1089,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
+ __ mov(ip, Operand(ExternalReference(
+ IsolateAddressId::kPendingExceptionAddress, isolate())));
__ StoreP(r2, MemOperand(ip));
__ LoadRoot(r2, Heap::kExceptionRootIndex);
@@ -1172,7 +1150,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(r5);
- __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ StoreP(r5, MemOperand(ip));
// Reset the stack to the callee saved registers.
@@ -1559,35 +1538,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
__ bne(&loop);
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : left
- // -- r2 : right
- // r3: second string
- // -----------------------------------
-
- // Load r4 with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(r4, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ TestIfSmi(r4);
- __ Assert(ne, kExpectedAllocationSite, cr0);
- __ push(r4);
- __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
- __ pop(r4);
- __ Assert(eq, kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -2021,7 +1971,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
// Restore the properties.
__ LoadP(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
@@ -2030,7 +1980,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ LoadRR(r0, r14);
__ MultiPush(spill_mask);
- __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ mov(r3, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2235,10 +2185,11 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -2252,6 +2203,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
+#endif
// Get the value from the slot.
__ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -2298,19 +2250,22 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ LoadP(r3, MemOperand(fp, parameter_count_offset));
- if (function_mode() == JS_FUNCTION_STUB_MODE) {
- __ AddP(r3, Operand(1));
- }
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
- __ la(sp, MemOperand(r3, sp));
- __ Ret();
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != NULL) {
+ PredictableCodeSizeScope predictable(tasm,
+#if V8_TARGET_ARCH_S390X
+ 40);
+#elif V8_HOST_ARCH_S390
+ 36);
+#else
+ 32);
+#endif
+ tasm->CleanseP(r14);
+ tasm->Push(r14, ip);
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ tasm->Pop(r14, ip);
+ }
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -2368,7 +2323,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ LoadRR(r7, sp);
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
}
@@ -2449,24 +2404,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// r2 - number of arguments
// r3 - constructor?
// sp[0] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ AndP(r0, r5, Operand(1));
- __ bne(&normal_sequence);
- }
-
- // look at the first argument
- __ LoadP(r7, MemOperand(sp, 0));
- __ CmpP(r7, Operand::Zero());
- __ beq(&normal_sequence);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2475,12 +2418,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ArraySingleArgumentConstructorStub stub_holey(
masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+ Label normal_sequence;
+ // is the low bit set? If so, we are holey and that is good.
+ __ AndP(r0, r5, Operand(1));
+ __ bne(&normal_sequence);
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ AddP(r5, r5, Operand(1));
@@ -2494,9 +2437,11 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(
+ r4, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
- __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+ __ StoreP(r6, FieldMemOperand(
+ r4, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ bind(&normal_sequence);
int last_index =
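
The STATIC_ASSERTs above pin the fast elements kinds so that every holey variant is the odd successor of its packed counterpart, which is what the low-bit test in this stub relies on. A small self-contained illustration; the enum values are copied from the asserts, while the helper names are assumptions.

// Holey kinds are exactly the odd values, so (kind & 1) distinguishes them.
enum SketchElementsKind {
  PACKED_SMI_ELEMENTS = 0,    HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,        HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4, HOLEY_DOUBLE_ELEMENTS = 5
};
inline bool IsHoleySketch(int kind) { return (kind & 1) != 0; }
inline int ToHoleySketch(int kind) { return kind | 1; }  // packed -> holey
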
@@ -2523,7 +2468,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2535,7 +2480,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2600,7 +2545,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
__ beq(&no_info);
- __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(
+ r4, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(r5);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2672,20 +2618,20 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ CmpP(r5, Operand(FAST_ELEMENTS));
+ __ CmpP(r5, Operand(PACKED_ELEMENTS));
__ beq(&done);
- __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS));
+ __ CmpP(r5, Operand(HOLEY_ELEMENTS));
__ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
- __ CmpP(r5, Operand(FAST_ELEMENTS));
+ __ CmpP(r5, Operand(PACKED_ELEMENTS));
__ beq(&fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index 04dc77129c..b52bb791a3 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -35,7 +35,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
!RelocInfo::RequiresRelocation(isolate, desc));
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index 67fe47436f..dee21b28ef 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -17,6 +17,15 @@
#include "src/base/macros.h"
#include "src/globals.h"
+// UNIMPLEMENTED_ macro for S390.
+#ifdef DEBUG
+#define UNIMPLEMENTED_S390() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_S390()
+#endif
+
namespace v8 {
namespace internal {
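
A brief usage note on the debug-only macro added above: under DEBUG it prints the file, line, and function of the call site, and in release builds it expands to nothing. The caller below is purely hypothetical, shown only to illustrate the macro.

// Hypothetical unimplemented handler; not part of the actual S390 port.
void DecodeUnsupportedVectorOp() {
  UNIMPLEMENTED_S390();  // no-op unless DEBUG is defined
}
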
@@ -2084,7 +2093,6 @@ class Instruction {
}
UNREACHABLE();
- return static_cast<Opcode>(-1);
}
// Fields used in Software interrupt instructions
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index a0ca01849e..ed31c69baa 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -80,23 +80,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- ApiFunction function(descriptor->deoptimization_handler());
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(r2.code(), params);
- output_frame->SetRegister(r3.code(), handler);
-}
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -134,7 +117,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
- __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index 19f36a6f47..4ec911c144 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -188,7 +188,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
@@ -222,7 +221,6 @@ int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
return 2;
}
UNREACHABLE();
- return -1;
}
// FormatOption takes a formatting string and interprets it based on
@@ -304,7 +302,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
int Decoder::FormatMask(Instruction* instr, const char* format) {
@@ -456,7 +453,6 @@ int Decoder::FormatImmediate(Instruction* instr, const char* format) {
}
UNREACHABLE();
- return -1;
}
// Format takes a formatting string for a whole instruction and prints it into
@@ -1536,7 +1532,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
// S390 does not have XMM register
// TODO(joransiu): Consider update this for Vector Regs
UNREACHABLE();
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/s390/frames-s390.cc b/deps/v8/src/s390/frames-s390.cc
index 20506ec13c..04066ae223 100644
--- a/deps/v8/src/s390/frames-s390.cc
+++ b/deps/v8/src/s390/frames-s390.cc
@@ -19,14 +19,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
-Register StubFailureTrampolineFrame::context_register() { return cp; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
} // namespace internal
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 0028b9578b..0d09be5b73 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -47,6 +47,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return r7; }
const Register StringCompareDescriptor::LeftRegister() { return r3; }
const Register StringCompareDescriptor::RightRegister() { return r2; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return r2; }
+
const Register ApiGetterDescriptor::HolderRegister() { return r2; }
const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
@@ -143,6 +145,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r4 : arguments list (FixedArray)
+ // r6 : arguments list length (untagged)
+ Register registers[] = {r3, r2, r4, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@@ -152,6 +164,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r4 : the object to spread
+ Register registers[] = {r3, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : the target to call
+ // r4 : the arguments list
+ Register registers[] = {r3, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : arguments list (FixedArray)
+ // r6 : arguments list length (untagged)
+ Register registers[] = {r3, r5, r2, r4, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@@ -162,6 +202,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : the object to spread
+ Register registers[] = {r3, r5, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : the arguments list
+ Register registers[] = {r3, r5, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
@@ -352,8 +411,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r2, // the value to pass to the generator
r3, // the JSGeneratorObject to resume
- r4, // the resume mode (tagged)
- r5 // SuspendFlags (tagged)
+ r4 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index d0e3ea022a..fbfad2e402 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -12,6 +12,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/external-reference-table.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -22,24 +23,16 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
-}
+ : TurboAssembler(isolate, buffer, size, create_code_object) {}
-void MacroAssembler::Jump(Register target) { b(target); }
+void TurboAssembler::Jump(Register target) { b(target); }
void MacroAssembler::JumpToJSEntry(Register target) {
Move(ip, target);
Jump(ip);
}
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, CRegister) {
Label skip;
@@ -53,21 +46,21 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
bind(&skip);
}
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
CRegister cr) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
jump(code, rmode, cond);
}
-int MacroAssembler::CallSize(Register target) { return 2; } // BASR
+int TurboAssembler::CallSize(Register target) { return 2; } // BASR
-void MacroAssembler::Call(Register target) {
+void TurboAssembler::Call(Register target) {
Label start;
bind(&start);
@@ -82,7 +75,7 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
-int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
// S390 Assembler::move sequence is IILF / IIHF
int size;
@@ -107,7 +100,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
return size;
}
-void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(cond == al);
@@ -125,27 +118,27 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
-int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id, Condition cond) {
+int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
return 6; // BRASL
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- TypeFeedbackId ast_id, Condition cond) {
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
#ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallSize(code, rmode, ast_id, cond);
+ int expected_size = CallSize(code, rmode, cond);
Label start;
bind(&start);
#endif
- call(code, rmode, ast_id);
+ call(code, rmode);
DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}
-void MacroAssembler::Drop(int count) {
+void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kPointerSize;
if (is_uint12(total)) {
@@ -158,35 +151,48 @@ void MacroAssembler::Drop(int count) {
}
}
-void MacroAssembler::Drop(Register count, Register scratch) {
+void TurboAssembler::Drop(Register count, Register scratch) {
ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
AddP(sp, sp, scratch);
}
-void MacroAssembler::Call(Label* target) { b(r14, target); }
+void TurboAssembler::Call(Label* target) { b(r14, target); }
-void MacroAssembler::Push(Handle<Object> handle) {
+void TurboAssembler::Push(Handle<HeapObject> handle) {
mov(r0, Operand(handle));
push(r0);
}
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
+void TurboAssembler::Push(Smi* smi) {
+ mov(r0, Operand(smi));
+ push(r0);
+}
+
+void MacroAssembler::PushObject(Handle<Object> handle) {
+ if (handle->IsHeapObject()) {
+ Push(Handle<HeapObject>::cast(handle));
+ } else {
+ Push(Smi::cast(*handle));
+ }
+}
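The added PushObject above only has to decide whether the handle refers to a heap object or a Smi before choosing a Push overload. A self-contained model of that dispatch, assuming V8's usual low-bit tagging and with the push callbacks invented for the example:

#include <cstdint>

// Illustrative only: a tagged word is a Smi when the low bit is clear and a
// heap pointer when it is set; the real types are Handle<Object>, Smi and
// HeapObject.
inline bool IsSmiWord(uintptr_t tagged) { return (tagged & 1) == 0; }

void PushObjectSketch(uintptr_t tagged, void (*push_smi)(uintptr_t),
                      void (*push_heap_object)(uintptr_t)) {
  if (IsSmiWord(tagged)) {
    push_smi(tagged);          // immediate value, no relocation entry needed
  } else {
    push_heap_object(tagged);  // tagged pointer, needs relocation info
  }
}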
+
+void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
mov(dst, Operand(value));
}
-void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+void TurboAssembler::Move(Register dst, Register src, Condition cond) {
if (!dst.is(src)) {
LoadRR(dst, src);
}
}
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
if (!dst.is(src)) {
ldr(dst, src);
}
}
-void MacroAssembler::MultiPush(RegList regs, Register location) {
+void TurboAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
@@ -199,7 +205,7 @@ void MacroAssembler::MultiPush(RegList regs, Register location) {
}
}
-void MacroAssembler::MultiPop(RegList regs, Register location) {
+void TurboAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
@@ -211,7 +217,7 @@ void MacroAssembler::MultiPop(RegList regs, Register location) {
AddP(location, location, Operand(stack_offset));
}
-void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
+void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
int16_t num_to_push = NumberOfBitsSet(dregs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -225,7 +231,7 @@ void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
}
}
-void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
+void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
@@ -238,17 +244,11 @@ void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
AddP(location, location, Operand(stack_offset));
}
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition) {
LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
-void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
- Condition) {
- DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
- StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2));
-}
-
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
DCHECK(cond == eq || cond == ne);
@@ -523,7 +523,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
-void MacroAssembler::PushCommonFrame(Register marker_reg) {
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
CleanseP(r14);
if (marker_reg.is_valid()) {
@@ -536,7 +536,7 @@ void MacroAssembler::PushCommonFrame(Register marker_reg) {
la(fp, MemOperand(sp, fp_delta * kPointerSize));
}
-void MacroAssembler::PopCommonFrame(Register marker_reg) {
+void TurboAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Pop(r14, fp, marker_reg);
} else {
@@ -544,7 +544,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) {
}
}
-void MacroAssembler::PushStandardFrame(Register function_reg) {
+void TurboAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
CleanseP(r14);
if (function_reg.is_valid()) {
@@ -557,7 +557,7 @@ void MacroAssembler::PushStandardFrame(Register function_reg) {
la(fp, MemOperand(sp, fp_delta * kPointerSize));
}
-void MacroAssembler::RestoreFrameStateForTailCall() {
+void TurboAssembler::RestoreFrameStateForTailCall() {
// if (FLAG_enable_embedded_constant_pool) {
// LoadP(kConstantPoolRegister,
// MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -629,7 +629,7 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
-void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
+void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN
if (!dst.is(src)) ldr(dst, src);
@@ -637,11 +637,11 @@ void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
sdbr(dst, kDoubleRegZero);
}
-void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
+void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
cdfbr(dst, src);
}
-void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
+void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
Register src) {
if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
cdlfbr(Condition(5), Condition(0), dst, src);
@@ -653,36 +653,36 @@ void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
}
}
-void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
+void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
cefbr(Condition(4), dst, src);
}
-void MacroAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
+void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
Register src) {
celfbr(Condition(4), Condition(0), dst, src);
}
-void MacroAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
+void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
Register src) {
cegbr(double_dst, src);
}
-void MacroAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
+void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdgbr(double_dst, src);
}
-void MacroAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
+void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
Register src) {
celgbr(Condition(0), Condition(0), double_dst, src);
}
-void MacroAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
+void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
Register src) {
cdlgbr(Condition(0), Condition(0), double_dst, src);
}
-void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
+void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -706,7 +706,7 @@ void MacroAssembler::ConvertFloat32ToInt64(const Register dst,
cgebr(m, dst, double_input);
}
-void MacroAssembler::ConvertDoubleToInt64(const Register dst,
+void TurboAssembler::ConvertDoubleToInt64(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -730,7 +730,7 @@ void MacroAssembler::ConvertDoubleToInt64(const Register dst,
cgdbr(m, dst, double_input);
}
-void MacroAssembler::ConvertDoubleToInt32(const Register dst,
+void TurboAssembler::ConvertDoubleToInt32(const Register dst,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -754,7 +754,7 @@ void MacroAssembler::ConvertDoubleToInt32(const Register dst,
cfdbr(m, dst, double_input);
}
-void MacroAssembler::ConvertFloat32ToInt32(const Register result,
+void TurboAssembler::ConvertFloat32ToInt32(const Register result,
const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -778,7 +778,7 @@ void MacroAssembler::ConvertFloat32ToInt32(const Register result,
cfebr(m, result, double_input);
}
-void MacroAssembler::ConvertFloat32ToUnsignedInt32(
+void TurboAssembler::ConvertFloat32ToUnsignedInt32(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -802,7 +802,7 @@ void MacroAssembler::ConvertFloat32ToUnsignedInt32(
clfebr(m, Condition(0), result, double_input);
}
-void MacroAssembler::ConvertFloat32ToUnsignedInt64(
+void TurboAssembler::ConvertFloat32ToUnsignedInt64(
const Register result, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -826,7 +826,7 @@ void MacroAssembler::ConvertFloat32ToUnsignedInt64(
clgebr(m, Condition(0), result, double_input);
}
-void MacroAssembler::ConvertDoubleToUnsignedInt64(
+void TurboAssembler::ConvertDoubleToUnsignedInt64(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -850,7 +850,7 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
clgdbr(m, Condition(0), dst, double_input);
}
-void MacroAssembler::ConvertDoubleToUnsignedInt32(
+void TurboAssembler::ConvertDoubleToUnsignedInt32(
const Register dst, const DoubleRegister double_input,
FPRoundingMode rounding_mode) {
Condition m = Condition(0);
@@ -875,7 +875,7 @@ void MacroAssembler::ConvertDoubleToUnsignedInt32(
}
#if !V8_TARGET_ARCH_S390X
-void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
LoadRR(r0, src_high);
@@ -885,7 +885,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
-void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
LoadRR(r0, src_high);
@@ -895,7 +895,7 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
-void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
LoadRR(r0, src_high);
@@ -905,7 +905,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
-void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
LoadRR(r0, src_high);
@@ -915,7 +915,7 @@ void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
-void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
LoadRR(r0, src_high);
@@ -925,7 +925,7 @@ void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
LoadRR(dst_low, r1);
}
-void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
+void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
LoadRR(r0, src_high);
@@ -936,15 +936,15 @@ void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
}
#endif
-void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
+void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
lgdr(dst, src);
}
-void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
+void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
ldgr(dst, src);
}
-void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
+void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -953,7 +953,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
}
}
-void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+void TurboAssembler::Prologue(bool code_pre_aging, Register base,
int prologue_offset) {
DCHECK(!base.is(no_reg));
{
@@ -990,7 +990,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
-void MacroAssembler::EnterFrame(StackFrame::Type type,
+void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// We create a stack frame with:
// Return Addr <-- old sp
@@ -1008,12 +1008,17 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
}
}
-int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer.
LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- lay(r1, MemOperand(
- fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment));
+ if (is_int20(StandardFrameConstants::kCallerSPOffset + stack_adjustment)) {
+ lay(r1, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ stack_adjustment));
+ } else {
+ AddP(r1, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
+ }
LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
LoadRR(sp, r1);
int frame_ends = pc_offset();
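LeaveFrame above now guards the lay() form with is_int20(), since the S390 long-displacement encodings carry only a signed 20-bit field; larger offsets fall back to materializing the address with AddP. A small stand-alone check equivalent to that guard, assuming is_int20 means a signed 20-bit range:

#include <cstdint>

// Illustrative only: true when a displacement fits the signed 20-bit field
// used by long-displacement forms such as LAY.
inline bool FitsSigned20(int64_t displacement) {
  return displacement >= -(int64_t{1} << 19) &&
         displacement < (int64_t{1} << 19);
}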
@@ -1084,9 +1089,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ mov(r1, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
StoreP(fp, MemOperand(r1));
- mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ mov(r1,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
StoreP(cp, MemOperand(r1));
// Optionally save all volatile double registers.
@@ -1102,7 +1109,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Allocate and align the frame preparing for calling the runtime
// function.
- const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
if (frame_alignment > 0) {
DCHECK(frame_alignment == 8);
ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8
@@ -1116,7 +1123,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-int MacroAssembler::ActivationFrameAlignment() {
+int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
@@ -1145,16 +1152,19 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
// Clear top frame.
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
+ isolate())));
StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
// Restore current context from top and clear it in debug mode.
if (restore_context) {
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ mov(ip, Operand(ExternalReference(IsolateAddressId::kContextAddress,
+ isolate())));
LoadP(cp, MemOperand(ip));
}
#ifdef DEBUG
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
#endif
@@ -1169,15 +1179,15 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
}
}
-void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
Move(dst, d0);
}
-void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d0);
}
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
@@ -1414,9 +1424,6 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
LoadW(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
-#if !defined(V8_TARGET_ARCH_S390X)
- SmiUntag(expected_reg);
-#endif
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
@@ -1448,17 +1455,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
InvokeFunction(r3, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
- Label* fail) {
- DCHECK(kNotStringTag != 0);
-
- LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- mov(r0, Operand(kIsNotStringMask));
- AndP(r0, scratch);
- bne(fail);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1476,7 +1472,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ mov(r7,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
// Buy the full stack frame for 5 slots.
lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
@@ -1494,7 +1491,8 @@ void MacroAssembler::PopStackHandler() {
// Pop the Next Handler into r3 and store it into Handler Address reference.
Pop(r3);
- mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ mov(ip,
+ Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
StoreP(r3, MemOperand(ip));
}
@@ -1544,7 +1542,6 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1617,10 +1614,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
CmpLogicalP(result_end, MemOperand(top_address, limit - top));
bge(gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- StoreP(result_end, MemOperand(top_address));
- }
+ StoreP(result_end, MemOperand(top_address));
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
// Prefetch the allocation_top's next cache line in advance to
@@ -1636,7 +1630,6 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1716,69 +1709,6 @@ void MacroAssembler::Allocate(Register object_size, Register result,
AndP(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- StoreP(result_end, MemOperand(top_address));
- }
-
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
- // Prefetch the allocation_top's next cache line in advance to
- // help alleviate potential cache misses.
- // Mode 2 - Prefetch the data into a cache line for store access.
- pfd(static_cast<Condition>(2), MemOperand(result, 256));
- }
-
- // Tag object.
- la(result, MemOperand(result, kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, Register scratch,
- AllocationFlags flags) {
- // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
- // is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, ip));
- DCHECK(!AreAliased(result_end, result, scratch, ip));
- DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- Register top_address = scratch;
- mov(top_address, Operand(allocation_top));
- LoadP(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
-// Align the next allocation. Storing the filler map without checking top is
-// safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_S390X
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- AndP(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned, Label::kNear);
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- StoreW(result_end, MemOperand(result));
- AddP(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
- // Calculate new top using result. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
- AddP(result_end, result, result_end);
- } else {
- AddP(result_end, result, object_size);
- }
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- AndP(r0, result_end, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, cr0);
- }
StoreP(result_end, MemOperand(top_address));
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -1792,74 +1722,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
la(result, MemOperand(result, kHeapObjectTag));
}
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register scratch1, Register scratch2,
- AllocationFlags flags) {
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK(!AreAliased(result, scratch1, scratch2, ip));
-
- // Make object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- DCHECK_EQ(0, object_size & kObjectAlignmentMask);
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Set up allocation top address register.
- Register top_address = scratch1;
- Register result_end = scratch2;
- mov(top_address, Operand(allocation_top));
- LoadP(result, MemOperand(top_address));
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
-// Align the next allocation. Storing the filler map without checking top is
-// safe in new-space because the limit of the heap is aligned there.
-#if V8_TARGET_ARCH_S390X
- STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
-#else
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- AndP(result_end, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- beq(&aligned, Label::kNear);
- mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
- StoreW(result_end, MemOperand(result));
- AddP(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
-#endif
- }
-
-#if V8_TARGET_ARCH_S390X
- // Limit to 64-bit only, as double alignment check above may adjust
- // allocation top by an extra kDoubleSize/2.
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(object_size)) {
- // Update allocation top.
- AddP(MemOperand(top_address), Operand(object_size));
- } else {
- // Calculate new top using result.
- AddP(result_end, result, Operand(object_size));
- // Update allocation top.
- StoreP(result_end, MemOperand(top_address));
- }
-#else
- // Calculate new top using result.
- AddP(result_end, result, Operand(object_size));
- // Update allocation top.
- StoreP(result_end, MemOperand(top_address));
-#endif
-
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
- // Prefetch the allocation_top's next cache line in advance to
- // help alleviate potential cache misses.
- // Mode 2 - Prefetch the data into a cache line for store access.
- pfd(static_cast<Condition>(2), MemOperand(result, 256));
- }
-
- // Tag object.
- la(result, MemOperand(result, kHeapObjectTag));
-}
-
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
@@ -1944,17 +1806,21 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
- Condition cond) {
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+void TurboAssembler::CallStubDelayed(CodeStub* stub) {
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ call(stub);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2011,53 +1877,8 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
bind(&done);
}
-void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
- Register input_high, Register scratch,
- DoubleRegister double_scratch, Label* done,
- Label* exact) {
- DCHECK(!result.is(input_high));
- DCHECK(!double_input.is(double_scratch));
- Label exception;
-
- // Move high word into input_high
- lay(sp, MemOperand(sp, -kDoubleSize));
- StoreDouble(double_input, MemOperand(sp));
- LoadlW(input_high, MemOperand(sp, Register::kExponentOffset));
- la(sp, MemOperand(sp, kDoubleSize));
-
- // Test for NaN/Inf
- ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
- CmpLogicalP(result, Operand(0x7ff));
- beq(&exception);
-
- // Convert (rounding to -Inf)
- ConvertDoubleToInt64(result, double_input, kRoundToMinusInf);
-
- // Test for overflow
- TestIfInt32(result);
- bne(&exception);
-
- // Test for exactness
- cdfbr(double_scratch, result);
- cdbr(double_scratch, double_input);
- beq(exact);
- b(done);
-
- bind(&exception);
-}
-
-void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
- DoubleRegister double_input,
- Label* done) {
- ConvertDoubleToInt64(result, double_input);
-
- // Test for overflow
- TestIfInt32(result);
- beq(done);
-}
-
-void MacroAssembler::TruncateDoubleToI(Register result,
- DoubleRegister double_input) {
+void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@@ -2068,8 +1889,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
lay(sp, MemOperand(sp, -kDoubleSize));
StoreDouble(double_input, MemOperand(sp));
- DoubleToIStub stub(isolate(), sp, result, 0, true, true);
- CallStub(&stub);
+ CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
la(sp, MemOperand(sp, kDoubleSize));
pop(r14);
@@ -2077,35 +1897,14 @@ void MacroAssembler::TruncateDoubleToI(Register result,
bind(&done);
}
-void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
- Label done;
- DoubleRegister double_scratch = kScratchDoubleReg;
- DCHECK(!result.is(object));
-
- LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
- TryInlineTruncateDoubleToI(result, double_scratch, &done);
-
- // If we fell through then inline version didn't succeed - call stub instead.
- push(r14);
- DoubleToIStub stub(isolate(), object, result,
- HeapNumber::kValueOffset - kHeapObjectTag, true, true);
- CallStub(&stub);
- pop(r14);
-
- bind(&done);
-}
-
-void MacroAssembler::TruncateNumberToI(Register object, Register result,
- Register heap_number_map,
- Register scratch1, Label* not_number) {
- Label done;
- DCHECK(!result.is(object));
-
- UntagAndJumpIfSmi(result, object, &done);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
- TruncateHeapNumberToI(result, object);
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ ConvertDoubleToInt64(result, double_input);
- bind(&done);
+ // Test for overflow
+ TestIfInt32(result);
+ beq(done);
}
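TruncateDoubleToIDelayed above first tries the inline path (ConvertDoubleToInt64 followed by TestIfInt32) and only falls back to the zone-allocated DoubleToIStub when the result does not fit in 32 bits. A plain C++ model of that fast path, for illustration only:

#include <cstdint>
#include <limits>

// Illustrative only: convert to int64 and accept the result only if it
// round-trips as an int32, mirroring ConvertDoubleToInt64 + TestIfInt32.
bool TryTruncateToInt32(double input, int32_t* out) {
  if (!(input > static_cast<double>(std::numeric_limits<int64_t>::min()) &&
        input < static_cast<double>(std::numeric_limits<int64_t>::max()))) {
    return false;  // NaN or out of int64 range: take the slow (stub) path
  }
  int64_t wide = static_cast<int64_t>(input);  // truncate toward zero
  if (wide != static_cast<int32_t>(wide)) return false;  // overflow of int32
  *out = static_cast<int32_t>(wide);
  return true;
}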
void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
@@ -2125,6 +1924,20 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
AndP(dst, src, Operand((1 << num_least_bits) - 1));
}
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ mov(r2, Operand(f->nargs));
+ mov(r3, Operand(ExternalReference(f, isolate())));
+ CallStubDelayed(new (zone) CEntryStub(nullptr,
+#if V8_TARGET_ARCH_S390X
+ f->result_size,
+#else
+ 1,
+#endif
+ save_doubles));
+}
+
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r2 has the return value after call.
@@ -2209,12 +2022,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void MacroAssembler::Assert(Condition cond, BailoutReason reason,
+void TurboAssembler::Assert(Condition cond, BailoutReason reason,
CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
-void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+void TurboAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
Label L;
b(cond, &L);
Abort(reason);
@@ -2222,7 +2035,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
bind(&L);
}
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
@@ -2238,9 +2051,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -2377,6 +2187,18 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAFixedArray, cr0);
+ push(object);
+ CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFixedArray);
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2401,8 +2223,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
TestIfSmi(object);
Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
@@ -2412,17 +2233,14 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
push(object);
LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- Label async, do_check;
- tmll(flags, Operand(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
- bne(&async);
-
// Check if JSGeneratorObject
- CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE);
- b(&do_check);
+ Label do_check;
+ Register instance_type = object;
+ CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
+ beq(&do_check);
- bind(&async);
- // Check if JSAsyncGeneratorObject
- CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
+ CmpP(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
bind(&do_check);
// Restore generator object to register and perform assertion
@@ -2504,8 +2322,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required,
- MutableMode mode) {
+ Label* gc_required, MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
@@ -2543,7 +2360,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset),
+ r0);
StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -2592,7 +2410,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
static const int kRegisterPassedArguments = 5;
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (num_double_arguments > DoubleRegister::kNumRegisters) {
@@ -2646,7 +2464,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
SmiUntag(index, index);
}
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
@@ -2658,7 +2476,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// -- preserving original value of sp.
LoadRR(scratch, sp);
lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
} else {
@@ -2667,16 +2485,16 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
}
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
-void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
+void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
-void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
+void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
-void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (src2.is(d0)) {
DCHECK(!src1.is(d2));
@@ -2688,28 +2506,28 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
-void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
-void MacroAssembler::CallCFunctionHelper(Register function,
+void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
@@ -2737,14 +2555,14 @@ void MacroAssembler::CallCFunctionHelper(Register function,
}
}
-void MacroAssembler::CheckPageFlag(
+void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- if (base::bits::IsPowerOfTwo32(mask)) {
+ if (base::bits::IsPowerOfTwo(mask)) {
// If it's a power of two, we can use Test-Under-Mask Memory-Imm form
// which allows testing of a single byte in memory.
int32_t byte_offset = 4;
@@ -3104,29 +2922,38 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
return candidate;
}
UNREACHABLE();
- return no_reg;
}
-void MacroAssembler::mov(Register dst, const Operand& src) {
- if (src.rmode_ != kRelocInfo_NONEPTR) {
+void TurboAssembler::mov(Register dst, const Operand& src) {
+#if V8_TARGET_ARCH_S390X
+ int64_t value;
+#else
+ int value;
+#endif
+ if (src.is_heap_object_request()) {
+ RequestHeapObject(src.heap_object_request());
+ value = 0;
+ } else {
+ value = src.immediate();
+ }
+
+ if (src.rmode() != kRelocInfo_NONEPTR) {
// some form of relocation needed
- RecordRelocInfo(src.rmode_, src.imm_);
+ RecordRelocInfo(src.rmode(), value);
}
#if V8_TARGET_ARCH_S390X
- int64_t value = src.immediate();
int32_t hi_32 = static_cast<int64_t>(value) >> 32;
int32_t lo_32 = static_cast<int32_t>(value);
iihf(dst, Operand(hi_32));
iilf(dst, Operand(lo_32));
#else
- int value = src.immediate();
iilf(dst, Operand(value));
#endif
}
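The rewritten mov() above handles an Operand carrying a heap object request by registering the request and emitting a zero placeholder that is patched once the object exists, while still recording relocation info for the operand. An illustrative sketch of that placeholder-and-patch idea, with the bookkeeping types invented for the example:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: remember where the operand bytes go, emit zero now,
// patch later (what RequestHeapObject + value = 0 accomplishes above).
struct PendingObject {
  size_t patch_offset;  // position of the operand bytes in the code buffer
};

void EmitMoveImmediateSketch(std::vector<uint8_t>* code,
                             std::vector<PendingObject>* pending,
                             bool is_object_request, uint64_t immediate) {
  if (is_object_request) {
    pending->push_back({code->size()});
    immediate = 0;
  }
  for (int shift = 56; shift >= 0; shift -= 8) {  // stand-in for IIHF/IILF
    code->push_back(static_cast<uint8_t>(immediate >> shift));
  }
}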
-void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
+void TurboAssembler::Mul32(Register dst, const MemOperand& src1) {
if (is_uint12(src1.offset())) {
ms(dst, src1);
} else if (is_int20(src1.offset())) {
@@ -3136,9 +2963,9 @@ void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
}
}
-void MacroAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
+void TurboAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
-void MacroAssembler::Mul32(Register dst, const Operand& src1) {
+void TurboAssembler::Mul32(Register dst, const Operand& src1) {
msfi(dst, src1);
}
@@ -3149,19 +2976,19 @@ void MacroAssembler::Mul32(Register dst, const Operand& src1) {
srlg(dst, dst, Operand(32)); \
}
-void MacroAssembler::MulHigh32(Register dst, Register src1,
+void TurboAssembler::MulHigh32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHigh32(msgf);
}
-void MacroAssembler::MulHigh32(Register dst, Register src1, Register src2) {
+void TurboAssembler::MulHigh32(Register dst, Register src1, Register src2) {
if (dst.is(src2)) {
std::swap(src1, src2);
}
Generate_MulHigh32(msgfr);
}
-void MacroAssembler::MulHigh32(Register dst, Register src1,
+void TurboAssembler::MulHigh32(Register dst, Register src1,
const Operand& src2) {
Generate_MulHigh32(msgfi);
}
@@ -3175,16 +3002,16 @@ void MacroAssembler::MulHigh32(Register dst, Register src1,
LoadlW(dst, r0); \
}
-void MacroAssembler::MulHighU32(Register dst, Register src1,
+void TurboAssembler::MulHighU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_MulHighU32(ml);
}
-void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
+void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
Generate_MulHighU32(mlr);
}
-void MacroAssembler::MulHighU32(Register dst, Register src1,
+void TurboAssembler::MulHighU32(Register dst, Register src1,
const Operand& src2) {
USE(dst);
USE(src1);
@@ -3201,7 +3028,7 @@ void MacroAssembler::MulHighU32(Register dst, Register src1,
cgfr(dst, dst); \
}
-void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const MemOperand& src2) {
Register result = dst;
if (src2.rx().is(dst) || src2.rb().is(dst)) dst = r0;
@@ -3209,7 +3036,7 @@ void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
if (!result.is(dst)) llgfr(result, dst);
}
-void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Register src2) {
if (dst.is(src2)) {
std::swap(src1, src2);
@@ -3217,14 +3044,14 @@ void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
Generate_Mul32WithOverflowIfCCUnequal(msgfr);
}
-void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
const Operand& src2) {
Generate_Mul32WithOverflowIfCCUnequal(msgfi);
}
#undef Generate_Mul32WithOverflowIfCCUnequal
-void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
+void TurboAssembler::Mul64(Register dst, const MemOperand& src1) {
if (is_int20(src1.offset())) {
msg(dst, src1);
} else {
@@ -3232,13 +3059,13 @@ void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
}
}
-void MacroAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
+void TurboAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
-void MacroAssembler::Mul64(Register dst, const Operand& src1) {
+void TurboAssembler::Mul64(Register dst, const Operand& src1) {
msgfi(dst, src1);
}
-void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
+void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
MulPWithCondition(dst, src1, src2);
} else {
@@ -3253,7 +3080,7 @@ void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
}
}
-void MacroAssembler::DivP(Register dividend, Register divider) {
+void TurboAssembler::DivP(Register dividend, Register divider) {
// have to make sure the src and dst are reg pairs
DCHECK(dividend.code() % 2 == 0);
#if V8_TARGET_ARCH_S390X
@@ -3270,12 +3097,12 @@ void MacroAssembler::DivP(Register dividend, Register divider) {
LoadlW(dst, r1); \
}
-void MacroAssembler::Div32(Register dst, Register src1,
+void TurboAssembler::Div32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div32(dsgf);
}
-void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
+void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
Generate_Div32(dsgfr);
}
@@ -3289,12 +3116,12 @@ void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
LoadlW(dst, r1); \
}
-void MacroAssembler::DivU32(Register dst, Register src1,
+void TurboAssembler::DivU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU32(dl);
}
-void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
+void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
Generate_DivU32(dlr);
}
@@ -3307,12 +3134,12 @@ void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
-void MacroAssembler::Div64(Register dst, Register src1,
+void TurboAssembler::Div64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Div64(dsg);
}
-void MacroAssembler::Div64(Register dst, Register src1, Register src2) {
+void TurboAssembler::Div64(Register dst, Register src1, Register src2) {
Generate_Div64(dsgr);
}
@@ -3326,12 +3153,12 @@ void MacroAssembler::Div64(Register dst, Register src1, Register src2) {
lgr(dst, r1); \
}
-void MacroAssembler::DivU64(Register dst, Register src1,
+void TurboAssembler::DivU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_DivU64(dlg);
}
-void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
+void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
Generate_DivU64(dlgr);
}
@@ -3344,12 +3171,12 @@ void MacroAssembler::DivU64(Register dst, Register src1, Register src2) {
LoadlW(dst, r0); \
}
-void MacroAssembler::Mod32(Register dst, Register src1,
+void TurboAssembler::Mod32(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod32(dsgf);
}
-void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
+void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
Generate_Mod32(dsgfr);
}
@@ -3363,12 +3190,12 @@ void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
LoadlW(dst, r0); \
}
-void MacroAssembler::ModU32(Register dst, Register src1,
+void TurboAssembler::ModU32(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU32(dl);
}
-void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
+void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
Generate_ModU32(dlr);
}
@@ -3381,12 +3208,12 @@ void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
-void MacroAssembler::Mod64(Register dst, Register src1,
+void TurboAssembler::Mod64(Register dst, Register src1,
const MemOperand& src2) {
Generate_Mod64(dsg);
}
-void MacroAssembler::Mod64(Register dst, Register src1, Register src2) {
+void TurboAssembler::Mod64(Register dst, Register src1, Register src2) {
Generate_Mod64(dsgr);
}
@@ -3400,18 +3227,18 @@ void MacroAssembler::Mod64(Register dst, Register src1, Register src2) {
lgr(dst, r0); \
}
-void MacroAssembler::ModU64(Register dst, Register src1,
+void TurboAssembler::ModU64(Register dst, Register src1,
const MemOperand& src2) {
Generate_ModU64(dlg);
}
-void MacroAssembler::ModU64(Register dst, Register src1, Register src2) {
+void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
Generate_ModU64(dlgr);
}
#undef Generate_ModU64
-void MacroAssembler::MulP(Register dst, const Operand& opnd) {
+void TurboAssembler::MulP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
msgfi(dst, opnd);
#else
@@ -3419,7 +3246,7 @@ void MacroAssembler::MulP(Register dst, const Operand& opnd) {
#endif
}
-void MacroAssembler::MulP(Register dst, Register src) {
+void TurboAssembler::MulP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
msgr(dst, src);
#else
@@ -3427,7 +3254,7 @@ void MacroAssembler::MulP(Register dst, Register src) {
#endif
}
-void MacroAssembler::MulPWithCondition(Register dst, Register src1,
+void TurboAssembler::MulPWithCondition(Register dst, Register src1,
Register src2) {
CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
#if V8_TARGET_ARCH_S390X
@@ -3437,7 +3264,7 @@ void MacroAssembler::MulPWithCondition(Register dst, Register src1,
#endif
}
-void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::MulP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
if (is_uint16(opnd.offset())) {
ms(dst, opnd);
@@ -3455,10 +3282,10 @@ void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
#endif
}
-void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
+void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
sqdbr(result, input);
}
-void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
+void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
if (is_uint12(input.offset())) {
sqdb(result, input);
} else {
@@ -3471,7 +3298,7 @@ void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
//----------------------------------------------------------------------------
// Add 32-bit (Register dst = Register dst + Immediate opnd)
-void MacroAssembler::Add32(Register dst, const Operand& opnd) {
+void TurboAssembler::Add32(Register dst, const Operand& opnd) {
if (is_int16(opnd.immediate()))
ahi(dst, opnd);
else
@@ -3479,13 +3306,13 @@ void MacroAssembler::Add32(Register dst, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register dst + Immediate opnd)
-void MacroAssembler::Add32_RI(Register dst, const Operand& opnd) {
+void TurboAssembler::Add32_RI(Register dst, const Operand& opnd) {
// Just a wrapper for above
Add32(dst, opnd);
}
// Add Pointer Size (Register dst = Register dst + Immediate opnd)
-void MacroAssembler::AddP(Register dst, const Operand& opnd) {
+void TurboAssembler::AddP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
if (is_int16(opnd.immediate()))
aghi(dst, opnd);
@@ -3497,7 +3324,7 @@ void MacroAssembler::AddP(Register dst, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
-void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::Add32(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
ahik(dst, src, opnd);
@@ -3509,14 +3336,14 @@ void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register src + Immediate opnd)
-void MacroAssembler::Add32_RRI(Register dst, Register src,
+void TurboAssembler::Add32_RRI(Register dst, Register src,
const Operand& opnd) {
// Just a wrapper for above
Add32(dst, src, opnd);
}
// Add Pointer Size (Register dst = Register src + Immediate opnd)
-void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::AddP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) {
if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
AddPImm_RRI(dst, src, opnd);
@@ -3528,16 +3355,16 @@ void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
}
// Add 32-bit (Register dst = Register dst + Register src)
-void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }
+void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
// Add Pointer Size (Register dst = Register dst + Register src)
-void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
+void TurboAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
// Add Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
+void TurboAssembler::AddP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
agfr(dst, src);
#else
@@ -3546,7 +3373,7 @@ void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
}
// Add 32-bit (Register dst = Register src1 + Register src2)
-void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
+void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
@@ -3563,7 +3390,7 @@ void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
}
// Add Pointer Size (Register dst = Register src1 + Register src2)
-void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
+void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
// as AR is a smaller instruction
@@ -3584,7 +3411,7 @@ void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
// Register src2 (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
+void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
Register src2) {
#if V8_TARGET_ARCH_S390X
if (dst.is(src2)) {
@@ -3601,7 +3428,7 @@ void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
}
// Add 32-bit (Register-Memory)
-void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
+void TurboAssembler::Add32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
a(dst, opnd);
@@ -3610,7 +3437,7 @@ void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
}
// Add Pointer Size (Register-Memory)
-void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::AddP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
ag(dst, opnd);
@@ -3623,7 +3450,7 @@ void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
// (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
+void TurboAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
agf(dst, opnd);
@@ -3633,7 +3460,7 @@ void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
}
// Add 32-bit (Memory - Immediate)
-void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
+void TurboAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@@ -3641,7 +3468,7 @@ void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
}
// Add Pointer-sized (Memory - Immediate)
-void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
+void TurboAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
DCHECK(is_int8(imm.immediate()));
DCHECK(is_int20(opnd.offset()));
DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
@@ -3657,7 +3484,7 @@ void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
//----------------------------------------------------------------------------
// Add Logical With Carry 32-bit (Register dst = Register src1 + Register src2)
-void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
+void TurboAssembler::AddLogicalWithCarry32(Register dst, Register src1,
Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
@@ -3674,7 +3501,7 @@ void MacroAssembler::AddLogicalWithCarry32(Register dst, Register src1,
}
// Add Logical 32-bit (Register dst = Register src1 + Register src2)
-void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
+void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
alr(dst, src2);
@@ -3690,12 +3517,12 @@ void MacroAssembler::AddLogical32(Register dst, Register src1, Register src2) {
}
// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
-void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
+void TurboAssembler::AddLogical(Register dst, const Operand& imm) {
alfi(dst, imm);
}
// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
-void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
+void TurboAssembler::AddLogicalP(Register dst, const Operand& imm) {
#ifdef V8_TARGET_ARCH_S390X
algfi(dst, imm);
#else
@@ -3704,7 +3531,7 @@ void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
}
// Add Logical 32-bit (Register-Memory)
-void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
+void TurboAssembler::AddLogical(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
al_z(dst, opnd);
@@ -3713,7 +3540,7 @@ void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
}
// Add Logical Pointer Size (Register-Memory)
-void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
alg(dst, opnd);
@@ -3728,7 +3555,7 @@ void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
// Subtract Logical With Carry 32-bit (Register dst = Register src1 - Register
// src2)
-void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
+void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
@@ -3746,7 +3573,7 @@ void MacroAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
}
// Subtract Logical 32-bit (Register dst = Register src1 - Register src2)
-void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
+void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
if (!dst.is(src2) && !dst.is(src1)) {
lr(dst, src1);
slr(dst, src2);
@@ -3763,36 +3590,36 @@ void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
}
// Subtract 32-bit (Register dst = Register dst - Immediate opnd)
-void MacroAssembler::Sub32(Register dst, const Operand& imm) {
- Add32(dst, Operand(-(imm.imm_)));
+void TurboAssembler::Sub32(Register dst, const Operand& imm) {
+ Add32(dst, Operand(-(imm.immediate())));
}
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
-void MacroAssembler::SubP(Register dst, const Operand& imm) {
- AddP(dst, Operand(-(imm.imm_)));
+void TurboAssembler::SubP(Register dst, const Operand& imm) {
+ AddP(dst, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
-void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
- Add32(dst, src, Operand(-(imm.imm_)));
+void TurboAssembler::Sub32(Register dst, Register src, const Operand& imm) {
+ Add32(dst, src, Operand(-(imm.immediate())));
}
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
-void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
- AddP(dst, src, Operand(-(imm.imm_)));
+void TurboAssembler::SubP(Register dst, Register src, const Operand& imm) {
+ AddP(dst, src, Operand(-(imm.immediate())));
}
// Subtract 32-bit (Register dst = Register dst - Register src)
-void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
+void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
// Subtract Pointer Size (Register dst = Register dst - Register src)
-void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
+void TurboAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
// Subtract Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
+void TurboAssembler::SubP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
sgfr(dst, src);
#else
@@ -3801,7 +3628,7 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
}
// Subtract 32-bit (Register = Register - Register)
-void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
+void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
@@ -3821,7 +3648,7 @@ void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
}
// Subtract Pointer Sized (Register = Register - Register)
-void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
+void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
SubP_RRR(dst, src1, src2);
@@ -3844,7 +3671,7 @@ void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
+void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
Register src2) {
#if V8_TARGET_ARCH_S390X
if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
@@ -3863,7 +3690,7 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
}
// Subtract 32-bit (Register-Memory)
-void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
+void TurboAssembler::Sub32(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
s(dst, opnd);
@@ -3872,7 +3699,7 @@ void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
}
// Subtract Pointer Sized (Register - Memory)
-void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::SubP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
sg(dst, opnd);
#else
@@ -3880,17 +3707,17 @@ void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
#endif
}
-void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
sllg(r0, src, Operand(32));
ldgr(dst, r0);
}
-void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
lgdr(dst, src);
srlg(dst, dst, Operand(32));
}
-void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
+void TurboAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
sgf(dst, opnd);
@@ -3904,7 +3731,7 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
//----------------------------------------------------------------------------
// Subtract Logical 32-bit (Register - Memory)
-void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
+void TurboAssembler::SubLogical(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
sl(dst, opnd);
@@ -3913,7 +3740,7 @@ void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
}
// Subtract Logical Pointer Sized (Register - Memory)
-void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
slgf(dst, opnd);
@@ -3926,7 +3753,7 @@ void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
// (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
+void TurboAssembler::SubLogicalP_ExtendSrc(Register dst,
const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(opnd.offset()));
@@ -3941,13 +3768,13 @@ void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
//----------------------------------------------------------------------------
// AND 32-bit - dst = dst & src
-void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }
+void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
// AND Pointer Size - dst = dst & src
-void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
+void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
// Non-clobbering AND 32-bit - dst = src1 & src2
-void MacroAssembler::And(Register dst, Register src1, Register src2) {
+void TurboAssembler::And(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate NR/NGR over the non-clobbering NRK/NGRK
// as NR is a smaller instruction
@@ -3964,7 +3791,7 @@ void MacroAssembler::And(Register dst, Register src1, Register src2) {
}
// Non-clobbering AND pointer size - dst = src1 & src2
-void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
+void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate NR/NGR over the non-clobbering NRK/NGRK
// as NR is a smaller instruction
@@ -3981,7 +3808,7 @@ void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
}
// AND 32-bit (Reg - Mem)
-void MacroAssembler::And(Register dst, const MemOperand& opnd) {
+void TurboAssembler::And(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
n(dst, opnd);
@@ -3990,7 +3817,7 @@ void MacroAssembler::And(Register dst, const MemOperand& opnd) {
}
// AND Pointer Size (Reg - Mem)
-void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::AndP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
ng(dst, opnd);
@@ -4000,12 +3827,12 @@ void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
}
// AND 32-bit - dst = dst & imm
-void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
+void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
// AND Pointer Size - dst = dst & imm
-void MacroAssembler::AndP(Register dst, const Operand& opnd) {
+void TurboAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- intptr_t value = opnd.imm_;
+ intptr_t value = opnd.immediate();
if (value >> 32 != -1) {
// this may not work b/c condition code won't be set correctly
nihf(dst, Operand(value >> 32));
@@ -4017,15 +3844,15 @@ void MacroAssembler::AndP(Register dst, const Operand& opnd) {
}
// AND 32-bit - dst = src & imm
-void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::And(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) lr(dst, src);
nilf(dst, opnd);
}
// AND Pointer Size - dst = src & imm
-void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// Try to exploit RISBG first
- intptr_t value = opnd.imm_;
+ intptr_t value = opnd.immediate();
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
intptr_t shifted_value = value;
int trailing_zeros = 0;
@@ -4040,7 +3867,7 @@ void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// than power of 2, we have consecutive bits of 1.
// Special case: If shift_value is zero, we cannot use RISBG, as it requires
// selection of at least 1 bit.
- if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
+ if ((0 != shifted_value) && base::bits::IsPowerOfTwo(shifted_value + 1)) {
int startBit =
base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
int endBit = 63 - trailing_zeros;
@@ -4063,13 +3890,13 @@ void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
}
// OR 32-bit - dst = dst | src
-void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }
+void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
// OR Pointer Size - dst = dst | src
-void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
+void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
// Non-clobbering OR 32-bit - dst = src1 | src2
-void MacroAssembler::Or(Register dst, Register src1, Register src2) {
+void TurboAssembler::Or(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
// as OR is a smaller instruction
@@ -4086,7 +3913,7 @@ void MacroAssembler::Or(Register dst, Register src1, Register src2) {
}
// Non-clobbering OR pointer size - dst = src1 | src2
-void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
+void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate OR/OGR over the non-clobbering ORK/OGRK
// as OR is a smaller instruction
@@ -4103,7 +3930,7 @@ void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
}
// OR 32-bit (Reg - Mem)
-void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
+void TurboAssembler::Or(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
o(dst, opnd);
@@ -4112,7 +3939,7 @@ void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
}
// OR Pointer Size (Reg - Mem)
-void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::OrP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
og(dst, opnd);
@@ -4122,12 +3949,12 @@ void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
}
// OR 32-bit - dst = dst | imm
-void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
+void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
// OR Pointer Size - dst = dst | imm
-void MacroAssembler::OrP(Register dst, const Operand& opnd) {
+void TurboAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- intptr_t value = opnd.imm_;
+ intptr_t value = opnd.immediate();
if (value >> 32 != 0) {
// this may not work b/c condition code won't be set correctly
oihf(dst, Operand(value >> 32));
@@ -4139,25 +3966,25 @@ void MacroAssembler::OrP(Register dst, const Operand& opnd) {
}
// OR 32-bit - dst = src | imm
-void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) lr(dst, src);
oilf(dst, opnd);
}
// OR Pointer Size - dst = src | imm
-void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) LoadRR(dst, src);
OrP(dst, opnd);
}
// XOR 32-bit - dst = dst ^ src
-void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }
+void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
// XOR Pointer Size - dst = dst ^ src
-void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
+void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
// Non-clobbering XOR 32-bit - dst = src1 ^ src2
-void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
+void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR over the non-clobbering XRK/XGRK
// as XR is a smaller instruction
@@ -4174,7 +4001,7 @@ void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
}
// Non-clobbering XOR pointer size - dst = src1 ^ src2
-void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
+void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
if (!dst.is(src1) && !dst.is(src2)) {
// We prefer to generate XR/XGR over the non-clobbering XRK/XGRK
// as XR is a smaller instruction
@@ -4191,7 +4018,7 @@ void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
}
// XOR 32-bit (Reg - Mem)
-void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
+void TurboAssembler::Xor(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
x(dst, opnd);
@@ -4200,7 +4027,7 @@ void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
}
// XOR Pointer Size (Reg - Mem)
-void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::XorP(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
xg(dst, opnd);
@@ -4210,12 +4037,12 @@ void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
}
// XOR 32-bit - dst = dst ^ imm
-void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
+void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
// XOR Pointer Size - dst = dst ^ imm
-void MacroAssembler::XorP(Register dst, const Operand& opnd) {
+void TurboAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- intptr_t value = opnd.imm_;
+ intptr_t value = opnd.immediate();
xihf(dst, Operand(value >> 32));
xilf(dst, Operand(value & 0xFFFFFFFF));
#else
@@ -4224,29 +4051,29 @@ void MacroAssembler::XorP(Register dst, const Operand& opnd) {
}
// XOR 32-bit - dst = src ^ imm
-void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) lr(dst, src);
xilf(dst, opnd);
}
// XOR Pointer Size - dst = src ^ imm
-void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
+void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) {
if (!dst.is(src)) LoadRR(dst, src);
XorP(dst, opnd);
}
-void MacroAssembler::Not32(Register dst, Register src) {
+void TurboAssembler::Not32(Register dst, Register src) {
if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
xilf(dst, Operand(0xFFFFFFFF));
}
-void MacroAssembler::Not64(Register dst, Register src) {
+void TurboAssembler::Not64(Register dst, Register src) {
if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
}
-void MacroAssembler::NotP(Register dst, Register src) {
+void TurboAssembler::NotP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
Not64(dst, src);
#else
@@ -4255,7 +4082,7 @@ void MacroAssembler::NotP(Register dst, Register src) {
}
// works the same as mov
-void MacroAssembler::Load(Register dst, const Operand& opnd) {
+void TurboAssembler::Load(Register dst, const Operand& opnd) {
intptr_t value = opnd.immediate();
if (is_int16(value)) {
#if V8_TARGET_ARCH_S390X
@@ -4284,7 +4111,7 @@ void MacroAssembler::Load(Register dst, const Operand& opnd) {
}
}
-void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
+void TurboAssembler::Load(Register dst, const MemOperand& opnd) {
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
lgf(dst, opnd); // 64<-32
@@ -4297,7 +4124,7 @@ void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
#endif
}
-void MacroAssembler::LoadPositiveP(Register result, Register input) {
+void TurboAssembler::LoadPositiveP(Register result, Register input) {
#if V8_TARGET_ARCH_S390X
lpgr(result, input);
#else
@@ -4305,7 +4132,7 @@ void MacroAssembler::LoadPositiveP(Register result, Register input) {
#endif
}
-void MacroAssembler::LoadPositive32(Register result, Register input) {
+void TurboAssembler::LoadPositive32(Register result, Register input) {
lpr(result, input);
lgfr(result, result);
}
@@ -4315,10 +4142,10 @@ void MacroAssembler::LoadPositive32(Register result, Register input) {
//-----------------------------------------------------------------------------
// Compare 32-bit Register vs Register
-void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
+void TurboAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
// Compare Pointer Sized Register vs Register
-void MacroAssembler::CmpP(Register src1, Register src2) {
+void TurboAssembler::CmpP(Register src1, Register src2) {
#if V8_TARGET_ARCH_S390X
cgr(src1, src2);
#else
@@ -4328,8 +4155,8 @@ void MacroAssembler::CmpP(Register src1, Register src2) {
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
-void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
- if (opnd.rmode_ == kRelocInfo_NONEPTR) {
+void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
+ if (opnd.rmode() == kRelocInfo_NONEPTR) {
intptr_t value = opnd.immediate();
if (is_int16(value))
chi(dst, opnd);
@@ -4337,16 +4164,16 @@ void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
cfi(dst, opnd);
} else {
// Need to generate relocation record here
- RecordRelocInfo(opnd.rmode_, opnd.imm_);
+ RecordRelocInfo(opnd.rmode(), opnd.immediate());
cfi(dst, opnd);
}
}
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
-void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
+void TurboAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- if (opnd.rmode_ == kRelocInfo_NONEPTR) {
+ if (opnd.rmode() == kRelocInfo_NONEPTR) {
cgfi(dst, opnd);
} else {
mov(r0, opnd); // Need to generate 64-bit relocation
@@ -4358,7 +4185,7 @@ void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
}
// Compare 32-bit Register vs Memory
-void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
+void TurboAssembler::Cmp32(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
@@ -4368,7 +4195,7 @@ void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
}
// Compare Pointer Size Register vs Memory
-void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::CmpP(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
@@ -4383,10 +4210,10 @@ void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
//-----------------------------------------------------------------------------
// Compare Logical 32-bit Register vs Register
-void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
+void TurboAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
// Compare Logical Pointer Sized Register vs Register
-void MacroAssembler::CmpLogicalP(Register dst, Register src) {
+void TurboAssembler::CmpLogicalP(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
clgr(dst, src);
#else
@@ -4395,12 +4222,12 @@ void MacroAssembler::CmpLogicalP(Register dst, Register src) {
}
// Compare Logical 32-bit Register vs Immediate
-void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
+void TurboAssembler::CmpLogical32(Register dst, const Operand& opnd) {
clfi(dst, opnd);
}
// Compare Logical Pointer Sized Register vs Immediate
-void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
+void TurboAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
clgfi(dst, opnd);
@@ -4410,7 +4237,7 @@ void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
}
// Compare Logical 32-bit Register vs Memory
-void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
+void TurboAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
if (is_uint12(opnd.offset()))
@@ -4420,7 +4247,7 @@ void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
}
// Compare Logical Pointer Sized Register vs Memory
-void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
+void TurboAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
// make sure offset is within 20 bit range
DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
@@ -4431,7 +4258,7 @@ void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
}
// Compare Logical Byte (Mem - Imm)
-void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
+void TurboAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
DCHECK(is_uint8(imm.immediate()));
if (is_uint12(mem.offset()))
cli(mem, imm);
@@ -4439,7 +4266,7 @@ void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) {
cliy(mem, imm);
}
-void MacroAssembler::Branch(Condition c, const Operand& opnd) {
+void TurboAssembler::Branch(Condition c, const Operand& opnd) {
intptr_t value = opnd.immediate();
if (is_int16(value))
brc(c, opnd);
@@ -4448,7 +4275,7 @@ void MacroAssembler::Branch(Condition c, const Operand& opnd) {
}
// Branch On Count. Decrement R1, and branch if R1 != 0.
-void MacroAssembler::BranchOnCount(Register r1, Label* l) {
+void TurboAssembler::BranchOnCount(Register r1, Label* l) {
int32_t offset = branch_offset(l);
if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
@@ -4462,11 +4289,11 @@ void MacroAssembler::BranchOnCount(Register r1, Label* l) {
}
}
-void MacroAssembler::LoadIntLiteral(Register dst, int value) {
+void TurboAssembler::LoadIntLiteral(Register dst, int value) {
Load(dst, Operand(value));
}
-void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
DCHECK((value & 0xffffffff) == 0);
@@ -4477,7 +4304,7 @@ void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
#endif
}
-void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
+void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
Register scratch) {
uint32_t hi_32 = value >> 32;
uint32_t lo_32 = static_cast<uint32_t>(value);
@@ -4495,20 +4322,20 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
}
}
-void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
+void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
Register scratch) {
uint64_t int_val = bit_cast<uint64_t, double>(value);
LoadDoubleLiteral(result, int_val, scratch);
}
-void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
+void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value,
Register scratch) {
uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
<< 32;
LoadDoubleLiteral(result, int_val, scratch);
}
-void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
+void TurboAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
@@ -4522,7 +4349,7 @@ void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#endif
}
-void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
+void TurboAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -4537,7 +4364,7 @@ void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
#endif
}
-void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+void TurboAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -4552,7 +4379,7 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
-void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+void TurboAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -4567,7 +4394,7 @@ void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
#endif
}
-void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
+void TurboAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
if (!dst.is(src)) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
@@ -4579,7 +4406,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
}
// Load a "pointer" sized value from the memory location
-void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -4605,7 +4432,7 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
}
// Store a "pointer" sized value to the memory location
-void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+void TurboAssembler::StoreP(Register src, const MemOperand& mem,
Register scratch) {
if (!is_int20(mem.offset())) {
DCHECK(!scratch.is(no_reg));
@@ -4628,14 +4455,14 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
}
// Store a "pointer" sized constant to the memory location
-void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
+void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
- DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);
+ DCHECK(opnd.rmode() == kRelocInfo_NONEPTR);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
- mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
+ mem.getIndexRegister().is(r0) && is_int16(opnd.immediate())) {
#if V8_TARGET_ARCH_S390X
mvghi(mem, opnd);
#else
@@ -4647,7 +4474,7 @@ void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
}
}
-void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
+void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
@@ -4662,7 +4489,7 @@ void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
#endif
}
-void MacroAssembler::StoreMultipleP(Register src1, Register src2,
+void TurboAssembler::StoreMultipleP(Register src1, Register src2,
const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
DCHECK(is_int20(mem.offset()));
@@ -4677,7 +4504,7 @@ void MacroAssembler::StoreMultipleP(Register src1, Register src2,
#endif
}
-void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
+void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
lm(dst1, dst2, mem);
@@ -4687,7 +4514,7 @@ void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
}
}
-void MacroAssembler::StoreMultipleW(Register src1, Register src2,
+void TurboAssembler::StoreMultipleW(Register src1, Register src2,
const MemOperand& mem) {
if (is_uint12(mem.offset())) {
stm(src1, src2, mem);
@@ -4698,7 +4525,7 @@ void MacroAssembler::StoreMultipleW(Register src1, Register src2,
}
// Load 32-bits and sign extend if necessary.
-void MacroAssembler::LoadW(Register dst, Register src) {
+void TurboAssembler::LoadW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgfr(dst, src);
#else
@@ -4707,7 +4534,7 @@ void MacroAssembler::LoadW(Register dst, Register src) {
}
// Load 32-bits and sign extend if necessary.
-void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadW(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -4733,7 +4560,7 @@ void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
}
// Load 32-bits and zero extend if necessary.
-void MacroAssembler::LoadlW(Register dst, Register src) {
+void TurboAssembler::LoadlW(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgfr(dst, src);
#else
@@ -4743,7 +4570,7 @@ void MacroAssembler::LoadlW(Register dst, Register src) {
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
-void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadlW(Register dst, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -4784,7 +4611,7 @@ void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
#endif
}
-void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
#else
@@ -4792,7 +4619,7 @@ void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
#endif
}
-void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
+void TurboAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llghr(dst, src);
#else
@@ -4800,7 +4627,7 @@ void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
#endif
}
-void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
#else
@@ -4808,7 +4635,7 @@ void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#endif
}
-void MacroAssembler::LoadB(Register dst, Register src) {
+void TurboAssembler::LoadB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lgbr(dst, src);
#else
@@ -4816,7 +4643,7 @@ void MacroAssembler::LoadB(Register dst, Register src) {
#endif
}
-void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
#else
@@ -4824,7 +4651,7 @@ void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#endif
}
-void MacroAssembler::LoadlB(Register dst, Register src) {
+void TurboAssembler::LoadlB(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
llgcr(dst, src);
#else
@@ -4832,22 +4659,21 @@ void MacroAssembler::LoadlB(Register dst, Register src) {
#endif
}
-void MacroAssembler::LoadLogicalReversedWordP(Register dst,
+void TurboAssembler::LoadLogicalReversedWordP(Register dst,
const MemOperand& mem) {
lrv(dst, mem);
LoadlW(dst, dst);
}
-
-void MacroAssembler::LoadLogicalReversedHalfWordP(Register dst,
- const MemOperand& mem) {
+void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
+ const MemOperand& mem) {
lrvh(dst, mem);
LoadLogicalHalfWordP(dst, dst);
}
// Load And Test (Reg <- Reg)
-void MacroAssembler::LoadAndTest32(Register dst, Register src) {
+void TurboAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
}
@@ -4855,7 +4681,7 @@ void MacroAssembler::LoadAndTest32(Register dst, Register src) {
// (Register dst(ptr) = Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
-void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
+void TurboAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
ltgfr(dst, src);
#else
@@ -4864,7 +4690,7 @@ void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
}
// Load And Test Pointer Sized (Reg <- Reg)
-void MacroAssembler::LoadAndTestP(Register dst, Register src) {
+void TurboAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
ltgr(dst, src);
#else
@@ -4873,12 +4699,12 @@ void MacroAssembler::LoadAndTestP(Register dst, Register src) {
}
// Load And Test 32-bit (Reg <- Mem)
-void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
lt_z(dst, mem);
}
// Load And Test Pointer Sized (Reg <- Mem)
-void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
+void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
ltg(dst, mem);
#else
@@ -4887,7 +4713,7 @@ void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
}
// Load On Condition Pointer Sized (Reg <- Reg)
-void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
+void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
Register src) {
#if V8_TARGET_ARCH_S390X
locgr(cond, dst, src);
@@ -4897,7 +4723,7 @@ void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
}
// Load Double Precision (64-bit) Floating Point number from memory
-void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
+void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
// for 32bit and 64bit we always use 64bit floating point regs
if (is_uint12(mem.offset())) {
ld(dst, mem);
@@ -4907,7 +4733,7 @@ void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
}
// Load Single Precision (32-bit) Floating Point number from memory
-void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
+void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
le_z(dst, mem);
} else {
@@ -4918,14 +4744,14 @@ void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) {
// Load Single Precision (32-bit) Floating Point number from memory,
// and convert to Double Precision (64-bit)
-void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
+void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
const MemOperand& mem) {
LoadFloat32(dst, mem);
ldebr(dst, dst);
}
// Store Double Precision (64-bit) Floating Point number to memory
-void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
+void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
std(dst, mem);
} else {
@@ -4934,7 +4760,7 @@ void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
}
// Store Single Precision (32-bit) Floating Point number to memory
-void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
+void TurboAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
ste(src, mem);
} else {
@@ -4944,14 +4770,14 @@ void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) {
// Convert Double precision (64-bit) to Single Precision (32-bit)
// and store resulting Float32 to memory
-void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
+void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
const MemOperand& mem,
DoubleRegister scratch) {
ledbr(scratch, src);
StoreFloat32(scratch, mem);
}
-void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
aeb(dst, opnd);
@@ -4961,7 +4787,7 @@ void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
adb(dst, opnd);
@@ -4971,7 +4797,7 @@ void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
seb(dst, opnd);
@@ -4981,7 +4807,7 @@ void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
sdb(dst, opnd);
@@ -4991,7 +4817,7 @@ void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
meeb(dst, opnd);
@@ -5001,7 +4827,7 @@ void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
mdb(dst, opnd);
@@ -5011,7 +4837,7 @@ void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
deb(dst, opnd);
@@ -5021,7 +4847,7 @@ void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
+void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
ddb(dst, opnd);
@@ -5031,7 +4857,7 @@ void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd,
}
}
-void MacroAssembler::LoadFloat32ToDouble(DoubleRegister dst,
+void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
@@ -5044,7 +4870,7 @@ void MacroAssembler::LoadFloat32ToDouble(DoubleRegister dst,
// Variable length depending on whether offset fits into immediate field
// MemOperand of RX or RXY format
-void MacroAssembler::StoreW(Register src, const MemOperand& mem,
+void TurboAssembler::StoreW(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -5077,7 +4903,7 @@ void MacroAssembler::StoreW(Register src, const MemOperand& mem,
// Loads a 16-bit half-word value from memory and sign extends to pointer
// sized register
-void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
+void TurboAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -5105,7 +4931,7 @@ void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
+void TurboAssembler::StoreHalfWord(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -5123,7 +4949,7 @@ void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
+void TurboAssembler::StoreByte(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.rb();
int offset = mem.offset();
@@ -5140,7 +4966,7 @@ void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
}
// Shift left logical for 32-bit integer types.
-void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
+void TurboAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
if (dst.is(src)) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -5152,7 +4978,7 @@ void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) {
}
// Shift left logical for 32-bit integer types.
-void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
+void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
if (dst.is(src)) {
sll(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -5165,7 +4991,7 @@ void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
}
// Shift right logical for 32-bit integer types.
-void MacroAssembler::ShiftRight(Register dst, Register src,
+void TurboAssembler::ShiftRight(Register dst, Register src,
const Operand& val) {
if (dst.is(src)) {
srl(dst, val);
@@ -5178,7 +5004,7 @@ void MacroAssembler::ShiftRight(Register dst, Register src,
}
// Shift right logical for 32-bit integer types.
-void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
+void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
if (dst.is(src)) {
srl(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -5191,7 +5017,7 @@ void MacroAssembler::ShiftRight(Register dst, Register src, Register val) {
}
// Shift left arithmetic for 32-bit integer types.
-void MacroAssembler::ShiftLeftArith(Register dst, Register src,
+void TurboAssembler::ShiftLeftArith(Register dst, Register src,
const Operand& val) {
if (dst.is(src)) {
sla(dst, val);
@@ -5204,7 +5030,7 @@ void MacroAssembler::ShiftLeftArith(Register dst, Register src,
}
// Shift left arithmetic for 32-bit integer types.
-void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
+void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
if (dst.is(src)) {
sla(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -5217,7 +5043,7 @@ void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
}
// Shift right arithmetic for 32-bit integer types.
-void MacroAssembler::ShiftRightArith(Register dst, Register src,
+void TurboAssembler::ShiftRightArith(Register dst, Register src,
const Operand& val) {
if (dst.is(src)) {
sra(dst, val);
@@ -5230,7 +5056,7 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src,
}
// Shift right arithmetic for 32-bit integer types.
-void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
+void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
if (dst.is(src)) {
sra(dst, val);
} else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
@@ -5243,9 +5069,9 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
}
// Clear rightmost # of bits
-void MacroAssembler::ClearRightImm(Register dst, Register src,
+void TurboAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) {
- int numBitsToClear = val.imm_ % (kPointerSize * 8);
+ int numBitsToClear = val.immediate() % (kPointerSize * 8);
// Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -5269,7 +5095,7 @@ void MacroAssembler::ClearRightImm(Register dst, Register src,
}
}
-void MacroAssembler::Popcnt32(Register dst, Register src) {
+void TurboAssembler::Popcnt32(Register dst, Register src) {
DCHECK(!src.is(r0));
DCHECK(!dst.is(r0));
@@ -5282,7 +5108,7 @@ void MacroAssembler::Popcnt32(Register dst, Register src) {
}
#ifdef V8_TARGET_ARCH_S390X
-void MacroAssembler::Popcnt64(Register dst, Register src) {
+void TurboAssembler::Popcnt64(Register dst, Register src) {
DCHECK(!src.is(r0));
DCHECK(!dst.is(r0));
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index ace1604934..493572b087 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -162,51 +162,48 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#endif
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler : public Assembler {
+class TurboAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
Isolate* isolate() const { return isolate_; }
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
// Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
- void JumpToJSEntry(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ TestIfSmi(value);
+ beq(smi_label /*, cr0*/); // branch if SMI
+ }
void Call(Register target);
- void CallJSEntry(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
- // below the caller's sp.
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 0) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- UNIMPLEMENTED();
- }
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -222,7 +219,7 @@ class MacroAssembler : public Assembler {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
- void Move(Register dst, Handle<Object> value);
+ void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -235,10 +232,6 @@ class MacroAssembler : public Assembler {
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
- // Store an object to the root table.
- void StoreRoot(Register source, Heap::RootListIndex index,
- Condition cond = al);
-
//--------------------------------------------------------------------------
// S390 Macro Assemblers for Instructions
//--------------------------------------------------------------------------
@@ -508,94 +501,6 @@ class MacroAssembler : public Assembler {
#endif
}
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object, Register value,
- Register address);
-
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
- Label* condition_met);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Register scratch3, Label* value_is_white);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
- void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
- lr_status, save_fp, remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
-
- // Notify the garbage collector that we wrote a code entry into a
- // JSFunction. Only scratch is clobbered by the operation.
- void RecordWriteCodeEntryField(Register js_function, Register code_entry,
- Register scratch);
-
- void RecordWriteForMap(Register object, Register map, Register dst,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
-
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object, Register address, Register value,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
void push(Register src) {
lay(sp, MemOperand(sp, -kPointerSize));
@@ -612,8 +517,8 @@ class MacroAssembler : public Assembler {
void Push(Register src) { push(src); }
// Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi* smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -711,16 +616,11 @@ class MacroAssembler : public Assembler {
// overwritten by tail call stack preparation.
void RestoreFrameStateForTailCall();
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(kRootRegister, Operand(roots_array_start));
+ }
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
@@ -812,47 +712,8 @@ class MacroAssembler : public Assembler {
int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
- // Enter exit frame.
- // stack_space - extra stack space, used for parameters before call to C.
- // At least one slot (for the return address) should be provided.
- void EnterExitFrame(bool save_doubles, int stack_space = 1,
- StackFrame::Type frame_type = StackFrame::EXIT);
-
- // Leave the current exit frame. Expects the return value in r0.
- // Expect the number of values, pushed prior to the exit frame, to
- // remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count,
- bool restore_context,
- bool argument_count_is_length = false);
-
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
-
- void LoadContext(Register dst, int context_chain_length);
-
- // Load the global object from the current context.
- void LoadGlobalObject(Register dst) {
- LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
- }
-
- // Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
- }
-
- void LoadNativeContextSlot(int index, Register dst);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function, Register map,
- Register scratch);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- }
-
// ----------------------------------------------------------------
// new S390 macro-assembler interfaces that are slightly higher level
// than assembler-s390 and may generate variable length sequences
@@ -879,11 +740,6 @@ class MacroAssembler : public Assembler {
Register scratch = r0);
void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
- void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
- Register scratch = no_reg);
- void StoreRepresentation(Register src, const MemOperand& mem,
- Representation r, Register scratch = no_reg);
-
void AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch = r0);
void SubSmiLiteral(Register dst, Register src, Smi* smi,
@@ -916,153 +772,261 @@ class MacroAssembler : public Assembler {
#endif
}
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- // void SetCallKind(Register dst, CallKind kind);
-
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeFunctionCode(Register function, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ // ---------------------------------------------------------------------------
+ // Runtime calls
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // Call a code stub.
+ void CallStubDelayed(CodeStub* stub);
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function, Register new_target,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ // Call a runtime routine.
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void InvokeFunction(Register function, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored in
+ // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+ // are word sized. If double arguments are used, this function assumes that
+ // all double arguments are stored before core registers; otherwise the
+ // correct alignment of the double values is not guaranteed.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ // There are two ways of passing double arguments on ARM, depending on
+ // whether soft or hard floating point ABI is used. These functions
+ // abstract parameter passing for the three different ways we call
+ // C functions from generated code.
+ void MovToFloatParameter(DoubleRegister src);
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+ void MovToFloatResult(DoubleRegister src);
- void IsObjectJSStringType(Register object, Register scratch, Label* fail);
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
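+
+ // A minimal usage sketch (illustrative, not part of this header): the
+ // expected protocol is PrepareCallCFunction, then materialize the
+ // arguments, then CallCFunction with a matching argument count. Register
+ // choices and the external reference below are assumptions.
+ //
+ //   __ PrepareCallCFunction(2, r1);      // two word-sized args, r1 scratch
+ //   __ LoadRR(r2, object);               // arg 0 in the first C argument reg
+ //   __ LoadRR(r3, index);                // arg 1
+ //   __ CallCFunction(
+ //       ExternalReference::some_c_function(isolate()), 2);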
- // Frame restart support
- void MaybeDropFrames();
+ void MovFromFloatParameter(DoubleRegister dst);
+ void MovFromFloatResult(DoubleRegister dst);
- // Exception handling
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncateDoubleToIDelayed(Zone* zone, Register result,
+ DoubleRegister double_input);
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
+ Label* done);
- // Push a new stack handler and link into stack handler chain.
- void PushStackHandler();
+ // ---------------------------------------------------------------------------
+ // Debugging
- // Unlink the stack handler on top of the stack from the stack handler chain.
- // Must preserve the result register.
- void PopStackHandler();
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason reason);
+
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
- // Inline caching support
+ // Bit testing/extraction
+ //
+ // Bit numbering is such that the least significant bit is bit 0
+ // (for consistency between 32/64-bit).
- void GetNumberHash(Register t0, Register scratch);
+ // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
+ // and place them into the least significant bits of dst.
+ inline void ExtractBitRange(Register dst, Register src, int rangeStart,
+ int rangeEnd) {
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
- inline void MarkCode(NopMarkerTypes type) { nop(type); }
+ // Try to use RISBG if possible.
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
+ int endBit = 63; // End is always LSB after shifting.
+ int startBit = 63 - rangeStart + rangeEnd;
+ risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
+ true);
+ } else {
+ if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
+ ShiftRightP(dst, src, Operand(rangeEnd));
+ else if (!dst.is(src)) // If we didn't shift, we might need to copy
+ LoadRR(dst, src);
+ int width = rangeStart - rangeEnd + 1;
+#if V8_TARGET_ARCH_S390X
+ uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
+ nihf(dst, Operand(mask >> 32));
+ nilf(dst, Operand(mask & 0xFFFFFFFF));
+ ltgr(dst, dst);
+#else
+ uint32_t mask = (1 << width) - 1;
+ AndP(dst, Operand(mask));
+#endif
+ }
+ }
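+
+ // Illustrative example (not part of this header): extract bits 7..4 of r3
+ // into the low bits of r4, i.e. r4 = (r3 >> 4) & 0xF. Register choices are
+ // assumptions.
+ //
+ //   __ ExtractBitRange(r4, r3, 7, 4);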
- // Check if the given instruction is a 'type' marker.
- // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
- // These instructions are generated to mark special location in the code,
- // like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
+ inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
+ ExtractBitRange(dst, src, bitNumber, bitNumber);
}
- static inline int GetCodeMarker(Instr instr) {
- int dst_reg_offset = 12;
- int dst_mask = 0xf << dst_reg_offset;
- int src_mask = 0xf;
- int dst_reg = (instr & dst_mask) >> dst_reg_offset;
- int src_reg = instr & src_mask;
- uint32_t non_register_mask = ~(dst_mask | src_mask);
- uint32_t mov_mask = al | 13 << 21;
+ // Extract consecutive bits (defined by mask) from src and place them
+ // into the least significant bits of dst.
+ inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
+ RCBit rc = LeaveRC) {
+ int start = kBitsPerPointer - 1;
+ int end;
+ uintptr_t bit = (1L << start);
- // Return <n> if we have a mov rn rn, else return -1.
- int type = ((instr & non_register_mask) == mov_mask) &&
- (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
- (dst_reg < LAST_CODE_MARKER)
- ? src_reg
- : -1;
- DCHECK((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
+ while (bit && (mask & bit) == 0) {
+ start--;
+ bit >>= 1;
+ }
+ end = start;
+ bit >>= 1;
+
+ while (bit && (mask & bit)) {
+ end--;
+ bit >>= 1;
+ }
+
+ // 1-bits in mask must be contiguous
+ DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+
+ ExtractBitRange(dst, src, start, end);
}
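+
+ // Illustrative example (not part of this header): for a contiguous mask
+ // such as 0x00F0 this computes start = 7 and end = 4, so the call below is
+ // equivalent to ExtractBitRange(r4, r3, 7, 4).
+ //
+ //   __ ExtractBitMask(r4, r3, 0x00F0);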
- // ---------------------------------------------------------------------------
- // Allocation support
+ // Test single bit in value.
+ inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
+ ExtractBitRange(scratch, value, bitNumber, bitNumber);
+ }
- // Allocate an object in new space or old pointer space. The object_size is
- // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the space is exhausted control continues at the gc_required
- // label. The allocated object is returned in result. If the flag
- // tag_allocated_object is true the result is tagged as as a heap object.
- // All registers are clobbered also when control continues at the gc_required
- // label.
- void Allocate(int object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
+ // Test consecutive bit range in value. Range is defined by
+ // rangeStart - rangeEnd.
+ inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
+ Register scratch = r0) {
+ ExtractBitRange(scratch, value, rangeStart, rangeEnd);
+ }
- void Allocate(Register object_size, Register result, Register result_end,
- Register scratch, Label* gc_required, AllocationFlags flags);
+ // Test consecutive bit range in value. Range is defined by mask.
+ inline void TestBitMask(Register value, uintptr_t mask,
+ Register scratch = r0) {
+ ExtractBitMask(scratch, value, mask, SetRC);
+ }
+ inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register scratch1,
- Register scratch2, AllocationFlags flags);
+ inline void TestIfSmi(MemOperand value) {
+ if (is_uint12(value.offset())) {
+ tm(value, Operand(1));
+ } else if (is_int20(value.offset())) {
+ tmy(value, Operand(1));
+ } else {
+ LoadB(r0, value);
+ tmll(r0, Operand(1));
+ }
+ }
- void FastAllocate(Register object_size, Register result, Register result_end,
- Register scratch, AllocationFlags flags);
+ inline void TestIfInt32(Register value) {
+ // High bits must be identical to fit into a 32-bit integer
+ cgfr(value, value);
+ }
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
- Register heap_number_map, Label* gc_required,
- MutableMode mode = IMMUTABLE);
- void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
- Register scratch1, Register scratch2,
- Register heap_number_map,
- Label* gc_required);
+ void SmiUntag(Register dst, Register src) {
+ ShiftRightArithP(dst, src, Operand(kSmiShift));
+ }
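+
+ // Sketch of the smi encoding assumed here (illustrative only): a smi holds
+ // the integer value shifted left by kSmiShift with the tag bits clear, so
+ // untagging is an arithmetic shift right.
+ //
+ //   // tagged = value << kSmiShift
+ //   // value  = tagged >> kSmiShift   (SmiUntag)
+ //   __ SmiUntag(r4, r3);  // r4 = untagged integer value of the smi in r3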
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch1, Register scratch2,
- Label* gc_required);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg = false);
+ // Returns the pc offset at which the frame ends.
+ int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
- // Initialize fields with filler values. |count| fields starting at
- // |current_address| are overwritten with the value in |filler|. At the end
- // the loop, |current_address| points at the next uninitialized field.
- // |count| is assumed to be non-zero.
- void InitializeNFieldsWithFiller(Register current_address, Register count,
- Register filler);
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met);
- // Initialize fields with filler values. Fields starting at |current_address|
- // not including |end_address| are overwritten with the value in |filler|. At
- // the end the loop, |current_address| takes the value of |end_address|.
- void InitializeFieldsWithFiller(Register current_address,
- Register end_address, Register filler);
+ private:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+ CRegister cr = cr7);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ bool has_frame_ = false;
+ Isolate* isolate_;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public TurboAssembler {
+ public:
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
+
+ // Emit code that loads |parameter_index|'th parameter from the stack to
+ // the register according to the CallInterfaceDescriptor definition.
+ // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
+ // below the caller's sp.
+ template <class Descriptor>
+ void LoadParameterFromStack(
+ Register reg, typename Descriptor::ParameterIndices parameter_index,
+ int sp_to_ra_offset_in_words = 0) {
+ DCHECK(Descriptor::kPassLastArgsOnStack);
+ UNIMPLEMENTED();
+ }
+ // Call a code stub.
+ void TailCallStub(CodeStub* stub, Condition cond = al);
+
+ void CallStub(CodeStub* stub, Condition cond = al);
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
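+
+ // Illustrative call sequence (not part of this header): arguments are
+ // pushed before the call and the runtime id selects the C++ runtime
+ // function; Runtime::kFooBar below is a placeholder id.
+ //
+ //   __ Push(r3, r4);                    // runtime call arguments
+ //   __ CallRuntime(Runtime::kFooBar, 2);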
+
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext, int num_arguments);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
// ---------------------------------------------------------------------------
// Support functions.
@@ -1123,6 +1087,10 @@ class MacroAssembler : public Assembler {
Push(r0);
}
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
+
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
CompareRoot(with, index);
@@ -1213,228 +1181,168 @@ class MacroAssembler : public Assembler {
Label* not_int32);
// ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
-
- // Call a code stub.
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, kSaveFPRegs);
- }
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
- }
+ // StatsCounter support
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
- }
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext, int num_arguments);
+ // Set up call kind marking in ecx. The method takes ecx as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ // void SetCallKind(Register dst, CallKind kind);
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid);
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, non-register arguments must be stored in
- // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized. If double arguments are used, this function assumes that
- // all double arguments are stored before core registers; otherwise the
- // correct alignment of the double values is not guaranteed.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
- // There are two ways of passing double arguments on ARM, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void MovToFloatParameter(DoubleRegister src);
- void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
- void MovToFloatResult(DoubleRegister src);
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
- void MovFromFloatParameter(DoubleRegister dst);
- void MovFromFloatResult(DoubleRegister dst);
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin,
- bool builtin_exit_frame = false);
+ // Frame restart support
+ void MaybeDropFrames();
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
+ // Exception handling
- // Emit code for a truncating division by a constant. The dividend register is
- // unchanged and ip gets clobbered. Dividend and result must be different.
- void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
- // ---------------------------------------------------------------------------
- // StatsCounter support
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ // Must preserve the result register.
+ void PopStackHandler();
- void SetCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
- Register scratch2);
+ // Enter exit frame.
+ // stack_space - extra stack space, used for parameters before call to C.
+ // At least one slot (for the return address) should be provided.
+ void EnterExitFrame(bool save_doubles, int stack_space = 1,
+ StackFrame::Type frame_type = StackFrame::EXIT);
- // ---------------------------------------------------------------------------
- // Debugging
+ // Leave the current exit frame. Expects the return value in r0.
+ // Expects the number of values pushed prior to the exit frame that are to
+ // be removed in a register (or no_reg, if there is nothing to remove).
+ void LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool restore_context,
+ bool argument_count_is_length = false);
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
- // Like Assert(), but always enabled.
- void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+ void LoadContext(Register dst, int context_chain_length);
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
- // ---------------------------------------------------------------------------
- // Number utilities
+ void LoadNativeContextSlot(int index, Register dst);
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
- Label* not_power_of_two_or_zero);
- // Check whether the value of reg is a power of two and not zero.
- // Control falls through if it is, with scratch containing the mask
- // value (reg - 1).
- // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
- // zero or negative, or jumps to the 'not_power_of_two' label if the value is
- // strictly positive but not a power of two.
- void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two);
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
// ---------------------------------------------------------------------------
- // Bit testing/extraction
- //
- // Bit numbering is such that the least significant bit is bit 0
- // (for consistency between 32/64-bit).
+ // Inline caching support
- // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
- // and place them into the least significant bits of dst.
- inline void ExtractBitRange(Register dst, Register src, int rangeStart,
- int rangeEnd) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+ void GetNumberHash(Register t0, Register scratch);
- // Try to use RISBG if possible.
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
- int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
- int endBit = 63; // End is always LSB after shifting.
- int startBit = 63 - rangeStart + rangeEnd;
- risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
- true);
- } else {
- if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
- ShiftRightP(dst, src, Operand(rangeEnd));
- else if (!dst.is(src)) // If we didn't shift, we might need to copy
- LoadRR(dst, src);
- int width = rangeStart - rangeEnd + 1;
-#if V8_TARGET_ARCH_S390X
- uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
- nihf(dst, Operand(mask >> 32));
- nilf(dst, Operand(mask & 0xFFFFFFFF));
- ltgr(dst, dst);
-#else
- uint32_t mask = (1 << width) - 1;
- AndP(dst, Operand(mask));
-#endif
- }
- }
+ inline void MarkCode(NopMarkerTypes type) { nop(type); }
- inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
- ExtractBitRange(dst, src, bitNumber, bitNumber);
+ // Check if the given instruction is a 'type' marker.
+ // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
+ // These instructions are generated to mark special locations in the code,
+ // like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
}
- // Extract consecutive bits (defined by mask) from src and place them
- // into the least significant bits of dst.
- inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
- RCBit rc = LeaveRC) {
- int start = kBitsPerPointer - 1;
- int end;
- uintptr_t bit = (1L << start);
-
- while (bit && (mask & bit) == 0) {
- start--;
- bit >>= 1;
- }
- end = start;
- bit >>= 1;
-
- while (bit && (mask & bit)) {
- end--;
- bit >>= 1;
- }
-
- // 1-bits in mask must be contiguous
- DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+ static inline int GetCodeMarker(Instr instr) {
+ int dst_reg_offset = 12;
+ int dst_mask = 0xf << dst_reg_offset;
+ int src_mask = 0xf;
+ int dst_reg = (instr & dst_mask) >> dst_reg_offset;
+ int src_reg = instr & src_mask;
+ uint32_t non_register_mask = ~(dst_mask | src_mask);
+ uint32_t mov_mask = al | 13 << 21;
- ExtractBitRange(dst, src, start, end);
+ // Return <n> if we have a mov rn rn, else return -1.
+ int type = ((instr & non_register_mask) == mov_mask) &&
+ (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
+ (dst_reg < LAST_CODE_MARKER)
+ ? src_reg
+ : -1;
+ DCHECK((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
}
+ // ---------------------------------------------------------------------------
+ // Allocation support
- // Test single bit in value.
- inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
- ExtractBitRange(scratch, value, bitNumber, bitNumber);
- }
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as a heap object.
+ // All registers are clobbered also when control continues at the gc_required
+ // label.
+ void Allocate(int object_size, Register result, Register scratch1,
+ Register scratch2, Label* gc_required, AllocationFlags flags);
- // Test consecutive bit range in value. Range is defined by
- // rangeStart - rangeEnd.
- inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
- Register scratch = r0) {
- ExtractBitRange(scratch, value, rangeStart, rangeEnd);
- }
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
- // Test consecutive bit range in value. Range is defined by mask.
- inline void TestBitMask(Register value, uintptr_t mask,
- Register scratch = r0) {
- ExtractBitMask(scratch, value, mask, SetRC);
- }
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Register heap_number_map, Label* gc_required,
+ MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
+ Register scratch1, Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// ---------------------------------------------------------------------------
// Smi utilities
@@ -1472,12 +1380,6 @@ class MacroAssembler : public Assembler {
bne(not_smi_label /*, cr0*/);
}
- void SmiUntag(Register reg) { SmiUntag(reg, reg); }
-
- void SmiUntag(Register dst, Register src) {
- ShiftRightArithP(dst, src, Operand(kSmiShift));
- }
-
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
@@ -1565,19 +1467,6 @@ class MacroAssembler : public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
-
- inline void TestIfSmi(MemOperand value) {
- if (is_uint12(value.offset())) {
- tm(value, Operand(1));
- } else if (is_int20(value.offset())) {
- tmy(value, Operand(1));
- } else {
- LoadB(r0, value);
- tmll(r0, Operand(1));
- }
- }
-
inline void TestIfPositiveSmi(Register value, Register scratch) {
STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
(intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
@@ -1585,11 +1474,6 @@ class MacroAssembler : public Assembler {
AndP(scratch, value);
}
- // Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
- TestIfSmi(value);
- beq(smi_label /*, cr0*/); // branch if SMI
- }
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
@@ -1604,11 +1488,6 @@ class MacroAssembler : public Assembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
- inline void TestIfInt32(Register value) {
- // High bits must be identical to fit into an 32-bit integer
- cgfr(value, value);
- }
-
#if V8_TARGET_ARCH_S390X
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
@@ -1621,15 +1500,18 @@ class MacroAssembler : public Assembler {
#define SmiWordOffset(offset) offset
#endif
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
+
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register suspend_flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1716,20 +1598,26 @@ class MacroAssembler : public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg = false);
- // Returns the pc offset at which the frame ends.
- int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
-
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
// Expects object in r2 and returns map with validated enum cache
// in r2. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
+ // Initialize fields with filler values. |count| fields starting at
+ // |current_address| are overwritten with the value in |filler|. At the end
+ // of the loop, |current_address| points at the next uninitialized field.
+ // |count| is assumed to be non-zero.
+ void InitializeNFieldsWithFiller(Register current_address, Register count,
+ Register filler);
+
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
// to another type.
@@ -1741,15 +1629,132 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);
- private:
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+ void IncrementalMarkingRecordWriteHelper(Register object, Register value,
+ Register address);
- void CallCFunctionHelper(Register function, int num_reg_arguments,
- int num_double_arguments);
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
- CRegister cr = cr7);
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr, Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CallJSEntry(Register target);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
+ void PushObject(Handle<Object> handle);
+ void JumpToJSEntry(Register target);
+ // ---------------------------------------------------------------------------
+ // Number utilities
+ // Check whether the value of reg is a power of two and not zero. If not
+ // control continues at the label not_power_of_two. If reg is a power of two
+ // the register scratch contains the value of (reg - 1) when control falls
+ // through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
+ Label* not_power_of_two_or_zero);
+ // Check whether the value of reg is a power of two and not zero.
+ // Control falls through if it is, with scratch containing the mask
+ // value (reg - 1).
+ // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
+ // zero or negative, or jumps to the 'not_power_of_two' label if the value is
+ // strictly positive but not a power of two.
+ void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context, int offset, Register value, Register scratch,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting) {
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+ lr_status, save_fp, remembered_set_action, smi_check,
+ pointers_to_here_check_for_value);
+ }
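+
+ // Typical write-barrier usage (illustrative only; register names and the
+ // field offset are assumptions): store a tagged pointer into a field, then
+ // record the write so the incremental marker and remembered set see the
+ // new reference.
+ //
+ //   __ StoreP(value, FieldMemOperand(object, JSObject::kFooOffset));
+ //   __ RecordWriteField(object, JSObject::kFooOffset, value, scratch,
+ //                       kLRHasNotBeenSaved, kDontSaveFPRegs);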
+
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
+ void RecordWriteForMap(Register object, Register map, Register dst,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object, Register address, Register value,
+ LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK,
+ PointersToHereCheck pointers_to_here_check_for_value =
+ kPointersToHereMaybeInteresting);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
+ Register scratch = no_reg);
+ void StoreRepresentation(Register src, const MemOperand& mem,
+ Representation r, Register scratch = no_reg);
+
+ private:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
@@ -1775,12 +1780,6 @@ class MacroAssembler : public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- bool generating_stub_;
- bool has_frame_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 9f41bace2a..0888beff2c 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -2944,7 +2944,6 @@ uintptr_t Simulator::PopAddress() {
int Simulator::Evaluate_Unknown(Instruction* instr) {
UNREACHABLE();
- return 0;
}
EVALUATE(VFA) {
@@ -4692,7 +4691,6 @@ EVALUATE(TMLL) {
}
#endif
UNREACHABLE();
- return length;
}
EVALUATE(TMHH) {
@@ -7313,7 +7311,6 @@ EVALUATE(DLGR) {
// 32 bit arch doesn't support __int128 type
USE(instr);
UNREACHABLE();
- return 0;
#endif
}
@@ -8533,7 +8530,6 @@ EVALUATE(DLG) {
// 32 bit arch doesn't support __int128 type
USE(instr);
UNREACHABLE();
- return 0;
#endif
}
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/setup-isolate-deserialize.cc
index a01bb5a3f8..eec5f60a33 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/setup-isolate-deserialize.cc
@@ -20,6 +20,14 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
void SetupIsolateDelegate::SetupInterpreter(
interpreter::Interpreter* interpreter, bool create_heap_objects) {
+#if defined(V8_USE_SNAPSHOT) && !defined(V8_USE_SNAPSHOT_WITH_UNWINDING_INFO)
+ if (FLAG_perf_prof_unwinding_info) {
+ OFStream os(stdout);
+ os << "Warning: The --perf-prof-unwinding-info flag can be passed at "
+ "mksnapshot time to get better results."
+ << std::endl;
+ }
+#endif
DCHECK(interpreter->IsDispatchTableInitialized());
}
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 4eeb624c43..90f38d0353 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -15,7 +15,8 @@ namespace internal {
template <typename T>
class Signature : public ZoneObject {
public:
- Signature(size_t return_count, size_t parameter_count, const T* reps)
+ constexpr Signature(size_t return_count, size_t parameter_count,
+ const T* reps)
: return_count_(return_count),
parameter_count_(parameter_count),
reps_(reps) {}
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index ca23889b90..6eab8cf976 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -21,8 +21,6 @@
#include "src/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/simulator-s390.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/simulator-x87.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 6c84c07df7..752ee3c8f6 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -3,3 +3,5 @@ set noparent
verwaest@chromium.org
vogelheim@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index b39f351a94..a01fa67c88 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -116,15 +116,18 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
}
- // If needed, print the dissassembly of deserialized code objects.
- PrintDisassembledCodeObjects();
-
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
isolate_->builtins()->MarkInitialized();
+
+ // If needed, print the disassembly of deserialized code objects.
+ // Needs to be called after the builtins are marked as initialized, in order
+ // to display the builtin names.
+ PrintDisassembledCodeObjects();
+
if (FLAG_rehash_snapshot && can_rehash_) Rehash();
}
@@ -185,51 +188,18 @@ MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
}
}
-// We only really just need HashForObject here.
-class StringRehashKey : public HashTableKey {
- public:
- uint32_t HashForObject(Object* other) override {
- return String::cast(other)->Hash();
- }
-
- static uint32_t StringHash(Object* obj) {
- UNREACHABLE();
- return String::cast(obj)->Hash();
- }
-
- bool IsMatch(Object* string) override {
- UNREACHABLE();
- return false;
- }
-
- uint32_t Hash() override {
- UNREACHABLE();
- return 0;
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override {
- UNREACHABLE();
- return isolate->factory()->empty_string();
- }
-};
-
void Deserializer::Rehash() {
DCHECK(can_rehash_);
isolate_->heap()->InitializeHashSeed();
- if (FLAG_profile_deserialization) {
- PrintF("Re-initializing hash seed to %x\n",
- isolate_->heap()->hash_seed()->value());
- }
- StringRehashKey string_rehash_key;
- isolate_->heap()->string_table()->Rehash(&string_rehash_key);
+ isolate_->heap()->string_table()->Rehash();
+ isolate_->heap()->weak_object_to_code_table()->Rehash();
SortMapDescriptors();
}
void Deserializer::RehashContext(Context* context) {
DCHECK(can_rehash_);
for (const auto& array : transition_arrays_) array->Sort();
- Handle<Name> dummy = isolate_->factory()->empty_string();
- context->global_object()->global_dictionary()->Rehash(dummy);
+ context->global_object()->global_dictionary()->Rehash();
SortMapDescriptors();
}
@@ -342,34 +312,33 @@ void Deserializer::PrintDisassembledCodeObjects() {
}
// Used to insert a deserialized internalized string into the string table.
-class StringTableInsertionKey : public HashTableKey {
+class StringTableInsertionKey : public StringTableKey {
public:
explicit StringTableInsertionKey(String* string)
- : string_(string), hash_(HashForObject(string)) {
+ : StringTableKey(ComputeHashField(string)), string_(string) {
DCHECK(string->IsInternalizedString());
}
bool IsMatch(Object* string) override {
// We know that all entries in a hash table had their hash keys created.
// Use that knowledge to have fast failure.
- if (hash_ != HashForObject(string)) return false;
+ if (Hash() != String::cast(string)->Hash()) return false;
// We want to compare the content of two internalized strings here.
return string_->SlowEquals(String::cast(string));
}
- uint32_t Hash() override { return hash_; }
-
- uint32_t HashForObject(Object* key) override {
- return String::cast(key)->Hash();
- }
-
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
+ MUST_USE_RESULT Handle<String> AsHandle(Isolate* isolate) override {
return handle(string_, isolate);
}
private:
+ uint32_t ComputeHashField(String* string) {
+ // Make sure hash_field() is computed.
+ string->Hash();
+ return string->hash_field();
+ }
+
String* string_;
- uint32_t hash_;
DisallowHeapAllocation no_gc;
};
@@ -399,7 +368,6 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
}
}
if (obj->IsAllocationSite()) {
- DCHECK(obj->IsAllocationSite());
// Allocation sites are present in the snapshot, and must be linked into
// a list at deserialization time.
AllocationSite* site = AllocationSite::cast(obj);
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index 05a18ab727..5d931b2af5 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -24,7 +24,11 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
Address addr = table->address(i);
// Ignore duplicate API references.
if (table->is_api_reference(i) && !map_->Get(addr).IsNothing()) continue;
+#ifndef V8_OS_WIN
+ // TODO(yangguo): On Windows memcpy and memmove can end up at the same
+ // address due to ICF. See http://crbug.com/726896.
DCHECK(map_->Get(addr).IsNothing());
+#endif
map_->Set(addr, i);
DCHECK(map_->Get(addr).IsJust());
}
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index d445cb95c9..b011c7777a 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
#include "src/address-map.h"
+#include "src/base/bits.h"
#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/visitors.h"
@@ -63,7 +64,7 @@ class HotObjectsList {
static const int kSize = 8;
private:
- STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
+ static_assert(base::bits::IsPowerOfTwo(kSize), "kSize must be power of two");
static const int kSizeMask = kSize - 1;
HeapObject* circular_queue_[kSize];
int index_;
@@ -271,13 +272,12 @@ class SerializedData {
protected:
void SetHeaderValue(int offset, uint32_t value) {
- uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
- memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
+ memcpy(data_ + offset, &value, sizeof(value));
}
uint32_t GetHeaderValue(int offset) const {
uint32_t value;
- memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
+ memcpy(&value, data_ + offset, sizeof(value));
return value;
}
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index a63d888d11..5808ab6ba4 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -147,6 +147,13 @@ bool Serializer::BackReferenceIsAlreadyAllocated(
}
}
}
+
+void Serializer::PrintStack() {
+ for (const auto& o : stack_) {
+ o->Print();
+ PrintF("\n");
+ }
+}
#endif // DEBUG
bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index cc4d30bfc5..bcb308df71 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -186,8 +186,6 @@ class Serializer : public SerializerDeserializer {
}
}
- bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
-
// This will return the space for an object.
SerializerReference AllocateLargeObject(int size);
SerializerReference AllocateMap();
@@ -222,6 +220,14 @@ class Serializer : public SerializerDeserializer {
void OutputStatistics(const char* name);
+#ifdef DEBUG
+ void PushStack(HeapObject* o) { stack_.Add(o); }
+ void PopStack() { stack_.RemoveLast(); }
+ void PrintStack();
+
+ bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
+#endif // DEBUG
+
Isolate* isolate_;
SnapshotByteSink sink_;
@@ -264,6 +270,10 @@ class Serializer : public SerializerDeserializer {
size_t* instance_type_size_;
#endif // OBJECT_PRINT
+#ifdef DEBUG
+ List<HeapObject*> stack_;
+#endif // DEBUG
+
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
@@ -277,8 +287,16 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
sink_(sink),
reference_representation_(how_to_code + where_to_point),
bytes_processed_so_far_(0),
- code_has_been_output_(false) {}
- ~ObjectSerializer() override {}
+ code_has_been_output_(false) {
+#ifdef DEBUG
+ serializer_->PushStack(obj);
+#endif // DEBUG
+ }
+ ~ObjectSerializer() override {
+#ifdef DEBUG
+ serializer_->PopStack();
+#endif // DEBUG
+ }
void Serialize();
void SerializeContent();
void SerializeDeferred();
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 66a14bc599..5399fe11f2 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -12,12 +12,6 @@
namespace v8 {
namespace internal {
-void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- memcpy(to, data_ + position_, number_of_bytes);
- position_ += number_of_bytes;
-}
-
-
void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
DCHECK(integer < 1 << 30);
integer <<= 2;
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 5d4c08d43a..4922ebc74b 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -38,7 +38,10 @@ class SnapshotByteSource final {
void Advance(int by) { position_ += by; }
- void CopyRaw(byte* to, int number_of_bytes);
+ void CopyRaw(byte* to, int number_of_bytes) {
+ memcpy(to, data_ + position_, number_of_bytes);
+ position_ += number_of_bytes;
+ }
inline int GetInt() {
// This way of decoding variable-length encoded integers does not
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 89eaaf55e4..34bb390735 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -48,7 +48,6 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
Code* code = Code::cast(obj);
if (code->kind() == Code::FUNCTION) {
code->ClearInlineCaches();
- code->set_profiler_ticks(0);
}
}
@@ -198,7 +197,7 @@ void StartupSerializer::CheckRehashability(HeapObject* table) {
// We can only correctly rehash if the four hash tables below are the only
// ones that we deserialize.
if (table == isolate_->heap()->empty_slow_element_dictionary()) return;
- if (table == isolate_->heap()->empty_properties_dictionary()) return;
+ if (table == isolate_->heap()->empty_property_dictionary()) return;
if (table == isolate_->heap()->weak_object_to_code_table()) return;
if (table == isolate_->heap()->string_table()) return;
can_be_rehashed_ = false;
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index c8c1329157..ef66ed6a1e 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -35,7 +35,7 @@ static inline void StringBuilderConcatHelper(String* special, sinkchar* sink,
Object* element = fixed_array->get(i);
if (element->IsSmi()) {
// Smi encoding of position and length.
- int encoded_slice = Smi::cast(element)->value();
+ int encoded_slice = Smi::ToInt(element);
int pos;
int len;
if (encoded_slice > 0) {
@@ -46,7 +46,7 @@ static inline void StringBuilderConcatHelper(String* special, sinkchar* sink,
// Position and length encoded in two smis.
Object* obj = fixed_array->get(++i);
DCHECK(obj->IsSmi());
- pos = Smi::cast(obj)->value();
+ pos = Smi::ToInt(obj);
len = -encoded_slice;
}
String::WriteToFlat(special, sink + position, pos, pos + len);
@@ -73,7 +73,7 @@ static inline int StringBuilderConcatLength(int special_length,
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
// Smi encoding of position and length.
- int smi_value = Smi::cast(elt)->value();
+ int smi_value = Smi::ToInt(elt);
int pos;
int len;
if (smi_value > 0) {
@@ -88,7 +88,7 @@ static inline int StringBuilderConcatLength(int special_length,
if (i >= array_length) return -1;
Object* next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) return -1;
- pos = Smi::cast(next_smi)->value();
+ pos = Smi::ToInt(next_smi);
if (pos < 0) return -1;
}
DCHECK(pos >= 0);
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
index c4e353f1ef..7d1f106e02 100644
--- a/deps/v8/src/string-hasher-inl.h
+++ b/deps/v8/src/string-hasher-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_STRING_HASHER_INL_H_
#define V8_STRING_HASHER_INL_H_
+#include "src/char-predicates-inl.h"
#include "src/objects.h"
#include "src/string-hasher.h"
@@ -71,14 +72,14 @@ void StringHasher::AddCharacter(uint16_t c) {
bool StringHasher::UpdateIndex(uint16_t c) {
DCHECK(is_array_index_);
- if (c < '0' || c > '9') {
+ if (!IsDecimalDigit(c)) {
is_array_index_ = false;
return false;
}
int d = c - '0';
if (is_first_char_) {
is_first_char_ = false;
- if (c == '0' && length_ > 1) {
+ if (d == 0 && length_ > 1) {
is_array_index_ = false;
return false;
}
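Note: the rewritten UpdateIndex rejects any candidate whose first digit is 0 unless the string is exactly "0", because array-index strings may not have leading zeros. A small standalone check expressing the same shape rule (sketch only; V8 additionally folds this into its incremental hash computation):

#include <string>

// True if 's' has the shape of a decimal array-index string:
// digits only, and no leading zero unless the string is exactly "0".
bool HasValidArrayIndexShape(const std::string& s) {
  if (s.empty()) return false;
  if (s[0] == '0') return s.size() == 1;  // only "0" may start with '0'
  for (char c : s) {
    if (c < '0' || c > '9') return false;
  }
  return true;
}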
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 28cc44a220..6697191494 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -389,7 +389,7 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
PrintUsingMap(JSObject::cast(printee));
if (printee->IsJSArray()) {
JSArray* array = JSArray::cast(printee);
- if (array->HasFastObjectElements()) {
+ if (array->HasObjectElements()) {
unsigned int limit = FixedArray::cast(array->elements())->length();
unsigned int length =
static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index 31dab94f12..c98660b5bf 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -154,8 +154,7 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
-#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \
- !defined(_MSC_VER)
+#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
@@ -223,7 +222,6 @@ static DiyFp AdjustmentPowerOfTen(int exponent) {
case 7: return DiyFp(V8_2PART_UINT64_C(0x98968000, 00000000), -40);
default:
UNREACHABLE();
- return DiyFp(0, 0);
}
}
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 6550e3e6fa..6ca39506f4 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -107,9 +107,9 @@ enum CategoryGroupEnabledFlags {
// Defines atomic operations used internally by the tracing system.
#define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord
-#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var))
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::Relaxed_Load(&(var))
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
- v8::base::NoBarrier_Store(&(var), (value))
+ v8::base::Relaxed_Store(&(var), (value))
////////////////////////////////////////////////////////////////////////////////
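Note: the tracing macros switch from the NoBarrier_* names to Relaxed_Load/Relaxed_Store; the semantics are those of relaxed atomics - atomicity without any ordering guarantees. An equivalent illustration in standard C++ (std::atomic rather than V8's base atomics):

#include <atomic>

std::atomic<long> g_category_state{0};

long LoadState() {
  // Atomic, but imposes no ordering with respect to other memory operations.
  return g_category_state.load(std::memory_order_relaxed);
}

void StoreState(long value) {
  g_category_state.store(value, std::memory_order_relaxed);
}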
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 42d1c89507..50e3b60c52 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -181,7 +181,7 @@ Map* TransitionArray::SearchTransition(Map* map, PropertyKind kind, Name* name,
// static
-Map* TransitionArray::SearchSpecial(Map* map, Symbol* name) {
+Map* TransitionArray::SearchSpecial(const Map* map, Symbol* name) {
Object* raw_transitions = map->raw_transitions();
if (IsFullTransitionArray(raw_transitions)) {
TransitionArray* transitions = TransitionArray::cast(raw_transitions);
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index e40bbb0468..5f763e1268 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -7,10 +7,10 @@
#include "src/checks.h"
#include "src/elements-kind.h"
-#include "src/isolate.h"
#include "src/objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/map.h"
+#include "src/objects/name.h"
namespace v8 {
namespace internal {
@@ -51,7 +51,7 @@ class TransitionArray: public FixedArray {
return MaybeHandle<Map>();
}
- static Map* SearchSpecial(Map* map, Symbol* name);
+ static Map* SearchSpecial(const Map* map, Symbol* name);
static Handle<Map> FindTransitionToField(Handle<Map> map, Handle<Name> name);
@@ -112,7 +112,7 @@ class TransitionArray: public FixedArray {
static int NumberOfPrototypeTransitions(FixedArray* proto_transitions) {
if (proto_transitions->length() == 0) return 0;
Object* raw = proto_transitions->get(kProtoTransitionNumberOfEntriesOffset);
- return Smi::cast(raw)->value();
+ return Smi::ToInt(raw);
}
static int NumberOfPrototypeTransitionsForTest(Map* map);
@@ -180,6 +180,8 @@ class TransitionArray: public FixedArray {
// Print all the transitions.
static void PrintTransitions(std::ostream& os, Object* transitions,
bool print_header = true); // NOLINT
+ static void PrintTransitionTree(Map* map);
+ static void PrintTransitionTree(std::ostream& os, Map* map, int level = 0);
#endif
#ifdef OBJECT_PRINT
@@ -267,7 +269,7 @@ class TransitionArray: public FixedArray {
int number_of_transitions() {
if (length() < kFirstIndex) return 0;
- return Smi::cast(get(kTransitionLengthIndex))->value();
+ return Smi::ToInt(get(kTransitionLengthIndex));
}
static inline PropertyDetails GetSimpleTargetDetails(Map* transition) {
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index e44dd97155..11145cb047 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -1,6 +1,5 @@
set noparent
-jochen@chromium.org
bradnelson@chromium.org
# Changes to this directory should also be reviewed by:
@@ -8,3 +7,5 @@ bradnelson@chromium.org
# eholk@chromium.org
# mseaborn@chromium.org
# mark@chromium.org
+
+# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/trap-handler/handler-shared.cc b/deps/v8/src/trap-handler/handler-shared.cc
index d1b549a170..7b399f5eea 100644
--- a/deps/v8/src/trap-handler/handler-shared.cc
+++ b/deps/v8/src/trap-handler/handler-shared.cc
@@ -23,14 +23,7 @@ namespace v8 {
namespace internal {
namespace trap_handler {
-// We declare this as int rather than bool as a workaround for a glibc bug, in
-// which the dynamic loader cannot handle executables whose TLS area is only
-// 1 byte in size; see https://sourceware.org/bugzilla/show_bug.cgi?id=14898.
-THREAD_LOCAL int g_thread_in_wasm_code = false;
-
-static_assert(sizeof(g_thread_in_wasm_code) > 1,
- "sizeof(thread_local_var) must be > 1, see "
- "https://sourceware.org/bugzilla/show_bug.cgi?id=14898");
+THREAD_LOCAL bool g_thread_in_wasm_code = false;
size_t gNumCodeObjects = 0;
CodeProtectionInfoListEntry* gCodeObjects = nullptr;
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index ed9459918b..5494c5fdb3 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -65,7 +65,7 @@ inline bool UseTrapHandler() {
return FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED;
}
-extern THREAD_LOCAL int g_thread_in_wasm_code;
+extern THREAD_LOCAL bool g_thread_in_wasm_code;
inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 29a15c6e3c..82eb3d72f9 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -15,15 +15,18 @@ std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
return os << "SignedSmall";
case BinaryOperationHint::kSigned32:
return os << "Signed32";
+ case BinaryOperationHint::kNumber:
+ return os << "Number";
case BinaryOperationHint::kNumberOrOddball:
return os << "NumberOrOddball";
+ case BinaryOperationHint::kNonEmptyString:
+ return os << "NonEmptyString";
case BinaryOperationHint::kString:
return os << "String";
case BinaryOperationHint::kAny:
return os << "Any";
}
UNREACHABLE();
- return os;
}
std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
@@ -40,13 +43,14 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "InternalizedString";
case CompareOperationHint::kString:
return os << "String";
+ case CompareOperationHint::kSymbol:
+ return os << "Symbol";
case CompareOperationHint::kReceiver:
return os << "Receiver";
case CompareOperationHint::kAny:
return os << "Any";
}
UNREACHABLE();
- return os;
}
std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
@@ -75,7 +79,6 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
return os << "NeedsMap";
}
UNREACHABLE();
- return os;
}
std::string ToString(ToBooleanHint hint) {
@@ -104,7 +107,6 @@ std::string ToString(ToBooleanHint hint) {
return "NeedsMap";
}
UNREACHABLE();
- return "";
}
std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
@@ -156,7 +158,6 @@ std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
break;
}
UNREACHABLE();
- return os;
}
} // namespace internal
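Note: the dead "return os;" statements after UNREACHABLE() can be dropped because every enum value is handled by a returning case (and UNREACHABLE() is treated as not returning). A condensed sketch of the pattern with a standalone enum:

#include <cstdint>
#include <cstdlib>
#include <ostream>

enum class Hint : uint8_t { kNone, kNumber, kAny };

std::ostream& operator<<(std::ostream& os, Hint hint) {
  switch (hint) {
    case Hint::kNone:   return os << "None";
    case Hint::kNumber: return os << "Number";
    case Hint::kAny:    return os << "Any";
  }
  std::abort();  // stands in for UNREACHABLE(); never hit for valid values
}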
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index c7c6cccae0..a384af9ef5 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -16,7 +16,9 @@ enum class BinaryOperationHint : uint8_t {
kNone,
kSignedSmall,
kSigned32,
+ kNumber,
kNumberOrOddball,
+ kNonEmptyString,
kString,
kAny
};
@@ -35,6 +37,7 @@ enum class CompareOperationHint : uint8_t {
kNumberOrOddball,
kInternalizedString,
kString,
+ kSymbol,
kReceiver,
kAny
};
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
deleted file mode 100644
index 0b8dd147bd..0000000000
--- a/deps/v8/src/type-info.cc
+++ /dev/null
@@ -1,550 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/type-info.h"
-
-#include "src/assembler-inl.h"
-#include "src/ast/ast.h"
-#include "src/code-stubs.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
-#include "src/objects/map.h"
-
-namespace v8 {
-namespace internal {
-
-TypeFeedbackOracle::TypeFeedbackOracle(Isolate* isolate, Zone* zone,
- Handle<Code> code,
- Handle<FeedbackVector> feedback_vector,
- Handle<Context> native_context)
- : native_context_(native_context), isolate_(isolate), zone_(zone) {
- BuildDictionary(code);
- DCHECK(dictionary_->IsUnseededNumberDictionary());
- // We make a copy of the feedback vector because a GC could clear
- // the type feedback info contained therein.
- // TODO(mvstanton): revisit the decision to copy when we weakly
- // traverse the feedback vector at GC time.
- feedback_vector_ = FeedbackVector::Copy(isolate, feedback_vector);
-}
-
-
-static uint32_t IdToKey(TypeFeedbackId ast_id) {
- return static_cast<uint32_t>(ast_id.ToInt());
-}
-
-
-Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
- int entry = dictionary_->FindEntry(IdToKey(ast_id));
- if (entry != UnseededNumberDictionary::kNotFound) {
- Object* value = dictionary_->ValueAt(entry);
- if (value->IsCell()) {
- Cell* cell = Cell::cast(value);
- return Handle<Object>(cell->value(), isolate());
- } else {
- return Handle<Object>(value, isolate());
- }
- }
- return Handle<Object>::cast(isolate()->factory()->undefined_value());
-}
-
-Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackSlot slot) {
- DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
- Handle<Object> undefined =
- Handle<Object>::cast(isolate()->factory()->undefined_value());
- Object* obj = feedback_vector_->Get(slot);
-
- // Slots do not embed direct pointers to maps, functions. Instead
- // a WeakCell is always used.
- if (obj->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(obj);
- if (cell->cleared()) return undefined;
- obj = cell->value();
- }
-
- if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol()) {
- return Handle<Object>(obj, isolate());
- }
-
- return undefined;
-}
-
-InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(FeedbackSlot slot) {
- if (!slot.IsInvalid()) {
- FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
- if (IsLoadICKind(kind)) {
- LoadICNexus nexus(feedback_vector_, slot);
- return nexus.StateFromFeedback();
- } else if (IsKeyedLoadICKind(kind)) {
- KeyedLoadICNexus nexus(feedback_vector_, slot);
- return nexus.StateFromFeedback();
- }
- }
-
- // If we can't find an IC, assume we've seen *something*, but we don't know
- // what. PREMONOMORPHIC roughly encodes this meaning.
- return PREMONOMORPHIC;
-}
-
-bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackSlot slot) {
- if (!slot.IsInvalid()) {
- FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
- if (IsStoreICKind(kind) || IsStoreGlobalICKind(kind)) {
- StoreICNexus nexus(feedback_vector_, slot);
- return nexus.StateFromFeedback() == UNINITIALIZED;
- } else if (IsKeyedStoreICKind(kind)) {
- KeyedStoreICNexus nexus(feedback_vector_, slot);
- return nexus.StateFromFeedback() == UNINITIALIZED;
- }
- }
- return true;
-}
-
-bool TypeFeedbackOracle::CallIsUninitialized(FeedbackSlot slot) {
- Handle<Object> value = GetInfo(slot);
- return value->IsUndefined(isolate()) ||
- value.is_identical_to(
- FeedbackVector::UninitializedSentinel(isolate()));
-}
-
-bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackSlot slot) {
- Handle<Object> value = GetInfo(slot);
- return value->IsAllocationSite() || value->IsJSFunction();
-}
-
-bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackSlot slot) {
- Handle<Object> info = GetInfo(slot);
- return info->IsAllocationSite() || info->IsJSFunction();
-}
-
-byte TypeFeedbackOracle::ForInType(FeedbackSlot feedback_vector_slot) {
- Handle<Object> value = GetInfo(feedback_vector_slot);
- return value.is_identical_to(FeedbackVector::UninitializedSentinel(isolate()))
- ? ForInStatement::FAST_FOR_IN
- : ForInStatement::SLOW_FOR_IN;
-}
-
-void TypeFeedbackOracle::GetStoreModeAndKeyType(
- FeedbackSlot slot, KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type) {
- if (!slot.IsInvalid() && feedback_vector_->IsKeyedStoreIC(slot)) {
- KeyedStoreICNexus nexus(feedback_vector_, slot);
- *store_mode = nexus.GetKeyedAccessStoreMode();
- *key_type = nexus.GetKeyType();
- } else {
- *store_mode = STANDARD_STORE;
- *key_type = ELEMENT;
- }
-}
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackSlot slot) {
- Handle<Object> info = GetInfo(slot);
- if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate()->native_context()->array_function());
- }
-
- return Handle<JSFunction>::cast(info);
-}
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(FeedbackSlot slot) {
- Handle<Object> info = GetInfo(slot);
- if (info->IsJSFunction()) {
- return Handle<JSFunction>::cast(info);
- }
-
- DCHECK(info->IsAllocationSite());
- return Handle<JSFunction>(isolate()->native_context()->array_function());
-}
-
-Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
- FeedbackSlot slot) {
- Handle<Object> info = GetInfo(slot);
- if (info->IsAllocationSite()) {
- return Handle<AllocationSite>::cast(info);
- }
- return Handle<AllocationSite>::null();
-}
-
-Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
- FeedbackSlot slot) {
- Handle<Object> info = GetInfo(slot);
- if (info->IsAllocationSite()) {
- return Handle<AllocationSite>::cast(info);
- }
- return Handle<AllocationSite>::null();
-}
-
-namespace {
-
-AstType* CompareOpHintToType(CompareOperationHint hint) {
- switch (hint) {
- case CompareOperationHint::kNone:
- return AstType::None();
- case CompareOperationHint::kSignedSmall:
- return AstType::SignedSmall();
- case CompareOperationHint::kNumber:
- return AstType::Number();
- case CompareOperationHint::kNumberOrOddball:
- return AstType::NumberOrOddball();
- case CompareOperationHint::kInternalizedString:
- return AstType::InternalizedString();
- case CompareOperationHint::kString:
- return AstType::String();
- case CompareOperationHint::kReceiver:
- return AstType::Receiver();
- case CompareOperationHint::kAny:
- return AstType::Any();
- }
- UNREACHABLE();
- return AstType::None();
-}
-
-AstType* BinaryOpFeedbackToType(int hint) {
- switch (hint) {
- case BinaryOperationFeedback::kNone:
- return AstType::None();
- case BinaryOperationFeedback::kSignedSmall:
- return AstType::SignedSmall();
- case BinaryOperationFeedback::kNumber:
- return AstType::Number();
- case BinaryOperationFeedback::kString:
- return AstType::String();
- case BinaryOperationFeedback::kNumberOrOddball:
- return AstType::NumberOrOddball();
- case BinaryOperationFeedback::kAny:
- default:
- return AstType::Any();
- }
- UNREACHABLE();
- return AstType::None();
-}
-
-} // end anonymous namespace
-
-void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackSlot slot,
- AstType** left_type, AstType** right_type,
- AstType** combined_type) {
- Handle<Object> info = GetInfo(id);
- // A check for a valid slot is not sufficient here. InstanceOf collects
- // type feedback in a General slot.
- if (!info->IsCode()) {
- // For some comparisons we don't have type feedback, e.g.
- // LiteralCompareTypeof.
- *left_type = *right_type = *combined_type = AstType::None();
- return;
- }
-
- // Feedback from Ignition. The feedback slot will be allocated and initialized
- // to AstType::None() even when ignition is not enabled. So it is safe to get
- // feedback from the type feedback vector.
- DCHECK(!slot.IsInvalid());
- CompareICNexus nexus(feedback_vector_, slot);
- *left_type = *right_type = *combined_type =
- CompareOpHintToType(nexus.GetCompareOperationFeedback());
-
- // Merge the feedback from full-codegen if available.
- Handle<Code> code = Handle<Code>::cast(info);
- Handle<Map> map;
- Map* raw_map = code->FindFirstMap();
- if (raw_map != NULL) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
-
- if (code->is_compare_ic_stub()) {
- CompareICStub stub(code->stub_key(), isolate());
- AstType* left_type_from_ic =
- CompareICState::StateToType(zone(), stub.left());
- AstType* right_type_from_ic =
- CompareICState::StateToType(zone(), stub.right());
- AstType* combined_type_from_ic =
- CompareICState::StateToType(zone(), stub.state(), map);
- // Full-codegen collects lhs and rhs feedback seperately and Crankshaft
- // could use this information to optimize better. So if combining the
- // feedback has made the feedback less precise, we should use the feedback
- // only from Full-codegen. If the union of the feedback from Full-codegen
- // is same as that of Ignition, there is no need to combine feedback from
- // from Ignition.
- AstType* combined_type_from_fcg = AstType::Union(
- left_type_from_ic,
- AstType::Union(right_type_from_ic, combined_type_from_ic, zone()),
- zone());
- if (combined_type_from_fcg == *left_type) {
- // Full-codegen collects information about lhs, rhs and result types
- // seperately. So just retain that information.
- *left_type = left_type_from_ic;
- *right_type = right_type_from_ic;
- *combined_type = combined_type_from_ic;
- } else {
- // Combine Ignition and Full-codegen feedbacks.
- *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
- *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
- *combined_type =
- AstType::Union(*combined_type, combined_type_from_ic, zone());
- }
- }
-}
-
-void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackSlot slot,
- AstType** left, AstType** right,
- AstType** result,
- Maybe<int>* fixed_right_arg,
- Handle<AllocationSite>* allocation_site,
- Token::Value op) {
- Handle<Object> object = GetInfo(id);
- if (slot.IsInvalid()) {
- // For some binary ops we don't have ICs or feedback slots,
- // e.g. Token::COMMA, but for the operations covered by the BinaryOpIC we
- // should always have them.
- DCHECK(!object->IsCode());
- DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
- op > BinaryOpICState::LAST_TOKEN);
- *left = *right = *result = AstType::None();
- *fixed_right_arg = Nothing<int>();
- *allocation_site = Handle<AllocationSite>::null();
- return;
- }
-
- // Feedback from Ignition. The feedback slot will be allocated and initialized
- // to AstType::None() even when ignition is not enabled. So it is safe to get
- // feedback from the type feedback vector.
- DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector_, slot);
- *left = *right = *result =
- BinaryOpFeedbackToType(Smi::cast(nexus.GetFeedback())->value());
- *fixed_right_arg = Nothing<int>();
- *allocation_site = Handle<AllocationSite>::null();
-
- if (!object->IsCode()) return;
-
- // Merge the feedback from full-codegen if available.
- Handle<Code> code = Handle<Code>::cast(object);
- DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpICState state(isolate(), code->extra_ic_state());
- DCHECK_EQ(op, state.op());
-
- // Full-codegen collects lhs and rhs feedback seperately and Crankshaft
- // could use this information to optimize better. So if combining the
- // feedback has made the feedback less precise, we should use the feedback
- // only from Full-codegen. If the union of the feedback from Full-codegen
- // is same as that of Ignition, there is no need to combine feedback from
- // from Ignition.
- AstType* combined_type_from_fcg = AstType::Union(
- state.GetLeftType(),
- AstType::Union(state.GetRightType(), state.GetResultType(), zone()),
- zone());
- if (combined_type_from_fcg == *left) {
- // Full-codegen collects information about lhs, rhs and result types
- // seperately. So just retain that information.
- *left = state.GetLeftType();
- *right = state.GetRightType();
- *result = state.GetResultType();
- } else {
- // Combine Ignition and Full-codegen feedback.
- *left = AstType::Union(*left, state.GetLeftType(), zone());
- *right = AstType::Union(*right, state.GetRightType(), zone());
- *result = AstType::Union(*result, state.GetResultType(), zone());
- }
- // Ignition does not collect this feedback.
- *fixed_right_arg = state.fixed_right_arg();
-
- AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
- if (first_allocation_site != NULL) {
- *allocation_site = handle(first_allocation_site);
- } else {
- *allocation_site = Handle<AllocationSite>::null();
- }
-}
-
-AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id, FeedbackSlot slot) {
- Handle<Object> object = GetInfo(id);
- if (slot.IsInvalid()) {
- DCHECK(!object->IsCode());
- return AstType::None();
- }
-
- DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector_, slot);
- AstType* type =
- BinaryOpFeedbackToType(Smi::cast(nexus.GetFeedback())->value());
-
- if (!object->IsCode()) return type;
-
- Handle<Code> code = Handle<Code>::cast(object);
- DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpICState state(isolate(), code->extra_ic_state());
- return AstType::Union(type, state.GetLeftType(), zone());
-}
-
-
-bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
- bool all_strings = receiver_types->length() > 0;
- for (int i = 0; i < receiver_types->length(); i++) {
- all_strings &= receiver_types->at(i)->IsStringMap();
- }
- return all_strings;
-}
-
-void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackSlot slot,
- Handle<Name> name,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- if (!slot.IsInvalid()) {
- LoadICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(&nexus, receiver_types);
- }
-}
-
-void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
- FeedbackSlot slot, SmallMapList* receiver_types, bool* is_string,
- IcCheckType* key_type) {
- receiver_types->Clear();
- if (slot.IsInvalid()) {
- *is_string = false;
- *key_type = ELEMENT;
- } else {
- KeyedLoadICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(&nexus, receiver_types);
- *is_string = HasOnlyStringMaps(receiver_types);
- *key_type = nexus.GetKeyType();
- }
-}
-
-void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackSlot slot,
- Handle<Name> name,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- StoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(&nexus, receiver_types);
-}
-
-void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
- FeedbackSlot slot, SmallMapList* receiver_types,
- KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
- receiver_types->Clear();
- CollectReceiverTypes(slot, receiver_types);
- GetStoreModeAndKeyType(slot, store_mode, key_type);
-}
-
-void TypeFeedbackOracle::CountReceiverTypes(FeedbackSlot slot,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
-}
-
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackSlot slot,
- SmallMapList* types) {
- FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
- if (IsStoreICKind(kind) || IsStoreOwnICKind(kind) ||
- IsStoreGlobalICKind(kind)) {
- StoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(&nexus, types);
- } else {
- DCHECK(IsKeyedStoreICKind(kind));
- KeyedStoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(&nexus, types);
- }
-}
-
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackNexus* nexus,
- SmallMapList* types) {
- MapHandles maps;
- if (nexus->ExtractMaps(&maps) == 0) {
- return;
- }
-
- types->Reserve(static_cast<int>(maps.size()), zone());
- for (Handle<Map> map : maps) {
- types->AddMapIfMissing(map, zone());
- }
-}
-
-
-uint16_t TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) {
- Handle<Object> object = GetInfo(id);
- return object->IsCode() ? Handle<Code>::cast(object)->to_boolean_state() : 0;
-}
-
-
-// Things are a bit tricky here: The iterator for the RelocInfos and the infos
-// themselves are not GC-safe, so we first get all infos, then we create the
-// dictionary (possibly triggering GC), and finally we relocate the collected
-// infos before we process them.
-void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
- DisallowHeapAllocation no_allocation;
- ZoneList<RelocInfo> infos(16, zone());
- HandleScope scope(isolate());
- GetRelocInfos(code, &infos);
- CreateDictionary(code, &infos);
- ProcessRelocInfos(&infos);
- // Allocate handle in the parent scope.
- dictionary_ = scope.CloseAndEscape(dictionary_);
-}
-
-
-void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
- ZoneList<RelocInfo>* infos) {
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- infos->Add(*it.rinfo(), zone());
- }
-}
-
-
-void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
- ZoneList<RelocInfo>* infos) {
- AllowHeapAllocation allocation_allowed;
- Code* old_code = *code;
- dictionary_ = UnseededNumberDictionary::New(isolate(), infos->length());
- RelocateRelocInfos(infos, old_code, *code);
-}
-
-
-void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- Code* old_code,
- Code* new_code) {
- for (int i = 0; i < infos->length(); i++) {
- RelocInfo* info = &(*infos)[i];
- info->set_host(new_code);
- info->set_pc(new_code->instruction_start() +
- (info->pc() - old_code->instruction_start()));
- }
-}
-
-
-void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
- for (int i = 0; i < infos->length(); i++) {
- RelocInfo reloc_entry = (*infos)[i];
- Address target_address = reloc_entry.target_address();
- TypeFeedbackId ast_id =
- TypeFeedbackId(static_cast<unsigned>((*infos)[i].data()));
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- switch (target->kind()) {
- case Code::LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_LOAD_IC:
- case Code::KEYED_STORE_IC:
- case Code::BINARY_OP_IC:
- case Code::COMPARE_IC:
- case Code::TO_BOOLEAN_IC:
- SetInfo(ast_id, target);
- break;
-
- default:
- break;
- }
- }
-}
-
-
-void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
- DCHECK(dictionary_->FindEntry(IdToKey(ast_id)) ==
- UnseededNumberDictionary::kNotFound);
- // Dictionary has been allocated with sufficient size for all elements.
- DisallowHeapAllocation no_need_to_resize_dictionary;
- HandleScope scope(isolate());
- USE(UnseededNumberDictionary::AtNumberPut(
- dictionary_, IdToKey(ast_id), handle(target, isolate())));
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
deleted file mode 100644
index d767a297c6..0000000000
--- a/deps/v8/src/type-info.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPE_INFO_H_
-#define V8_TYPE_INFO_H_
-
-#include "src/allocation.h"
-#include "src/ast/ast-types.h"
-#include "src/contexts.h"
-#include "src/globals.h"
-#include "src/parsing/token.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class SmallMapList;
-class FeedbackNexus;
-class StubCache;
-
-class TypeFeedbackOracle: public ZoneObject {
- public:
- TypeFeedbackOracle(Isolate* isolate, Zone* zone, Handle<Code> code,
- Handle<FeedbackVector> feedback_vector,
- Handle<Context> native_context);
-
- InlineCacheState LoadInlineCacheState(FeedbackSlot slot);
- bool StoreIsUninitialized(FeedbackSlot slot);
- bool CallIsUninitialized(FeedbackSlot slot);
- bool CallIsMonomorphic(FeedbackSlot slot);
- bool CallNewIsMonomorphic(FeedbackSlot slot);
-
- // TODO(1571) We can't use ForInStatement::ForInType as the return value due
- // to various cycles in our headers.
- // TODO(rossberg): once all oracle access is removed from ast.cc, it should
- // be possible.
- byte ForInType(FeedbackSlot feedback_vector_slot);
-
- void GetStoreModeAndKeyType(FeedbackSlot slot,
- KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type);
-
- void PropertyReceiverTypes(FeedbackSlot slot, Handle<Name> name,
- SmallMapList* receiver_types);
- void KeyedPropertyReceiverTypes(FeedbackSlot slot,
- SmallMapList* receiver_types, bool* is_string,
- IcCheckType* key_type);
- void AssignmentReceiverTypes(FeedbackSlot slot, Handle<Name> name,
- SmallMapList* receiver_types);
- void KeyedAssignmentReceiverTypes(FeedbackSlot slot,
- SmallMapList* receiver_types,
- KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type);
- void CountReceiverTypes(FeedbackSlot slot, SmallMapList* receiver_types);
-
- void CollectReceiverTypes(FeedbackSlot slot, SmallMapList* types);
- void CollectReceiverTypes(FeedbackNexus* nexus, SmallMapList* types);
-
- Handle<JSFunction> GetCallTarget(FeedbackSlot slot);
- Handle<AllocationSite> GetCallAllocationSite(FeedbackSlot slot);
- Handle<JSFunction> GetCallNewTarget(FeedbackSlot slot);
- Handle<AllocationSite> GetCallNewAllocationSite(FeedbackSlot slot);
-
- // TODO(1571) We can't use ToBooleanICStub::Types as the return value because
- // of various cycles in our headers. Death to tons of implementations in
- // headers!! :-P
- uint16_t ToBooleanTypes(TypeFeedbackId id);
-
- // Get type information for arithmetic operations and compares.
- void BinaryType(TypeFeedbackId id, FeedbackSlot slot, AstType** left,
- AstType** right, AstType** result,
- Maybe<int>* fixed_right_arg,
- Handle<AllocationSite>* allocation_site,
- Token::Value operation);
-
- void CompareType(TypeFeedbackId id, FeedbackSlot slot, AstType** left,
- AstType** right, AstType** combined);
-
- AstType* CountType(TypeFeedbackId id, FeedbackSlot slot);
-
- Zone* zone() const { return zone_; }
- Isolate* isolate() const { return isolate_; }
-
- private:
- // Returns true if there is at least one string map and if
- // all maps are string maps.
- bool HasOnlyStringMaps(SmallMapList* receiver_types);
-
- void SetInfo(TypeFeedbackId id, Object* target);
-
- void BuildDictionary(Handle<Code> code);
- void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
- void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
- void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- Code* old_code,
- Code* new_code);
- void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
-
- // Returns an element from the backing store. Returns undefined if
- // there is no information.
- Handle<Object> GetInfo(TypeFeedbackId id);
-
- // Returns an element from the type feedback vector. Returns undefined
- // if there is no information.
- Handle<Object> GetInfo(FeedbackSlot slot);
-
- private:
- Handle<Context> native_context_;
- Isolate* isolate_;
- Zone* zone_;
- Handle<UnseededNumberDictionary> dictionary_;
- Handle<FeedbackVector> feedback_vector_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TYPE_INFO_H_
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 654494cd81..838ce96c75 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -9,6 +9,10 @@
#include <stdio.h>
#include <stdlib.h>
+#ifdef V8_INTL_SUPPORT
+#include "unicode/uchar.h"
+#endif
+
namespace unibrow {
static const int kStartBit = (1 << 30);
@@ -36,12 +40,11 @@ static inline uchar GetEntry(int32_t entry) {
return entry & (kStartBit - 1);
}
-
static inline bool IsStart(int32_t entry) {
return (entry & kStartBit) != 0;
}
-
+#ifndef V8_INTL_SUPPORT
/**
* Look up a character in the unicode table using a mix of binary and
* interpolation search. For a uniformly distributed array
@@ -81,6 +84,7 @@ static bool LookupPredicate(const int32_t* table, uint16_t size, uchar chr) {
bool is_start = IsStart(field);
return (entry == value) || (entry < value && is_start);
}
+#endif // !V8_INTL_SUPPORT
template <int kW>
struct MultiCharacterSpecialCase {
@@ -88,7 +92,6 @@ struct MultiCharacterSpecialCase {
uchar chars[kW];
};
-
// Look up the mapping for the given character in the specified table,
// which is of the specified length and uses the specified special case
// mapping for multi-char mappings. The next parameter is the character
@@ -273,7 +276,6 @@ uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
}
UNREACHABLE();
- return kBadChar;
}
uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
@@ -361,23 +363,60 @@ uchar Utf8::ValueOfIncrementalFinish(Utf8IncrementalBuffer* buffer) {
}
}
-bool Utf8::Validate(const byte* bytes, size_t length) {
- size_t cursor = 0;
+bool Utf8::ValidateEncoding(const byte* bytes, size_t length) {
+ const byte* cursor = bytes;
+ const byte* end = bytes + length;
- // Performance optimization: Skip over single-byte values first.
- while (cursor < length && bytes[cursor] <= kMaxOneByteChar) {
- ++cursor;
- }
+ while (cursor < end) {
+ // Skip over single-byte values.
+ if (*cursor <= kMaxOneByteChar) {
+ ++cursor;
+ continue;
+ }
+
+ // Get the length of the character.
+ size_t seq_length = NonASCIISequenceLength(*cursor);
+ // For some invalid characters NonASCIISequenceLength returns 0.
+ if (seq_length == 0) return false;
+
+ const byte* char_end = cursor + seq_length;
- while (cursor < length) {
- uchar c = ValueOf(bytes + cursor, length - cursor, &cursor);
- if (!IsValidCharacter(c)) return false;
+ // Return false if we do not have enough bytes for the character.
+ if (char_end > end) return false;
+
+ // Check if the bytes of the character are continuation bytes.
+ for (const byte* i = cursor + 1; i < char_end; ++i) {
+ if (!IsContinuationCharacter(*i)) return false;
+ }
+
+ // Check for overlong sequences and other invalid patterns.
+ if (seq_length == 3) {
+ if (cursor[0] == 0xE0 && (cursor[1] < 0xA0 || cursor[1] > 0xBF)) {
+ // Overlong three-byte sequence?
+ return false;
+ } else if (cursor[0] == 0xED && (cursor[1] < 0x80 || cursor[1] > 0x9F)) {
+ // High and low surrogate halves?
+ return false;
+ }
+ } else if (seq_length == 4) {
+ if (cursor[0] == 0xF0 && (cursor[1] < 0x90 || cursor[1] > 0xBF)) {
+ // Overlong four-byte sequence.
+ return false;
+ } else if (cursor[0] == 0xF4 && (cursor[1] < 0x80 || cursor[1] > 0x8F)) {
+ // Code points outside the Unicode range.
+ return false;
+ }
+ }
+ cursor = char_end;
}
return true;
}
// Uppercase: point.category == 'Lu'
-
+// TODO(jshin): Check if it's ok to exclude Other_Uppercase characters.
+#ifdef V8_INTL_SUPPORT
+bool Uppercase::Is(uchar c) { return static_cast<bool>(u_isupper(c)); }
+#else
static const uint16_t kUppercaseTable0Size = 455;
static const int32_t kUppercaseTable0[455] = {
1073741889, 90, 1073742016, 214,
@@ -543,196 +582,12 @@ bool Uppercase::Is(uchar c) {
default: return false;
}
}
-
-
-// Lowercase: point.category == 'Ll'
-
-static const uint16_t kLowercaseTable0Size = 467;
-static const int32_t kLowercaseTable0[467] = {
- 1073741921, 122, 181, 1073742047,
- 246, 1073742072, 255, 257, // NOLINT
- 259, 261, 263, 265,
- 267, 269, 271, 273, // NOLINT
- 275, 277, 279, 281,
- 283, 285, 287, 289, // NOLINT
- 291, 293, 295, 297,
- 299, 301, 303, 305, // NOLINT
- 307, 309, 1073742135, 312,
- 314, 316, 318, 320, // NOLINT
- 322, 324, 326, 1073742152,
- 329, 331, 333, 335, // NOLINT
- 337, 339, 341, 343,
- 345, 347, 349, 351, // NOLINT
- 353, 355, 357, 359,
- 361, 363, 365, 367, // NOLINT
- 369, 371, 373, 375,
- 378, 380, 1073742206, 384, // NOLINT
- 387, 389, 392, 1073742220,
- 397, 402, 405, 1073742233, // NOLINT
- 411, 414, 417, 419,
- 421, 424, 1073742250, 427, // NOLINT
- 429, 432, 436, 438,
- 1073742265, 442, 1073742269, 447, // NOLINT
- 454, 457, 460, 462,
- 464, 466, 468, 470, // NOLINT
- 472, 474, 1073742300, 477,
- 479, 481, 483, 485, // NOLINT
- 487, 489, 491, 493,
- 1073742319, 496, 499, 501, // NOLINT
- 505, 507, 509, 511,
- 513, 515, 517, 519, // NOLINT
- 521, 523, 525, 527,
- 529, 531, 533, 535, // NOLINT
- 537, 539, 541, 543,
- 545, 547, 549, 551, // NOLINT
- 553, 555, 557, 559,
- 561, 1073742387, 569, 572, // NOLINT
- 1073742399, 576, 578, 583,
- 585, 587, 589, 1073742415, // NOLINT
- 659, 1073742485, 687, 881,
- 883, 887, 1073742715, 893, // NOLINT
- 912, 1073742764, 974, 1073742800,
- 977, 1073742805, 983, 985, // NOLINT
- 987, 989, 991, 993,
- 995, 997, 999, 1001, // NOLINT
- 1003, 1005, 1073742831, 1011,
- 1013, 1016, 1073742843, 1020, // NOLINT
- 1073742896, 1119, 1121, 1123,
- 1125, 1127, 1129, 1131, // NOLINT
- 1133, 1135, 1137, 1139,
- 1141, 1143, 1145, 1147, // NOLINT
- 1149, 1151, 1153, 1163,
- 1165, 1167, 1169, 1171, // NOLINT
- 1173, 1175, 1177, 1179,
- 1181, 1183, 1185, 1187, // NOLINT
- 1189, 1191, 1193, 1195,
- 1197, 1199, 1201, 1203, // NOLINT
- 1205, 1207, 1209, 1211,
- 1213, 1215, 1218, 1220, // NOLINT
- 1222, 1224, 1226, 1228,
- 1073743054, 1231, 1233, 1235, // NOLINT
- 1237, 1239, 1241, 1243,
- 1245, 1247, 1249, 1251, // NOLINT
- 1253, 1255, 1257, 1259,
- 1261, 1263, 1265, 1267, // NOLINT
- 1269, 1271, 1273, 1275,
- 1277, 1279, 1281, 1283, // NOLINT
- 1285, 1287, 1289, 1291,
- 1293, 1295, 1297, 1299, // NOLINT
- 1301, 1303, 1305, 1307,
- 1309, 1311, 1313, 1315, // NOLINT
- 1317, 1319, 1321, 1323,
- 1325, 1327, 1073743201, 1415, // NOLINT
- 1073749248, 7467, 1073749355, 7543,
- 1073749369, 7578, 7681, 7683, // NOLINT
- 7685, 7687, 7689, 7691,
- 7693, 7695, 7697, 7699, // NOLINT
- 7701, 7703, 7705, 7707,
- 7709, 7711, 7713, 7715, // NOLINT
- 7717, 7719, 7721, 7723,
- 7725, 7727, 7729, 7731, // NOLINT
- 7733, 7735, 7737, 7739,
- 7741, 7743, 7745, 7747, // NOLINT
- 7749, 7751, 7753, 7755,
- 7757, 7759, 7761, 7763, // NOLINT
- 7765, 7767, 7769, 7771,
- 7773, 7775, 7777, 7779, // NOLINT
- 7781, 7783, 7785, 7787,
- 7789, 7791, 7793, 7795, // NOLINT
- 7797, 7799, 7801, 7803,
- 7805, 7807, 7809, 7811, // NOLINT
- 7813, 7815, 7817, 7819,
- 7821, 7823, 7825, 7827, // NOLINT
- 1073749653, 7837, 7839, 7841,
- 7843, 7845, 7847, 7849, // NOLINT
- 7851, 7853, 7855, 7857,
- 7859, 7861, 7863, 7865, // NOLINT
- 7867, 7869, 7871, 7873,
- 7875, 7877, 7879, 7881, // NOLINT
- 7883, 7885, 7887, 7889,
- 7891, 7893, 7895, 7897, // NOLINT
- 7899, 7901, 7903, 7905,
- 7907, 7909, 7911, 7913, // NOLINT
- 7915, 7917, 7919, 7921,
- 7923, 7925, 7927, 7929, // NOLINT
- 7931, 7933, 1073749759, 7943,
- 1073749776, 7957, 1073749792, 7975, // NOLINT
- 1073749808, 7991, 1073749824, 8005,
- 1073749840, 8023, 1073749856, 8039, // NOLINT
- 1073749872, 8061, 1073749888, 8071,
- 1073749904, 8087, 1073749920, 8103, // NOLINT
- 1073749936, 8116, 1073749942, 8119,
- 8126, 1073749954, 8132, 1073749958, // NOLINT
- 8135, 1073749968, 8147, 1073749974,
- 8151, 1073749984, 8167, 1073750002, // NOLINT
- 8180, 1073750006, 8183}; // NOLINT
-static const uint16_t kLowercaseTable1Size = 84;
-static const int32_t kLowercaseTable1[84] = {
- 266, 1073742094, 271, 275, 303, 308, 313, 1073742140, // NOLINT
- 317, 1073742150, 329, 334, 388, 1073744944, 3166, 3169, // NOLINT
- 1073744997, 3174, 3176, 3178, 3180, 3185, 1073745011, 3188, // NOLINT
- 1073745014, 3195, 3201, 3203, 3205, 3207, 3209, 3211, // NOLINT
- 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, // NOLINT
- 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, // NOLINT
- 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, // NOLINT
- 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, // NOLINT
- 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, // NOLINT
- 3293, 3295, 3297, 1073745123, 3300, 3308, 3310, 3315, // NOLINT
- 1073745152, 3365, 3367, 3373 }; // NOLINT
-static const uint16_t kLowercaseTable5Size = 105;
-static const int32_t kLowercaseTable5[105] = {
- 1601, 1603, 1605, 1607,
- 1609, 1611, 1613, 1615, // NOLINT
- 1617, 1619, 1621, 1623,
- 1625, 1627, 1629, 1631, // NOLINT
- 1633, 1635, 1637, 1639,
- 1641, 1643, 1645, 1665, // NOLINT
- 1667, 1669, 1671, 1673,
- 1675, 1677, 1679, 1681, // NOLINT
- 1683, 1685, 1687, 1689,
- 1691, 1827, 1829, 1831, // NOLINT
- 1833, 1835, 1837, 1073743663,
- 1841, 1843, 1845, 1847, // NOLINT
- 1849, 1851, 1853, 1855,
- 1857, 1859, 1861, 1863, // NOLINT
- 1865, 1867, 1869, 1871,
- 1873, 1875, 1877, 1879, // NOLINT
- 1881, 1883, 1885, 1887,
- 1889, 1891, 1893, 1895, // NOLINT
- 1897, 1899, 1901, 1903,
- 1073743729, 1912, 1914, 1916, // NOLINT
- 1919, 1921, 1923, 1925,
- 1927, 1932, 1934, 1937, // NOLINT
- 1073743763, 1941, 1943, 1945,
- 1947, 1949, 1951, 1953, // NOLINT
- 1955, 1957, 1959, 1961,
- 2042, 1073744688, 2906, 1073744740, // NOLINT
- 2917}; // NOLINT
-static const uint16_t kLowercaseTable7Size = 6;
-static const int32_t kLowercaseTable7[6] = {
- 1073748736, 6918, 1073748755, 6935, 1073749825, 8026 }; // NOLINT
-bool Lowercase::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLowercaseTable0,
- kLowercaseTable0Size,
- c);
- case 1: return LookupPredicate(kLowercaseTable1,
- kLowercaseTable1Size,
- c);
- case 5: return LookupPredicate(kLowercaseTable5,
- kLowercaseTable5Size,
- c);
- case 7: return LookupPredicate(kLowercaseTable7,
- kLowercaseTable7Size,
- c);
- default: return false;
- }
-}
-
+#endif // V8_INTL_SUPPORT
// Letter: point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl']
-
+#ifdef V8_INTL_SUPPORT
+bool Letter::Is(uchar c) { return static_cast<bool>(u_isalpha(c)); }
+#else
static const uint16_t kLetterTable0Size = 431;
static const int32_t kLetterTable0[431] = {
1073741889, 90, 1073741921, 122,
@@ -933,8 +788,9 @@ bool Letter::Is(uchar c) {
default: return false;
}
}
+#endif
-
+#ifndef V8_INTL_SUPPORT
// ID_Start: ((point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo',
// 'Nl'] or 'Other_ID_Start' in point.properties) and ('Pattern_Syntax' not in
// point.properties) and ('Pattern_White_Space' not in point.properties)) or
@@ -1280,7 +1136,6 @@ bool ID_Continue::Is(uchar c) {
}
}
-
// WhiteSpace: (point.category == 'Zs') or ('JS_White_Space' in
// point.properties)
@@ -1306,29 +1161,17 @@ bool WhiteSpace::Is(uchar c) {
default: return false;
}
}
-
+#endif // !V8_INTL_SUPPORT
// LineTerminator: 'JS_Line_Terminator' in point.properties
+// ES#sec-line-terminators lists exactly 4 code points:
+// LF (U+000A), CR (U+000D), LS (U+2028), PS (U+2029)
-static const uint16_t kLineTerminatorTable0Size = 2;
-static const int32_t kLineTerminatorTable0[2] = {
- 10, 13 }; // NOLINT
-static const uint16_t kLineTerminatorTable1Size = 2;
-static const int32_t kLineTerminatorTable1[2] = {
- 1073741864, 41 }; // NOLINT
bool LineTerminator::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kLineTerminatorTable0,
- kLineTerminatorTable0Size,
- c);
- case 1: return LookupPredicate(kLineTerminatorTable1,
- kLineTerminatorTable1Size,
- c);
- default: return false;
- }
+ return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029;
}
+#ifndef V8_INTL_SUPPORT
static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = { // NOLINT
{{105, 775}}, {{kSentinel}} }; // NOLINT
static const uint16_t kToLowercaseTable0Size = 488; // NOLINT
@@ -1938,6 +1781,7 @@ int ToUppercase::Convert(uchar c,
default: return 0;
}
}
+#endif // !V8_INTL_SUPPORT
static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] = { // NOLINT
{{kSentinel}} }; // NOLINT
@@ -3417,14 +3261,11 @@ int CanonicalizationRange::Convert(uchar c,
const uchar UnicodeData::kMaxCodePoint = 65533;
int UnicodeData::GetByteCount() {
+#ifndef V8_INTL_SUPPORT // NOLINT
return kUppercaseTable0Size * sizeof(int32_t) // NOLINT
+ kUppercaseTable1Size * sizeof(int32_t) // NOLINT
+ kUppercaseTable5Size * sizeof(int32_t) // NOLINT
+ kUppercaseTable7Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable0Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable1Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable5Size * sizeof(int32_t) // NOLINT
- + kLowercaseTable7Size * sizeof(int32_t) // NOLINT
+ kLetterTable0Size * sizeof(int32_t) // NOLINT
+ kLetterTable1Size * sizeof(int32_t) // NOLINT
+ kLetterTable2Size * sizeof(int32_t) // NOLINT
@@ -3448,8 +3289,6 @@ int UnicodeData::GetByteCount() {
+ kWhiteSpaceTable0Size * sizeof(int32_t) // NOLINT
+ kWhiteSpaceTable1Size * sizeof(int32_t) // NOLINT
+ kWhiteSpaceTable7Size * sizeof(int32_t) // NOLINT
- + kLineTerminatorTable0Size * sizeof(int32_t) // NOLINT
- + kLineTerminatorTable1Size * sizeof(int32_t) // NOLINT
+
kToLowercaseMultiStrings0Size *
sizeof(MultiCharacterSpecialCase<2>) // NOLINT
@@ -3474,6 +3313,9 @@ int UnicodeData::GetByteCount() {
+
kToUppercaseMultiStrings7Size *
sizeof(MultiCharacterSpecialCase<3>) // NOLINT
+#else
+ return
+#endif // !V8_INTL_SUPPORT
+
kEcma262CanonicalizeMultiStrings0Size *
sizeof(MultiCharacterSpecialCase<1>) // NOLINT
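Note: the new range checks in ValidateEncoding reject exactly the classic problem cases - overlong encodings, surrogate halves, and code points above U+10FFFF. A small usage sketch written against the unibrow::Utf8::ValidateEncoding declaration added above (assumes linking against V8; the include path is illustrative):

#include <cstdint>
#include "src/unicode.h"  // unibrow::Utf8::ValidateEncoding

bool CheckValidation() {
  using unibrow::Utf8;
  const uint8_t ascii[]     = {0x41};                    // 'A'            -> accepted
  const uint8_t overlong[]  = {0xE0, 0x80, 0x80};        // overlong U+0000 -> rejected
  const uint8_t surrogate[] = {0xED, 0xA0, 0x80};        // U+D800 half     -> rejected
  const uint8_t astral[]    = {0xF0, 0x90, 0x80, 0x80};  // U+10000         -> accepted
  const uint8_t too_big[]   = {0xF4, 0x90, 0x80, 0x80};  // above U+10FFFF  -> rejected
  const uint8_t truncated[] = {0xC3};                    // cut-off sequence -> rejected
  return Utf8::ValidateEncoding(ascii, 1) &&
         !Utf8::ValidateEncoding(overlong, 3) &&
         !Utf8::ValidateEncoding(surrogate, 3) &&
         Utf8::ValidateEncoding(astral, 4) &&
         !Utf8::ValidateEncoding(too_big, 4) &&
         !Utf8::ValidateEncoding(truncated, 1);
}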
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 1b98a472f2..f360b14634 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -166,18 +166,24 @@ class Utf8 {
// Excludes non-characters from the set of valid code points.
static inline bool IsValidCharacter(uchar c);
- static bool Validate(const byte* str, size_t length);
+ // Validate whether the input is a valid utf-8 encoding. Unlike JS source
+ // code, this validation function will accept any Unicode code point,
+ // including kBadChar and BOMs.
+ //
+ // This method checks for:
+ // - valid utf-8 encoding (e.g. no over-long encodings),
+ // - absence of surrogates,
+ // - valid code point range.
+ static bool ValidateEncoding(const byte* str, size_t length);
};
struct Uppercase {
static bool Is(uchar c);
};
-struct Lowercase {
- static bool Is(uchar c);
-};
struct Letter {
static bool Is(uchar c);
};
+#ifndef V8_INTL_SUPPORT
struct V8_EXPORT_PRIVATE ID_Start {
static bool Is(uchar c);
};
@@ -187,9 +193,11 @@ struct V8_EXPORT_PRIVATE ID_Continue {
struct V8_EXPORT_PRIVATE WhiteSpace {
static bool Is(uchar c);
};
+#endif // !V8_INTL_SUPPORT
struct V8_EXPORT_PRIVATE LineTerminator {
static bool Is(uchar c);
};
+#ifndef V8_INTL_SUPPORT
struct ToLowercase {
static const int kMaxWidth = 3;
static const bool kIsToLower = true;
@@ -206,6 +214,7 @@ struct ToUppercase {
uchar* result,
bool* allow_caching_ptr);
};
+#endif
struct Ecma262Canonicalize {
static const int kMaxWidth = 1;
static int Convert(uchar c,
diff --git a/deps/v8/src/utils-inl.h b/deps/v8/src/utils-inl.h
index 617d7fc151..b7108a4361 100644
--- a/deps/v8/src/utils-inl.h
+++ b/deps/v8/src/utils-inl.h
@@ -9,6 +9,7 @@
#include "include/v8-platform.h"
#include "src/base/platform/time.h"
+#include "src/char-predicates-inl.h"
#include "src/v8.h"
namespace v8 {
@@ -31,6 +32,34 @@ class TimedScope {
double* result_;
};
+template <typename Stream>
+bool StringToArrayIndex(Stream* stream, uint32_t* index) {
+ uint16_t ch = stream->GetNext();
+
+ // If the string begins with a '0' character, it must consist of only
+ // that character to be a legal array index.
+ if (ch == '0') {
+ *index = 0;
+ return !stream->HasMore();
+ }
+
+ // Convert string to uint32 array index; character by character.
+ if (!IsDecimalDigit(ch)) return false;
+ int d = ch - '0';
+ uint32_t result = d;
+ while (stream->HasMore()) {
+ ch = stream->GetNext();
+ if (!IsDecimalDigit(ch)) return false;
+ d = ch - '0';
+ // Check that the new result is below the 32 bit limit.
+ if (result > 429496729U - ((d + 3) >> 3)) return false;
+ result = (result * 10) + d;
+ }
+
+ *index = result;
+ return true;
+}
+
} // namespace internal
} // namespace v8
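Note: StringToArrayIndex only needs a stream exposing HasMore() and GetNext(); the guard result > 429496729U - ((d + 3) >> 3) keeps the running value at or below 4294967294, the largest legal array index (2^32 - 2). A rough usage sketch with a minimal string-backed stream (CharStream is an illustrative adapter, not a V8 class; the include path is illustrative):

#include <cstdint>
#include <string>
#include "src/utils-inl.h"  // v8::internal::StringToArrayIndex

struct CharStream {
  const std::string& str;
  size_t pos = 0;
  bool HasMore() const { return pos < str.size(); }
  uint16_t GetNext() { return static_cast<uint16_t>(str[pos++]); }
};

bool Example() {
  std::string s = "4294967294";  // largest valid array index
  CharStream stream{s};
  uint32_t index;
  // Returns true and sets index to 4294967294; "4294967295" would return false.
  return v8::internal::StringToArrayIndex(&stream, &index);
}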
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 96a7d2c9ee..9d166e06c6 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -356,8 +356,7 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) {
}
}
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
@@ -411,7 +410,7 @@ static bool g_memcopy_functions_initialized = false;
void init_memcopy_functions(Isolate* isolate) {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_IA32
MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 6c8dadbd71..6e9f1c01cb 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -52,26 +52,28 @@ inline bool CStringEquals(const char* s1, const char* s2) {
return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
}
-
// X must be a power of 2. Returns the number of trailing zeros.
-inline int WhichPowerOf2(uint32_t x) {
- DCHECK(base::bits::IsPowerOfTwo32(x));
+template <typename T,
+ typename = typename std::enable_if<std::is_integral<T>::value>::type>
+inline int WhichPowerOf2(T x) {
+ DCHECK(base::bits::IsPowerOfTwo(x));
int bits = 0;
#ifdef DEBUG
- uint32_t original_x = x;
+ const T original_x = x;
#endif
- if (x >= 0x10000) {
- bits += 16;
- x >>= 16;
- }
- if (x >= 0x100) {
- bits += 8;
- x >>= 8;
- }
- if (x >= 0x10) {
- bits += 4;
- x >>= 4;
- }
+ constexpr int max_bits = sizeof(T) * 8;
+ static_assert(max_bits <= 64, "integral types are not bigger than 64 bits");
+// Avoid shifting by more than the bit width of x, which triggers compiler warnings.
+#define CHECK_BIGGER(s) \
+ if (max_bits > s && x >= T{1} << (max_bits > s ? s : 0)) { \
+ bits += s; \
+ x >>= max_bits > s ? s : 0; \
+ }
+ CHECK_BIGGER(32)
+ CHECK_BIGGER(16)
+ CHECK_BIGGER(8)
+ CHECK_BIGGER(4)
+#undef CHECK_BIGGER
switch (x) {
default: UNREACHABLE();
case 8: bits++; // Fall through.
@@ -79,46 +81,10 @@ inline int WhichPowerOf2(uint32_t x) {
case 2: bits++; // Fall through.
case 1: break;
}
- DCHECK_EQ(static_cast<uint32_t>(1) << bits, original_x);
+ DCHECK_EQ(T{1} << bits, original_x);
return bits;
}
-
-// X must be a power of 2. Returns the number of trailing zeros.
-inline int WhichPowerOf2_64(uint64_t x) {
- DCHECK(base::bits::IsPowerOfTwo64(x));
- int bits = 0;
-#ifdef DEBUG
- uint64_t original_x = x;
-#endif
- if (x >= 0x100000000L) {
- bits += 32;
- x >>= 32;
- }
- if (x >= 0x10000) {
- bits += 16;
- x >>= 16;
- }
- if (x >= 0x100) {
- bits += 8;
- x >>= 8;
- }
- if (x >= 0x10) {
- bits += 4;
- x >>= 4;
- }
- switch (x) {
- default: UNREACHABLE();
- case 8: bits++; // Fall through.
- case 4: bits++; // Fall through.
- case 2: bits++; // Fall through.
- case 1: break;
- }
- DCHECK_EQ(static_cast<uint64_t>(1) << bits, original_x);
- return bits;
-}
-
-
inline int MostSignificantBit(uint32_t x) {
static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
int nibble = 0;
@@ -188,14 +154,14 @@ inline bool IsAddressAligned(Address addr,
// Returns the maximum of the two parameters.
template <typename T>
-T Max(T a, T b) {
+constexpr T Max(T a, T b) {
return a < b ? b : a;
}
// Returns the minimum of the two parameters.
template <typename T>
-T Min(T a, T b) {
+constexpr T Min(T a, T b) {
return a < b ? a : b;
}
@@ -218,12 +184,12 @@ T JSMin(T x, T y) {
}
// Returns the absolute value of its argument.
-template <typename T>
-T Abs(T a) {
+template <typename T,
+ typename = typename std::enable_if<std::is_integral<T>::value>::type>
+typename std::make_unsigned<T>::type Abs(T a) {
return a < 0 ? -a : a;
}
-
// Floor(-0.0) == 0.0
inline double Floor(double x) {
#if V8_CC_MSVC
@@ -261,30 +227,6 @@ inline double Pow(double x, double y) {
return std::pow(x, y);
}
-// TODO(svenpanne) Clean up the whole power-of-2 mess.
-inline int32_t WhichPowerOf2Abs(int32_t x) {
- return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
-}
-
-
-// Obtains the unsigned type corresponding to T
-// available in C++11 as std::make_unsigned
-template<typename T>
-struct make_unsigned {
- typedef T type;
-};
-
-
-// Template specializations necessary to have make_unsigned work
-template<> struct make_unsigned<int32_t> {
- typedef uint32_t type;
-};
-
-
-template<> struct make_unsigned<int64_t> {
- typedef uint64_t type;
-};
-
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
@@ -293,6 +235,8 @@ template<> struct make_unsigned<int64_t> {
template<class T, int shift, int size, class U>
class BitFieldBase {
public:
+ typedef T FieldType;
+
// A type U mask of bit field. To use all bits of a type U of x bits
// in a bitfield without compiler warnings we have to compute 2^x
// without using a shift count of x in the computation.
@@ -344,6 +288,41 @@ class BitField : public BitFieldBase<T, shift, size, uint32_t> { };
template<class T, int shift, int size>
class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
+// Helper macros for defining a contiguous sequence of bit fields. Example:
+// (backslashes at the ends of respective lines of this multi-line macro
+// definition are omitted here to please the compiler)
+//
+// #define MAP_BIT_FIELD1(V, _)
+// V(IsAbcBit, bool, 1, _)
+// V(IsBcdBit, bool, 1, _)
+// V(CdeBits, int, 5, _)
+// V(DefBits, MutableMode, 1, _)
+//
+// DEFINE_BIT_FIELDS(MAP_BIT_FIELD1)
+// or
+// DEFINE_BIT_FIELDS_64(MAP_BIT_FIELD1)
+//
+#define DEFINE_BIT_FIELD_RANGE_TYPE(Name, Type, Size, _) \
+ k##Name##Start, k##Name##End = k##Name##Start + Size - 1,
+
+#define DEFINE_BIT_RANGES(LIST_MACRO) \
+ struct LIST_MACRO##_Ranges { \
+ enum { LIST_MACRO(DEFINE_BIT_FIELD_RANGE_TYPE, _) }; \
+ };
+
+#define DEFINE_BIT_FIELD_TYPE(Name, Type, Size, RangesName) \
+ typedef BitField<Type, RangesName::k##Name##Start, Size> Name;
+
+#define DEFINE_BIT_FIELD_64_TYPE(Name, Type, Size, RangesName) \
+ typedef BitField64<Type, RangesName::k##Name##Start, Size> Name;
+
+#define DEFINE_BIT_FIELDS(LIST_MACRO) \
+ DEFINE_BIT_RANGES(LIST_MACRO) \
+ LIST_MACRO(DEFINE_BIT_FIELD_TYPE, LIST_MACRO##_Ranges)
+
+#define DEFINE_BIT_FIELDS_64(LIST_MACRO) \
+ DEFINE_BIT_RANGES(LIST_MACRO) \
+ LIST_MACRO(DEFINE_BIT_FIELD_64_TYPE, LIST_MACRO##_Ranges)
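Note: feeding the MAP_BIT_FIELD1 example from the comment through DEFINE_BIT_FIELDS yields a _Ranges enum of start/end bit positions plus one BitField typedef per entry. A hand-expanded sketch (the encode/decode usage at the end assumes BitField's usual interface):

// DEFINE_BIT_FIELDS(MAP_BIT_FIELD1) roughly expands to:
struct MAP_BIT_FIELD1_Ranges {
  enum {
    kIsAbcBitStart = 0, kIsAbcBitEnd = 0,
    kIsBcdBitStart = 1, kIsBcdBitEnd = 1,
    kCdeBitsStart = 2,  kCdeBitsEnd = 6,
    kDefBitsStart = 7,  kDefBitsEnd = 7,
  };
};
typedef BitField<bool, MAP_BIT_FIELD1_Ranges::kIsAbcBitStart, 1> IsAbcBit;
typedef BitField<bool, MAP_BIT_FIELD1_Ranges::kIsBcdBitStart, 1> IsBcdBit;
typedef BitField<int, MAP_BIT_FIELD1_Ranges::kCdeBitsStart, 5> CdeBits;
typedef BitField<MutableMode, MAP_BIT_FIELD1_Ranges::kDefBitsStart, 1> DefBits;

// The fields then pack contiguously into one word, e.g.:
// uint32_t word = IsAbcBit::encode(true) | CdeBits::encode(12);
// int cde = CdeBits::decode(word);  // == 12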
// ----------------------------------------------------------------------------
// BitSetComputer is a help template for encoding and decoding information for
@@ -384,6 +363,26 @@ class BitSetComputer {
static int shift(int item) { return (item % kItemsPerWord) * kBitsPerItem; }
};
+// Helper macros for defining a contiguous sequence of field offset constants.
+// Example: (backslashes at the ends of respective lines of this multi-line
+// macro definition are omitted here to please the compiler)
+//
+// #define MAP_FIELDS(V)
+// V(kField1Offset, kPointerSize)
+// V(kField2Offset, kIntSize)
+// V(kField3Offset, kIntSize)
+// V(kField4Offset, kPointerSize)
+// V(kSize, 0)
+//
+// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
+//
+#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + Size - 1,
+
+#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
+ enum { \
+ LIST_MACRO##_StartOffset = StartOffset - 1, \
+ LIST_MACRO(DEFINE_ONE_FIELD_OFFSET) \
+ };
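// Illustration only (not part of the patch): for the MAP_FIELDS example above,
// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS) expands
// to an enum of consecutive offsets. Assuming a header size of 8 with
// kPointerSize == 8 and kIntSize == 4, the values would be
//   MAP_FIELDS_StartOffset = 7
//   kField1Offset = 8,  kField1OffsetEnd = 15   // 8-byte field
//   kField2Offset = 16, kField2OffsetEnd = 19   // 4-byte field
//   kField3Offset = 20, kField3OffsetEnd = 23   // 4-byte field
//   kField4Offset = 24, kField4OffsetEnd = 31   // 8-byte field
//   kSize = 32                                  // total size of the object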
// ----------------------------------------------------------------------------
// Hash function.
@@ -404,6 +403,9 @@ inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
return hash & 0x3fffffff;
}
+inline uint32_t ComputeIntegerHash(uint32_t key) {
+ return ComputeIntegerHash(key, kZeroHashSeed);
+}
inline uint32_t ComputeLongHash(uint64_t key) {
uint64_t hash = key;
@@ -419,8 +421,7 @@ inline uint32_t ComputeLongHash(uint64_t key) {
inline uint32_t ComputePointerHash(void* ptr) {
return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
- v8::internal::kZeroHashSeed);
+ static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
}
@@ -430,7 +431,7 @@ inline uint32_t ComputePointerHash(void* ptr) {
// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions(Isolate* isolate);
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
+#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;
@@ -866,27 +867,6 @@ INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
-class TypeFeedbackId {
- public:
- explicit TypeFeedbackId(int id) : id_(id) { }
- int ToInt() const { return id_; }
-
- static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
- bool IsNone() const { return id_ == kNoneId; }
-
- private:
- static const int kNoneId = -1;
-
- int id_;
-};
-
-inline bool operator<(TypeFeedbackId lhs, TypeFeedbackId rhs) {
- return lhs.ToInt() < rhs.ToInt();
-}
-inline bool operator>(TypeFeedbackId lhs, TypeFeedbackId rhs) {
- return lhs.ToInt() > rhs.ToInt();
-}
-
class FeedbackSlot {
public:
FeedbackSlot() : id_(kInvalidSlot) {}
@@ -941,6 +921,8 @@ class BailoutId {
V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&, BailoutId);
private:
+ friend class Builtins;
+
static const int kNoneId = -1;
// Using 0 could disguise errors.
@@ -959,6 +941,11 @@ class BailoutId {
// Every compiled stub starts with this id.
static const int kStubEntryId = 6;
+ // Builtin continuations bailout ids start here. If you need to add a
+ // non-builtin BailoutId, add it before this id so that this Id has the
+ // highest number.
+ static const int kFirstBuiltinContinuationId = 7;
+
int id_;
};
@@ -1486,32 +1473,7 @@ class StringBuilder : public SimpleStringBuilder {
bool DoubleToBoolean(double d);
template <typename Stream>
-bool StringToArrayIndex(Stream* stream, uint32_t* index) {
- uint16_t ch = stream->GetNext();
-
- // If the string begins with a '0' character, it must only consist
- // of it to be a legal array index.
- if (ch == '0') {
- *index = 0;
- return !stream->HasMore();
- }
-
- // Convert string to uint32 array index; character by character.
- int d = ch - '0';
- if (d < 0 || d > 9) return false;
- uint32_t result = d;
- while (stream->HasMore()) {
- d = stream->GetNext() - '0';
- if (d < 0 || d > 9) return false;
- // Check that the new result is below the 32 bit limit.
- if (result > 429496729U - ((d + 3) >> 3)) return false;
- result = (result * 10) + d;
- }
-
- *index = result;
- return true;
-}
-
+bool StringToArrayIndex(Stream* stream, uint32_t* index);
// Returns current value of top of the stack. Works correctly with ASAN.
DISABLE_ASAN
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 95cf3adce1..dbdd8c3624 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -6,10 +6,10 @@
#include "src/api.h"
#include "src/assembler.h"
+#include "src/base/atomicops.h"
#include "src/base/once.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
-#include "src/crankshaft/lithium-allocator.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -45,7 +45,6 @@ bool V8::Initialize() {
void V8::TearDown() {
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
- LOperand::TearDownCaches();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
sampler::Sampler::TearDown();
@@ -67,11 +66,6 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
- if (FLAG_opt && FLAG_turbo && strcmp(FLAG_turbo_filter, "~~") == 0) {
- const char* filter_flag = "--turbo-filter=*";
- FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
- }
-
base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
Isolate::InitializeOncePerProcess();
@@ -79,7 +73,6 @@ void V8::InitializeOncePerProcessImpl() {
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
- LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
ExternalReference::SetUp();
Bootstrapper::InitializeOncePerProcess();
@@ -109,13 +102,16 @@ void V8::ShutdownPlatform() {
v8::Platform* V8::GetCurrentPlatform() {
- DCHECK(platform_);
- return platform_;
+ v8::Platform* platform = reinterpret_cast<v8::Platform*>(
+ base::Relaxed_Load(reinterpret_cast<base::AtomicWord*>(&platform_)));
+ DCHECK(platform);
+ return platform;
}
-
-void V8::SetPlatformForTesting(v8::Platform* platform) { platform_ = platform; }
-
+void V8::SetPlatformForTesting(v8::Platform* platform) {
+ base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(&platform_),
+ reinterpret_cast<base::AtomicWord>(platform));
+}
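// Illustration only (not part of the patch): the Relaxed_Load / Relaxed_Store
// pair above has roughly the intent of a relaxed std::atomic pointer --
// concurrent reads and writes of platform_ become data-race free, with no
// ordering guarantees beyond that:
//   std::atomic<v8::Platform*> platform{nullptr};
//   void SetPlatformForTesting(v8::Platform* p) {
//     platform.store(p, std::memory_order_relaxed);
//   }
//   v8::Platform* GetCurrentPlatform() {
//     return platform.load(std::memory_order_relaxed);
//   }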
void V8::SetNativesBlob(StartupData* natives_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index c269f245aa..e6e4567880 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -180,12 +180,16 @@
'builtins/builtins-async-iterator-gen.cc',
'builtins/builtins-boolean-gen.cc',
'builtins/builtins-call-gen.cc',
+ 'builtins/builtins-call-gen.h',
+ 'builtins/builtins-collections-gen.cc',
'builtins/builtins-console-gen.cc',
'builtins/builtins-constructor-gen.cc',
'builtins/builtins-constructor-gen.h',
'builtins/builtins-constructor.h',
'builtins/builtins-conversion-gen.cc',
+ 'builtins/builtins-conversion-gen.h',
'builtins/builtins-date-gen.cc',
+ 'builtins/builtins-debug-gen.cc',
'builtins/builtins-forin-gen.cc',
'builtins/builtins-forin-gen.h',
'builtins/builtins-function-gen.cc',
@@ -196,11 +200,14 @@
'builtins/builtins-internal-gen.cc',
'builtins/builtins-interpreter-gen.cc',
'builtins/builtins-intl-gen.cc',
+ 'builtins/builtins-iterator-gen.h',
+ 'builtins/builtins-iterator-gen.cc',
'builtins/builtins-math-gen.cc',
'builtins/builtins-number-gen.cc',
'builtins/builtins-object-gen.cc',
'builtins/builtins-promise-gen.cc',
'builtins/builtins-promise-gen.h',
+ 'builtins/builtins-proxy-gen.cc',
'builtins/builtins-regexp-gen.cc',
'builtins/builtins-regexp-gen.h',
'builtins/builtins-sharedarraybuffer-gen.cc',
@@ -272,11 +279,6 @@
'builtins/s390/builtins-s390.cc',
],
}],
- ['v8_target_arch=="x87"', {
- 'sources': [ ### gcmole(arch:x87) ###
- 'builtins/x87/builtins-x87.cc',
- ],
- }],
['v8_enable_i18n_support==0', {
'sources!': [
'builtins/builtins-intl-gen.cc',
@@ -554,7 +556,6 @@
'address-map.h',
'allocation.cc',
'allocation.h',
- 'allocation-site-scopes.cc',
'allocation-site-scopes.h',
'api.cc',
'api.h',
@@ -587,10 +588,8 @@
'ast/ast-function-literal-id-reindexer.h',
'ast/ast-numbering.cc',
'ast/ast-numbering.h',
+ 'ast/ast-source-ranges.h',
'ast/ast-traversal-visitor.h',
- 'ast/ast-type-bounds.h',
- 'ast/ast-types.cc',
- 'ast/ast-types.h',
'ast/ast-value-factory.cc',
'ast/ast-value-factory.h',
'ast/ast.cc',
@@ -627,11 +626,11 @@
'builtins/builtins-boolean.cc',
'builtins/builtins-call.cc',
'builtins/builtins-callsite.cc',
+ 'builtins/builtins-collections.cc',
'builtins/builtins-console.cc',
'builtins/builtins-constructor.h',
'builtins/builtins-dataview.cc',
'builtins/builtins-date.cc',
- 'builtins/builtins-debug.cc',
'builtins/builtins-definitions.h',
'builtins/builtins-descriptors.h',
'builtins/builtins-error.cc',
@@ -643,12 +642,13 @@
'builtins/builtins-math.cc',
'builtins/builtins-number.cc',
'builtins/builtins-object.cc',
- 'builtins/builtins-proxy.cc',
+ 'builtins/builtins-promise.cc',
'builtins/builtins-reflect.cc',
'builtins/builtins-regexp.cc',
'builtins/builtins-sharedarraybuffer.cc',
'builtins/builtins-string.cc',
'builtins/builtins-intl.cc',
+ 'builtins/builtins-intl.h',
'builtins/builtins-symbol.cc',
'builtins/builtins-typedarray.cc',
'builtins/builtins-utils.h',
@@ -670,7 +670,6 @@
'code-stub-assembler.h',
'code-stubs.cc',
'code-stubs.h',
- 'code-stubs-hydrogen.cc',
'code-stubs-utils.h',
'codegen.cc',
'codegen.h',
@@ -704,6 +703,8 @@
'compiler/bytecode-liveness-map.cc',
'compiler/bytecode-liveness-map.h',
'compiler/c-linkage.cc',
+ 'compiler/check-elimination.cc',
+ 'compiler/check-elimination.h',
'compiler/checkpoint-elimination.cc',
'compiler/checkpoint-elimination.h',
'compiler/code-generator-impl.h',
@@ -792,8 +793,6 @@
'compiler/jump-threading.h',
'compiler/linkage.cc',
'compiler/linkage.h',
- 'compiler/liveness-analyzer.cc',
- 'compiler/liveness-analyzer.h',
'compiler/live-range-separator.cc',
'compiler/live-range-separator.h',
'compiler/load-elimination.cc',
@@ -839,6 +838,8 @@
'compiler/pipeline.h',
'compiler/pipeline-statistics.cc',
'compiler/pipeline-statistics.h',
+ 'compiler/property-access-builder.cc',
+ 'compiler/property-access-builder.h',
'compiler/raw-machine-assembler.cc',
'compiler/raw-machine-assembler.h',
'compiler/redundancy-elimination.cc',
@@ -869,8 +870,6 @@
'compiler/state-values-utils.h',
'compiler/store-store-elimination.cc',
'compiler/store-store-elimination.h',
- 'compiler/tail-call-optimization.cc',
- 'compiler/tail-call-optimization.h',
'compiler/types.cc',
'compiler/types.h',
'compiler/type-cache.cc',
@@ -908,67 +907,6 @@
'counters-inl.h',
'counters.cc',
'counters.h',
- 'crankshaft/compilation-phase.cc',
- 'crankshaft/compilation-phase.h',
- 'crankshaft/hydrogen-alias-analysis.h',
- 'crankshaft/hydrogen-bce.cc',
- 'crankshaft/hydrogen-bce.h',
- 'crankshaft/hydrogen-canonicalize.cc',
- 'crankshaft/hydrogen-canonicalize.h',
- 'crankshaft/hydrogen-check-elimination.cc',
- 'crankshaft/hydrogen-check-elimination.h',
- 'crankshaft/hydrogen-dce.cc',
- 'crankshaft/hydrogen-dce.h',
- 'crankshaft/hydrogen-dehoist.cc',
- 'crankshaft/hydrogen-dehoist.h',
- 'crankshaft/hydrogen-environment-liveness.cc',
- 'crankshaft/hydrogen-environment-liveness.h',
- 'crankshaft/hydrogen-escape-analysis.cc',
- 'crankshaft/hydrogen-escape-analysis.h',
- 'crankshaft/hydrogen-flow-engine.h',
- 'crankshaft/hydrogen-gvn.cc',
- 'crankshaft/hydrogen-gvn.h',
- 'crankshaft/hydrogen-infer-representation.cc',
- 'crankshaft/hydrogen-infer-representation.h',
- 'crankshaft/hydrogen-infer-types.cc',
- 'crankshaft/hydrogen-infer-types.h',
- 'crankshaft/hydrogen-instructions.cc',
- 'crankshaft/hydrogen-instructions.h',
- 'crankshaft/hydrogen-load-elimination.cc',
- 'crankshaft/hydrogen-load-elimination.h',
- 'crankshaft/hydrogen-mark-unreachable.cc',
- 'crankshaft/hydrogen-mark-unreachable.h',
- 'crankshaft/hydrogen-osr.cc',
- 'crankshaft/hydrogen-osr.h',
- 'crankshaft/hydrogen-range-analysis.cc',
- 'crankshaft/hydrogen-range-analysis.h',
- 'crankshaft/hydrogen-redundant-phi.cc',
- 'crankshaft/hydrogen-redundant-phi.h',
- 'crankshaft/hydrogen-removable-simulates.cc',
- 'crankshaft/hydrogen-removable-simulates.h',
- 'crankshaft/hydrogen-representation-changes.cc',
- 'crankshaft/hydrogen-representation-changes.h',
- 'crankshaft/hydrogen-sce.cc',
- 'crankshaft/hydrogen-sce.h',
- 'crankshaft/hydrogen-store-elimination.cc',
- 'crankshaft/hydrogen-store-elimination.h',
- 'crankshaft/hydrogen-types.cc',
- 'crankshaft/hydrogen-types.h',
- 'crankshaft/hydrogen-uint32-analysis.cc',
- 'crankshaft/hydrogen-uint32-analysis.h',
- 'crankshaft/hydrogen.cc',
- 'crankshaft/hydrogen.h',
- 'crankshaft/lithium-allocator-inl.h',
- 'crankshaft/lithium-allocator.cc',
- 'crankshaft/lithium-allocator.h',
- 'crankshaft/lithium-codegen.cc',
- 'crankshaft/lithium-codegen.h',
- 'crankshaft/lithium.cc',
- 'crankshaft/lithium.h',
- 'crankshaft/lithium-inl.h',
- 'crankshaft/typing.cc',
- 'crankshaft/typing.h',
- 'crankshaft/unique.h',
'date.cc',
'date.h',
'dateparser-inl.h',
@@ -1000,7 +938,6 @@
'double.h',
'dtoa.cc',
'dtoa.h',
- 'effects.h',
'eh-frame.cc',
'eh-frame.h',
'elements-kind.cc',
@@ -1042,6 +979,7 @@
'flag-definitions.h',
'flags.cc',
'flags.h',
+ 'float.h',
'frames-inl.h',
'frames.cc',
'frames.h',
@@ -1063,7 +1001,6 @@
'heap/array-buffer-tracker.h',
'heap/code-stats.cc',
'heap/code-stats.h',
- 'heap/concurrent-marking-deque.h',
'heap/concurrent-marking.cc',
'heap/concurrent-marking.h',
'heap/embedder-tracing.cc',
@@ -1083,16 +1020,17 @@
'heap/incremental-marking.cc',
'heap/incremental-marking.h',
'heap/item-parallel-job.h',
+ 'heap/local-allocator.h',
'heap/mark-compact-inl.h',
'heap/mark-compact.cc',
'heap/mark-compact.h',
+ 'heap/marking.cc',
'heap/marking.h',
'heap/object-stats.cc',
'heap/object-stats.h',
'heap/objects-visiting-inl.h',
'heap/objects-visiting.cc',
'heap/objects-visiting.h',
- 'heap/page-parallel-job.h',
'heap/remembered-set.h',
'heap/scavenge-job.h',
'heap/scavenge-job.cc',
@@ -1107,7 +1045,7 @@
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
- 'heap/workstealing-marking-deque.h',
+ 'heap/worklist.h',
'intl.cc',
'intl.h',
'icu_util.cc',
@@ -1132,6 +1070,7 @@
'identity-map.h',
'interface-descriptors.cc',
'interface-descriptors.h',
+ 'interpreter/block-coverage-builder.h',
'interpreter/bytecodes.cc',
'interpreter/bytecodes.h',
'interpreter/bytecode-array-accessor.cc',
@@ -1220,10 +1159,15 @@
'objects-printer.cc',
'objects.cc',
'objects.h',
+ 'objects/arguments-inl.h',
+ 'objects/arguments.h',
'objects/code-cache.h',
'objects/code-cache-inl.h',
'objects/compilation-cache.h',
'objects/compilation-cache-inl.h',
+ 'objects/debug-objects-inl.h',
+ 'objects/debug-objects.cc',
+ 'objects/debug-objects.h',
'objects/descriptor-array.h',
'objects/dictionary.h',
'objects/frame-array.h',
@@ -1236,12 +1180,20 @@
'objects/literal-objects.h',
'objects/map-inl.h',
'objects/map.h',
+ 'objects/name-inl.h',
+ 'objects/name.h',
'objects/module-info.h',
'objects/object-macros.h',
'objects/object-macros-undef.h',
'objects/regexp-match-info.h',
'objects/scope-info.cc',
'objects/scope-info.h',
+ 'objects/script.h',
+ 'objects/script-inl.h',
+ 'objects/shared-function-info-inl.h',
+ 'objects/shared-function-info.h',
+ 'objects/string-inl.h',
+ 'objects/string.h',
'objects/string-table.h',
'ostreams.cc',
'ostreams.h',
@@ -1430,8 +1382,6 @@
'trap-handler/trap-handler-internal.h',
'type-hints.cc',
'type-hints.h',
- 'type-info.cc',
- 'type-info.h',
'unicode-inl.h',
'unicode.cc',
'unicode.h',
@@ -1458,6 +1408,8 @@
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
+ 'wasm/compilation-manager.cc',
+ 'wasm/compilation-manager.h',
'wasm/decoder.h',
'wasm/function-body-decoder.cc',
'wasm/function-body-decoder.h',
@@ -1465,6 +1417,8 @@
'wasm/leb-helper.h',
'wasm/local-decl-encoder.cc',
'wasm/local-decl-encoder.h',
+ 'wasm/module-compiler.cc',
+ 'wasm/module-compiler.h',
'wasm/module-decoder.cc',
'wasm/module-decoder.h',
'wasm/signature-map.cc',
@@ -1493,6 +1447,7 @@
'wasm/wasm-result.h',
'wasm/wasm-text.cc',
'wasm/wasm-text.h',
+ 'wasm/wasm-value.h',
'zone/accounting-allocator.cc',
'zone/accounting-allocator.h',
'zone/zone-segment.cc',
@@ -1541,12 +1496,6 @@
'compiler/arm/instruction-selector-arm.cc',
'compiler/arm/unwinding-info-writer-arm.h',
'compiler/arm/unwinding-info-writer-arm.cc',
- 'crankshaft/arm/lithium-arm.cc',
- 'crankshaft/arm/lithium-arm.h',
- 'crankshaft/arm/lithium-codegen-arm.cc',
- 'crankshaft/arm/lithium-codegen-arm.h',
- 'crankshaft/arm/lithium-gap-resolver-arm.cc',
- 'crankshaft/arm/lithium-gap-resolver-arm.h',
'debug/arm/debug-arm.cc',
'full-codegen/arm/full-codegen-arm.cc',
'ic/arm/access-compiler-arm.cc',
@@ -1586,6 +1535,7 @@
'arm64/macro-assembler-arm64-inl.h',
'arm64/simulator-arm64.cc',
'arm64/simulator-arm64.h',
+ 'arm64/simulator-logic-arm64.cc',
'arm64/utils-arm64.cc',
'arm64/utils-arm64.h',
'arm64/eh-frame-arm64.cc',
@@ -1595,15 +1545,6 @@
'compiler/arm64/instruction-selector-arm64.cc',
'compiler/arm64/unwinding-info-writer-arm64.h',
'compiler/arm64/unwinding-info-writer-arm64.cc',
- 'crankshaft/arm64/delayed-masm-arm64.cc',
- 'crankshaft/arm64/delayed-masm-arm64.h',
- 'crankshaft/arm64/delayed-masm-arm64-inl.h',
- 'crankshaft/arm64/lithium-arm64.cc',
- 'crankshaft/arm64/lithium-arm64.h',
- 'crankshaft/arm64/lithium-codegen-arm64.cc',
- 'crankshaft/arm64/lithium-codegen-arm64.h',
- 'crankshaft/arm64/lithium-gap-resolver-arm64.cc',
- 'crankshaft/arm64/lithium-gap-resolver-arm64.h',
'debug/arm64/debug-arm64.cc',
'full-codegen/arm64/full-codegen-arm64.cc',
'ic/arm64/access-compiler-arm64.cc',
@@ -1637,12 +1578,6 @@
'compiler/ia32/instruction-codes-ia32.h',
'compiler/ia32/instruction-scheduler-ia32.cc',
'compiler/ia32/instruction-selector-ia32.cc',
- 'crankshaft/ia32/lithium-codegen-ia32.cc',
- 'crankshaft/ia32/lithium-codegen-ia32.h',
- 'crankshaft/ia32/lithium-gap-resolver-ia32.cc',
- 'crankshaft/ia32/lithium-gap-resolver-ia32.h',
- 'crankshaft/ia32/lithium-ia32.cc',
- 'crankshaft/ia32/lithium-ia32.h',
'debug/ia32/debug-ia32.cc',
'full-codegen/ia32/full-codegen-ia32.cc',
'ic/ia32/access-compiler-ia32.cc',
@@ -1652,44 +1587,6 @@
'regexp/ia32/regexp-macro-assembler-ia32.h',
],
}],
- ['v8_target_arch=="x87"', {
- 'sources': [ ### gcmole(arch:x87) ###
- 'x87/assembler-x87-inl.h',
- 'x87/assembler-x87.cc',
- 'x87/assembler-x87.h',
- 'x87/code-stubs-x87.cc',
- 'x87/code-stubs-x87.h',
- 'x87/codegen-x87.cc',
- 'x87/codegen-x87.h',
- 'x87/cpu-x87.cc',
- 'x87/deoptimizer-x87.cc',
- 'x87/disasm-x87.cc',
- 'x87/frames-x87.cc',
- 'x87/frames-x87.h',
- 'x87/interface-descriptors-x87.cc',
- 'x87/macro-assembler-x87.cc',
- 'x87/macro-assembler-x87.h',
- 'x87/simulator-x87.cc',
- 'x87/simulator-x87.h',
- 'compiler/x87/code-generator-x87.cc',
- 'compiler/x87/instruction-codes-x87.h',
- 'compiler/x87/instruction-scheduler-x87.cc',
- 'compiler/x87/instruction-selector-x87.cc',
- 'crankshaft/x87/lithium-codegen-x87.cc',
- 'crankshaft/x87/lithium-codegen-x87.h',
- 'crankshaft/x87/lithium-gap-resolver-x87.cc',
- 'crankshaft/x87/lithium-gap-resolver-x87.h',
- 'crankshaft/x87/lithium-x87.cc',
- 'crankshaft/x87/lithium-x87.h',
- 'debug/x87/debug-x87.cc',
- 'full-codegen/x87/full-codegen-x87.cc',
- 'ic/x87/access-compiler-x87.cc',
- 'ic/x87/handler-compiler-x87.cc',
- 'ic/x87/ic-x87.cc',
- 'regexp/x87/regexp-macro-assembler-x87.cc',
- 'regexp/x87/regexp-macro-assembler-x87.h',
- ],
- }],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'mips/assembler-mips.cc',
@@ -1715,12 +1612,6 @@
'compiler/mips/instruction-codes-mips.h',
'compiler/mips/instruction-scheduler-mips.cc',
'compiler/mips/instruction-selector-mips.cc',
- 'crankshaft/mips/lithium-codegen-mips.cc',
- 'crankshaft/mips/lithium-codegen-mips.h',
- 'crankshaft/mips/lithium-gap-resolver-mips.cc',
- 'crankshaft/mips/lithium-gap-resolver-mips.h',
- 'crankshaft/mips/lithium-mips.cc',
- 'crankshaft/mips/lithium-mips.h',
'full-codegen/mips/full-codegen-mips.cc',
'debug/mips/debug-mips.cc',
'ic/mips/access-compiler-mips.cc',
@@ -1755,12 +1646,6 @@
'compiler/mips64/instruction-codes-mips64.h',
'compiler/mips64/instruction-scheduler-mips64.cc',
'compiler/mips64/instruction-selector-mips64.cc',
- 'crankshaft/mips64/lithium-codegen-mips64.cc',
- 'crankshaft/mips64/lithium-codegen-mips64.h',
- 'crankshaft/mips64/lithium-gap-resolver-mips64.cc',
- 'crankshaft/mips64/lithium-gap-resolver-mips64.h',
- 'crankshaft/mips64/lithium-mips64.cc',
- 'crankshaft/mips64/lithium-mips64.h',
'debug/mips64/debug-mips64.cc',
'full-codegen/mips64/full-codegen-mips64.cc',
'ic/mips64/access-compiler-mips64.cc',
@@ -1778,12 +1663,6 @@
'compiler/x64/instruction-selector-x64.cc',
'compiler/x64/unwinding-info-writer-x64.h',
'compiler/x64/unwinding-info-writer-x64.cc',
- 'crankshaft/x64/lithium-codegen-x64.cc',
- 'crankshaft/x64/lithium-codegen-x64.h',
- 'crankshaft/x64/lithium-gap-resolver-x64.cc',
- 'crankshaft/x64/lithium-gap-resolver-x64.h',
- 'crankshaft/x64/lithium-x64.cc',
- 'crankshaft/x64/lithium-x64.h',
'x64/assembler-x64-inl.h',
'x64/assembler-x64.cc',
'x64/assembler-x64.h',
@@ -1822,12 +1701,6 @@
'compiler/ppc/instruction-codes-ppc.h',
'compiler/ppc/instruction-scheduler-ppc.cc',
'compiler/ppc/instruction-selector-ppc.cc',
- 'crankshaft/ppc/lithium-ppc.cc',
- 'crankshaft/ppc/lithium-ppc.h',
- 'crankshaft/ppc/lithium-codegen-ppc.cc',
- 'crankshaft/ppc/lithium-codegen-ppc.h',
- 'crankshaft/ppc/lithium-gap-resolver-ppc.cc',
- 'crankshaft/ppc/lithium-gap-resolver-ppc.h',
'debug/ppc/debug-ppc.cc',
'full-codegen/ppc/full-codegen-ppc.cc',
'ic/ppc/access-compiler-ppc.cc',
@@ -1862,12 +1735,6 @@
'compiler/s390/instruction-codes-s390.h',
'compiler/s390/instruction-scheduler-s390.cc',
'compiler/s390/instruction-selector-s390.cc',
- 'crankshaft/s390/lithium-codegen-s390.cc',
- 'crankshaft/s390/lithium-codegen-s390.h',
- 'crankshaft/s390/lithium-gap-resolver-s390.cc',
- 'crankshaft/s390/lithium-gap-resolver-s390.h',
- 'crankshaft/s390/lithium-s390.cc',
- 'crankshaft/s390/lithium-s390.h',
'debug/s390/debug-s390.cc',
'full-codegen/s390/full-codegen-s390.cc',
'ic/s390/access-compiler-s390.cc',
@@ -1944,6 +1811,8 @@
}, { # v8_enable_i18n_support==0
'sources!': [
'builtins/builtins-intl.cc',
+ 'builtins/builtins-intl.h',
+ 'char-predicates.cc',
'intl.cc',
'intl.h',
'objects/intl-objects.cc',
@@ -2004,6 +1873,7 @@
'base/macros.h',
'base/once.cc',
'base/once.h',
+ 'base/optional.h',
'base/platform/elapsed-timer.h',
'base/platform/time.cc',
'base/platform/time.h',
@@ -2021,6 +1891,7 @@
'base/safe_math_impl.h',
'base/sys-info.cc',
'base/sys-info.h',
+ 'base/template-utils.h',
'base/timezone-cache.h',
'base/utils/random-number-generator.cc',
'base/utils/random-number-generator.h',
@@ -2206,6 +2077,12 @@
'base/platform/platform-posix.cc'
]},
],
+ ['OS=="fuchsia"', {
+ 'sources': [
+ 'base/debug/stack_trace_fuchsia.cc',
+ 'base/platform/platform-fuchsia.cc',
+ ]},
+ ],
['OS=="solaris"', {
'link_settings': {
'libraries': [
@@ -2440,8 +2317,6 @@
'js/typedarray.js',
'js/collection.js',
'js/weak-collection.js',
- 'js/collection-iterator.js',
- 'js/promise.js',
'js/messages.js',
'js/templates.js',
'js/spread.js',
@@ -2569,6 +2444,12 @@
'objects-inl.h',
'objects/map.h',
'objects/map-inl.h',
+ 'objects/script.h',
+ 'objects/script-inl.h',
+ 'objects/shared-function-info.h',
+ 'objects/shared-function-info-inl.h',
+ 'objects/string.h',
+ 'objects/string-inl.h',
],
},
'actions': [
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index c0470c5b3c..202323ec0d 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -32,7 +32,7 @@ void Locker::Initialize(v8::Isolate* isolate) {
top_level_ = true;
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
// Record that the Locker has been used at least once.
- base::NoBarrier_Store(&g_locker_was_ever_used_, 1);
+ base::Relaxed_Store(&g_locker_was_ever_used_, 1);
// Get the big lock if necessary.
if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
isolate_->thread_manager()->Lock();
@@ -60,7 +60,7 @@ bool Locker::IsLocked(v8::Isolate* isolate) {
bool Locker::IsActive() {
- return !!base::NoBarrier_Load(&g_locker_was_ever_used_);
+ return !!base::Relaxed_Load(&g_locker_was_ever_used_);
}
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 2ba06c170b..a6e741042f 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -449,12 +449,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_OBJECT_TYPE:
case JS_API_OBJECT_TYPE: {
Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
- Map* map = js_object->map();
- if (!FLAG_wasm_disable_structured_cloning &&
- map->GetConstructor() ==
- isolate_->native_context()->wasm_module_constructor()) {
- return WriteWasmModule(js_object);
- } else if (JSObject::GetEmbedderFieldCount(map)) {
+ if (JSObject::GetEmbedderFieldCount(js_object->map())) {
return WriteHostObject(js_object);
} else {
return WriteJSObject(js_object);
@@ -479,6 +474,11 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
+ case WASM_MODULE_TYPE:
+ if (!FLAG_wasm_disable_structured_cloning) {
+ // Only write WebAssembly modules if not disabled by a flag.
+ return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
+ } // fall through to error case
default:
ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
return Nothing<bool>();
@@ -560,7 +560,7 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
// existed (as only indices which were enumerable own properties at this point
// should be serialized).
const bool should_serialize_densely =
- array->HasFastElements() && !array->HasFastHoleyElements();
+ array->HasFastElements() && !array->HasHoleyElements();
if (should_serialize_densely) {
DCHECK_LE(length, static_cast<uint32_t>(FixedArray::kMaxLength));
@@ -568,16 +568,16 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
WriteVarint<uint32_t>(length);
uint32_t i = 0;
- // Fast paths. Note that FAST_ELEMENTS in particular can bail due to the
+ // Fast paths. Note that PACKED_ELEMENTS in particular can bail due to the
// structure of the elements changing.
switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS: {
+ case PACKED_SMI_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(array->elements()),
isolate_);
for (; i < length; i++) WriteSmi(Smi::cast(elements->get(i)));
break;
}
- case FAST_DOUBLE_ELEMENTS: {
+ case PACKED_DOUBLE_ELEMENTS: {
// Elements are empty_fixed_array, not a FixedDoubleArray, if the array
// is empty. No elements to encode in this case anyhow.
if (length == 0) break;
@@ -589,11 +589,11 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
}
break;
}
- case FAST_ELEMENTS: {
+ case PACKED_ELEMENTS: {
Handle<Object> old_length(array->length(), isolate_);
for (; i < length; i++) {
if (array->length() != *old_length ||
- array->GetElementsKind() != FAST_ELEMENTS) {
+ array->GetElementsKind() != PACKED_ELEMENTS) {
// Fall back to slow path.
break;
}
@@ -815,11 +815,13 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
return ThrowIfOutOfMemory();
}
-Maybe<bool> ValueSerializer::WriteWasmModule(Handle<JSObject> object) {
+Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
if (delegate_ != nullptr) {
+ // TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject.
Maybe<uint32_t> transfer_id = delegate_->GetWasmModuleTransferId(
reinterpret_cast<v8::Isolate*>(isolate_),
- v8::Local<v8::WasmCompiledModule>::Cast(Utils::ToLocal(object)));
+ v8::Local<v8::WasmCompiledModule>::Cast(
+ Utils::ToLocal(Handle<JSObject>::cast(object))));
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
uint32_t id = 0;
if (transfer_id.To(&id)) {
@@ -829,8 +831,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<JSObject> object) {
}
}
- Handle<WasmCompiledModule> compiled_part(
- WasmCompiledModule::cast(object->GetEmbedderField(0)), isolate_);
+ Handle<WasmCompiledModule> compiled_part(object->compiled_module(), isolate_);
WasmEncodingTag encoding_tag = WasmEncodingTag::kRawBytes;
WriteTag(SerializationTag::kWasmModule);
WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
@@ -1064,14 +1065,12 @@ void ValueDeserializer::TransferArrayBuffer(
uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
if (array_buffer_transfer_map_.is_null()) {
array_buffer_transfer_map_ = isolate_->global_handles()->Create(
- *SeededNumberDictionary::New(isolate_, 0));
+ *UnseededNumberDictionary::New(isolate_, 0));
}
- Handle<SeededNumberDictionary> dictionary =
+ Handle<UnseededNumberDictionary> dictionary =
array_buffer_transfer_map_.ToHandleChecked();
- Handle<JSObject> not_a_prototype_holder;
- Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AtNumberPut(dictionary, transfer_id, array_buffer,
- not_a_prototype_holder);
+ Handle<UnseededNumberDictionary> new_dictionary =
+ UnseededNumberDictionary::Set(dictionary, transfer_id, array_buffer);
if (!new_dictionary.is_identical_to(dictionary)) {
GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
array_buffer_transfer_map_ =
@@ -1366,7 +1365,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
uint32_t id = next_id_++;
HandleScope scope(isolate_);
Handle<JSArray> array = isolate_->factory()->NewJSArray(
- FAST_HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+ HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
pretenure_);
AddObjectWithID(id, array);
@@ -1451,7 +1450,6 @@ MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
}
default:
UNREACHABLE();
- return MaybeHandle<JSValue>();
}
AddObjectWithID(id, value);
return value;
@@ -1581,13 +1579,13 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer(
bool is_shared) {
uint32_t id = next_id_++;
uint32_t transfer_id;
- Handle<SeededNumberDictionary> transfer_map;
+ Handle<UnseededNumberDictionary> transfer_map;
if (!ReadVarint<uint32_t>().To(&transfer_id) ||
!array_buffer_transfer_map_.ToHandle(&transfer_map)) {
return MaybeHandle<JSArrayBuffer>();
}
int index = transfer_map->FindEntry(isolate_, transfer_id);
- if (index == SeededNumberDictionary::kNotFound) {
+ if (index == UnseededNumberDictionary::kNotFound) {
return MaybeHandle<JSArrayBuffer>();
}
Handle<JSArrayBuffer> array_buffer(
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index ef424698d0..43c73cbb56 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -125,7 +125,8 @@ class ValueSerializer {
Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
- Maybe<bool> WriteWasmModule(Handle<JSObject> object) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
+ WARN_UNUSED_RESULT;
Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
/*
@@ -295,7 +296,7 @@ class ValueDeserializer {
// Always global handles.
Handle<FixedArray> id_map_;
- MaybeHandle<SeededNumberDictionary> array_buffer_transfer_map_;
+ MaybeHandle<UnseededNumberDictionary> array_buffer_transfer_map_;
DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 35b69a1ddc..df1a7202e5 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -32,7 +32,6 @@ inline const char* StateToString(StateTag state) {
return "EXTERNAL";
default:
UNREACHABLE();
- return NULL;
}
}
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index 29cbf39593..38852e5571 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/counters.h"
-#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index c698fc4776..dfcaa91d2e 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -8,3 +8,5 @@ gdeepti@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
titzer@chromium.org
+
+# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/compilation-manager.cc b/deps/v8/src/wasm/compilation-manager.cc
new file mode 100644
index 0000000000..01e0755e14
--- /dev/null
+++ b/deps/v8/src/wasm/compilation-manager.cc
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/compilation-manager.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void CompilationManager::StartAsyncCompileJob(
+ Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context, Handle<JSPromise> promise) {
+ std::shared_ptr<AsyncCompileJob> job(new AsyncCompileJob(
+ isolate, std::move(bytes_copy), length, context, promise));
+ jobs_.insert({job.get(), job});
+ job->Start();
+}
+
+void CompilationManager::RemoveJob(AsyncCompileJob* job) {
+ size_t num_removed = jobs_.erase(job);
+ USE(num_removed);
+ DCHECK_EQ(1, num_removed);
+}
+
+void CompilationManager::TearDown() { jobs_.clear(); }
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/compilation-manager.h b/deps/v8/src/wasm/compilation-manager.h
new file mode 100644
index 0000000000..85b6fd5ce2
--- /dev/null
+++ b/deps/v8/src/wasm/compilation-manager.h
@@ -0,0 +1,44 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_COMPILATION_MANAGER_H_
+#define V8_WASM_COMPILATION_MANAGER_H_
+
+#include <vector>
+
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/wasm/module-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The CompilationManager manages a list of active WebAssembly compile jobs.
+// The manager owns the memory of the compile jobs and can abort them. When
+// the isolate is torn down, the CompilationManager makes sure that all
+// compile jobs finish executing before the isolate becomes unavailable.
+class CompilationManager {
+ public:
+ void StartAsyncCompileJob(Isolate* isolate,
+ std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context, Handle<JSPromise> promise);
+
+ // Removes {job} from the list of active compile jobs. This will delete {job}.
+ void RemoveJob(AsyncCompileJob* job);
+
+ void TearDown();
+
+ private:
+  // The raw AsyncCompileJob pointer is used as the key to its own shared_ptr
+  // so that a job can be removed (and thereby deleted) when it is finished.
+ std::unordered_map<AsyncCompileJob*, std::shared_ptr<AsyncCompileJob>> jobs_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_COMPILATION_MANAGER_H_
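The CompilationManager above keeps each AsyncCompileJob alive through a shared_ptr stored in a map keyed by the job's own raw pointer, so a finished job can be dropped (and deleted) by key. A minimal sketch of that ownership pattern, using hypothetical Job/Manager names rather than the V8 classes:

#include <memory>
#include <unordered_map>

struct Job {};  // stand-in for AsyncCompileJob

class Manager {
 public:
  Job* Start() {
    auto job = std::make_shared<Job>();
    Job* key = job.get();
    jobs_.insert({key, std::move(job)});  // the manager now owns the job
    return key;
  }
  // Erasing the entry drops the last shared_ptr and deletes the job, which is
  // why a job must not touch its own state after calling Remove(this).
  void Remove(Job* job) { jobs_.erase(job); }

 private:
  std::unordered_map<Job*, std::shared_ptr<Job>> jobs_;
};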
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 5f242ac1aa..4f0548abb1 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -139,8 +139,7 @@ class Decoder {
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
void consume_bytes(uint32_t size, const char* name = "skip") {
// Only trace if the name is not null.
- TRACE_IF(name, " +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_),
- name, size);
+ TRACE_IF(name, " +%u %-20s: %d bytes\n", pc_offset(), name, size);
if (checkAvailable(size)) {
pc_ += size;
} else {
@@ -268,7 +267,7 @@ class Decoder {
template <typename IntType>
inline IntType consume_little_endian(const char* name) {
- TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_), name);
+ TRACE(" +%u %-20s: ", pc_offset(), name);
if (!checkAvailable(sizeof(IntType))) {
traceOffEnd();
pc_ = end_;
@@ -285,7 +284,7 @@ class Decoder {
inline IntType read_leb(const byte* pc, uint32_t* length,
const char* name = "varint") {
DCHECK_IMPLIES(advance_pc, pc == pc_);
- TRACE_IF(trace, " +%d %-20s: ", static_cast<int>(pc - start_), name);
+ TRACE_IF(trace, " +%u %-20s: ", pc_offset(), name);
return read_leb_tail<IntType, checked, advance_pc, trace, 0>(pc, length,
name, 0);
}
@@ -302,7 +301,7 @@ class Decoder {
const bool at_end = checked && pc >= end_;
byte b = 0;
if (!at_end) {
- DCHECK_LT(pc_, end_);
+ DCHECK_LT(pc, end_);
b = *pc;
TRACE_IF(trace, "%02x ", b);
result = result | ((static_cast<IntType>(b) & 0x7f) << shift);
@@ -344,7 +343,7 @@ class Decoder {
}
}
constexpr int sign_ext_shift =
- is_signed && !is_last_byte ? 8 * sizeof(IntType) - shift - 7 : 0;
+ is_signed ? Max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
// Perform sign extension.
result = (result << sign_ext_shift) >> sign_ext_shift;
if (trace && is_signed) {
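The new sign_ext_shift expression above sign-extends on every byte and clamps the shift amount to zero once the byte's payload already reaches the top of IntType. A minimal standalone sketch of the same left-shift / arithmetic-right-shift trick for a single-byte signed LEB128 value (illustrative names, not V8 code; like the decoder, it relies on arithmetic right shift of signed integers):

#include <cassert>
#include <cstdint>

int32_t DecodeOneByteSignedLeb(uint8_t b) {
  int32_t result = b & 0x7f;              // 7 payload bits, shift == 0
  const int sign_ext_shift = 32 - 0 - 7;  // Max(0, 8 * sizeof(int32_t) - shift - 7) == 25
  // Shifting the payload to the top and back sign-extends bit 6.
  return (result << sign_ext_shift) >> sign_ext_shift;
}

int main() {
  assert(DecodeOneByteSignedLeb(0x3f) == 63);  // bit 6 clear: positive
  assert(DecodeOneByteSignedLeb(0x7f) == -1);  // bit 6 set: sign-extended
  return 0;
}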
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 0df04e7ee0..ec295cb0e0 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -100,7 +100,7 @@ struct BlockTypeOperand {
types = pc + 1;
} else {
// Handle multi-value blocks.
- if (!CHECKED_COND(FLAG_wasm_mv_prototype)) {
+ if (!CHECKED_COND(FLAG_experimental_wasm_mv)) {
decoder->error(pc + 1, "invalid block arity > 1");
return;
}
@@ -152,15 +152,6 @@ struct BlockTypeOperand {
case kLocalS128:
*result = kWasmS128;
return true;
- case kLocalS1x4:
- *result = kWasmS1x4;
- return true;
- case kLocalS1x8:
- *result = kWasmS1x8;
- return true;
- case kLocalS1x16:
- *result = kWasmS1x16;
- return true;
default:
*result = kWasmStmt;
return false;
@@ -322,15 +313,13 @@ struct SimdShiftOperand {
}
};
-// Operand for SIMD shuffle operations.
+// Operand for SIMD S8x16 shuffle operations.
template <bool checked>
-struct SimdShuffleOperand {
- uint8_t shuffle[16];
- unsigned lanes;
+struct Simd8x16ShuffleOperand {
+ uint8_t shuffle[kSimd128Size];
- inline SimdShuffleOperand(Decoder* decoder, const byte* pc, unsigned lanes_) {
- lanes = lanes_;
- for (unsigned i = 0; i < lanes; i++) {
+ inline Simd8x16ShuffleOperand(Decoder* decoder, const byte* pc) {
+ for (uint32_t i = 0; i < kSimd128Size; ++i) {
shuffle[i] = decoder->read_u8<checked>(pc + 2 + i, "shuffle");
}
}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index df74485a33..f1224070d6 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -35,15 +35,19 @@ namespace wasm {
#define TRACE(...)
#endif
-#define CHECK_PROTOTYPE_OPCODE(flag) \
- if (module_ != nullptr && module_->is_asm_js()) { \
- error("Opcode not supported for asmjs modules"); \
- } \
- if (!FLAG_##flag) { \
- error("Invalid opcode (enable with --" #flag ")"); \
- break; \
+#define CHECK_PROTOTYPE_OPCODE(flag) \
+ if (module_ != nullptr && module_->is_asm_js()) { \
+ error("Opcode not supported for asmjs modules"); \
+ } \
+ if (!FLAG_experimental_wasm_##flag) { \
+ error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
+ break; \
}
+#define PROTOTYPE_NOT_FUNCTIONAL(opcode) \
+ errorf(pc_, "Prototype still not functional: %s", \
+ WasmOpcodes::OpcodeName(opcode));
+
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
@@ -146,22 +150,6 @@ struct Control {
}
};
-namespace {
-inline unsigned GetShuffleMaskSize(WasmOpcode opcode) {
- switch (opcode) {
- case kExprS32x4Shuffle:
- return 4;
- case kExprS16x8Shuffle:
- return 8;
- case kExprS8x16Shuffle:
- return 16;
- default:
- UNREACHABLE();
- return 0;
- }
-}
-} // namespace
-
// Macros that build nodes only if there is a graph and the current SSA
// environment is reachable from start. This avoids problems with malformed
// TF graphs when decoding inputs that have unreachable code.
@@ -174,8 +162,8 @@ inline unsigned GetShuffleMaskSize(WasmOpcode opcode) {
class WasmDecoder : public Decoder {
public:
WasmDecoder(const WasmModule* module, FunctionSig* sig, const byte* start,
- const byte* end)
- : Decoder(start, end),
+ const byte* end, uint32_t buffer_offset = 0)
+ : Decoder(start, end, buffer_offset),
module_(module),
sig_(sig),
local_types_(nullptr) {}
@@ -229,15 +217,6 @@ class WasmDecoder : public Decoder {
case kLocalS128:
type = kWasmS128;
break;
- case kLocalS1x4:
- type = kWasmS1x4;
- break;
- case kLocalS1x8:
- type = kWasmS1x8;
- break;
- case kLocalS1x16:
- type = kWasmS1x16;
- break;
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -431,13 +410,12 @@ class WasmDecoder : public Decoder {
}
}
- inline bool Validate(const byte* pc, WasmOpcode opcode,
- SimdShuffleOperand<true>& operand) {
- unsigned lanes = GetShuffleMaskSize(opcode);
+ inline bool Validate(const byte* pc, Simd8x16ShuffleOperand<true>& operand) {
uint8_t max_lane = 0;
- for (unsigned i = 0; i < lanes; i++)
+ for (uint32_t i = 0; i < kSimd128Size; ++i)
max_lane = std::max(max_lane, operand.shuffle[i]);
- if (operand.lanes != lanes || max_lane > 2 * lanes) {
+ // Shuffle indices must be in [0..31] for a 16 lane shuffle.
+ if (max_lane > 2 * kSimd128Size) {
error(pc_ + 2, "invalid shuffle mask");
return false;
} else {
@@ -521,20 +499,21 @@ class WasmDecoder : public Decoder {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- {
- return 2;
- }
+ return 2;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
+ return 3;
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
{
- return 3;
+ MemoryAccessOperand<true> operand(decoder, pc + 1, UINT32_MAX);
+ return 2 + operand.length;
}
- // Shuffles contain a byte array to determine the shuffle.
- case kExprS32x4Shuffle:
- case kExprS16x8Shuffle:
+ // Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS8x16Shuffle:
- return 2 + GetShuffleMaskSize(opcode);
+ return 2 + kSimd128Size;
default:
decoder->error(pc, "invalid SIMD opcode");
return 2;
@@ -551,14 +530,19 @@ class WasmDecoder : public Decoder {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) return {sig->parameter_count(), sig->return_count()};
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
+ }
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
// clang-format off
switch (opcode) {
case kExprSelect:
return {3, 1};
+ case kExprS128StoreMem:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
+ case kExprS128LoadMem:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
case kExprTeeLocal:
case kExprGrowMemory:
@@ -600,7 +584,8 @@ class WasmDecoder : public Decoder {
case kExprUnreachable:
return {0, 0};
default:
- V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x", opcode);
+ V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
+ WasmOpcodes::OpcodeName(opcode));
return {0, 0};
}
#undef DECLARE_OPCODE_CASE
@@ -610,7 +595,7 @@ class WasmDecoder : public Decoder {
static const int32_t kNullCatch = -1;
-// The full WASM decoder for bytecode. Verifies bytecode and, optionally,
+// The full wasm decoder for bytecode. Verifies bytecode and, optionally,
// generates a TurboFan IR graph.
class WasmFullDecoder : public WasmDecoder {
public:
@@ -644,6 +629,7 @@ class WasmFullDecoder : public WasmDecoder {
WasmDecoder::DecodeLocals(this, sig_, local_types_);
InitSsaEnv();
DecodeFunctionBody();
+ FinishFunction();
if (failed()) return TraceFailed();
@@ -674,18 +660,17 @@ class WasmFullDecoder : public WasmDecoder {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n",
- baserel(start_ + error_offset_), error_offset_, error_msg_.c_str());
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", error_offset_,
+ GetBufferRelativeOffset(error_offset_), error_msg_.c_str());
return false;
}
private:
WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
TFBuilder* builder, const FunctionBody& body)
- : WasmDecoder(module, body.sig, body.start, body.end),
+ : WasmDecoder(module, body.sig, body.start, body.end, body.offset),
zone_(zone),
builder_(builder),
- base_(body.base),
local_type_vec_(zone),
stack_(zone),
control_(zone),
@@ -698,7 +683,6 @@ class WasmFullDecoder : public WasmDecoder {
Zone* zone_;
TFBuilder* builder_;
- const byte* base_;
SsaEnv* ssa_env_;
@@ -742,11 +726,6 @@ class WasmFullDecoder : public WasmDecoder {
ssa_env->control = start;
ssa_env->effect = start;
SetEnv("initial", ssa_env);
- if (builder_) {
- // The function-prologue stack check is associated with position 0, which
- // is never a position of any instruction in the function.
- builder_->StackCheck(0);
- }
}
TFNode* DefaultValue(ValueType type) {
@@ -761,27 +740,9 @@ class WasmFullDecoder : public WasmDecoder {
return builder_->Float64Constant(0);
case kWasmS128:
return builder_->S128Zero();
- case kWasmS1x4:
- return builder_->S1x4Zero();
- case kWasmS1x8:
- return builder_->S1x8Zero();
- case kWasmS1x16:
- return builder_->S1x16Zero();
default:
UNREACHABLE();
- return nullptr;
- }
- }
-
- char* indentation() {
- static const int kMaxIndent = 64;
- static char bytes[kMaxIndent + 1];
- for (int i = 0; i < kMaxIndent; ++i) bytes[i] = ' ';
- bytes[kMaxIndent] = 0;
- if (stack_.size() < kMaxIndent / 2) {
- bytes[stack_.size() * 2] = 0;
}
- return bytes;
}
bool CheckHasMemory() {
@@ -793,9 +754,9 @@ class WasmFullDecoder : public WasmDecoder {
// Decodes the body of a function.
void DecodeFunctionBody() {
- TRACE("wasm-decode %p...%p (module+%d, %d bytes) %s\n",
- reinterpret_cast<const void*>(start_),
- reinterpret_cast<const void*>(end_), baserel(pc_),
+ TRACE("wasm-decode %p...%p (module+%u, %d bytes) %s\n",
+ reinterpret_cast<const void*>(start()),
+ reinterpret_cast<const void*>(end()), pc_offset(),
static_cast<int>(end_ - start_), builder_ ? "graph building" : "");
{
@@ -844,8 +805,16 @@ class WasmFullDecoder : public WasmDecoder {
len = 1 + operand.length;
break;
}
+ case kExprRethrow: {
+ // TODO(kschimpf): Implement.
+ CHECK_PROTOTYPE_OPCODE(eh);
+ PROTOTYPE_NOT_FUNCTIONAL(opcode);
+ break;
+ }
case kExprThrow: {
- CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ // TODO(kschimpf): Fix to use type signature of exception.
+ CHECK_PROTOTYPE_OPCODE(eh);
+ PROTOTYPE_NOT_FUNCTIONAL(opcode);
Value value = Pop(0, kWasmI32);
BUILD(Throw, value.node);
// TODO(titzer): Throw should end control, but currently we build a
@@ -855,7 +824,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprTry: {
- CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ CHECK_PROTOTYPE_OPCODE(eh);
BlockTypeOperand<true> operand(this, pc_);
SsaEnv* outer_env = ssa_env_;
SsaEnv* try_env = Steal(outer_env);
@@ -867,7 +836,9 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprCatch: {
- CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ // TODO(kschimpf): Fix to use type signature of exception.
+ CHECK_PROTOTYPE_OPCODE(eh);
+ PROTOTYPE_NOT_FUNCTIONAL(opcode);
LocalIndexOperand<true> operand(this, pc_);
len = 1 + operand.length;
@@ -906,6 +877,12 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
+ case kExprCatchAll: {
+ // TODO(kschimpf): Implement.
+ CHECK_PROTOTYPE_OPCODE(eh);
+ PROTOTYPE_NOT_FUNCTIONAL(opcode);
+ break;
+ }
case kExprLoop: {
BlockTypeOperand<true> operand(this, pc_);
SsaEnv* finish_try_env = Steal(ssa_env_);
@@ -1263,10 +1240,6 @@ class WasmFullDecoder : public WasmDecoder {
case kExprF64LoadMem:
len = DecodeLoadMem(kWasmF64, MachineType::Float64());
break;
- case kExprS128LoadMem:
- CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
- len = DecodeLoadMem(kWasmS128, MachineType::Simd128());
- break;
case kExprI32StoreMem8:
len = DecodeStoreMem(kWasmI32, MachineType::Int8());
break;
@@ -1294,10 +1267,6 @@ class WasmFullDecoder : public WasmDecoder {
case kExprF64StoreMem:
len = DecodeStoreMem(kWasmF64, MachineType::Float64());
break;
- case kExprS128StoreMem:
- CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
- len = DecodeStoreMem(kWasmS128, MachineType::Simd128());
- break;
case kExprGrowMemory: {
if (!CheckHasMemory()) break;
MemoryIndexOperand<true> operand(this, pc_);
@@ -1343,7 +1312,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kSimdPrefix: {
- CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
+ CHECK_PROTOTYPE_OPCODE(simd);
len++;
byte simd_index = read_u8<true>(pc_ + 1, "simd index");
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
@@ -1357,10 +1326,7 @@ class WasmFullDecoder : public WasmDecoder {
error("Atomics are allowed only in AsmJs modules");
break;
}
- if (!FLAG_wasm_atomics_prototype) {
- error("Invalid opcode (enable with --wasm_atomics_prototype)");
- break;
- }
+ CHECK_PROTOTYPE_OPCODE(threads);
len = 2;
byte atomic_opcode = read_u8<true>(pc_ + 1, "atomic index");
opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_opcode);
@@ -1455,6 +1421,10 @@ class WasmFullDecoder : public WasmDecoder {
if (pc_ > end_ && ok()) error("Beyond end of code");
}
+ void FinishFunction() {
+ if (builder_) builder_->PatchInStackCheckIfNeeded();
+ }
+
void EndControl() {
ssa_env_->Kill(SsaEnv::kControlEnd);
if (!control_.empty()) {
@@ -1539,16 +1509,39 @@ class WasmFullDecoder : public WasmDecoder {
Value val = Pop(1, type);
Value index = Pop(0, kWasmI32);
BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
- val.node, position());
+ val.node, position(), type);
return 1 + operand.length;
}
+ int DecodePrefixedLoadMem(ValueType type, MachineType mem_type) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessOperand<true> operand(
+ this, pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
+
+ Value index = Pop(0, kWasmI32);
+ TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
+ operand.alignment, position());
+ Push(type, node);
+ return operand.length;
+ }
+
+ int DecodePrefixedStoreMem(ValueType type, MachineType mem_type) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessOperand<true> operand(
+ this, pc_ + 1, ElementSizeLog2Of(mem_type.representation()));
+ Value val = Pop(1, type);
+ Value index = Pop(0, kWasmI32);
+ BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
+ val.node, position(), type);
+ return operand.length;
+ }
+
unsigned SimdExtractLane(WasmOpcode opcode, ValueType type) {
SimdLaneOperand<true> operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
- TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
+ TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs.data());
Push(type, node);
}
return operand.length;
@@ -1560,7 +1553,7 @@ class WasmFullDecoder : public WasmDecoder {
compiler::NodeVector inputs(2, zone_);
inputs[1] = Pop(1, type).node;
inputs[0] = Pop(0, ValueType::kSimd128).node;
- TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
+ TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs.data());
Push(ValueType::kSimd128, node);
}
return operand.length;
@@ -1571,23 +1564,22 @@ class WasmFullDecoder : public WasmDecoder {
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
- TFNode* node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
+ TFNode* node = BUILD(SimdShiftOp, opcode, operand.shift, inputs.data());
Push(ValueType::kSimd128, node);
}
return operand.length;
}
- unsigned SimdShuffleOp(WasmOpcode opcode) {
- SimdShuffleOperand<true> operand(this, pc_, GetShuffleMaskSize(opcode));
- if (Validate(pc_, opcode, operand)) {
+ unsigned Simd8x16ShuffleOp() {
+ Simd8x16ShuffleOperand<true> operand(this, pc_);
+ if (Validate(pc_, operand)) {
compiler::NodeVector inputs(2, zone_);
inputs[1] = Pop(1, ValueType::kSimd128).node;
inputs[0] = Pop(0, ValueType::kSimd128).node;
- TFNode* node =
- BUILD(SimdShuffleOp, operand.shuffle, operand.lanes, inputs);
+ TFNode* node = BUILD(Simd8x16ShuffleOp, operand.shuffle, inputs.data());
Push(ValueType::kSimd128, node);
}
- return operand.lanes;
+ return 16;
}
unsigned DecodeSimdOpcode(WasmOpcode opcode) {
@@ -1625,12 +1617,16 @@ class WasmFullDecoder : public WasmDecoder {
len = SimdShiftOp(opcode);
break;
}
- case kExprS32x4Shuffle:
- case kExprS16x8Shuffle:
case kExprS8x16Shuffle: {
- len = SimdShuffleOp(opcode);
+ len = Simd8x16ShuffleOp();
break;
}
+ case kExprS128LoadMem:
+ len = DecodePrefixedLoadMem(kWasmS128, MachineType::Simd128());
+ break;
+ case kExprS128StoreMem:
+ len = DecodePrefixedStoreMem(kWasmS128, MachineType::Simd128());
+ break;
default: {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig != nullptr) {
@@ -1639,7 +1635,7 @@ class WasmFullDecoder : public WasmDecoder {
Value val = Pop(static_cast<int>(i - 1), sig->GetParam(i - 1));
inputs[i - 1] = val.node;
}
- TFNode* node = BUILD(SimdOp, opcode, inputs);
+ TFNode* node = BUILD(SimdOp, opcode, inputs.data());
Push(GetReturnType(sig), node);
} else {
error("invalid simd opcode");
@@ -1722,10 +1718,6 @@ class WasmFullDecoder : public WasmDecoder {
return val;
}
- int baserel(const byte* ptr) {
- return base_ ? static_cast<int>(ptr - base_) : 0;
- }
-
int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
void BreakTo(unsigned depth) {
@@ -2132,7 +2124,7 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder decoder(&zone, module, body);
decoder.Decode();
- return decoder.toResult<DecodeStruct*>(nullptr);
+ return decoder.toResult(nullptr);
}
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
@@ -2140,7 +2132,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder decoder(&zone, builder, body);
decoder.Decode();
- return decoder.toResult<DecodeStruct*>(nullptr);
+ return decoder.toResult(nullptr);
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index ef3998f0e1..5efc1e1c18 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -5,8 +5,6 @@
#ifndef V8_WASM_FUNCTION_BODY_DECODER_H_
#define V8_WASM_FUNCTION_BODY_DECODER_H_
-#include <iterator>
-
#include "src/base/compiler-specific.h"
#include "src/base/iterator.h"
#include "src/globals.h"
@@ -32,23 +30,21 @@ struct WasmModule; // forward declaration of module interface.
// A wrapper around the signature and bytes of a function.
struct FunctionBody {
FunctionSig* sig; // function signature
- const byte* base; // base of the module bytes, for error reporting
+ uint32_t offset; // offset in the module bytes, for error reporting
const byte* start; // start of the function body
const byte* end; // end of the function body
};
static inline FunctionBody FunctionBodyForTesting(const byte* start,
const byte* end) {
- return {nullptr, start, start, end};
+ return {nullptr, 0, start, end};
}
-struct DecodeStruct {
- int unused;
-};
-typedef Result<DecodeStruct*> DecodeResult;
-inline std::ostream& operator<<(std::ostream& os, const DecodeStruct& tree) {
- return os;
-}
+// A {DecodeResult} only stores the failure / success status, but no data.
+// Thus we use {nullptr_t} as the data value, so that the only valid data
+// stored in this type is a nullptr.
+// Storing {void} would require template specialization.
+using DecodeResult = Result<std::nullptr_t>;
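// Illustration only (not part of the patch): a member of type void would be
// ill-formed, which is why a status-only Result<void> would need its own
// specialization, while std::nullptr_t works as a payload type with exactly
// one value. Minimal sketch of the idea (not V8's actual Result<T>):
//   template <typename T>
//   struct Result {
//     T value{};           // with T = std::nullptr_t the only value is nullptr
//     bool failed = false;
//   };
//   using DecodeResult = Result<std::nullptr_t>;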
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const wasm::WasmModule* module,
@@ -64,14 +60,14 @@ void PrintRawWasmCode(const byte* start, const byte* end);
inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmModule* module, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {sig, nullptr, start, end};
+ FunctionBody body = {sig, 0, start, end};
return VerifyWasmCode(allocator, module, body);
}
inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
TFBuilder* builder, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {sig, nullptr, start, end};
+ FunctionBody body = {sig, 0, start, end};
return BuildTFGraph(allocator, builder, body);
}
@@ -131,7 +127,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
// If one wants to iterate over the bytecode without looking at {pc_offset()}.
class opcode_iterator
: public iterator_base,
- public std::iterator<std::input_iterator_tag, WasmOpcode> {
+ public base::iterator<std::input_iterator_tag, WasmOpcode> {
public:
inline WasmOpcode operator*() {
DCHECK_LT(ptr_, end_);
@@ -147,7 +143,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
// opcodes.
class offset_iterator
: public iterator_base,
- public std::iterator<std::input_iterator_tag, uint32_t> {
+ public base::iterator<std::input_iterator_tag, uint32_t> {
public:
inline uint32_t operator*() {
DCHECK_LT(ptr_, end_);
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
new file mode 100644
index 0000000000..77700b2abe
--- /dev/null
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -0,0 +1,2356 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <src/wasm/module-compiler.h>
+
+#include <atomic>
+
+#include "src/asmjs/asm-js.h"
+#include "src/assembler-inl.h"
+#include "src/code-stubs.h"
+#include "src/counters.h"
+#include "src/property-descriptor.h"
+#include "src/wasm/compilation-manager.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+ } while (false)
+
+#define TRACE_CHAIN(instance) \
+ do { \
+ instance->PrintInstancesChain(); \
+ } while (false)
+
+#define TRACE_COMPILE(...) \
+ do { \
+ if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
+ } while (false)
+
+static const int kInvalidSigIndex = -1;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+ModuleCompiler::CodeGenerationSchedule::CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator, size_t max_memory)
+ : random_number_generator_(random_number_generator),
+ max_memory_(max_memory) {
+ DCHECK_NOT_NULL(random_number_generator_);
+ DCHECK_GT(max_memory_, 0);
+}
+
+void ModuleCompiler::CodeGenerationSchedule::Schedule(
+ std::unique_ptr<compiler::WasmCompilationUnit>&& item) {
+ size_t cost = item->memory_cost();
+ schedule_.push_back(std::move(item));
+ allocated_memory_.Increment(cost);
+}
+
+bool ModuleCompiler::CodeGenerationSchedule::CanAcceptWork() const {
+ return (!throttle_ || allocated_memory_.Value() <= max_memory_);
+}
+
+bool ModuleCompiler::CodeGenerationSchedule::ShouldIncreaseWorkload() const {
+ // Half the memory is unused again, so we can increase the workload.
+ return (!throttle_ || allocated_memory_.Value() <= max_memory_ / 2);
+}
+
+std::unique_ptr<compiler::WasmCompilationUnit>
+ModuleCompiler::CodeGenerationSchedule::GetNext() {
+ DCHECK(!IsEmpty());
+ size_t index = GetRandomIndexInSchedule();
+ auto ret = std::move(schedule_[index]);
+ std::swap(schedule_[schedule_.size() - 1], schedule_[index]);
+ schedule_.pop_back();
+ allocated_memory_.Decrement(ret->memory_cost());
+ return ret;
+}
+
+size_t ModuleCompiler::CodeGenerationSchedule::GetRandomIndexInSchedule() {
+ double factor = random_number_generator_->NextDouble();
+ size_t index = (size_t)(factor * schedule_.size());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, schedule_.size());
+ return index;
+}
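
GetNext() above removes a uniformly random compilation unit in O(1): it swaps the chosen slot with the last one before popping the vector. A minimal standalone sketch of that removal pattern, with std::mt19937 standing in for the engine's RandomNumberGenerator and plain ints standing in for units (all names are hypothetical):

    // Sketch only: remove and return a random element in O(1) by swapping the
    // chosen slot with the last one, then popping. Precondition: non-empty.
    #include <cstddef>
    #include <random>
    #include <utility>
    #include <vector>

    int TakeRandom(std::vector<int>* items, std::mt19937* rng) {
      std::uniform_int_distribution<std::size_t> dist(0, items->size() - 1);
      std::size_t index = dist(*rng);
      int taken = (*items)[index];
      std::swap((*items)[index], items->back());
      items->pop_back();
      return taken;
    }
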
+
+ModuleCompiler::ModuleCompiler(Isolate* isolate,
+ std::unique_ptr<WasmModule> module)
+ : isolate_(isolate),
+ module_(std::move(module)),
+ async_counters_(isolate->async_counters()),
+ executed_units_(
+ isolate->random_number_generator(),
+ (isolate->heap()->memory_allocator()->code_range()->valid()
+ ? isolate->heap()->memory_allocator()->code_range()->size()
+ : isolate->heap()->code_space()->Capacity()) /
+ 2),
+ num_background_tasks_(
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())),
+ stopped_compilation_tasks_(num_background_tasks_),
+ centry_stub_(CEntryStub(isolate, 1).GetCode()) {}
+
+// The actual runnable task that performs compilations in the background.
+ModuleCompiler::CompilationTask::CompilationTask(ModuleCompiler* compiler)
+ : CancelableTask(&compiler->background_task_manager_),
+ compiler_(compiler) {}
+
+void ModuleCompiler::CompilationTask::RunInternal() {
+ while (compiler_->executed_units_.CanAcceptWork() &&
+ compiler_->FetchAndExecuteCompilationUnit()) {
+ }
+
+ compiler_->OnBackgroundTaskStopped();
+}
+
+void ModuleCompiler::OnBackgroundTaskStopped() {
+ base::LockGuard<base::Mutex> guard(&tasks_mutex_);
+ ++stopped_compilation_tasks_;
+ DCHECK_LE(stopped_compilation_tasks_, num_background_tasks_);
+}
+
+// Run by each compilation task. The no_finisher_callback is called
+// within the result_mutex_ lock when no finishing task is running,
+// i.e. when the finisher_is_running_ flag is not set.
+bool ModuleCompiler::FetchAndExecuteCompilationUnit(
+ std::function<void()> no_finisher_callback) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
+
+ std::unique_ptr<compiler::WasmCompilationUnit> unit;
+ {
+ base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
+ if (compilation_units_.empty()) return false;
+ unit = std::move(compilation_units_.back());
+ compilation_units_.pop_back();
+ }
+ unit->ExecuteCompilation();
+ {
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ executed_units_.Schedule(std::move(unit));
+ if (no_finisher_callback != nullptr && !finisher_is_running_) {
+ no_finisher_callback();
+ // We set the flag here so that no more than one finisher is started.
+ finisher_is_running_ = true;
+ }
+ }
+ return true;
+}
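
The callback handling above ensures that at most one finisher runs at a time: the finisher_is_running_ flag is read and set while result_mutex_ is held, so only one producing task ever invokes the callback. A minimal sketch of that pattern in isolation (class and method names are hypothetical):

    // Sketch only: at most one "finisher" is started, because the flag is
    // checked and set under the same mutex that protects the results.
    #include <functional>
    #include <mutex>

    class FinisherGuard {
     public:
      void OnResultReady(const std::function<void()>& start_finisher) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (finisher_is_running_) return;  // a finisher is already scheduled
        finisher_is_running_ = true;       // claim the role before unlocking
        start_finisher();
      }
      void OnFinisherDone() {
        std::lock_guard<std::mutex> lock(mutex_);
        finisher_is_running_ = false;      // let the next producer start one
      }

     private:
      std::mutex mutex_;
      bool finisher_is_running_ = false;
    };
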
+
+size_t ModuleCompiler::InitializeCompilationUnits(
+ const std::vector<WasmFunction>& functions, ModuleBytesEnv& module_env) {
+ uint32_t start = module_env.module_env.module->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs;
+ uint32_t num_funcs = static_cast<uint32_t>(functions.size());
+ uint32_t funcs_to_compile = start > num_funcs ? 0 : num_funcs - start;
+ CompilationUnitBuilder builder(this);
+ for (uint32_t i = start; i < num_funcs; ++i) {
+ const WasmFunction* func = &functions[i];
+ uint32_t buffer_offset = func->code.offset();
+ Vector<const uint8_t> bytes(
+ module_env.wire_bytes.start() + func->code.offset(),
+ func->code.end_offset() - func->code.offset());
+ WasmName name = module_env.wire_bytes.GetName(func);
+ builder.AddUnit(&module_env.module_env, func, buffer_offset, bytes, name);
+ }
+ builder.Commit();
+ return funcs_to_compile;
+}
+
+void ModuleCompiler::ReopenHandlesInDeferredScope() {
+ centry_stub_ = handle(*centry_stub_, isolate_);
+}
+
+void ModuleCompiler::RestartCompilationTasks() {
+ base::LockGuard<base::Mutex> guard(&tasks_mutex_);
+ for (; stopped_compilation_tasks_ > 0; --stopped_compilation_tasks_) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new CompilationTask(this),
+ v8::Platform::ExpectedRuntime::kShortRunningTask);
+ }
+}
+
+size_t ModuleCompiler::FinishCompilationUnits(
+ std::vector<Handle<Code>>& results, ErrorThrower* thrower) {
+ size_t finished = 0;
+ while (true) {
+ int func_index = -1;
+ MaybeHandle<Code> result = FinishCompilationUnit(thrower, &func_index);
+ if (func_index < 0) break;
+ ++finished;
+ DCHECK_IMPLIES(result.is_null(), thrower->error());
+ if (result.is_null()) break;
+ results[func_index] = result.ToHandleChecked();
+ }
+ bool do_restart;
+ {
+ base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
+ do_restart = !compilation_units_.empty();
+ }
+ if (do_restart) RestartCompilationTasks();
+ return finished;
+}
+
+void ModuleCompiler::SetFinisherIsRunning(bool value) {
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ finisher_is_running_ = value;
+}
+
+MaybeHandle<Code> ModuleCompiler::FinishCompilationUnit(ErrorThrower* thrower,
+ int* func_index) {
+ std::unique_ptr<compiler::WasmCompilationUnit> unit;
+ {
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ if (executed_units_.IsEmpty()) return {};
+ unit = executed_units_.GetNext();
+ }
+ *func_index = unit->func_index();
+ return unit->FinishCompilation(thrower);
+}
+
+void ModuleCompiler::CompileInParallel(ModuleBytesEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower) {
+ const WasmModule* module = module_env->module_env.module;
+ // Data structures for the parallel compilation.
+
+ //-----------------------------------------------------------------------
+ // For parallel compilation:
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units}.
+ // 2) The main thread spawns {CompilationTask} instances which run on
+ // the background threads.
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // dequeues it and finishes the compilation.
+ // 4) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {CompilationTask} instances to finish.
+ // 5) The main thread finishes the compilation.
+
+ // Turn on the {CanonicalHandleScope} so that the background threads can
+ // use the node cache.
+ CanonicalHandleScope canonical(isolate_);
+
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units}.
+ InitializeCompilationUnits(module->functions, *module_env);
+ executed_units_.EnableThrottling();
+
+ // 2) The main thread spawns {CompilationTask} instances which run on
+ // the background threads.
+ RestartCompilationTasks();
+
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ // The foreground task bypasses waiting on memory threshold, because
+ // its results will immediately be converted to code (below).
+ while (FetchAndExecuteCompilationUnit()) {
+ // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // dequeues it and finishes the compilation unit. Compilation units
+ // are finished concurrently to the background threads to save
+ // memory.
+ FinishCompilationUnits(results, thrower);
+ }
+ // 4) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {CompilationTask} instances to finish - which
+ // happens once they all realize there's no next work item to process.
+ background_task_manager_.CancelAndWait();
+ // Finish all compilation units which have been executed while we waited.
+ FinishCompilationUnits(results, thrower);
+}
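
The numbered comment in CompileInParallel() describes the overall scheme. A compact sketch of the same shape, reduced to std::thread and std::mutex, with a trivial Unit struct standing in for compilation units; the main-thread finishing of step 3.b is deferred to the very end here for brevity (all names are hypothetical):

    // Sketch only: background tasks and the main thread drain a pending queue;
    // executed units are handed to a results queue finished by the main thread.
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Unit { int id = 0; bool executed = false; };

    void CompileAllInParallel(std::vector<Unit>* pending, std::vector<Unit>* done,
                              int num_background_threads) {
      std::mutex pending_mutex;
      std::mutex done_mutex;
      auto worker = [&]() {
        for (;;) {
          Unit unit;
          {
            std::lock_guard<std::mutex> lock(pending_mutex);
            if (pending->empty()) return;  // no next work item: task stops
            unit = pending->back();
            pending->pop_back();
          }
          unit.executed = true;  // stands in for the thread-safe compile phase
          std::lock_guard<std::mutex> lock(done_mutex);
          done->push_back(unit);  // queued for the main-thread finishing phase
        }
      };
      std::vector<std::thread> tasks;
      for (int i = 0; i < num_background_threads; ++i) tasks.emplace_back(worker);
      worker();                               // the main thread helps as well
      for (std::thread& t : tasks) t.join();  // step 4: wait for all tasks
      // Step 5: the main thread would now finish everything collected in *done*.
    }
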
+
+void ModuleCompiler::CompileSequentially(ModuleBytesEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower) {
+ DCHECK(!thrower->error());
+
+ const WasmModule* module = module_env->module_env.module;
+ for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
+ i < module->functions.size(); ++i) {
+ const WasmFunction& func = module->functions[i];
+ if (func.imported) continue; // Imports are compiled at instantiation time.
+
+ // Compile the function.
+ MaybeHandle<Code> code = compiler::WasmCompilationUnit::CompileWasmFunction(
+ thrower, isolate_, module_env, &func);
+ if (code.is_null()) {
+ WasmName str = module_env->wire_bytes.GetName(&func);
+ // TODO(clemensh): Truncate the function name in the output.
+ thrower->CompileError("Compilation of #%d:%.*s failed.", i, str.length(),
+ str.start());
+ break;
+ }
+ results[i] = code.ToHandleChecked();
+ }
+}
+
+void ModuleCompiler::ValidateSequentially(ModuleBytesEnv* module_env,
+ ErrorThrower* thrower) {
+ DCHECK(!thrower->error());
+
+ const WasmModule* module = module_env->module_env.module;
+ for (uint32_t i = 0; i < module->functions.size(); ++i) {
+ const WasmFunction& func = module->functions[i];
+ if (func.imported) continue;
+
+ const byte* base = module_env->wire_bytes.start();
+ FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(),
+ base + func.code.end_offset()};
+ DecodeResult result = VerifyWasmCode(isolate_->allocator(),
+ module_env->module_env.module, body);
+ if (result.failed()) {
+ WasmName str = module_env->wire_bytes.GetName(&func);
+ thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
+ str.length(), str.start(),
+ result.error_msg().c_str(), result.error_offset());
+ break;
+ }
+ }
+}
+
+MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ Factory* factory = isolate_->factory();
+ WasmInstance temp_instance(module_.get());
+ temp_instance.context = isolate_->native_context();
+ temp_instance.mem_size = WasmModule::kPageSize * module_->min_mem_pages;
+ temp_instance.mem_start = nullptr;
+ temp_instance.globals_start = nullptr;
+
+ // Initialize the indirect tables with placeholders.
+ int function_table_count = static_cast<int>(module_->function_tables.size());
+ Handle<FixedArray> function_tables =
+ factory->NewFixedArray(function_table_count, TENURED);
+ Handle<FixedArray> signature_tables =
+ factory->NewFixedArray(function_table_count, TENURED);
+ for (int i = 0; i < function_table_count; ++i) {
+ temp_instance.function_tables[i] = factory->NewFixedArray(1, TENURED);
+ temp_instance.signature_tables[i] = factory->NewFixedArray(1, TENURED);
+ function_tables->set(i, *temp_instance.function_tables[i]);
+ signature_tables->set(i, *temp_instance.signature_tables[i]);
+ }
+
+ TimedHistogramScope wasm_compile_module_time_scope(
+ module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
+ : counters()->wasm_compile_asm_module_time());
+ return CompileToModuleObjectInternal(
+ thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes, factory,
+ &temp_instance, &function_tables, &signature_tables);
+}
+
+namespace {
+bool compile_lazy(const WasmModule* module) {
+ return FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
+}
+
+void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
+}
+
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+ return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
+}
+
+void RecordStats(Code* code, Counters* counters) {
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+}
+
+void RecordStats(Handle<FixedArray> functions, Counters* counters) {
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < functions->length(); ++i) {
+ RecordStats(Code::cast(functions->get(i)), counters);
+ }
+}
+Handle<Script> CreateWasmScript(Isolate* isolate,
+ const ModuleWireBytes& wire_bytes) {
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ script->set_context_data(isolate->native_context()->debug_context_id());
+ script->set_type(Script::TYPE_WASM);
+
+ int hash = StringHasher::HashSequentialString(
+ reinterpret_cast<const char*>(wire_bytes.start()),
+ static_cast<int>(wire_bytes.length()), kZeroHashSeed);
+
+ const int kBufferSize = 32;
+ char buffer[kBufferSize];
+ int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+ DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+ MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+ TENURED);
+ script->set_source_url(*url_str.ToHandleChecked());
+
+ int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ TENURED);
+ script->set_name(*name_str.ToHandleChecked());
+
+ return script;
+}
+
+// Ensure that the code object in <code_table> at offset <func_index> has
+// deoptimization data attached. This is needed for lazy compile stubs which are
+// called from JS_TO_WASM functions or via exported function tables. The deopt
+// data is used to determine which function this lazy compile stub belongs to.
+Handle<Code> EnsureExportedLazyDeoptData(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> code_table,
+ int func_index) {
+ Handle<Code> code(Code::cast(code_table->get(func_index)), isolate);
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) {
+ // No special deopt data needed for compiled functions, or for imported
+ // functions, which map to Illegal at this point (they get compiled at
+ // instantiation time).
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ code->builtin_index() == Builtins::kIllegal);
+ return code;
+ }
+ // deopt_data:
+ // #0: weak instance
+ // #1: func_index
+ // might be extended later for table exports (see
+ // EnsureTableExportLazyDeoptData).
+ Handle<FixedArray> deopt_data(code->deoptimization_data());
+ DCHECK_EQ(0, deopt_data->length() % 2);
+ if (deopt_data->length() == 0) {
+ code = isolate->factory()->CopyCode(code);
+ code_table->set(func_index, *code);
+ deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
+ code->set_deoptimization_data(*deopt_data);
+ if (!instance.is_null()) {
+ Handle<WeakCell> weak_instance =
+ isolate->factory()->NewWeakCell(instance);
+ deopt_data->set(0, *weak_instance);
+ }
+ deopt_data->set(1, Smi::FromInt(func_index));
+ }
+ DCHECK_IMPLIES(!instance.is_null(),
+ WeakCell::cast(code->deoptimization_data()->get(0))->value() ==
+ *instance);
+ DCHECK_EQ(func_index, Smi::ToInt(code->deoptimization_data()->get(1)));
+ return code;
+}
+
+// Ensure that the code object in <code_table> at offset <func_index> has
+// deoptimization data attached. This is needed for lazy compile stubs which are
+// called from JS_TO_WASM functions or via exported function tables. The deopt
+// data is used to determine which function this lazy compile stub belongs to.
+Handle<Code> EnsureTableExportLazyDeoptData(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> code_table, int func_index,
+ Handle<FixedArray> export_table, int export_index,
+ std::unordered_map<uint32_t, uint32_t>& table_export_count) {
+ Handle<Code> code =
+ EnsureExportedLazyDeoptData(isolate, instance, code_table, func_index);
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) return code;
+
+ // deopt_data:
+ // #0: weak instance
+ // #1: func_index
+ // [#2: export table
+ // #3: export table index]
+ // [#4: export table
+ // #5: export table index]
+ // ...
+ // table_export_count counts down and determines the index for the new export
+ // table entry.
+ auto table_export_entry = table_export_count.find(func_index);
+ DCHECK(table_export_entry != table_export_count.end());
+ DCHECK_LT(0, table_export_entry->second);
+ uint32_t this_idx = 2 * table_export_entry->second;
+ --table_export_entry->second;
+ Handle<FixedArray> deopt_data(code->deoptimization_data());
+ DCHECK_EQ(0, deopt_data->length() % 2);
+ if (deopt_data->length() == 2) {
+ // Then only the "header" (#0 and #1) exists. Extend for the export table
+ // entries (make space for this_idx + 2 elements).
+ deopt_data = isolate->factory()->CopyFixedArrayAndGrow(deopt_data, this_idx,
+ TENURED);
+ code->set_deoptimization_data(*deopt_data);
+ }
+ DCHECK_LE(this_idx + 2, deopt_data->length());
+ DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
+ DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
+ deopt_data->set(this_idx, *export_table);
+ deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
+ return code;
+}
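
Both helpers above maintain the deoptimization-data layout spelled out in their comments: slot #0 holds the weak instance, slot #1 the function index, and each following pair an export table plus the entry index inside it. A small model of that layout, using a plain std::vector<intptr_t> in place of the real FixedArray (names are hypothetical):

    // Sketch only: a flat model of the lazy-compile deopt data layout.
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct LazyDeoptData {
      std::vector<intptr_t> slots;  // [instance, func_index, table, index, ...]

      intptr_t instance() const { return slots[0]; }
      intptr_t func_index() const { return slots[1]; }
      std::size_t num_table_exports() const { return (slots.size() - 2) / 2; }
      std::pair<intptr_t, intptr_t> table_export(std::size_t i) const {
        return {slots[2 + 2 * i], slots[3 + 2 * i]};  // {export table, entry}
      }
    };
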
+
+bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
+ return offset + size <= upper && offset + size >= offset;
+}
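
in_bounds() needs its second condition because offset + size is computed in uint32_t and can wrap around. A tiny self-contained check of that wrap-around case (the constants are chosen purely for illustration):

    // Sketch only: why "offset + size >= offset" is needed in in_bounds().
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t offset = 0xFFFFFFF0u;
      uint32_t size = 0x20u;
      uint32_t upper = 0x100u;
      uint32_t end = offset + size;  // wraps around to 0x10
      assert(end == 0x10u);
      assert(end <= upper);          // the first check alone would pass
      assert(!(end >= offset));      // the second check detects the wrap
      return 0;
    }
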
+
+using WasmInstanceMap =
+ IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
+
+Handle<Code> UnwrapOrCompileImportWrapper(
+ Isolate* isolate, int index, FunctionSig* sig, Handle<JSReceiver> target,
+ Handle<String> module_name, MaybeHandle<String> import_name,
+ ModuleOrigin origin, WasmInstanceMap* imported_instances) {
+ WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
+ if (other_func) {
+ if (!sig->Equals(other_func->sig)) return Handle<Code>::null();
+ // Signature matched. Unwrap the import wrapper and return the raw wasm
+ // function code.
+ // Remember the wasm instance of the import. We have to keep it alive.
+ Handle<WasmInstanceObject> imported_instance(
+ Handle<WasmExportedFunction>::cast(target)->instance(), isolate);
+ imported_instances->Set(imported_instance, imported_instance);
+ return UnwrapImportWrapper(target);
+ }
+ // Not a wasm function, or the function is being debugged. Compile a new
+ // wrapper for the new signature.
+ return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
+ module_name, import_name, origin);
+}
+
+double MonotonicallyIncreasingTimeInMs() {
+ return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+ base::Time::kMillisecondsPerSecond;
+}
+
+void RejectPromise(Isolate* isolate, Handle<Context> context,
+ ErrorThrower& thrower, Handle<JSPromise> promise) {
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
+ auto maybe = resolver->Reject(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
+}
+
+void ResolvePromise(Isolate* isolate, Handle<Context> context,
+ Handle<JSPromise> promise, Handle<Object> result) {
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
+ auto maybe = resolver->Resolve(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(result));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
+}
+
+} // namespace
+
+MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes,
+ Factory* factory, WasmInstance* temp_instance,
+ Handle<FixedArray>* function_tables, Handle<FixedArray>* signature_tables) {
+ ModuleBytesEnv module_env(module_.get(), temp_instance, wire_bytes);
+
+ // The {code_table} array contains import wrappers and functions (which
+ // are both included in {functions.size()}), and export wrappers.
+ int code_table_size = static_cast<int>(module_->functions.size() +
+ module_->num_exported_functions);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
+
+ // Check whether lazy compilation is enabled for this module.
+ bool lazy_compile = compile_lazy(module_.get());
+
+ // If lazy compile: Initialize the code table with the lazy compile builtin.
+ // Otherwise: Initialize with the illegal builtin. All call sites will be
+ // patched at instantiation.
+ Handle<Code> init_builtin = lazy_compile
+ ? isolate_->builtins()->WasmCompileLazy()
+ : isolate_->builtins()->Illegal();
+ for (int i = 0, e = static_cast<int>(module_->functions.size()); i < e; ++i) {
+ code_table->set(i, *init_builtin);
+ temp_instance->function_code[i] = init_builtin;
+ }
+
+ (module_->is_wasm() ? counters()->wasm_functions_per_wasm_module()
+ : counters()->wasm_functions_per_asm_module())
+ ->AddSample(static_cast<int>(module_->functions.size()));
+
+ if (!lazy_compile) {
+ size_t funcs_to_compile =
+ module_->functions.size() - module_->num_imported_functions;
+ bool compile_parallel =
+ !FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
+ funcs_to_compile > 1 &&
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() > 0;
+ if (compile_parallel) {
+ // Avoid a race condition by collecting results into a second vector.
+ std::vector<Handle<Code>> results(temp_instance->function_code);
+ CompileInParallel(&module_env, results, thrower);
+ temp_instance->function_code.swap(results);
+ } else {
+ CompileSequentially(&module_env, temp_instance->function_code, thrower);
+ }
+ } else if (module_->is_wasm()) {
+ // Validate wasm modules for lazy compilation. Don't validate asm.js
+ // modules, they are valid by construction (otherwise a CHECK will fail
+ // during lazy compilation).
+ // TODO(clemensh): According to the spec, we can actually skip validation
+ // at module creation time, and return a function that always traps at
+ // (lazy) compilation time.
+ ValidateSequentially(&module_env, thrower);
+ }
+ if (thrower->error()) return {};
+
+ // At this point, compilation has completed. Update the code table.
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < temp_instance->function_code.size(); ++i) {
+ Code* code = *temp_instance->function_code[i];
+ code_table->set(static_cast<int>(i), code);
+ RecordStats(code, counters());
+ }
+
+ // Create heap objects for script, module bytes and asm.js offset table to
+ // be stored in the shared module data.
+ Handle<Script> script;
+ Handle<ByteArray> asm_js_offset_table;
+ if (asm_js_script.is_null()) {
+ script = CreateWasmScript(isolate_, wire_bytes);
+ } else {
+ script = asm_js_script;
+ asm_js_offset_table =
+ isolate_->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+ asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+ asm_js_offset_table_bytes.length());
+ }
+ // TODO(wasm): only save the sections necessary to deserialize a
+ // {WasmModule}. E.g. function bodies could be omitted.
+ Handle<String> module_bytes =
+ factory
+ ->NewStringFromOneByte({wire_bytes.start(), wire_bytes.length()},
+ TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::New(isolate_, module_.release());
+ WasmModule* module = module_wrapper->get();
+
+ // Create the shared module data.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmSharedModuleData. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+ isolate_, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
+ script, asm_js_offset_table);
+ if (lazy_compile) WasmSharedModuleData::PrepareForLazyCompilation(shared);
+
+ // Create the compiled module object, and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of this
+ // object.
+ Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
+ isolate_, shared, code_table, *function_tables, *signature_tables);
+
+ // If we created a wasm script, finish it now and make it public to the
+ // debugger.
+ if (asm_js_script.is_null()) {
+ script->set_wasm_compiled_module(*compiled_module);
+ isolate_->debug()->OnAfterCompile(script);
+ }
+
+ // Compile JS->wasm wrappers for exported functions.
+ JSToWasmWrapperCache js_to_wasm_cache;
+ int func_index = 0;
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
+ isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
+ Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
+ isolate_, module, wasm_code, exp.index);
+ int export_index = static_cast<int>(module->functions.size() + func_index);
+ code_table->set(export_index, *wrapper_code);
+ RecordStats(*wrapper_code, counters());
+ func_index++;
+ }
+
+ return WasmModuleObject::New(isolate_, compiled_module);
+}
+
+Handle<Code> JSToWasmWrapperCache::CloneOrCompileJSToWasmWrapper(
+ Isolate* isolate, const wasm::WasmModule* module, Handle<Code> wasm_code,
+ uint32_t index) {
+ const wasm::WasmFunction* func = &module->functions[index];
+ int cached_idx = sig_map_.Find(func->sig);
+ if (cached_idx >= 0) {
+ Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
+ // Now patch the call to wasm code.
+ for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
+ DCHECK(!it.done());
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION ||
+ target->kind() == Code::WASM_TO_JS_FUNCTION ||
+ target->builtin_index() == Builtins::kIllegal ||
+ target->builtin_index() == Builtins::kWasmCompileLazy) {
+ it.rinfo()->set_target_address(isolate, wasm_code->instruction_start());
+ break;
+ }
+ }
+ return code;
+ }
+
+ Handle<Code> code =
+ compiler::CompileJSToWasmWrapper(isolate, module, wasm_code, index);
+ uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
+ DCHECK_EQ(code_cache_.size(), new_cache_idx);
+ USE(new_cache_idx);
+ code_cache_.push_back(code);
+ return code;
+}
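
CloneOrCompileJSToWasmWrapper() keys its cache on the function signature: on a hit it copies the cached wrapper and re-points its single wasm call target, on a miss it compiles a new wrapper and remembers it. A minimal sketch of that caching idea, with std::string standing in for a canonicalized FunctionSig and a plain struct standing in for Code (names are hypothetical):

    // Sketch only: a signature-keyed wrapper cache with copy-and-patch on hit.
    #include <string>
    #include <unordered_map>

    struct Wrapper { std::string sig; int call_target = 0; };

    class WrapperCache {
     public:
      Wrapper GetWrapper(const std::string& sig, int wasm_code_target) {
        auto it = cache_.find(sig);
        if (it != cache_.end()) {
          Wrapper copy = it->second;            // clone the cached wrapper ...
          copy.call_target = wasm_code_target;  // ... and patch the call target
          return copy;
        }
        Wrapper fresh{sig, wasm_code_target};   // "compile" a new wrapper
        cache_.emplace(sig, fresh);             // remember it for this signature
        return fresh;
      }

     private:
      std::unordered_map<std::string, Wrapper> cache_;
    };
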
+
+InstanceBuilder::InstanceBuilder(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory,
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback)
+ : isolate_(isolate),
+ module_(module_object->compiled_module()->module()),
+ async_counters_(isolate->async_counters()),
+ thrower_(thrower),
+ module_object_(module_object),
+ ffi_(ffi),
+ memory_(memory),
+ instance_finalizer_callback_(instance_finalizer_callback) {}
+
+// Build an instance, in all of its glory.
+MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
+ // Check that an imports argument was provided, if the module requires it.
+ // No point in continuing otherwise.
+ if (!module_->import_table.empty() && ffi_.is_null()) {
+ thrower_->TypeError(
+ "Imports argument must be present and must be an object");
+ return {};
+ }
+
+ // Record build time into correct bucket, then build instance.
+ TimedHistogramScope wasm_instantiate_module_time_scope(
+ module_->is_wasm() ? counters()->wasm_instantiate_wasm_module_time()
+ : counters()->wasm_instantiate_asm_module_time());
+ Factory* factory = isolate_->factory();
+
+ //--------------------------------------------------------------------------
+ // Reuse the compiled module (if no owner), otherwise clone.
+ //--------------------------------------------------------------------------
+ Handle<FixedArray> code_table;
+ // We keep around a copy of the old code table, because we'll be replacing
+ // imports for the new instance, and then we need the old imports to be
+ // able to relocate.
+ Handle<FixedArray> old_code_table;
+ MaybeHandle<WasmInstanceObject> owner;
+
+ TRACE("Starting new module instantiation\n");
+ {
+ // Root the owner, if any, before doing any allocations, which
+ // may trigger GC.
+ // Both owner and original template need to be in sync. Even
+ // after we lose the original template handle, the code
+ // objects we copied from it have data relative to the
+ // instance - such as globals addresses.
+ Handle<WasmCompiledModule> original;
+ {
+ DisallowHeapAllocation no_gc;
+ original = handle(module_object_->compiled_module());
+ if (original->has_weak_owning_instance()) {
+ owner = handle(WasmInstanceObject::cast(
+ original->weak_owning_instance()->value()));
+ }
+ }
+ DCHECK(!original.is_null());
+ if (original->has_weak_owning_instance()) {
+ // Clone, but don't insert the clone into the instances chain yet.
+ // We do that last. Since we are holding on to the owner instance,
+ // the owner + original state used for cloning and patching
+ // won't be mutated by possible finalizer runs.
+ DCHECK(!owner.is_null());
+ TRACE("Cloning from %d\n", original->instance_id());
+ old_code_table = original->code_table();
+ compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+ code_table = compiled_module_->code_table();
+ // Avoid creating too many handles in the outer scope.
+ HandleScope scope(isolate_);
+
+ // Clone the code for wasm functions and exports.
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> orig_code(Code::cast(code_table->get(i)), isolate_);
+ switch (orig_code->kind()) {
+ case Code::WASM_TO_JS_FUNCTION:
+ // Imports will be overwritten with newly compiled wrappers.
+ break;
+ case Code::BUILTIN:
+ DCHECK_EQ(Builtins::kWasmCompileLazy, orig_code->builtin_index());
+ // If this code object has deoptimization data, then we need a
+ // unique copy to attach updated deoptimization data.
+ if (orig_code->deoptimization_data()->length() > 0) {
+ Handle<Code> code = factory->CopyCode(orig_code);
+ Handle<FixedArray> deopt_data =
+ factory->NewFixedArray(2, TENURED);
+ deopt_data->set(1, Smi::FromInt(i));
+ code->set_deoptimization_data(*deopt_data);
+ code_table->set(i, *code);
+ }
+ break;
+ case Code::JS_TO_WASM_FUNCTION:
+ case Code::WASM_FUNCTION: {
+ Handle<Code> code = factory->CopyCode(orig_code);
+ code_table->set(i, *code);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ RecordStats(code_table, counters());
+ } else {
+ // There was no owner, so we can reuse the original.
+ compiled_module_ = original;
+ old_code_table = factory->CopyFixedArray(compiled_module_->code_table());
+ code_table = compiled_module_->code_table();
+ TRACE("Reusing existing instance %d\n", compiled_module_->instance_id());
+ }
+ compiled_module_->set_native_context(isolate_->native_context());
+ }
+
+ //--------------------------------------------------------------------------
+ // Allocate the instance object.
+ //--------------------------------------------------------------------------
+ Zone instantiation_zone(isolate_->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate_, &instantiation_zone);
+ Handle<WasmInstanceObject> instance =
+ WasmInstanceObject::New(isolate_, compiled_module_);
+
+ //--------------------------------------------------------------------------
+ // Set up the globals for the new instance.
+ //--------------------------------------------------------------------------
+ MaybeHandle<JSArrayBuffer> old_globals;
+ uint32_t globals_size = module_->globals_size;
+ if (globals_size > 0) {
+ const bool enable_guard_regions = false;
+ Handle<JSArrayBuffer> global_buffer =
+ NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
+ globals_ = global_buffer;
+ if (globals_.is_null()) {
+ thrower_->RangeError("Out of memory: wasm globals");
+ return {};
+ }
+ Address old_globals_start = compiled_module_->GetGlobalsStartOrNull();
+ Address new_globals_start =
+ static_cast<Address>(global_buffer->backing_store());
+ code_specialization.RelocateGlobals(old_globals_start, new_globals_start);
+ // The address of the backing buffer for the globals is in native memory
+ // and, thus, not moving. We need it saved for
+ // serialization/deserialization purposes - so that the other end
+ // understands how to relocate the references. We still need to save the
+ // JSArrayBuffer on the instance, to keep it all alive.
+ WasmCompiledModule::SetGlobalsStartAddressFrom(factory, compiled_module_,
+ global_buffer);
+ instance->set_globals_buffer(*global_buffer);
+ }
+
+ //--------------------------------------------------------------------------
+ // Prepare for initialization of function tables.
+ //--------------------------------------------------------------------------
+ int function_table_count = static_cast<int>(module_->function_tables.size());
+ table_instances_.reserve(module_->function_tables.size());
+ for (int index = 0; index < function_table_count; ++index) {
+ table_instances_.push_back(
+ {Handle<WasmTableObject>::null(), Handle<FixedArray>::null(),
+ Handle<FixedArray>::null(), Handle<FixedArray>::null()});
+ }
+
+ //--------------------------------------------------------------------------
+ // Process the imports for the module.
+ //--------------------------------------------------------------------------
+ int num_imported_functions = ProcessImports(code_table, instance);
+ if (num_imported_functions < 0) return {};
+
+ //--------------------------------------------------------------------------
+ // Process the initialization for the module's globals.
+ //--------------------------------------------------------------------------
+ InitGlobals();
+
+ //--------------------------------------------------------------------------
+ // Set up the indirect function tables for the new instance.
+ //--------------------------------------------------------------------------
+ if (function_table_count > 0)
+ InitializeTables(instance, &code_specialization);
+
+ //--------------------------------------------------------------------------
+ // Set up the memory for the new instance.
+ //--------------------------------------------------------------------------
+ uint32_t min_mem_pages = module_->min_mem_pages;
+ (module_->is_wasm() ? counters()->wasm_wasm_min_mem_pages_count()
+ : counters()->wasm_asm_min_mem_pages_count())
+ ->AddSample(min_mem_pages);
+
+ if (!memory_.is_null()) {
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ // Mark the externally passed ArrayBuffer as non-neuterable.
+ memory->set_is_neuterable(false);
+ memory->set_is_wasm_buffer(true);
+
+ DCHECK_IMPLIES(EnableGuardRegions(),
+ module_->is_asm_js() || memory->has_guard_region());
+ } else if (min_mem_pages > 0) {
+ memory_ = AllocateMemory(min_mem_pages);
+ if (memory_.is_null()) return {}; // failed to allocate memory
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that indirect function table segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (WasmTableInit& table_init : module_->table_inits) {
+ DCHECK(table_init.table_index < table_instances_.size());
+ uint32_t base = EvalUint32InitExpr(table_init.offset);
+ uint32_t table_size =
+ table_instances_[table_init.table_index].function_table->length();
+ if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
+ table_size)) {
+ thrower_->LinkError("table initializer is out of bounds");
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that memory segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (WasmDataSegment& seg : module_->data_segments) {
+ uint32_t base = EvalUint32InitExpr(seg.dest_addr);
+ uint32_t mem_size = 0;
+ if (!memory_.is_null()) {
+ CHECK(memory_.ToHandleChecked()->byte_length()->ToUint32(&mem_size));
+ }
+ if (!in_bounds(base, seg.source.length(), mem_size)) {
+ thrower_->LinkError("data segment is out of bounds");
+ return {};
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize memory.
+ //--------------------------------------------------------------------------
+ if (!memory_.is_null()) {
+ Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
+ Address mem_start = static_cast<Address>(memory->backing_store());
+ uint32_t mem_size;
+ CHECK(memory->byte_length()->ToUint32(&mem_size));
+ LoadDataSegments(mem_start, mem_size);
+
+ uint32_t old_mem_size = compiled_module_->mem_size();
+ Address old_mem_start = compiled_module_->GetEmbeddedMemStartOrNull();
+ // We might get instantiated again with the same memory. No patching
+ // needed in this case.
+ if (old_mem_start != mem_start || old_mem_size != mem_size) {
+ code_specialization.RelocateMemoryReferences(old_mem_start, old_mem_size,
+ mem_start, mem_size);
+ }
+ // Just like with globals, we need to keep both the JSArrayBuffer
+ // and save the start pointer.
+ instance->set_memory_buffer(*memory);
+ WasmCompiledModule::SetSpecializationMemInfoFrom(factory, compiled_module_,
+ memory);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the runtime support for the new instance.
+ //--------------------------------------------------------------------------
+ Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
+
+ for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs,
+ num_functions = static_cast<int>(module_->functions.size());
+ i < num_functions; ++i) {
+ Handle<Code> code = handle(Code::cast(code_table->get(i)), isolate_);
+ if (code->kind() == Code::WASM_FUNCTION) {
+ Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+ deopt_data->set(0, *weak_link);
+ deopt_data->set(1, Smi::FromInt(i));
+ code->set_deoptimization_data(*deopt_data);
+ continue;
+ }
+ DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ int deopt_len = code->deoptimization_data()->length();
+ if (deopt_len == 0) continue;
+ DCHECK_LE(2, deopt_len);
+ DCHECK_EQ(i, Smi::ToInt(code->deoptimization_data()->get(1)));
+ code->deoptimization_data()->set(0, *weak_link);
+ // Entries [2, deopt_len) encode information about table exports of this
+ // function. This is rebuilt in {LoadTableSegments}, so reset it here.
+ for (int i = 2; i < deopt_len; ++i) {
+ code->deoptimization_data()->set_undefined(isolate_, i);
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up the exports object for the new instance.
+ //--------------------------------------------------------------------------
+ ProcessExports(code_table, instance, compiled_module_);
+ if (thrower_->error()) return {};
+
+ //--------------------------------------------------------------------------
+ // Add instance to Memory object
+ //--------------------------------------------------------------------------
+ if (instance->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(instance->memory_object(), isolate_);
+ WasmMemoryObject::AddInstance(isolate_, memory, instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize the indirect function tables.
+ //--------------------------------------------------------------------------
+ if (function_table_count > 0) LoadTableSegments(code_table, instance);
+
+ // Patch all code with the relocations registered in code_specialization.
+ code_specialization.RelocateDirectCalls(instance);
+ code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
+
+ FlushICache(isolate_, code_table);
+
+ //--------------------------------------------------------------------------
+ // Unpack and notify signal handler of protected instructions.
+ //--------------------------------------------------------------------------
+ if (trap_handler::UseTrapHandler()) {
+ UnpackAndRegisterProtectedInstructions(isolate_, code_table);
+ }
+
+ //--------------------------------------------------------------------------
+ // Set up and link the new instance.
+ //--------------------------------------------------------------------------
+ {
+ Handle<Object> global_handle =
+ isolate_->global_handles()->Create(*instance);
+ Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module_);
+ Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
+ MaybeHandle<WeakCell> link_to_original;
+ MaybeHandle<WasmCompiledModule> original;
+ if (!owner.is_null()) {
+ // Prepare the data needed for publishing in a chain, but don't link just
+ // yet, because we want all the publishing to happen free from GC
+ // interruptions, and so we do it in one GC-free scope afterwards.
+ original = handle(owner.ToHandleChecked()->compiled_module());
+ link_to_original = factory->NewWeakCell(original.ToHandleChecked());
+ }
+ // Publish the new instance to the instances chain.
+ {
+ DisallowHeapAllocation no_gc;
+ if (!link_to_original.is_null()) {
+ compiled_module_->set_weak_next_instance(
+ link_to_original.ToHandleChecked());
+ original.ToHandleChecked()->set_weak_prev_instance(link_to_clone);
+ compiled_module_->set_weak_wasm_module(
+ original.ToHandleChecked()->weak_wasm_module());
+ }
+ module_object_->set_compiled_module(*compiled_module_);
+ compiled_module_->set_weak_owning_instance(link_to_owning_instance);
+ GlobalHandles::MakeWeak(
+ global_handle.location(), global_handle.location(),
+ instance_finalizer_callback_, v8::WeakCallbackType::kFinalizer);
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Debugging support.
+ //--------------------------------------------------------------------------
+ // Set all breakpoints that were set on the shared module.
+ WasmSharedModuleData::SetBreakpointsOnNewInstance(compiled_module_->shared(),
+ instance);
+
+ if (FLAG_wasm_interpret_all && module_->is_wasm()) {
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ std::vector<int> func_indexes;
+ for (int func_index = num_imported_functions,
+ num_wasm_functions = static_cast<int>(module_->functions.size());
+ func_index < num_wasm_functions; ++func_index) {
+ func_indexes.push_back(func_index);
+ }
+ WasmDebugInfo::RedirectToInterpreter(
+ debug_info, Vector<int>(func_indexes.data(),
+ static_cast<int>(func_indexes.size())));
+ }
+
+ //--------------------------------------------------------------------------
+ // Run the start function if one was specified.
+ //--------------------------------------------------------------------------
+ if (module_->start_function_index >= 0) {
+ HandleScope scope(isolate_);
+ int start_index = module_->start_function_index;
+ Handle<Code> startup_code = EnsureExportedLazyDeoptData(
+ isolate_, instance, code_table, start_index);
+ FunctionSig* sig = module_->functions[start_index].sig;
+ Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
+ isolate_, module_, startup_code, start_index);
+ Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
+ isolate_, instance, MaybeHandle<String>(), start_index,
+ static_cast<int>(sig->parameter_count()), wrapper_code);
+ RecordStats(*startup_code, counters());
+ // Call the JS function.
+ Handle<Object> undefined = factory->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ // It's unfortunate that the new instance is already linked in the
+ // chain. However, we need to set up everything before executing the
+ // start function, such that stack trace information can be generated
+ // correctly already in the start function.
+ return {};
+ }
+ }
+
+ DCHECK(!isolate_->has_pending_exception());
+ TRACE("Finishing instance %d\n", compiled_module_->instance_id());
+ TRACE_CHAIN(module_object_->compiled_module());
+ return instance;
+}
+
+// Look up an import value in the {ffi_} object.
+MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
+ Handle<String> module_name,
+ Handle<String> import_name) {
+ // We pre-validated in the js-api layer that the ffi object is present, and
+ // a JSObject, if the module has imports.
+ DCHECK(!ffi_.is_null());
+
+ // Look up the module first.
+ MaybeHandle<Object> result =
+ Object::GetPropertyOrElement(ffi_.ToHandleChecked(), module_name);
+ if (result.is_null()) {
+ return ReportTypeError("module not found", index, module_name);
+ }
+
+ Handle<Object> module = result.ToHandleChecked();
+
+ // Look up the value in the module.
+ if (!module->IsJSReceiver()) {
+ return ReportTypeError("module is not an object or function", index,
+ module_name);
+ }
+
+ result = Object::GetPropertyOrElement(module, import_name);
+ if (result.is_null()) {
+ ReportLinkError("import not found", index, module_name, import_name);
+ return MaybeHandle<JSFunction>();
+ }
+
+ return result;
+}
+
+// Look up an import value in the {ffi_} object specifically for linking an
+// asm.js module. This only performs non-observable lookups, which allows
+// falling back to JavaScript proper (and hence re-executing all lookups) if
+// module instantiation fails.
+MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
+ uint32_t index, Handle<String> import_name) {
+ // Check that a foreign function interface object was provided.
+ if (ffi_.is_null()) {
+ return ReportLinkError("missing imports object", index, import_name);
+ }
+
+ // Perform lookup of the given {import_name} without causing any observable
+ // side-effect. We only accept accesses that resolve to data properties,
+ // which is indicated by the asm.js spec in section 7 ("Linking") as well.
+ Handle<Object> result;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, ffi_.ToHandleChecked(), import_name);
+ switch (it.state()) {
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::TRANSITION:
+ return ReportLinkError("not a data property", index, import_name);
+ case LookupIterator::NOT_FOUND:
+ // Accepting missing properties as undefined does not cause any
+ // observable difference from JavaScript semantics, so we are lenient.
+ result = isolate_->factory()->undefined_value();
+ break;
+ case LookupIterator::DATA:
+ result = it.GetDataValue();
+ break;
+ }
+
+ return result;
+}
+
+uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
+ switch (expr.kind) {
+ case WasmInitExpr::kI32Const:
+ return expr.val.i32_const;
+ case WasmInitExpr::kGlobalIndex: {
+ uint32_t offset = module_->globals[expr.val.global_index].offset;
+ return *reinterpret_cast<uint32_t*>(raw_buffer_ptr(globals_, offset));
+ }
+ default:
+ UNREACHABLE();
+ }
+}
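
EvalUint32InitExpr() resolves an init expression either to a literal constant or to the current value of another, already-initialized global read from the raw globals buffer. A small sketch of the same resolution against a hypothetical byte buffer (memcpy is used here to sidestep alignment concerns):

    // Sketch only: resolve an i32 init expression against a raw globals buffer.
    #include <cstdint>
    #include <cstring>

    uint32_t EvalI32Init(bool is_const, uint32_t const_value,
                         uint32_t referenced_offset, const uint8_t* globals) {
      if (is_const) return const_value;
      uint32_t value;
      std::memcpy(&value, globals + referenced_offset, sizeof(value));
      return value;  // value of the referenced, already-initialized global
    }
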
+
+// Load data segments into the memory.
+void InstanceBuilder::LoadDataSegments(Address mem_addr, size_t mem_size) {
+ Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
+ isolate_);
+ for (const WasmDataSegment& segment : module_->data_segments) {
+ uint32_t source_size = segment.source.length();
+ // Segments of size == 0 are just nops.
+ if (source_size == 0) continue;
+ uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
+ DCHECK(
+ in_bounds(dest_offset, source_size, static_cast<uint32_t>(mem_size)));
+ byte* dest = mem_addr + dest_offset;
+ const byte* src = reinterpret_cast<const byte*>(
+ module_bytes->GetCharsAddress() + segment.source.offset());
+ memcpy(dest, src, source_size);
+ }
+}
+
+void InstanceBuilder::WriteGlobalValue(WasmGlobal& global,
+ Handle<Object> value) {
+ double num = value->Number();
+ TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
+ WasmOpcodes::TypeName(global.type));
+ switch (global.type) {
+ case kWasmI32:
+ *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
+ break;
+ case kWasmI64:
+ // TODO(titzer): initialization of imported i64 globals.
+ UNREACHABLE();
+ break;
+ case kWasmF32:
+ *GetRawGlobalPtr<float>(global) = static_cast<float>(num);
+ break;
+ case kWasmF64:
+ *GetRawGlobalPtr<double>(global) = static_cast<double>(num);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
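
WriteGlobalValue() converts the imported JS number and stores it into the raw globals buffer according to the global's wasm type. A small sketch of such typed stores into a hypothetical byte buffer; memcpy stands in here for the reinterpret_cast stores used in the patch:

    // Sketch only: typed stores of imported numeric values into a byte buffer.
    #include <cstdint>
    #include <cstring>

    void WriteF64Global(uint8_t* globals, uint32_t offset, double value) {
      std::memcpy(globals + offset, &value, sizeof(value));
    }

    void WriteI32Global(uint8_t* globals, uint32_t offset, double js_number) {
      int32_t truncated = static_cast<int32_t>(js_number);  // drop the fraction
      std::memcpy(globals + offset, &truncated, sizeof(truncated));
    }
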
+
+// Process the imports, including functions, tables, globals, and memory, in
+// order, loading them from the {ffi_} object. Returns the number of imported
+// functions.
+int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance) {
+ int num_imported_functions = 0;
+ int num_imported_tables = 0;
+ WasmInstanceMap imported_wasm_instances(isolate_->heap());
+ for (int index = 0; index < static_cast<int>(module_->import_table.size());
+ ++index) {
+ WasmImport& import = module_->import_table[index];
+
+ Handle<String> module_name;
+ MaybeHandle<String> maybe_module_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, import.module_name);
+ if (!maybe_module_name.ToHandle(&module_name)) return -1;
+
+ Handle<String> import_name;
+ MaybeHandle<String> maybe_import_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, import.field_name);
+ if (!maybe_import_name.ToHandle(&import_name)) return -1;
+
+ MaybeHandle<Object> result =
+ module_->is_asm_js() ? LookupImportAsm(index, import_name)
+ : LookupImport(index, module_name, import_name);
+ if (thrower_->error()) return -1;
+ Handle<Object> value = result.ToHandleChecked();
+
+ switch (import.kind) {
+ case kExternalFunction: {
+ // Function imports must be callable.
+ if (!value->IsCallable()) {
+ ReportLinkError("function import requires a callable", index,
+ module_name, import_name);
+ return -1;
+ }
+
+ Handle<Code> import_wrapper = UnwrapOrCompileImportWrapper(
+ isolate_, index, module_->functions[import.index].sig,
+ Handle<JSReceiver>::cast(value), module_name, import_name,
+ module_->origin(), &imported_wasm_instances);
+ if (import_wrapper.is_null()) {
+ ReportLinkError("imported function does not match the expected type",
+ index, module_name, import_name);
+ return -1;
+ }
+ code_table->set(num_imported_functions, *import_wrapper);
+ RecordStats(*import_wrapper, counters());
+ num_imported_functions++;
+ break;
+ }
+ case kExternalTable: {
+ if (!value->IsWasmTableObject()) {
+ ReportLinkError("table import requires a WebAssembly.Table", index,
+ module_name, import_name);
+ return -1;
+ }
+ WasmIndirectFunctionTable& table =
+ module_->function_tables[num_imported_tables];
+ TableInstance& table_instance = table_instances_[num_imported_tables];
+ table_instance.table_object = Handle<WasmTableObject>::cast(value);
+ table_instance.js_wrappers = Handle<FixedArray>(
+ table_instance.table_object->functions(), isolate_);
+
+ int imported_cur_size = table_instance.js_wrappers->length();
+ if (imported_cur_size < static_cast<int>(table.min_size)) {
+ thrower_->LinkError(
+ "table import %d is smaller than minimum %d, got %u", index,
+ table.min_size, imported_cur_size);
+ return -1;
+ }
+
+ if (table.has_max) {
+ int64_t imported_max_size =
+ table_instance.table_object->maximum_length()->Number();
+ if (imported_max_size < 0) {
+ thrower_->LinkError(
+ "table import %d has no maximum length, expected %d", index,
+ table.max_size);
+ return -1;
+ }
+ if (imported_max_size > table.max_size) {
+ thrower_->LinkError(
+ "table import %d has maximum larger than maximum %d, "
+ "got %" PRIx64,
+ index, table.max_size, imported_max_size);
+ return -1;
+ }
+ }
+
+ // Allocate a new dispatch table and signature table.
+ int table_size = imported_cur_size;
+ table_instance.function_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ table_instance.signature_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ for (int i = 0; i < table_size; ++i) {
+ table_instance.signature_table->set(i,
+ Smi::FromInt(kInvalidSigIndex));
+ }
+ // Initialize the dispatch table with the (foreign) JS functions
+ // that are already in the table.
+ for (int i = 0; i < table_size; ++i) {
+ Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+ if (!val->IsJSFunction()) continue;
+ WasmFunction* function =
+ GetWasmFunctionForImportWrapper(isolate_, val);
+ if (function == nullptr) {
+ thrower_->LinkError("table import %d[%d] is not a wasm function",
+ index, i);
+ return -1;
+ }
+ int sig_index = table.map.FindOrInsert(function->sig);
+ table_instance.signature_table->set(i, Smi::FromInt(sig_index));
+ table_instance.function_table->set(i, *UnwrapImportWrapper(val));
+ }
+
+ num_imported_tables++;
+ break;
+ }
+ case kExternalMemory: {
+ // Validation should have failed if more than one memory object was
+ // provided.
+ DCHECK(!instance->has_memory_object());
+ if (!value->IsWasmMemoryObject()) {
+ ReportLinkError("memory import must be a WebAssembly.Memory object",
+ index, module_name, import_name);
+ return -1;
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ instance->set_memory_object(*memory);
+ Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
+ memory_ = buffer;
+ uint32_t imported_cur_pages = static_cast<uint32_t>(
+ buffer->byte_length()->Number() / WasmModule::kPageSize);
+ if (imported_cur_pages < module_->min_mem_pages) {
+ thrower_->LinkError(
+ "memory import %d is smaller than minimum %u, got %u", index,
+ module_->min_mem_pages, imported_cur_pages);
+ }
+ int32_t imported_max_pages = memory->maximum_pages();
+ if (module_->has_max_mem) {
+ if (imported_max_pages < 0) {
+ thrower_->LinkError(
+ "memory import %d has no maximum limit, expected at most %u",
+              index, module_->max_mem_pages);
+ return -1;
+ }
+ if (static_cast<uint32_t>(imported_max_pages) >
+ module_->max_mem_pages) {
+ thrower_->LinkError(
+ "memory import %d has larger maximum than maximum %u, got %d",
+ index, module_->max_mem_pages, imported_max_pages);
+ return -1;
+ }
+ }
+ break;
+ }
+ case kExternalGlobal: {
+ // Global imports are converted to numbers and written into the
+ // {globals_} array buffer.
+ if (module_->globals[import.index].type == kWasmI64) {
+ ReportLinkError("global import cannot have type i64", index,
+ module_name, import_name);
+ return -1;
+ }
+ if (module_->is_asm_js()) {
+ // Accepting {JSFunction} on top of just primitive values here is a
+ // workaround to support legacy asm.js code with broken binding. Note
+ // that using {NaN} (or Smi::kZero) here is what using the observable
+ // conversion via {ToPrimitive} would produce as well.
+          // TODO(mstarzinger): This is still observable if
+          // Function.prototype.valueOf or friends are patched; we might need
+          // to check for that as well.
+ if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
+ if (value->IsPrimitive() && !value->IsSymbol()) {
+ if (module_->globals[import.index].type == kWasmI32) {
+ value = Object::ToInt32(isolate_, value).ToHandleChecked();
+ } else {
+ value = Object::ToNumber(value).ToHandleChecked();
+ }
+ }
+ }
+ if (!value->IsNumber()) {
+ ReportLinkError("global import must be a number", index, module_name,
+ import_name);
+ return -1;
+ }
+ WriteGlobalValue(module_->globals[import.index], value);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
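+  // Remember all wasm instances whose code we call directly: storing them in
+  // a FixedArray on this instance keeps them (and their code) alive for at
+  // least as long as this instance.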
+ if (!imported_wasm_instances.empty()) {
+ WasmInstanceMap::IteratableScope iteratable_scope(&imported_wasm_instances);
+ Handle<FixedArray> instances_array = isolate_->factory()->NewFixedArray(
+ imported_wasm_instances.size(), TENURED);
+ instance->set_directly_called_instances(*instances_array);
+ int index = 0;
+ for (auto it = iteratable_scope.begin(), end = iteratable_scope.end();
+ it != end; ++it, ++index) {
+ instances_array->set(index, ***it);
+ }
+ }
+
+ return num_imported_functions;
+}
+
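+// Returns a raw pointer of type {T} into the backing store of the {globals_}
+// buffer, at the offset of the given global.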
+template <typename T>
+T* InstanceBuilder::GetRawGlobalPtr(WasmGlobal& global) {
+ return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
+}
+
+// Process initialization of globals.
+void InstanceBuilder::InitGlobals() {
+ for (auto global : module_->globals) {
+ switch (global.init.kind) {
+ case WasmInitExpr::kI32Const:
+ *GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const;
+ break;
+ case WasmInitExpr::kI64Const:
+ *GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const;
+ break;
+ case WasmInitExpr::kF32Const:
+ *GetRawGlobalPtr<float>(global) = global.init.val.f32_const;
+ break;
+ case WasmInitExpr::kF64Const:
+ *GetRawGlobalPtr<double>(global) = global.init.val.f64_const;
+ break;
+ case WasmInitExpr::kGlobalIndex: {
+ // Initialize with another global.
+ uint32_t new_offset = global.offset;
+ uint32_t old_offset =
+ module_->globals[global.init.val.global_index].offset;
+ TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
+ size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
+ ? sizeof(double)
+ : sizeof(int32_t);
+ memcpy(raw_buffer_ptr(globals_, new_offset),
+ raw_buffer_ptr(globals_, old_offset), size);
+ break;
+ }
+ case WasmInitExpr::kNone:
+ // Happens with imported globals.
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+// Allocate memory for a module instance as a new JSArrayBuffer.
+Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t min_mem_pages) {
+ if (min_mem_pages > FLAG_wasm_max_mem_pages) {
+ thrower_->RangeError("Out of memory: wasm memory too large");
+ return Handle<JSArrayBuffer>::null();
+ }
+ const bool enable_guard_regions = EnableGuardRegions();
+ Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
+ isolate_, min_mem_pages * WasmModule::kPageSize, enable_guard_regions);
+
+ if (mem_buffer.is_null()) {
+ thrower_->RangeError("Out of memory: wasm memory");
+ }
+ return mem_buffer;
+}
+
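+// Returns true if JSFunction wrappers have to be cached for this instance,
+// i.e. if any function is exported, any imported table already carries JS
+// wrappers, or any function table is exported.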
+bool InstanceBuilder::NeedsWrappers() const {
+ if (module_->num_exported_functions > 0) return true;
+ for (auto& table_instance : table_instances_) {
+ if (!table_instance.js_wrappers.is_null()) return true;
+ }
+ for (auto& table : module_->function_tables) {
+ if (table.exported) return true;
+ }
+ return false;
+}
+
+// Process the exports, creating wrappers for functions, tables, memories,
+// and globals.
+void InstanceBuilder::ProcessExports(
+ Handle<FixedArray> code_table, Handle<WasmInstanceObject> instance,
+ Handle<WasmCompiledModule> compiled_module) {
+ if (NeedsWrappers()) {
+ // Fill the table to cache the exported JSFunction wrappers.
+ js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
+ Handle<JSFunction>::null());
+ }
+
+ Handle<JSObject> exports_object;
+ if (module_->is_wasm()) {
+ // Create the "exports" object.
+ exports_object = isolate_->factory()->NewJSObjectWithNullProto();
+ } else if (module_->is_asm_js()) {
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate_->native_context()->object_function(), isolate_);
+ exports_object = isolate_->factory()->NewJSObject(object_function);
+ } else {
+ UNREACHABLE();
+ }
+ Handle<String> exports_name =
+ isolate_->factory()->InternalizeUtf8String("exports");
+ JSObject::AddProperty(instance, exports_name, exports_object, NONE);
+
+ Handle<String> single_function_name =
+ isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
+
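+  // asm.js exports are writable and configurable; pure wasm exports are
+  // neither, and the whole exports object is frozen below.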
+ PropertyDescriptor desc;
+ desc.set_writable(module_->is_asm_js());
+ desc.set_enumerable(true);
+ desc.set_configurable(module_->is_asm_js());
+
+ // Store weak references to all exported functions.
+ Handle<FixedArray> weak_exported_functions;
+ if (compiled_module->has_weak_exported_functions()) {
+ weak_exported_functions = compiled_module->weak_exported_functions();
+ } else {
+ int export_count = 0;
+ for (WasmExport& exp : module_->export_table) {
+ if (exp.kind == kExternalFunction) ++export_count;
+ }
+ weak_exported_functions = isolate_->factory()->NewFixedArray(export_count);
+ compiled_module->set_weak_exported_functions(weak_exported_functions);
+ }
+
+ // Process each export in the export table.
+ int export_index = 0; // Index into {weak_exported_functions}.
+ for (WasmExport& exp : module_->export_table) {
+ Handle<String> name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, exp.name)
+ .ToHandleChecked();
+ Handle<JSObject> export_to;
+ if (module_->is_asm_js() && exp.kind == kExternalFunction &&
+ String::Equals(name, single_function_name)) {
+ export_to = instance;
+ } else {
+ export_to = exports_object;
+ }
+
+ switch (exp.kind) {
+ case kExternalFunction: {
+ // Wrap and export the code as a JSFunction.
+ WasmFunction& function = module_->functions[exp.index];
+ int func_index =
+ static_cast<int>(module_->functions.size() + export_index);
+ Handle<JSFunction> js_function = js_wrappers_[exp.index];
+ if (js_function.is_null()) {
+ // Wrap the exported code as a JSFunction.
+ Handle<Code> export_code =
+ code_table->GetValueChecked<Code>(isolate_, func_index);
+ MaybeHandle<String> func_name;
+ if (module_->is_asm_js()) {
+ // For modules arising from asm.js, honor the names section.
+ func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, function.name)
+ .ToHandleChecked();
+ }
+ js_function = WasmExportedFunction::New(
+ isolate_, instance, func_name, function.func_index,
+ static_cast<int>(function.sig->parameter_count()), export_code);
+ js_wrappers_[exp.index] = js_function;
+ }
+ desc.set_value(js_function);
+ Handle<WeakCell> weak_export =
+ isolate_->factory()->NewWeakCell(js_function);
+ DCHECK_GT(weak_exported_functions->length(), export_index);
+ weak_exported_functions->set(export_index, *weak_export);
+ export_index++;
+ break;
+ }
+ case kExternalTable: {
+ // Export a table as a WebAssembly.Table object.
+ TableInstance& table_instance = table_instances_[exp.index];
+ WasmIndirectFunctionTable& table = module_->function_tables[exp.index];
+ if (table_instance.table_object.is_null()) {
+ uint32_t maximum =
+ table.has_max ? table.max_size : FLAG_wasm_max_table_size;
+ table_instance.table_object = WasmTableObject::New(
+ isolate_, table.min_size, maximum, &table_instance.js_wrappers);
+ }
+ desc.set_value(table_instance.table_object);
+ break;
+ }
+ case kExternalMemory: {
+ // Export the memory as a WebAssembly.Memory object.
+ Handle<WasmMemoryObject> memory_object;
+ if (!instance->has_memory_object()) {
+ // If there was no imported WebAssembly.Memory object, create one.
+ memory_object = WasmMemoryObject::New(
+ isolate_,
+ (instance->has_memory_buffer())
+ ? handle(instance->memory_buffer())
+ : Handle<JSArrayBuffer>::null(),
+ (module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
+ instance->set_memory_object(*memory_object);
+ } else {
+ memory_object =
+ Handle<WasmMemoryObject>(instance->memory_object(), isolate_);
+ }
+
+ desc.set_value(memory_object);
+ break;
+ }
+ case kExternalGlobal: {
+ // Export the value of the global variable as a number.
+ WasmGlobal& global = module_->globals[exp.index];
+ double num = 0;
+ switch (global.type) {
+ case kWasmI32:
+ num = *GetRawGlobalPtr<int32_t>(global);
+ break;
+ case kWasmF32:
+ num = *GetRawGlobalPtr<float>(global);
+ break;
+ case kWasmF64:
+ num = *GetRawGlobalPtr<double>(global);
+ break;
+ case kWasmI64:
+ thrower_->LinkError(
+ "export of globals of type I64 is not allowed.");
+ return;
+ default:
+ UNREACHABLE();
+ }
+ desc.set_value(isolate_->factory()->NewNumber(num));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate_, export_to, name, &desc, Object::THROW_ON_ERROR);
+ if (!status.IsJust()) {
+ thrower_->LinkError("export of %.*s failed.", name->length(),
+ name->ToCString().get());
+ return;
+ }
+ }
+ DCHECK_EQ(export_index, weak_exported_functions->length());
+
+ if (module_->is_wasm()) {
+ v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
+ exports_object, FROZEN, Object::DONT_THROW);
+ DCHECK(success.FromMaybe(false));
+ USE(success);
+ }
+}
+
+void InstanceBuilder::InitializeTables(
+ Handle<WasmInstanceObject> instance,
+ CodeSpecialization* code_specialization) {
+ int function_table_count = static_cast<int>(module_->function_tables.size());
+ Handle<FixedArray> new_function_tables =
+ isolate_->factory()->NewFixedArray(function_table_count);
+ Handle<FixedArray> new_signature_tables =
+ isolate_->factory()->NewFixedArray(function_table_count);
+ for (int index = 0; index < function_table_count; ++index) {
+ WasmIndirectFunctionTable& table = module_->function_tables[index];
+ TableInstance& table_instance = table_instances_[index];
+ int table_size = static_cast<int>(table.min_size);
+
+ if (table_instance.function_table.is_null()) {
+ // Create a new dispatch table if necessary.
+ table_instance.function_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ table_instance.signature_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ for (int i = 0; i < table_size; ++i) {
+ // Fill the table with invalid signature indexes so that
+ // uninitialized entries will always fail the signature check.
+ table_instance.signature_table->set(i, Smi::FromInt(kInvalidSigIndex));
+ }
+ } else {
+      // The table is imported; patch the table bounds check.
+ DCHECK(table_size <= table_instance.function_table->length());
+ if (table_size < table_instance.function_table->length()) {
+ code_specialization->PatchTableSize(
+ table_size, table_instance.function_table->length());
+ }
+ }
+
+ new_function_tables->set(static_cast<int>(index),
+ *table_instance.function_table);
+ new_signature_tables->set(static_cast<int>(index),
+ *table_instance.signature_table);
+ }
+
+ FixedArray* old_function_tables = compiled_module_->ptr_to_function_tables();
+ DCHECK_EQ(old_function_tables->length(), new_function_tables->length());
+ for (int i = 0, e = new_function_tables->length(); i < e; ++i) {
+ code_specialization->RelocateObject(
+ handle(old_function_tables->get(i), isolate_),
+ handle(new_function_tables->get(i), isolate_));
+ }
+ FixedArray* old_signature_tables =
+ compiled_module_->ptr_to_signature_tables();
+ DCHECK_EQ(old_signature_tables->length(), new_signature_tables->length());
+ for (int i = 0, e = new_signature_tables->length(); i < e; ++i) {
+ code_specialization->RelocateObject(
+ handle(old_signature_tables->get(i), isolate_),
+ handle(new_signature_tables->get(i), isolate_));
+ }
+
+ compiled_module_->set_function_tables(new_function_tables);
+ compiled_module_->set_signature_tables(new_signature_tables);
+}
+
+void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance) {
+ int function_table_count = static_cast<int>(module_->function_tables.size());
+ for (int index = 0; index < function_table_count; ++index) {
+ WasmIndirectFunctionTable& table = module_->function_tables[index];
+ TableInstance& table_instance = table_instances_[index];
+
+ Handle<FixedArray> all_dispatch_tables;
+ if (!table_instance.table_object.is_null()) {
+ // Get the existing dispatch table(s) with the WebAssembly.Table object.
+ all_dispatch_tables =
+ handle(table_instance.table_object->dispatch_tables());
+ }
+
+ // Count the number of table exports for each function (needed for lazy
+ // compilation).
+ std::unordered_map<uint32_t, uint32_t> num_table_exports;
+ if (compile_lazy(module_)) {
+ for (auto table_init : module_->table_inits) {
+ for (uint32_t func_index : table_init.entries) {
+ Code* code =
+ Code::cast(code_table->get(static_cast<int>(func_index)));
+ // Only increase the counter for lazy compile builtins (it's not
+ // needed otherwise).
+ if (code->is_wasm_code()) continue;
+ DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ ++num_table_exports[func_index];
+ }
+ }
+ }
+
+ // TODO(titzer): this does redundant work if there are multiple tables,
+ // since initializations are not sorted by table index.
+ for (auto table_init : module_->table_inits) {
+ uint32_t base = EvalUint32InitExpr(table_init.offset);
+ uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
+ DCHECK(in_bounds(base, num_entries,
+ table_instance.function_table->length()));
+ for (uint32_t i = 0; i < num_entries; ++i) {
+ uint32_t func_index = table_init.entries[i];
+ WasmFunction* function = &module_->functions[func_index];
+ int table_index = static_cast<int>(i + base);
+ int32_t sig_index = table.map.Find(function->sig);
+ DCHECK_GE(sig_index, 0);
+ table_instance.signature_table->set(table_index,
+ Smi::FromInt(sig_index));
+ Handle<Code> wasm_code = EnsureTableExportLazyDeoptData(
+ isolate_, instance, code_table, func_index,
+ table_instance.function_table, table_index, num_table_exports);
+ table_instance.function_table->set(table_index, *wasm_code);
+
+ if (!all_dispatch_tables.is_null()) {
+ if (js_wrappers_[func_index].is_null()) {
+ // No JSFunction entry yet exists for this function. Create one.
+            // TODO(titzer): We compile JS->wasm wrappers for functions that
+            // are not exported but are in an exported table. This should be
+            // done at module compile time and cached instead.
+
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
+ isolate_, module_, wasm_code, func_index);
+ MaybeHandle<String> func_name;
+ if (module_->is_asm_js()) {
+ // For modules arising from asm.js, honor the names section.
+ func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, function->name)
+ .ToHandleChecked();
+ }
+ Handle<WasmExportedFunction> js_function =
+ WasmExportedFunction::New(
+ isolate_, instance, func_name, func_index,
+ static_cast<int>(function->sig->parameter_count()),
+ wrapper_code);
+ js_wrappers_[func_index] = js_function;
+ }
+ table_instance.js_wrappers->set(table_index,
+ *js_wrappers_[func_index]);
+
+ UpdateDispatchTables(isolate_, all_dispatch_tables, table_index,
+ function, wasm_code);
+ }
+ }
+ }
+
+#ifdef DEBUG
+ // Check that the count of table exports was accurate. The entries are
+ // decremented on each export, so all should be zero now.
+ for (auto e : num_table_exports) {
+ DCHECK_EQ(0, e.second);
+ }
+#endif
+
+ // TODO(titzer): we add the new dispatch table at the end to avoid
+ // redundant work and also because the new instance is not yet fully
+ // initialized.
+ if (!table_instance.table_object.is_null()) {
+ // Add the new dispatch table to the WebAssembly.Table object.
+ all_dispatch_tables = WasmTableObject::AddDispatchTable(
+ isolate_, table_instance.table_object, instance, index,
+ table_instance.function_table, table_instance.signature_table);
+ }
+ }
+}
+
+AsyncCompileJob::AsyncCompileJob(Isolate* isolate,
+ std::unique_ptr<byte[]> bytes_copy,
+ size_t length, Handle<Context> context,
+ Handle<JSPromise> promise)
+ : isolate_(isolate),
+ async_counters_(isolate->async_counters()),
+ bytes_copy_(std::move(bytes_copy)),
+ wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length) {
+ // The handles for the context and promise must be deferred.
+ DeferredHandleScope deferred(isolate);
+ context_ = Handle<Context>(*context);
+ module_promise_ = Handle<JSPromise>(*promise);
+ deferred_handles_.push_back(deferred.Detach());
+}
+
+void AsyncCompileJob::Start() {
+ DoAsync<DecodeModule>(); // --
+}
+
+AsyncCompileJob::~AsyncCompileJob() {
+ background_task_manager_.CancelAndWait();
+ for (auto d : deferred_handles_) delete d;
+}
+
+void AsyncCompileJob::ReopenHandlesInDeferredScope() {
+ DeferredHandleScope deferred(isolate_);
+ function_tables_ = handle(*function_tables_, isolate_);
+ signature_tables_ = handle(*signature_tables_, isolate_);
+ code_table_ = handle(*code_table_, isolate_);
+ temp_instance_->ReopenHandles(isolate_);
+ compiler_->ReopenHandlesInDeferredScope();
+ deferred_handles_.push_back(deferred.Detach());
+}
+
+void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
+ RejectPromise(isolate_, context_, thrower, module_promise_);
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
+}
+
+void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
+ ResolvePromise(isolate_, context_, module_promise_, result);
+ isolate_->wasm_compilation_manager()->RemoveJob(this);
+}
+
+// A closure to run a compilation step (either as foreground or background
+// task) and schedule the next step(s), if any.
+class AsyncCompileJob::CompileStep {
+ public:
+ explicit CompileStep(size_t num_background_tasks = 0)
+ : num_background_tasks_(num_background_tasks) {}
+
+ virtual ~CompileStep() {}
+
+ void Run(bool on_foreground) {
+ if (on_foreground) {
+ DCHECK_EQ(1, job_->num_pending_foreground_tasks_--);
+ SaveContext saved_context(job_->isolate_);
+ job_->isolate_->set_context(*job_->context_);
+ RunInForeground();
+ } else {
+ RunInBackground();
+ }
+ }
+
+ virtual void RunInForeground() { UNREACHABLE(); }
+ virtual void RunInBackground() { UNREACHABLE(); }
+
+ size_t NumberOfBackgroundTasks() { return num_background_tasks_; }
+
+ AsyncCompileJob* job_ = nullptr;
+ const size_t num_background_tasks_;
+};
+
+class AsyncCompileJob::CompileTask : public CancelableTask {
+ public:
+ CompileTask(AsyncCompileJob* job, bool on_foreground)
+ // We only manage the background tasks with the {CancelableTaskManager} of
+ // the {AsyncCompileJob}. Foreground tasks are managed by the system's
+ // {CancelableTaskManager}. Background tasks cannot spawn tasks managed by
+ // their own task manager.
+ : CancelableTask(on_foreground ? job->isolate_->cancelable_task_manager()
+ : &job->background_task_manager_),
+ job_(job),
+ on_foreground_(on_foreground) {}
+
+ void RunInternal() override { job_->step_->Run(on_foreground_); }
+
+ private:
+ AsyncCompileJob* job_;
+ bool on_foreground_;
+};
+
+void AsyncCompileJob::StartForegroundTask() {
+ DCHECK_EQ(0, num_pending_foreground_tasks_++);
+
+ V8::GetCurrentPlatform()->CallOnForegroundThread(
+ reinterpret_cast<v8::Isolate*>(isolate_), new CompileTask(this, true));
+}
+
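+// Runs the given compile step as a single task on the foreground (main)
+// thread.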
+template <typename State, typename... Args>
+void AsyncCompileJob::DoSync(Args&&... args) {
+ step_.reset(new State(std::forward<Args>(args)...));
+ step_->job_ = this;
+ StartForegroundTask();
+}
+
+void AsyncCompileJob::StartBackgroundTask() {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new CompileTask(this, false), v8::Platform::kShortRunningTask);
+}
+
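+// Runs the given compile step on background threads, starting one task per
+// {NumberOfBackgroundTasks()} of the step.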
+template <typename State, typename... Args>
+void AsyncCompileJob::DoAsync(Args&&... args) {
+ step_.reset(new State(std::forward<Args>(args)...));
+ step_->job_ = this;
+ size_t end = step_->NumberOfBackgroundTasks();
+ for (size_t i = 0; i < end; ++i) {
+ StartBackgroundTask();
+ }
+}
+
+//==========================================================================
+// Step 1: (async) Decode the module.
+//==========================================================================
+class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
+ public:
+ DecodeModule() : CompileStep(1) {}
+
+ void RunInBackground() override {
+ ModuleResult result;
+ {
+ DisallowHandleAllocation no_handle;
+ DisallowHeapAllocation no_allocation;
+ // Decode the module bytes.
+ TRACE_COMPILE("(1) Decoding module...\n");
+ result = AsyncDecodeWasmModule(job_->isolate_, job_->wire_bytes_.start(),
+ job_->wire_bytes_.end(), false,
+ kWasmOrigin, job_->async_counters());
+ }
+ if (result.failed()) {
+ // Decoding failure; reject the promise and clean up.
+ job_->DoSync<DecodeFail>(std::move(result));
+ } else {
+ // Decode passed.
+ job_->DoSync<PrepareAndStartCompile>(std::move(result.val));
+ }
+ }
+};
+
+//==========================================================================
+// Step 1b: (sync) Fail decoding the module.
+//==========================================================================
+class AsyncCompileJob::DecodeFail : public CompileStep {
+ public:
+ explicit DecodeFail(ModuleResult result) : result_(std::move(result)) {}
+
+ private:
+ ModuleResult result_;
+ void RunInForeground() override {
+ TRACE_COMPILE("(1b) Decoding failed.\n");
+ HandleScope scope(job_->isolate_);
+ ErrorThrower thrower(job_->isolate_, "AsyncCompile");
+ thrower.CompileFailed("Wasm decoding failed", result_);
+ // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
+ return job_->AsyncCompileFailed(thrower);
+ }
+};
+
+//==========================================================================
+// Step 2 (sync): Create heap-allocated data and start compile.
+//==========================================================================
+class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
+ public:
+ explicit PrepareAndStartCompile(std::unique_ptr<WasmModule> module)
+ : module_(std::move(module)) {}
+
+ private:
+ std::unique_ptr<WasmModule> module_;
+ void RunInForeground() override {
+ TRACE_COMPILE("(2) Prepare and start compile...\n");
+ HandleScope scope(job_->isolate_);
+
+ Factory* factory = job_->isolate_->factory();
+ job_->temp_instance_.reset(new WasmInstance(module_.get()));
+ job_->temp_instance_->context = job_->context_;
+ job_->temp_instance_->mem_size =
+ WasmModule::kPageSize * module_->min_mem_pages;
+ job_->temp_instance_->mem_start = nullptr;
+ job_->temp_instance_->globals_start = nullptr;
+
+ // Initialize the indirect tables with placeholders.
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ job_->function_tables_ =
+ factory->NewFixedArray(function_table_count, TENURED);
+ job_->signature_tables_ =
+ factory->NewFixedArray(function_table_count, TENURED);
+ for (int i = 0; i < function_table_count; ++i) {
+ job_->temp_instance_->function_tables[i] =
+ factory->NewFixedArray(1, TENURED);
+ job_->temp_instance_->signature_tables[i] =
+ factory->NewFixedArray(1, TENURED);
+ job_->function_tables_->set(i, *job_->temp_instance_->function_tables[i]);
+ job_->signature_tables_->set(i,
+ *job_->temp_instance_->signature_tables[i]);
+ }
+
+ // The {code_table} array contains import wrappers and functions (which
+    // are both included in {functions.size()}), and export wrappers.
+ // The results of compilation will be written into it.
+ int code_table_size = static_cast<int>(module_->functions.size() +
+ module_->num_exported_functions);
+ job_->code_table_ = factory->NewFixedArray(code_table_size, TENURED);
+
+ // Initialize {code_table_} with the illegal builtin. All call sites
+ // will be patched at instantiation.
+ Handle<Code> illegal_builtin = job_->isolate_->builtins()->Illegal();
+ // TODO(wasm): Fix this for lazy compilation.
+ for (uint32_t i = 0; i < module_->functions.size(); ++i) {
+ job_->code_table_->set(static_cast<int>(i), *illegal_builtin);
+ job_->temp_instance_->function_code[i] = illegal_builtin;
+ }
+
+ job_->counters()->wasm_functions_per_wasm_module()->AddSample(
+ static_cast<int>(module_->functions.size()));
+
+ // Transfer ownership of the {WasmModule} to the {ModuleCompiler}, but
+ // keep a pointer.
+ WasmModule* module = module_.get();
+ job_->compiler_.reset(
+ new ModuleCompiler(job_->isolate_, std::move(module_)));
+ job_->compiler_->EnableThrottling();
+
+ // Reopen all handles which should survive in the DeferredHandleScope.
+ job_->ReopenHandlesInDeferredScope();
+
+ DCHECK_LE(module->num_imported_functions, module->functions.size());
+ size_t num_functions =
+ module->functions.size() - module->num_imported_functions;
+ if (num_functions == 0) {
+ // Degenerate case of an empty module.
+ job_->DoSync<FinishCompile>();
+ return;
+ }
+
+ // Start asynchronous compilation tasks.
+ size_t num_background_tasks =
+ Max(static_cast<size_t>(1),
+ Min(num_functions,
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()
+ ->NumberOfAvailableBackgroundThreads())));
+ job_->module_bytes_env_.reset(new ModuleBytesEnv(
+ module, job_->temp_instance_.get(), job_->wire_bytes_));
+
+ job_->outstanding_units_ = job_->compiler_->InitializeCompilationUnits(
+ module->functions, *job_->module_bytes_env_);
+
+ job_->DoAsync<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ }
+};
+
+//==========================================================================
+// Step 3 (async x K tasks): Execute compilation units.
+//==========================================================================
+class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
+ public:
+ explicit ExecuteAndFinishCompilationUnits(size_t num_compile_tasks)
+ : CompileStep(num_compile_tasks) {}
+
+ void RunInBackground() override {
+ std::function<void()> StartFinishCompilationUnit = [this]() {
+ if (!failed_) job_->StartForegroundTask();
+ };
+
+ TRACE_COMPILE("(3) Compiling...\n");
+ while (job_->compiler_->CanAcceptWork()) {
+ if (failed_) break;
+ DisallowHandleAllocation no_handle;
+ DisallowHeapAllocation no_allocation;
+ if (!job_->compiler_->FetchAndExecuteCompilationUnit(
+ StartFinishCompilationUnit)) {
+ finished_ = true;
+ break;
+ }
+ }
+ stopped_tasks_.Increment(1);
+ }
+
+ void RestartCompilationTasks() {
+ size_t num_restarts = stopped_tasks_.Value();
+ stopped_tasks_.Decrement(num_restarts);
+
+ for (size_t i = 0; i < num_restarts; ++i) {
+ job_->StartBackgroundTask();
+ }
+ }
+
+ void RunInForeground() override {
+ TRACE_COMPILE("(4a) Finishing compilation units...\n");
+ if (failed_) {
+ // The job failed already, no need to do more work.
+ job_->compiler_->SetFinisherIsRunning(false);
+ return;
+ }
+ HandleScope scope(job_->isolate_);
+ ErrorThrower thrower(job_->isolate_, "AsyncCompile");
+
+ // We execute for 1 ms and then reschedule the task, same as the GC.
+ double deadline = MonotonicallyIncreasingTimeInMs() + 1.0;
+
+ while (true) {
+ if (!finished_ && job_->compiler_->ShouldIncreaseWorkload()) {
+ RestartCompilationTasks();
+ }
+
+ int func_index = -1;
+
+ MaybeHandle<Code> result =
+ job_->compiler_->FinishCompilationUnit(&thrower, &func_index);
+
+ if (thrower.error()) {
+ // An error was detected, we stop compiling and wait for the
+ // background tasks to finish.
+ failed_ = true;
+ break;
+ } else if (result.is_null()) {
+ // The working queue was empty, we break the loop. If new work units
+ // are enqueued, the background task will start this
+ // FinishCompilationUnits task again.
+ break;
+ } else {
+ DCHECK(func_index >= 0);
+ job_->code_table_->set(func_index, *result.ToHandleChecked());
+ --job_->outstanding_units_;
+ }
+
+ if (deadline < MonotonicallyIncreasingTimeInMs()) {
+ // We reached the deadline. We reschedule this task and return
+ // immediately. Since we rescheduled this task already, we do not set
+        // the FinisherIsRunning flag to false.
+ job_->StartForegroundTask();
+ return;
+ }
+ }
+ // This task finishes without being rescheduled. Therefore we set the
+ // FinisherIsRunning flag to false.
+ job_->compiler_->SetFinisherIsRunning(false);
+ if (thrower.error()) {
+ // Make sure all compilation tasks stopped running.
+ job_->background_task_manager_.CancelAndWait();
+ return job_->AsyncCompileFailed(thrower);
+ }
+ if (job_->outstanding_units_ == 0) {
+ // Make sure all compilation tasks stopped running.
+ job_->background_task_manager_.CancelAndWait();
+ job_->DoSync<FinishCompile>();
+ }
+ }
+
+ private:
+ std::atomic<bool> failed_{false};
+ std::atomic<bool> finished_{false};
+ base::AtomicNumber<size_t> stopped_tasks_{0};
+};
+
+//==========================================================================
+// Step 5 (sync): Finish heap-allocated data structures.
+//==========================================================================
+class AsyncCompileJob::FinishCompile : public CompileStep {
+ void RunInForeground() override {
+ TRACE_COMPILE("(5b) Finish compile...\n");
+ HandleScope scope(job_->isolate_);
+ // At this point, compilation has completed. Update the code table.
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < job_->temp_instance_->function_code.size(); ++i) {
+ Code* code = Code::cast(job_->code_table_->get(static_cast<int>(i)));
+ RecordStats(code, job_->counters());
+ }
+
+ // Create heap objects for script and module bytes to be stored in the
+ // shared module data. Asm.js is not compiled asynchronously.
+ Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_);
+ Handle<ByteArray> asm_js_offset_table;
+ // TODO(wasm): Improve efficiency of storing module wire bytes.
+ // 1. Only store relevant sections, not function bodies
+ // 2. Don't make a second copy of the bytes here; reuse the copy made
+ // for asynchronous compilation and store it as an external one
+ // byte string for serialization/deserialization.
+ Handle<String> module_bytes =
+ job_->isolate_->factory()
+ ->NewStringFromOneByte(
+ {job_->wire_bytes_.start(), job_->wire_bytes_.length()},
+ TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper = WasmModuleWrapper::New(
+ job_->isolate_, job_->compiler_->ReleaseModule().release());
+
+ // Create the shared module data.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmSharedModuleData. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ Handle<WasmSharedModuleData> shared =
+ WasmSharedModuleData::New(job_->isolate_, module_wrapper,
+ Handle<SeqOneByteString>::cast(module_bytes),
+ script, asm_js_offset_table);
+
+ // Create the compiled module object and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of
+ // this object.
+ job_->compiled_module_ = WasmCompiledModule::New(
+ job_->isolate_, shared, job_->code_table_, job_->function_tables_,
+ job_->signature_tables_);
+
+ // Finish the wasm script now and make it public to the debugger.
+ script->set_wasm_compiled_module(*job_->compiled_module_);
+ job_->isolate_->debug()->OnAfterCompile(script);
+
+ DeferredHandleScope deferred(job_->isolate_);
+ job_->compiled_module_ = handle(*job_->compiled_module_, job_->isolate_);
+ job_->deferred_handles_.push_back(deferred.Detach());
+ // TODO(wasm): compiling wrappers should be made async as well.
+ job_->DoSync<CompileWrappers>();
+ }
+};
+
+//==========================================================================
+// Step 6 (sync): Compile JS->wasm wrappers.
+//==========================================================================
+class AsyncCompileJob::CompileWrappers : public CompileStep {
+ void RunInForeground() override {
+ TRACE_COMPILE("(6) Compile wrappers...\n");
+ // Compile JS->wasm wrappers for exported functions.
+ HandleScope scope(job_->isolate_);
+ JSToWasmWrapperCache js_to_wasm_cache;
+ int func_index = 0;
+ WasmModule* module = job_->compiled_module_->module();
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Handle<Code> wasm_code(Code::cast(job_->code_table_->get(exp.index)),
+ job_->isolate_);
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(job_->isolate_, module,
+ wasm_code, exp.index);
+ int export_index =
+ static_cast<int>(module->functions.size() + func_index);
+ job_->code_table_->set(export_index, *wrapper_code);
+ RecordStats(*wrapper_code, job_->counters());
+ func_index++;
+ }
+
+ job_->DoSync<FinishModule>();
+ }
+};
+
+//==========================================================================
+// Step 7 (sync): Finish the module and resolve the promise.
+//==========================================================================
+class AsyncCompileJob::FinishModule : public CompileStep {
+ void RunInForeground() override {
+ TRACE_COMPILE("(7) Finish module...\n");
+ HandleScope scope(job_->isolate_);
+ Handle<WasmModuleObject> result =
+ WasmModuleObject::New(job_->isolate_, job_->compiled_module_);
+ // {job_} is deleted in AsyncCompileSucceeded, therefore the {return}.
+ return job_->AsyncCompileSucceeded(result);
+ }
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
new file mode 100644
index 0000000000..2e59b357d2
--- /dev/null
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -0,0 +1,390 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_COMPILER_H_
+#define V8_WASM_MODULE_COMPILER_H_
+
+#include <functional>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/cancelable-task.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/isolate.h"
+
+#include "src/wasm/wasm-code-specialization.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// A class compiling an entire module.
+class ModuleCompiler {
+ public:
+ // The ModuleCompiler takes ownership of the {WasmModule}.
+ // In {CompileToModuleObject}, it will transfer ownership to the generated
+ // {WasmModuleWrapper}. If this method is not called, ownership may be
+ // reclaimed by explicitly releasing the {module_} field.
+ ModuleCompiler(Isolate* isolate, std::unique_ptr<WasmModule> module);
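+  // A minimal sketch (illustrative only) of the synchronous path, using the
+  // parameter names declared below:
+  //   ModuleCompiler compiler(isolate, std::move(module));
+  //   MaybeHandle<WasmModuleObject> object = compiler.CompileToModuleObject(
+  //       thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes);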
+
+ // The actual runnable task that performs compilations in the background.
+ class CompilationTask : public CancelableTask {
+ public:
+ ModuleCompiler* compiler_;
+ explicit CompilationTask(ModuleCompiler* helper);
+
+ void RunInternal() override;
+ };
+
+ // The CompilationUnitBuilder builds compilation units and stores them in an
+ // internal buffer. The buffer is moved into the working queue of the
+ // ModuleCompiler when {Commit} is called.
+ class CompilationUnitBuilder {
+ public:
+ explicit CompilationUnitBuilder(ModuleCompiler* compiler)
+ : compiler_(compiler) {}
+
+ ~CompilationUnitBuilder() { DCHECK(units_.empty()); }
+
+ void AddUnit(ModuleEnv* module_env, const WasmFunction* function,
+ uint32_t buffer_offset, Vector<const uint8_t> bytes,
+ WasmName name) {
+ units_.emplace_back(new compiler::WasmCompilationUnit(
+ compiler_->isolate_, module_env,
+ wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
+ bytes.end()},
+ name, function->func_index, compiler_->centry_stub_,
+ compiler_->async_counters()));
+ }
+
+ void Commit() {
+ {
+ base::LockGuard<base::Mutex> guard(
+ &compiler_->compilation_units_mutex_);
+ compiler_->compilation_units_.insert(
+ compiler_->compilation_units_.end(),
+ std::make_move_iterator(units_.begin()),
+ std::make_move_iterator(units_.end()));
+ }
+ units_.clear();
+ }
+
+ private:
+ ModuleCompiler* compiler_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
+ };
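+    // Typical use (illustrative): create a builder, call {AddUnit} once per
+    // function body, then {Commit} to move all units into the compiler's
+    // shared {compilation_units_} queue.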
+
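+  // Holds compilation units whose code has already been generated and which
+  // are waiting to be finished on the foreground thread; also used to
+  // throttle background work via {CanAcceptWork} and {ShouldIncreaseWorkload}.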
+ class CodeGenerationSchedule {
+ public:
+ explicit CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator,
+ size_t max_memory = 0);
+
+ void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
+
+ bool IsEmpty() const { return schedule_.empty(); }
+
+ std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
+
+ bool CanAcceptWork() const;
+
+ bool ShouldIncreaseWorkload() const;
+
+ void EnableThrottling() { throttle_ = true; }
+
+ private:
+ size_t GetRandomIndexInSchedule();
+
+ base::RandomNumberGenerator* random_number_generator_ = nullptr;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
+ const size_t max_memory_;
+ bool throttle_ = false;
+ base::AtomicNumber<size_t> allocated_memory_{0};
+ };
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+ // Run by each compilation task and by the main thread (i.e. in both
+ // foreground and background threads). The no_finisher_callback is called
+ // within the result_mutex_ lock when no finishing task is running, i.e. when
+ // the finisher_is_running_ flag is not set.
+ bool FetchAndExecuteCompilationUnit(
+ std::function<void()> no_finisher_callback = nullptr);
+
+ void OnBackgroundTaskStopped();
+
+ void EnableThrottling() { executed_units_.EnableThrottling(); }
+
+ bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
+
+ bool ShouldIncreaseWorkload() const {
+ return executed_units_.ShouldIncreaseWorkload();
+ }
+
+ size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
+ ModuleBytesEnv& module_env);
+
+ void ReopenHandlesInDeferredScope();
+
+ void RestartCompilationTasks();
+
+ size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void SetFinisherIsRunning(bool value);
+
+ MaybeHandle<Code> FinishCompilationUnit(ErrorThrower* thrower,
+ int* func_index);
+
+ void CompileInParallel(ModuleBytesEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void CompileSequentially(ModuleBytesEnv* module_env,
+ std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower);
+
+ void ValidateSequentially(ModuleBytesEnv* module_env, ErrorThrower* thrower);
+
+ MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ std::unique_ptr<WasmModule> ReleaseModule() { return std::move(module_); }
+
+ private:
+ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes, Factory* factory,
+ WasmInstance* temp_instance, Handle<FixedArray>* function_tables,
+ Handle<FixedArray>* signature_tables);
+
+ Isolate* isolate_;
+ std::unique_ptr<WasmModule> module_;
+ const std::shared_ptr<Counters> async_counters_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
+ compilation_units_;
+ base::Mutex compilation_units_mutex_;
+ CodeGenerationSchedule executed_units_;
+ base::Mutex result_mutex_;
+ const size_t num_background_tasks_;
+ // This flag should only be set while holding result_mutex_.
+ bool finisher_is_running_ = false;
+ CancelableTaskManager background_task_manager_;
+ size_t stopped_compilation_tasks_ = 0;
+ base::Mutex tasks_mutex_;
+ Handle<Code> centry_stub_;
+};
+
+class JSToWasmWrapperCache {
+ public:
+ Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
+ const wasm::WasmModule* module,
+ Handle<Code> wasm_code,
+ uint32_t index);
+
+ private:
+ // sig_map_ maps signatures to an index in code_cache_.
+ wasm::SignatureMap sig_map_;
+ std::vector<Handle<Code>> code_cache_;
+};
+
+// A helper class to simplify instantiating a module from a compiled module.
+// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
+// etc.
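+// Typical use (illustrative): construct an {InstanceBuilder} with the module
+// object, the imports ({ffi}) and an optional memory, then call {Build()};
+// errors are reported through the {ErrorThrower}.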
+class InstanceBuilder {
+ public:
+ InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory,
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback);
+
+ // Build an instance, in all of its glory.
+ MaybeHandle<WasmInstanceObject> Build();
+
+ private:
+ // Represents the initialized state of a table.
+ struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_wrappers; // JSFunctions exported
+ Handle<FixedArray> function_table; // internal code array
+ Handle<FixedArray> signature_table; // internal sig array
+ };
+
+ Isolate* isolate_;
+ WasmModule* const module_;
+ const std::shared_ptr<Counters> async_counters_;
+ ErrorThrower* thrower_;
+ Handle<WasmModuleObject> module_object_;
+ MaybeHandle<JSReceiver> ffi_;
+ MaybeHandle<JSArrayBuffer> memory_;
+ Handle<JSArrayBuffer> globals_;
+ Handle<WasmCompiledModule> compiled_module_;
+ std::vector<TableInstance> table_instances_;
+ std::vector<Handle<JSFunction>> js_wrappers_;
+ JSToWasmWrapperCache js_to_wasm_cache_;
+ WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+// Helper routines to print out errors with imports.
+#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
+ void Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name, Handle<String> import_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s", \
+ index, module_name->ToCString().get(), \
+ import_name->ToCString().get(), error); \
+ } \
+ \
+ MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name) { \
+ thrower_->TYPE("Import #%d module=\"%s\" error: %s", index, \
+ module_name->ToCString().get(), error); \
+ return MaybeHandle<Object>(); \
+ }
+
+ ERROR_THROWER_WITH_MESSAGE(LinkError)
+ ERROR_THROWER_WITH_MESSAGE(TypeError)
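+  // The two instantiations above define {ReportLinkError} and
+  // {ReportTypeError}, each with an overload that omits the import name and
+  // returns an empty {MaybeHandle<Object>}.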
+
+ // Look up an import value in the {ffi_} object.
+ MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+ Handle<String> import_name);
+
+ // Look up an import value in the {ffi_} object specifically for linking an
+ // asm.js module. This only performs non-observable lookups, which allows
+ // falling back to JavaScript proper (and hence re-executing all lookups) if
+ // module instantiation fails.
+ MaybeHandle<Object> LookupImportAsm(uint32_t index,
+ Handle<String> import_name);
+
+ uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
+
+ // Load data segments into the memory.
+ void LoadDataSegments(Address mem_addr, size_t mem_size);
+
+ void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
+
+ // Process the imports, including functions, tables, globals, and memory, in
+ // order, loading them from the {ffi_} object. Returns the number of imported
+ // functions.
+ int ProcessImports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance);
+
+ template <typename T>
+ T* GetRawGlobalPtr(WasmGlobal& global);
+
+ // Process initialization of globals.
+ void InitGlobals();
+
+ // Allocate memory for a module instance as a new JSArrayBuffer.
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages);
+
+ bool NeedsWrappers() const;
+
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmCompiledModule> compiled_module);
+
+ void InitializeTables(Handle<WasmInstanceObject> instance,
+ CodeSpecialization* code_specialization);
+
+ void LoadTableSegments(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance);
+};
+
+// Encapsulates all the state and steps of an asynchronous compilation.
+// An asynchronous compile job consists of a number of tasks that are executed
+// as foreground and background tasks. Any phase that touches the V8 heap or
+// allocates on the V8 heap (e.g. creating the module object) must be a
+// foreground task. All other tasks (e.g. decoding and validating, the majority
+// of the work of compilation) can be background tasks.
+// TODO(wasm): factor out common parts of this with the synchronous pipeline.
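+// A minimal sketch (illustrative only), assuming the caller owns a copy of
+// the wire bytes and a promise to resolve or reject:
+//   auto job = new AsyncCompileJob(isolate, std::move(bytes_copy), length,
+//                                  context, promise);
+//   job->Start();  // schedules DecodeModule as the first (background) step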
+class AsyncCompileJob {
+ public:
+ explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy,
+ size_t length, Handle<Context> context,
+ Handle<JSPromise> promise);
+
+ void Start();
+
+ ~AsyncCompileJob();
+
+ private:
+ class CompileTask;
+ class CompileStep;
+
+ // States of the AsyncCompileJob.
+ class DecodeModule;
+ class DecodeFail;
+ class PrepareAndStartCompile;
+ class ExecuteAndFinishCompilationUnits;
+ class WaitForBackgroundTasks;
+ class FinishCompilationUnits;
+ class FinishCompile;
+ class CompileWrappers;
+ class FinishModule;
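+  // A successful compilation progresses through DecodeModule,
+  // PrepareAndStartCompile, ExecuteAndFinishCompilationUnits, FinishCompile,
+  // CompileWrappers and FinishModule; DecodeFail handles a failed decode.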
+
+ Isolate* isolate_;
+ const std::shared_ptr<Counters> async_counters_;
+ std::unique_ptr<byte[]> bytes_copy_;
+ ModuleWireBytes wire_bytes_;
+ Handle<Context> context_;
+ Handle<JSPromise> module_promise_;
+ std::unique_ptr<ModuleCompiler> compiler_;
+ std::unique_ptr<ModuleBytesEnv> module_bytes_env_;
+
+ std::vector<DeferredHandles*> deferred_handles_;
+ Handle<WasmModuleObject> module_object_;
+ Handle<FixedArray> function_tables_;
+ Handle<FixedArray> signature_tables_;
+ Handle<WasmCompiledModule> compiled_module_;
+ Handle<FixedArray> code_table_;
+ std::unique_ptr<WasmInstance> temp_instance_ = nullptr;
+ size_t outstanding_units_ = 0;
+ std::unique_ptr<CompileStep> step_;
+ CancelableTaskManager background_task_manager_;
+#if DEBUG
+ // Counts the number of pending foreground tasks.
+ int32_t num_pending_foreground_tasks_ = 0;
+#endif
+
+ const std::shared_ptr<Counters>& async_counters() const {
+ return async_counters_;
+ }
+ Counters* counters() const { return async_counters().get(); }
+
+ void ReopenHandlesInDeferredScope();
+
+ void AsyncCompileFailed(ErrorThrower& thrower);
+
+ void AsyncCompileSucceeded(Handle<Object> result);
+
+ template <typename Task, typename... Args>
+ void DoSync(Args&&... args);
+
+ void StartForegroundTask();
+
+ void StartBackgroundTask();
+
+ template <typename Task, typename... Args>
+ void DoAsync(Args&&... args);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_COMPILER_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 83cafbd0d8..94dc710b1b 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -3,18 +3,18 @@
// found in the LICENSE file.
#include "src/wasm/module-decoder.h"
-#include "src/wasm/function-body-decoder-impl.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
+#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/flags.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/v8.h"
-
#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-limits.h"
namespace v8 {
@@ -29,6 +29,18 @@ namespace wasm {
#else
#define TRACE(...)
#endif
+namespace {
+
+const char kNameString[] = "name";
+
+const char kExceptionString[] = "exception";
+
+template <size_t N>
+constexpr size_t num_chars(const char (&)[N]) {
+ return N - 1; // remove null character at end.
+}
+
+} // namespace
const char* SectionName(SectionCode code) {
switch (code) {
@@ -57,7 +69,9 @@ const char* SectionName(SectionCode code) {
case kDataSectionCode:
return "Data";
case kNameSectionCode:
- return "Name";
+ return kNameString;
+ case kExceptionSectionCode:
+ return kExceptionString;
default:
return "<unknown>";
}
@@ -65,9 +79,6 @@ const char* SectionName(SectionCode code) {
namespace {
-const char* kNameString = "name";
-const size_t kNameStringLength = 4;
-
ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
switch (expr.kind) {
case WasmInitExpr::kNone:
@@ -86,29 +97,28 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
return kWasmF64;
default:
UNREACHABLE();
- return kWasmStmt;
}
}
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
-uint32_t consume_string(Decoder& decoder, uint32_t* length, bool validate_utf8,
- const char* name) {
- *length = decoder.consume_u32v("string length");
+WireBytesRef consume_string(Decoder& decoder, bool validate_utf8,
+ const char* name) {
+ uint32_t length = decoder.consume_u32v("string length");
uint32_t offset = decoder.pc_offset();
const byte* string_start = decoder.pc();
// Consume bytes before validation to guarantee that the string is not oob.
- if (*length > 0) {
- decoder.consume_bytes(*length, name);
+ if (length > 0) {
+ decoder.consume_bytes(length, name);
if (decoder.ok() && validate_utf8 &&
- !unibrow::Utf8::Validate(string_start, *length)) {
+ !unibrow::Utf8::ValidateEncoding(string_start, length)) {
decoder.errorf(string_start, "%s: no valid UTF-8 string", name);
}
}
- return offset;
+ return {offset, decoder.failed() ? 0 : length};
}
-// An iterator over the sections in a WASM binary module.
+// An iterator over the sections in a wasm binary module.
// Automatically skips all unknown sections.
class WasmSectionIterator {
public:
@@ -190,25 +200,29 @@ class WasmSectionIterator {
if (section_code == kUnknownSectionCode) {
// Check for the known "name" section.
- uint32_t string_length;
- uint32_t string_offset =
- wasm::consume_string(decoder_, &string_length, true, "section name");
+ WireBytesRef string =
+ wasm::consume_string(decoder_, true, "section name");
if (decoder_.failed() || decoder_.pc() > section_end_) {
section_code_ = kUnknownSectionCode;
return;
}
const byte* section_name_start =
- decoder_.start() + decoder_.GetBufferRelativeOffset(string_offset);
+ decoder_.start() + decoder_.GetBufferRelativeOffset(string.offset());
payload_start_ = decoder_.pc();
TRACE(" +%d section name : \"%.*s\"\n",
static_cast<int>(section_name_start - decoder_.start()),
- string_length < 20 ? string_length : 20, section_name_start);
+ string.length() < 20 ? string.length() : 20, section_name_start);
- if (string_length == kNameStringLength &&
+ if (string.length() == num_chars(kNameString) &&
strncmp(reinterpret_cast<const char*>(section_name_start),
- kNameString, kNameStringLength) == 0) {
+ kNameString, num_chars(kNameString)) == 0) {
section_code = kNameSectionCode;
+ } else if (FLAG_experimental_wasm_eh &&
+ string.length() == num_chars(kExceptionString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kExceptionString, num_chars(kExceptionString)) == 0) {
+ section_code = kExceptionSectionCode;
}
} else if (!IsValidSectionCode(section_code)) {
decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
@@ -274,7 +288,7 @@ class ModuleDecoder : public Decoder {
void StartDecoding(Isolate* isolate) {
CHECK_NULL(module_);
module_.reset(new WasmModule(
- std::unique_ptr<Zone>(new Zone(isolate->allocator(), "signatures"))));
+ base::make_unique<Zone>(isolate->allocator(), "signatures")));
module_->min_mem_pages = 0;
module_->max_mem_pages = 0;
module_->mem_export = false;
@@ -361,6 +375,9 @@ class ModuleDecoder : public Decoder {
case kNameSectionCode:
DecodeNameSection();
break;
+ case kExceptionSectionCode:
+ DecodeExceptionSection();
+ break;
default:
errorf(pc(), "unexpected section: %s", SectionName(section_code));
return;
@@ -395,19 +412,15 @@ class ModuleDecoder : public Decoder {
static_cast<int>(pc_ - start_));
module_->import_table.push_back({
- 0, // module_name_length
- 0, // module_name_offset
- 0, // field_name_offset
- 0, // field_name_length
+ {0, 0}, // module_name
+ {0, 0}, // field_name
kExternalFunction, // kind
0 // index
});
WasmImport* import = &module_->import_table.back();
const byte* pos = pc_;
- import->module_name_offset =
- consume_string(&import->module_name_length, true, "module name");
- import->field_name_offset =
- consume_string(&import->field_name_length, true, "field name");
+ import->module_name = consume_string(true, "module name");
+ import->field_name = consume_string(true, "field name");
import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
switch (import->kind) {
case kExternalFunction: {
@@ -417,10 +430,8 @@ class ModuleDecoder : public Decoder {
module_->functions.push_back({nullptr, // sig
import->index, // func_index
0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0, // code_end_offset
+ {0, 0}, // name_offset
+ {0, 0}, // code
true, // imported
false}); // exported
WasmFunction* function = &module_->functions.back();
@@ -433,11 +444,10 @@ class ModuleDecoder : public Decoder {
if (!AddTable(module_.get())) break;
import->index =
static_cast<uint32_t>(module_->function_tables.size());
- module_->function_tables.push_back({0, 0, false,
- std::vector<int32_t>(), true,
- false, SignatureMap()});
- expect_u8("element type", kWasmAnyFunctionTypeForm);
+ module_->function_tables.emplace_back();
WasmIndirectFunctionTable* table = &module_->function_tables.back();
+ table->imported = true;
+ expect_u8("element type", kWasmAnyFunctionTypeForm);
consume_resizable_limits("element count", "elements",
FLAG_wasm_max_table_size, &table->min_size,
&table->has_max, FLAG_wasm_max_table_size,
@@ -483,10 +493,8 @@ class ModuleDecoder : public Decoder {
module_->functions.push_back({nullptr, // sig
func_index, // func_index
0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0, // code_end_offset
+ {0, 0}, // name
+ {0, 0}, // code
false, // imported
false}); // exported
WasmFunction* function = &module_->functions.back();
@@ -499,8 +507,7 @@ class ModuleDecoder : public Decoder {
for (uint32_t i = 0; ok() && i < table_count; i++) {
if (!AddTable(module_.get())) break;
- module_->function_tables.push_back(
- {0, 0, false, std::vector<int32_t>(), false, false, SignatureMap()});
+ module_->function_tables.emplace_back();
WasmIndirectFunctionTable* table = &module_->function_tables.back();
expect_u8("table type", kWasmAnyFunctionTypeForm);
consume_resizable_limits("table elements", "elements",
@@ -545,14 +552,13 @@ class ModuleDecoder : public Decoder {
static_cast<int>(pc_ - start_));
module_->export_table.push_back({
- 0, // name_length
- 0, // name_offset
+ {0, 0}, // name
kExternalFunction, // kind
0 // index
});
WasmExport* exp = &module_->export_table.back();
- exp->name_offset = consume_string(&exp->name_length, true, "field name");
+ exp->name = consume_string(true, "field name");
const byte* pos = pc();
exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
@@ -602,12 +608,12 @@ class ModuleDecoder : public Decoder {
auto cmp_less = [this](const WasmExport& a, const WasmExport& b) {
// Return true if a < b.
- if (a.name_length != b.name_length) {
- return a.name_length < b.name_length;
+ if (a.name.length() != b.name.length()) {
+ return a.name.length() < b.name.length();
}
- const byte* left = start() + GetBufferRelativeOffset(a.name_offset);
- const byte* right = start() + GetBufferRelativeOffset(b.name_offset);
- return memcmp(left, right, a.name_length) < 0;
+ const byte* left = start() + GetBufferRelativeOffset(a.name.offset());
+ const byte* right = start() + GetBufferRelativeOffset(b.name.offset());
+ return memcmp(left, right, a.name.length()) < 0;
};
std::stable_sort(sorted_exports.begin(), sorted_exports.end(), cmp_less);
@@ -616,9 +622,10 @@ class ModuleDecoder : public Decoder {
for (auto end = sorted_exports.end(); it != end; last = &*it++) {
DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
if (!cmp_less(*last, *it)) {
- const byte* pc = start() + GetBufferRelativeOffset(it->name_offset);
- errorf(pc, "Duplicate export name '%.*s' for functions %d and %d",
- it->name_length, pc, last->index, it->index);
+ const byte* pc = start() + GetBufferRelativeOffset(it->name.offset());
+ errorf(pc, "Duplicate export name '%.*s' for %s %d and %s %d",
+ it->name.length(), pc, ExternalKindName(last->kind),
+ last->index, ExternalKindName(it->kind), it->index);
break;
}
}
@@ -676,15 +683,14 @@ class ModuleDecoder : public Decoder {
errorf(pos, "function body count %u mismatch (%u expected)",
functions_count, module_->num_declared_functions);
}
- for (uint32_t i = 0; ok() && i < functions_count; ++i) {
- WasmFunction* function =
- &module_->functions[i + module_->num_imported_functions];
+ for (uint32_t i = 0; i < functions_count; ++i) {
uint32_t size = consume_u32v("body size");
uint32_t offset = pc_offset();
consume_bytes(size, "function body");
if (failed()) break;
- function->code_start_offset = offset;
- function->code_end_offset = offset + size;
+ WasmFunction* function =
+ &module_->functions[i + module_->num_imported_functions];
+ function->code = {offset, size};
if (verify_functions) {
ModuleBytesEnv module_env(module_.get(), nullptr,
ModuleWireBytes(start_, end_));
@@ -708,8 +714,7 @@ class ModuleDecoder : public Decoder {
static_cast<int>(pc_ - start_));
module_->data_segments.push_back({
WasmInitExpr(), // dest_addr
- 0, // source_offset
- 0 // source_size
+ {0, 0} // source
});
WasmDataSegment* segment = &module_->data_segments.back();
DecodeDataSegmentInModule(module_.get(), segment);
@@ -731,34 +736,50 @@ class ModuleDecoder : public Decoder {
// Decode function names, ignore the rest.
// Local names will be decoded when needed.
- if (name_type == NameSectionType::kFunction) {
- uint32_t functions_count = inner.consume_u32v("functions count");
-
- for (; inner.ok() && functions_count > 0; --functions_count) {
- uint32_t function_index = inner.consume_u32v("function index");
- uint32_t name_length = 0;
- uint32_t name_offset =
- wasm::consume_string(inner, &name_length, false, "function name");
-
- // Be lenient with errors in the name section: Ignore illegal
- // or out-of-order indexes and non-UTF8 names. You can even assign
- // to the same function multiple times (last valid one wins).
- if (inner.ok() && function_index < module_->functions.size() &&
- unibrow::Utf8::Validate(
- inner.start() + inner.GetBufferRelativeOffset(name_offset),
- name_length)) {
- module_->functions[function_index].name_offset = name_offset;
- module_->functions[function_index].name_length = name_length;
+ switch (name_type) {
+ case NameSectionType::kModule: {
+ WireBytesRef name = wasm::consume_string(inner, false, "module name");
+ if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
+ break;
+ }
+ case NameSectionType::kFunction: {
+ uint32_t functions_count = inner.consume_u32v("functions count");
+
+ for (; inner.ok() && functions_count > 0; --functions_count) {
+ uint32_t function_index = inner.consume_u32v("function index");
+ WireBytesRef name =
+ wasm::consume_string(inner, false, "function name");
+
+ // Be lenient with errors in the name section: Ignore illegal
+ // or out-of-order indexes and non-UTF8 names. You can even assign
+ // to the same function multiple times (last valid one wins).
+ if (inner.ok() && function_index < module_->functions.size() &&
+ validate_utf8(&inner, name)) {
+ module_->functions[function_index].name = name;
+ }
}
+ break;
}
- } else {
- inner.consume_bytes(name_payload_len, "name subsection payload");
+ default:
+ inner.consume_bytes(name_payload_len, "name subsection payload");
+ break;
}
}
// Skip the whole names section in the outer decoder.
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeExceptionSection() {
+ uint32_t exception_count =
+ consume_count("exception count", kV8MaxWasmExceptions);
+ for (uint32_t i = 0; ok() && i < exception_count; ++i) {
+ TRACE("DecodeExceptionSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module_->exceptions.emplace_back(
+ consume_exception_sig(module_->signature_zone.get()));
+ }
+ }
+
ModuleResult FinishDecoding(bool verify_functions = true) {
if (ok()) {
CalculateGlobalOffsets(module_.get());
@@ -809,11 +830,9 @@ class ModuleDecoder : public Decoder {
FunctionResult DecodeSingleFunction(Zone* zone, ModuleBytesEnv* module_env,
std::unique_ptr<WasmFunction> function) {
pc_ = start_;
- function->sig = consume_sig(zone); // read signature
- function->name_offset = 0; // ---- name
- function->name_length = 0; // ---- name length
- function->code_start_offset = off(pc_); // ---- code start
- function->code_end_offset = off(end_); // ---- code end
+ function->sig = consume_sig(zone);
+ function->name = {0, 0};
+ function->code = {off(pc_), static_cast<uint32_t>(end_ - pc_)};
if (ok())
VerifyFunctionBody(zone->allocator(), 0, module_env, function.get());
@@ -916,17 +935,18 @@ class ModuleDecoder : public Decoder {
const byte* start = pc_;
expect_u8("linear memory index", 0);
segment->dest_addr = consume_init_expr(module, kWasmI32);
- segment->source_size = consume_u32v("source size");
- segment->source_offset = pc_offset();
+ uint32_t source_length = consume_u32v("source size");
+ uint32_t source_offset = pc_offset();
+ segment->source = {source_offset, source_length};
// Validate the data is in the decoder buffer.
uint32_t limit = static_cast<uint32_t>(end_ - start_);
- if (!IsWithinLimit(limit, GetBufferRelativeOffset(segment->source_offset),
- segment->source_size)) {
+ if (!IsWithinLimit(limit, GetBufferRelativeOffset(segment->source.offset()),
+ segment->source.length())) {
error(start, "segment out of bounds of the section");
}
- consume_bytes(segment->source_size, "segment data");
+ consume_bytes(segment->source.length(), "segment data");
}
// Calculate individual global offsets and total size of globals table.
@@ -953,18 +973,19 @@ class ModuleDecoder : public Decoder {
menv->wire_bytes.GetNameOrNull(function));
if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
OFStream os(stdout);
- os << "Verifying WASM function " << func_name << std::endl;
+ os << "Verifying wasm function " << func_name << std::endl;
}
FunctionBody body = {
- function->sig, start_,
- start_ + GetBufferRelativeOffset(function->code_start_offset),
- start_ + GetBufferRelativeOffset(function->code_end_offset)};
+ function->sig, function->code.offset(),
+ start_ + GetBufferRelativeOffset(function->code.offset()),
+ start_ + GetBufferRelativeOffset(function->code.end_offset())};
DecodeResult result = VerifyWasmCode(
allocator, menv == nullptr ? nullptr : menv->module_env.module, body);
if (result.failed()) {
// Wrap the error message from the function decoder.
- std::ostringstream str;
- str << "in function " << func_name << ": " << result.error_msg();
+ std::ostringstream wrapped;
+ wrapped << "in function " << func_name << ": " << result.error_msg();
+ result.error(result.error_offset(), wrapped.str());
// Set error code and location, if this is the first error.
if (intermediate_result_.ok()) {
@@ -973,9 +994,14 @@ class ModuleDecoder : public Decoder {
}
}
- uint32_t consume_string(uint32_t* length, bool validate_utf8,
- const char* name) {
- return wasm::consume_string(*this, length, validate_utf8, name);
+ WireBytesRef consume_string(bool validate_utf8, const char* name) {
+ return wasm::consume_string(*this, validate_utf8, name);
+ }
+
+ bool validate_utf8(Decoder* decoder, WireBytesRef string) {
+ return unibrow::Utf8::ValidateEncoding(
+ decoder->start() + decoder->GetBufferRelativeOffset(string.offset()),
+ string.length());
}
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
@@ -1166,16 +1192,10 @@ class ModuleDecoder : public Decoder {
case kLocalF64:
return kWasmF64;
default:
- if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
+ if (origin_ != kAsmJsOrigin && FLAG_experimental_wasm_simd) {
switch (t) {
case kLocalS128:
return kWasmS128;
- case kLocalS1x4:
- return kWasmS1x4;
- case kLocalS1x8:
- return kWasmS1x8;
- case kLocalS1x16:
- return kWasmS1x16;
default:
break;
}
@@ -1185,9 +1205,20 @@ class ModuleDecoder : public Decoder {
}
}
- // Parses a type entry, which is currently limited to functions only.
FunctionSig* consume_sig(Zone* zone) {
- if (!expect_u8("type form", kWasmFunctionTypeForm)) return nullptr;
+ constexpr bool has_return_values = true;
+ return consume_sig_internal(zone, has_return_values);
+ }
+
+ WasmExceptionSig* consume_exception_sig(Zone* zone) {
+ constexpr bool has_return_values = true;
+ return consume_sig_internal(zone, !has_return_values);
+ }
+
+ private:
+ FunctionSig* consume_sig_internal(Zone* zone, bool has_return_values) {
+ if (has_return_values && !expect_u8("type form", kWasmFunctionTypeForm))
+ return nullptr;
// parse parameter types
uint32_t param_count =
consume_count("param count", kV8MaxWasmFunctionParams);
@@ -1197,17 +1228,19 @@ class ModuleDecoder : public Decoder {
ValueType param = consume_value_type();
params.push_back(param);
}
-
- // parse return types
- const size_t max_return_count = FLAG_wasm_mv_prototype
- ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns;
- uint32_t return_count = consume_count("return count", max_return_count);
- if (failed()) return nullptr;
std::vector<ValueType> returns;
- for (uint32_t i = 0; ok() && i < return_count; ++i) {
- ValueType ret = consume_value_type();
- returns.push_back(ret);
+ uint32_t return_count = 0;
+ if (has_return_values) {
+ // parse return types
+ const size_t max_return_count = FLAG_experimental_wasm_mv
+ ? kV8MaxWasmFunctionMultiReturns
+ : kV8MaxWasmFunctionReturns;
+ return_count = consume_count("return count", max_return_count);
+ if (failed()) return nullptr;
+ for (uint32_t i = 0; ok() && i < return_count; ++i) {
+ ValueType ret = consume_value_type();
+ returns.push_back(ret);
+ }
}
if (failed()) return nullptr;
@@ -1222,23 +1255,22 @@ class ModuleDecoder : public Decoder {
}
};
-ModuleResult DecodeWasmModuleInternal(Isolate* isolate,
- const byte* module_start,
- const byte* module_end,
- bool verify_functions,
- ModuleOrigin origin, bool is_sync) {
+ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool verify_functions,
+ ModuleOrigin origin, Counters* counters) {
+ auto counter = origin == kWasmOrigin
+ ? counters->wasm_decode_wasm_module_time()
+ : counters->wasm_decode_asm_module_time();
+ TimedHistogramScope wasm_decode_module_time_scope(counter);
size_t size = module_end - module_start;
if (module_start > module_end) return ModuleResult::Error("start > end");
if (size >= kV8MaxWasmModuleSize)
return ModuleResult::Error("size > maximum module size: %zu", size);
// TODO(bradnelson): Improve histogram handling of size_t.
- if (is_sync) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- (IsWasm(origin) ? isolate->counters()->wasm_wasm_module_size_bytes()
- : isolate->counters()->wasm_asm_module_size_bytes())
- ->AddSample(static_cast<int>(size));
- }
+ auto size_counter = origin == kWasmOrigin
+ ? counters->wasm_wasm_module_size_bytes()
+ : counters->wasm_asm_module_size_bytes();
+ size_counter->AddSample(static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
ModuleDecoder decoder(module_start, module_end, origin);
@@ -1247,34 +1279,32 @@ ModuleResult DecodeWasmModuleInternal(Isolate* isolate,
// TODO(titzer): this isn't accurate, since it doesn't count the data
// allocated on the C++ heap.
// https://bugs.chromium.org/p/chromium/issues/detail?id=657320
- if (is_sync && result.ok()) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- (IsWasm(origin)
- ? isolate->counters()->wasm_decode_wasm_module_peak_memory_bytes()
- : isolate->counters()->wasm_decode_asm_module_peak_memory_bytes())
- ->AddSample(
- static_cast<int>(result.val->signature_zone->allocation_size()));
+ if (result.ok()) {
+ auto peak_counter =
+ origin == kWasmOrigin
+ ? counters->wasm_decode_wasm_module_peak_memory_bytes()
+ : counters->wasm_decode_asm_module_peak_memory_bytes();
+ peak_counter->AddSample(
+ static_cast<int>(result.val->signature_zone->allocation_size()));
}
return result;
}
} // namespace
-ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool verify_functions,
- ModuleOrigin origin, bool is_sync) {
- if (is_sync) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- HistogramTimerScope wasm_decode_module_time_scope(
- IsWasm(origin) ? isolate->counters()->wasm_decode_wasm_module_time()
- : isolate->counters()->wasm_decode_asm_module_time());
- return DecodeWasmModuleInternal(isolate, module_start, module_end,
- verify_functions, origin, true);
- }
- return DecodeWasmModuleInternal(isolate, module_start, module_end,
- verify_functions, origin, false);
+ModuleResult SyncDecodeWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool verify_functions,
+ ModuleOrigin origin) {
+ return DecodeWasmModule(isolate, module_start, module_end, verify_functions,
+ origin, isolate->counters());
+}
+
+ModuleResult AsyncDecodeWasmModule(
+ Isolate* isolate, const byte* module_start, const byte* module_end,
+ bool verify_functions, ModuleOrigin origin,
+ const std::shared_ptr<Counters> async_counters) {
+ return DecodeWasmModule(isolate, module_start, module_end, verify_functions,
+ origin, async_counters.get());
}
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
@@ -1291,51 +1321,44 @@ WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
namespace {
-FunctionResult DecodeWasmFunctionInternal(Isolate* isolate, Zone* zone,
- ModuleBytesEnv* module_env,
- const byte* function_start,
- const byte* function_end,
- bool is_sync) {
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
+ ModuleBytesEnv* module_env,
+ const byte* function_start,
+ const byte* function_end,
+ Counters* counters) {
size_t size = function_end - function_start;
+ bool is_wasm = module_env->module_env.is_wasm();
+ auto size_histogram = is_wasm ? counters->wasm_wasm_function_size_bytes()
+ : counters->wasm_asm_function_size_bytes();
+ size_histogram->AddSample(static_cast<int>(size));
+ auto time_counter = is_wasm ? counters->wasm_decode_wasm_function_time()
+ : counters->wasm_decode_asm_function_time();
+ TimedHistogramScope wasm_decode_function_time_scope(time_counter);
if (function_start > function_end)
return FunctionResult::Error("start > end");
if (size > kV8MaxWasmFunctionSize)
return FunctionResult::Error("size > maximum function size: %zu", size);
- if (is_sync) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- bool is_wasm = module_env->module_env.is_wasm();
- (is_wasm ? isolate->counters()->wasm_wasm_function_size_bytes()
- : isolate->counters()->wasm_asm_function_size_bytes())
- ->AddSample(static_cast<int>(size));
- }
ModuleDecoder decoder(function_start, function_end, kWasmOrigin);
- return decoder.DecodeSingleFunction(
- zone, module_env, std::unique_ptr<WasmFunction>(new WasmFunction()));
+ return decoder.DecodeSingleFunction(zone, module_env,
+ base::make_unique<WasmFunction>());
}
} // namespace
-FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
- ModuleBytesEnv* module_env,
- const byte* function_start,
- const byte* function_end, bool is_sync) {
- if (is_sync) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- size_t size = function_end - function_start;
- bool is_wasm = module_env->module_env.is_wasm();
- (is_wasm ? isolate->counters()->wasm_wasm_function_size_bytes()
- : isolate->counters()->wasm_asm_function_size_bytes())
- ->AddSample(static_cast<int>(size));
- HistogramTimerScope wasm_decode_function_time_scope(
- is_wasm ? isolate->counters()->wasm_decode_wasm_function_time()
- : isolate->counters()->wasm_decode_asm_function_time());
- return DecodeWasmFunctionInternal(isolate, zone, module_env, function_start,
- function_end, true);
- }
- return DecodeWasmFunctionInternal(isolate, zone, module_env, function_start,
- function_end, false);
+FunctionResult SyncDecodeWasmFunction(Isolate* isolate, Zone* zone,
+ ModuleBytesEnv* module_env,
+ const byte* function_start,
+ const byte* function_end) {
+ return DecodeWasmFunction(isolate, zone, module_env, function_start,
+ function_end, isolate->counters());
+}
+
+FunctionResult AsyncDecodeWasmFunction(
+ Isolate* isolate, Zone* zone, ModuleBytesEnv* module_env,
+ const byte* function_start, const byte* function_end,
+ std::shared_ptr<Counters> async_counters) {
+ return DecodeWasmFunction(isolate, zone, module_env, function_start,
+ function_end, async_counters.get());
}
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
@@ -1411,13 +1434,68 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
uint32_t payload_offset = decoder.pc_offset();
uint32_t payload_length = section_length - (payload_offset - section_start);
decoder.consume_bytes(payload_length);
- result.push_back({section_start, name_offset, name_length, payload_offset,
- payload_length, section_length});
+ result.push_back({{section_start, section_length},
+ {name_offset, name_length},
+ {payload_offset, payload_length}});
}
return result;
}
+void DecodeLocalNames(const byte* module_start, const byte* module_end,
+ LocalNames* result) {
+ DCHECK_NOT_NULL(result);
+ DCHECK(result->names.empty());
+
+ static constexpr int kModuleHeaderSize = 8;
+ Decoder decoder(module_start, module_end);
+ decoder.consume_bytes(kModuleHeaderSize, "module header");
+
+ WasmSectionIterator section_iter(decoder);
+
+ while (decoder.ok() && section_iter.more() &&
+ section_iter.section_code() != kNameSectionCode) {
+ section_iter.advance(true);
+ }
+ if (!section_iter.more()) return;
+
+ // Reset the decoder to not read beyond the name section end.
+ decoder.Reset(section_iter.payload(), decoder.pc_offset());
+
+ while (decoder.ok() && decoder.more()) {
+ uint8_t name_type = decoder.consume_u8("name type");
+ if (name_type & 0x80) break; // no varuint7
+
+ uint32_t name_payload_len = decoder.consume_u32v("name payload length");
+ if (!decoder.checkAvailable(name_payload_len)) break;
+
+ if (name_type != NameSectionType::kLocal) {
+ decoder.consume_bytes(name_payload_len, "name subsection payload");
+ continue;
+ }
+
+ uint32_t local_names_count = decoder.consume_u32v("local names count");
+ for (uint32_t i = 0; i < local_names_count; ++i) {
+ uint32_t func_index = decoder.consume_u32v("function index");
+ if (func_index > kMaxInt) continue;
+ result->names.emplace_back(static_cast<int>(func_index));
+ LocalNamesPerFunction& func_names = result->names.back();
+ result->max_function_index =
+ std::max(result->max_function_index, func_names.function_index);
+ uint32_t num_names = decoder.consume_u32v("namings count");
+ for (uint32_t k = 0; k < num_names; ++k) {
+ uint32_t local_index = decoder.consume_u32v("local index");
+ WireBytesRef name = wasm::consume_string(decoder, true, "local name");
+ if (!decoder.ok()) break;
+ if (local_index > kMaxInt) continue;
+ func_names.max_local_index =
+ std::max(func_names.max_local_index, static_cast<int>(local_index));
+ func_names.names.emplace_back(static_cast<int>(local_index), name);
+ }
+ }
+ }
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
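
The `{0, 0}` initializers and the `.offset()`, `.length()`, `.end_offset()` and `.is_set()` accessors used throughout the hunks above all refer to the wire-byte reference type that this patch threads through the decoder in place of separate `name_offset`/`name_length` and `code_start_offset`/`code_end_offset` fields. The real `WireBytesRef` is defined elsewhere in the V8 tree and is not part of this diff; a minimal standalone sketch of the same idea, assuming only the behaviour visible in these hunks, could look like this:

#include <cassert>
#include <cstdint>

// Standalone sketch: an (offset, length) pair into the module wire bytes,
// replacing separate *_offset / *_length integer fields.
class ByteRangeRef {
 public:
  ByteRangeRef() = default;
  ByteRangeRef(uint32_t offset, uint32_t length)
      : offset_(offset), length_(length) {}

  uint32_t offset() const { return offset_; }
  uint32_t length() const { return length_; }
  uint32_t end_offset() const { return offset_ + length_; }
  bool is_set() const { return offset_ != 0; }

 private:
  uint32_t offset_ = 0;
  uint32_t length_ = 0;
};

int main() {
  ByteRangeRef code = {128, 20};   // function body: 20 bytes at module offset 128
  assert(code.offset() == 128 && code.end_offset() == 148);
  ByteRangeRef imported_code;      // imported functions keep the unset {0, 0} ref
  assert(!imported_code.is_set());
  return 0;
}

Default construction yields the unset `{0, 0}` reference, which is what imported functions keep, matching the `DCHECK(!function.code.is_set())` added in the interpreter hunks below.
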
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 91169d8eca..094a39dbe7 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -21,25 +21,26 @@ const uint8_t kWasmAnyFunctionTypeForm = 0x70;
const uint8_t kResizableMaximumFlag = 1;
enum SectionCode : int8_t {
- kUnknownSectionCode = 0, // code for unknown sections
- kTypeSectionCode = 1, // Function signature declarations
- kImportSectionCode = 2, // Import declarations
- kFunctionSectionCode = 3, // Function declarations
- kTableSectionCode = 4, // Indirect function table and other tables
- kMemorySectionCode = 5, // Memory attributes
- kGlobalSectionCode = 6, // Global declarations
- kExportSectionCode = 7, // Exports
- kStartSectionCode = 8, // Start function declaration
- kElementSectionCode = 9, // Elements section
- kCodeSectionCode = 10, // Function code
- kDataSectionCode = 11, // Data segments
- kNameSectionCode = 12, // Name section (encoded as a string)
+ kUnknownSectionCode = 0, // code for unknown sections
+ kTypeSectionCode = 1, // Function signature declarations
+ kImportSectionCode = 2, // Import declarations
+ kFunctionSectionCode = 3, // Function declarations
+ kTableSectionCode = 4, // Indirect function table and other tables
+ kMemorySectionCode = 5, // Memory attributes
+ kGlobalSectionCode = 6, // Global declarations
+ kExportSectionCode = 7, // Exports
+ kStartSectionCode = 8, // Start function declaration
+ kElementSectionCode = 9, // Elements section
+ kCodeSectionCode = 10, // Function code
+ kDataSectionCode = 11, // Data segments
+ kNameSectionCode = 12, // Name section (encoded as a string)
+ kExceptionSectionCode = 13, // Exception section (encoded as a string)
// Helper values
kFirstSectionInModule = kTypeSectionCode,
};
-enum NameSectionType : uint8_t { kFunction = 1, kLocal = 2 };
+enum NameSectionType : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
inline bool IsValidSectionCode(uint8_t byte) {
return kTypeSectionCode <= byte && byte <= kDataSectionCode;
@@ -51,6 +52,7 @@ typedef Result<std::unique_ptr<WasmModule>> ModuleResult;
typedef Result<std::unique_ptr<WasmFunction>> FunctionResult;
typedef std::vector<std::pair<int, int>> FunctionOffsets;
typedef Result<FunctionOffsets> FunctionOffsetsResult;
+
struct AsmJsOffsetEntry {
int byte_offset;
int source_position_call;
@@ -59,10 +61,35 @@ struct AsmJsOffsetEntry {
typedef std::vector<std::vector<AsmJsOffsetEntry>> AsmJsOffsets;
typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
-// Decodes the bytes of a WASM module between {module_start} and {module_end}.
-V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(
+struct LocalName {
+ int local_index;
+ WireBytesRef name;
+ LocalName(int local_index, WireBytesRef name)
+ : local_index(local_index), name(name) {}
+};
+struct LocalNamesPerFunction {
+ int function_index;
+ int max_local_index = -1;
+ std::vector<LocalName> names;
+ explicit LocalNamesPerFunction(int function_index)
+ : function_index(function_index) {}
+};
+struct LocalNames {
+ int max_function_index = -1;
+ std::vector<LocalNamesPerFunction> names;
+};
+
+// Decodes the bytes of a wasm module between {module_start} and {module_end}.
+V8_EXPORT_PRIVATE ModuleResult SyncDecodeWasmModule(Isolate* isolate,
+ const byte* module_start,
+ const byte* module_end,
+ bool verify_functions,
+ ModuleOrigin origin);
+
+V8_EXPORT_PRIVATE ModuleResult AsyncDecodeWasmModule(
Isolate* isolate, const byte* module_start, const byte* module_end,
- bool verify_functions, ModuleOrigin origin, bool is_sync = true);
+ bool verify_functions, ModuleOrigin origin,
+ const std::shared_ptr<Counters> async_counters);
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
@@ -70,22 +97,24 @@ V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone,
const byte* start,
const byte* end);
-// Decodes the bytes of a WASM function between
+// Decodes the bytes of a wasm function between
// {function_start} and {function_end}.
-V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(
- Isolate* isolate, Zone* zone, ModuleBytesEnv* env,
- const byte* function_start, const byte* function_end, bool is_sync = true);
+V8_EXPORT_PRIVATE FunctionResult
+SyncDecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleBytesEnv* env,
+ const byte* function_start, const byte* function_end);
+
+V8_EXPORT_PRIVATE FunctionResult
+AsyncDecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleBytesEnv* env,
+ const byte* function_start, const byte* function_end,
+ const std::shared_ptr<Counters> async_counters);
V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
const byte* end);
struct CustomSectionOffset {
- uint32_t section_start;
- uint32_t name_offset;
- uint32_t name_length;
- uint32_t payload_offset;
- uint32_t payload_length;
- uint32_t section_length;
+ WireBytesRef section;
+ WireBytesRef name;
+ WireBytesRef payload;
};
V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
@@ -99,6 +128,13 @@ V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* module_start,
const byte* module_end);
+// Decode the local names assignment from the name section.
+// Stores the result in the given {LocalNames} structure. The result will be
+// empty if no name section is present. On encountering an error in the name
+// section, returns all information decoded up to the first error.
+void DecodeLocalNames(const byte* module_start, const byte* module_end,
+ LocalNames* result);
+
} // namespace wasm
} // namespace internal
} // namespace v8
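
The header changes replace the single `DecodeWasmModule`/`DecodeWasmFunction` entry points (with their `bool is_sync` flag) by explicit `Sync.../Async...` variants that differ only in how the `Counters` object reaches the shared implementation: the synchronous path borrows the isolate's counters, while the asynchronous path keeps them alive through a `std::shared_ptr`. A standalone sketch of that ownership split, with a stand-in `Counters` type rather than the real V8 one:

#include <cstdio>
#include <memory>

// Stand-in for the real Counters object; only the shape of the pattern matters.
struct Counters {
  void AddSample(int value) { std::printf("sample: %d\n", value); }
};

// Shared implementation: takes a raw pointer, does not own the counters.
int DecodeImpl(int input, Counters* counters) {
  counters->AddSample(input);
  return input * 2;
}

// Synchronous entry point: counters owned by the caller (the isolate).
int SyncDecode(int input, Counters* isolate_counters) {
  return DecodeImpl(input, isolate_counters);
}

// Asynchronous entry point: the shared_ptr keeps the counters alive for the
// duration of the background task, and .get() feeds the shared implementation.
int AsyncDecode(int input, std::shared_ptr<Counters> async_counters) {
  return DecodeImpl(input, async_counters.get());
}

int main() {
  Counters isolate_counters;
  SyncDecode(21, &isolate_counters);
  AsyncDecode(21, std::make_shared<Counters>());
  return 0;
}

Passing `async_counters.get()` into the shared implementation mirrors the calls added in module-decoder.cc above.
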
diff --git a/deps/v8/src/wasm/signature-map.cc b/deps/v8/src/wasm/signature-map.cc
index e7f8b2fc94..e7ee4eba4e 100644
--- a/deps/v8/src/wasm/signature-map.cc
+++ b/deps/v8/src/wasm/signature-map.cc
@@ -8,7 +8,10 @@ namespace v8 {
namespace internal {
namespace wasm {
+SignatureMap::SignatureMap() : mutex_(new base::Mutex()) {}
+
uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
+ base::LockGuard<base::Mutex> guard(mutex_.get());
auto pos = map_.find(sig);
if (pos != map_.end()) {
return pos->second;
@@ -20,6 +23,7 @@ uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
}
int32_t SignatureMap::Find(FunctionSig* sig) const {
+ base::LockGuard<base::Mutex> guard(mutex_.get());
auto pos = map_.find(sig);
if (pos != map_.end()) {
return static_cast<int32_t>(pos->second);
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
index 3a7ed0a047..0b7ddfc58c 100644
--- a/deps/v8/src/wasm/signature-map.h
+++ b/deps/v8/src/wasm/signature-map.h
@@ -19,6 +19,12 @@ namespace wasm {
// same index.
class V8_EXPORT_PRIVATE SignatureMap {
public:
+ // Allow default construction and move construction (because we have vectors
+ // of objects containing SignatureMaps), but disallow copy or assign. It's
+ // too easy to get security bugs by accidentally updating a copy of the map.
+ SignatureMap();
+ SignatureMap(SignatureMap&&) = default;
+
// Gets the index for a signature, assigning a new index if necessary.
uint32_t FindOrInsert(FunctionSig* sig);
@@ -31,7 +37,10 @@ class V8_EXPORT_PRIVATE SignatureMap {
bool operator()(FunctionSig* a, FunctionSig* b) const;
};
uint32_t next_ = 0;
+ std::unique_ptr<base::Mutex> mutex_;
std::map<FunctionSig*, uint32_t, CompareFunctionSigs> map_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignatureMap);
};
} // namespace wasm
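
The SignatureMap changes make `FindOrInsert` and `Find` safe to call concurrently (for example from background compilation threads) while keeping the map movable, which is why the mutex is heap-allocated behind a `std::unique_ptr` instead of stored inline. A standalone analogue of the same pattern, using the standard library instead of `base::Mutex` and `FunctionSig*` keys (the `IndexMap` name and `std::string` keys are stand-ins, not V8 types):

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <string>

// Standalone analogue of the locked, movable-but-not-copyable index map.
class IndexMap {
 public:
  IndexMap() : mutex_(new std::mutex()) {}
  IndexMap(IndexMap&&) = default;              // movable: the mutex moves by pointer
  IndexMap(const IndexMap&) = delete;          // copying would fork the index space
  IndexMap& operator=(const IndexMap&) = delete;

  uint32_t FindOrInsert(const std::string& key) {
    std::lock_guard<std::mutex> guard(*mutex_);
    auto pos = map_.find(key);
    if (pos != map_.end()) return pos->second;
    uint32_t index = next_++;
    map_[key] = index;
    return index;
  }

  int32_t Find(const std::string& key) const {
    std::lock_guard<std::mutex> guard(*mutex_);
    auto pos = map_.find(key);
    if (pos != map_.end()) return static_cast<int32_t>(pos->second);
    return -1;
  }

 private:
  uint32_t next_ = 0;
  std::unique_ptr<std::mutex> mutex_;
  std::map<std::string, uint32_t> map_;
};

As in the patch, copying is disallowed because a copied map would hand out indices independently of the original, which is exactly the accidental-update hazard the comment above warns about.
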
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 2772cd5945..4e9d1a843d 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -4,18 +4,17 @@
#include "src/wasm/streaming-decoder.h"
-#include "src/objects-inl.h"
-
+#include "src/base/template-utils.h"
#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/dictionary.h"
#include "src/wasm/decoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
-#include "src/objects/descriptor-array.h"
-#include "src/objects/dictionary.h"
-
using namespace v8::internal;
using namespace v8::internal::wasm;
@@ -235,12 +234,12 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
if (streaming->decoder()->failed()) {
- return std::unique_ptr<DecodingState>(nullptr);
+ return nullptr;
}
if (value() > max_value_) {
streaming->decoder()->errorf(buffer(), "size > maximum function size: %zu",
value());
- return std::unique_ptr<DecodingState>(nullptr);
+ return nullptr;
}
return NextWithValue(streaming);
@@ -271,12 +270,12 @@ void StreamingDecoder::DecodeModuleHeader::CheckHeader(Decoder* decoder) {
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
CheckHeader(streaming->decoder());
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
- return std::unique_ptr<DecodingState>(new DecodeSectionLength(id()));
+ return base::make_unique<DecodeSectionLength>(id());
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -287,19 +286,19 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
Vector<const uint8_t>(buffer(), static_cast<int>(bytes_needed())));
if (value() == 0) {
// There is no payload; we go to the next section immediately.
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
} else if (section_id() == SectionCode::kCodeSectionCode) {
// We reached the code section. All functions of the code section are put
// into the same SectionBuffer.
- return std::unique_ptr<DecodingState>(new DecodeNumberOfFunctions(buf));
+ return base::make_unique<DecodeNumberOfFunctions>(buf);
} else {
- return std::unique_ptr<DecodingState>(new DecodeSectionPayload(buf));
+ return base::make_unique<DecodeSectionPayload>(buf);
}
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
}
std::unique_ptr<StreamingDecoder::DecodingState>
@@ -311,16 +310,16 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
buffer(), bytes_needed());
} else {
streaming->decoder()->error("Invalid code section length");
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
}
// {value} is the number of functions.
if (value() > 0) {
- return std::unique_ptr<DecodingState>(new DecodeFunctionLength(
+ return base::make_unique<DecodeFunctionLength>(
section_buffer(), section_buffer()->payload_offset() + bytes_needed(),
- value()));
+ value());
} else {
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
}
}
@@ -332,30 +331,30 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
memcpy(section_buffer_->bytes() + buffer_offset_, buffer(), bytes_needed());
} else {
streaming->decoder()->error("Invalid code section length");
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
}
// {value} is the length of the function.
if (value() == 0) {
streaming->decoder()->errorf(buffer(), "Invalid function length (0)");
- return std::unique_ptr<DecodingState>(nullptr);
+ return nullptr;
} else if (buffer_offset() + bytes_needed() + value() >
section_buffer()->length()) {
streaming->decoder()->errorf(buffer(), "not enough code section bytes");
- return std::unique_ptr<DecodingState>(nullptr);
+ return nullptr;
}
- return std::unique_ptr<DecodingState>(
- new DecodeFunctionBody(section_buffer(), buffer_offset() + bytes_needed(),
- value(), num_remaining_functions()));
+ return base::make_unique<DecodeFunctionBody>(
+ section_buffer(), buffer_offset() + bytes_needed(), value(),
+ num_remaining_functions());
}
std::unique_ptr<StreamingDecoder::DecodingState>
StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
// TODO(ahaas): Start compilation of the function here.
if (num_remaining_functions() != 0) {
- return std::unique_ptr<DecodingState>(new DecodeFunctionLength(
- section_buffer(), buffer_offset() + size(), num_remaining_functions()));
+ return base::make_unique<DecodeFunctionLength>(
+ section_buffer(), buffer_offset() + size(), num_remaining_functions());
} else {
if (buffer_offset() + size() != section_buffer()->length()) {
streaming->decoder()->Reset(
@@ -364,9 +363,9 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
streaming->decoder()->errorf(
section_buffer()->bytes() + buffer_offset() + size(),
"not all code section bytes were used");
- return std::unique_ptr<DecodingState>(nullptr);
+ return nullptr;
}
- return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ return base::make_unique<DecodeSectionID>();
}
}
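
The streaming-decoder hunks replace every `std::unique_ptr<DecodingState>(new ...)` with `base::make_unique<...>()` (which behaves like C++14 `std::make_unique`) and let error paths simply `return nullptr;`, since a plain null pointer converts to the returned `std::unique_ptr`. A standalone sketch of that state-transition shape, with a simplified `Next(bool)` signature rather than the full decoder interface:

#include <memory>

// Minimal sketch of the streaming decoder's state transitions: each state's
// Next() either returns the following state or nullptr to stop on error.
struct DecodingState {
  virtual ~DecodingState() = default;
  virtual std::unique_ptr<DecodingState> Next(bool ok) = 0;
};

struct DecodeSectionID : DecodingState {
  std::unique_ptr<DecodingState> Next(bool ok) override;
};

struct DecodeSectionLength : DecodingState {
  explicit DecodeSectionLength(int id) : id(id) {}
  int id;
  std::unique_ptr<DecodingState> Next(bool ok) override {
    if (!ok) return nullptr;                     // error: stop decoding
    return std::make_unique<DecodeSectionID>();  // go back to reading an ID
  }
};

std::unique_ptr<DecodingState> DecodeSectionID::Next(bool ok) {
  if (!ok) return nullptr;
  return std::make_unique<DecodeSectionLength>(/*id=*/1);
}

int main() {
  std::unique_ptr<DecodingState> state = std::make_unique<DecodeSectionID>();
  // Step the machine a few times; the final !ok stops it via nullptr.
  for (int step = 0; state != nullptr && step < 4; ++step) {
    state = state->Next(/*ok=*/step < 3);
  }
  return 0;
}
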
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 53e3fe699c..c274497fec 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -45,9 +45,9 @@ class PatchDirectCallsHelper {
FixedArray* deopt_data = code->deoptimization_data();
DCHECK_EQ(2, deopt_data->length());
WasmCompiledModule* comp_mod = instance->compiled_module();
- int func_index = Smi::cast(deopt_data->get(1))->value();
+ int func_index = Smi::ToInt(deopt_data->get(1));
func_bytes = comp_mod->module_bytes()->GetChars() +
- comp_mod->module()->functions[func_index].code_start_offset;
+ comp_mod->module()->functions[func_index].code.offset();
}
SourcePositionTableIterator source_pos_it;
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index f942c92127..e302f04adc 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -40,8 +40,8 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
-Handle<Object> WasmValToValueObject(Isolate* isolate, WasmVal value) {
- switch (value.type) {
+Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
+ switch (value.type()) {
case kWasmI32:
if (Smi::IsValid(value.to<int32_t>()))
return handle(Smi::FromInt(value.to<int32_t>()), isolate);
@@ -61,6 +61,34 @@ Handle<Object> WasmValToValueObject(Isolate* isolate, WasmVal value) {
}
}
+MaybeHandle<String> GetLocalName(Isolate* isolate,
+ Handle<WasmDebugInfo> debug_info,
+ int func_index, int local_index) {
+ DCHECK_LE(0, func_index);
+ DCHECK_LE(0, local_index);
+ if (!debug_info->has_locals_names()) {
+ Handle<WasmCompiledModule> compiled_module(
+ debug_info->wasm_instance()->compiled_module(), isolate);
+ Handle<FixedArray> locals_names =
+ wasm::DecodeLocalNames(isolate, compiled_module);
+ debug_info->set_locals_names(*locals_names);
+ }
+
+ Handle<FixedArray> locals_names(debug_info->locals_names(), isolate);
+ if (func_index >= locals_names->length() ||
+ locals_names->get(func_index)->IsUndefined(isolate)) {
+ return {};
+ }
+
+ Handle<FixedArray> func_locals_names(
+ FixedArray::cast(locals_names->get(func_index)), isolate);
+ if (local_index >= func_locals_names->length() ||
+ func_locals_names->get(local_index)->IsUndefined(isolate)) {
+ return {};
+ }
+ return handle(String::cast(func_locals_names->get(local_index)));
+}
+
// Forward declaration.
class InterpreterHandle;
InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
@@ -116,9 +144,6 @@ class InterpreterHandle {
WasmInstanceObject* instance = debug_info->wasm_instance();
- // Store a global handle to the wasm instance in the interpreter.
- interpreter_.SetInstanceObject(instance);
-
// Set memory start pointer and size.
instance_.mem_start = nullptr;
instance_.mem_size = 0;
@@ -165,20 +190,21 @@ class InterpreterHandle {
// Returns true if exited regularly, false if a trap/exception occurred and was
// not handled inside this activation. In the latter case, a pending exception
// will have been set on the isolate.
- bool Execute(Address frame_pointer, uint32_t func_index,
+ bool Execute(Handle<WasmInstanceObject> instance_object,
+ Address frame_pointer, uint32_t func_index,
uint8_t* arg_buffer) {
DCHECK_GE(module()->functions.size(), func_index);
FunctionSig* sig = module()->functions[func_index].sig;
DCHECK_GE(kMaxInt, sig->parameter_count());
int num_params = static_cast<int>(sig->parameter_count());
- ScopedVector<WasmVal> wasm_args(num_params);
+ ScopedVector<WasmValue> wasm_args(num_params);
uint8_t* arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_params; ++i) {
uint32_t param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
-#define CASE_ARG_TYPE(type, ctype) \
- case type: \
- DCHECK_EQ(param_size, sizeof(ctype)); \
- wasm_args[i] = WasmVal(ReadUnalignedValue<ctype>(arg_buf_ptr)); \
+#define CASE_ARG_TYPE(type, ctype) \
+ case type: \
+ DCHECK_EQ(param_size, sizeof(ctype)); \
+ wasm_args[i] = WasmValue(ReadUnalignedValue<ctype>(arg_buf_ptr)); \
break;
switch (sig->GetParam(i)) {
CASE_ARG_TYPE(kWasmI32, uint32_t)
@@ -194,6 +220,8 @@ class InterpreterHandle {
uint32_t activation_id = StartActivation(frame_pointer);
+ WasmInterpreter::HeapObjectsScope heap_objects_scope(&interpreter_,
+ instance_object);
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
thread->InitFrame(&module()->functions[func_index], wasm_args.start());
bool finished = false;
@@ -236,7 +264,7 @@ class InterpreterHandle {
// TODO(wasm): Handle multi-value returns.
DCHECK_EQ(1, kV8MaxWasmFunctionReturns);
if (sig->return_count()) {
- WasmVal ret_val = thread->GetReturnValue(0);
+ WasmValue ret_val = thread->GetReturnValue(0);
#define CASE_RET_TYPE(type, ctype) \
case type: \
DCHECK_EQ(1 << ElementSizeLog2Of(sig->GetReturn(0)), sizeof(ctype)); \
@@ -277,7 +305,6 @@ class InterpreterHandle {
}
default:
UNREACHABLE();
- return WasmInterpreter::STOPPED;
}
}
@@ -411,8 +438,10 @@ class InterpreterHandle {
}
Handle<JSArray> GetScopeDetails(Address frame_pointer, int frame_index,
- Handle<WasmInstanceObject> instance) {
+ Handle<WasmDebugInfo> debug_info) {
auto frame = GetInterpretedFrame(frame_pointer, frame_index);
+ Isolate* isolate = debug_info->GetIsolate();
+ Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<FixedArray> global_scope =
isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
@@ -435,7 +464,7 @@ class InterpreterHandle {
kExternalUint8Array, memory_buffer, 0, byte_length);
JSObject::SetOwnPropertyIgnoreAttributes(global_scope_object, name,
uint8_array, NONE)
- .Check();
+ .Assert();
}
Handle<FixedArray> local_scope =
@@ -451,15 +480,30 @@ class InterpreterHandle {
int num_params = frame->GetParameterCount();
int num_locals = frame->GetLocalCount();
DCHECK_LE(num_params, num_locals);
- for (int i = 0; i < num_locals; ++i) {
- // TODO(clemensh): Use names from name section if present.
- const char* label = i < num_params ? "param#%d" : "local#%d";
- Handle<String> name = PrintFToOneByteString<true>(isolate_, label, i);
- WasmVal value = frame->GetLocalValue(i);
- Handle<Object> value_obj = WasmValToValueObject(isolate_, value);
- JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, name,
- value_obj, NONE)
- .Check();
+ if (num_locals > 0) {
+ Handle<JSObject> locals_obj =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ Handle<String> locals_name =
+ isolate_->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("locals"));
+ JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, locals_name,
+ locals_obj, NONE)
+ .Assert();
+ for (int i = 0; i < num_locals; ++i) {
+ MaybeHandle<String> name =
+ GetLocalName(isolate, debug_info, frame->function()->func_index, i);
+ if (name.is_null()) {
+ // Parameters should come before locals in alphabetical ordering, so
+ // we name them "args" here.
+ const char* label = i < num_params ? "arg#%d" : "local#%d";
+ name = PrintFToOneByteString<true>(isolate_, label, i);
+ }
+ WasmValue value = frame->GetLocalValue(i);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ locals_obj, name.ToHandleChecked(), value_obj, NONE)
+ .Assert();
+ }
}
// Fill stack values.
@@ -469,18 +513,18 @@ class InterpreterHandle {
// which does not make too much sense here.
Handle<JSObject> stack_obj =
isolate_->factory()->NewJSObjectWithNullProto();
- for (int i = 0; i < stack_count; ++i) {
- WasmVal value = frame->GetStackValue(i);
- Handle<Object> value_obj = WasmValToValueObject(isolate_, value);
- JSObject::SetOwnElementIgnoreAttributes(
- stack_obj, static_cast<uint32_t>(i), value_obj, NONE)
- .Check();
- }
Handle<String> stack_name = isolate_->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("stack"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, stack_name,
stack_obj, NONE)
- .Check();
+ .Assert();
+ for (int i = 0; i < stack_count; ++i) {
+ WasmValue value = frame->GetStackValue(i);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
+ JSObject::SetOwnElementIgnoreAttributes(
+ stack_obj, static_cast<uint32_t>(i), value_obj, NONE)
+ .Assert();
+ }
Handle<JSArray> global_jsarr =
isolate_->factory()->NewJSArrayWithElements(global_scope);
@@ -495,25 +539,25 @@ class InterpreterHandle {
InterpreterHandle* GetOrCreateInterpreterHandle(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandle),
+ Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandleIndex),
isolate);
if (handle->IsUndefined(isolate)) {
InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
- debug_info->set(WasmDebugInfo::kInterpreterHandle, *handle);
+ debug_info->set(WasmDebugInfo::kInterpreterHandleIndex, *handle);
}
return Handle<Managed<InterpreterHandle>>::cast(handle)->get();
}
InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandle);
+ Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
return Managed<InterpreterHandle>::cast(handle_obj)->get();
}
InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandle);
+ Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
return Managed<InterpreterHandle>::cast(handle_obj)->get();
}
@@ -527,13 +571,13 @@ int GetNumFunctions(WasmInstanceObject* instance) {
Handle<FixedArray> GetOrCreateInterpretedFunctions(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<Object> obj(debug_info->get(WasmDebugInfo::kInterpretedFunctions),
+ Handle<Object> obj(debug_info->get(WasmDebugInfo::kInterpretedFunctionsIndex),
isolate);
if (!obj->IsUndefined(isolate)) return Handle<FixedArray>::cast(obj);
Handle<FixedArray> new_arr = isolate->factory()->NewFixedArray(
GetNumFunctions(debug_info->wasm_instance()));
- debug_info->set(WasmDebugInfo::kInterpretedFunctions, *new_arr);
+ debug_info->set(WasmDebugInfo::kInterpretedFunctionsIndex, *new_arr);
return new_arr;
}
@@ -578,8 +622,7 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
DCHECK(!instance->has_debug_info());
Factory* factory = instance->GetIsolate()->factory();
Handle<FixedArray> arr = factory->NewFixedArray(kFieldCount, TENURED);
- arr->set(kWrapperTracerHeader, Smi::kZero);
- arr->set(kInstance, *instance);
+ arr->set(kInstanceIndex, *instance);
Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(arr);
instance->set_debug_info(*debug_info);
return debug_info;
@@ -592,29 +635,29 @@ WasmInterpreter* WasmDebugInfo::SetupForTesting(
InterpreterHandle* cpp_handle =
new InterpreterHandle(isolate, *debug_info, instance);
Handle<Object> handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
- debug_info->set(kInterpreterHandle, *handle);
+ debug_info->set(kInterpreterHandleIndex, *handle);
return cpp_handle->interpreter();
}
-bool WasmDebugInfo::IsDebugInfo(Object* object) {
+bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
if (!object->IsFixedArray()) return false;
FixedArray* arr = FixedArray::cast(object);
if (arr->length() != kFieldCount) return false;
- if (!IsWasmInstance(arr->get(kInstance))) return false;
+ if (!arr->get(kInstanceIndex)->IsWasmInstanceObject()) return false;
Isolate* isolate = arr->GetIsolate();
- if (!arr->get(kInterpreterHandle)->IsUndefined(isolate) &&
- !arr->get(kInterpreterHandle)->IsForeign())
+ if (!arr->get(kInterpreterHandleIndex)->IsUndefined(isolate) &&
+ !arr->get(kInterpreterHandleIndex)->IsForeign())
return false;
return true;
}
WasmDebugInfo* WasmDebugInfo::cast(Object* object) {
- DCHECK(IsDebugInfo(object));
+ DCHECK(IsWasmDebugInfo(object));
return reinterpret_cast<WasmDebugInfo*>(object);
}
WasmInstanceObject* WasmDebugInfo::wasm_instance() {
- return WasmInstanceObject::cast(get(kInstance));
+ return WasmInstanceObject::cast(get(kInstanceIndex));
}
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
@@ -662,8 +705,9 @@ void WasmDebugInfo::PrepareStep(StepAction step_action) {
bool WasmDebugInfo::RunInterpreter(Address frame_pointer, int func_index,
uint8_t* arg_buffer) {
DCHECK_LE(0, func_index);
+ Handle<WasmInstanceObject> instance(wasm_instance());
return GetInterpreterHandle(this)->Execute(
- frame_pointer, static_cast<uint32_t>(func_index), arg_buffer);
+ instance, frame_pointer, static_cast<uint32_t>(func_index), arg_buffer);
}
std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
@@ -696,6 +740,5 @@ Handle<JSArray> WasmDebugInfo::GetScopeDetails(Handle<WasmDebugInfo> debug_info,
Address frame_pointer,
int frame_index) {
InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance());
- return interp_handle->GetScopeDetails(frame_pointer, frame_index, instance);
+ return interp_handle->GetScopeDetails(frame_pointer, frame_index, debug_info);
}
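
The debugger hunks above replace the flat `param#%d` / `local#%d` labels with names looked up lazily from the decoded name section, falling back to positional labels when no name is present; parameters are labelled `arg#%d` so that, as the added comment notes, they sort ahead of `local#%d` alphabetically. A standalone sketch of that fallback, with `std::optional` standing in for `MaybeHandle<String>` and a plain vector for the decoded per-function names:

#include <cstdio>
#include <optional>
#include <string>
#include <vector>

// Stand-in for the decoded name-section data: one name per local, possibly missing.
using LocalNamesPerFunction = std::vector<std::optional<std::string>>;

std::string LocalLabel(const LocalNamesPerFunction& names, int num_params, int i) {
  if (i < static_cast<int>(names.size()) && names[i]) return *names[i];
  // Parameters sort before locals alphabetically because "arg" < "local".
  char buf[32];
  std::snprintf(buf, sizeof(buf), i < num_params ? "arg#%d" : "local#%d", i);
  return buf;
}

int main() {
  LocalNamesPerFunction names = {std::nullopt, std::string("x")};
  for (int i = 0; i < 3; ++i) {
    std::printf("%s\n", LocalLabel(names, /*num_params=*/1, i).c_str());
  }
  // Prints: arg#0, x, local#2
  return 0;
}
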
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index d344d1fae4..6ebc342b62 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -587,11 +587,11 @@ inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
return bit_cast<double>(a);
}
-inline int32_t ExecuteI32ReinterpretF32(WasmVal a) {
+inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
return a.to_unchecked<int32_t>();
}
-inline int64_t ExecuteI64ReinterpretF64(WasmVal a) {
+inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
return a.to_unchecked<int64_t>();
}
@@ -936,66 +936,49 @@ class CodeMap {
Zone* zone_;
const WasmModule* module_;
ZoneVector<InterpreterCode> interpreter_code_;
- // Global handle to the wasm instance.
+ // This handle is set and reset by the SetInstanceObject() /
+ // ClearInstanceObject() method, which is used by the HeapObjectsScope.
Handle<WasmInstanceObject> instance_;
- // Global handle to array of unwrapped imports.
- Handle<FixedArray> imported_functions_;
- // Map from WASM_TO_JS wrappers to unwrapped imports (indexes into
- // imported_functions_).
- IdentityMap<int, ZoneAllocationPolicy> unwrapped_imports_;
public:
CodeMap(Isolate* isolate, const WasmModule* module,
const uint8_t* module_start, Zone* zone)
- : zone_(zone),
- module_(module),
- interpreter_code_(zone),
- unwrapped_imports_(isolate->heap(), ZoneAllocationPolicy(zone)) {
+ : zone_(zone), module_(module), interpreter_code_(zone) {
if (module == nullptr) return;
interpreter_code_.reserve(module->functions.size());
for (const WasmFunction& function : module->functions) {
if (function.imported) {
- DCHECK_EQ(function.code_start_offset, function.code_end_offset);
+ DCHECK(!function.code.is_set());
AddFunction(&function, nullptr, nullptr);
} else {
- const byte* code_start = module_start + function.code_start_offset;
- const byte* code_end = module_start + function.code_end_offset;
- AddFunction(&function, code_start, code_end);
+ AddFunction(&function, module_start + function.code.offset(),
+ module_start + function.code.end_offset());
}
}
}
- ~CodeMap() {
- // Destroy the global handles.
- // Cast the location, not the handle, because the handle cast might access
- // the object behind the handle.
- GlobalHandles::Destroy(reinterpret_cast<Object**>(instance_.location()));
- GlobalHandles::Destroy(
- reinterpret_cast<Object**>(imported_functions_.location()));
+ void SetInstanceObject(Handle<WasmInstanceObject> instance) {
+ DCHECK(instance_.is_null());
+ instance_ = instance;
}
+ void ClearInstanceObject() { instance_ = Handle<WasmInstanceObject>::null(); }
+
const WasmModule* module() const { return module_; }
bool has_instance() const { return !instance_.is_null(); }
- Handle<WasmInstanceObject> instance() const {
+ WasmInstanceObject* instance() const {
DCHECK(has_instance());
- return instance_;
+ return *instance_;
}
MaybeHandle<WasmInstanceObject> maybe_instance() const {
- return has_instance() ? instance_ : MaybeHandle<WasmInstanceObject>();
- }
-
- void SetInstanceObject(WasmInstanceObject* instance) {
- // Only set the instance once (otherwise we have to destroy the global
- // handle first).
- DCHECK(instance_.is_null());
- DCHECK_EQ(instance->module(), module_);
- instance_ = instance->GetIsolate()->global_handles()->Create(instance);
+ return has_instance() ? handle(instance())
+ : MaybeHandle<WasmInstanceObject>();
}
Code* GetImportedFunction(uint32_t function_index) {
- DCHECK(!instance_.is_null());
+ DCHECK(has_instance());
DCHECK_GT(module_->num_imported_functions, function_index);
- FixedArray* code_table = instance_->compiled_module()->ptr_to_code_table();
+ FixedArray* code_table = instance()->compiled_module()->ptr_to_code_table();
return Code::cast(code_table->get(static_cast<int>(function_index)));
}
@@ -1052,59 +1035,16 @@ class CodeMap {
code->side_table = nullptr;
Preprocess(code);
}
-
- // Returns a callable object if the imported function has a JS-compatible
- // signature, or a null handle otherwise.
- Handle<HeapObject> GetCallableObjectForJSImport(Isolate* isolate,
- Handle<Code> code) {
- DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, code->kind());
- int* unwrapped_index = unwrapped_imports_.Find(code);
- if (unwrapped_index) {
- return handle(
- HeapObject::cast(imported_functions_->get(*unwrapped_index)),
- isolate);
- }
- Handle<HeapObject> called_obj = UnwrapWasmToJSWrapper(isolate, code);
- if (!called_obj.is_null()) {
- // Cache the unwrapped callable object.
- if (imported_functions_.is_null()) {
- // This is the first call to an imported function. Allocate the
- // FixedArray to cache unwrapped objects.
- constexpr int kInitialCacheSize = 8;
- Handle<FixedArray> new_imported_functions =
- isolate->factory()->NewFixedArray(kInitialCacheSize, TENURED);
- // First entry: Number of occupied slots.
- new_imported_functions->set(0, Smi::kZero);
- imported_functions_ =
- isolate->global_handles()->Create(*new_imported_functions);
- }
- int this_idx = Smi::cast(imported_functions_->get(0))->value() + 1;
- if (this_idx == imported_functions_->length()) {
- Handle<FixedArray> new_imported_functions =
- isolate->factory()->CopyFixedArrayAndGrow(imported_functions_,
- this_idx / 2, TENURED);
- // Update the existing global handle:
- *imported_functions_.location() = *new_imported_functions;
- }
- DCHECK_GT(imported_functions_->length(), this_idx);
- DCHECK(imported_functions_->get(this_idx)->IsUndefined(isolate));
- imported_functions_->set(0, Smi::FromInt(this_idx));
- imported_functions_->set(this_idx, *called_obj);
- unwrapped_imports_.Set(code, this_idx);
- }
- return called_obj;
- }
};
-Handle<Object> WasmValToNumber(Factory* factory, WasmVal val,
- wasm::ValueType type) {
+Handle<Object> WasmValueToNumber(Factory* factory, WasmValue val,
+ wasm::ValueType type) {
switch (type) {
case kWasmI32:
return factory->NewNumberFromInt(val.to<int32_t>());
case kWasmI64:
// wasm->js and js->wasm is illegal for i64 type.
UNREACHABLE();
- return Handle<Object>::null();
case kWasmF32:
return factory->NewNumber(val.to<float>());
case kWasmF64:
@@ -1118,15 +1058,15 @@ Handle<Object> WasmValToNumber(Factory* factory, WasmVal val,
// Convert JS value to WebAssembly, spec here:
// https://github.com/WebAssembly/design/blob/master/JS.md#towebassemblyvalue
-WasmVal ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
- wasm::ValueType type) {
+WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
+ wasm::ValueType type) {
switch (type) {
case kWasmI32: {
MaybeHandle<Object> maybe_i32 = Object::ToInt32(isolate, value);
// TODO(clemensh): Handle failure here (unwind).
int32_t value;
CHECK(maybe_i32.ToHandleChecked()->ToInt32(&value));
- return WasmVal(value);
+ return WasmValue(value);
}
case kWasmI64:
// If the signature contains i64, a type error was thrown before.
@@ -1134,18 +1074,18 @@ WasmVal ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
case kWasmF32: {
MaybeHandle<Object> maybe_number = Object::ToNumber(value);
// TODO(clemensh): Handle failure here (unwind).
- return WasmVal(
+ return WasmValue(
static_cast<float>(maybe_number.ToHandleChecked()->Number()));
}
case kWasmF64: {
MaybeHandle<Object> maybe_number = Object::ToNumber(value);
// TODO(clemensh): Handle failure here (unwind).
- return WasmVal(maybe_number.ToHandleChecked()->Number());
+ return WasmValue(maybe_number.ToHandleChecked()->Number());
}
default:
// TODO(wasm): Handle simd.
UNIMPLEMENTED();
- return WasmVal();
+ return WasmValue();
}
}
@@ -1171,7 +1111,7 @@ class ThreadImpl {
WasmInterpreter::State state() { return state_; }
- void InitFrame(const WasmFunction* function, WasmVal* args) {
+ void InitFrame(const WasmFunction* function, WasmValue* args) {
DCHECK_EQ(current_activation().fp, frames_.size());
InterpreterCode* code = codemap()->GetCode(function);
size_t num_params = function->sig->parameter_count();
@@ -1215,8 +1155,8 @@ class ThreadImpl {
return static_cast<int>(frames_.size());
}
- WasmVal GetReturnValue(uint32_t index) {
- if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
+ WasmValue GetReturnValue(uint32_t index) {
+ if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xdeadbeef);
DCHECK_EQ(WasmInterpreter::FINISHED, state_);
Activation act = current_activation();
// Current activation must be finished.
@@ -1224,12 +1164,12 @@ class ThreadImpl {
return GetStackValue(act.sp + index);
}
- WasmVal GetStackValue(sp_t index) {
+ WasmValue GetStackValue(sp_t index) {
DCHECK_GT(StackHeight(), index);
return stack_start_[index];
}
- void SetStackValue(sp_t index, WasmVal value) {
+ void SetStackValue(sp_t index, WasmValue value) {
DCHECK_GT(StackHeight(), index);
stack_start_[index] = value;
}
@@ -1322,9 +1262,9 @@ class ThreadImpl {
CodeMap* codemap_;
WasmInstance* instance_;
Zone* zone_;
- WasmVal* stack_start_ = nullptr; // Start of allocated stack space.
- WasmVal* stack_limit_ = nullptr; // End of allocated stack space.
- WasmVal* sp_ = nullptr; // Current stack pointer.
+ WasmValue* stack_start_ = nullptr; // Start of allocated stack space.
+ WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
+ WasmValue* sp_ = nullptr; // Current stack pointer.
ZoneVector<Frame> frames_;
WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
pc_t break_pc_ = kInvalidPc;
@@ -1365,11 +1305,11 @@ class ThreadImpl {
pc_t InitLocals(InterpreterCode* code) {
for (auto p : code->locals.type_list) {
- WasmVal val;
+ WasmValue val;
switch (p) {
-#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: \
- val = WasmVal(static_cast<ctype>(0)); \
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ val = WasmValue(static_cast<ctype>(0)); \
break;
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
@@ -1419,14 +1359,13 @@ class ThreadImpl {
}
default:
UNREACHABLE();
- return 0;
}
}
bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
size_t arity) {
DCHECK_GT(frames_.size(), 0);
- WasmVal* sp_dest = stack_start_ + frames_.back().sp;
+ WasmValue* sp_dest = stack_start_ + frames_.back().sp;
frames_.pop_back();
if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
@@ -1463,7 +1402,7 @@ class ThreadImpl {
// Copies {arity} values on the top of the stack down the stack to {dest},
// dropping the values in-between.
- void DoStackTransfer(WasmVal* dest, size_t arity) {
+ void DoStackTransfer(WasmValue* dest, size_t arity) {
// before: |---------------| pop_count | arity |
// ^ 0 ^ dest ^ sp_
//
@@ -1490,7 +1429,7 @@ class ThreadImpl {
return false;
}
byte* addr = instance()->mem_start + operand.offset + index;
- WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
+ WasmValue result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
Push(result);
len = 1 + operand.length;
@@ -1501,7 +1440,7 @@ class ThreadImpl {
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
int& len) {
MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
- WasmVal val = Pop();
+ WasmValue val = Pop();
uint32_t index = Pop().to<uint32_t>();
if (!BoundsCheck<mtype>(instance()->mem_size, operand.offset, index)) {
@@ -1623,7 +1562,7 @@ class ThreadImpl {
}
case kExprIf: {
BlockTypeOperand<false> operand(&decoder, code->at(pc));
- WasmVal cond = Pop();
+ WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
// fall through to the true block.
@@ -1641,9 +1580,9 @@ class ThreadImpl {
break;
}
case kExprSelect: {
- WasmVal cond = Pop();
- WasmVal fval = Pop();
- WasmVal tval = Pop();
+ WasmValue cond = Pop();
+ WasmValue fval = Pop();
+ WasmValue tval = Pop();
Push(cond.to<int32_t>() != 0 ? tval : fval);
break;
}
@@ -1655,7 +1594,7 @@ class ThreadImpl {
}
case kExprBrIf: {
BreakDepthOperand<false> operand(&decoder, code->at(pc));
- WasmVal cond = Pop();
+ WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
len = DoBreak(code, pc, operand.depth);
@@ -1694,25 +1633,25 @@ class ThreadImpl {
}
case kExprI32Const: {
ImmI32Operand<false> operand(&decoder, code->at(pc));
- Push(WasmVal(operand.value));
+ Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
ImmI64Operand<false> operand(&decoder, code->at(pc));
- Push(WasmVal(operand.value));
+ Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
ImmF32Operand<false> operand(&decoder, code->at(pc));
- Push(WasmVal(operand.value));
+ Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
ImmF64Operand<false> operand(&decoder, code->at(pc));
- Push(WasmVal(operand.value));
+ Push(WasmValue(operand.value));
len = 1 + operand.length;
break;
}
@@ -1724,14 +1663,14 @@ class ThreadImpl {
}
case kExprSetLocal: {
LocalIndexOperand<false> operand(&decoder, code->at(pc));
- WasmVal val = Pop();
+ WasmValue val = Pop();
SetStackValue(frames_.back().sp + operand.index, val);
len = 1 + operand.length;
break;
}
case kExprTeeLocal: {
LocalIndexOperand<false> operand(&decoder, code->at(pc));
- WasmVal val = Pop();
+ WasmValue val = Pop();
SetStackValue(frames_.back().sp + operand.index, val);
Push(val);
len = 1 + operand.length;
@@ -1804,11 +1743,11 @@ class ThreadImpl {
GlobalIndexOperand<false> operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
byte* ptr = instance()->globals_start + global->offset;
- WasmVal val;
+ WasmValue val;
switch (global->type) {
-#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: \
- val = WasmVal(*reinterpret_cast<ctype*>(ptr)); \
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ val = WasmValue(*reinterpret_cast<ctype*>(ptr)); \
break;
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
@@ -1823,7 +1762,7 @@ class ThreadImpl {
GlobalIndexOperand<false> operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
byte* ptr = instance()->globals_start + global->offset;
- WasmVal val = Pop();
+ WasmValue val = Pop();
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
case kWasm##wasm: \
@@ -1888,7 +1827,7 @@ class ThreadImpl {
/* TODO(titzer): alignment for asmjs load mem? */ \
result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
} \
- Push(WasmVal(result)); \
+ Push(WasmValue(result)); \
break; \
}
ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
@@ -1904,7 +1843,7 @@ class ThreadImpl {
#define ASMJS_STORE_CASE(name, ctype, mtype) \
case kExpr##name: { \
- WasmVal val = Pop(); \
+ WasmValue val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
if (BoundsCheck<mtype>(instance()->mem_size, 0, index)) { \
byte* addr = instance()->mem_start + index; \
@@ -1924,15 +1863,15 @@ class ThreadImpl {
case kExprGrowMemory: {
MemoryIndexOperand<false> operand(&decoder, code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Push(WasmVal(ExecuteGrowMemory(
+ Push(WasmValue(ExecuteGrowMemory(
delta_pages, codemap_->maybe_instance(), instance())));
len = 1 + operand.length;
break;
}
case kExprMemorySize: {
MemoryIndexOperand<false> operand(&decoder, code->at(pc));
- Push(WasmVal(static_cast<uint32_t>(instance()->mem_size /
- WasmModule::kPageSize)));
+ Push(WasmValue(static_cast<uint32_t>(instance()->mem_size /
+ WasmModule::kPageSize)));
len = 1 + operand.length;
break;
}
@@ -1940,37 +1879,37 @@ class ThreadImpl {
// specially to guarantee that the quiet bit of a NaN is preserved on
// ia32 by the reinterpret casts.
case kExprI32ReinterpretF32: {
- WasmVal val = Pop();
- Push(WasmVal(ExecuteI32ReinterpretF32(val)));
+ WasmValue val = Pop();
+ Push(WasmValue(ExecuteI32ReinterpretF32(val)));
possible_nondeterminism_ |= std::isnan(val.to<float>());
break;
}
case kExprI64ReinterpretF64: {
- WasmVal val = Pop();
- Push(WasmVal(ExecuteI64ReinterpretF64(val)));
+ WasmValue val = Pop();
+ Push(WasmValue(ExecuteI64ReinterpretF64(val)));
possible_nondeterminism_ |= std::isnan(val.to<double>());
break;
}
-#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
- case kExpr##name: { \
- WasmVal rval = Pop(); \
- WasmVal lval = Pop(); \
- WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
- Push(result); \
- break; \
+#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
+ case kExpr##name: { \
+ WasmValue rval = Pop(); \
+ WasmValue lval = Pop(); \
+ WasmValue result(lval.to<ctype>() op rval.to<ctype>()); \
+ Push(result); \
+ break; \
}
FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP
-#define EXECUTE_OTHER_BINOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- volatile ctype rval = Pop().to<ctype>(); \
- volatile ctype lval = Pop().to<ctype>(); \
- WasmVal result(Execute##name(lval, rval, &trap)); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(result); \
- break; \
+#define EXECUTE_OTHER_BINOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ volatile ctype rval = Pop().to<ctype>(); \
+ volatile ctype lval = Pop().to<ctype>(); \
+ WasmValue result(Execute##name(lval, rval, &trap)); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(result); \
+ break; \
}
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
@@ -1981,7 +1920,7 @@ class ThreadImpl {
TrapReason trap = kTrapCount;
volatile float rval = Pop().to<float>();
volatile float lval = Pop().to<float>();
- WasmVal result(ExecuteF32CopySign(lval, rval, &trap));
+ WasmValue result(ExecuteF32CopySign(lval, rval, &trap));
Push(result);
possible_nondeterminism_ |= std::isnan(rval);
break;
@@ -1992,7 +1931,7 @@ class ThreadImpl {
TrapReason trap = kTrapCount;
volatile double rval = Pop().to<double>();
volatile double lval = Pop().to<double>();
- WasmVal result(ExecuteF64CopySign(lval, rval, &trap));
+ WasmValue result(ExecuteF64CopySign(lval, rval, &trap));
Push(result);
possible_nondeterminism_ |= std::isnan(rval);
break;
@@ -2001,7 +1940,7 @@ class ThreadImpl {
case kExpr##name: { \
TrapReason trap = kTrapCount; \
volatile ctype val = Pop().to<ctype>(); \
- WasmVal result(Execute##name(val, &trap)); \
+ WasmValue result(Execute##name(val, &trap)); \
if (trap != kTrapCount) return DoTrap(trap, pc); \
Push(result); \
break; \
@@ -2037,7 +1976,7 @@ class ThreadImpl {
CommitPc(pc);
}
- WasmVal Pop() {
+ WasmValue Pop() {
DCHECK_GT(frames_.size(), 0);
DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
return *--sp_;
@@ -2051,22 +1990,22 @@ class ThreadImpl {
sp_ -= n;
}
- WasmVal PopArity(size_t arity) {
- if (arity == 0) return WasmVal();
+ WasmValue PopArity(size_t arity) {
+ if (arity == 0) return WasmValue();
CHECK_EQ(1, arity);
return Pop();
}
- void Push(WasmVal val) {
- DCHECK_NE(kWasmStmt, val.type);
+ void Push(WasmValue val) {
+ DCHECK_NE(kWasmStmt, val.type());
DCHECK_LE(1, stack_limit_ - sp_);
*sp_++ = val;
}
- void Push(WasmVal* vals, size_t arity) {
+ void Push(WasmValue* vals, size_t arity) {
DCHECK_LE(arity, stack_limit_ - sp_);
- for (WasmVal *val = vals, *end = vals + arity; val != end; ++val) {
- DCHECK_NE(kWasmStmt, val->type);
+ for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
+ DCHECK_NE(kWasmStmt, val->type());
}
memcpy(sp_, vals, arity * sizeof(*sp_));
sp_ += arity;
@@ -2078,7 +2017,7 @@ class ThreadImpl {
size_t requested_size =
base::bits::RoundUpToPowerOfTwo64((sp_ - stack_start_) + size);
size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
- WasmVal* new_stack = zone_->NewArray<WasmVal>(new_size);
+ WasmValue* new_stack = zone_->NewArray<WasmValue>(new_size);
memcpy(new_stack, stack_start_, old_size * sizeof(*sp_));
sp_ = new_stack + (sp_ - stack_start_);
stack_start_ = new_stack;
@@ -2087,14 +2026,6 @@ class ThreadImpl {
sp_t StackHeight() { return sp_ - stack_start_; }
- void TraceStack(const char* phase, pc_t pc) {
- if (FLAG_trace_wasm_interpreter) {
- PrintF("%s @%zu", phase, pc);
- UNIMPLEMENTED();
- PrintF("\n");
- }
- }
-
void TraceValueStack() {
#ifdef DEBUG
if (!FLAG_trace_wasm_interpreter) return;
@@ -2109,8 +2040,8 @@ class ThreadImpl {
PrintF(" l%zu:", i);
else
PrintF(" s%zu:", i);
- WasmVal val = GetStackValue(i);
- switch (val.type) {
+ WasmValue val = GetStackValue(i);
+ switch (val.type()) {
case kWasmI32:
PrintF("i32:%d", val.to<int32_t>());
break;
@@ -2151,18 +2082,17 @@ class ThreadImpl {
DCHECK_EQ(2, deopt_data->length());
WasmInstanceObject* target_instance =
WasmInstanceObject::cast(WeakCell::cast(deopt_data->get(0))->value());
- if (target_instance != *codemap()->instance()) {
+ if (target_instance != codemap()->instance()) {
// TODO(wasm): Implement calling functions of other instances/modules.
UNIMPLEMENTED();
}
- int target_func_idx = Smi::cast(deopt_data->get(1))->value();
+ int target_func_idx = Smi::ToInt(deopt_data->get(1));
DCHECK_LE(0, target_func_idx);
return {ExternalCallResult::INTERNAL,
codemap()->GetCode(target_func_idx)};
}
- Handle<HeapObject> target =
- codemap()->GetCallableObjectForJSImport(isolate, code);
+ Handle<HeapObject> target = UnwrapWasmToJSWrapper(isolate, code);
if (target.is_null()) {
isolate->Throw(*isolate->factory()->NewTypeError(
@@ -2181,10 +2111,10 @@ class ThreadImpl {
// Get all arguments as JS values.
std::vector<Handle<Object>> args;
args.reserve(num_args);
- WasmVal* wasm_args = sp_ - num_args;
+ WasmValue* wasm_args = sp_ - num_args;
for (int i = 0; i < num_args; ++i) {
- args.push_back(WasmValToNumber(isolate->factory(), wasm_args[i],
- signature->GetParam(i)));
+ args.push_back(WasmValueToNumber(isolate->factory(), wasm_args[i],
+ signature->GetParam(i)));
}
// The receiver is the global proxy if in sloppy mode (default), undefined
@@ -2275,8 +2205,7 @@ class ThreadImpl {
if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
return {ExternalCallResult::INVALID_FUNC};
}
- int found_sig =
- Smi::cast(sig_table->get(static_cast<int>(entry_index)))->value();
+ int found_sig = Smi::ToInt(sig_table->get(static_cast<int>(entry_index)));
if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
@@ -2341,13 +2270,13 @@ class InterpretedFrameImpl {
return static_cast<int>(frame_size) - GetLocalCount();
}
- WasmVal GetLocalValue(int index) const {
+ WasmValue GetLocalValue(int index) const {
DCHECK_LE(0, index);
DCHECK_GT(GetLocalCount(), index);
return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
}
- WasmVal GetStackValue(int index) const {
+ WasmValue GetStackValue(int index) const {
DCHECK_LE(0, index);
// Index must be within the number of stack values of this frame.
DCHECK_GT(GetStackHeight(), index);
@@ -2384,6 +2313,37 @@ const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
return reinterpret_cast<const InterpretedFrameImpl*>(frame);
}
+//============================================================================
+// Implementation details of the heap objects scope.
+//============================================================================
+class HeapObjectsScopeImpl {
+ public:
+ HeapObjectsScopeImpl(CodeMap* codemap, Handle<WasmInstanceObject> instance)
+ : codemap_(codemap), needs_reset(!codemap_->has_instance()) {
+ if (needs_reset) {
+ instance_ = handle(*instance);
+ codemap_->SetInstanceObject(instance_);
+ } else {
+ DCHECK_EQ(*instance, codemap_->instance());
+ return;
+ }
+ }
+
+ ~HeapObjectsScopeImpl() {
+ if (!needs_reset) return;
+ DCHECK_EQ(*instance_, codemap_->instance());
+ codemap_->ClearInstanceObject();
+    // Clear the handle, so that anyone who accidentally copied it will
+    // notice.
+ *instance_.location() = nullptr;
+ }
+
+ private:
+ CodeMap* codemap_;
+ Handle<WasmInstanceObject> instance_;
+ bool needs_reset;
+};
+
} // namespace
//============================================================================
@@ -2396,7 +2356,7 @@ WasmInterpreter::State WasmInterpreter::Thread::state() {
return ToImpl(this)->state();
}
void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
- WasmVal* args) {
+ WasmValue* args) {
ToImpl(this)->InitFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
@@ -2420,7 +2380,7 @@ std::unique_ptr<InterpretedFrame> WasmInterpreter::Thread::GetFrame(int index) {
return std::unique_ptr<InterpretedFrame>(
ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
-WasmVal WasmInterpreter::Thread::GetReturnValue(int index) {
+WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
return ToImpl(this)->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
@@ -2524,10 +2484,6 @@ bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
return false;
}
-void WasmInterpreter::SetInstanceObject(WasmInstanceObject* instance) {
- internals_->codemap_.SetInstanceObject(instance);
-}
-
int WasmInterpreter::GetThreadCount() {
return 1; // only one thread for now.
}
@@ -2541,12 +2497,12 @@ size_t WasmInterpreter::GetMemorySize() {
return internals_->instance_->mem_size;
}
-WasmVal WasmInterpreter::ReadMemory(size_t offset) {
+WasmValue WasmInterpreter::ReadMemory(size_t offset) {
UNIMPLEMENTED();
- return WasmVal();
+ return WasmValue();
}
-void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
+void WasmInterpreter::WriteMemory(size_t offset, WasmValue val) {
UNIMPLEMENTED();
}
@@ -2570,7 +2526,7 @@ ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
// Create some dummy structures, to avoid special-casing the implementation
// just for testing.
FunctionSig sig(0, 0, nullptr);
- WasmFunction function{&sig, 0, 0, 0, 0, 0, 0, false, false};
+ WasmFunction function{&sig, 0, 0, {0, 0}, {0, 0}, false, false};
InterpreterCode code{
&function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
@@ -2595,13 +2551,26 @@ int InterpretedFrame::GetLocalCount() const {
int InterpretedFrame::GetStackHeight() const {
return ToImpl(this)->GetStackHeight();
}
-WasmVal InterpretedFrame::GetLocalValue(int index) const {
+WasmValue InterpretedFrame::GetLocalValue(int index) const {
return ToImpl(this)->GetLocalValue(index);
}
-WasmVal InterpretedFrame::GetStackValue(int index) const {
+WasmValue InterpretedFrame::GetStackValue(int index) const {
return ToImpl(this)->GetStackValue(index);
}
+//============================================================================
+// Public API of the heap objects scope.
+//============================================================================
+WasmInterpreter::HeapObjectsScope::HeapObjectsScope(
+ WasmInterpreter* interpreter, Handle<WasmInstanceObject> instance) {
+ static_assert(sizeof(data) == sizeof(HeapObjectsScopeImpl), "Size mismatch");
+ new (data) HeapObjectsScopeImpl(&interpreter->internals_->codemap_, instance);
+}
+
+WasmInterpreter::HeapObjectsScope::~HeapObjectsScope() {
+ reinterpret_cast<HeapObjectsScopeImpl*>(data)->~HeapObjectsScopeImpl();
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
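For orientation, here is a minimal sketch, not part of this patch, of how the renamed value type is used after this change. It assumes only the constructors and accessors the rewritten call sites above already rely on: per-type explicit constructors, a type() accessor and a checked to<T>() cast, with the default-constructed value tagged kWasmStmt.

// Illustrative only; mirrors the interpreter call sites updated above.
WasmValue MakeZero(ValueType type) {
  switch (type) {
    case kWasmI32: return WasmValue(int32_t{0});
    case kWasmI64: return WasmValue(int64_t{0});
    case kWasmF32: return WasmValue(0.0f);
    case kWasmF64: return WasmValue(0.0);
    default:       return WasmValue();  // kWasmStmt sentinel, never pushed
  }
}

int32_t ReadI32(const WasmValue& val) {
  // The type is now read through an accessor instead of a public field.
  DCHECK_EQ(kWasmI32, val.type());
  return val.to<int32_t>();  // checked cast: CHECKs the tag before returning
}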
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 1259f09ff2..b01a088e98 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -6,6 +6,7 @@
#define V8_WASM_INTERPRETER_H_
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-value.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -43,67 +44,6 @@ struct ControlTransferEntry {
using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
-// Macro for defining union members.
-#define FOREACH_UNION_MEMBER(V) \
- V(i32, kWasmI32, int32_t) \
- V(u32, kWasmI32, uint32_t) \
- V(i64, kWasmI64, int64_t) \
- V(u64, kWasmI64, uint64_t) \
- V(f32, kWasmF32, float) \
- V(f64, kWasmF64, double)
-
-// Representation of values within the interpreter.
-struct WasmVal {
- ValueType type;
- union {
-#define DECLARE_FIELD(field, localtype, ctype) ctype field;
- FOREACH_UNION_MEMBER(DECLARE_FIELD)
-#undef DECLARE_FIELD
- } val;
-
- WasmVal() : type(kWasmStmt) {}
-
-#define DECLARE_CONSTRUCTOR(field, localtype, ctype) \
- explicit WasmVal(ctype v) : type(localtype) { val.field = v; }
- FOREACH_UNION_MEMBER(DECLARE_CONSTRUCTOR)
-#undef DECLARE_CONSTRUCTOR
-
- bool operator==(const WasmVal& other) const {
- if (type != other.type) return false;
-#define CHECK_VAL_EQ(field, localtype, ctype) \
- if (type == localtype) { \
- return val.field == other.val.field; \
- }
- FOREACH_UNION_MEMBER(CHECK_VAL_EQ)
-#undef CHECK_VAL_EQ
- UNREACHABLE();
- return false;
- }
-
- template <typename T>
- inline T to() const {
- UNREACHABLE();
- }
-
- template <typename T>
- inline T to_unchecked() const {
- UNREACHABLE();
- }
-};
-
-#define DECLARE_CAST(field, localtype, ctype) \
- template <> \
- inline ctype WasmVal::to_unchecked() const { \
- return val.field; \
- } \
- template <> \
- inline ctype WasmVal::to() const { \
- CHECK_EQ(localtype, type); \
- return val.field; \
- }
-FOREACH_UNION_MEMBER(DECLARE_CAST)
-#undef DECLARE_CAST
-
// Representation of frames within the interpreter.
//
// Layout of a frame:
@@ -127,8 +67,8 @@ class InterpretedFrame {
int GetParameterCount() const;
int GetLocalCount() const;
int GetStackHeight() const;
- WasmVal GetLocalValue(int index) const;
- WasmVal GetStackValue(int index) const;
+ WasmValue GetLocalValue(int index) const;
+ WasmValue GetStackValue(int index) const;
private:
friend class WasmInterpreter;
@@ -138,9 +78,22 @@ class InterpretedFrame {
DISALLOW_COPY_AND_ASSIGN(InterpretedFrame);
};
-// An interpreter capable of executing WASM.
+// An interpreter capable of executing WebAssembly.
class V8_EXPORT_PRIVATE WasmInterpreter {
public:
+ // Open a HeapObjectsScope before running any code in the interpreter which
+  // needs access to the instance object or needs to call JS functions.
+ class V8_EXPORT_PRIVATE HeapObjectsScope {
+ public:
+ HeapObjectsScope(WasmInterpreter* interpreter,
+ Handle<WasmInstanceObject> instance);
+ ~HeapObjectsScope();
+
+ private:
+ char data[3 * sizeof(void*)]; // must match sizeof(HeapObjectsScopeImpl).
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsScope);
+ };
+
// State machine for a Thread:
// +---------Run()/Step()--------+
// V |
@@ -170,7 +123,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Execution control.
State state();
- void InitFrame(const WasmFunction* function, WasmVal* args);
+ void InitFrame(const WasmFunction* function, WasmValue* args);
// Pass -1 as num_steps to run till completion, pause or breakpoint.
State Run(int num_steps = -1);
State Step() { return Run(1); }
@@ -186,7 +139,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
int GetFrameCount();
// The InterpretedFrame is only valid as long as the Thread is paused.
std::unique_ptr<InterpretedFrame> GetFrame(int index);
- WasmVal GetReturnValue(int index = 0);
+ WasmValue GetReturnValue(int index = 0);
TrapReason GetTrapReason();
// Returns true if the thread executed an instruction which may produce
@@ -237,13 +190,6 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Enable or disable tracing for {function}. Return the previous state.
bool SetTracing(const WasmFunction* function, bool enabled);
- // Set the associated wasm instance object.
- // If the instance object has been set, some tables stored inside it are used
- // instead of the tables stored in the WasmModule struct. This allows to call
- // back and forth between the interpreter and outside code (JS or wasm
- // compiled) without repeatedly copying information.
- void SetInstanceObject(WasmInstanceObject*);
-
//==========================================================================
// Thread iteration and inspection.
//==========================================================================
@@ -254,8 +200,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Memory access.
//==========================================================================
size_t GetMemorySize();
- WasmVal ReadMemory(size_t offset);
- void WriteMemory(size_t offset, WasmVal val);
+ WasmValue ReadMemory(size_t offset);
+ void WriteMemory(size_t offset, WasmValue val);
// Update the memory region, e.g. after external GrowMemory.
void UpdateMemory(byte* mem_start, uint32_t mem_size);
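A rough usage sketch, not taken from this patch, of the HeapObjectsScope declared above: callers open the scope before any interpreter execution that may touch the instance object or call out to JS, which replaces the removed SetInstanceObject() setter. The helper below is hypothetical and stands in for the real debugger/test entry points.

// Hypothetical caller; interpreter, thread, instance, function and args are
// assumed to be provided by the surrounding debugging code.
void RunInInterpreter(WasmInterpreter* interpreter,
                      WasmInterpreter::Thread* thread,
                      Handle<WasmInstanceObject> instance,
                      const WasmFunction* function, WasmValue* args) {
  // Installs the instance on the interpreter's code map for the duration of
  // the call (a no-op if one is already installed) and clears it on exit.
  WasmInterpreter::HeapObjectsScope heap_objects_scope(interpreter, instance);
  thread->InitFrame(function, args);
  thread->Run();  // run to completion, pause, or breakpoint
}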
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 3dde623594..5f775f0d35 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -40,19 +40,34 @@ namespace {
} \
} while (false)
-// TODO(wasm): move brand check to the respective types, and don't throw
-// in it, rather, use a provided ErrorThrower, or let caller handle it.
-static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> sym) {
- if (!value->IsJSObject()) return false;
- i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
- Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
- return has_brand.FromMaybe(false);
-}
-
-static bool BrandCheck(i::Handle<i::Object> value, i::Handle<i::Symbol> sym,
- ErrorThrower* thrower, const char* msg) {
- return HasBrand(value, sym) ? true : (thrower->TypeError("%s", msg), false);
-}
+// Like an ErrorThrower, but turns all pending exceptions into scheduled
+// exceptions when going out of scope. Use this in API methods.
+// Note that pending exceptions are not necessarily created by the ErrorThrower,
+// but e.g. by the wasm start function. There might also be a scheduled
+// exception, created by another API call (e.g. v8::Object::Get). But there
+// should never be both pending and scheduled exceptions.
+class ScheduledErrorThrower : public ErrorThrower {
+ public:
+ ScheduledErrorThrower(v8::Isolate* isolate, const char* context)
+ : ScheduledErrorThrower(reinterpret_cast<i::Isolate*>(isolate), context) {
+ }
+ ScheduledErrorThrower(i::Isolate* isolate, const char* context)
+ : ErrorThrower(isolate, context) {}
+ ~ScheduledErrorThrower() {
+ // There should never be both a pending and a scheduled exception.
+ DCHECK(!isolate()->has_scheduled_exception() ||
+ !isolate()->has_pending_exception());
+ // Don't throw another error if there is already a scheduled error.
+ if (isolate()->has_scheduled_exception()) {
+ Reset();
+ } else if (isolate()->has_pending_exception()) {
+ Reset();
+ isolate()->OptionalRescheduleException(false);
+ } else if (error()) {
+ isolate()->ScheduleThrow(*Reify());
+ }
+ }
+};
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
@@ -63,17 +78,14 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- v8::Isolate* isolate = args.GetIsolate();
if (args.Length() < 1) {
thrower->TypeError("Argument 0 must be a WebAssembly.Module");
return {};
}
- Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args[0]),
- i::handle(i_context->wasm_module_sym()), thrower,
- "Argument 0 must be a WebAssembly.Module")) {
+ i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
+ if (!arg0->IsWasmModuleObject()) {
+ thrower->TypeError("Argument 0 must be a WebAssembly.Module");
return {};
}
@@ -121,7 +133,6 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
i::wasm::kV8MaxWasmModuleSize, length);
}
if (thrower->error()) return i::wasm::ModuleWireBytes(nullptr, nullptr);
- // TODO(titzer): use the handle as well?
return i::wasm::ModuleWireBytes(start, start + length);
}
@@ -137,32 +148,13 @@ i::MaybeHandle<i::JSReceiver> GetValueAsImports(Local<Value> arg,
return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
}
-void RejectResponseAPI(const v8::FunctionCallbackInfo<v8::Value>& args,
- ErrorThrower* thrower) {
- v8::Isolate* isolate = args.GetIsolate();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- HandleScope scope(isolate);
- Local<Context> context = isolate->GetCurrentContext();
-
- ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
- Local<Promise> module_promise = resolver->GetPromise();
- args.GetReturnValue().Set(module_promise);
- thrower->TypeError(
- "Argument 0 must be provided and must be a Response or Response promise");
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower->Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
-}
-
void WebAssemblyCompileStreaming(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- if (!i_isolate->wasm_compile_callback()(args)) {
- ErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()");
- RejectResponseAPI(args, &thrower);
- }
+ DCHECK_NOT_NULL(i_isolate->wasm_compile_streaming_callback());
+ i_isolate->wasm_compile_streaming_callback()(args);
}
// WebAssembly.compile(bytes) -> Promise
@@ -170,10 +162,9 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- if (i_isolate->wasm_compile_callback()(args)) return;
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
Local<Context> context = isolate->GetCurrentContext();
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
@@ -196,7 +187,7 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.validate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
@@ -217,7 +208,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i_isolate->wasm_module_callback()(args)) return;
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
@@ -237,7 +228,7 @@ void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -250,7 +241,7 @@ void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -264,7 +255,8 @@ void WebAssemblyModuleCustomSections(
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Module.customSections()");
+ ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.Module.customSections()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -292,24 +284,23 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
Local<Value> ffi) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
- i::MaybeHandle<i::JSReceiver> maybe_imports =
- GetValueAsImports(ffi, &thrower);
- if (thrower.error()) return {};
-
- i::Handle<i::WasmModuleObject> module_obj =
- i::Handle<i::WasmModuleObject>::cast(
- Utils::OpenHandle(Object::Cast(*module)));
- i::MaybeHandle<i::Object> instance_object =
- i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj, maybe_imports,
- i::MaybeHandle<i::JSArrayBuffer>());
-
- if (instance_object.is_null()) {
- // TODO(wasm): this *should* mean there's an error to throw, but
- // we exit sometimes the instantiation pipeline without throwing.
- // v8:6232.
- return {};
+ i::MaybeHandle<i::Object> instance_object;
+ {
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(ffi, &thrower);
+ if (thrower.error()) return {};
+
+ i::Handle<i::WasmModuleObject> module_obj =
+ i::Handle<i::WasmModuleObject>::cast(
+ Utils::OpenHandle(Object::Cast(*module)));
+ instance_object =
+ i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
}
+
+ DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
+ if (instance_object.is_null()) return {};
return Utils::ToLocal(instance_object.ToHandleChecked());
}
@@ -375,9 +366,9 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (i_isolate->wasm_instance_callback()(args)) return;
- ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
- auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+ GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
// If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
@@ -424,14 +415,12 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- if (i_isolate->wasm_instantiate_callback()(args)) return;
- ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
Local<Promise> module_promise = resolver->GetPromise();
@@ -459,7 +448,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
FunctionCallback instantiator = nullptr;
- if (HasBrand(first_arg, i::Handle<i::Symbol>(i_context->wasm_module_sym()))) {
+ if (first_arg->IsWasmModuleObject()) {
module_promise = resolver->GetPromise();
if (!resolver->Resolve(context, first_arg_value).IsJust()) return;
instantiator = WebAssemblyInstantiateImplCallback;
@@ -483,7 +472,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
Local<Context> context, Local<v8::Object> object,
- Local<String> property, int* result,
+ Local<String> property, int64_t* result,
int64_t lower_bound, uint64_t upper_bound) {
v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
v8::Local<v8::Value> value;
@@ -513,7 +502,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (args.Length() < 1 || !args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a table descriptor");
return;
@@ -536,14 +525,14 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
// The descriptor's 'initial'.
- int initial = 0;
+ int64_t initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "initial"), &initial, 0,
i::FLAG_wasm_max_table_size)) {
return;
}
// The descriptor's 'maximum'.
- int maximum = -1;
+ int64_t maximum = -1;
Local<String> maximum_key = v8_str(isolate, "maximum");
Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
@@ -556,8 +545,8 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj =
- i::WasmTableObject::New(i_isolate, initial, maximum, &fixed_array);
+ i::Handle<i::JSObject> table_obj = i::WasmTableObject::New(
+ i_isolate, static_cast<uint32_t>(initial), maximum, &fixed_array);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(table_obj));
}
@@ -566,7 +555,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
if (args.Length() < 1 || !args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a memory descriptor");
return;
@@ -574,14 +563,14 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
// The descriptor's 'initial'.
- int initial = 0;
+ int64_t initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "initial"), &initial, 0,
i::FLAG_wasm_max_mem_pages)) {
return;
}
// The descriptor's 'maximum'.
- int maximum = -1;
+ int64_t maximum = -1;
Local<String> maximum_key = v8_str(isolate, "maximum");
Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
@@ -592,34 +581,71 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
}
+
+ bool is_shared_memory = false;
+ if (i::FLAG_experimental_wasm_threads) {
+ // Shared property of descriptor
+ Local<String> shared_key = v8_str(isolate, "shared");
+ Maybe<bool> has_shared = descriptor->Has(context, shared_key);
+ if (!has_shared.IsNothing() && has_shared.FromJust()) {
+ v8::MaybeLocal<v8::Value> maybe = descriptor->Get(context, shared_key);
+ v8::Local<v8::Value> value;
+ if (maybe.ToLocal(&value)) {
+ if (!value->BooleanValue(context).To(&is_shared_memory)) return;
+ }
+ }
+ // Throw TypeError if shared is true, and the descriptor has no "maximum"
+ if (is_shared_memory && maximum == -1) {
+ thrower.TypeError(
+ "If shared is true, maximum property should be defined.");
+ }
+ }
+
size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
static_cast<size_t>(initial);
- i::Handle<i::JSArrayBuffer> buffer =
- i::wasm::NewArrayBuffer(i_isolate, size, i::FLAG_wasm_guard_pages);
+ i::Handle<i::JSArrayBuffer> buffer = i::wasm::NewArrayBuffer(
+ i_isolate, size, i::FLAG_wasm_guard_pages,
+ is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
if (buffer.is_null()) {
thrower.RangeError("could not allocate memory");
return;
}
- i::Handle<i::JSObject> memory_obj =
- i::WasmMemoryObject::New(i_isolate, buffer, maximum);
+ if (buffer->is_shared()) {
+ Maybe<bool> result =
+ buffer->SetIntegrityLevel(buffer, i::FROZEN, i::Object::DONT_THROW);
+ if (!result.FromJust()) {
+ thrower.TypeError(
+ "Status of setting SetIntegrityLevel of buffer is false.");
+ }
+ }
+ i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
+ i_isolate, buffer, static_cast<int32_t>(maximum));
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
+#define NAME_OF_WasmMemoryObject "WebAssembly.Memory"
+#define NAME_OF_WasmModuleObject "WebAssembly.Module"
+#define NAME_OF_WasmInstanceObject "WebAssembly.Instance"
+#define NAME_OF_WasmTableObject "WebAssembly.Table"
+
+#define EXTRACT_THIS(var, WasmType) \
+ i::Handle<i::WasmType> var; \
+ { \
+ i::Handle<i::Object> this_arg = Utils::OpenHandle(*args.This()); \
+ if (!this_arg->Is##WasmType()) { \
+ thrower.TypeError("Receiver is not a " NAME_OF_##WasmType); \
+ return; \
+ } \
+ var = i::Handle<i::WasmType>::cast(this_arg); \
+ }
+
void WebAssemblyTableGetLength(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
- Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
- "Receiver is not a WebAssembly.Table")) {
- return;
- }
- auto receiver =
- i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
+ EXTRACT_THIS(receiver, WasmTableObject);
args.GetReturnValue().Set(
v8::Number::New(isolate, receiver->current_length()));
}
@@ -629,17 +655,10 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
- "Receiver is not a WebAssembly.Table")) {
- return;
- }
+ EXTRACT_THIS(receiver, WasmTableObject);
- auto receiver =
- i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
int old_size = old_array->length();
int64_t new_size64 = 0;
@@ -648,9 +667,8 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
new_size64 += old_size;
- int64_t max_size64 = receiver->maximum_length();
- if (max_size64 < 0 ||
- max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_table_size)) {
+ int64_t max_size64 = receiver->maximum_length()->Number();
+ if (max_size64 < 0 || max_size64 > i::FLAG_wasm_max_table_size) {
max_size64 = i::FLAG_wasm_max_table_size;
}
@@ -682,17 +700,9 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
- "Receiver is not a WebAssembly.Table")) {
- return;
- }
-
- auto receiver =
- i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+ EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
int i = 0;
if (args.Length() > 0 && !args[0]->Int32Value(context).To(&i)) return;
@@ -711,18 +721,20 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
- "Receiver is not a WebAssembly.Table")) {
- return;
- }
+ EXTRACT_THIS(receiver, WasmTableObject);
+
if (args.Length() < 2) {
thrower.TypeError("Argument 1 must be null or a function");
return;
}
+
+ // Parameter 0.
+ int32_t index;
+ if (!args[0]->Int32Value(context).To(&index)) return;
+
+ // Parameter 1.
i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
if (!value->IsNull(i_isolate) &&
(!value->IsJSFunction() ||
@@ -732,27 +744,10 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- auto receiver =
- i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
- int i;
- if (!args[0]->Int32Value(context).To(&i)) return;
- if (i < 0 || i >= array->length()) {
- thrower.RangeError("index out of bounds");
- return;
- }
-
- i::Handle<i::FixedArray> dispatch_tables(receiver->dispatch_tables(),
- i_isolate);
- if (value->IsNull(i_isolate)) {
- i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
- i::Handle<i::JSFunction>::null());
- } else {
- i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
- i::Handle<i::JSFunction>::cast(value));
- }
-
- i::Handle<i::FixedArray>::cast(array)->set(i, *value);
+ i::wasm::TableSet(&thrower, i_isolate, receiver, index,
+ value->IsNull(i_isolate)
+ ? i::Handle<i::JSFunction>::null()
+ : i::Handle<i::JSFunction>::cast(value));
}
// WebAssembly.Memory.grow(num) -> num
@@ -760,27 +755,21 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_memory_sym()), &thrower,
- "Receiver is not a WebAssembly.Memory")) {
- return;
- }
+ EXTRACT_THIS(receiver, WasmMemoryObject);
+
int64_t delta_size = 0;
if (args.Length() < 1 || !args[0]->IntegerValue(context).To(&delta_size)) {
thrower.TypeError("Argument 0 required, must be numeric value of pages");
return;
}
- i::Handle<i::WasmMemoryObject> receiver =
- i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
int64_t max_size64 = receiver->maximum_pages();
if (max_size64 < 0 ||
max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
max_size64 = i::FLAG_wasm_max_mem_pages;
}
- i::Handle<i::JSArrayBuffer> old_buffer(receiver->buffer());
+ i::Handle<i::JSArrayBuffer> old_buffer(receiver->array_buffer());
uint32_t old_size =
old_buffer->byte_length()->Number() / i::wasm::kSpecMaxWasmMemoryPages;
int64_t new_size64 = old_size + delta_size;
@@ -796,7 +785,9 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
bool free_memory = (delta_size != 0);
- i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer, free_memory);
+ if (!old_buffer->is_shared()) {
+ i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer, free_memory);
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
}
@@ -807,18 +798,23 @@ void WebAssemblyMemoryGetBuffer(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
- Local<Context> context = isolate->GetCurrentContext();
- i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- if (!BrandCheck(Utils::OpenHandle(*args.This()),
- i::Handle<i::Symbol>(i_context->wasm_memory_sym()), &thrower,
- "Receiver is not a WebAssembly.Memory")) {
- return;
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
+ EXTRACT_THIS(receiver, WasmMemoryObject);
+
+ i::Handle<i::Object> buffer_obj(receiver->array_buffer(), i_isolate);
+ DCHECK(buffer_obj->IsJSArrayBuffer());
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(*buffer_obj));
+ if (buffer->is_shared()) {
+    // TODO(gdeepti): More is needed here for when the cached buffer and the
+    // current buffer are out of sync; handle that here once bounds checks and
+    // Grow are handled correctly.
+ Maybe<bool> result =
+ buffer->SetIntegrityLevel(buffer, i::FROZEN, i::Object::DONT_THROW);
+ if (!result.FromJust()) {
+ thrower.TypeError(
+ "Status of setting SetIntegrityLevel of buffer is false.");
+ }
}
- i::Handle<i::WasmMemoryObject> receiver =
- i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::Object> buffer(receiver->buffer(), i_isolate);
- DCHECK(buffer->IsJSArrayBuffer());
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(buffer));
}
@@ -842,8 +838,8 @@ Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
Handle<String> name = v8_str(isolate, str);
Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
Handle<JSFunction> function =
- ApiNatives::InstantiateFunction(temp).ToHandleChecked();
- JSFunction::SetName(function, name, isolate->factory()->empty_string());
+ ApiNatives::InstantiateFunction(temp, name).ToHandleChecked();
+ DCHECK(function->shared()->has_shared_name());
function->shared()->set_length(length);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(object, name, function, attributes);
@@ -854,8 +850,10 @@ Handle<JSFunction> InstallGetter(Isolate* isolate, Handle<JSObject> object,
const char* str, FunctionCallback func) {
Handle<String> name = v8_str(isolate, str);
Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ // TODO(ishell): shouldn't we set "get "+name as getter's name?
Handle<JSFunction> function =
ApiNatives::InstantiateFunction(temp).ToHandleChecked();
+ DCHECK(function->shared()->has_shared_name());
v8::PropertyAttribute attributes =
static_cast<v8::PropertyAttribute>(v8::DontEnum);
Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
@@ -867,55 +865,19 @@ Handle<JSFunction> InstallGetter(Isolate* isolate, Handle<JSObject> object,
void WasmJs::Install(Isolate* isolate) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
- // TODO(titzer): once FLAG_expose_wasm is gone, this should become a DCHECK.
- if (context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) return;
-
- // Install Maps.
-
- // TODO(titzer): Also make one for strict mode functions?
- Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
-
- InstanceType instance_type = prev_map->instance_type();
- int embedder_fields = JSObject::GetEmbedderFieldCount(*prev_map);
- CHECK_EQ(0, embedder_fields);
- int pre_allocated =
- prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
- int instance_size = 0;
- int in_object_properties = 0;
- int wasm_embedder_fields = embedder_fields + 1 // module instance object
- + 1 // function arity
- + 1; // function signature
- JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_embedder_fields,
- 0, &instance_size,
- &in_object_properties);
-
- int unused_property_fields = in_object_properties - pre_allocated;
- Handle<Map> map = Map::CopyInitialMap(
- prev_map, instance_size, in_object_properties, unused_property_fields);
-
- context->set_wasm_function_map(*map);
-
- // Install symbols.
+ // Install the JS API once only.
+ Object* prev = context->get(Context::WASM_MODULE_CONSTRUCTOR_INDEX);
+ if (!prev->IsUndefined(isolate)) {
+ DCHECK(prev->IsJSFunction());
+ return;
+ }
Factory* factory = isolate->factory();
- // Create private symbols.
- Handle<Symbol> module_sym = factory->NewPrivateSymbol();
- context->set_wasm_module_sym(*module_sym);
-
- Handle<Symbol> instance_sym = factory->NewPrivateSymbol();
- context->set_wasm_instance_sym(*instance_sym);
-
- Handle<Symbol> table_sym = factory->NewPrivateSymbol();
- context->set_wasm_table_sym(*table_sym);
-
- Handle<Symbol> memory_sym = factory->NewPrivateSymbol();
- context->set_wasm_memory_sym(*memory_sym);
-
- // Install the JS API.
// Setup WebAssembly
Handle<String> name = v8_str(isolate, "WebAssembly");
- Handle<JSFunction> cons = factory->NewFunction(name);
+ Handle<JSFunction> cons = factory->NewFunction(isolate->strict_function_map(),
+ name, MaybeHandle<Code>());
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
@@ -926,12 +888,15 @@ void WasmJs::Install(Isolate* isolate) {
JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly"), ro_attributes);
InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
- InstallFunc(isolate, webassembly, "compileStreaming",
- WebAssemblyCompileStreaming, 1);
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
- InstallFunc(isolate, webassembly, "instantiateStreaming",
- WebAssemblyInstantiateStreaming, 1);
+
+ if (isolate->wasm_compile_streaming_callback() != nullptr) {
+ InstallFunc(isolate, webassembly, "compileStreaming",
+ WebAssemblyCompileStreaming, 1);
+ InstallFunc(isolate, webassembly, "instantiateStreaming",
+ WebAssemblyInstantiateStreaming, 1);
+ }
// Setup Module
Handle<JSFunction> module_constructor =
@@ -940,8 +905,8 @@ void WasmJs::Install(Isolate* isolate) {
Handle<JSObject> module_proto =
factory->NewJSObject(module_constructor, TENURED);
i::Handle<i::Map> module_map = isolate->factory()->NewMap(
- i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
- WasmModuleObject::kFieldCount * i::kPointerSize);
+ i::WASM_MODULE_TYPE, i::JSObject::kHeaderSize +
+ WasmModuleObject::kFieldCount * i::kPointerSize);
JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
1);
@@ -961,8 +926,7 @@ void WasmJs::Install(Isolate* isolate) {
Handle<JSObject> instance_proto =
factory->NewJSObject(instance_constructor, TENURED);
i::Handle<i::Map> instance_map = isolate->factory()->NewMap(
- i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
- WasmInstanceObject::kFieldCount * i::kPointerSize);
+ i::WASM_INSTANCE_TYPE, WasmInstanceObject::kSize);
JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
JSObject::AddProperty(instance_proto,
isolate->factory()->constructor_string(),
@@ -976,9 +940,8 @@ void WasmJs::Install(Isolate* isolate) {
context->set_wasm_table_constructor(*table_constructor);
Handle<JSObject> table_proto =
factory->NewJSObject(table_constructor, TENURED);
- i::Handle<i::Map> table_map = isolate->factory()->NewMap(
- i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
- WasmTableObject::kFieldCount * i::kPointerSize);
+ i::Handle<i::Map> table_map =
+ isolate->factory()->NewMap(i::WASM_TABLE_TYPE, WasmTableObject::kSize);
JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
table_constructor, DONT_ENUM);
@@ -995,9 +958,8 @@ void WasmJs::Install(Isolate* isolate) {
context->set_wasm_memory_constructor(*memory_constructor);
Handle<JSObject> memory_proto =
factory->NewJSObject(memory_constructor, TENURED);
- i::Handle<i::Map> memory_map = isolate->factory()->NewMap(
- i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
- WasmMemoryObject::kFieldCount * i::kPointerSize);
+ i::Handle<i::Map> memory_map =
+ isolate->factory()->NewMap(i::WASM_MEMORY_TYPE, WasmMemoryObject::kSize);
JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
memory_constructor, DONT_ENUM);
@@ -1021,15 +983,5 @@ void WasmJs::Install(Isolate* isolate) {
JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
runtime_error, attributes);
}
-
-bool WasmJs::IsWasmMemoryObject(Isolate* isolate, Handle<Object> value) {
- i::Handle<i::Symbol> symbol(isolate->context()->wasm_memory_sym(), isolate);
- return HasBrand(value, symbol);
-}
-
-bool WasmJs::IsWasmTableObject(Isolate* isolate, Handle<Object> value) {
- i::Handle<i::Symbol> symbol(isolate->context()->wasm_table_sym(), isolate);
- return HasBrand(value, symbol);
-}
} // namespace internal
} // namespace v8
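For reference, a hedged sketch, not part of the patch, of the entry-point pattern that ScheduledErrorThrower and EXTRACT_THIS establish above: construct the scheduled thrower first, extract and instance-type-check the receiver (replacing the old symbol brand checks), and let the thrower's destructor turn any reported or pending error into a scheduled exception when the callback returns. The getter name and the property it returns are made up for illustration.

// Hypothetical API callback following the pattern used by the real
// WebAssembly.Table/Memory callbacks rewritten above.
void WebAssemblyExampleGetter(const v8::FunctionCallbackInfo<v8::Value>& args) {
  v8::Isolate* isolate = args.GetIsolate();
  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  HandleScope scope(isolate);
  // Errors reported here become scheduled exceptions when the thrower is
  // destroyed, instead of leaking a pending exception out of the API call.
  ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.example");
  // Instance-type check plus cast; throws a TypeError and returns on mismatch.
  EXTRACT_THIS(receiver, WasmMemoryObject);
  args.GetReturnValue().Set(
      v8::Number::New(isolate, receiver->maximum_pages()));
}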
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 05d5ea3061..0ef2219b1f 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -10,7 +10,8 @@
namespace v8 {
namespace internal {
-// Exposes a WASM API to JavaScript through the V8 API.
+
+// Exposes a WebAssembly API to JavaScript through the V8 API.
class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate);
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index c2ecf61657..0e10688cd6 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -20,9 +20,13 @@ constexpr size_t kV8MaxWasmFunctions = 1000000;
constexpr size_t kV8MaxWasmImports = 100000;
constexpr size_t kV8MaxWasmExports = 100000;
constexpr size_t kV8MaxWasmGlobals = 1000000;
+constexpr size_t kV8MaxWasmExceptions = 1000000;
+constexpr size_t kV8MaxWasmExceptionTypes = 1000000;
constexpr size_t kV8MaxWasmDataSegments = 100000;
// Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages.
-constexpr size_t kV8MaxWasmMemoryPages = 16384; // = 1 GiB
+// Current limit mimics the maximum allowed allocation on an ArrayBuffer
+// (2GiB - 1 page).
+constexpr size_t kV8MaxWasmMemoryPages = 32767; // ~ 2 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
constexpr size_t kV8MaxWasmFunctionSize = 128 * 1024;
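The raised page limit is easiest to check against the 64 KiB page size used throughout this directory: 32767 pages × 65536 bytes = 2147418112 bytes, i.e. 2 GiB minus one page, which stays within the ArrayBuffer allocation ceiling the comment refers to. An illustrative compile-time check, not part of the patch:

// Sanity check (illustrative only): the limit is 2 GiB minus one 64 KiB page.
static_assert(kV8MaxWasmMemoryPages * size_t{65536} ==
                  size_t{2} * 1024 * 1024 * 1024 - 65536,
              "kV8MaxWasmMemoryPages must equal 2 GiB minus one page");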
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 5b8020b4f5..7901a6b619 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -2,24 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <functional>
#include <memory>
-#include "src/asmjs/asm-js.h"
-#include "src/assembler-inl.h"
-#include "src/base/atomic-utils.h"
-#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
-#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
-#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/compilation-manager.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
@@ -49,30 +45,6 @@ namespace base = v8::base;
namespace {
-static const int kInvalidSigIndex = -1;
-
-byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
- return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
-}
-
-static void RecordStats(Isolate* isolate, Code* code, bool is_sync) {
- if (is_sync) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- isolate->counters()->wasm_generated_code_size()->Increment(
- code->body_size());
- isolate->counters()->wasm_reloc_size()->Increment(
- code->relocation_info()->length());
- }
-}
-
-static void RecordStats(Isolate* isolate, Handle<FixedArray> functions,
- bool is_sync) {
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < functions->length(); ++i) {
- RecordStats(isolate, Code::cast(functions->get(i)), is_sync);
- }
-}
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool enable_guard_regions, void*& allocation_base,
@@ -108,721 +80,14 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
return memory;
} else {
- void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ void* memory =
+ size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
allocation_base = memory;
allocation_length = size;
return memory;
}
}
-void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
-}
-
-Handle<Script> CreateWasmScript(Isolate* isolate,
- const ModuleWireBytes& wire_bytes) {
- Handle<Script> script =
- isolate->factory()->NewScript(isolate->factory()->empty_string());
- script->set_context_data(isolate->native_context()->debug_context_id());
- script->set_type(Script::TYPE_WASM);
-
- int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(wire_bytes.start()),
- static_cast<int>(wire_bytes.length()), kZeroHashSeed);
-
- const int kBufferSize = 32;
- char buffer[kBufferSize];
- int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
- DCHECK(url_chars >= 0 && url_chars < kBufferSize);
- MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
- TENURED);
- script->set_source_url(*url_str.ToHandleChecked());
-
- int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
- DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
- TENURED);
- script->set_name(*name_str.ToHandleChecked());
-
- return script;
-}
-
-class JSToWasmWrapperCache {
- public:
- Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
- const wasm::WasmModule* module,
- Handle<Code> wasm_code,
- uint32_t index) {
- const wasm::WasmFunction* func = &module->functions[index];
- int cached_idx = sig_map_.Find(func->sig);
- if (cached_idx >= 0) {
- Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
- // Now patch the call to wasm code.
- for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
- DCHECK(!it.done());
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION ||
- target->kind() == Code::WASM_TO_JS_FUNCTION ||
- target->builtin_index() == Builtins::kIllegal ||
- target->builtin_index() == Builtins::kWasmCompileLazy) {
- it.rinfo()->set_target_address(isolate,
- wasm_code->instruction_start());
- break;
- }
- }
- return code;
- }
-
- Handle<Code> code =
- compiler::CompileJSToWasmWrapper(isolate, module, wasm_code, index);
- uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
- DCHECK_EQ(code_cache_.size(), new_cache_idx);
- USE(new_cache_idx);
- code_cache_.push_back(code);
- return code;
- }
-
- private:
- // sig_map_ maps signatures to an index in code_cache_.
- wasm::SignatureMap sig_map_;
- std::vector<Handle<Code>> code_cache_;
-};
-
-// Ensure that the code object in <code_table> at offset <func_index> has
-// deoptimization data attached. This is needed for lazy compile stubs which are
-// called from JS_TO_WASM functions or via exported function tables. The deopt
-// data is used to determine which function this lazy compile stub belongs to.
-Handle<Code> EnsureExportedLazyDeoptData(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Handle<FixedArray> code_table,
- int func_index) {
- Handle<Code> code(Code::cast(code_table->get(func_index)), isolate);
- if (code->builtin_index() != Builtins::kWasmCompileLazy) {
- // No special deopt data needed for compiled functions, and imported
- // functions, which map to Illegal at this point (they get compiled at
- // instantiation time).
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_JS_FUNCTION ||
- code->builtin_index() == Builtins::kIllegal);
- return code;
- }
- // deopt_data:
- // #0: weak instance
- // #1: func_index
- // might be extended later for table exports (see
- // EnsureTableExportLazyDeoptData).
- Handle<FixedArray> deopt_data(code->deoptimization_data());
- DCHECK_EQ(0, deopt_data->length() % 2);
- if (deopt_data->length() == 0) {
- code = isolate->factory()->CopyCode(code);
- code_table->set(func_index, *code);
- deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
- code->set_deoptimization_data(*deopt_data);
- if (!instance.is_null()) {
- Handle<WeakCell> weak_instance =
- isolate->factory()->NewWeakCell(instance);
- deopt_data->set(0, *weak_instance);
- }
- deopt_data->set(1, Smi::FromInt(func_index));
- }
- DCHECK_IMPLIES(!instance.is_null(),
- WeakCell::cast(code->deoptimization_data()->get(0))->value() ==
- *instance);
- DCHECK_EQ(func_index,
- Smi::cast(code->deoptimization_data()->get(1))->value());
- return code;
-}
-
-// Ensure that the code object in <code_table> at offset <func_index> has
-// deoptimization data attached. This is needed for lazy compile stubs which are
-// called from JS_TO_WASM functions or via exported function tables. The deopt
-// data is used to determine which function this lazy compile stub belongs to.
-Handle<Code> EnsureTableExportLazyDeoptData(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<FixedArray> code_table, int func_index,
- Handle<FixedArray> export_table, int export_index,
- std::unordered_map<uint32_t, uint32_t>& table_export_count) {
- Handle<Code> code =
- EnsureExportedLazyDeoptData(isolate, instance, code_table, func_index);
- if (code->builtin_index() != Builtins::kWasmCompileLazy) return code;
-
- // deopt_data:
- // #0: weak instance
- // #1: func_index
- // [#2: export table
- // #3: export table index]
- // [#4: export table
- // #5: export table index]
- // ...
- // table_export_count counts down and determines the index for the new export
- // table entry.
- auto table_export_entry = table_export_count.find(func_index);
- DCHECK(table_export_entry != table_export_count.end());
- DCHECK_LT(0, table_export_entry->second);
- uint32_t this_idx = 2 * table_export_entry->second;
- --table_export_entry->second;
- Handle<FixedArray> deopt_data(code->deoptimization_data());
- DCHECK_EQ(0, deopt_data->length() % 2);
- if (deopt_data->length() == 2) {
- // Then only the "header" (#0 and #1) exists. Extend for the export table
- // entries (make space for this_idx + 2 elements).
- deopt_data = isolate->factory()->CopyFixedArrayAndGrow(deopt_data, this_idx,
- TENURED);
- code->set_deoptimization_data(*deopt_data);
- }
- DCHECK_LE(this_idx + 2, deopt_data->length());
- DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
- DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
- deopt_data->set(this_idx, *export_table);
- deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
- return code;
-}
-
-bool compile_lazy(const WasmModule* module) {
- return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
-}
-
-// A helper for compiling an entire module.
-class CompilationHelper {
- public:
- // The compilation helper takes ownership of the {WasmModule}.
- // In {CompileToModuleObject}, it will transfer ownership to the generated
- // {WasmModuleWrapper}. If this method is not called, ownership may be
- // reclaimed by explicitly releasing the {module_} field.
- CompilationHelper(Isolate* isolate, std::unique_ptr<WasmModule> module,
- bool is_sync)
- : isolate_(isolate),
- module_(std::move(module)),
- is_sync_(is_sync),
- executed_units_(
- isolate->random_number_generator(),
- (isolate->heap()->memory_allocator()->code_range()->valid()
- ? isolate->heap()->memory_allocator()->code_range()->size()
- : isolate->heap()->code_space()->Capacity()) /
- 2),
- num_background_tasks_(Min(
- static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())),
- stopped_compilation_tasks_(num_background_tasks_) {}
-
- bool GetNextUncompiledFunctionId(size_t* index) {
- DCHECK_NOT_NULL(index);
- // - 1 because AtomicIncrement returns the value after the atomic increment.
- *index = next_unit_.Increment(1) - 1;
- return *index < compilation_units_.size();
- }
-
- // The actual runnable task that performs compilations in the background.
- class CompilationTask : public CancelableTask {
- public:
- CompilationHelper* helper_;
- explicit CompilationTask(CompilationHelper* helper)
- : CancelableTask(helper->isolate_, &helper->background_task_manager_),
- helper_(helper) {}
-
- void RunInternal() override {
- size_t index = 0;
- while (helper_->executed_units_.CanAcceptWork() &&
- helper_->GetNextUncompiledFunctionId(&index)) {
- helper_->CompileAndSchedule(index);
- }
- helper_->OnBackgroundTaskStopped();
- }
- };
-
- void OnBackgroundTaskStopped() {
- base::LockGuard<base::Mutex> guard(&tasks_mutex_);
- ++stopped_compilation_tasks_;
- DCHECK_LE(stopped_compilation_tasks_, num_background_tasks_);
- }
-
- void CompileAndSchedule(size_t index) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
- DCHECK_LT(index, compilation_units_.size());
-
- std::unique_ptr<compiler::WasmCompilationUnit> unit =
- std::move(compilation_units_.at(index));
- unit->ExecuteCompilation();
- {
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- executed_units_.Schedule(std::move(unit));
- }
- }
-
- class CodeGenerationSchedule {
- public:
- explicit CodeGenerationSchedule(
- base::RandomNumberGenerator* random_number_generator,
- size_t max_memory = 0);
-
- void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
-
- bool IsEmpty() const { return schedule_.empty(); }
-
- std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
-
- bool CanAcceptWork() const;
-
- void EnableThrottling() { throttle_ = true; }
-
- private:
- size_t GetRandomIndexInSchedule();
-
- base::RandomNumberGenerator* random_number_generator_ = nullptr;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
- const size_t max_memory_;
- bool throttle_ = false;
- base::AtomicNumber<size_t> allocated_memory_{0};
- };
-
- Isolate* isolate_;
- std::unique_ptr<WasmModule> module_;
- bool is_sync_;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
- compilation_units_;
- CodeGenerationSchedule executed_units_;
- base::Mutex result_mutex_;
- base::AtomicNumber<size_t> next_unit_;
- const size_t num_background_tasks_ = 0;
- CancelableTaskManager background_task_manager_;
-
- // Run by each compilation task and by the main thread.
- bool FetchAndExecuteCompilationUnit() {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
-
- // - 1 because AtomicIncrement returns the value after the atomic increment.
- size_t index = next_unit_.Increment(1) - 1;
- if (index >= compilation_units_.size()) {
- return false;
- }
-
- std::unique_ptr<compiler::WasmCompilationUnit> unit =
- std::move(compilation_units_.at(index));
- unit->ExecuteCompilation();
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- executed_units_.Schedule(std::move(unit));
- return true;
- }
-
- size_t InitializeParallelCompilation(
- const std::vector<WasmFunction>& functions, ModuleBytesEnv& module_env) {
- uint32_t start = module_env.module_env.module->num_imported_functions +
- FLAG_skip_compiling_wasm_funcs;
- uint32_t num_funcs = static_cast<uint32_t>(functions.size());
- uint32_t funcs_to_compile = start > num_funcs ? 0 : num_funcs - start;
- compilation_units_.reserve(funcs_to_compile);
- for (uint32_t i = start; i < num_funcs; ++i) {
- const WasmFunction* func = &functions[i];
- constexpr bool is_sync = true;
- compilation_units_.push_back(
- std::unique_ptr<compiler::WasmCompilationUnit>(
- new compiler::WasmCompilationUnit(isolate_, &module_env, func,
- !is_sync)));
- }
- return funcs_to_compile;
- }
-
- void RestartCompilationTasks() {
- base::LockGuard<base::Mutex> guard(&tasks_mutex_);
- for (; stopped_compilation_tasks_ > 0; --stopped_compilation_tasks_) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompilationTask(this), v8::Platform::kShortRunningTask);
- }
- }
-
- void WaitForCompilationTasks(uint32_t* task_ids) {
- for (size_t i = 0; i < num_background_tasks_; ++i) {
- // If the task has not started yet, then we abort it. Otherwise we wait
- // for it to finish.
- if (isolate_->cancelable_task_manager()->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- module_->pending_tasks.get()->Wait();
- }
- }
- }
-
- size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
- ErrorThrower* thrower) {
- size_t finished = 0;
- while (true) {
- int func_index = -1;
- Handle<Code> result = FinishCompilationUnit(thrower, &func_index);
- if (func_index < 0) break;
- results[func_index] = result;
- ++finished;
- }
- RestartCompilationTasks();
- return finished;
- }
-
- Handle<Code> FinishCompilationUnit(ErrorThrower* thrower, int* func_index) {
- std::unique_ptr<compiler::WasmCompilationUnit> unit;
- {
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- if (executed_units_.IsEmpty()) return Handle<Code>::null();
- unit = executed_units_.GetNext();
- }
- *func_index = unit->func_index();
- Handle<Code> result = unit->FinishCompilation(thrower);
- return result;
- }
-
- void CompileInParallel(ModuleBytesEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower) {
- const WasmModule* module = module_env->module_env.module;
- // Data structures for the parallel compilation.
-
- //-----------------------------------------------------------------------
- // For parallel compilation:
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units}.
- // 2) The main thread spawns {CompilationTask} instances which run on
- // the background threads.
- // 3.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {executed_units}.
- // 3.b) If {executed_units} contains a compilation unit, the main thread
- // dequeues it and finishes the compilation.
- // 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {CompilationTask} instances to finish.
- // 5) The main thread finishes the compilation.
-
- // Turn on the {CanonicalHandleScope} so that the background threads can
- // use the node cache.
- CanonicalHandleScope canonical(isolate_);
-
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units}.
- InitializeParallelCompilation(module->functions, *module_env);
-
- executed_units_.EnableThrottling();
-
- // 2) The main thread spawns {CompilationTask} instances which run on
- // the background threads.
- RestartCompilationTasks();
-
- size_t finished_functions = 0;
- while (finished_functions < compilation_units_.size()) {
- // 3.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {executed_units}.
- size_t index = 0;
- if (GetNextUncompiledFunctionId(&index)) {
- CompileAndSchedule(index);
- }
- // 3.b) If {executed_units} contains a compilation unit, the main thread
- // dequeues it and finishes the compilation unit. Compilation units
- // are finished concurrently to the background threads to save
- // memory.
- finished_functions += FinishCompilationUnits(results, thrower);
- }
- // 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {CompilationTask} instances to finish -
- // which happens once they all realize there's no next work item to
- // process.
- background_task_manager_.CancelAndWait();
- }
-
- void CompileSequentially(ModuleBytesEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower) {
- DCHECK(!thrower->error());
-
- const WasmModule* module = module_env->module_env.module;
- for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
- i < module->functions.size(); ++i) {
- const WasmFunction& func = module->functions[i];
- if (func.imported)
- continue; // Imports are compiled at instantiation time.
-
- // Compile the function.
- Handle<Code> code = compiler::WasmCompilationUnit::CompileWasmFunction(
- thrower, isolate_, module_env, &func);
- if (code.is_null()) {
- WasmName str = module_env->wire_bytes.GetName(&func);
- thrower->CompileError("Compilation of #%d:%.*s failed.", i,
- str.length(), str.start());
- break;
- }
- results[i] = code;
- }
- }
-
- MaybeHandle<WasmModuleObject> CompileToModuleObject(
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- Factory* factory = isolate_->factory();
- WasmInstance temp_instance(module_.get());
- temp_instance.context = isolate_->native_context();
- temp_instance.mem_size = WasmModule::kPageSize * module_->min_mem_pages;
- temp_instance.mem_start = nullptr;
- temp_instance.globals_start = nullptr;
-
- // Initialize the indirect tables with placeholders.
- int function_table_count =
- static_cast<int>(module_->function_tables.size());
- Handle<FixedArray> function_tables =
- factory->NewFixedArray(function_table_count, TENURED);
- Handle<FixedArray> signature_tables =
- factory->NewFixedArray(function_table_count, TENURED);
- for (int i = 0; i < function_table_count; ++i) {
- temp_instance.function_tables[i] = factory->NewFixedArray(1, TENURED);
- temp_instance.signature_tables[i] = factory->NewFixedArray(1, TENURED);
- function_tables->set(i, *temp_instance.function_tables[i]);
- signature_tables->set(i, *temp_instance.signature_tables[i]);
- }
-
- if (is_sync_) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- HistogramTimerScope wasm_compile_module_time_scope(
- module_->is_wasm()
- ? isolate_->counters()->wasm_compile_wasm_module_time()
- : isolate_->counters()->wasm_compile_asm_module_time());
- return CompileToModuleObjectInternal(
- thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes,
- factory, &temp_instance, &function_tables, &signature_tables);
- }
- return CompileToModuleObjectInternal(
- thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes, factory,
- &temp_instance, &function_tables, &signature_tables);
- }
-
- private:
- MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
- ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes, Factory* factory,
- WasmInstance* temp_instance, Handle<FixedArray>* function_tables,
- Handle<FixedArray>* signature_tables) {
- ModuleBytesEnv module_env(module_.get(), temp_instance, wire_bytes);
-
- // The {code_table} array contains import wrappers and functions (which
- // are both included in {functions.size()}), and export wrappers.
- int code_table_size = static_cast<int>(module_->functions.size() +
- module_->num_exported_functions);
- Handle<FixedArray> code_table =
- factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
-
- // Check whether lazy compilation is enabled for this module.
- bool lazy_compile = compile_lazy(module_.get());
-
- // If lazy compile: Initialize the code table with the lazy compile builtin.
- // Otherwise: Initialize with the illegal builtin. All call sites will be
- // patched at instantiation.
- Handle<Code> init_builtin = lazy_compile
- ? isolate_->builtins()->WasmCompileLazy()
- : isolate_->builtins()->Illegal();
- for (int i = 0, e = static_cast<int>(module_->functions.size()); i < e;
- ++i) {
- code_table->set(i, *init_builtin);
- temp_instance->function_code[i] = init_builtin;
- }
-
- if (is_sync_)
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- (module_->is_wasm()
- ? isolate_->counters()->wasm_functions_per_wasm_module()
- : isolate_->counters()->wasm_functions_per_asm_module())
- ->AddSample(static_cast<int>(module_->functions.size()));
-
- if (!lazy_compile) {
- size_t funcs_to_compile =
- module_->functions.size() - module_->num_imported_functions;
- if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0 &&
- funcs_to_compile > 1) {
- // Avoid a race condition by collecting results into a second vector.
- std::vector<Handle<Code>> results(temp_instance->function_code);
- CompileInParallel(&module_env, results, thrower);
- temp_instance->function_code.swap(results);
- } else {
- CompileSequentially(&module_env, temp_instance->function_code, thrower);
- }
- if (thrower->error()) return {};
- }
-
- // At this point, compilation has completed. Update the code table.
- for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < temp_instance->function_code.size(); ++i) {
- Code* code = *temp_instance->function_code[i];
- code_table->set(static_cast<int>(i), code);
- RecordStats(isolate_, code, is_sync_);
- }
-
- // Create heap objects for script, module bytes and asm.js offset table to
- // be stored in the shared module data.
- Handle<Script> script;
- Handle<ByteArray> asm_js_offset_table;
- if (asm_js_script.is_null()) {
- script = CreateWasmScript(isolate_, wire_bytes);
- } else {
- script = asm_js_script;
- asm_js_offset_table =
- isolate_->factory()->NewByteArray(asm_js_offset_table_bytes.length());
- asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
- asm_js_offset_table_bytes.length());
- }
- // TODO(wasm): only save the sections necessary to deserialize a
- // {WasmModule}. E.g. function bodies could be omitted.
- Handle<String> module_bytes =
- factory
- ->NewStringFromOneByte({wire_bytes.start(), wire_bytes.length()},
- TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
-
- // The {module_wrapper} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate_, module_.release());
- WasmModule* module = module_wrapper->get();
-
- // Create the shared module data.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmSharedModuleData. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
-
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate_, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
- script, asm_js_offset_table);
- if (lazy_compile) WasmSharedModuleData::PrepareForLazyCompilation(shared);
-
- // Create the compiled module object, and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of this
- // object.
- Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate_, shared, code_table, *function_tables, *signature_tables);
-
- // If we created a wasm script, finish it now and make it public to the
- // debugger.
- if (asm_js_script.is_null()) {
- script->set_wasm_compiled_module(*compiled_module);
- isolate_->debug()->OnAfterCompile(script);
- }
-
- // Compile JS->WASM wrappers for exported functions.
- JSToWasmWrapperCache js_to_wasm_cache;
- int func_index = 0;
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
- isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
- Handle<Code> wrapper_code =
- js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module,
- wasm_code, exp.index);
- int export_index =
- static_cast<int>(module->functions.size() + func_index);
- code_table->set(export_index, *wrapper_code);
- RecordStats(isolate_, *wrapper_code, is_sync_);
- func_index++;
- }
-
- return WasmModuleObject::New(isolate_, compiled_module);
- }
- size_t stopped_compilation_tasks_ = 0;
- base::Mutex tasks_mutex_;
-};
-
-CompilationHelper::CodeGenerationSchedule::CodeGenerationSchedule(
- base::RandomNumberGenerator* random_number_generator, size_t max_memory)
- : random_number_generator_(random_number_generator),
- max_memory_(max_memory) {
- DCHECK_NOT_NULL(random_number_generator_);
- DCHECK_GT(max_memory_, 0);
-}
-
-void CompilationHelper::CodeGenerationSchedule::Schedule(
- std::unique_ptr<compiler::WasmCompilationUnit>&& item) {
- size_t cost = item->memory_cost();
- schedule_.push_back(std::move(item));
- allocated_memory_.Increment(cost);
-}
-
-bool CompilationHelper::CodeGenerationSchedule::CanAcceptWork() const {
- return (!throttle_ || allocated_memory_.Value() <= max_memory_);
-}
-
-std::unique_ptr<compiler::WasmCompilationUnit>
-CompilationHelper::CodeGenerationSchedule::GetNext() {
- DCHECK(!IsEmpty());
- size_t index = GetRandomIndexInSchedule();
- auto ret = std::move(schedule_[index]);
- std::swap(schedule_[schedule_.size() - 1], schedule_[index]);
- schedule_.pop_back();
- allocated_memory_.Decrement(ret->memory_cost());
- return ret;
-}
-
-size_t CompilationHelper::CodeGenerationSchedule::GetRandomIndexInSchedule() {
- double factor = random_number_generator_->NextDouble();
- size_t index = (size_t)(factor * schedule_.size());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, schedule_.size());
- return index;
-}
-
-static void MemoryInstanceFinalizer(Isolate* isolate,
- WasmInstanceObject* instance) {
- DisallowHeapAllocation no_gc;
- // If the memory object is destroyed, nothing needs to be done here.
- if (!instance->has_memory_object()) return;
- Handle<WasmInstanceWrapper> instance_wrapper =
- handle(instance->instance_wrapper());
- DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
- DCHECK(instance_wrapper->has_instance());
- bool has_prev = instance_wrapper->has_previous();
- bool has_next = instance_wrapper->has_next();
- Handle<WasmMemoryObject> memory_object(instance->memory_object());
-
- if (!has_prev && !has_next) {
- memory_object->ResetInstancesLink(isolate);
- return;
- } else {
- Handle<WasmInstanceWrapper> next_wrapper, prev_wrapper;
- if (!has_prev) {
- Handle<WasmInstanceWrapper> next_wrapper =
- instance_wrapper->next_wrapper();
- next_wrapper->reset_previous_wrapper();
- // As this is the first link in the memory object, destroying
- // without updating the memory object would corrupt the instance chain in
- // the memory object.
- memory_object->set_instances_link(*next_wrapper);
- } else if (!has_next) {
- instance_wrapper->previous_wrapper()->reset_next_wrapper();
- } else {
- DCHECK(has_next && has_prev);
- Handle<WasmInstanceWrapper> prev_wrapper =
- instance_wrapper->previous_wrapper();
- Handle<WasmInstanceWrapper> next_wrapper =
- instance_wrapper->next_wrapper();
- prev_wrapper->set_next_wrapper(*next_wrapper);
- next_wrapper->set_previous_wrapper(*prev_wrapper);
- }
- // Reset to avoid dangling pointers
- instance_wrapper->reset();
- }
-}
-
static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
DisallowHeapAllocation no_gc;
JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
@@ -830,7 +95,6 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
// If a link to shared memory instances exists, update the list of memory
// instances before the instance is destroyed.
- if (owner->has_instance_wrapper()) MemoryInstanceFinalizer(isolate, owner);
WasmCompiledModule* compiled_module = owner->compiled_module();
TRACE("Finalizing %d {\n", compiled_module->instance_id());
DCHECK(compiled_module->has_weak_wasm_module());
@@ -848,13 +112,25 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
+ // Since the order of finalizers is not guaranteed, it can be the case
+ // that {instance->compiled_module()->module()}, which is a
+ // {Managed<WasmModule>}, has been collected earlier in this GC cycle.
+ // Weak references to this instance won't be cleared until
+ // the next GC cycle, so we need to manually break some links (such as
+ // the weak references from {WasmMemoryObject::instances}).
+ if (owner->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
+ Handle<WasmInstanceObject> instance(owner, isolate);
+ WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ }
+
// weak_wasm_module may have been cleared, meaning the module object
// was GC-ed. In that case, there won't be any new instances created,
// and we don't need to maintain the links between instances.
if (!weak_wasm_module->cleared()) {
- JSObject* wasm_module = JSObject::cast(weak_wasm_module->value());
- WasmCompiledModule* current_template =
- WasmCompiledModule::cast(wasm_module->GetEmbedderField(0));
+ WasmModuleObject* wasm_module =
+ WasmModuleObject::cast(weak_wasm_module->value());
+ WasmCompiledModule* current_template = wasm_module->compiled_module();
TRACE("chain before {\n");
TRACE_CHAIN(current_template);
@@ -868,10 +144,12 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
if (next == nullptr) {
WasmCompiledModule::Reset(isolate, compiled_module);
} else {
- DCHECK(next->value()->IsFixedArray());
- wasm_module->SetEmbedderField(0, next->value());
+ WasmCompiledModule* next_compiled_module =
+ WasmCompiledModule::cast(next->value());
+ WasmModuleObject::cast(wasm_module)
+ ->set_compiled_module(next_compiled_module);
DCHECK_NULL(prev);
- WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
+ next_compiled_module->reset_weak_prev_instance();
}
} else {
DCHECK(!(prev == nullptr && next == nullptr));
@@ -898,7 +176,7 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
TRACE("chain after {\n");
- TRACE_CHAIN(WasmCompiledModule::cast(wasm_module->GetEmbedderField(0)));
+ TRACE_CHAIN(wasm_module->compiled_module());
TRACE("}\n");
}
compiled_module->reset_weak_owning_instance();
@@ -927,25 +205,27 @@ int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
return static_cast<int>(call_idx);
}
-void RecordLazyCodeStats(Isolate* isolate, Code* code) {
- isolate->counters()->wasm_lazily_compiled_functions()->Increment();
- isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
- isolate->counters()->wasm_reloc_size()->Increment(
- code->relocation_info()->length());
+void RecordLazyCodeStats(Code* code, Counters* counters) {
+ counters->wasm_lazily_compiled_functions()->Increment();
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
} // namespace
-Handle<JSArrayBuffer> wasm::SetupArrayBuffer(Isolate* isolate,
- void* allocation_base,
- size_t allocation_length,
- void* backing_store, size_t size,
- bool is_external,
- bool enable_guard_regions) {
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+// static
+const WasmExceptionSig wasm::WasmException::empty_sig_(0, 0, nullptr);
+
+Handle<JSArrayBuffer> wasm::SetupArrayBuffer(
+ Isolate* isolate, void* allocation_base, size_t allocation_length,
+ void* backing_store, size_t size, bool is_external,
+ bool enable_guard_regions, SharedFlag shared) {
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(shared);
+ DCHECK_GE(kMaxInt, size);
+ if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
- allocation_length, backing_store,
- static_cast<int>(size));
+ allocation_length, backing_store, static_cast<int>(size),
+ shared);
buffer->set_is_neuterable(false);
buffer->set_is_wasm_buffer(true);
buffer->set_has_guard_region(enable_guard_regions);
@@ -953,8 +233,13 @@ Handle<JSArrayBuffer> wasm::SetupArrayBuffer(Isolate* isolate,
}
Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
- bool enable_guard_regions) {
- if (size > (FLAG_wasm_max_mem_pages * WasmModule::kPageSize)) {
+ bool enable_guard_regions,
+ SharedFlag shared) {
+ // Check against kMaxInt, since the byte length is stored as int in the
+ // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
+ // line, and we don't want to fail a CHECK then.
+ if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
+ size > kMaxInt) {
// TODO(titzer): lift restriction on maximum memory allocated here.
return Handle<JSArrayBuffer>::null();
}
@@ -966,7 +251,7 @@ Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
void* memory = TryAllocateBackingStore(isolate, size, enable_guard_regions,
allocation_base, allocation_length);
- if (memory == nullptr) {
+ if (size > 0 && memory == nullptr) {
return Handle<JSArrayBuffer>::null();
}
@@ -978,9 +263,9 @@ Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
}
#endif
- const bool is_external = false;
+ constexpr bool is_external = false;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, enable_guard_regions);
+ size, is_external, enable_guard_regions, shared);
}
void wasm::UnpackAndRegisterProtectedInstructions(
@@ -1021,7 +306,7 @@ void wasm::UnpackAndRegisterProtectedInstructions(
std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
- if (name.function_->name_offset > 0) {
+ if (name.function_->name.is_set()) {
if (name.name_.start()) {
os << ":";
os.write(name.name_.start(), name.name_.length());
@@ -1047,12 +332,10 @@ WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
}
WasmModule::WasmModule(std::unique_ptr<Zone> owned)
- : signature_zone(std::move(owned)), pending_tasks(new base::Semaphore(0)) {}
-
-namespace {
+ : signature_zone(std::move(owned)) {}
-WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
- Handle<Object> target) {
+WasmFunction* wasm::GetWasmFunctionForImportWrapper(Isolate* isolate,
+ Handle<Object> target) {
if (target->IsJSFunction()) {
Handle<JSFunction> func = Handle<JSFunction>::cast(target);
if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
@@ -1065,7 +348,7 @@ WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
return nullptr;
}
-static Handle<Code> UnwrapImportWrapper(Handle<Object> import_wrapper) {
+Handle<Code> wasm::UnwrapImportWrapper(Handle<Object> import_wrapper) {
Handle<JSFunction> func = Handle<JSFunction>::cast(import_wrapper);
Handle<Code> export_wrapper_code = handle(func->code());
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
@@ -1088,33 +371,14 @@ static Handle<Code> UnwrapImportWrapper(Handle<Object> import_wrapper) {
return handle(target);
}
UNREACHABLE();
- return Handle<Code>::null();
-}
-
-Handle<Code> CompileImportWrapper(Isolate* isolate, int index, FunctionSig* sig,
- Handle<JSReceiver> target,
- Handle<String> module_name,
- MaybeHandle<String> import_name,
- ModuleOrigin origin) {
- WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
- if (other_func) {
- if (!sig->Equals(other_func->sig)) return Handle<Code>::null();
- // Signature matched. Unwrap the JS->WASM wrapper and return the raw
- // WASM function code.
- return UnwrapImportWrapper(target);
- }
- // No wasm function or being debugged. Compile a new wrapper for the new
- // signature.
- return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
- module_name, import_name, origin);
}
-void UpdateDispatchTablesInternal(Isolate* isolate,
- Handle<FixedArray> dispatch_tables, int index,
- WasmFunction* function, Handle<Code> code) {
+void wasm::UpdateDispatchTables(Isolate* isolate,
+ Handle<FixedArray> dispatch_tables, int index,
+ WasmFunction* function, Handle<Code> code) {
DCHECK_EQ(0, dispatch_tables->length() % 4);
for (int i = 0; i < dispatch_tables->length(); i += 4) {
- int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
+ int table_index = Smi::ToInt(dispatch_tables->get(i + 1));
Handle<FixedArray> function_table(
FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
Handle<FixedArray> signature_table(
@@ -1135,1174 +399,31 @@ void UpdateDispatchTablesInternal(Isolate* isolate,
}
}
-} // namespace
-void wasm::UpdateDispatchTables(Isolate* isolate,
- Handle<FixedArray> dispatch_tables, int index,
- Handle<JSFunction> function) {
- if (function.is_null()) {
- UpdateDispatchTablesInternal(isolate, dispatch_tables, index, nullptr,
- Handle<Code>::null());
- } else {
- UpdateDispatchTablesInternal(
- isolate, dispatch_tables, index,
- GetWasmFunctionForImportWrapper(isolate, function),
- UnwrapImportWrapper(function));
- }
-}
-
-// A helper class to simplify instantiating a module from a compiled module.
-// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
-// etc.
-class InstantiationHelper {
- public:
- InstantiationHelper(Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory)
- : isolate_(isolate),
- module_(module_object->compiled_module()->module()),
- thrower_(thrower),
- module_object_(module_object),
- ffi_(ffi.is_null() ? Handle<JSReceiver>::null()
- : ffi.ToHandleChecked()),
- memory_(memory.is_null() ? Handle<JSArrayBuffer>::null()
- : memory.ToHandleChecked()) {}
-
- // Build an instance, in all of its glory.
- MaybeHandle<WasmInstanceObject> Build() {
- // Check that an imports argument was provided, if the module requires it.
- // No point in continuing otherwise.
- if (!module_->import_table.empty() && ffi_.is_null()) {
- thrower_->TypeError(
- "Imports argument must be present and must be an object");
- return {};
- }
-
- // Record build time into correct bucket, then build instance.
- HistogramTimerScope wasm_instantiate_module_time_scope(
- module_->is_wasm()
- ? isolate_->counters()->wasm_instantiate_wasm_module_time()
- : isolate_->counters()->wasm_instantiate_asm_module_time());
- Factory* factory = isolate_->factory();
-
- //--------------------------------------------------------------------------
- // Reuse the compiled module (if no owner), otherwise clone.
- //--------------------------------------------------------------------------
- Handle<FixedArray> code_table;
- // We keep around a copy of the old code table, because we'll be replacing
- // imports for the new instance, and then we need the old imports to be
- // able to relocate.
- Handle<FixedArray> old_code_table;
- MaybeHandle<WasmInstanceObject> owner;
-
- TRACE("Starting new module instantiation\n");
- {
- // Root the owner, if any, before doing any allocations, which
- // may trigger GC.
- // Both owner and original template need to be in sync. Even
- // after we lose the original template handle, the code
- // objects we copied from it have data relative to the
- // instance - such as globals addresses.
- Handle<WasmCompiledModule> original;
- {
- DisallowHeapAllocation no_gc;
- original = handle(module_object_->compiled_module());
- if (original->has_weak_owning_instance()) {
- owner = handle(WasmInstanceObject::cast(
- original->weak_owning_instance()->value()));
- }
- }
- DCHECK(!original.is_null());
- if (original->has_weak_owning_instance()) {
- // Clone, but don't insert yet the clone in the instances chain.
- // We do that last. Since we are holding on to the owner instance,
- // the owner + original state used for cloning and patching
- // won't be mutated by possible finalizer runs.
- DCHECK(!owner.is_null());
- TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = original->code_table();
- compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- code_table = compiled_module_->code_table();
- // Avoid creating too many handles in the outer scope.
- HandleScope scope(isolate_);
-
- // Clone the code for WASM functions and exports.
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> orig_code(Code::cast(code_table->get(i)), isolate_);
- switch (orig_code->kind()) {
- case Code::WASM_TO_JS_FUNCTION:
- // Imports will be overwritten with newly compiled wrappers.
- break;
- case Code::BUILTIN:
- DCHECK_EQ(Builtins::kWasmCompileLazy, orig_code->builtin_index());
- // If this code object has deoptimization data, then we need a
- // unique copy to attach updated deoptimization data.
- if (orig_code->deoptimization_data()->length() > 0) {
- Handle<Code> code = factory->CopyCode(orig_code);
- Handle<FixedArray> deopt_data =
- factory->NewFixedArray(2, TENURED);
- deopt_data->set(1, Smi::FromInt(i));
- code->set_deoptimization_data(*deopt_data);
- code_table->set(i, *code);
- }
- break;
- case Code::JS_TO_WASM_FUNCTION:
- case Code::WASM_FUNCTION: {
- Handle<Code> code = factory->CopyCode(orig_code);
- code_table->set(i, *code);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- RecordStats(isolate_, code_table, is_sync_);
- } else {
- // There was no owner, so we can reuse the original.
- compiled_module_ = original;
- old_code_table =
- factory->CopyFixedArray(compiled_module_->code_table());
- code_table = compiled_module_->code_table();
- TRACE("Reusing existing instance %d\n",
- compiled_module_->instance_id());
- }
- compiled_module_->set_native_context(isolate_->native_context());
- }
-
- //--------------------------------------------------------------------------
- // Allocate the instance object.
- //--------------------------------------------------------------------------
- Zone instantiation_zone(isolate_->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate_, &instantiation_zone);
- Handle<WasmInstanceObject> instance =
- WasmInstanceObject::New(isolate_, compiled_module_);
-
- //--------------------------------------------------------------------------
- // Set up the globals for the new instance.
- //--------------------------------------------------------------------------
- MaybeHandle<JSArrayBuffer> old_globals;
- uint32_t globals_size = module_->globals_size;
- if (globals_size > 0) {
- const bool enable_guard_regions = false;
- Handle<JSArrayBuffer> global_buffer =
- NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
- globals_ = global_buffer;
- if (globals_.is_null()) {
- thrower_->RangeError("Out of memory: wasm globals");
- return {};
- }
- Address old_globals_start = compiled_module_->GetGlobalsStartOrNull();
- Address new_globals_start =
- static_cast<Address>(global_buffer->backing_store());
- code_specialization.RelocateGlobals(old_globals_start, new_globals_start);
- // The address of the backing buffer for the globals is in native memory
- // and, thus, not moving. We need it saved for
- // serialization/deserialization purposes - so that the other end
- // understands how to relocate the references. We still need to save the
- // JSArrayBuffer on the instance, to keep it all alive.
- WasmCompiledModule::SetGlobalsStartAddressFrom(factory, compiled_module_,
- global_buffer);
- instance->set_globals_buffer(*global_buffer);
- }
-
- //--------------------------------------------------------------------------
- // Prepare for initialization of function tables.
- //--------------------------------------------------------------------------
- int function_table_count =
- static_cast<int>(module_->function_tables.size());
- table_instances_.reserve(module_->function_tables.size());
- for (int index = 0; index < function_table_count; ++index) {
- table_instances_.push_back(
- {Handle<WasmTableObject>::null(), Handle<FixedArray>::null(),
- Handle<FixedArray>::null(), Handle<FixedArray>::null()});
- }
-
- //--------------------------------------------------------------------------
- // Process the imports for the module.
- //--------------------------------------------------------------------------
- int num_imported_functions = ProcessImports(code_table, instance);
- if (num_imported_functions < 0) return {};
-
- //--------------------------------------------------------------------------
- // Process the initialization for the module's globals.
- //--------------------------------------------------------------------------
- InitGlobals();
-
- //--------------------------------------------------------------------------
- // Set up the indirect function tables for the new instance.
- //--------------------------------------------------------------------------
- if (function_table_count > 0)
- InitializeTables(instance, &code_specialization);
-
- //--------------------------------------------------------------------------
- // Set up the memory for the new instance.
- //--------------------------------------------------------------------------
- uint32_t min_mem_pages = module_->min_mem_pages;
- (module_->is_wasm() ? isolate_->counters()->wasm_wasm_min_mem_pages_count()
- : isolate_->counters()->wasm_asm_min_mem_pages_count())
- ->AddSample(min_mem_pages);
-
- if (!memory_.is_null()) {
- // Set externally passed ArrayBuffer non-neuterable.
- memory_->set_is_neuterable(false);
- memory_->set_is_wasm_buffer(true);
-
- DCHECK_IMPLIES(EnableGuardRegions(),
- module_->is_asm_js() || memory_->has_guard_region());
- } else if (min_mem_pages > 0) {
- memory_ = AllocateMemory(min_mem_pages);
- if (memory_.is_null()) return {}; // failed to allocate memory
- }
+void wasm::TableSet(ErrorThrower* thrower, Isolate* isolate,
+ Handle<WasmTableObject> table, int32_t index,
+ Handle<JSFunction> function) {
+ Handle<FixedArray> array(table->functions(), isolate);
- //--------------------------------------------------------------------------
- // Check that indirect function table segments are within bounds.
- //--------------------------------------------------------------------------
- for (WasmTableInit& table_init : module_->table_inits) {
- DCHECK(table_init.table_index < table_instances_.size());
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- uint32_t table_size =
- table_instances_[table_init.table_index].function_table->length();
- if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
- table_size)) {
- thrower_->LinkError("table initializer is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Check that memory segments are within bounds.
- //--------------------------------------------------------------------------
- for (WasmDataSegment& seg : module_->data_segments) {
- uint32_t base = EvalUint32InitExpr(seg.dest_addr);
- uint32_t mem_size = memory_.is_null()
- ? 0 : static_cast<uint32_t>(memory_->byte_length()->Number());
- if (!in_bounds(base, seg.source_size, mem_size)) {
- thrower_->LinkError("data segment is out of bounds");
- return {};
- }
- }
-
- //--------------------------------------------------------------------------
- // Initialize memory.
- //--------------------------------------------------------------------------
- if (!memory_.is_null()) {
- Address mem_start = static_cast<Address>(memory_->backing_store());
- uint32_t mem_size =
- static_cast<uint32_t>(memory_->byte_length()->Number());
- LoadDataSegments(mem_start, mem_size);
-
- uint32_t old_mem_size = compiled_module_->mem_size();
- Address old_mem_start = compiled_module_->GetEmbeddedMemStartOrNull();
- // We might get instantiated again with the same memory. No patching
- // needed in this case.
- if (old_mem_start != mem_start || old_mem_size != mem_size) {
- code_specialization.RelocateMemoryReferences(
- old_mem_start, old_mem_size, mem_start, mem_size);
- }
- // Just like with globals, we need to keep both the JSArrayBuffer
- // and save the start pointer.
- instance->set_memory_buffer(*memory_);
- WasmCompiledModule::SetSpecializationMemInfoFrom(
- factory, compiled_module_, memory_);
- }
-
- //--------------------------------------------------------------------------
- // Set up the runtime support for the new instance.
- //--------------------------------------------------------------------------
- Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
-
- for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs,
- num_functions = static_cast<int>(module_->functions.size());
- i < num_functions; ++i) {
- Handle<Code> code = handle(Code::cast(code_table->get(i)), isolate_);
- if (code->kind() == Code::WASM_FUNCTION) {
- Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
- deopt_data->set(0, *weak_link);
- deopt_data->set(1, Smi::FromInt(i));
- code->set_deoptimization_data(*deopt_data);
- continue;
- }
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
- if (code->deoptimization_data()->length() == 0) continue;
- DCHECK_LE(2, code->deoptimization_data()->length());
- DCHECK_EQ(i, Smi::cast(code->deoptimization_data()->get(1))->value());
- code->deoptimization_data()->set(0, *weak_link);
- }
-
- //--------------------------------------------------------------------------
- // Set up the exports object for the new instance.
- //--------------------------------------------------------------------------
- ProcessExports(code_table, instance, compiled_module_);
-
- //--------------------------------------------------------------------------
- // Add instance to Memory object
- //--------------------------------------------------------------------------
- DCHECK(wasm::IsWasmInstance(*instance));
- if (instance->has_memory_object()) {
- instance->memory_object()->AddInstance(isolate_, instance);
- }
-
- //--------------------------------------------------------------------------
- // Initialize the indirect function tables.
- //--------------------------------------------------------------------------
- if (function_table_count > 0) LoadTableSegments(code_table, instance);
-
- // Patch all code with the relocations registered in code_specialization.
- code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
-
- FlushICache(isolate_, code_table);
-
- //--------------------------------------------------------------------------
- // Unpack and notify signal handler of protected instructions.
- //--------------------------------------------------------------------------
- if (trap_handler::UseTrapHandler()) {
- UnpackAndRegisterProtectedInstructions(isolate_, code_table);
- }
-
- //--------------------------------------------------------------------------
- // Set up and link the new instance.
- //--------------------------------------------------------------------------
- {
- Handle<Object> global_handle =
- isolate_->global_handles()->Create(*instance);
- Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module_);
- Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
- MaybeHandle<WeakCell> link_to_original;
- MaybeHandle<WasmCompiledModule> original;
- if (!owner.is_null()) {
- // prepare the data needed for publishing in a chain, but don't link
- // just yet, because
- // we want all the publishing to happen free from GC interruptions, and
- // so we do it in
- // one GC-free scope afterwards.
- original = handle(owner.ToHandleChecked()->compiled_module());
- link_to_original = factory->NewWeakCell(original.ToHandleChecked());
- }
- // Publish the new instance to the instances chain.
- {
- DisallowHeapAllocation no_gc;
- if (!link_to_original.is_null()) {
- compiled_module_->set_weak_next_instance(
- link_to_original.ToHandleChecked());
- original.ToHandleChecked()->set_weak_prev_instance(link_to_clone);
- compiled_module_->set_weak_wasm_module(
- original.ToHandleChecked()->weak_wasm_module());
- }
- module_object_->SetEmbedderField(0, *compiled_module_);
- compiled_module_->set_weak_owning_instance(link_to_owning_instance);
- GlobalHandles::MakeWeak(global_handle.location(),
- global_handle.location(), &InstanceFinalizer,
- v8::WeakCallbackType::kFinalizer);
- }
- }
-
- //--------------------------------------------------------------------------
- // Debugging support.
- //--------------------------------------------------------------------------
- // Set all breakpoints that were set on the shared module.
- WasmSharedModuleData::SetBreakpointsOnNewInstance(
- compiled_module_->shared(), instance);
-
- if (FLAG_wasm_interpret_all && module_->is_wasm()) {
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(instance);
- std::vector<int> func_indexes;
- for (int func_index = num_imported_functions,
- num_wasm_functions = static_cast<int>(module_->functions.size());
- func_index < num_wasm_functions; ++func_index) {
- func_indexes.push_back(func_index);
- }
- WasmDebugInfo::RedirectToInterpreter(
- debug_info, Vector<int>(func_indexes.data(),
- static_cast<int>(func_indexes.size())));
- }
-
- //--------------------------------------------------------------------------
- // Run the start function if one was specified.
- //--------------------------------------------------------------------------
- if (module_->start_function_index >= 0) {
- HandleScope scope(isolate_);
- int start_index = module_->start_function_index;
- Handle<Code> startup_code = EnsureExportedLazyDeoptData(
- isolate_, instance, code_table, start_index);
- FunctionSig* sig = module_->functions[start_index].sig;
- Handle<Code> wrapper_code =
- js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index);
- Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
- isolate_, instance, MaybeHandle<String>(), start_index,
- static_cast<int>(sig->parameter_count()), wrapper_code);
- RecordStats(isolate_, *startup_code, is_sync_);
- // Call the JS function.
- Handle<Object> undefined = factory->undefined_value();
- MaybeHandle<Object> retval =
- Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- DCHECK(isolate_->has_pending_exception());
- isolate_->OptionalRescheduleException(false);
- // It's unfortunate that the new instance is already linked in the
- // chain. However, we need to set up everything before executing the
- // start function, such that stack trace information can be generated
- // correctly already in the start function.
- return {};
- }
- }
-
- DCHECK(!isolate_->has_pending_exception());
- TRACE("Finishing instance %d\n", compiled_module_->instance_id());
- TRACE_CHAIN(module_object_->compiled_module());
- return instance;
- }
-
- private:
- // Represents the initialized state of a table.
- struct TableInstance {
- Handle<WasmTableObject> table_object; // WebAssembly.Table instance
- Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> function_table; // internal code array
- Handle<FixedArray> signature_table; // internal sig array
- };
-
- Isolate* isolate_;
- WasmModule* const module_;
- constexpr static bool is_sync_ = true;
- ErrorThrower* thrower_;
- Handle<WasmModuleObject> module_object_;
- Handle<JSReceiver> ffi_; // TODO(titzer): Use MaybeHandle
- Handle<JSArrayBuffer> memory_; // TODO(titzer): Use MaybeHandle
- Handle<JSArrayBuffer> globals_;
- Handle<WasmCompiledModule> compiled_module_;
- std::vector<TableInstance> table_instances_;
- std::vector<Handle<JSFunction>> js_wrappers_;
- JSToWasmWrapperCache js_to_wasm_cache_;
-
-// Helper routines to print out errors with imports.
-#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
- void Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name, Handle<String> import_name) { \
- thrower_->TYPE("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", \
- index, module_name->length(), \
- module_name->ToCString().get(), import_name->length(), \
- import_name->ToCString().get(), error); \
- } \
- \
- MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
- Handle<String> module_name) { \
- thrower_->TYPE("Import #%d module=\"%.*s\" error: %s", index, \
- module_name->length(), module_name->ToCString().get(), \
- error); \
- return MaybeHandle<Object>(); \
- }
-
- ERROR_THROWER_WITH_MESSAGE(LinkError)
- ERROR_THROWER_WITH_MESSAGE(TypeError)
-
- // Look up an import value in the {ffi_} object.
- MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
- Handle<String> import_name) {
- // We pre-validated in the js-api layer that the ffi object is present, and
- // a JSObject, if the module has imports.
- DCHECK(!ffi_.is_null());
-
- // Look up the module first.
- MaybeHandle<Object> result =
- Object::GetPropertyOrElement(ffi_, module_name);
- if (result.is_null()) {
- return ReportTypeError("module not found", index, module_name);
- }
-
- Handle<Object> module = result.ToHandleChecked();
-
- // Look up the value in the module.
- if (!module->IsJSReceiver()) {
- return ReportTypeError("module is not an object or function", index,
- module_name);
- }
-
- result = Object::GetPropertyOrElement(module, import_name);
- if (result.is_null()) {
- ReportLinkError("import not found", index, module_name, import_name);
- return MaybeHandle<JSFunction>();
- }
-
- return result;
- }
-
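// Editor's note (not part of this diff): the two-step GetPropertyOrElement
// lookup above corresponds to the JS-API access importObject[module][name],
// e.g. for an imports object shaped like { env: { log: someCallable } } the
// module_name is "env" and the import_name is "log".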
- uint32_t EvalUint32InitExpr(const WasmInitExpr& expr) {
- switch (expr.kind) {
- case WasmInitExpr::kI32Const:
- return expr.val.i32_const;
- case WasmInitExpr::kGlobalIndex: {
- uint32_t offset = module_->globals[expr.val.global_index].offset;
- return *reinterpret_cast<uint32_t*>(raw_buffer_ptr(globals_, offset));
- }
- default:
- UNREACHABLE();
- return 0;
- }
- }
-
- bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
- return offset + size <= upper && offset + size >= offset;
- }
-
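// Editor's note (not part of this diff): the second clause of in_bounds()
// guards against uint32_t wrap-around. Example: on a 64 KiB (0x10000) memory,
// offset = 0xFFFFFFF0 and size = 0x20 wrap to 0x10, which alone would pass
// "<= upper", but "offset + size >= offset" fails, so the access is rejected.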
- // Load data segments into the memory.
- void LoadDataSegments(Address mem_addr, size_t mem_size) {
- Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
- isolate_);
- for (const WasmDataSegment& segment : module_->data_segments) {
- uint32_t source_size = segment.source_size;
- // Segments of size == 0 are just nops.
- if (source_size == 0) continue;
- uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
- DCHECK(in_bounds(dest_offset, source_size,
- static_cast<uint32_t>(mem_size)));
- byte* dest = mem_addr + dest_offset;
- const byte* src = reinterpret_cast<const byte*>(
- module_bytes->GetCharsAddress() + segment.source_offset);
- memcpy(dest, src, source_size);
- }
- }
-
- void WriteGlobalValue(WasmGlobal& global, Handle<Object> value) {
- double num = value->Number();
- TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
- WasmOpcodes::TypeName(global.type));
- switch (global.type) {
- case kWasmI32:
- *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
- break;
- case kWasmI64:
- // TODO(titzer): initialization of imported i64 globals.
- UNREACHABLE();
- break;
- case kWasmF32:
- *GetRawGlobalPtr<float>(global) = static_cast<float>(num);
- break;
- case kWasmF64:
- *GetRawGlobalPtr<double>(global) = static_cast<double>(num);
- break;
- default:
- UNREACHABLE();
- }
- }
-
- // Process the imports, including functions, tables, globals, and memory, in
- // order, loading them from the {ffi_} object. Returns the number of imported
- // functions.
- int ProcessImports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance) {
- int num_imported_functions = 0;
- int num_imported_tables = 0;
- for (int index = 0; index < static_cast<int>(module_->import_table.size());
- ++index) {
- WasmImport& import = module_->import_table[index];
-
- Handle<String> module_name;
- MaybeHandle<String> maybe_module_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, import.module_name_offset,
- import.module_name_length);
- if (!maybe_module_name.ToHandle(&module_name)) return -1;
-
- Handle<String> import_name;
- MaybeHandle<String> maybe_import_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, import.field_name_offset,
- import.field_name_length);
- if (!maybe_import_name.ToHandle(&import_name)) return -1;
-
- MaybeHandle<Object> result =
- LookupImport(index, module_name, import_name);
- if (thrower_->error()) return -1;
- Handle<Object> value = result.ToHandleChecked();
-
- switch (import.kind) {
- case kExternalFunction: {
- // Function imports must be callable.
- if (!value->IsCallable()) {
- ReportLinkError("function import requires a callable", index,
- module_name, import_name);
- return -1;
- }
-
- Handle<Code> import_wrapper = CompileImportWrapper(
- isolate_, index, module_->functions[import.index].sig,
- Handle<JSReceiver>::cast(value), module_name, import_name,
- module_->get_origin());
- if (import_wrapper.is_null()) {
- ReportLinkError(
- "imported function does not match the expected type", index,
- module_name, import_name);
- return -1;
- }
- code_table->set(num_imported_functions, *import_wrapper);
- RecordStats(isolate_, *import_wrapper, is_sync_);
- num_imported_functions++;
- break;
- }
- case kExternalTable: {
- if (!WasmJs::IsWasmTableObject(isolate_, value)) {
- ReportLinkError("table import requires a WebAssembly.Table", index,
- module_name, import_name);
- return -1;
- }
- WasmIndirectFunctionTable& table =
- module_->function_tables[num_imported_tables];
- TableInstance& table_instance = table_instances_[num_imported_tables];
- table_instance.table_object = Handle<WasmTableObject>::cast(value);
- table_instance.js_wrappers = Handle<FixedArray>(
- table_instance.table_object->functions(), isolate_);
-
- int imported_cur_size = table_instance.js_wrappers->length();
- if (imported_cur_size < static_cast<int>(table.min_size)) {
- thrower_->LinkError(
- "table import %d is smaller than minimum %d, got %u", index,
- table.min_size, imported_cur_size);
- return -1;
- }
-
- if (table.has_max) {
- int64_t imported_max_size =
- table_instance.table_object->maximum_length();
- if (imported_max_size < 0) {
- thrower_->LinkError(
- "table import %d has no maximum length, expected %d", index,
- table.max_size);
- return -1;
- }
- if (imported_max_size > table.max_size) {
- thrower_->LinkError(
- "table import %d has maximum larger than maximum %d, "
- "got %" PRIx64,
- index, table.max_size, imported_max_size);
- return -1;
- }
- }
-
- // Allocate a new dispatch table and signature table.
- int table_size = imported_cur_size;
- table_instance.function_table =
- isolate_->factory()->NewFixedArray(table_size);
- table_instance.signature_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = 0; i < table_size; ++i) {
- table_instance.signature_table->set(i,
- Smi::FromInt(kInvalidSigIndex));
- }
- // Initialize the dispatch table with the (foreign) JS functions
- // that are already in the table.
- for (int i = 0; i < table_size; ++i) {
- Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
- if (!val->IsJSFunction()) continue;
- WasmFunction* function =
- GetWasmFunctionForImportWrapper(isolate_, val);
- if (function == nullptr) {
- thrower_->LinkError("table import %d[%d] is not a WASM function",
- index, i);
- return -1;
- }
- int sig_index = table.map.FindOrInsert(function->sig);
- table_instance.signature_table->set(i, Smi::FromInt(sig_index));
- table_instance.function_table->set(i, *UnwrapImportWrapper(val));
- }
-
- num_imported_tables++;
- break;
- }
- case kExternalMemory: {
- // Validation should have failed if more than one memory object was
- // provided.
- DCHECK(!instance->has_memory_object());
- if (!WasmJs::IsWasmMemoryObject(isolate_, value)) {
- ReportLinkError("memory import must be a WebAssembly.Memory object",
- index, module_name, import_name);
- return -1;
- }
- auto memory = Handle<WasmMemoryObject>::cast(value);
- DCHECK(WasmJs::IsWasmMemoryObject(isolate_, memory));
- instance->set_memory_object(*memory);
- memory_ = Handle<JSArrayBuffer>(memory->buffer(), isolate_);
- uint32_t imported_cur_pages = static_cast<uint32_t>(
- memory_->byte_length()->Number() / WasmModule::kPageSize);
- if (imported_cur_pages < module_->min_mem_pages) {
-            thrower_->LinkError(
-                "memory import %d is smaller than minimum %u, got %u", index,
-                module_->min_mem_pages, imported_cur_pages);
- }
- int32_t imported_max_pages = memory->maximum_pages();
- if (module_->has_max_mem) {
- if (imported_max_pages < 0) {
- thrower_->LinkError(
- "memory import %d has no maximum limit, expected at most %u",
- index, imported_max_pages);
- return -1;
- }
- if (static_cast<uint32_t>(imported_max_pages) >
- module_->max_mem_pages) {
- thrower_->LinkError(
- "memory import %d has larger maximum than maximum %u, got %d",
- index, module_->max_mem_pages, imported_max_pages);
- return -1;
- }
- }
- break;
- }
- case kExternalGlobal: {
- // Global imports are converted to numbers and written into the
- // {globals_} array buffer.
- if (module_->globals[import.index].type == kWasmI64) {
- ReportLinkError("global import cannot have type i64", index,
- module_name, import_name);
- return -1;
- }
- if (module_->is_asm_js()) {
- if (module_->globals[import.index].type == kWasmI32) {
- value = Object::ToInt32(isolate_, value).ToHandleChecked();
- } else {
- value = Object::ToNumber(value).ToHandleChecked();
- }
- }
- if (!value->IsNumber()) {
- ReportLinkError("global import must be a number", index,
- module_name, import_name);
- return -1;
- }
- WriteGlobalValue(module_->globals[import.index], value);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
- return num_imported_functions;
- }
-
- template <typename T>
- T* GetRawGlobalPtr(WasmGlobal& global) {
- return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
- }
-
- // Process initialization of globals.
- void InitGlobals() {
- for (auto global : module_->globals) {
- switch (global.init.kind) {
- case WasmInitExpr::kI32Const:
- *GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const;
- break;
- case WasmInitExpr::kI64Const:
- *GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const;
- break;
- case WasmInitExpr::kF32Const:
- *GetRawGlobalPtr<float>(global) = global.init.val.f32_const;
- break;
- case WasmInitExpr::kF64Const:
- *GetRawGlobalPtr<double>(global) = global.init.val.f64_const;
- break;
- case WasmInitExpr::kGlobalIndex: {
- // Initialize with another global.
- uint32_t new_offset = global.offset;
- uint32_t old_offset =
- module_->globals[global.init.val.global_index].offset;
- TRACE("init [globals+%u] = [globals+%d]\n", global.offset,
- old_offset);
- size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
- ? sizeof(double)
- : sizeof(int32_t);
- memcpy(raw_buffer_ptr(globals_, new_offset),
- raw_buffer_ptr(globals_, old_offset), size);
- break;
- }
- case WasmInitExpr::kNone:
- // Happens with imported globals.
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
-
- // Allocate memory for a module instance as a new JSArrayBuffer.
- Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages) {
- if (min_mem_pages > FLAG_wasm_max_mem_pages) {
- thrower_->RangeError("Out of memory: wasm memory too large");
- return Handle<JSArrayBuffer>::null();
- }
- const bool enable_guard_regions = EnableGuardRegions();
- Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
- isolate_, min_mem_pages * WasmModule::kPageSize, enable_guard_regions);
-
- if (mem_buffer.is_null()) {
- thrower_->RangeError("Out of memory: wasm memory");
- }
- return mem_buffer;
- }
-
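// Editor's note (not part of this diff): wasm memory is sized in 64 KiB
// pages, so e.g. min_mem_pages == 16 yields a backing JSArrayBuffer of
// 16 * WasmModule::kPageSize == 16 * 65536 bytes == 1 MiB, subject to the
// FLAG_wasm_max_mem_pages limit checked above.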
- bool NeedsWrappers() {
- if (module_->num_exported_functions > 0) return true;
- for (auto table_instance : table_instances_) {
- if (!table_instance.js_wrappers.is_null()) return true;
- }
- for (auto table : module_->function_tables) {
- if (table.exported) return true;
- }
- return false;
- }
-
- // Process the exports, creating wrappers for functions, tables, memories,
- // and globals.
- void ProcessExports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance,
- Handle<WasmCompiledModule> compiled_module) {
- if (NeedsWrappers()) {
- // Fill the table to cache the exported JSFunction wrappers.
- js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
- Handle<JSFunction>::null());
- }
-
- Handle<JSObject> exports_object;
- if (module_->is_wasm()) {
- // Create the "exports" object.
- exports_object = isolate_->factory()->NewJSObjectWithNullProto();
- } else if (module_->is_asm_js()) {
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate_->native_context()->object_function(), isolate_);
- exports_object = isolate_->factory()->NewJSObject(object_function);
- } else {
- UNREACHABLE();
- }
- Handle<String> exports_name =
- isolate_->factory()->InternalizeUtf8String("exports");
- JSObject::AddProperty(instance, exports_name, exports_object, NONE);
-
- Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-
- PropertyDescriptor desc;
- desc.set_writable(module_->is_asm_js());
- desc.set_enumerable(true);
- desc.set_configurable(module_->is_asm_js());
-
- // Store weak references to all exported functions.
- Handle<FixedArray> weak_exported_functions;
- if (compiled_module->has_weak_exported_functions()) {
- weak_exported_functions = compiled_module->weak_exported_functions();
- } else {
- int export_count = 0;
- for (WasmExport& exp : module_->export_table) {
- if (exp.kind == kExternalFunction) ++export_count;
- }
- weak_exported_functions =
- isolate_->factory()->NewFixedArray(export_count);
- compiled_module->set_weak_exported_functions(weak_exported_functions);
- }
-
- // Process each export in the export table.
- int export_index = 0; // Index into {weak_exported_functions}.
- for (WasmExport& exp : module_->export_table) {
- Handle<String> name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, exp.name_offset, exp.name_length)
- .ToHandleChecked();
- Handle<JSObject> export_to;
- if (module_->is_asm_js() && exp.kind == kExternalFunction &&
- String::Equals(name, single_function_name)) {
- export_to = instance;
- } else {
- export_to = exports_object;
- }
-
- switch (exp.kind) {
- case kExternalFunction: {
- // Wrap and export the code as a JSFunction.
- WasmFunction& function = module_->functions[exp.index];
- int func_index =
- static_cast<int>(module_->functions.size() + export_index);
- Handle<JSFunction> js_function = js_wrappers_[exp.index];
- if (js_function.is_null()) {
- // Wrap the exported code as a JSFunction.
- Handle<Code> export_code =
- code_table->GetValueChecked<Code>(isolate_, func_index);
- MaybeHandle<String> func_name;
- if (module_->is_asm_js()) {
- // For modules arising from asm.js, honor the names section.
- func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, function.name_offset,
- function.name_length)
- .ToHandleChecked();
- }
- js_function = WasmExportedFunction::New(
- isolate_, instance, func_name, function.func_index,
- static_cast<int>(function.sig->parameter_count()), export_code);
- js_wrappers_[exp.index] = js_function;
- }
- desc.set_value(js_function);
- Handle<WeakCell> weak_export =
- isolate_->factory()->NewWeakCell(js_function);
- DCHECK_GT(weak_exported_functions->length(), export_index);
- weak_exported_functions->set(export_index, *weak_export);
- export_index++;
- break;
- }
- case kExternalTable: {
- // Export a table as a WebAssembly.Table object.
- TableInstance& table_instance = table_instances_[exp.index];
- WasmIndirectFunctionTable& table =
- module_->function_tables[exp.index];
- if (table_instance.table_object.is_null()) {
- uint32_t maximum =
- table.has_max ? table.max_size : FLAG_wasm_max_table_size;
- table_instance.table_object = WasmTableObject::New(
- isolate_, table.min_size, maximum, &table_instance.js_wrappers);
- }
- desc.set_value(table_instance.table_object);
- break;
- }
- case kExternalMemory: {
- // Export the memory as a WebAssembly.Memory object.
- Handle<WasmMemoryObject> memory_object;
- if (!instance->has_memory_object()) {
- // If there was no imported WebAssembly.Memory object, create one.
- memory_object = WasmMemoryObject::New(
- isolate_,
- (instance->has_memory_buffer())
- ? handle(instance->memory_buffer())
- : Handle<JSArrayBuffer>::null(),
- (module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
- instance->set_memory_object(*memory_object);
- } else {
- memory_object =
- Handle<WasmMemoryObject>(instance->memory_object(), isolate_);
- DCHECK(WasmJs::IsWasmMemoryObject(isolate_, memory_object));
- memory_object->ResetInstancesLink(isolate_);
- }
-
- desc.set_value(memory_object);
- break;
- }
- case kExternalGlobal: {
- // Export the value of the global variable as a number.
- WasmGlobal& global = module_->globals[exp.index];
- double num = 0;
- switch (global.type) {
- case kWasmI32:
- num = *GetRawGlobalPtr<int32_t>(global);
- break;
- case kWasmF32:
- num = *GetRawGlobalPtr<float>(global);
- break;
- case kWasmF64:
- num = *GetRawGlobalPtr<double>(global);
- break;
- case kWasmI64:
- thrower_->LinkError(
- "export of globals of type I64 is not allowed.");
- break;
- default:
- UNREACHABLE();
- }
- desc.set_value(isolate_->factory()->NewNumber(num));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate_, export_to, name, &desc, Object::THROW_ON_ERROR);
- if (!status.IsJust()) {
- thrower_->LinkError("export of %.*s failed.", name->length(),
- name->ToCString().get());
- return;
- }
- }
- DCHECK_EQ(export_index, weak_exported_functions->length());
-
- if (module_->is_wasm()) {
- v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
- exports_object, FROZEN, Object::DONT_THROW);
- DCHECK(success.FromMaybe(false));
- USE(success);
- }
- }
-
- void InitializeTables(Handle<WasmInstanceObject> instance,
- CodeSpecialization* code_specialization) {
- int function_table_count =
- static_cast<int>(module_->function_tables.size());
- Handle<FixedArray> new_function_tables =
- isolate_->factory()->NewFixedArray(function_table_count);
- Handle<FixedArray> new_signature_tables =
- isolate_->factory()->NewFixedArray(function_table_count);
- for (int index = 0; index < function_table_count; ++index) {
- WasmIndirectFunctionTable& table = module_->function_tables[index];
- TableInstance& table_instance = table_instances_[index];
- int table_size = static_cast<int>(table.min_size);
-
- if (table_instance.function_table.is_null()) {
- // Create a new dispatch table if necessary.
- table_instance.function_table =
- isolate_->factory()->NewFixedArray(table_size);
- table_instance.signature_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = 0; i < table_size; ++i) {
- // Fill the table with invalid signature indexes so that
- // uninitialized entries will always fail the signature check.
- table_instance.signature_table->set(i,
- Smi::FromInt(kInvalidSigIndex));
- }
- } else {
- // Table is imported, patch table bounds check
- DCHECK(table_size <= table_instance.function_table->length());
- if (table_size < table_instance.function_table->length()) {
- code_specialization->PatchTableSize(
- table_size, table_instance.function_table->length());
- }
- }
-
- new_function_tables->set(static_cast<int>(index),
- *table_instance.function_table);
- new_signature_tables->set(static_cast<int>(index),
- *table_instance.signature_table);
- }
-
- FixedArray* old_function_tables =
- compiled_module_->ptr_to_function_tables();
- DCHECK_EQ(old_function_tables->length(), new_function_tables->length());
- for (int i = 0, e = new_function_tables->length(); i < e; ++i) {
- code_specialization->RelocateObject(
- handle(old_function_tables->get(i), isolate_),
- handle(new_function_tables->get(i), isolate_));
- }
- FixedArray* old_signature_tables =
- compiled_module_->ptr_to_signature_tables();
- DCHECK_EQ(old_signature_tables->length(), new_signature_tables->length());
- for (int i = 0, e = new_signature_tables->length(); i < e; ++i) {
- code_specialization->RelocateObject(
- handle(old_signature_tables->get(i), isolate_),
- handle(new_signature_tables->get(i), isolate_));
- }
-
- compiled_module_->set_function_tables(new_function_tables);
- compiled_module_->set_signature_tables(new_signature_tables);
+ if (index < 0 || index >= array->length()) {
+ thrower->RangeError("index out of bounds");
+ return;
}
- void LoadTableSegments(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance) {
- int function_table_count =
- static_cast<int>(module_->function_tables.size());
- for (int index = 0; index < function_table_count; ++index) {
- WasmIndirectFunctionTable& table = module_->function_tables[index];
- TableInstance& table_instance = table_instances_[index];
-
- Handle<FixedArray> all_dispatch_tables;
- if (!table_instance.table_object.is_null()) {
- // Get the existing dispatch table(s) with the WebAssembly.Table object.
- all_dispatch_tables =
- handle(table_instance.table_object->dispatch_tables());
- }
-
- // Count the number of table exports for each function (needed for lazy
- // compilation).
- std::unordered_map<uint32_t, uint32_t> num_table_exports;
- if (compile_lazy(module_)) {
- for (auto table_init : module_->table_inits) {
- for (uint32_t func_index : table_init.entries) {
- Code* code =
- Code::cast(code_table->get(static_cast<int>(func_index)));
- // Only increase the counter for lazy compile builtins (it's not
- // needed otherwise).
- if (code->is_wasm_code()) continue;
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
- ++num_table_exports[func_index];
- }
- }
- }
-
- // TODO(titzer): this does redundant work if there are multiple tables,
- // since initializations are not sorted by table index.
- for (auto table_init : module_->table_inits) {
- uint32_t base = EvalUint32InitExpr(table_init.offset);
- DCHECK(in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
- table_instance.function_table->length()));
- for (int i = 0, e = static_cast<int>(table_init.entries.size()); i < e;
- ++i) {
- uint32_t func_index = table_init.entries[i];
- WasmFunction* function = &module_->functions[func_index];
- int table_index = static_cast<int>(i + base);
- int32_t sig_index = table.map.Find(function->sig);
- DCHECK_GE(sig_index, 0);
- table_instance.signature_table->set(table_index,
- Smi::FromInt(sig_index));
- Handle<Code> wasm_code = EnsureTableExportLazyDeoptData(
- isolate_, instance, code_table, func_index,
- table_instance.function_table, table_index, num_table_exports);
- table_instance.function_table->set(table_index, *wasm_code);
-
- if (!all_dispatch_tables.is_null()) {
- if (js_wrappers_[func_index].is_null()) {
- // No JSFunction entry yet exists for this function. Create one.
-            // TODO(titzer): We compile JS->WASM wrappers for functions that
-            // are not exported but are in an exported table. This should be done
- // at module compile time and cached instead.
-
- Handle<Code> wrapper_code =
- js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, wasm_code, func_index);
- MaybeHandle<String> func_name;
- if (module_->is_asm_js()) {
- // For modules arising from asm.js, honor the names section.
- func_name =
- WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate_, compiled_module_, function->name_offset,
- function->name_length)
- .ToHandleChecked();
- }
- Handle<WasmExportedFunction> js_function =
- WasmExportedFunction::New(
- isolate_, instance, func_name, func_index,
- static_cast<int>(function->sig->parameter_count()),
- wrapper_code);
- js_wrappers_[func_index] = js_function;
- }
- table_instance.js_wrappers->set(table_index,
- *js_wrappers_[func_index]);
-
- UpdateDispatchTablesInternal(isolate_, all_dispatch_tables,
- table_index, function, wasm_code);
- }
- }
- }
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
-#ifdef DEBUG
- // Check that the count of table exports was accurate. The entries are
- // decremented on each export, so all should be zero now.
- for (auto e : num_table_exports) {
- DCHECK_EQ(0, e.second);
- }
-#endif
+ WasmFunction* wasm_function = nullptr;
+ Handle<Code> code = Handle<Code>::null();
+ Handle<Object> value = handle(isolate->heap()->null_value());
- // TODO(titzer): we add the new dispatch table at the end to avoid
- // redundant work and also because the new instance is not yet fully
- // initialized.
- if (!table_instance.table_object.is_null()) {
- // Add the new dispatch table to the WebAssembly.Table object.
- all_dispatch_tables = WasmTableObject::AddDispatchTable(
- isolate_, table_instance.table_object, instance, index,
- table_instance.function_table, table_instance.signature_table);
- }
- }
+ if (!function.is_null()) {
+ wasm_function = GetWasmFunctionForImportWrapper(isolate, function);
+ code = UnwrapImportWrapper(function);
+ value = Handle<Object>::cast(function);
}
-};
-bool wasm::IsWasmInstance(Object* object) {
- return WasmInstanceObject::IsWasmInstanceObject(object);
+ UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
+ array->set(index, *value);
}
Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
@@ -2312,8 +433,14 @@ Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
}
bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
+ // TODO(wasm): Once wasm has its own CSP policy, we should introduce a
+ // separate callback that includes information about the module about to be
+ // compiled. For the time being, pass an empty string as placeholder for the
+ // sources.
return isolate->allow_code_gen_callback() == nullptr ||
- isolate->allow_code_gen_callback()(v8::Utils::ToLocal(context));
+ isolate->allow_code_gen_callback()(
+ v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(isolate->factory()->empty_string()));
}
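// Editor's sketch (not part of this diff): an embedder-side callback whose
// shape matches the two-argument call above, typically registered through
// Isolate::SetAllowCodeGenerationFromStringsCallback. The wasm path currently
// passes an empty string for |source|, so the module bytes are not visible to
// the callback yet.
//
//   bool AllowWasmCodegen(v8::Local<v8::Context> context,
//                         v8::Local<v8::String> source) {
//     // Apply the embedder's CSP-like policy; returning false makes
//     // IsWasmCodegenAllowed() above report false as well.
//     return true;
//   }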
void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
@@ -2357,8 +484,9 @@ void testing::ValidateInstancesChain(Isolate* isolate,
CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
current_instance->ptr_to_weak_prev_instance()->value() == prev);
CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
- CHECK(IsWasmInstance(
- current_instance->ptr_to_weak_owning_instance()->value()));
+ CHECK(current_instance->ptr_to_weak_owning_instance()
+ ->value()
+ ->IsWasmInstanceObject());
prev = current_instance;
current_instance = WasmCompiledModule::cast(
current_instance->ptr_to_weak_next_instance()->value());
@@ -2405,7 +533,7 @@ Handle<JSArray> wasm::GetImports(Isolate* isolate,
// Create the result array.
WasmModule* module = compiled_module->module();
int num_imports = static_cast<int>(module->import_table.size());
- Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+ Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_imports);
JSArray::SetContent(array_object, storage);
array_object->set_length(Smi::FromInt(num_imports));
@@ -2439,13 +567,11 @@ Handle<JSArray> wasm::GetImports(Isolate* isolate,
MaybeHandle<String> import_module =
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, import.module_name_offset,
- import.module_name_length);
+ isolate, compiled_module, import.module_name);
MaybeHandle<String> import_name =
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, import.field_name_offset,
- import.field_name_length);
+ isolate, compiled_module, import.field_name);
JSObject::AddProperty(entry, module_string, import_module.ToHandleChecked(),
NONE);
@@ -2476,7 +602,7 @@ Handle<JSArray> wasm::GetExports(Isolate* isolate,
// Create the result array.
WasmModule* module = compiled_module->module();
int num_exports = static_cast<int>(module->export_table.size());
- Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+ Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_exports);
JSArray::SetContent(array_object, storage);
array_object->set_length(Smi::FromInt(num_exports));
@@ -2510,7 +636,7 @@ Handle<JSArray> wasm::GetExports(Isolate* isolate,
MaybeHandle<String> export_name =
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, exp.name_offset, exp.name_length);
+ isolate, compiled_module, exp.name);
JSObject::AddProperty(entry, name_string, export_name.ToHandleChecked(),
NONE);
@@ -2544,45 +670,38 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
std::vector<Handle<Object>> matching_sections;
// Gather matching sections.
- for (auto section : custom_sections) {
+ for (auto& section : custom_sections) {
MaybeHandle<String> section_name =
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, section.name_offset, section.name_length);
+ isolate, compiled_module, section.name);
if (!name->Equals(*section_name.ToHandleChecked())) continue;
// Make a copy of the payload data in the section.
- void* allocation_base = nullptr; // Set by TryAllocateBackingStore
- size_t allocation_length = 0; // Set by TryAllocateBackingStore
- const bool enable_guard_regions = false;
- void* memory = TryAllocateBackingStore(isolate, section.payload_length,
- enable_guard_regions,
- allocation_base, allocation_length);
-
- Handle<Object> section_data = factory->undefined_value();
- if (memory) {
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- const bool is_external = false;
- JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
- allocation_length, memory,
- static_cast<int>(section.payload_length));
- DisallowHeapAllocation no_gc; // for raw access to string bytes.
- Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
- isolate);
- const byte* start =
- reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
- memcpy(memory, start + section.payload_offset, section.payload_length);
- section_data = buffer;
- } else {
+ size_t size = section.payload.length();
+ void* memory =
+ size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
+
+ if (size && !memory) {
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ constexpr bool is_external = false;
+ JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size, memory,
+ size);
+ DisallowHeapAllocation no_gc; // for raw access to string bytes.
+ Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+ isolate);
+ const byte* start =
+ reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+ memcpy(memory, start + section.payload.offset(), section.payload.length());
- matching_sections.push_back(section_data);
+ matching_sections.push_back(buffer);
}
int num_custom_sections = static_cast<int>(matching_sections.size());
- Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+ Handle<JSArray> array_object = factory->NewJSArray(PACKED_ELEMENTS, 0, 0);
Handle<FixedArray> storage = factory->NewFixedArray(num_custom_sections);
JSArray::SetContent(array_object, storage);
array_object->set_length(Smi::FromInt(num_custom_sections));
@@ -2594,10 +713,37 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
return array_object;
}
+Handle<FixedArray> wasm::DecodeLocalNames(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ Handle<SeqOneByteString> wire_bytes(compiled_module->module_bytes(), isolate);
+ LocalNames decoded_locals;
+ {
+ DisallowHeapAllocation no_gc;
+ wasm::DecodeLocalNames(wire_bytes->GetChars(),
+ wire_bytes->GetChars() + wire_bytes->length(),
+ &decoded_locals);
+ }
+ Handle<FixedArray> locals_names =
+ isolate->factory()->NewFixedArray(decoded_locals.max_function_index + 1);
+ for (LocalNamesPerFunction& func : decoded_locals.names) {
+ Handle<FixedArray> func_locals_names =
+ isolate->factory()->NewFixedArray(func.max_local_index + 1);
+ locals_names->set(func.function_index, *func_locals_names);
+ for (LocalName& name : func.names) {
+ Handle<String> name_str =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, name.name)
+ .ToHandleChecked();
+ func_locals_names->set(name.local_index, *name_str);
+ }
+ }
+ return locals_names;
+}
+
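// Editor's note (not part of this diff): DecodeLocalNames() above returns a
// two-level FixedArray: the outer array is indexed by function index, each
// inner array by local index, and locals without a name entry stay undefined
// (the NewFixedArray default), e.g.
//   locals_names->get(func_index) -> names for that function's locals.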
bool wasm::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
if (bytes.start() == nullptr || bytes.length() == 0) return false;
- ModuleResult result =
- DecodeWasmModule(isolate, bytes.start(), bytes.end(), true, kWasmOrigin);
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), true, kWasmOrigin);
return result.ok();
}
@@ -2605,9 +751,8 @@ MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
-
- ModuleResult result = DecodeWasmModule(isolate, bytes.start(), bytes.end(),
- false, kAsmJsOrigin);
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kAsmJsOrigin);
if (result.failed()) {
thrower->CompileFailed("Wasm decoding failed", result);
return {};
@@ -2615,8 +760,7 @@ MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
// Transfer ownership to the {WasmModuleWrapper} generated in
// {CompileToModuleObject}.
- constexpr bool is_sync = true;
- CompilationHelper helper(isolate, std::move(result.val), is_sync);
+ ModuleCompiler helper(isolate, std::move(result.val));
return helper.CompileToModuleObject(thrower, bytes, asm_js_script,
asm_js_offset_table_bytes);
}
@@ -2629,8 +773,13 @@ MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
return {};
}
- ModuleResult result =
- DecodeWasmModule(isolate, bytes.start(), bytes.end(), false, kWasmOrigin);
+ // TODO(titzer): only make a copy of the bytes if SharedArrayBuffer
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
+
+ ModuleResult result = SyncDecodeWasmModule(
+ isolate, bytes_copy.start(), bytes_copy.end(), false, kWasmOrigin);
if (result.failed()) {
thrower->CompileFailed("Wasm decoding failed", result);
return {};
@@ -2638,9 +787,8 @@ MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
// Transfer ownership to the {WasmModuleWrapper} generated in
// {CompileToModuleObject}.
- constexpr bool is_sync = true;
- CompilationHelper helper(isolate, std::move(result.val), is_sync);
- return helper.CompileToModuleObject(thrower, bytes, Handle<Script>(),
+ ModuleCompiler helper(isolate, std::move(result.val));
+ return helper.CompileToModuleObject(thrower, bytes_copy, Handle<Script>(),
Vector<const byte>());
}
@@ -2648,8 +796,22 @@ MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory) {
- InstantiationHelper helper(isolate, thrower, module_object, imports, memory);
- return helper.Build();
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
+ &InstanceFinalizer);
+ return builder.Build();
+}
+
+MaybeHandle<WasmInstanceObject> wasm::SyncCompileAndInstantiate(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
+ MaybeHandle<WasmModuleObject> module =
+ wasm::SyncCompile(isolate, thrower, bytes);
+ DCHECK_EQ(thrower->error(), module.is_null());
+ if (module.is_null()) return {};
+
+ return wasm::SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
+ Handle<JSReceiver>::null(),
+ Handle<JSArrayBuffer>::null());
}
namespace {
@@ -2688,480 +850,6 @@ void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
instance_object.ToHandleChecked());
}
-// Encapsulates all the state and steps of an asynchronous compilation.
-// An asynchronous compile job consists of a number of tasks that are executed
-// as foreground and background tasks. Any phase that touches the V8 heap or
-// allocates on the V8 heap (e.g. creating the module object) must be a
-// foreground task. All other tasks (e.g. decoding and validating, the majority
-// of the work of compilation) can be background tasks.
-// TODO(wasm): factor out common parts of this with the synchronous pipeline.
-class AsyncCompileJob {
- // TODO(ahaas): Fix https://bugs.chromium.org/p/v8/issues/detail?id=6263 to
- // make sure that d8 does not shut down before the AsyncCompileJob is
- // finished.
- public:
- explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy,
- size_t length, Handle<Context> context,
- Handle<JSPromise> promise)
- : isolate_(isolate),
- bytes_copy_(std::move(bytes_copy)),
- wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length) {
- // The handles for the context and promise must be deferred.
- DeferredHandleScope deferred(isolate);
- context_ = Handle<Context>(*context);
- module_promise_ = Handle<JSPromise>(*promise);
- deferred_handles_.push_back(deferred.Detach());
- }
-
- void Start() {
- DoAsync<DecodeModule>(); // --
- }
-
- ~AsyncCompileJob() {
- for (auto d : deferred_handles_) delete d;
- }
-
- private:
- Isolate* isolate_;
- std::unique_ptr<byte[]> bytes_copy_;
- ModuleWireBytes wire_bytes_;
- Handle<Context> context_;
- Handle<JSPromise> module_promise_;
- std::unique_ptr<CompilationHelper> helper_;
- std::unique_ptr<ModuleBytesEnv> module_bytes_env_;
-
- bool failed_ = false;
- std::vector<DeferredHandles*> deferred_handles_;
- Handle<WasmModuleObject> module_object_;
- Handle<FixedArray> function_tables_;
- Handle<FixedArray> signature_tables_;
- Handle<WasmCompiledModule> compiled_module_;
- Handle<FixedArray> code_table_;
- std::unique_ptr<WasmInstance> temp_instance_ = nullptr;
- size_t outstanding_units_ = 0;
- size_t num_background_tasks_ = 0;
-
- void ReopenHandlesInDeferredScope() {
- DeferredHandleScope deferred(isolate_);
- function_tables_ = handle(*function_tables_, isolate_);
- signature_tables_ = handle(*signature_tables_, isolate_);
- code_table_ = handle(*code_table_, isolate_);
- temp_instance_->ReopenHandles(isolate_);
- for (auto& unit : helper_->compilation_units_) {
- unit->ReopenCentryStub();
- }
- deferred_handles_.push_back(deferred.Detach());
- }
-
- void AsyncCompileFailed(ErrorThrower& thrower) {
- RejectPromise(isolate_, context_, thrower, module_promise_);
- // The AsyncCompileJob is finished, we resolved the promise, we do not need
- // the data anymore. We can delete the AsyncCompileJob object.
- delete this;
- }
-
- void AsyncCompileSucceeded(Handle<Object> result) {
- ResolvePromise(isolate_, context_, module_promise_, result);
- // The AsyncCompileJob is finished, we resolved the promise, we do not need
- // the data anymore. We can delete the AsyncCompileJob object.
- delete this;
- }
-
- enum TaskType { SYNC, ASYNC };
-
- // A closure to run a compilation step (either as foreground or background
- // task) and schedule the next step(s), if any.
- class CompileTask : NON_EXPORTED_BASE(public v8::Task) {
- public:
- AsyncCompileJob* job_ = nullptr;
- CompileTask() {}
- void Run() override = 0; // Force sub-classes to override Run().
- };
-
- class AsyncCompileTask : public CompileTask {};
-
- class SyncCompileTask : public CompileTask {
- public:
- void Run() final {
- SaveContext saved_context(job_->isolate_);
- job_->isolate_->set_context(*job_->context_);
- RunImpl();
- }
-
- protected:
- virtual void RunImpl() = 0;
- };
-
- template <typename Task, typename... Args>
- void DoSync(Args&&... args) {
- static_assert(std::is_base_of<SyncCompileTask, Task>::value,
- "Scheduled type must be sync");
- Task* task = new Task(std::forward<Args>(args)...);
- task->job_ = this;
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate_), task);
- }
-
- template <typename Task, typename... Args>
- void DoAsync(Args&&... args) {
- static_assert(std::is_base_of<AsyncCompileTask, Task>::value,
- "Scheduled type must be async");
- Task* task = new Task(std::forward<Args>(args)...);
- task->job_ = this;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- }
-
- //==========================================================================
- // Step 1: (async) Decode the module.
- //==========================================================================
- class DecodeModule : public AsyncCompileTask {
- void Run() override {
- ModuleResult result;
- {
- DisallowHandleAllocation no_handle;
- DisallowHeapAllocation no_allocation;
- // Decode the module bytes.
- TRACE_COMPILE("(1) Decoding module...\n");
- constexpr bool is_sync = true;
- result = DecodeWasmModule(job_->isolate_, job_->wire_bytes_.start(),
- job_->wire_bytes_.end(), false, kWasmOrigin,
- !is_sync);
- }
- if (result.failed()) {
- // Decoding failure; reject the promise and clean up.
- job_->DoSync<DecodeFail>(std::move(result));
- } else {
- // Decode passed.
- job_->DoSync<PrepareAndStartCompile>(std::move(result.val));
- }
- }
- };
-
- //==========================================================================
- // Step 1b: (sync) Fail decoding the module.
- //==========================================================================
- class DecodeFail : public SyncCompileTask {
- public:
- explicit DecodeFail(ModuleResult result) : result_(std::move(result)) {}
-
- private:
- ModuleResult result_;
- void RunImpl() override {
- TRACE_COMPILE("(1b) Decoding failed.\n");
- HandleScope scope(job_->isolate_);
- ErrorThrower thrower(job_->isolate_, "AsyncCompile");
- thrower.CompileFailed("Wasm decoding failed", result_);
- // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job_->AsyncCompileFailed(thrower);
- }
- };
-
- //==========================================================================
- // Step 2 (sync): Create heap-allocated data and start compile.
- //==========================================================================
- class PrepareAndStartCompile : public SyncCompileTask {
- public:
- explicit PrepareAndStartCompile(std::unique_ptr<WasmModule> module)
- : module_(std::move(module)) {}
-
- private:
- std::unique_ptr<WasmModule> module_;
- void RunImpl() override {
- TRACE_COMPILE("(2) Prepare and start compile...\n");
- HandleScope scope(job_->isolate_);
-
- Factory* factory = job_->isolate_->factory();
- job_->temp_instance_.reset(new WasmInstance(module_.get()));
- job_->temp_instance_->context = job_->context_;
- job_->temp_instance_->mem_size =
- WasmModule::kPageSize * module_->min_mem_pages;
- job_->temp_instance_->mem_start = nullptr;
- job_->temp_instance_->globals_start = nullptr;
-
- // Initialize the indirect tables with placeholders.
- int function_table_count =
- static_cast<int>(module_->function_tables.size());
- job_->function_tables_ =
- factory->NewFixedArray(function_table_count, TENURED);
- job_->signature_tables_ =
- factory->NewFixedArray(function_table_count, TENURED);
- for (int i = 0; i < function_table_count; ++i) {
- job_->temp_instance_->function_tables[i] =
- factory->NewFixedArray(1, TENURED);
- job_->temp_instance_->signature_tables[i] =
- factory->NewFixedArray(1, TENURED);
- job_->function_tables_->set(i,
- *job_->temp_instance_->function_tables[i]);
- job_->signature_tables_->set(
- i, *job_->temp_instance_->signature_tables[i]);
- }
-
- // The {code_table} array contains import wrappers and functions (which
-      // are both included in {functions.size()}), and export wrappers.
- // The results of compilation will be written into it.
- int code_table_size = static_cast<int>(module_->functions.size() +
- module_->num_exported_functions);
- job_->code_table_ = factory->NewFixedArray(code_table_size, TENURED);
-
- // Initialize {code_table_} with the illegal builtin. All call sites
- // will be patched at instantiation.
- Handle<Code> illegal_builtin = job_->isolate_->builtins()->Illegal();
- // TODO(wasm): Fix this for lazy compilation.
- for (uint32_t i = 0; i < module_->functions.size(); ++i) {
- job_->code_table_->set(static_cast<int>(i), *illegal_builtin);
- job_->temp_instance_->function_code[i] = illegal_builtin;
- }
-
- job_->isolate_->counters()->wasm_functions_per_wasm_module()->AddSample(
- static_cast<int>(module_->functions.size()));
-
- // Transfer ownership of the {WasmModule} to the {CompilationHelper}, but
- // keep a pointer.
- WasmModule* module = module_.get();
- constexpr bool is_sync = true;
- job_->helper_.reset(
- new CompilationHelper(job_->isolate_, std::move(module_), !is_sync));
-
- DCHECK_LE(module->num_imported_functions, module->functions.size());
- size_t num_functions =
- module->functions.size() - module->num_imported_functions;
- if (num_functions == 0) {
- job_->ReopenHandlesInDeferredScope();
- // Degenerate case of an empty module.
- job_->DoSync<FinishCompile>();
- return;
- }
-
- // Start asynchronous compilation tasks.
- job_->num_background_tasks_ =
- Max(static_cast<size_t>(1),
- Min(num_functions,
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()
- ->NumberOfAvailableBackgroundThreads())));
- job_->module_bytes_env_.reset(new ModuleBytesEnv(
- module, job_->temp_instance_.get(), job_->wire_bytes_));
- job_->outstanding_units_ = job_->helper_->InitializeParallelCompilation(
- module->functions, *job_->module_bytes_env_);
-
- // Reopen all handles which should survive in the DeferredHandleScope.
- job_->ReopenHandlesInDeferredScope();
- for (size_t i = 0; i < job_->num_background_tasks_; ++i) {
- job_->DoAsync<ExecuteCompilationUnits>();
- }
- }
- };
-
- //==========================================================================
- // Step 3 (async x K tasks): Execute compilation units.
- //==========================================================================
- class ExecuteCompilationUnits : public AsyncCompileTask {
- void Run() override {
- TRACE_COMPILE("(3) Compiling...\n");
- for (;;) {
- {
- DisallowHandleAllocation no_handle;
- DisallowHeapAllocation no_allocation;
- if (!job_->helper_->FetchAndExecuteCompilationUnit()) break;
- }
- // TODO(ahaas): Create one FinishCompilationUnit job for all compilation
- // units.
- job_->DoSync<FinishCompilationUnit>();
- // TODO(ahaas): Limit the number of outstanding compilation units to be
- // finished to reduce memory overhead.
- }
- job_->helper_->module_->pending_tasks.get()->Signal();
- }
- };
-
- //==========================================================================
- // Step 4 (sync x each function): Finish a single compilation unit.
- //==========================================================================
- class FinishCompilationUnit : public SyncCompileTask {
- void RunImpl() override {
- TRACE_COMPILE("(4a) Finishing compilation unit...\n");
- HandleScope scope(job_->isolate_);
- if (job_->failed_) return; // already failed
-
- int func_index = -1;
- ErrorThrower thrower(job_->isolate_, "AsyncCompile");
- Handle<Code> result =
- job_->helper_->FinishCompilationUnit(&thrower, &func_index);
- if (thrower.error()) {
- job_->failed_ = true;
- } else {
- DCHECK(func_index >= 0);
- job_->code_table_->set(func_index, *(result));
- }
- if (thrower.error() || --job_->outstanding_units_ == 0) {
- // All compilation units are done. We still need to wait for the
- // background tasks to shut down and only then is it safe to finish the
- // compile and delete this job. We can wait for that to happen also
- // in a background task.
- job_->DoAsync<WaitForBackgroundTasks>(std::move(thrower));
- }
- }
- };
-
- //==========================================================================
- // Step 4b (async): Wait for all background tasks to finish.
- //==========================================================================
- class WaitForBackgroundTasks : public AsyncCompileTask {
- public:
- explicit WaitForBackgroundTasks(ErrorThrower thrower)
- : thrower_(std::move(thrower)) {}
-
- private:
- ErrorThrower thrower_;
-
- void Run() override {
- TRACE_COMPILE("(4b) Waiting for background tasks...\n");
- // Bump next_unit_, such that background tasks stop processing the queue.
- job_->helper_->next_unit_.SetValue(
- job_->helper_->compilation_units_.size());
- for (size_t i = 0; i < job_->num_background_tasks_; ++i) {
- // We wait for it to finish.
- job_->helper_->module_->pending_tasks.get()->Wait();
- }
- if (thrower_.error()) {
- job_->DoSync<FailCompile>(std::move(thrower_));
- } else {
- job_->DoSync<FinishCompile>();
- }
- }
- };
-
- //==========================================================================
- // Step 5a (sync): Fail compilation (reject promise).
- //==========================================================================
- class FailCompile : public SyncCompileTask {
- public:
- explicit FailCompile(ErrorThrower thrower) : thrower_(std::move(thrower)) {}
-
- private:
- ErrorThrower thrower_;
-
- void RunImpl() override {
- TRACE_COMPILE("(5a) Fail compilation...\n");
- HandleScope scope(job_->isolate_);
- return job_->AsyncCompileFailed(thrower_);
- }
- };
-
- //==========================================================================
- // Step 5b (sync): Finish heap-allocated data structures.
- //==========================================================================
- class FinishCompile : public SyncCompileTask {
- void RunImpl() override {
- TRACE_COMPILE("(5b) Finish compile...\n");
- HandleScope scope(job_->isolate_);
- // At this point, compilation has completed. Update the code table.
- constexpr bool is_sync = true;
- for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < job_->temp_instance_->function_code.size(); ++i) {
- Code* code = Code::cast(job_->code_table_->get(static_cast<int>(i)));
- RecordStats(job_->isolate_, code, !is_sync);
- }
-
- // Create heap objects for script and module bytes to be stored in the
- // shared module data. Asm.js is not compiled asynchronously.
- Handle<Script> script =
- CreateWasmScript(job_->isolate_, job_->wire_bytes_);
- Handle<ByteArray> asm_js_offset_table;
- // TODO(wasm): Improve efficiency of storing module wire bytes.
- // 1. Only store relevant sections, not function bodies
- // 2. Don't make a second copy of the bytes here; reuse the copy made
- // for asynchronous compilation and store it as an external one
- // byte string for serialization/deserialization.
- Handle<String> module_bytes =
- job_->isolate_->factory()
- ->NewStringFromOneByte(
- {job_->wire_bytes_.start(), job_->wire_bytes_.length()},
- TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
-
- // The {module_wrapper} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<WasmModuleWrapper> module_wrapper = WasmModuleWrapper::New(
- job_->isolate_, job_->helper_->module_.release());
-
- // Create the shared module data.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmSharedModuleData. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
-
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- job_->isolate_, module_wrapper,
- Handle<SeqOneByteString>::cast(module_bytes), script,
- asm_js_offset_table);
-
- // Create the compiled module object and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of
- // this object.
- job_->compiled_module_ = WasmCompiledModule::New(
- job_->isolate_, shared, job_->code_table_, job_->function_tables_,
- job_->signature_tables_);
-
- // Finish the WASM script now and make it public to the debugger.
- script->set_wasm_compiled_module(*job_->compiled_module_);
- job_->isolate_->debug()->OnAfterCompile(script);
-
- DeferredHandleScope deferred(job_->isolate_);
- job_->compiled_module_ = handle(*job_->compiled_module_, job_->isolate_);
- job_->deferred_handles_.push_back(deferred.Detach());
- // TODO(wasm): compiling wrappers should be made async as well.
- job_->DoSync<CompileWrappers>();
- }
- };
-
- //==========================================================================
- // Step 6 (sync): Compile JS->WASM wrappers.
- //==========================================================================
- class CompileWrappers : public SyncCompileTask {
- void RunImpl() override {
- TRACE_COMPILE("(6) Compile wrappers...\n");
- // Compile JS->WASM wrappers for exported functions.
- HandleScope scope(job_->isolate_);
- JSToWasmWrapperCache js_to_wasm_cache;
- int func_index = 0;
- constexpr bool is_sync = true;
- WasmModule* module = job_->compiled_module_->module();
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code(Code::cast(job_->code_table_->get(exp.index)),
- job_->isolate_);
- Handle<Code> wrapper_code =
- js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
- job_->isolate_, module, wasm_code, exp.index);
- int export_index =
- static_cast<int>(module->functions.size() + func_index);
- job_->code_table_->set(export_index, *wrapper_code);
- RecordStats(job_->isolate_, *wrapper_code, !is_sync);
- func_index++;
- }
-
- job_->DoSync<FinishModule>();
- }
- };
-
- //==========================================================================
- // Step 7 (sync): Finish the module and resolve the promise.
- //==========================================================================
- class FinishModule : public SyncCompileTask {
- void RunImpl() override {
- TRACE_COMPILE("(7) Finish module...\n");
- HandleScope scope(job_->isolate_);
- Handle<WasmModuleObject> result =
- WasmModuleObject::New(job_->isolate_, job_->compiled_module_);
- // {job_} is deleted in AsyncCompileSucceeded, therefore the {return}.
- return job_->AsyncCompileSucceeded(result);
- }
- };
-};
-
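// Editor's note (not part of this diff): the AsyncCompileJob removed above
// ran a fixed task pipeline -- (1) DecodeModule, (2) PrepareAndStartCompile,
// (3) ExecuteCompilationUnits on background threads, (4) FinishCompilationUnit
// and WaitForBackgroundTasks, (5) FailCompile or FinishCompile, (6)
// CompileWrappers, (7) FinishModule -- and is superseded below by
// isolate->wasm_compilation_manager()->StartAsyncCompileJob().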
void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
const ModuleWireBytes& bytes) {
if (!FLAG_wasm_async_compilation) {
@@ -3182,9 +870,9 @@ void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
// during asynchronous compilation.
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
- auto job = new AsyncCompileJob(isolate, std::move(copy), bytes.length(),
- handle(isolate->context()), promise);
- job->Start();
+ isolate->wasm_compilation_manager()->StartAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(), handle(isolate->context()),
+ promise);
}
Handle<Code> wasm::CompileLazy(Isolate* isolate) {
@@ -3206,12 +894,12 @@ Handle<Code> wasm::CompileLazy(Isolate* isolate) {
Handle<FixedArray> exp_deopt_data;
int func_index = -1;
if (lazy_compile_code->deoptimization_data()->length() > 0) {
- // Then it's an indirect call or via JS->WASM wrapper.
+ // Then it's an indirect call or via JS->wasm wrapper.
DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
- func_index = Smi::cast(exp_deopt_data->get(1))->value();
+ func_index = Smi::ToInt(exp_deopt_data->get(1));
}
it.Advance();
// Third frame: The calling wasm code or js-to-wasm wrapper.
@@ -3221,9 +909,10 @@ Handle<Code> wasm::CompileLazy(Isolate* isolate) {
if (it.frame()->is_js_to_wasm()) {
DCHECK(!instance.is_null());
} else if (instance.is_null()) {
+ // Then this is a direct call (otherwise we would have attached the instance
+ // via deopt data to the lazy compile stub). Just use the instance of the
+ // caller.
instance = handle(wasm::GetOwningWasmInstance(*caller_code), isolate);
- } else {
- DCHECK(*instance == wasm::GetOwningWasmInstance(*caller_code));
}
int offset =
static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
@@ -3246,7 +935,7 @@ Handle<Code> wasm::CompileLazy(Isolate* isolate) {
for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- int exp_index = Smi::cast(exp_deopt_data->get(idx + 1))->value();
+ int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
exp_table->set(exp_index, *compiled_code);
}
@@ -3288,9 +977,9 @@ void LazyCompilationOrchestrator::CompileFunction(
&sig_tables);
uint8_t* module_start = compiled_module->module_bytes()->GetChars();
const WasmFunction* func = &module_env.module->functions[func_index];
- wasm::FunctionBody body{func->sig, module_start,
- module_start + func->code_start_offset,
- module_start + func->code_end_offset};
+ wasm::FunctionBody body{func->sig, func->code.offset(),
+ module_start + func->code.offset(),
+ module_start + func->code.end_offset()};
// TODO(wasm): Refactor this to only get the name if it is really needed for
// tracing / debugging.
std::string func_name;
@@ -3303,14 +992,18 @@ void LazyCompilationOrchestrator::CompileFunction(
}
ErrorThrower thrower(isolate, "WasmLazyCompile");
compiler::WasmCompilationUnit unit(isolate, &module_env, body,
- CStrVector(func_name.c_str()), func_index);
+ CStrVector(func_name.c_str()), func_index,
+ CEntryStub(isolate, 1).GetCode());
unit.ExecuteCompilation();
- Handle<Code> code = unit.FinishCompilation(&thrower);
+ MaybeHandle<Code> maybe_code = unit.FinishCompilation(&thrower);
// If there is a pending error, something really went wrong. The module was
// verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
+ // TODO(clemensh): According to the spec, we can actually skip validation at
+ // module creation time, and return a function that always traps here.
CHECK(!thrower.error());
+ Handle<Code> code = maybe_code.ToHandleChecked();
Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
@@ -3336,8 +1029,8 @@ void LazyCompilationOrchestrator::CompileFunction(
Address mem_start =
reinterpret_cast<Address>(instance->memory_buffer()->backing_store());
int mem_size = instance->memory_buffer()->byte_length()->Number();
- DCHECK_IMPLIES(mem_size == 0, mem_start == nullptr);
- if (mem_size > 0) {
+ DCHECK_IMPLIES(mem_start == nullptr, mem_size == 0);
+ if (mem_start != nullptr) {
code_specialization.RelocateMemoryReferences(nullptr, 0, mem_start,
mem_size);
}
@@ -3346,7 +1039,7 @@ void LazyCompilationOrchestrator::CompileFunction(
code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
Assembler::FlushICache(isolate, code->instruction_start(),
code->instruction_size());
- RecordLazyCodeStats(isolate, *code);
+ RecordLazyCodeStats(*code, isolate->counters());
}
Handle<Code> LazyCompilationOrchestrator::CompileLazy(
@@ -3371,12 +1064,10 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazy(
SourcePositionTableIterator source_pos_iterator(
caller->SourcePositionTable());
DCHECK_EQ(2, caller->deoptimization_data()->length());
- int caller_func_index =
- Smi::cast(caller->deoptimization_data()->get(1))->value();
+ int caller_func_index = Smi::ToInt(caller->deoptimization_data()->get(1));
const byte* func_bytes =
- module_bytes->GetChars() + compiled_module->module()
- ->functions[caller_func_index]
- .code_start_offset;
+ module_bytes->GetChars() +
+ compiled_module->module()->functions[caller_func_index].code.offset();
for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
it.next()) {
Code* callee =
@@ -3414,10 +1105,9 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazy(
DCHECK_GT(non_compiled_functions.size(), idx);
int called_func_index = non_compiled_functions[idx].func_index;
// Check that the callee agrees with our assumed called_func_index.
- DCHECK_IMPLIES(
- callee->deoptimization_data()->length() > 0,
- Smi::cast(callee->deoptimization_data()->get(1))->value() ==
- called_func_index);
+ DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
+ Smi::ToInt(callee->deoptimization_data()->get(1)) ==
+ called_func_index);
if (is_js_to_wasm) {
DCHECK_EQ(func_to_return_idx, called_func_index);
} else {
@@ -3443,3 +1133,17 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazy(
DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
return handle(ret, isolate);
}
+
+const char* wasm::ExternalKindName(WasmExternalKind kind) {
+ switch (kind) {
+ case kExternalFunction:
+ return "function";
+ case kExternalTable:
+ return "table";
+ case kExternalMemory:
+ return "memory";
+ case kExternalGlobal:
+ return "global";
+ }
+ return "unknown";
+}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 4776298e9f..b36fdff4ea 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -24,6 +24,7 @@ class WasmCompiledModule;
class WasmDebugInfo;
class WasmModuleObject;
class WasmInstanceObject;
+class WasmTableObject;
class WasmMemoryObject;
namespace compiler {
@@ -69,15 +70,34 @@ struct WasmInitExpr {
}
};
-// Static representation of a WASM function.
+// Reference to a string in the wire bytes.
+class WireBytesRef {
+ public:
+ WireBytesRef() : WireBytesRef(0, 0) {}
+ WireBytesRef(uint32_t offset, uint32_t length)
+ : offset_(offset), length_(length) {
+ DCHECK_IMPLIES(offset_ == 0, length_ == 0);
+ DCHECK_LE(offset_, offset_ + length_); // no uint32_t overflow.
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t length() const { return length_; }
+ uint32_t end_offset() const { return offset_ + length_; }
+ bool is_empty() const { return length_ == 0; }
+ bool is_set() const { return offset_ != 0; }
+
+ private:
+ uint32_t offset_;
+ uint32_t length_;
+};
+
+// Static representation of a wasm function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
uint32_t func_index; // index into the function table.
uint32_t sig_index; // index into the signature table.
- uint32_t name_offset; // offset in the module bytes of the name, if any.
- uint32_t name_length; // length in bytes of the name.
- uint32_t code_start_offset; // offset in the module bytes of code start.
- uint32_t code_end_offset; // offset in the module bytes of code end.
+ WireBytesRef name; // function name, if any.
+ WireBytesRef code; // code of this function.
bool imported;
bool exported;
};
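// Illustrative sketch (standalone, not part of the V8 sources in this diff):
// a minimal analogue of the WireBytesRef introduced above, showing how a
// single {offset, length} pair replaces the old *_offset/*_length field pairs
// on WasmFunction. RefSketch and the sample values are invented.
#include <cassert>
#include <cstdint>
#include <vector>

struct RefSketch {
  uint32_t offset;
  uint32_t length;
  uint32_t end_offset() const { return offset + length; }
  bool is_set() const { return offset != 0; }
};

int main() {
  std::vector<uint8_t> wire_bytes(64, 0);  // pretend module wire bytes
  RefSketch code{16, 8};                   // function body at [16, 24)
  RefSketch name{0, 0};                    // no entry in the names section
  assert(code.end_offset() == 24);
  assert(!name.is_set());                  // unnamed function
  // Slicing the body out of the wire bytes, as GetFunctionBytes() now does
  // via code.offset() / code.end_offset():
  auto begin = wire_bytes.begin() + code.offset;
  auto end = wire_bytes.begin() + code.end_offset();
  assert(static_cast<uint32_t>(end - begin) == code.length);
  return 0;
}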
@@ -92,22 +112,35 @@ struct WasmGlobal {
bool exported; // true if exported.
};
+// Note: An exception signature only uses the params portion of a
+// function signature.
+typedef FunctionSig WasmExceptionSig;
+
+struct WasmException {
+ explicit WasmException(const WasmExceptionSig* sig = &empty_sig_)
+ : sig(sig) {}
+
+ const WasmExceptionSig* sig; // type signature of the exception.
+
+ private:
+ static const WasmExceptionSig empty_sig_;
+};
+
// Static representation of a wasm data segment.
struct WasmDataSegment {
WasmInitExpr dest_addr; // destination memory address of the data.
- uint32_t source_offset; // start offset in the module bytes.
- uint32_t source_size; // end offset in the module bytes.
+  WireBytesRef source;     // source bytes of the segment in the module bytes.
};
// Static representation of a wasm indirect call table.
struct WasmIndirectFunctionTable {
- uint32_t min_size; // minimum table size.
- uint32_t max_size; // maximum table size.
- bool has_max; // true if there is a maximum size.
+ uint32_t min_size = 0; // minimum table size.
+ uint32_t max_size = 0; // maximum table size.
+ bool has_max = false; // true if there is a maximum size.
// TODO(titzer): Move this to WasmInstance. Needed by interpreter only.
std::vector<int32_t> values; // function table, -1 indicating invalid.
- bool imported; // true if imported.
- bool exported; // true if exported.
+ bool imported = false; // true if imported.
+ bool exported = false; // true if exported.
SignatureMap map; // canonicalizing map for sig indexes.
};
@@ -118,33 +151,23 @@ struct WasmTableInit {
std::vector<uint32_t> entries;
};
-// Static representation of a WASM import.
+// Static representation of a wasm import.
struct WasmImport {
- uint32_t module_name_length; // length in bytes of the module name.
- uint32_t module_name_offset; // offset in module bytes of the module name.
- uint32_t field_name_length; // length in bytes of the import name.
- uint32_t field_name_offset; // offset in module bytes of the import name.
- WasmExternalKind kind; // kind of the import.
- uint32_t index; // index into the respective space.
+ WireBytesRef module_name; // module name.
+ WireBytesRef field_name; // import name.
+ WasmExternalKind kind; // kind of the import.
+ uint32_t index; // index into the respective space.
};
-// Static representation of a WASM export.
+// Static representation of a wasm export.
struct WasmExport {
- uint32_t name_length; // length in bytes of the exported name.
- uint32_t name_offset; // offset in module bytes of the name to export.
+ WireBytesRef name; // exported name.
WasmExternalKind kind; // kind of the export.
uint32_t index; // index into the respective space.
};
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
-inline bool IsWasm(ModuleOrigin Origin) {
- return Origin == ModuleOrigin::kWasmOrigin;
-}
-inline bool IsAsmJs(ModuleOrigin Origin) {
- return Origin == ModuleOrigin::kAsmJsOrigin;
-}
-
struct ModuleWireBytes;
// Static representation of a module.
@@ -153,44 +176,36 @@ struct V8_EXPORT_PRIVATE WasmModule {
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
std::unique_ptr<Zone> signature_zone;
- uint32_t min_mem_pages = 0; // minimum size of the memory in 64k pages
- uint32_t max_mem_pages = 0; // maximum size of the memory in 64k pages
- bool has_max_mem = false; // try if a maximum memory size exists
- bool has_memory = false; // true if the memory was defined or imported
- bool mem_export = false; // true if the memory is exported
- // TODO(wasm): reconcile start function index being an int with
- // the fact that we index on uint32_t, so we may technically not be
- // able to represent some start_function_index -es.
- int start_function_index = -1; // start function, if any
-
- std::vector<WasmGlobal> globals; // globals in this module.
- uint32_t globals_size = 0; // size of globals table.
- uint32_t num_imported_functions = 0; // number of imported functions.
- uint32_t num_declared_functions = 0; // number of declared functions.
- uint32_t num_exported_functions = 0; // number of exported functions.
- std::vector<FunctionSig*> signatures; // signatures in this module.
- std::vector<WasmFunction> functions; // functions in this module.
- std::vector<WasmDataSegment> data_segments; // data segments in this module.
- std::vector<WasmIndirectFunctionTable> function_tables; // function tables.
- std::vector<WasmImport> import_table; // import table.
- std::vector<WasmExport> export_table; // export table.
- std::vector<WasmTableInit> table_inits; // initializations of tables
- // We store the semaphore here to extend its lifetime. In <libc-2.21, which we
- // use on the try bots, semaphore::Wait() can return while some compilation
- // tasks are still executing semaphore::Signal(). If the semaphore is cleaned
- // up right after semaphore::Wait() returns, then this can cause an
- // invalid-semaphore error in the compilation tasks.
- // TODO(wasm): Move this semaphore back to CompileInParallel when the try bots
- // switch to libc-2.21 or higher.
- std::unique_ptr<base::Semaphore> pending_tasks;
+ uint32_t min_mem_pages = 0; // minimum size of the memory in 64k pages
+ uint32_t max_mem_pages = 0; // maximum size of the memory in 64k pages
+  bool has_max_mem = false;          // true if a maximum memory size exists
+ bool has_memory = false; // true if the memory was defined or imported
+ bool mem_export = false; // true if the memory is exported
+ int start_function_index = -1; // start function, >= 0 if any
+
+ std::vector<WasmGlobal> globals;
+ uint32_t globals_size = 0;
+ uint32_t num_imported_functions = 0;
+ uint32_t num_declared_functions = 0;
+ uint32_t num_exported_functions = 0;
+ WireBytesRef name = {0, 0};
+ // TODO(wasm): Add url here, for spec'ed location information.
+ std::vector<FunctionSig*> signatures;
+ std::vector<WasmFunction> functions;
+ std::vector<WasmDataSegment> data_segments;
+ std::vector<WasmIndirectFunctionTable> function_tables;
+ std::vector<WasmImport> import_table;
+ std::vector<WasmExport> export_table;
+ std::vector<WasmException> exceptions;
+ std::vector<WasmTableInit> table_inits;
WasmModule() : WasmModule(nullptr) {}
WasmModule(std::unique_ptr<Zone> owned);
- ModuleOrigin get_origin() const { return origin_; }
+ ModuleOrigin origin() const { return origin_; }
void set_origin(ModuleOrigin new_value) { origin_ = new_value; }
- bool is_wasm() const { return wasm::IsWasm(origin_); }
- bool is_asm_js() const { return wasm::IsAsmJs(origin_); }
+ bool is_wasm() const { return origin_ == kWasmOrigin; }
+ bool is_asm_js() const { return origin_ == kAsmJsOrigin; }
private:
// TODO(kschimpf) - Encapsulate more fields.
@@ -199,7 +214,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
typedef Managed<WasmModule> WasmModuleWrapper;
-// An instantiated WASM module, including memory, function table, etc.
+// An instantiated wasm module, including memory, function table, etc.
struct WasmInstance {
const WasmModule* module; // static representation of the module.
// -- Heap allocated --------------------------------------------------------
@@ -251,31 +266,29 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
}
// Get a string stored in the module bytes representing a name.
- WasmName GetName(uint32_t offset, uint32_t length) const {
- if (length == 0) return {"<?>", 3}; // no name.
- CHECK(BoundsCheck(offset, length));
- DCHECK_GE(length, 0);
+ WasmName GetName(WireBytesRef ref) const {
+ if (ref.is_empty()) return {"<?>", 3}; // no name.
+ CHECK(BoundsCheck(ref.offset(), ref.length()));
return Vector<const char>::cast(
- module_bytes_.SubVector(offset, offset + length));
+ module_bytes_.SubVector(ref.offset(), ref.end_offset()));
}
// Get a string stored in the module bytes representing a function name.
WasmName GetName(const WasmFunction* function) const {
- return GetName(function->name_offset, function->name_length);
+ return GetName(function->name);
}
// Get a string stored in the module bytes representing a name.
- WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
- if (offset == 0 && length == 0) return {NULL, 0}; // no name.
- CHECK(BoundsCheck(offset, length));
- DCHECK_GE(length, 0);
+ WasmName GetNameOrNull(WireBytesRef ref) const {
+ if (!ref.is_set()) return {NULL, 0}; // no name.
+ CHECK(BoundsCheck(ref.offset(), ref.length()));
return Vector<const char>::cast(
- module_bytes_.SubVector(offset, offset + length));
+ module_bytes_.SubVector(ref.offset(), ref.end_offset()));
}
// Get a string stored in the module bytes representing a function name.
WasmName GetNameOrNull(const WasmFunction* function) const {
- return GetNameOrNull(function->name_offset, function->name_length);
+ return GetNameOrNull(function->name);
}
// Checks the given offset range is contained within the module bytes.
@@ -285,8 +298,8 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
}
Vector<const byte> GetFunctionBytes(const WasmFunction* function) const {
- return module_bytes_.SubVector(function->code_start_offset,
- function->code_end_offset);
+ return module_bytes_.SubVector(function->code.offset(),
+ function->code.end_offset());
}
const byte* start() const { return module_bytes_.start(); }
@@ -356,14 +369,6 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
DCHECK_NOT_NULL(instance);
return instance->function_code[index];
}
-
- // TODO(titzer): move these into src/compiler/wasm-compiler.cc
- static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
- FunctionSig* sig);
- static compiler::CallDescriptor* GetI32WasmCallDescriptor(
- Zone* zone, compiler::CallDescriptor* descriptor);
- static compiler::CallDescriptor* GetI32WasmCallDescriptorForSimd(
- Zone* zone, compiler::CallDescriptor* descriptor);
};
// A ModuleEnv together with ModuleWireBytes.
@@ -394,13 +399,6 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
// If no debug info exists yet, it is created automatically.
Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
-// Check whether the given object represents a WebAssembly.Instance instance.
-// This checks the number and type of embedder fields, so it's not 100 percent
-// secure. If it turns out that we need more complete checks, we could add a
-// special marker as embedder field, which will definitely never occur anywhere
-// else.
-bool IsWasmInstance(Object* instance);
-
// Get the script of the wasm module. If the origin of the module is asm.js, the
// returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
// it's of type TYPE_WASM.
@@ -422,26 +420,42 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
Isolate* isolate, Handle<WasmModuleObject> module, Handle<String> name,
ErrorThrower* thrower);
+// Decode local variable names from the names section. Return FixedArray of
+// FixedArray of <undefined|String>. The outer fixed array is indexed by the
+// function index, the inner one by the local index.
+Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmCompiledModule>);
+
// Assumed to be called with a code object associated to a wasm module instance.
// Intended to be called from runtime functions.
// Returns nullptr on failing to get owning instance.
WasmInstanceObject* GetOwningWasmInstance(Code* code);
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate*, size_t size,
- bool enable_guard_regions);
+Handle<JSArrayBuffer> NewArrayBuffer(
+ Isolate*, size_t size, bool enable_guard_regions,
+ SharedFlag shared = SharedFlag::kNotShared);
-Handle<JSArrayBuffer> SetupArrayBuffer(Isolate*, void* allocation_base,
- size_t allocation_length,
- void* backing_store, size_t size,
- bool is_external,
- bool enable_guard_regions);
+Handle<JSArrayBuffer> SetupArrayBuffer(
+ Isolate*, void* allocation_base, size_t allocation_length,
+ void* backing_store, size_t size, bool is_external,
+ bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
bool free_memory);
+// The returned pointer is owned by the wasm instance that {target} belongs to.
+// The result is alive as long as the instance exists.
+WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
+ Handle<Object> target);
+
+Handle<Code> UnwrapImportWrapper(Handle<Object> import_wrapper);
+
+void TableSet(ErrorThrower* thrower, Isolate* isolate,
+ Handle<WasmTableObject> table, int32_t index,
+ Handle<JSFunction> function);
+
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- int index, Handle<JSFunction> js_function);
+ int index, WasmFunction* function, Handle<Code> code);
//============================================================================
//== Compilation and instantiation ===========================================
@@ -461,6 +475,10 @@ V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory);
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
+
V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
const ModuleWireBytes& bytes);
@@ -476,7 +494,16 @@ const bool kGuardRegionsSupported = false;
#endif
inline bool EnableGuardRegions() {
- return FLAG_wasm_guard_pages && kGuardRegionsSupported;
+ return FLAG_wasm_guard_pages && kGuardRegionsSupported &&
+ !FLAG_experimental_wasm_threads;
+}
+
+inline SharedFlag IsShared(Handle<JSArrayBuffer> buffer) {
+ if (!buffer.is_null() && buffer->is_shared()) {
+ DCHECK(FLAG_experimental_wasm_threads);
+ return SharedFlag::kShared;
+ }
+ return SharedFlag::kNotShared;
}
void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
@@ -509,6 +536,8 @@ class LazyCompilationOrchestrator {
int exported_func_index, bool patch_caller);
};
+const char* ExternalKindName(WasmExternalKind);
+
namespace testing {
void ValidateInstancesChain(Isolate* isolate,
Handle<WasmModuleObject> module_obj,
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index d43087b263..71839ba27c 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -10,6 +10,7 @@
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
+#include "src/objects/debug-objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-module.h"
@@ -28,53 +29,13 @@
using namespace v8::internal;
using namespace v8::internal::wasm;
-#define DEFINE_GETTER0(getter, Container, name, field, type) \
- type* Container::name() { return type::cast(getter(field)); }
-
-#define DEFINE_ACCESSORS0(getter, setter, Container, name, field, type) \
- DEFINE_GETTER0(getter, Container, name, field, type) \
- void Container::set_##name(type* value) { return setter(field, value); }
-
-#define DEFINE_OPTIONAL_ACCESSORS0(getter, setter, Container, name, field, \
- type) \
- DEFINE_ACCESSORS0(getter, setter, Container, name, field, type) \
- bool Container::has_##name() { \
- return !getter(field)->IsUndefined(GetIsolate()); \
- }
-
-#define DEFINE_OPTIONAL_GETTER0(getter, Container, name, field, type) \
- DEFINE_GETTER0(getter, Container, name, field, type) \
- bool Container::has_##name() { \
- return !getter(field)->IsUndefined(GetIsolate()); \
- }
-
-#define DEFINE_GETTER0(getter, Container, name, field, type) \
- type* Container::name() { return type::cast(getter(field)); }
-
-#define DEFINE_OBJ_GETTER(Container, name, field, type) \
- DEFINE_GETTER0(GetEmbedderField, Container, name, field, type)
-#define DEFINE_OBJ_ACCESSORS(Container, name, field, type) \
- DEFINE_ACCESSORS0(GetEmbedderField, SetEmbedderField, Container, name, \
- field, type)
-#define DEFINE_OPTIONAL_OBJ_ACCESSORS(Container, name, field, type) \
- DEFINE_OPTIONAL_ACCESSORS0(GetEmbedderField, SetEmbedderField, Container, \
- name, field, type)
-#define DEFINE_ARR_GETTER(Container, name, field, type) \
- DEFINE_GETTER0(get, Container, name, field, type)
-#define DEFINE_ARR_ACCESSORS(Container, name, field, type) \
- DEFINE_ACCESSORS0(get, set, Container, name, field, type)
-#define DEFINE_OPTIONAL_ARR_ACCESSORS(Container, name, field, type) \
- DEFINE_OPTIONAL_ACCESSORS0(get, set, Container, name, field, type)
-#define DEFINE_OPTIONAL_ARR_GETTER(Container, name, field, type) \
- DEFINE_OPTIONAL_GETTER0(get, Container, name, field, type)
-
namespace {
// An iterator that returns first the module itself, then all modules linked via
// next, then all linked via prev.
class CompiledModulesIterator
- : public std::iterator<std::input_iterator_tag,
- Handle<WasmCompiledModule>> {
+ : public v8::base::iterator<std::input_iterator_tag,
+ Handle<WasmCompiledModule>> {
public:
CompiledModulesIterator(Isolate* isolate,
Handle<WasmCompiledModule> start_module, bool at_end)
@@ -131,8 +92,8 @@ class CompiledModulesIterator
// An iterator based on the CompiledModulesIterator, but it returns all live
 // instances, not the WasmCompiledModules themselves.
class CompiledModuleInstancesIterator
- : public std::iterator<std::input_iterator_tag,
- Handle<WasmInstanceObject>> {
+ : public v8::base::iterator<std::input_iterator_tag,
+ Handle<WasmInstanceObject>> {
public:
CompiledModuleInstancesIterator(Isolate* isolate,
Handle<WasmCompiledModule> start_module,
@@ -182,8 +143,8 @@ bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
BodyLocalDecls locals(&tmp);
const byte* module_start = compiled_module->module_bytes()->GetChars();
WasmFunction& func = compiled_module->module()->functions[func_index];
- BytecodeIterator iterator(module_start + func.code_start_offset,
- module_start + func.code_end_offset, &locals);
+ BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(), &locals);
DCHECK_LT(0, locals.encoded_size);
for (uint32_t offset : iterator.offsets()) {
if (offset > static_cast<uint32_t>(offset_in_func)) break;
@@ -197,65 +158,37 @@ bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- WasmModule* module = compiled_module->module();
- Handle<JSObject> module_object;
- if (module->is_wasm()) {
- Handle<JSFunction> module_cons(
- isolate->native_context()->wasm_module_constructor());
- module_object = isolate->factory()->NewJSObject(module_cons);
- Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
- Object::SetProperty(module_object, module_sym, module_object, STRICT)
- .Check();
- } else {
- DCHECK(module->is_asm_js());
- Handle<Map> map = isolate->factory()->NewMap(
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize + WasmModuleObject::kFieldCount * kPointerSize);
- module_object = isolate->factory()->NewJSObjectFromMap(map, TENURED);
- }
- module_object->SetEmbedderField(WasmModuleObject::kCompiledModule,
- *compiled_module);
+ Handle<JSFunction> module_cons(
+ isolate->native_context()->wasm_module_constructor());
+ auto module_object = Handle<WasmModuleObject>::cast(
+ isolate->factory()->NewJSObject(module_cons));
+ module_object->set_compiled_module(*compiled_module);
Handle<WeakCell> link_to_module =
isolate->factory()->NewWeakCell(module_object);
compiled_module->set_weak_wasm_module(link_to_module);
- return Handle<WasmModuleObject>::cast(module_object);
-}
-
-WasmModuleObject* WasmModuleObject::cast(Object* object) {
- DCHECK(object->IsJSObject());
- // TODO(titzer): brand check for WasmModuleObject.
- return reinterpret_cast<WasmModuleObject*>(object);
+ return module_object;
}
-bool WasmModuleObject::IsWasmModuleObject(Object* object) {
- return object->IsJSObject() &&
- JSObject::cast(object)->GetEmbedderFieldCount() == kFieldCount;
-}
-
-DEFINE_OBJ_GETTER(WasmModuleObject, compiled_module, kCompiledModule,
- WasmCompiledModule)
-
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions) {
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor());
- Handle<JSObject> table_obj = isolate->factory()->NewJSObject(table_ctor);
- table_obj->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
+ auto table_obj = Handle<WasmTableObject>::cast(
+ isolate->factory()->NewJSObject(table_ctor));
*js_functions = isolate->factory()->NewFixedArray(initial);
Object* null = isolate->heap()->null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
(*js_functions)->set(i, null);
}
- table_obj->SetEmbedderField(kFunctions, *(*js_functions));
+ table_obj->set_functions(**js_functions);
+ DCHECK_EQ(maximum, static_cast<int>(maximum));
Handle<Object> max = isolate->factory()->NewNumber(maximum);
- table_obj->SetEmbedderField(kMaximum, *max);
+ table_obj->set_maximum_length(*max);
Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
- table_obj->SetEmbedderField(kDispatchTables, *dispatch_tables);
- Handle<Symbol> table_sym(isolate->native_context()->wasm_table_sym());
- Object::SetProperty(table_obj, table_sym, table_obj, STRICT).Check();
+ table_obj->set_dispatch_tables(*dispatch_tables);
return Handle<WasmTableObject>::cast(table_obj);
}
@@ -263,14 +196,13 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table_obj,
Handle<WasmInstanceObject> instance, int table_index,
Handle<FixedArray> function_table, Handle<FixedArray> signature_table) {
- Handle<FixedArray> dispatch_tables(
- FixedArray::cast(table_obj->GetEmbedderField(kDispatchTables)), isolate);
+ Handle<FixedArray> dispatch_tables(table_obj->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % 4);
if (instance.is_null()) return dispatch_tables;
// TODO(titzer): use weak cells here to avoid leaking instances.
- // Grow the dispatch table and add a new triple at the end.
+ // Grow the dispatch table and add a new entry at the end.
Handle<FixedArray> new_dispatch_tables =
isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 4);
@@ -280,35 +212,13 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
new_dispatch_tables->set(dispatch_tables->length() + 2, *function_table);
new_dispatch_tables->set(dispatch_tables->length() + 3, *signature_table);
- table_obj->SetEmbedderField(WasmTableObject::kDispatchTables,
- *new_dispatch_tables);
+ table_obj->set_dispatch_tables(*new_dispatch_tables);
return new_dispatch_tables;
}
-DEFINE_OBJ_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
-
-DEFINE_OBJ_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
-
-uint32_t WasmTableObject::current_length() { return functions()->length(); }
-
-bool WasmTableObject::has_maximum_length() {
- return GetEmbedderField(kMaximum)->Number() >= 0;
-}
-
-int64_t WasmTableObject::maximum_length() {
- return static_cast<int64_t>(GetEmbedderField(kMaximum)->Number());
-}
-
-WasmTableObject* WasmTableObject::cast(Object* object) {
- DCHECK(object && object->IsJSObject());
- // TODO(titzer): brand check for WasmTableObject.
- return reinterpret_cast<WasmTableObject*>(object);
-}
-
void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
- Handle<FixedArray> dispatch_tables(
- FixedArray::cast(GetEmbedderField(kDispatchTables)));
+ Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % 4);
uint32_t old_size = functions()->length();
@@ -346,25 +256,26 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Address old_mem_start = nullptr;
uint32_t old_size = 0;
if (!old_buffer.is_null()) {
- DCHECK(old_buffer->byte_length()->IsNumber());
old_mem_start = static_cast<Address>(old_buffer->backing_store());
- old_size = old_buffer->byte_length()->Number();
+ CHECK(old_buffer->byte_length()->ToUint32(&old_size));
}
+ DCHECK_EQ(0, old_size % WasmModule::kPageSize);
+ uint32_t old_pages = old_size / WasmModule::kPageSize;
DCHECK_GE(std::numeric_limits<uint32_t>::max(),
old_size + pages * WasmModule::kPageSize);
- uint32_t new_size = old_size + pages * WasmModule::kPageSize;
- if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
- FLAG_wasm_max_mem_pages * WasmModule::kPageSize < new_size) {
+ if (old_pages > max_pages || pages > max_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
// TODO(gdeepti): Change the protection here instead of allocating a new
// buffer before guard regions are turned on, see issue #5886.
- const bool enable_guard_regions =
- (old_buffer.is_null() && EnableGuardRegions()) ||
- (!old_buffer.is_null() && old_buffer->has_guard_region());
- Handle<JSArrayBuffer> new_buffer =
- NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ const bool enable_guard_regions = old_buffer.is_null()
+ ? EnableGuardRegions()
+ : old_buffer->has_guard_region();
+ size_t new_size =
+ static_cast<size_t>(old_pages + pages) * WasmModule::kPageSize;
+ Handle<JSArrayBuffer> new_buffer = NewArrayBuffer(
+ isolate, new_size, enable_guard_regions, IsShared(old_buffer));
if (new_buffer.is_null()) return new_buffer;
Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
memcpy(new_mem_start, old_mem_start, old_size);
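// Illustrative sketch (standalone, not part of the V8 sources in this diff):
// the overflow-safe growth check used above. Working in whole pages avoids the
// 32-bit byte-count overflow that the old "new_size <= old_size" test had to
// guard against. GrowPages and the sample page counts are invented.
#include <cstdint>
#include <iostream>

constexpr uint32_t kPageSizeSketch = 64 * 1024;

// Returns the new size in bytes, or 0 if growing by {pages} is not allowed.
uint64_t GrowPages(uint32_t old_pages, uint32_t pages, uint32_t max_pages) {
  if (old_pages > max_pages || pages > max_pages - old_pages) return 0;
  return static_cast<uint64_t>(old_pages + pages) * kPageSizeSketch;
}

int main() {
  std::cout << GrowPages(1, 1, 1024) << "\n";     // ok: 131072
  std::cout << GrowPages(1024, 1, 1024) << "\n";  // rejected: 0
  // 4 GiB result: would overflow a uint32_t byte count, fine as a 64-bit size.
  std::cout << GrowPages(32768, 32768, 65536) << "\n";
  return 0;
}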
@@ -404,143 +315,96 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
int32_t maximum) {
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor());
- Handle<JSObject> memory_obj =
- isolate->factory()->NewJSObject(memory_ctor, TENURED);
- memory_obj->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
- buffer.is_null() ? memory_obj->SetEmbedderField(
- kArrayBuffer, isolate->heap()->undefined_value())
- : memory_obj->SetEmbedderField(kArrayBuffer, *buffer);
- Handle<Object> max = isolate->factory()->NewNumber(maximum);
- memory_obj->SetEmbedderField(kMaximum, *max);
- Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
- Object::SetProperty(memory_obj, memory_sym, memory_obj, STRICT).Check();
- return Handle<WasmMemoryObject>::cast(memory_obj);
+ auto memory_obj = Handle<WasmMemoryObject>::cast(
+ isolate->factory()->NewJSObject(memory_ctor, TENURED));
+ if (buffer.is_null()) {
+ const bool enable_guard_regions = EnableGuardRegions();
+ buffer = SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
+ enable_guard_regions);
+ }
+ memory_obj->set_array_buffer(*buffer);
+ memory_obj->set_maximum_pages(maximum);
+ return memory_obj;
}
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer,
- JSArrayBuffer)
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, instances_link, kInstancesLink,
- WasmInstanceWrapper)
-
uint32_t WasmMemoryObject::current_pages() {
uint32_t byte_length;
- CHECK(buffer()->byte_length()->ToUint32(&byte_length));
+ CHECK(array_buffer()->byte_length()->ToUint32(&byte_length));
return byte_length / wasm::WasmModule::kPageSize;
}
-bool WasmMemoryObject::has_maximum_pages() {
- return GetEmbedderField(kMaximum)->Number() >= 0;
-}
-
-int32_t WasmMemoryObject::maximum_pages() {
- return static_cast<int32_t>(GetEmbedderField(kMaximum)->Number());
-}
-
-WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
- DCHECK(object && object->IsJSObject());
- // TODO(titzer): brand check for WasmMemoryObject.
- return reinterpret_cast<WasmMemoryObject*>(object);
-}
-
void WasmMemoryObject::AddInstance(Isolate* isolate,
+ Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> instance) {
- Handle<WasmInstanceWrapper> instance_wrapper =
- handle(instance->instance_wrapper());
- if (has_instances_link()) {
- Handle<WasmInstanceWrapper> current_wrapper(instances_link());
- DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*current_wrapper));
- DCHECK(!current_wrapper->has_previous());
- instance_wrapper->set_next_wrapper(*current_wrapper);
- current_wrapper->set_previous_wrapper(*instance_wrapper);
+ Handle<WeakFixedArray> old_instances =
+ memory->has_instances()
+ ? Handle<WeakFixedArray>(memory->instances(), isolate)
+ : Handle<WeakFixedArray>::null();
+ Handle<WeakFixedArray> new_instances =
+ WeakFixedArray::Add(old_instances, instance);
+ memory->set_instances(*new_instances);
+}
+
+void WasmMemoryObject::RemoveInstance(Isolate* isolate,
+ Handle<WasmMemoryObject> memory,
+ Handle<WasmInstanceObject> instance) {
+ if (memory->has_instances()) {
+ memory->instances()->Remove(instance);
}
- set_instances_link(*instance_wrapper);
-}
-
-void WasmMemoryObject::ResetInstancesLink(Isolate* isolate) {
- Handle<Object> undefined = isolate->factory()->undefined_value();
- SetEmbedderField(kInstancesLink, *undefined);
}
// static
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
- Handle<JSArrayBuffer> old_buffer;
+ Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer());
uint32_t old_size = 0;
- Address old_mem_start = nullptr;
- if (memory_object->has_buffer()) {
- old_buffer = handle(memory_object->buffer());
- old_size = old_buffer->byte_length()->Number();
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- }
+ CHECK(old_buffer->byte_length()->ToUint32(&old_size));
Handle<JSArrayBuffer> new_buffer;
   // Return the current size if growing by 0.
if (pages == 0) {
// Even for pages == 0, we need to attach a new JSArrayBuffer with the same
// backing store and neuter the old one to be spec compliant.
- if (!old_buffer.is_null() && old_size != 0) {
+ if (old_size != 0) {
new_buffer = SetupArrayBuffer(
isolate, old_buffer->allocation_base(),
old_buffer->allocation_length(), old_buffer->backing_store(),
- old_size, old_buffer->is_external(), old_buffer->has_guard_region());
- memory_object->set_buffer(*new_buffer);
+ old_size, old_buffer->is_external(), old_buffer->has_guard_region(),
+ IsShared(old_buffer));
+ memory_object->set_array_buffer(*new_buffer);
}
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
}
- if (!memory_object->has_instances_link()) {
- // Memory object does not have an instance associated with it, just grow
- uint32_t max_pages;
- if (memory_object->has_maximum_pages()) {
- max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
- if (FLAG_wasm_max_mem_pages < max_pages) return -1;
- } else {
- max_pages = FLAG_wasm_max_mem_pages;
- }
- new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, max_pages);
- if (new_buffer.is_null()) return -1;
+
+ uint32_t max_pages;
+ if (memory_object->has_maximum_pages()) {
+ max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
+ if (FLAG_wasm_max_mem_pages < max_pages) return -1;
} else {
- Handle<WasmInstanceWrapper> instance_wrapper(
- memory_object->instances_link());
- DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
- DCHECK(instance_wrapper->has_instance());
- Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
- DCHECK(IsWasmInstance(*instance));
- uint32_t max_pages = instance->GetMaxMemoryPages();
-
- // Grow memory object buffer and update instances associated with it.
- new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, max_pages);
- if (new_buffer.is_null()) return -1;
- DCHECK(!instance_wrapper->has_previous());
- SetInstanceMemory(isolate, instance, new_buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
- while (instance_wrapper->has_next()) {
- instance_wrapper = instance_wrapper->next_wrapper();
- DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
- Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
- DCHECK(IsWasmInstance(*instance));
+ max_pages = FLAG_wasm_max_mem_pages;
+ }
+ new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, max_pages);
+ if (new_buffer.is_null()) return -1;
+
+ if (memory_object->has_instances()) {
+ Address old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
+ for (int i = 0; i < instances->Length(); i++) {
+ Object* elem = instances->Get(i);
+ if (!elem->IsWasmInstanceObject()) continue;
+ Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
+ isolate);
SetInstanceMemory(isolate, instance, new_buffer);
UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
}
}
- memory_object->set_buffer(*new_buffer);
+
+ memory_object->set_array_buffer(*new_buffer);
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
return old_size / WasmModule::kPageSize;
}
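// Illustrative sketch (standalone, not part of the V8 sources in this diff):
// the effect of replacing the hand-rolled WasmInstanceWrapper linked list with
// a weak collection of instances. Growing the memory updates every instance
// that is still alive without keeping any of them alive; std::weak_ptr stands
// in for the WeakFixedArray used above, and all names are invented.
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct InstanceSketch {
  size_t memory_size = 0;
};

struct MemorySketch {
  std::vector<std::weak_ptr<InstanceSketch>> instances;  // weak: no ownership

  void AddInstance(const std::shared_ptr<InstanceSketch>& instance) {
    instances.push_back(instance);
  }

  void Grow(size_t new_size) {
    // Update every instance that is still alive; skip collected entries,
    // mirroring the IsWasmInstanceObject check in the loop above.
    for (auto& weak : instances) {
      if (auto instance = weak.lock()) instance->memory_size = new_size;
    }
  }
};

int main() {
  MemorySketch memory;
  auto a = std::make_shared<InstanceSketch>();
  {
    auto b = std::make_shared<InstanceSketch>();
    memory.AddInstance(a);
    memory.AddInstance(b);
  }  // b is gone; its weak entry is simply skipped on the next Grow().
  memory.Grow(128 * 1024);
  std::cout << a->memory_size << "\n";  // 131072
  return 0;
}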
-DEFINE_OBJ_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
- WasmCompiledModule)
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, globals_buffer,
- kGlobalsArrayBuffer, JSArrayBuffer)
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, memory_buffer,
- kMemoryArrayBuffer, JSArrayBuffer)
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
- WasmMemoryObject)
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
- WasmDebugInfo)
-DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, instance_wrapper,
- kWasmMemInstanceWrapper, WasmInstanceWrapper)
-
WasmModuleObject* WasmInstanceObject::module_object() {
return *compiled_module()->wasm_module();
}
@@ -555,50 +419,17 @@ Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
return new_info;
}
-WasmInstanceObject* WasmInstanceObject::cast(Object* object) {
- DCHECK(IsWasmInstanceObject(object));
- return reinterpret_cast<WasmInstanceObject*>(object);
-}
-
-bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
- if (!object->IsJSObject()) return false;
-
- JSObject* obj = JSObject::cast(object);
- Isolate* isolate = obj->GetIsolate();
- if (obj->GetEmbedderFieldCount() != kFieldCount) {
- return false;
- }
-
- Object* mem = obj->GetEmbedderField(kMemoryArrayBuffer);
- if (!(mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) ||
- !WasmCompiledModule::IsWasmCompiledModule(
- obj->GetEmbedderField(kCompiledModule))) {
- return false;
- }
-
- // All checks passed.
- return true;
-}
-
Handle<WasmInstanceObject> WasmInstanceObject::New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
Handle<JSFunction> instance_cons(
isolate->native_context()->wasm_instance_constructor());
Handle<JSObject> instance_object =
isolate->factory()->NewJSObject(instance_cons, TENURED);
- instance_object->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
- Handle<Symbol> instance_sym(isolate->native_context()->wasm_instance_sym());
- Object::SetProperty(instance_object, instance_sym, instance_object, STRICT)
- .Check();
Handle<WasmInstanceObject> instance(
reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
- instance->SetEmbedderField(kCompiledModule, *compiled_module);
- instance->SetEmbedderField(kMemoryObject, isolate->heap()->undefined_value());
- Handle<WasmInstanceWrapper> instance_wrapper =
- WasmInstanceWrapper::New(isolate, instance);
- instance->SetEmbedderField(kWasmMemInstanceWrapper, *instance_wrapper);
+ instance->set_compiled_module(*compiled_module);
return instance;
}
@@ -654,53 +485,72 @@ uint32_t WasmInstanceObject::GetMaxMemoryPages() {
return FLAG_wasm_max_mem_pages;
}
-WasmInstanceObject* WasmExportedFunction::instance() {
- return WasmInstanceObject::cast(GetEmbedderField(kInstance));
-}
+bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
+ if (!object->IsJSFunction()) return false;
+ Handle<JSFunction> js_function(JSFunction::cast(object));
+ if (Code::JS_TO_WASM_FUNCTION != js_function->code()->kind()) return false;
-int WasmExportedFunction::function_index() {
- int32_t func_index;
- CHECK(GetEmbedderField(kIndex)->ToInt32(&func_index));
- return func_index;
+ Handle<Symbol> symbol(
+ js_function->GetIsolate()->factory()->wasm_instance_symbol());
+ MaybeHandle<Object> maybe_result =
+ JSObject::GetPropertyOrElement(js_function, symbol);
+ Handle<Object> result;
+ if (!maybe_result.ToHandle(&result)) return false;
+ return result->IsWasmInstanceObject();
}
WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
- DCHECK(object && object->IsJSFunction());
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
- JSFunction::cast(object)->code()->kind());
- // TODO(titzer): brand check for WasmExportedFunction.
+ DCHECK(IsWasmExportedFunction(object));
return reinterpret_cast<WasmExportedFunction*>(object);
}
+WasmInstanceObject* WasmExportedFunction::instance() {
+ DisallowHeapAllocation no_allocation;
+ Handle<Symbol> symbol(GetIsolate()->factory()->wasm_instance_symbol());
+ MaybeHandle<Object> result =
+ JSObject::GetPropertyOrElement(handle(this), symbol);
+ return WasmInstanceObject::cast(*(result.ToHandleChecked()));
+}
+
+int WasmExportedFunction::function_index() {
+ DisallowHeapAllocation no_allocation;
+ Handle<Symbol> symbol = GetIsolate()->factory()->wasm_function_index_symbol();
+ MaybeHandle<Object> result =
+ JSObject::GetPropertyOrElement(handle(this), symbol);
+ return result.ToHandleChecked()->Number();
+}
+
Handle<WasmExportedFunction> WasmExportedFunction::New(
Isolate* isolate, Handle<WasmInstanceObject> instance,
MaybeHandle<String> maybe_name, int func_index, int arity,
Handle<Code> export_wrapper) {
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
Handle<String> name;
- if (maybe_name.is_null()) {
+ if (!maybe_name.ToHandle(&name)) {
EmbeddedVector<char, 16> buffer;
int length = SNPrintF(buffer, "%d", func_index);
name = isolate->factory()
->NewStringFromOneByte(
Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
- } else {
- name = maybe_name.ToHandleChecked();
}
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
shared->set_length(arity);
shared->set_internal_formal_parameter_count(arity);
- Handle<JSFunction> function = isolate->factory()->NewFunction(
- isolate->wasm_function_map(), name, export_wrapper);
- function->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
+ Handle<JSFunction> js_function = isolate->factory()->NewFunction(
+ isolate->sloppy_function_map(), name, export_wrapper);
+
+ js_function->set_shared(*shared);
+ Handle<Symbol> instance_symbol(isolate->factory()->wasm_instance_symbol());
+ JSObject::AddProperty(js_function, instance_symbol, instance, DONT_ENUM);
- function->set_shared(*shared);
+ Handle<Symbol> function_index_symbol(
+ isolate->factory()->wasm_function_index_symbol());
+ JSObject::AddProperty(js_function, function_index_symbol,
+ isolate->factory()->NewNumber(func_index), DONT_ENUM);
- function->SetEmbedderField(kInstance, *instance);
- function->SetEmbedderField(kIndex, Smi::FromInt(func_index));
- return Handle<WasmExportedFunction>::cast(function);
+ return Handle<WasmExportedFunction>::cast(js_function);
}
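// Illustrative sketch (standalone, not part of the V8 sources in this diff):
// the brand-check pattern used by IsWasmExportedFunction above, i.e. tagging
// the function with private symbol-keyed properties instead of embedder
// fields. A string-keyed property bag stands in for JS objects and symbols;
// all names and values are invented.
#include <iostream>
#include <map>
#include <string>

struct FunctionObjectSketch {
  std::map<std::string, int> properties;  // "symbol" -> value
};

const std::string kInstanceKey = "wasm_instance_symbol";
const std::string kFuncIndexKey = "wasm_function_index_symbol";

FunctionObjectSketch MakeExportedFunction(int instance_id, int func_index) {
  FunctionObjectSketch fn;
  fn.properties[kInstanceKey] = instance_id;
  fn.properties[kFuncIndexKey] = func_index;
  return fn;
}

bool IsExportedFunction(const FunctionObjectSketch& fn) {
  // Brand check: only exported wasm functions carry the instance property.
  return fn.properties.count(kInstanceKey) != 0;
}

int main() {
  FunctionObjectSketch plain;  // ordinary function: no brand
  FunctionObjectSketch exported = MakeExportedFunction(/*instance_id=*/7, 3);
  std::cout << IsExportedFunction(plain) << "\n";           // 0
  std::cout << IsExportedFunction(exported) << "\n";        // 1
  std::cout << exported.properties[kFuncIndexKey] << "\n";  // 3
  return 0;
}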
bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
@@ -708,16 +558,16 @@ bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
FixedArray* arr = FixedArray::cast(object);
if (arr->length() != kFieldCount) return false;
Isolate* isolate = arr->GetIsolate();
- if (!arr->get(kModuleWrapper)->IsForeign()) return false;
- if (!arr->get(kModuleBytes)->IsUndefined(isolate) &&
- !arr->get(kModuleBytes)->IsSeqOneByteString())
+ if (!arr->get(kModuleWrapperIndex)->IsForeign()) return false;
+ if (!arr->get(kModuleBytesIndex)->IsUndefined(isolate) &&
+ !arr->get(kModuleBytesIndex)->IsSeqOneByteString())
return false;
- if (!arr->get(kScript)->IsScript()) return false;
- if (!arr->get(kAsmJsOffsetTable)->IsUndefined(isolate) &&
- !arr->get(kAsmJsOffsetTable)->IsByteArray())
+ if (!arr->get(kScriptIndex)->IsScript()) return false;
+ if (!arr->get(kAsmJsOffsetTableIndex)->IsUndefined(isolate) &&
+ !arr->get(kAsmJsOffsetTableIndex)->IsByteArray())
return false;
- if (!arr->get(kBreakPointInfos)->IsUndefined(isolate) &&
- !arr->get(kBreakPointInfos)->IsFixedArray())
+ if (!arr->get(kBreakPointInfosIndex)->IsUndefined(isolate) &&
+ !arr->get(kBreakPointInfosIndex)->IsFixedArray())
return false;
return true;
}
@@ -734,41 +584,34 @@ wasm::WasmModule* WasmSharedModuleData::module() {
// a Managed<WasmModule> object, as well as cases when it's managed
// by the embedder. CcTests fall into the latter case.
return *(reinterpret_cast<wasm::WasmModule**>(
- Foreign::cast(get(kModuleWrapper))->foreign_address()));
+ Foreign::cast(get(kModuleWrapperIndex))->foreign_address()));
}
-DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, module_bytes, kModuleBytes,
- SeqOneByteString);
-DEFINE_ARR_GETTER(WasmSharedModuleData, script, kScript, Script);
-DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, asm_js_offset_table,
- kAsmJsOffsetTable, ByteArray);
-DEFINE_OPTIONAL_ARR_GETTER(WasmSharedModuleData, breakpoint_infos,
- kBreakPointInfos, FixedArray);
-DEFINE_OPTIONAL_ARR_GETTER(WasmSharedModuleData, lazy_compilation_orchestrator,
- kLazyCompilationOrchestrator, Foreign);
-
Handle<WasmSharedModuleData> WasmSharedModuleData::New(
Isolate* isolate, Handle<Foreign> module_wrapper,
Handle<SeqOneByteString> module_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table) {
Handle<FixedArray> arr =
isolate->factory()->NewFixedArray(kFieldCount, TENURED);
- arr->set(kWrapperTracerHeader, Smi::kZero);
- arr->set(kModuleWrapper, *module_wrapper);
+ arr->set(kModuleWrapperIndex, *module_wrapper);
if (!module_bytes.is_null()) {
- arr->set(kModuleBytes, *module_bytes);
+ arr->set(kModuleBytesIndex, *module_bytes);
}
if (!script.is_null()) {
- arr->set(kScript, *script);
+ arr->set(kScriptIndex, *script);
}
if (!asm_js_offset_table.is_null()) {
- arr->set(kAsmJsOffsetTable, *asm_js_offset_table);
+ arr->set(kAsmJsOffsetTableIndex, *asm_js_offset_table);
}
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*arr));
return Handle<WasmSharedModuleData>::cast(arr);
}
+Foreign* WasmSharedModuleData::lazy_compilation_orchestrator() {
+ return Foreign::cast(get(kLazyCompilationOrchestratorIndex));
+}
+
bool WasmSharedModuleData::is_asm_js() {
bool asm_js = module()->is_asm_js();
DCHECK_EQ(asm_js, script()->IsUserJavaScript());
@@ -778,7 +621,7 @@ bool WasmSharedModuleData::is_asm_js() {
void WasmSharedModuleData::ReinitializeAfterDeserialization(
Isolate* isolate, Handle<WasmSharedModuleData> shared) {
- DCHECK(shared->get(kModuleWrapper)->IsUndefined(isolate));
+ DCHECK(shared->get(kModuleWrapperIndex)->IsUndefined(isolate));
#ifdef DEBUG
// No BreakpointInfo objects should survive deserialization.
if (shared->has_breakpoint_infos()) {
@@ -788,7 +631,7 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
}
#endif
- shared->set(kBreakPointInfos, isolate->heap()->undefined_value());
+ shared->set(kBreakPointInfosIndex, isolate->heap()->undefined_value());
WasmModule* module = nullptr;
{
@@ -802,7 +645,7 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
// TODO(titzer): remember the module origin in the compiled_module
// For now, we assume serialized modules did not originate from asm.js.
ModuleResult result =
- DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+ SyncDecodeWasmModule(isolate, start, end, false, kWasmOrigin);
CHECK(result.ok());
CHECK_NOT_NULL(result.val);
// Take ownership of the WasmModule and immediately transfer it to the
@@ -813,7 +656,7 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
Handle<WasmModuleWrapper> module_wrapper =
WasmModuleWrapper::New(isolate, module);
- shared->set(kModuleWrapper, *module_wrapper);
+ shared->set(kModuleWrapperIndex, *module_wrapper);
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
@@ -858,7 +701,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
breakpoint_infos = handle(shared->breakpoint_infos(), isolate);
} else {
breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
- shared->set(kBreakPointInfos, *breakpoint_infos);
+ shared->set(kBreakPointInfosIndex, *breakpoint_infos);
}
int insert_pos =
@@ -882,7 +725,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
if (need_realloc) {
new_breakpoint_infos = isolate->factory()->NewFixedArray(
2 * breakpoint_infos->length(), TENURED);
- shared->set(kBreakPointInfos, *new_breakpoint_infos);
+ shared->set(kBreakPointInfosIndex, *new_breakpoint_infos);
// Copy over the entries [0, insert_pos).
for (int i = 0; i < insert_pos; ++i)
new_breakpoint_infos->set(i, breakpoint_infos->get(i));
@@ -932,7 +775,7 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
int func_index = compiled_module->GetContainingFunction(position);
DCHECK_LE(0, func_index);
WasmFunction& func = compiled_module->module()->functions[func_index];
- int offset_in_func = position - func.code_start_offset;
+ int offset_in_func = position - func.code.offset();
WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
}
}
@@ -944,7 +787,7 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
LazyCompilationOrchestrator* orch = new LazyCompilationOrchestrator();
Handle<Managed<LazyCompilationOrchestrator>> orch_handle =
Managed<LazyCompilationOrchestrator>::New(isolate, orch);
- shared->set(WasmSharedModuleData::kLazyCompilationOrchestrator, *orch_handle);
+ shared->set(kLazyCompilationOrchestratorIndex, *orch_handle);
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
@@ -1132,20 +975,21 @@ void WasmCompiledModule::SetGlobalsStartAddressFrom(
MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t offset, uint32_t size) {
+ WireBytesRef ref) {
// TODO(wasm): cache strings from modules if it's a performance win.
Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
isolate);
- DCHECK_GE(module_bytes->length(), offset);
- DCHECK_GE(module_bytes->length() - offset, size);
+ DCHECK_GE(module_bytes->length(), ref.end_offset());
// UTF8 validation happens at decode time.
- DCHECK(unibrow::Utf8::Validate(
- reinterpret_cast<const byte*>(module_bytes->GetCharsAddress() + offset),
- size));
- DCHECK_GE(kMaxInt, offset);
- DCHECK_GE(kMaxInt, size);
+ DCHECK(unibrow::Utf8::ValidateEncoding(
+ reinterpret_cast<const byte*>(module_bytes->GetCharsAddress() +
+ ref.offset()),
+ ref.length()));
+ DCHECK_GE(kMaxInt, ref.offset());
+ DCHECK_GE(kMaxInt, ref.length());
return isolate->factory()->NewStringFromUtf8SubString(
- module_bytes, static_cast<int>(offset), static_cast<int>(size));
+ module_bytes, static_cast<int>(ref.offset()),
+ static_cast<int>(ref.length()));
}
bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
@@ -1221,15 +1065,22 @@ uint32_t WasmCompiledModule::default_mem_size() const {
return min_mem_pages() * WasmModule::kPageSize;
}
+MaybeHandle<String> WasmCompiledModule::GetModuleNameOrNull(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ WasmModule* module = compiled_module->module();
+ if (!module->name.is_set()) return {};
+ return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, module->name);
+}
+
MaybeHandle<String> WasmCompiledModule::GetFunctionNameOrNull(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
uint32_t func_index) {
DCHECK_LT(func_index, compiled_module->module()->functions.size());
WasmFunction& function = compiled_module->module()->functions[func_index];
- DCHECK_IMPLIES(function.name_offset == 0, function.name_length == 0);
- if (!function.name_offset) return {};
+ if (!function.name.is_set()) return {};
return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
- isolate, compiled_module, function.name_offset, function.name_length);
+ isolate, compiled_module, function.name);
}
Handle<String> WasmCompiledModule::GetFunctionName(
@@ -1246,17 +1097,17 @@ Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
DCHECK_GT(module()->functions.size(), func_index);
WasmFunction& function = module()->functions[func_index];
SeqOneByteString* bytes = module_bytes();
- DCHECK_GE(bytes->length(), function.name_offset);
- DCHECK_GE(bytes->length() - function.name_offset, function.name_length);
- return Vector<const uint8_t>(bytes->GetCharsAddress() + function.name_offset,
- function.name_length);
+ DCHECK_GE(bytes->length(), function.name.end_offset());
+ return Vector<const uint8_t>(
+ bytes->GetCharsAddress() + function.name.offset(),
+ function.name.length());
}
int WasmCompiledModule::GetFunctionOffset(uint32_t func_index) {
std::vector<WasmFunction>& functions = module()->functions;
if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
- DCHECK_GE(kMaxInt, functions[func_index].code_start_offset);
- return static_cast<int>(functions[func_index].code_start_offset);
+ DCHECK_GE(kMaxInt, functions[func_index].code.offset());
+ return static_cast<int>(functions[func_index].code.offset());
}
int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
@@ -1268,7 +1119,7 @@ int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
if (right == 0) return false;
while (right - left > 1) {
int mid = left + (right - left) / 2;
- if (functions[mid].code_start_offset <= byte_offset) {
+ if (functions[mid].code.offset() <= byte_offset) {
left = mid;
} else {
right = mid;
@@ -1276,8 +1127,8 @@ int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
}
   // If the found function does not contain the given position, return -1.
WasmFunction& func = functions[left];
- if (byte_offset < func.code_start_offset ||
- byte_offset >= func.code_end_offset) {
+ if (byte_offset < func.code.offset() ||
+ byte_offset >= func.code.end_offset()) {
return -1;
}
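// Illustrative sketch (standalone, not part of the V8 sources in this diff):
// locating the function that contains a module-relative byte offset, using the
// same half-open [offset, end_offset) ranges as the code.offset() /
// code.end_offset() accessors above. FuncRange and the sample data are
// invented; std::upper_bound replaces the explicit left/right loop.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct FuncRange {
  uint32_t offset;
  uint32_t end_offset;
};

int ContainingFunction(const std::vector<FuncRange>& funcs, uint32_t pos) {
  // Find the last function whose start offset is <= pos.
  auto it = std::upper_bound(
      funcs.begin(), funcs.end(), pos,
      [](uint32_t p, const FuncRange& f) { return p < f.offset; });
  if (it == funcs.begin()) return -1;
  --it;
  if (pos >= it->end_offset) return -1;  // in a gap between functions
  return static_cast<int>(it - funcs.begin());
}

int main() {
  std::vector<FuncRange> funcs = {{10, 20}, {20, 35}, {40, 60}};
  std::cout << ContainingFunction(funcs, 12) << "\n";  // 0
  std::cout << ContainingFunction(funcs, 36) << "\n";  // -1 (gap)
  std::cout << ContainingFunction(funcs, 59) << "\n";  // 2
  return 0;
}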
@@ -1292,9 +1143,9 @@ bool WasmCompiledModule::GetPositionInfo(uint32_t position,
WasmFunction& function = module()->functions[func_index];
info->line = func_index;
- info->column = position - function.code_start_offset;
- info->line_start = function.code_start_offset;
- info->line_end = function.code_end_offset;
+ info->column = position - function.code.offset();
+ info->line_start = function.code.offset();
+ info->line_end = function.code.end_offset();
return true;
}
@@ -1355,8 +1206,7 @@ Handle<ByteArray> GetDecodedAsmJsOffsetTable(
for (int func = 0; func < num_functions; ++func) {
std::vector<AsmJsOffsetEntry>& func_asm_offsets = asm_offsets.val[func];
if (func_asm_offsets.empty()) continue;
- int func_offset =
- wasm_funs[num_imported_functions + func].code_start_offset;
+ int func_offset = wasm_funs[num_imported_functions + func].code.offset();
for (AsmJsOffsetEntry& e : func_asm_offsets) {
// Byte offsets must be strictly monotonously increasing:
DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
@@ -1383,7 +1233,7 @@ int WasmCompiledModule::GetAsmJsSourcePosition(
DCHECK_LT(func_index, compiled_module->module()->functions.size());
uint32_t func_code_offset =
- compiled_module->module()->functions[func_index].code_start_offset;
+ compiled_module->module()->functions[func_index].code.offset();
uint32_t total_offset = func_code_offset + byte_offset;
// Binary search for the total byte offset.
@@ -1444,17 +1294,16 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
// start_offset and end_offset are module-relative byte offsets.
uint32_t start_func_index = start.GetLineNumber();
if (start_func_index >= functions.size()) return false;
- int start_func_len = functions[start_func_index].code_end_offset -
- functions[start_func_index].code_start_offset;
+ int start_func_len = functions[start_func_index].code.length();
if (start.GetColumnNumber() > start_func_len) return false;
uint32_t start_offset =
- functions[start_func_index].code_start_offset + start.GetColumnNumber();
+ functions[start_func_index].code.offset() + start.GetColumnNumber();
uint32_t end_func_index;
uint32_t end_offset;
if (end.IsEmpty()) {
// Default: everything till the end of the Script.
end_func_index = static_cast<uint32_t>(functions.size() - 1);
- end_offset = functions[end_func_index].code_end_offset;
+ end_offset = functions[end_func_index].code.end_offset();
} else {
// If end is specified: Use it and check for valid input.
end_func_index = static_cast<uint32_t>(end.GetLineNumber());
@@ -1464,12 +1313,13 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
// next function also.
if (end.GetColumnNumber() == 0 && end_func_index > 0) {
--end_func_index;
- end_offset = functions[end_func_index].code_end_offset;
+ end_offset = functions[end_func_index].code.end_offset();
} else {
if (end_func_index >= functions.size()) return false;
end_offset =
- functions[end_func_index].code_start_offset + end.GetColumnNumber();
- if (end_offset > functions[end_func_index].code_end_offset) return false;
+ functions[end_func_index].code.offset() + end.GetColumnNumber();
+ if (end_offset > functions[end_func_index].code.end_offset())
+ return false;
}
}
@@ -1480,14 +1330,14 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
++func_idx) {
WasmFunction& func = functions[func_idx];
- if (func.code_start_offset == func.code_end_offset) continue;
+ if (func.code.length() == 0) continue;
BodyLocalDecls locals(&tmp);
- BytecodeIterator iterator(module_start + func.code_start_offset,
- module_start + func.code_end_offset, &locals);
+ BytecodeIterator iterator(module_start + func.code.offset(),
+ module_start + func.code.end_offset(), &locals);
DCHECK_LT(0u, locals.encoded_size);
for (uint32_t offset : iterator.offsets()) {
- uint32_t total_offset = func.code_start_offset + offset;
+ uint32_t total_offset = func.code.offset() + offset;
if (total_offset >= end_offset) {
DCHECK_EQ(end_func_index, func_idx);
break;
@@ -1508,7 +1358,7 @@ bool WasmCompiledModule::SetBreakPoint(
int func_index = compiled_module->GetContainingFunction(*position);
if (func_index < 0) return false;
WasmFunction& func = compiled_module->module()->functions[func_index];
- int offset_in_func = *position - func.code_start_offset;
+ int offset_in_func = *position - func.code.offset();
// According to the current design, we should only be called with valid
// breakable positions.
@@ -1562,29 +1412,3 @@ Handle<Code> WasmCompiledModule::CompileLazy(
return orch->CompileLazy(isolate, instance, caller, offset, func_index,
patch_caller);
}
-
-Handle<WasmInstanceWrapper> WasmInstanceWrapper::New(
- Isolate* isolate, Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> array =
- isolate->factory()->NewFixedArray(kWrapperPropertyCount, TENURED);
- Handle<WasmInstanceWrapper> instance_wrapper(
- reinterpret_cast<WasmInstanceWrapper*>(*array), isolate);
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(instance);
- instance_wrapper->set(kWrapperInstanceObject, *cell);
- return instance_wrapper;
-}
-
-bool WasmInstanceWrapper::IsWasmInstanceWrapper(Object* obj) {
- if (!obj->IsFixedArray()) return false;
- Handle<FixedArray> array = handle(FixedArray::cast(obj));
- if (array->length() != kWrapperPropertyCount) return false;
- if (!array->get(kWrapperInstanceObject)->IsWeakCell()) return false;
- Isolate* isolate = array->GetIsolate();
- if (!array->get(kNextInstanceWrapper)->IsUndefined(isolate) &&
- !array->get(kNextInstanceWrapper)->IsFixedArray())
- return false;
- if (!array->get(kPreviousInstanceWrapper)->IsUndefined(isolate) &&
- !array->get(kPreviousInstanceWrapper)->IsFixedArray())
- return false;
- return true;
-}
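For reference, the GetContainingFunction change above keeps the same lookup strategy, a binary search over functions sorted by code offset, now reading the offsets through the code WireBytesRef. A standalone sketch of that search, using hypothetical data and no V8 types:

#include <cassert>
#include <cstdint>
#include <vector>

struct Func { uint32_t offset; uint32_t end_offset; };

// Returns the index of the function whose [offset, end_offset) range contains
// byte_offset, or -1 if no function does. Mirrors the search in the diff.
int FindContainingFunction(const std::vector<Func>& funcs, uint32_t byte_offset) {
  int left = 0;                                // inclusive
  int right = static_cast<int>(funcs.size());  // exclusive
  if (right == 0) return -1;
  while (right - left > 1) {
    int mid = left + (right - left) / 2;
    if (funcs[mid].offset <= byte_offset) {
      left = mid;
    } else {
      right = mid;
    }
  }
  const Func& f = funcs[left];
  if (byte_offset < f.offset || byte_offset >= f.end_offset) return -1;
  return left;
}

int main() {
  std::vector<Func> funcs = {{10, 20}, {20, 35}, {40, 50}};
  assert(FindContainingFunction(funcs, 25) == 1);
  assert(FindContainingFunction(funcs, 37) == -1);  // falls into a gap
  return 0;
}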
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 00dfc60f10..ee8915317c 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -8,51 +8,64 @@
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
#include "src/objects.h"
+#include "src/objects/script.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+
+// Has to be the last include (doesn't have include guards)
+#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
namespace wasm {
class InterpretedFrame;
-struct WasmModule;
-struct WasmInstance;
class WasmInterpreter;
}
class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
-class WasmInstanceWrapper;
-#define DECLARE_CASTS(name) \
- static bool Is##name(Object* object); \
- static name* cast(Object* object)
+#define DECL_OOL_QUERY(type) static bool Is##type(Object* object);
+#define DECL_OOL_CAST(type) static type* cast(Object* object);
-#define DECLARE_GETTER(name, type) type* name()
+#define DECL_GETTER(name, type) type* name();
-#define DECLARE_ACCESSORS(name, type) \
- void set_##name(type* value); \
- DECLARE_GETTER(name, type)
+#define DECL_OPTIONAL_ACCESSORS(name, type) \
+ INLINE(bool has_##name()); \
+ DECL_ACCESSORS(name, type)
-#define DECLARE_OPTIONAL_ACCESSORS(name, type) \
- bool has_##name(); \
- DECLARE_ACCESSORS(name, type)
+#define DECL_OPTIONAL_GETTER(name, type) \
+ INLINE(bool has_##name()); \
+ DECL_GETTER(name, type)
-#define DECLARE_OPTIONAL_GETTER(name, type) \
- bool has_##name(); \
- DECLARE_GETTER(name, type)
+#define DEF_SIZE(parent) \
+ static const int kSize = parent::kHeaderSize + kFieldCount * kPointerSize; \
+ static const int kParentSize = parent::kHeaderSize; \
+ static const int kHeaderSize = kSize;
+#define DEF_OFFSET(name) \
+ static const int k##name##Offset = \
+ kSize + (k##name##Index - kFieldCount) * kPointerSize;
// Representation of a WebAssembly.Module JavaScript-level object.
class WasmModuleObject : public JSObject {
public:
- // If a second field is added, we need a kWrapperTracerHeader field as well.
- // TODO(titzer): add the brand as an embedder field instead of a property.
- enum Fields { kCompiledModule, kFieldCount };
+ DECL_CAST(WasmModuleObject)
- DECLARE_CASTS(WasmModuleObject);
+ // Shared compiled code between multiple WebAssembly.Module objects.
+ DECL_ACCESSORS(compiled_module, WasmCompiledModule)
- WasmCompiledModule* compiled_module();
+ enum { // --
+ kCompiledModuleIndex,
+ kFieldCount
+ };
+
+ DEF_SIZE(JSObject)
+ DEF_OFFSET(CompiledModule)
static Handle<WasmModuleObject> New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
@@ -61,23 +74,27 @@ class WasmModuleObject : public JSObject {
// Representation of a WebAssembly.Table JavaScript-level object.
class WasmTableObject : public JSObject {
public:
- // The 0-th field is used by the Blink Wrapper Tracer.
- // TODO(titzer): add the brand as an embedder field instead of a property.
- enum Fields {
- kWrapperTracerHeader,
- kFunctions,
- kMaximum,
- kDispatchTables,
+ DECL_CAST(WasmTableObject)
+
+ DECL_ACCESSORS(functions, FixedArray)
+ // TODO(titzer): introduce DECL_I64_ACCESSORS macro
+ DECL_ACCESSORS(maximum_length, Object)
+ DECL_ACCESSORS(dispatch_tables, FixedArray)
+
+ enum { // --
+ kFunctionsIndex,
+ kMaximumLengthIndex,
+ kDispatchTablesIndex,
kFieldCount
};
- DECLARE_CASTS(WasmTableObject);
- DECLARE_ACCESSORS(functions, FixedArray);
- DECLARE_GETTER(dispatch_tables, FixedArray);
+ DEF_SIZE(JSObject)
+ DEF_OFFSET(Functions)
+ DEF_OFFSET(MaximumLength)
+ DEF_OFFSET(DispatchTables)
- uint32_t current_length();
- bool has_maximum_length();
- int64_t maximum_length(); // Returns < 0 if no maximum.
+ inline uint32_t current_length() { return functions()->length(); }
+ inline bool has_maximum_length() { return maximum_length()->Number() >= 0; }
void grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
@@ -92,25 +109,32 @@ class WasmTableObject : public JSObject {
// Representation of a WebAssembly.Memory JavaScript-level object.
class WasmMemoryObject : public JSObject {
public:
- // The 0-th field is used by the Blink Wrapper Tracer.
- // TODO(titzer): add the brand as an embedder field instead of a property.
- enum Fields : uint8_t {
- kWrapperTracerHeader,
- kArrayBuffer,
- kMaximum,
- kInstancesLink,
+ DECL_CAST(WasmMemoryObject)
+
+ DECL_ACCESSORS(array_buffer, JSArrayBuffer)
+ DECL_INT_ACCESSORS(maximum_pages)
+ DECL_OPTIONAL_ACCESSORS(instances, WeakFixedArray)
+
+ enum { // --
+ kArrayBufferIndex,
+ kMaximumPagesIndex,
+ kInstancesIndex,
kFieldCount
};
- DECLARE_CASTS(WasmMemoryObject);
- DECLARE_OPTIONAL_ACCESSORS(buffer, JSArrayBuffer);
- DECLARE_OPTIONAL_ACCESSORS(instances_link, WasmInstanceWrapper);
-
- void AddInstance(Isolate* isolate, Handle<WasmInstanceObject> object);
- void ResetInstancesLink(Isolate* isolate);
+ DEF_SIZE(JSObject)
+ DEF_OFFSET(ArrayBuffer)
+ DEF_OFFSET(MaximumPages)
+ DEF_OFFSET(Instances)
+
+ // Add an instance to the internal (weak) list. amortized O(n).
+ static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
+ Handle<WasmInstanceObject> object);
+ // Remove an instance from the internal (weak) list. O(n).
+ static void RemoveInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
+ Handle<WasmInstanceObject> object);
uint32_t current_pages();
- bool has_maximum_pages();
- int32_t maximum_pages(); // Returns < 0 if there is no maximum.
+ inline bool has_maximum_pages() { return maximum_pages() >= 0; }
static Handle<WasmMemoryObject> New(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
@@ -119,33 +143,39 @@ class WasmMemoryObject : public JSObject {
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
};
-// Representation of a WebAssembly.Instance JavaScript-level object.
+// A WebAssembly.Instance JavaScript-level object.
class WasmInstanceObject : public JSObject {
public:
- // The 0-th field is used by the Blink Wrapper Tracer.
- // TODO(titzer): add the brand as an embedder field instead of a property.
- enum Fields {
- kWrapperTracerHeader,
- kCompiledModule,
- kMemoryObject,
- kMemoryArrayBuffer,
- kGlobalsArrayBuffer,
- kDebugInfo,
- kWasmMemInstanceWrapper,
+ DECL_CAST(WasmInstanceObject)
+
+ DECL_ACCESSORS(compiled_module, WasmCompiledModule)
+ DECL_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject)
+ DECL_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer)
+ DECL_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer)
+ DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
+ // FixedArray of all instances whose code was imported
+ DECL_OPTIONAL_ACCESSORS(directly_called_instances, FixedArray)
+
+ enum { // --
+ kCompiledModuleIndex,
+ kMemoryObjectIndex,
+ kMemoryBufferIndex,
+ kGlobalsBufferIndex,
+ kDebugInfoIndex,
+ kDirectlyCalledInstancesIndex,
kFieldCount
};
- DECLARE_CASTS(WasmInstanceObject);
-
- DECLARE_ACCESSORS(compiled_module, WasmCompiledModule);
- DECLARE_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer);
- DECLARE_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer);
- DECLARE_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject);
- DECLARE_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo);
- DECLARE_OPTIONAL_ACCESSORS(instance_wrapper, WasmInstanceWrapper);
+ DEF_SIZE(JSObject)
+ DEF_OFFSET(CompiledModule)
+ DEF_OFFSET(MemoryObject)
+ DEF_OFFSET(MemoryBuffer)
+ DEF_OFFSET(GlobalsBuffer)
+ DEF_OFFSET(DebugInfo)
+ DEF_OFFSET(DirectlyCalledInstances)
WasmModuleObject* module_object();
- wasm::WasmModule* module();
+ V8_EXPORT_PRIVATE wasm::WasmModule* module();
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
@@ -161,17 +191,15 @@ class WasmInstanceObject : public JSObject {
uint32_t GetMaxMemoryPages();
};
-// Representation of an exported WASM function.
+// A WASM function that is wrapped and exported to JavaScript.
class WasmExportedFunction : public JSFunction {
public:
- // The 0-th field is used by the Blink Wrapper Tracer.
- enum Fields { kWrapperTracerHeader, kInstance, kIndex, kFieldCount };
-
- DECLARE_CASTS(WasmExportedFunction);
-
WasmInstanceObject* instance();
int function_index();
+ static WasmExportedFunction* cast(Object* object);
+ static bool IsWasmExportedFunction(Object* object);
+
static Handle<WasmExportedFunction> New(Isolate* isolate,
Handle<WasmInstanceObject> instance,
MaybeHandle<String> maybe_name,
@@ -181,31 +209,33 @@ class WasmExportedFunction : public JSFunction {
// Information shared by all WasmCompiledModule objects for the same module.
class WasmSharedModuleData : public FixedArray {
- // The 0-th field is used by the Blink Wrapper Tracer.
- enum Fields {
- kWrapperTracerHeader,
- kModuleWrapper,
- kModuleBytes,
- kScript,
- kAsmJsOffsetTable,
- kBreakPointInfos,
- kLazyCompilationOrchestrator,
+ public:
+ DECL_OOL_QUERY(WasmSharedModuleData)
+ DECL_OOL_CAST(WasmSharedModuleData)
+
+ DECL_GETTER(module, wasm::WasmModule)
+ DECL_OPTIONAL_ACCESSORS(module_bytes, SeqOneByteString)
+ DECL_ACCESSORS(script, Script)
+ DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
+ DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
+
+ enum { // --
+ kModuleWrapperIndex,
+ kModuleBytesIndex,
+ kScriptIndex,
+ kAsmJsOffsetTableIndex,
+ kBreakPointInfosIndex,
+ kLazyCompilationOrchestratorIndex,
kFieldCount
};
- public:
- DECLARE_CASTS(WasmSharedModuleData);
-
- DECLARE_GETTER(module, wasm::WasmModule);
- DECLARE_OPTIONAL_ACCESSORS(module_bytes, SeqOneByteString);
- DECLARE_GETTER(script, Script);
- DECLARE_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray);
- DECLARE_OPTIONAL_GETTER(breakpoint_infos, FixedArray);
-
- static Handle<WasmSharedModuleData> New(
- Isolate* isolate, Handle<Foreign> module_wrapper,
- Handle<SeqOneByteString> module_bytes, Handle<Script> script,
- Handle<ByteArray> asm_js_offset_table);
+ DEF_SIZE(FixedArray)
+ DEF_OFFSET(ModuleWrapper)
+ DEF_OFFSET(ModuleBytes)
+ DEF_OFFSET(Script)
+ DEF_OFFSET(AsmJsOffsetTable)
+ DEF_OFFSET(BreakPointInfos)
+ DEF_OFFSET(LazyCompilationOrchestrator)
// Check whether this module was generated from asm.js source.
bool is_asm_js();
@@ -221,8 +251,13 @@ class WasmSharedModuleData : public FixedArray {
static void PrepareForLazyCompilation(Handle<WasmSharedModuleData>);
+ static Handle<WasmSharedModuleData> New(
+ Isolate* isolate, Handle<Foreign> module_wrapper,
+ Handle<SeqOneByteString> module_bytes, Handle<Script> script,
+ Handle<ByteArray> asm_js_offset_table);
+
private:
- DECLARE_OPTIONAL_GETTER(lazy_compilation_orchestrator, Foreign);
+ DECL_OPTIONAL_GETTER(lazy_compilation_orchestrator, Foreign)
friend class WasmCompiledModule;
};
@@ -252,7 +287,9 @@ class WasmSharedModuleData : public FixedArray {
// we embed them as objects, and they may move.
class WasmCompiledModule : public FixedArray {
public:
- enum Fields { kFieldCount };
+ enum { // --
+ kFieldCount
+ };
static WasmCompiledModule* cast(Object* fixed_array) {
SLOW_DCHECK(IsWasmCompiledModule(fixed_array));
@@ -300,13 +337,11 @@ class WasmCompiledModule : public FixedArray {
#define WCM_WASM_OBJECT(TYPE, NAME) \
WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
-#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
- public: \
- TYPE NAME() const { \
- return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
- } \
- \
- private: \
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
+ public: \
+ TYPE NAME() const { return static_cast<TYPE>(Smi::ToInt(get(kID_##NAME))); } \
+ \
+ private: \
void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
#define WCM_WEAK_LINK(TYPE, NAME) \
@@ -441,6 +476,10 @@ class WasmCompiledModule : public FixedArray {
static void ReinitializeAfterDeserialization(Isolate*,
Handle<WasmCompiledModule>);
+ // Get the module name, if set. Returns an empty handle otherwise.
+ static MaybeHandle<String> GetModuleNameOrNull(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+
// Get the function name of the function identified by the given index.
// Returns a null handle if the function is unnamed or the name is not a valid
// UTF-8 string.
@@ -494,7 +533,7 @@ class WasmCompiledModule : public FixedArray {
// string.
static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t offset, uint32_t size);
+ wasm::WireBytesRef ref);
// Get a list of all possible breakpoints within a given range of this module.
bool GetPossibleBreakpoints(const debug::Location& start,
@@ -537,26 +576,34 @@ class WasmCompiledModule : public FixedArray {
class WasmDebugInfo : public FixedArray {
public:
- // The 0-th field is used by the Blink Wrapper Tracer.
- enum Fields {
- kWrapperTracerHeader,
- kInstance,
- kInterpreterHandle,
- kInterpretedFunctions,
+ DECL_OOL_QUERY(WasmDebugInfo)
+ DECL_OOL_CAST(WasmDebugInfo)
+
+ DECL_GETTER(wasm_instance, WasmInstanceObject)
+ DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
+
+ enum {
+ kInstanceIndex, // instance object.
+ kInterpreterHandleIndex, // managed object containing the interpreter.
+ kInterpretedFunctionsIndex, // array of interpreter entry code objects.
+ kLocalsNamesIndex, // array of array of local names.
kFieldCount
};
+ DEF_SIZE(FixedArray)
+ DEF_OFFSET(Instance)
+ DEF_OFFSET(InterpreterHandle)
+ DEF_OFFSET(InterpretedFunctions)
+ DEF_OFFSET(LocalsNames)
+
static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
// Setup a WasmDebugInfo with an existing WasmInstance struct.
// Returns a pointer to the interpreter instantiated inside this
// WasmDebugInfo.
// Use for testing only.
- static wasm::WasmInterpreter* SetupForTesting(Handle<WasmInstanceObject>,
- wasm::WasmInstance*);
-
- static bool IsDebugInfo(Object*);
- static WasmDebugInfo* cast(Object*);
+ V8_EXPORT_PRIVATE static wasm::WasmInterpreter* SetupForTesting(
+ Handle<WasmInstanceObject>, wasm::WasmInstance*);
// Set a breakpoint in the given function at the given byte offset within that
// function. This will redirect all future calls to this function to the
@@ -594,8 +641,6 @@ class WasmDebugInfo : public FixedArray {
// Returns the number of calls / function frames executed in the interpreter.
uint64_t NumInterpretedCalls();
- DECLARE_GETTER(wasm_instance, WasmInstanceObject);
-
// Update the memory view of the interpreter after executing GrowMemory in
// compiled code.
void UpdateMemory(JSArrayBuffer* new_memory);
@@ -612,65 +657,76 @@ class WasmDebugInfo : public FixedArray {
int frame_index);
};
-class WasmInstanceWrapper : public FixedArray {
- public:
- static Handle<WasmInstanceWrapper> New(Isolate* isolate,
- Handle<WasmInstanceObject> instance);
- static WasmInstanceWrapper* cast(Object* fixed_array) {
- SLOW_DCHECK(IsWasmInstanceWrapper(fixed_array));
- return reinterpret_cast<WasmInstanceWrapper*>(fixed_array);
- }
- static bool IsWasmInstanceWrapper(Object* obj);
- bool has_instance() { return get(kWrapperInstanceObject)->IsWeakCell(); }
- Handle<WasmInstanceObject> instance_object() {
- Object* obj = get(kWrapperInstanceObject);
- DCHECK(obj->IsWeakCell());
- WeakCell* cell = WeakCell::cast(obj);
- DCHECK(cell->value()->IsJSObject());
- return handle(WasmInstanceObject::cast(cell->value()));
- }
- bool has_next() { return IsWasmInstanceWrapper(get(kNextInstanceWrapper)); }
- bool has_previous() {
- return IsWasmInstanceWrapper(get(kPreviousInstanceWrapper));
- }
- void set_next_wrapper(Object* obj) {
- DCHECK(IsWasmInstanceWrapper(obj));
- set(kNextInstanceWrapper, obj);
- }
- void set_previous_wrapper(Object* obj) {
- DCHECK(IsWasmInstanceWrapper(obj));
- set(kPreviousInstanceWrapper, obj);
- }
- Handle<WasmInstanceWrapper> next_wrapper() {
- Object* obj = get(kNextInstanceWrapper);
- DCHECK(IsWasmInstanceWrapper(obj));
- return handle(WasmInstanceWrapper::cast(obj));
- }
- Handle<WasmInstanceWrapper> previous_wrapper() {
- Object* obj = get(kPreviousInstanceWrapper);
- DCHECK(IsWasmInstanceWrapper(obj));
- return handle(WasmInstanceWrapper::cast(obj));
- }
- void reset_next_wrapper() { set_undefined(kNextInstanceWrapper); }
- void reset_previous_wrapper() { set_undefined(kPreviousInstanceWrapper); }
- void reset() {
- for (int kID = 0; kID < kWrapperPropertyCount; kID++) set_undefined(kID);
+// TODO(titzer): these should be moved to wasm-objects-inl.h
+CAST_ACCESSOR(WasmInstanceObject)
+CAST_ACCESSOR(WasmMemoryObject)
+CAST_ACCESSOR(WasmModuleObject)
+CAST_ACCESSOR(WasmTableObject)
+
+// WasmModuleObject
+ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
+ kCompiledModuleOffset)
+
+// WasmTableObject
+ACCESSORS(WasmTableObject, functions, FixedArray, kFunctionsOffset)
+ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
+ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
+
+// WasmMemoryObject
+ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
+SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
+ACCESSORS(WasmMemoryObject, instances, WeakFixedArray, kInstancesOffset)
+
+// WasmInstanceObject
+ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
+ kCompiledModuleOffset)
+ACCESSORS(WasmInstanceObject, memory_object, WasmMemoryObject,
+ kMemoryObjectOffset)
+ACCESSORS(WasmInstanceObject, memory_buffer, JSArrayBuffer, kMemoryBufferOffset)
+ACCESSORS(WasmInstanceObject, globals_buffer, JSArrayBuffer,
+ kGlobalsBufferOffset)
+ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo, kDebugInfoOffset)
+ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
+ kDirectlyCalledInstancesOffset)
+
+// WasmSharedModuleData
+ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
+ kModuleBytesOffset)
+ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
+ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
+ kAsmJsOffsetTableOffset)
+ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
+ kBreakPointInfosOffset)
+
+#define OPTIONAL_ACCESSOR(holder, name, offset) \
+ bool holder::has_##name() { \
+ return !READ_FIELD(this, offset)->IsUndefined(GetIsolate()); \
}
- private:
- enum {
- kWrapperInstanceObject,
- kNextInstanceWrapper,
- kPreviousInstanceWrapper,
- kWrapperPropertyCount
- };
-};
+OPTIONAL_ACCESSOR(WasmInstanceObject, debug_info, kDebugInfoOffset)
+OPTIONAL_ACCESSOR(WasmInstanceObject, memory_buffer, kMemoryBufferOffset)
+OPTIONAL_ACCESSOR(WasmInstanceObject, memory_object, kMemoryObjectOffset)
+
+OPTIONAL_ACCESSOR(WasmMemoryObject, instances, kInstancesOffset)
+
+OPTIONAL_ACCESSOR(WasmSharedModuleData, breakpoint_infos,
+ kBreakPointInfosOffset)
+OPTIONAL_ACCESSOR(WasmSharedModuleData, asm_js_offset_table,
+ kAsmJsOffsetTableOffset)
+OPTIONAL_ACCESSOR(WasmSharedModuleData, lazy_compilation_orchestrator,
+ kLazyCompilationOrchestratorOffset)
+
+ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
+
+OPTIONAL_ACCESSOR(WasmDebugInfo, locals_names, kLocalsNamesOffset)
+
+#undef DECL_OOL_QUERY
+#undef DECL_OOL_CAST
+#undef DECL_GETTER
+#undef DECL_OPTIONAL_ACCESSORS
+#undef DECL_OPTIONAL_GETTER
-#undef DECLARE_CASTS
-#undef DECLARE_GETTER
-#undef DECLARE_ACCESSORS
-#undef DECLARE_OPTIONAL_ACCESSORS
-#undef DECLARE_OPTIONAL_GETTER
+#include "src/objects/object-macros-undef.h"
} // namespace internal
} // namespace v8
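A note on the DEF_SIZE/DEF_OFFSET macros introduced in this header: the offset of field i reduces to the parent's header size plus i * kPointerSize. A standalone check of that arithmetic, with a hypothetical header size instead of the real V8 constants:

#include <cassert>

int main() {
  const int kPointerSize = static_cast<int>(sizeof(void*));
  const int kParentHeaderSize = 3 * kPointerSize;  // hypothetical JSObject header
  const int kFieldCount = 3;                       // e.g. a three-field object
  // DEF_SIZE(parent): header of the parent plus one pointer slot per field.
  const int kSize = kParentHeaderSize + kFieldCount * kPointerSize;
  for (int index = 0; index < kFieldCount; ++index) {
    // DEF_OFFSET(name): kSize + (index - kFieldCount) * kPointerSize.
    int offset = kSize + (index - kFieldCount) * kPointerSize;
    assert(offset == kParentHeaderSize + index * kPointerSize);
  }
  return 0;
}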
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 355cdf40b5..10bc69dfb2 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -3,6 +3,10 @@
// found in the LICENSE file.
#include "src/wasm/wasm-opcodes.h"
+
+#include <array>
+
+#include "src/base/template-utils.h"
#include "src/messages.h"
#include "src/runtime/runtime.h"
#include "src/signature.h"
@@ -39,6 +43,7 @@ namespace wasm {
CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
#define CASE_SIGN_OP(TYPE, name, str) \
CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
#define CASE_ALL_SIGN_OP(name, str) \
CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
@@ -48,6 +53,10 @@ namespace wasm {
CASE_SIGN_OP(I32, name##8, str "8") \
CASE_SIGN_OP(I32, name##16, str "16") \
CASE_I32_OP(name, str "32")
+#define CASE_U32_OP(name, str) \
+ CASE_I32_OP(name, str "32") \
+ CASE_UNSIGNED_OP(I32, name##8, str "8") \
+ CASE_UNSIGNED_OP(I32, name##16, str "16")
const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
switch (opcode) {
@@ -137,7 +146,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// Non-standard opcodes.
CASE_OP(Try, "try")
CASE_OP(Throw, "throw")
+ CASE_OP(Rethrow, "rethrow")
CASE_OP(Catch, "catch")
+ CASE_OP(CatchAll, "catch_all")
// asm.js-only opcodes.
CASE_F64_OP(Acos, "acos")
@@ -214,39 +225,23 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
CASE_S128_OP(Not, "not")
- CASE_S32x4_OP(Shuffle, "shuffle")
- CASE_S32x4_OP(Select, "select")
- CASE_S16x8_OP(Shuffle, "shuffle")
- CASE_S16x8_OP(Select, "select")
+ CASE_S128_OP(Select, "select")
CASE_S8x16_OP(Shuffle, "shuffle")
- CASE_S8x16_OP(Select, "select")
- CASE_S1x4_OP(And, "and")
- CASE_S1x4_OP(Or, "or")
- CASE_S1x4_OP(Xor, "xor")
- CASE_S1x4_OP(Not, "not")
CASE_S1x4_OP(AnyTrue, "any_true")
CASE_S1x4_OP(AllTrue, "all_true")
- CASE_S1x8_OP(And, "and")
- CASE_S1x8_OP(Or, "or")
- CASE_S1x8_OP(Xor, "xor")
- CASE_S1x8_OP(Not, "not")
CASE_S1x8_OP(AnyTrue, "any_true")
CASE_S1x8_OP(AllTrue, "all_true")
- CASE_S1x16_OP(And, "and")
- CASE_S1x16_OP(Or, "or")
- CASE_S1x16_OP(Xor, "xor")
- CASE_S1x16_OP(Not, "not")
CASE_S1x16_OP(AnyTrue, "any_true")
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
- CASE_L32_OP(AtomicAdd, "atomic_add")
- CASE_L32_OP(AtomicAnd, "atomic_and")
- CASE_L32_OP(AtomicCompareExchange, "atomic_cmpxchng")
- CASE_L32_OP(AtomicExchange, "atomic_xchng")
- CASE_L32_OP(AtomicOr, "atomic_or")
- CASE_L32_OP(AtomicSub, "atomic_sub")
- CASE_L32_OP(AtomicXor, "atomic_xor")
+ CASE_U32_OP(AtomicAdd, "atomic_add")
+ CASE_U32_OP(AtomicSub, "atomic_sub")
+ CASE_U32_OP(AtomicAnd, "atomic_and")
+ CASE_U32_OP(AtomicOr, "atomic_or")
+ CASE_U32_OP(AtomicXor, "atomic_xor")
+ CASE_U32_OP(AtomicExchange, "atomic_xchng")
+ CASE_U32_OP(AtomicCompareExchange, "atomic_cmpxchng")
default : return "unknown";
// clang-format on
@@ -255,11 +250,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
switch (opcode) {
-#define CHECK_PREFIX(name, opcode) \
- case k##name##Prefix: \
- return true;
+#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
FOREACH_PREFIX(CHECK_PREFIX)
#undef CHECK_PREFIX
+ return true;
default:
return false;
}
@@ -267,11 +261,10 @@ bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
switch (opcode) {
-#define CHECK_OPCODE(name, opcode, _) \
- case kExpr##name: \
- return true;
+#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
#undef CHECK_OPCODE
+ return true;
default:
return false;
}
@@ -309,106 +302,103 @@ bool IsJSCompatibleSignature(const FunctionSig* sig) {
return true;
}
+namespace {
+
#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
-enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
+enum WasmOpcodeSig : byte {
+ kSigEnum_None,
+ FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
+};
-// TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
-#define DECLARE_SIG(name, ...) \
- static ValueType kTypes_##name[] = {__VA_ARGS__}; \
- static const FunctionSig kSig_##name( \
+#define DECLARE_SIG(name, ...) \
+ constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
+ constexpr FunctionSig kSig_##name( \
1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
FOREACH_SIGNATURE(DECLARE_SIG)
#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
-static const FunctionSig* kSimpleExprSigs[] = {
+constexpr const FunctionSig* kSimpleExprSigs[] = {
nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
-#define DECLARE_SIMD_SIG_ENTRY(name, ...) &kSig_##name,
-
-static const FunctionSig* kSimdExprSigs[] = {
- nullptr, FOREACH_SIMD_SIGNATURE(DECLARE_SIMD_SIG_ENTRY)};
-
-static byte kSimpleExprSigTable[256];
-static byte kSimpleAsmjsExprSigTable[256];
-static byte kSimdExprSigTable[256];
-static byte kAtomicExprSigTable[256];
-
-// Initialize the signature table.
-static void InitSigTables() {
-#define SET_SIG_TABLE(name, opcode, sig) \
- kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
- FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
-#undef SET_SIG_TABLE
-#define SET_ASMJS_SIG_TABLE(name, opcode, sig) \
- kSimpleAsmjsExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
- FOREACH_ASMJS_COMPAT_OPCODE(SET_ASMJS_SIG_TABLE);
-#undef SET_ASMJS_SIG_TABLE
- byte simd_index;
-#define SET_SIG_TABLE(name, opcode, sig) \
- simd_index = opcode & 0xff; \
- kSimdExprSigTable[simd_index] = static_cast<int>(kSigEnum_##sig) + 1;
- FOREACH_SIMD_0_OPERAND_OPCODE(SET_SIG_TABLE)
-#undef SET_SIG_TABLE
- byte atomic_index;
-#define SET_ATOMIC_SIG_TABLE(name, opcode, sig) \
- atomic_index = opcode & 0xff; \
- kAtomicExprSigTable[atomic_index] = static_cast<int>(kSigEnum_##sig) + 1;
- FOREACH_ATOMIC_OPCODE(SET_ATOMIC_SIG_TABLE)
-#undef SET_ATOMIC_SIG_TABLE
+// The following constexpr functions are used to initialize the constant arrays
+// defined below. They must have exactly one return statement, and no switch.
+constexpr WasmOpcodeSig GetOpcodeSigIndex(byte opcode) {
+ return
+#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
+ FOREACH_SIMPLE_OPCODE(CASE)
+#undef CASE
+ kSigEnum_None;
}
-class SigTable {
- public:
- SigTable() {
- // TODO(ahaas): Move {InitSigTable} into the class.
- InitSigTables();
- }
- FunctionSig* Signature(WasmOpcode opcode) const {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
- }
- FunctionSig* AsmjsSignature(WasmOpcode opcode) const {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleAsmjsExprSigTable[static_cast<byte>(opcode)]]);
- }
- FunctionSig* SimdSignature(WasmOpcode opcode) const {
- return const_cast<FunctionSig*>(
- kSimdExprSigs[kSimdExprSigTable[static_cast<byte>(opcode & 0xff)]]);
- }
- FunctionSig* AtomicSignature(WasmOpcode opcode) const {
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kAtomicExprSigTable[static_cast<byte>(opcode & 0xff)]]);
- }
-};
+constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
+ return
+#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
+ FOREACH_ASMJS_COMPAT_OPCODE(CASE)
+#undef CASE
+ kSigEnum_None;
+}
+
+constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
+ return
+#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
+ FOREACH_SIMD_0_OPERAND_OPCODE(CASE)
+#undef CASE
+ kSigEnum_None;
+}
+
+constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
+ return
+#define CASE(name, opc, sig) opcode == (opc & 0xff) ? kSigEnum_##sig:
+ FOREACH_ATOMIC_OPCODE(CASE)
+#undef CASE
+ kSigEnum_None;
+}
+
+// gcc 4.7 - 4.9 have a bug which prohibits marking the array constexpr
+// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892).
+// TODO(clemensh): Remove this once we require gcc >= 5.0.
+#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ == 4
+#define CONSTEXPR_IF_NOT_GCC_4
+#else
+#define CONSTEXPR_IF_NOT_GCC_4 constexpr
+#endif
-static base::LazyInstance<SigTable>::type sig_table = LAZY_INSTANCE_INITIALIZER;
+CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimpleExprSigTable =
+ base::make_array<256>(GetOpcodeSigIndex);
+CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
+ base::make_array<256>(GetAsmJsOpcodeSigIndex);
+CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
+ base::make_array<256>(GetSimdOpcodeSigIndex);
+CONSTEXPR_IF_NOT_GCC_4 std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
+ base::make_array<256>(GetAtomicOpcodeSigIndex);
+
+} // namespace
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
if (opcode >> 8 == kSimdPrefix) {
- return sig_table.Get().SimdSignature(opcode);
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimdExprSigTable[opcode & 0xff]]);
} else {
- return sig_table.Get().Signature(opcode);
+ DCHECK_GT(kSimpleExprSigTable.size(), opcode);
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[opcode]]);
}
}
FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
- return sig_table.Get().AsmjsSignature(opcode);
+ DCHECK_GT(kSimpleAsmjsExprSigTable.size(), opcode);
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleAsmjsExprSigTable[opcode]]);
}
FunctionSig* WasmOpcodes::AtomicSignature(WasmOpcode opcode) {
- return sig_table.Get().AtomicSignature(opcode);
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
}
-// TODO(titzer): pull WASM_64 up to a common header.
-#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
-#define WASM_64 1
-#else
-#define WASM_64 0
-#endif
-
int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
switch (reason) {
#define TRAPREASON_TO_MESSAGE(name) \
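The signature-table rewrite above trades lazily initialized static tables for constexpr std::arrays built by base::make_array from generator functions that use a single return and no switch. A standalone sketch of the same compile-time table-building pattern; Generator and MakeTable are stand-ins here, not the real base::make_array:

#include <array>
#include <cstddef>
#include <utility>

// Generator: maps an index to a table entry; written as a single return
// expression so it is a valid constexpr function.
constexpr int Generator(std::size_t i) {
  return i == 0x6a ? 1 : i == 0x6b ? 2 : 0;
}

// Expand an index sequence and call the generator once per slot, all at
// compile time (requires C++14 for std::make_index_sequence).
template <std::size_t... I>
constexpr std::array<int, sizeof...(I)> MakeTable(std::index_sequence<I...>) {
  return {{Generator(I)...}};
}

constexpr std::array<int, 256> kTable =
    MakeTable(std::make_index_sequence<256>{});

static_assert(kTable[0x6a] == 1, "table entries are computed at compile time");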
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index a1a84366a2..7405a83ec8 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -21,10 +21,7 @@ enum ValueTypeCode {
kLocalI64 = 0x7e,
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
- kLocalS128 = 0x7b,
- kLocalS1x4 = 0x7a,
- kLocalS1x8 = 0x79,
- kLocalS1x16 = 0x78
+ kLocalS128 = 0x7b
};
// Type code for multi-value block types.
@@ -39,9 +36,6 @@ constexpr ValueType kWasmI64 = MachineRepresentation::kWord64;
constexpr ValueType kWasmF32 = MachineRepresentation::kFloat32;
constexpr ValueType kWasmF64 = MachineRepresentation::kFloat64;
constexpr ValueType kWasmS128 = MachineRepresentation::kSimd128;
-constexpr ValueType kWasmS1x4 = MachineRepresentation::kSimd1x4;
-constexpr ValueType kWasmS1x8 = MachineRepresentation::kSimd1x8;
-constexpr ValueType kWasmS1x16 = MachineRepresentation::kSimd1x16;
constexpr ValueType kWasmVar = MachineRepresentation::kTagged;
using FunctionSig = Signature<ValueType>;
@@ -54,20 +48,22 @@ using WasmCodePosition = int;
constexpr WasmCodePosition kNoCodePosition = -1;
// Control expressions and blocks.
-#define FOREACH_CONTROL_OPCODE(V) \
- V(Unreachable, 0x00, _) \
- V(Nop, 0x01, _) \
- V(Block, 0x02, _) \
- V(Loop, 0x03, _) \
- V(If, 0x004, _) \
- V(Else, 0x05, _) \
- V(Try, 0x06, _ /* eh_prototype */) \
- V(Catch, 0x07, _ /* eh_prototype */) \
- V(Throw, 0x08, _ /* eh_prototype */) \
- V(End, 0x0b, _) \
- V(Br, 0x0c, _) \
- V(BrIf, 0x0d, _) \
- V(BrTable, 0x0e, _) \
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Unreachable, 0x00, _) \
+ V(Nop, 0x01, _) \
+ V(Block, 0x02, _) \
+ V(Loop, 0x03, _) \
+ V(If, 0x004, _) \
+ V(Else, 0x05, _) \
+ V(Try, 0x06, _ /* eh_prototype */) \
+ V(Catch, 0x07, _ /* eh_prototype */) \
+ V(Throw, 0x08, _ /* eh_prototype */) \
+ V(Rethrow, 0x09, _ /* eh_prototype */) \
+ V(CatchAll, 0x0a, _ /* eh prototype */) \
+ V(End, 0x0b, _) \
+ V(Br, 0x0c, _) \
+ V(BrIf, 0x0d, _) \
+ V(BrTable, 0x0e, _) \
V(Return, 0x0f, _)
// Constants, locals, globals, and calls.
@@ -101,8 +97,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I64LoadMem16S, 0x32, l_i) \
V(I64LoadMem16U, 0x33, l_i) \
V(I64LoadMem32S, 0x34, l_i) \
- V(I64LoadMem32U, 0x35, l_i) \
- V(S128LoadMem, 0xc0, s_i)
+ V(I64LoadMem32U, 0x35, l_i)
// Store memory expressions.
#define FOREACH_STORE_MEM_OPCODE(V) \
@@ -114,8 +109,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I32StoreMem16, 0x3b, i_ii) \
V(I64StoreMem8, 0x3c, l_il) \
V(I64StoreMem16, 0x3d, l_il) \
- V(I64StoreMem32, 0x3e, l_il) \
- V(S128StoreMem, 0xc1, s_is)
+ V(I64StoreMem32, 0x3e, l_il)
// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
@@ -283,192 +277,165 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I32AsmjsUConvertF64, 0xe3, i_d)
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
- V(F32x4Splat, 0xe500, s_f) \
- V(F32x4Abs, 0xe503, s_s) \
- V(F32x4Neg, 0xe504, s_s) \
- V(F32x4RecipApprox, 0xe506, s_s) \
- V(F32x4RecipSqrtApprox, 0xe507, s_s) \
- V(F32x4Add, 0xe508, s_ss) \
- V(F32x4AddHoriz, 0xe5b9, s_ss) \
- V(F32x4Sub, 0xe509, s_ss) \
- V(F32x4Mul, 0xe50a, s_ss) \
- V(F32x4Min, 0xe50c, s_ss) \
- V(F32x4Max, 0xe50d, s_ss) \
- V(F32x4Eq, 0xe510, s1x4_ss) \
- V(F32x4Ne, 0xe511, s1x4_ss) \
- V(F32x4Lt, 0xe512, s1x4_ss) \
- V(F32x4Le, 0xe513, s1x4_ss) \
- V(F32x4Gt, 0xe514, s1x4_ss) \
- V(F32x4Ge, 0xe515, s1x4_ss) \
- V(F32x4SConvertI32x4, 0xe519, s_s) \
- V(F32x4UConvertI32x4, 0xe51a, s_s) \
- V(I32x4Splat, 0xe51b, s_i) \
- V(I32x4Neg, 0xe51e, s_s) \
- V(I32x4Add, 0xe51f, s_ss) \
- V(I32x4AddHoriz, 0xe5ba, s_ss) \
- V(I32x4Sub, 0xe520, s_ss) \
- V(I32x4Mul, 0xe521, s_ss) \
- V(I32x4MinS, 0xe522, s_ss) \
- V(I32x4MaxS, 0xe523, s_ss) \
- V(I32x4Eq, 0xe526, s1x4_ss) \
- V(I32x4Ne, 0xe527, s1x4_ss) \
- V(I32x4LtS, 0xe528, s1x4_ss) \
- V(I32x4LeS, 0xe529, s1x4_ss) \
- V(I32x4GtS, 0xe52a, s1x4_ss) \
- V(I32x4GeS, 0xe52b, s1x4_ss) \
- V(I32x4SConvertF32x4, 0xe52f, s_s) \
- V(I32x4UConvertF32x4, 0xe537, s_s) \
- V(I32x4SConvertI16x8Low, 0xe594, s_s) \
- V(I32x4SConvertI16x8High, 0xe595, s_s) \
- V(I32x4UConvertI16x8Low, 0xe596, s_s) \
- V(I32x4UConvertI16x8High, 0xe597, s_s) \
- V(I32x4MinU, 0xe530, s_ss) \
- V(I32x4MaxU, 0xe531, s_ss) \
- V(I32x4LtU, 0xe533, s1x4_ss) \
- V(I32x4LeU, 0xe534, s1x4_ss) \
- V(I32x4GtU, 0xe535, s1x4_ss) \
- V(I32x4GeU, 0xe536, s1x4_ss) \
- V(I16x8Splat, 0xe538, s_i) \
- V(I16x8Neg, 0xe53b, s_s) \
- V(I16x8Add, 0xe53c, s_ss) \
- V(I16x8AddSaturateS, 0xe53d, s_ss) \
- V(I16x8AddHoriz, 0xe5bb, s_ss) \
- V(I16x8Sub, 0xe53e, s_ss) \
- V(I16x8SubSaturateS, 0xe53f, s_ss) \
- V(I16x8Mul, 0xe540, s_ss) \
- V(I16x8MinS, 0xe541, s_ss) \
- V(I16x8MaxS, 0xe542, s_ss) \
- V(I16x8Eq, 0xe545, s1x8_ss) \
- V(I16x8Ne, 0xe546, s1x8_ss) \
- V(I16x8LtS, 0xe547, s1x8_ss) \
- V(I16x8LeS, 0xe548, s1x8_ss) \
- V(I16x8GtS, 0xe549, s1x8_ss) \
- V(I16x8GeS, 0xe54a, s1x8_ss) \
- V(I16x8AddSaturateU, 0xe54e, s_ss) \
- V(I16x8SubSaturateU, 0xe54f, s_ss) \
- V(I16x8MinU, 0xe550, s_ss) \
- V(I16x8MaxU, 0xe551, s_ss) \
- V(I16x8LtU, 0xe553, s1x8_ss) \
- V(I16x8LeU, 0xe554, s1x8_ss) \
- V(I16x8GtU, 0xe555, s1x8_ss) \
- V(I16x8GeU, 0xe556, s1x8_ss) \
- V(I16x8SConvertI32x4, 0xe598, s_ss) \
- V(I16x8UConvertI32x4, 0xe599, s_ss) \
- V(I16x8SConvertI8x16Low, 0xe59a, s_s) \
- V(I16x8SConvertI8x16High, 0xe59b, s_s) \
- V(I16x8UConvertI8x16Low, 0xe59c, s_s) \
- V(I16x8UConvertI8x16High, 0xe59d, s_s) \
- V(I8x16Splat, 0xe557, s_i) \
- V(I8x16Neg, 0xe55a, s_s) \
- V(I8x16Add, 0xe55b, s_ss) \
- V(I8x16AddSaturateS, 0xe55c, s_ss) \
- V(I8x16Sub, 0xe55d, s_ss) \
- V(I8x16SubSaturateS, 0xe55e, s_ss) \
- V(I8x16Mul, 0xe55f, s_ss) \
- V(I8x16MinS, 0xe560, s_ss) \
- V(I8x16MaxS, 0xe561, s_ss) \
- V(I8x16Eq, 0xe564, s1x16_ss) \
- V(I8x16Ne, 0xe565, s1x16_ss) \
- V(I8x16LtS, 0xe566, s1x16_ss) \
- V(I8x16LeS, 0xe567, s1x16_ss) \
- V(I8x16GtS, 0xe568, s1x16_ss) \
- V(I8x16GeS, 0xe569, s1x16_ss) \
- V(I8x16AddSaturateU, 0xe56d, s_ss) \
- V(I8x16SubSaturateU, 0xe56e, s_ss) \
- V(I8x16MinU, 0xe56f, s_ss) \
- V(I8x16MaxU, 0xe570, s_ss) \
- V(I8x16LtU, 0xe572, s1x16_ss) \
- V(I8x16LeU, 0xe573, s1x16_ss) \
- V(I8x16GtU, 0xe574, s1x16_ss) \
- V(I8x16GeU, 0xe575, s1x16_ss) \
- V(I8x16SConvertI16x8, 0xe59e, s_ss) \
- V(I8x16UConvertI16x8, 0xe59f, s_ss) \
- V(S128And, 0xe576, s_ss) \
- V(S128Or, 0xe577, s_ss) \
- V(S128Xor, 0xe578, s_ss) \
- V(S128Not, 0xe579, s_s) \
- V(S32x4Select, 0xe52c, s_s1x4ss) \
- V(S16x8Select, 0xe54b, s_s1x8ss) \
- V(S8x16Select, 0xe56a, s_s1x16ss) \
- V(S1x4And, 0xe580, s1x4_s1x4s1x4) \
- V(S1x4Or, 0xe581, s1x4_s1x4s1x4) \
- V(S1x4Xor, 0xe582, s1x4_s1x4s1x4) \
- V(S1x4Not, 0xe583, s1x4_s1x4) \
- V(S1x4AnyTrue, 0xe584, i_s1x4) \
- V(S1x4AllTrue, 0xe585, i_s1x4) \
- V(S1x8And, 0xe586, s1x8_s1x8s1x8) \
- V(S1x8Or, 0xe587, s1x8_s1x8s1x8) \
- V(S1x8Xor, 0xe588, s1x8_s1x8s1x8) \
- V(S1x8Not, 0xe589, s1x8_s1x8) \
- V(S1x8AnyTrue, 0xe58a, i_s1x8) \
- V(S1x8AllTrue, 0xe58b, i_s1x8) \
- V(S1x16And, 0xe58c, s1x16_s1x16s1x16) \
- V(S1x16Or, 0xe58d, s1x16_s1x16s1x16) \
- V(S1x16Xor, 0xe58e, s1x16_s1x16s1x16) \
- V(S1x16Not, 0xe58f, s1x16_s1x16) \
- V(S1x16AnyTrue, 0xe590, i_s1x16) \
- V(S1x16AllTrue, 0xe591, i_s1x16)
+ V(F32x4Splat, 0xfd00, s_f) \
+ V(F32x4Abs, 0xfd03, s_s) \
+ V(F32x4Neg, 0xfd04, s_s) \
+ V(F32x4RecipApprox, 0xfd06, s_s) \
+ V(F32x4RecipSqrtApprox, 0xfd07, s_s) \
+ V(F32x4Add, 0xfd08, s_ss) \
+ V(F32x4AddHoriz, 0xfdb9, s_ss) \
+ V(F32x4Sub, 0xfd09, s_ss) \
+ V(F32x4Mul, 0xfd0a, s_ss) \
+ V(F32x4Min, 0xfd0c, s_ss) \
+ V(F32x4Max, 0xfd0d, s_ss) \
+ V(F32x4Eq, 0xfd10, s_ss) \
+ V(F32x4Ne, 0xfd11, s_ss) \
+ V(F32x4Lt, 0xfd12, s_ss) \
+ V(F32x4Le, 0xfd13, s_ss) \
+ V(F32x4Gt, 0xfd14, s_ss) \
+ V(F32x4Ge, 0xfd15, s_ss) \
+ V(F32x4SConvertI32x4, 0xfd19, s_s) \
+ V(F32x4UConvertI32x4, 0xfd1a, s_s) \
+ V(I32x4Splat, 0xfd1b, s_i) \
+ V(I32x4Neg, 0xfd1e, s_s) \
+ V(I32x4Add, 0xfd1f, s_ss) \
+ V(I32x4AddHoriz, 0xfdba, s_ss) \
+ V(I32x4Sub, 0xfd20, s_ss) \
+ V(I32x4Mul, 0xfd21, s_ss) \
+ V(I32x4MinS, 0xfd22, s_ss) \
+ V(I32x4MaxS, 0xfd23, s_ss) \
+ V(I32x4Eq, 0xfd26, s_ss) \
+ V(I32x4Ne, 0xfd27, s_ss) \
+ V(I32x4LtS, 0xfd28, s_ss) \
+ V(I32x4LeS, 0xfd29, s_ss) \
+ V(I32x4GtS, 0xfd2a, s_ss) \
+ V(I32x4GeS, 0xfd2b, s_ss) \
+ V(I32x4SConvertF32x4, 0xfd2f, s_s) \
+ V(I32x4UConvertF32x4, 0xfd37, s_s) \
+ V(I32x4SConvertI16x8Low, 0xfd94, s_s) \
+ V(I32x4SConvertI16x8High, 0xfd95, s_s) \
+ V(I32x4UConvertI16x8Low, 0xfd96, s_s) \
+ V(I32x4UConvertI16x8High, 0xfd97, s_s) \
+ V(I32x4MinU, 0xfd30, s_ss) \
+ V(I32x4MaxU, 0xfd31, s_ss) \
+ V(I32x4LtU, 0xfd33, s_ss) \
+ V(I32x4LeU, 0xfd34, s_ss) \
+ V(I32x4GtU, 0xfd35, s_ss) \
+ V(I32x4GeU, 0xfd36, s_ss) \
+ V(I16x8Splat, 0xfd38, s_i) \
+ V(I16x8Neg, 0xfd3b, s_s) \
+ V(I16x8Add, 0xfd3c, s_ss) \
+ V(I16x8AddSaturateS, 0xfd3d, s_ss) \
+ V(I16x8AddHoriz, 0xfdbb, s_ss) \
+ V(I16x8Sub, 0xfd3e, s_ss) \
+ V(I16x8SubSaturateS, 0xfd3f, s_ss) \
+ V(I16x8Mul, 0xfd40, s_ss) \
+ V(I16x8MinS, 0xfd41, s_ss) \
+ V(I16x8MaxS, 0xfd42, s_ss) \
+ V(I16x8Eq, 0xfd45, s_ss) \
+ V(I16x8Ne, 0xfd46, s_ss) \
+ V(I16x8LtS, 0xfd47, s_ss) \
+ V(I16x8LeS, 0xfd48, s_ss) \
+ V(I16x8GtS, 0xfd49, s_ss) \
+ V(I16x8GeS, 0xfd4a, s_ss) \
+ V(I16x8AddSaturateU, 0xfd4e, s_ss) \
+ V(I16x8SubSaturateU, 0xfd4f, s_ss) \
+ V(I16x8MinU, 0xfd50, s_ss) \
+ V(I16x8MaxU, 0xfd51, s_ss) \
+ V(I16x8LtU, 0xfd53, s_ss) \
+ V(I16x8LeU, 0xfd54, s_ss) \
+ V(I16x8GtU, 0xfd55, s_ss) \
+ V(I16x8GeU, 0xfd56, s_ss) \
+ V(I16x8SConvertI32x4, 0xfd98, s_ss) \
+ V(I16x8UConvertI32x4, 0xfd99, s_ss) \
+ V(I16x8SConvertI8x16Low, 0xfd9a, s_s) \
+ V(I16x8SConvertI8x16High, 0xfd9b, s_s) \
+ V(I16x8UConvertI8x16Low, 0xfd9c, s_s) \
+ V(I16x8UConvertI8x16High, 0xfd9d, s_s) \
+ V(I8x16Splat, 0xfd57, s_i) \
+ V(I8x16Neg, 0xfd5a, s_s) \
+ V(I8x16Add, 0xfd5b, s_ss) \
+ V(I8x16AddSaturateS, 0xfd5c, s_ss) \
+ V(I8x16Sub, 0xfd5d, s_ss) \
+ V(I8x16SubSaturateS, 0xfd5e, s_ss) \
+ V(I8x16Mul, 0xfd5f, s_ss) \
+ V(I8x16MinS, 0xfd60, s_ss) \
+ V(I8x16MaxS, 0xfd61, s_ss) \
+ V(I8x16Eq, 0xfd64, s_ss) \
+ V(I8x16Ne, 0xfd65, s_ss) \
+ V(I8x16LtS, 0xfd66, s_ss) \
+ V(I8x16LeS, 0xfd67, s_ss) \
+ V(I8x16GtS, 0xfd68, s_ss) \
+ V(I8x16GeS, 0xfd69, s_ss) \
+ V(I8x16AddSaturateU, 0xfd6d, s_ss) \
+ V(I8x16SubSaturateU, 0xfd6e, s_ss) \
+ V(I8x16MinU, 0xfd6f, s_ss) \
+ V(I8x16MaxU, 0xfd70, s_ss) \
+ V(I8x16LtU, 0xfd72, s_ss) \
+ V(I8x16LeU, 0xfd73, s_ss) \
+ V(I8x16GtU, 0xfd74, s_ss) \
+ V(I8x16GeU, 0xfd75, s_ss) \
+ V(I8x16SConvertI16x8, 0xfd9e, s_ss) \
+ V(I8x16UConvertI16x8, 0xfd9f, s_ss) \
+ V(S128And, 0xfd76, s_ss) \
+ V(S128Or, 0xfd77, s_ss) \
+ V(S128Xor, 0xfd78, s_ss) \
+ V(S128Not, 0xfd79, s_s) \
+ V(S128Select, 0xfd2c, s_sss) \
+ V(S1x4AnyTrue, 0xfd84, i_s) \
+ V(S1x4AllTrue, 0xfd85, i_s) \
+ V(S1x8AnyTrue, 0xfd8a, i_s) \
+ V(S1x8AllTrue, 0xfd8b, i_s) \
+ V(S1x16AnyTrue, 0xfd90, i_s) \
+ V(S1x16AllTrue, 0xfd91, i_s)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
- V(F32x4ExtractLane, 0xe501, _) \
- V(F32x4ReplaceLane, 0xe502, _) \
- V(I32x4ExtractLane, 0xe51c, _) \
- V(I32x4ReplaceLane, 0xe51d, _) \
- V(I32x4Shl, 0xe524, _) \
- V(I32x4ShrS, 0xe525, _) \
- V(I32x4ShrU, 0xe532, _) \
- V(I16x8ExtractLane, 0xe539, _) \
- V(I16x8ReplaceLane, 0xe53a, _) \
- V(I16x8Shl, 0xe543, _) \
- V(I16x8ShrS, 0xe544, _) \
- V(I16x8ShrU, 0xe552, _) \
- V(I8x16ExtractLane, 0xe558, _) \
- V(I8x16ReplaceLane, 0xe559, _) \
- V(I8x16Shl, 0xe562, _) \
- V(I8x16ShrS, 0xe563, _) \
- V(I8x16ShrU, 0xe571, _)
-
-#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
- V(S32x4Shuffle, 0xe52d, s_ss) \
- V(S16x8Shuffle, 0xe54c, s_ss) \
- V(S8x16Shuffle, 0xe56b, s_ss)
-
-#define FOREACH_ATOMIC_OPCODE(V) \
- V(I32AtomicAdd8S, 0xe601, i_ii) \
- V(I32AtomicAdd8U, 0xe602, i_ii) \
- V(I32AtomicAdd16S, 0xe603, i_ii) \
- V(I32AtomicAdd16U, 0xe604, i_ii) \
- V(I32AtomicAdd, 0xe605, i_ii) \
- V(I32AtomicAnd8S, 0xe606, i_ii) \
- V(I32AtomicAnd8U, 0xe607, i_ii) \
- V(I32AtomicAnd16S, 0xe608, i_ii) \
- V(I32AtomicAnd16U, 0xe609, i_ii) \
- V(I32AtomicAnd, 0xe60a, i_ii) \
- V(I32AtomicCompareExchange8S, 0xe60b, i_ii) \
- V(I32AtomicCompareExchange8U, 0xe60c, i_ii) \
- V(I32AtomicCompareExchange16S, 0xe60d, i_ii) \
- V(I32AtomicCompareExchange16U, 0xe60e, i_ii) \
- V(I32AtomicCompareExchange, 0xe60f, i_ii) \
- V(I32AtomicExchange8S, 0xe610, i_ii) \
- V(I32AtomicExchange8U, 0xe611, i_ii) \
- V(I32AtomicExchange16S, 0xe612, i_ii) \
- V(I32AtomicExchange16U, 0xe613, i_ii) \
- V(I32AtomicExchange, 0xe614, i_ii) \
- V(I32AtomicOr8S, 0xe615, i_ii) \
- V(I32AtomicOr8U, 0xe616, i_ii) \
- V(I32AtomicOr16S, 0xe617, i_ii) \
- V(I32AtomicOr16U, 0xe618, i_ii) \
- V(I32AtomicOr, 0xe619, i_ii) \
- V(I32AtomicSub8S, 0xe61a, i_ii) \
- V(I32AtomicSub8U, 0xe61b, i_ii) \
- V(I32AtomicSub16S, 0xe61c, i_ii) \
- V(I32AtomicSub16U, 0xe61d, i_ii) \
- V(I32AtomicSub, 0xe61e, i_ii) \
- V(I32AtomicXor8S, 0xe61f, i_ii) \
- V(I32AtomicXor8U, 0xe620, i_ii) \
- V(I32AtomicXor16S, 0xe621, i_ii) \
- V(I32AtomicXor16U, 0xe622, i_ii) \
- V(I32AtomicXor, 0xe623, i_ii)
+ V(F32x4ExtractLane, 0xfd01, _) \
+ V(F32x4ReplaceLane, 0xfd02, _) \
+ V(I32x4ExtractLane, 0xfd1c, _) \
+ V(I32x4ReplaceLane, 0xfd1d, _) \
+ V(I32x4Shl, 0xfd24, _) \
+ V(I32x4ShrS, 0xfd25, _) \
+ V(I32x4ShrU, 0xfd32, _) \
+ V(I16x8ExtractLane, 0xfd39, _) \
+ V(I16x8ReplaceLane, 0xfd3a, _) \
+ V(I16x8Shl, 0xfd43, _) \
+ V(I16x8ShrS, 0xfd44, _) \
+ V(I16x8ShrU, 0xfd52, _) \
+ V(I8x16ExtractLane, 0xfd58, _) \
+ V(I8x16ReplaceLane, 0xfd59, _) \
+ V(I8x16Shl, 0xfd62, _) \
+ V(I8x16ShrS, 0xfd63, _) \
+ V(I8x16ShrU, 0xfd71, _)
+
+#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) V(S8x16Shuffle, 0xfd6b, s_ss)
+
+#define FOREACH_SIMD_MEM_OPCODE(V) \
+ V(S128LoadMem, 0xfd80, s_i) \
+ V(S128StoreMem, 0xfd81, s_is)
+
+#define FOREACH_ATOMIC_OPCODE(V) \
+ V(I32AtomicAdd, 0xfe1e, i_ii) \
+ V(I32AtomicAdd8U, 0xfe20, i_ii) \
+ V(I32AtomicAdd16U, 0xfe21, i_ii) \
+ V(I32AtomicSub, 0xfe25, i_ii) \
+ V(I32AtomicSub8U, 0xfe27, i_ii) \
+ V(I32AtomicSub16U, 0xfe28, i_ii) \
+ V(I32AtomicAnd, 0xfe2c, i_ii) \
+ V(I32AtomicAnd8U, 0xfe2e, i_ii) \
+ V(I32AtomicAnd16U, 0xfe2f, i_ii) \
+ V(I32AtomicOr, 0xfe33, i_ii) \
+ V(I32AtomicOr8U, 0xfe35, i_ii) \
+ V(I32AtomicOr16U, 0xfe36, i_ii) \
+ V(I32AtomicXor, 0xfe3a, i_ii) \
+ V(I32AtomicXor8U, 0xfe3c, i_ii) \
+ V(I32AtomicXor16U, 0xfe3d, i_ii) \
+ V(I32AtomicExchange, 0xfe41, i_ii) \
+ V(I32AtomicExchange8U, 0xfe43, i_ii) \
+ V(I32AtomicExchange16U, 0xfe44, i_ii) \
+ V(I32AtomicCompareExchange, 0xfe48, i_ii) \
+ V(I32AtomicCompareExchange8U, 0xfe4a, i_ii) \
+ V(I32AtomicCompareExchange16U, 0xfe4b, i_ii)
// All opcodes.
#define FOREACH_OPCODE(V) \
@@ -482,6 +449,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
FOREACH_SIMD_0_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_MEM_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V)
// All signatures.
@@ -515,32 +483,18 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
V(l_il, kWasmI64, kWasmI32, kWasmI64)
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kWasmS128, kWasmS128) \
- V(s_f, kWasmS128, kWasmF32) \
- V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
- V(s1x4_ss, kWasmS1x4, kWasmS128, kWasmS128) \
- V(s1x8_ss, kWasmS1x8, kWasmS128, kWasmS128) \
- V(s1x16_ss, kWasmS1x16, kWasmS128, kWasmS128) \
- V(s_i, kWasmS128, kWasmI32) \
- V(s_si, kWasmS128, kWasmS128, kWasmI32) \
- V(i_s, kWasmI32, kWasmS128) \
- V(i_s1x4, kWasmI32, kWasmS1x4) \
- V(i_s1x8, kWasmI32, kWasmS1x8) \
- V(i_s1x16, kWasmI32, kWasmS1x16) \
- V(s_s1x4ss, kWasmS128, kWasmS1x4, kWasmS128, kWasmS128) \
- V(s_s1x8ss, kWasmS128, kWasmS1x8, kWasmS128, kWasmS128) \
- V(s_s1x16ss, kWasmS128, kWasmS1x16, kWasmS128, kWasmS128) \
- V(s1x4_s1x4, kWasmS1x4, kWasmS1x4) \
- V(s1x4_s1x4s1x4, kWasmS1x4, kWasmS1x4, kWasmS1x4) \
- V(s1x8_s1x8, kWasmS1x8, kWasmS1x8) \
- V(s1x8_s1x8s1x8, kWasmS1x8, kWasmS1x8, kWasmS1x8) \
- V(s1x16_s1x16, kWasmS1x16, kWasmS1x16) \
- V(s1x16_s1x16s1x16, kWasmS1x16, kWasmS1x16, kWasmS1x16)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32) \
+ V(i_s, kWasmI32, kWasmS128) \
+ V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
#define FOREACH_PREFIX(V) \
- V(Simd, 0xe5) \
- V(Atomic, 0xe6)
+ V(Simd, 0xfd) \
+ V(Atomic, 0xfe)
enum WasmOpcode {
// Declare expression opcodes.
@@ -604,17 +558,10 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return kLocalF64;
case kWasmS128:
return kLocalS128;
- case kWasmS1x4:
- return kLocalS1x4;
- case kWasmS1x8:
- return kLocalS1x8;
- case kWasmS1x16:
- return kLocalS1x16;
case kWasmStmt:
return kLocalVoid;
default:
UNREACHABLE();
- return kLocalVoid;
}
}
@@ -630,17 +577,10 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return MachineType::Float64();
case kWasmS128:
return MachineType::Simd128();
- case kWasmS1x4:
- return MachineType::Simd1x4();
- case kWasmS1x8:
- return MachineType::Simd1x8();
- case kWasmS1x16:
- return MachineType::Simd1x16();
case kWasmStmt:
return MachineType::None();
default:
UNREACHABLE();
- return MachineType::None();
}
}
@@ -658,15 +598,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return kWasmF64;
case MachineRepresentation::kSimd128:
return kWasmS128;
- case MachineRepresentation::kSimd1x4:
- return kWasmS1x4;
- case MachineRepresentation::kSimd1x8:
- return kWasmS1x8;
- case MachineRepresentation::kSimd1x16:
- return kWasmS1x16;
default:
UNREACHABLE();
- return kWasmI32;
}
}
@@ -681,9 +614,6 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
case kWasmF64:
return 'd';
case kWasmS128:
- case kWasmS1x4:
- case kWasmS1x8:
- case kWasmS1x16:
return 's';
case kWasmStmt:
return 'v';
@@ -706,12 +636,6 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return "f64";
case kWasmS128:
return "s128";
- case kWasmS1x4:
- return "s1x4";
- case kWasmS1x8:
- return "s1x8";
- case kWasmS1x16:
- return "s1x16";
case kWasmStmt:
return "<stmt>";
case kWasmVar:
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index b83d9dbbaa..6deccae6dc 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -133,15 +133,11 @@ Handle<Object> ErrorThrower::Reify() {
constructor = isolate_->wasm_runtime_error_function();
break;
}
- Vector<const uint8_t> msg_vec(
- reinterpret_cast<const uint8_t*>(error_msg_.data()),
- static_cast<int>(error_msg_.size()));
+ Vector<const char> msg_vec(error_msg_.data(), error_msg_.size());
Handle<String> message =
- isolate_->factory()->NewStringFromOneByte(msg_vec).ToHandleChecked();
- error_type_ = kNone; // Reset.
- Handle<Object> exception =
- isolate_->factory()->NewError(constructor, message);
- return exception;
+ isolate_->factory()->NewStringFromUtf8(msg_vec).ToHandleChecked();
+ Reset();
+ return isolate_->factory()->NewError(constructor, message);
}
void ErrorThrower::Reset() {
@@ -159,7 +155,10 @@ ErrorThrower::ErrorThrower(ErrorThrower&& other)
ErrorThrower::~ErrorThrower() {
if (error() && !isolate_->has_pending_exception()) {
- isolate_->ScheduleThrow(*Reify());
+ // We don't want to mix pending exceptions and scheduled exceptions, hence
+ // an existing exception should be pending, never scheduled.
+ DCHECK(!isolate_->has_scheduled_exception());
+ isolate_->Throw(*Reify());
}
}
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 848170c80e..9ae8c33f2f 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -123,6 +123,8 @@ class V8_EXPORT_PRIVATE ErrorThrower {
bool error() const { return error_type_ != kNone; }
bool wasm_error() { return error_type_ >= kFirstWasmError; }
+ Isolate* isolate() const { return isolate_; }
+
private:
enum ErrorType {
kNone,
@@ -146,6 +148,9 @@ class V8_EXPORT_PRIVATE ErrorThrower {
std::string error_msg_;
DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
+ // ErrorThrower should always be stack-allocated, since it constitutes a scope
+ // (things happen in the destructor).
+ DISALLOW_NEW_AND_DELETE();
};
} // namespace wasm
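The DISALLOW_NEW_AND_DELETE addition backs up the comment that ErrorThrower is a scope whose destructor does the reporting. A minimal standalone analogue of that pattern; ScopedErrorReporter is not the real class, just the same shape:

#include <cstddef>
#include <iostream>
#include <string>

// A scope object that records at most one error and reports it when the scope
// unwinds, minus the isolate and exception machinery.
class ScopedErrorReporter {
 public:
  explicit ScopedErrorReporter(const char* context) : context_(context) {}
  ~ScopedErrorReporter() {
    if (!error_.empty()) std::cerr << context_ << ": " << error_ << "\n";
  }
  void Error(const std::string& msg) {
    if (error_.empty()) error_ = msg;  // keep only the first error
  }
  bool error() const { return !error_.empty(); }

  // Forbid heap allocation so the object can only live on the stack and its
  // destructor is guaranteed to run, the role DISALLOW_NEW_AND_DELETE plays.
  static void* operator new(std::size_t) = delete;
  static void operator delete(void*) = delete;

 private:
  const char* context_;
  std::string error_;
};

int main() {
  ScopedErrorReporter thrower("compile");
  thrower.Error("expected magic word");
  return thrower.error() ? 1 : 0;  // destructor reports the error on exit
}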
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 540bfb5ec0..20438b8ae6 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -195,6 +195,7 @@ void wasm::PrintWasmText(const WasmModule *module,
FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
+ FOREACH_SIMD_MEM_OPCODE(CASE_OPCODE)
FOREACH_ATOMIC_OPCODE(CASE_OPCODE)
os << WasmOpcodes::OpcodeName(opcode);
break;
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
new file mode 100644
index 0000000000..8e86c4824a
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -0,0 +1,86 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_VALUE_H_
+#define V8_WASM_VALUE_H_
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Macro for defining WasmValue union members.
+#define FOREACH_WASMVAL_UNION_MEMBER(V) \
+ V(i32, kWasmI32, int32_t) \
+ V(u32, kWasmI32, uint32_t) \
+ V(i64, kWasmI64, int64_t) \
+ V(u64, kWasmI64, uint64_t) \
+ V(f32, kWasmF32, float) \
+ V(f64, kWasmF64, double)
+
+// A wasm value with type information.
+class WasmValue {
+ public:
+ WasmValue() : type_(kWasmStmt) {}
+
+#define DEFINE_TYPE_SPECIFIC_METHODS(field, localtype, ctype) \
+ explicit WasmValue(ctype v) : type_(localtype) { value_.field = v; } \
+ ctype to_##field() const { \
+ DCHECK_EQ(localtype, type_); \
+ return value_.field; \
+ }
+ FOREACH_WASMVAL_UNION_MEMBER(DEFINE_TYPE_SPECIFIC_METHODS)
+#undef DEFINE_TYPE_SPECIFIC_METHODS
+
+ ValueType type() const { return type_; }
+
+ bool operator==(const WasmValue& other) const {
+ if (type_ != other.type_) return false;
+#define CHECK_VALUE_EQ(field, localtype, ctype) \
+ if (type_ == localtype) { \
+ return value_.field == other.value_.field; \
+ }
+ FOREACH_WASMVAL_UNION_MEMBER(CHECK_VALUE_EQ)
+#undef CHECK_VALUE_EQ
+ UNREACHABLE();
+ }
+
+ template <typename T>
+ inline T to() const {
+    static_assert(sizeof(T) == -1, "Only use this method with valid types");
+ }
+
+ template <typename T>
+ inline T to_unchecked() const {
+    static_assert(sizeof(T) == -1, "Only use this method with valid types");
+ }
+
+ private:
+ ValueType type_;
+ union {
+#define DECLARE_FIELD(field, localtype, ctype) ctype field;
+ FOREACH_WASMVAL_UNION_MEMBER(DECLARE_FIELD)
+#undef DECLARE_FIELD
+ } value_;
+};
+
+#define DECLARE_CAST(field, localtype, ctype) \
+ template <> \
+ inline ctype WasmValue::to_unchecked() const { \
+ return value_.field; \
+ } \
+ template <> \
+ inline ctype WasmValue::to() const { \
+ return to_##field(); \
+ }
+FOREACH_WASMVAL_UNION_MEMBER(DECLARE_CAST)
+#undef DECLARE_CAST
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_VALUE_H_
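A rough usage sketch of the WasmValue class added above; it exercises only the API shown in the new header and assumes a V8-internal build context:

#include <cstdint>
#include "src/wasm/wasm-value.h"

using v8::internal::wasm::WasmValue;

void WasmValueExample() {
  WasmValue a(int32_t{7});      // stored with type kWasmI32
  WasmValue b(3.5);             // stored with type kWasmF64
  int32_t x = a.to<int32_t>();  // checked accessor: DCHECKs the type tag
  double y = b.to_f64();        // field-named accessor, same value
  bool same = (a == b);         // false: the types already differ
  (void)x; (void)y; (void)same;
}

As the primary template's static_assert indicates, to<T>() for a type with no corresponding union member, say to<bool>(), is rejected at compile time.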
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 4a2e9a17e8..3ec545f986 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -53,19 +53,13 @@ void Assembler::emitw(uint16_t x) {
pc_ += sizeof(uint16_t);
}
-
-void Assembler::emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode) ||
rmode == RelocInfo::CODE_AGE_SEQUENCE);
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
- } else {
- RecordRelocInfo(rmode);
- }
+ RecordRelocInfo(rmode);
int current = code_targets_.length();
- if (current > 0 && code_targets_.last().address() == target.address()) {
+ if (current > 0 && !target.is_null() &&
+ code_targets_.last().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
emitl(current - 1);
} else {
@@ -317,7 +311,6 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
-
Address Assembler::runtime_entry_at(Address pc) {
return Memory::int32_at(pc) + isolate_data().code_range_start_;
}
@@ -356,7 +349,6 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
- return NULL;
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index b2330b3320..8c90fd6032 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -18,6 +18,7 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/macro-assembler.h"
#include "src/v8.h"
@@ -292,6 +293,25 @@ bool Operand::AddressUsesRegister(Register reg) const {
}
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Address pc = buffer_ + request.offset();
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber: {
+ Handle<HeapNumber> object = isolate->factory()->NewHeapNumber(
+ request.heap_number(), IMMUTABLE, TENURED);
+ Memory::Object_Handle_at(pc) = object;
+ break;
+ }
+ case HeapObjectRequest::kCodeStub: {
+ request.code_stub()->set_isolate(isolate);
+ code_targets_[Memory::int32_at(pc)] = request.code_stub()->GetCode();
+ break;
+ }
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@@ -309,11 +329,13 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
}
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+ // At this point overflow() may be true, but the gap ensures
+ // that we are still not overlapping instructions and relocation info.
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -329,7 +351,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(base::bits::IsPowerOfTwo32(m));
+ DCHECK(base::bits::IsPowerOfTwo(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
Nop(delta);
}
@@ -414,9 +436,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -880,14 +900,19 @@ void Assembler::call(Address entry, RelocInfo::Mode rmode) {
emit_runtime_entry(entry, rmode);
}
+void Assembler::call(CodeStub* stub) {
+ EnsureSpace ensure_space(this);
+ // 1110 1000 #32-bit disp.
+ emit(0xE8);
+ RequestHeapObject(HeapObjectRequest(stub));
+ emit_code_target(Handle<Code>(), RelocInfo::CODE_TARGET);
+}
-void Assembler::call(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
- emit_code_target(target, rmode, ast_id);
+ emit_code_target(target, rmode);
}
@@ -1538,6 +1563,14 @@ void Assembler::movp(Register dst, void* value, RelocInfo::Mode rmode) {
emitp(value, rmode);
}
+void Assembler::movp_heap_number(Register dst, double value) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kPointerSize);
+ emit(0xB8 | dst.low_bits());
+ RequestHeapObject(HeapObjectRequest(value));
+ emitp(nullptr, RelocInfo::EMBEDDED_OBJECT);
+}
+
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
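
The hunks above rework code emission so that heap objects are no longer needed while assembling: call(CodeStub*) and movp_heap_number() only emit a dummy target and record a HeapObjectRequest, and GetCode() now takes the Isolate so that AllocateAndInstallRequestedHeapObjects() can allocate the real objects and patch the dummies right before the code descriptor is filled in. A rough sketch of the resulting usage pattern, following the same shape as the CreateSqrtFunction change to codegen-x64.cc later in this patch (v8::internal names assumed to be in scope, buffer handling elided):

void EmitLoadOfDouble(Isolate* isolate, byte* buffer, int buffer_size) {
  MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kNo);
  // Emits a movp of a dummy handle and records HeapObjectRequest(1.5).
  masm.movp_heap_number(rax, 1.5);
  masm.Ret();
  CodeDesc desc;
  // New signature: the Isolate is required here because the pending heap
  // number (and any requested code stubs) are allocated and patched in now.
  masm.GetCode(isolate, &desc);
}
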
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 0f2f27247e..560f3fe513 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -38,6 +38,7 @@
#define V8_X64_ASSEMBLER_X64_H_
#include <deque>
+#include <forward_list>
#include "src/assembler.h"
#include "src/x64/sse-instr.h"
@@ -448,15 +449,14 @@ class Operand BASE_EMBEDDED {
// Shift instructions on operands/registers with kPointerSize, kInt32Size and
// kInt64Size.
-#define SHIFT_INSTRUCTION_LIST(V) \
- V(rol, 0x0) \
- V(ror, 0x1) \
- V(rcl, 0x2) \
- V(rcr, 0x3) \
- V(shl, 0x4) \
- V(shr, 0x5) \
- V(sar, 0x7) \
-
+#define SHIFT_INSTRUCTION_LIST(V) \
+ V(rol, 0x0) \
+ V(ror, 0x1) \
+ V(rcl, 0x2) \
+ V(rcr, 0x3) \
+ V(shl, 0x4) \
+ V(shr, 0x5) \
+ V(sar, 0x7)
class Assembler : public AssemblerBase {
private:
@@ -488,12 +488,12 @@ class Assembler : public AssemblerBase {
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
- virtual ~Assembler() { }
+ virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
+ void GetCode(Isolate* isolate, CodeDesc* desc);
// Read/Modify the code target in the relative branch/call instruction at pc.
// On the x64 architecture, we use relative jumps with a 32-bit displacement
@@ -696,6 +696,15 @@ class Assembler : public AssemblerBase {
// Loads a pointer into a register with a relocation mode.
void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
+ // Load a heap number into a register.
+ // The heap number will not be allocated and embedded into the code right
+ // away. Instead, we emit the load of a dummy object. Later, when calling
+ // Assembler::GetCode, the heap number will be allocated and the code will be
+ // patched by replacing the dummy with the actual object. The RelocInfo for
+  // the embedded object is already recorded correctly when emitting the dummy
+ // move.
+ void movp_heap_number(Register dst, double value);
+
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE64);
@@ -913,9 +922,9 @@ class Assembler : public AssemblerBase {
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
void call(Address entry, RelocInfo::Mode rmode);
+ void call(CodeStub* stub);
void call(Handle<Code> target,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
@@ -2052,9 +2061,7 @@ class Assembler : public AssemblerBase {
inline void emitp(void* x, RelocInfo::Mode rmode);
inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
+ inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
inline void emit(Immediate x);
@@ -2497,6 +2504,19 @@ class Assembler : public AssemblerBase {
std::deque<int> internal_reference_positions_;
List< Handle<Code> > code_targets_;
+
+  // The following functions help avoid allocating embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void RequestHeapObject(HeapObjectRequest request);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
};
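
The comment block above describes the general record-now, allocate-and-patch-later scheme. Purely as an illustration of that scheme, detached from the V8 types (every name below is made up for the sketch; none of it is V8 API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// A request remembers where the placeholder was emitted and what should
// eventually be materialized there (compare HeapObjectRequest above).
struct PendingPatch {
  size_t offset;
  double payload;
};

class TinyAssembler {
 public:
  // Analogue of movp_heap_number: reserve a pointer-sized dummy slot and
  // remember that it needs a real object later.
  void EmitPlaceholder(double value) {
    pending_.push_back({buffer_.size(), value});
    buffer_.resize(buffer_.size() + sizeof(void*), 0);
  }

  // Analogue of AllocateAndInstallRequestedHeapObjects: only now, when an
  // allocator is available, are the real objects created and written over
  // the dummy slots recorded earlier.
  template <typename AllocFn>
  void Finalize(AllocFn alloc) {
    for (const PendingPatch& p : pending_) {
      void* obj = alloc(p.payload);
      std::memcpy(buffer_.data() + p.offset, &obj, sizeof(obj));
    }
  }

 private:
  std::vector<uint8_t> buffer_;
  std::vector<PendingPatch> pending_;
};

In the real assembler the placeholder is whatever the dummy movp/emitl wrote, and the patch step runs inside GetCode via AllocateAndInstallRequestedHeapObjects.
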
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 84630928d4..7c75a747f7 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -38,28 +38,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- rax.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments
- for (int i = 0; i < param_count; ++i) {
- __ Push(descriptor.GetRegisterParameter(i));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ Ret();
-}
-
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
@@ -831,13 +809,10 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@@ -959,7 +934,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label okay;
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
+ IsolateAddressId::kPendingExceptionAddress, isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ cmpp(r14, pending_exception_operand);
@@ -976,15 +951,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
+ IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
+ IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
+ IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
+ IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
+ IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set rax to
// contain the current pending exception, don't clobber it.
@@ -1034,7 +1009,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push the stack frame type.
__ Push(Immediate(StackFrame::TypeToMarker(type()))); // context slot
- ExternalReference context_address(Isolate::kContextAddress, isolate());
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
+ isolate());
__ Load(kScratchRegister, context_address);
__ Push(kScratchRegister); // context
// Save callee-saved registers (X64/X32/Win64 calling conventions).
@@ -1069,14 +1045,14 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
// Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference c_entry_fp(IsolateAddressId::kCEntryFPAddress, isolate());
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
__ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+ ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ Load(rax, js_entry_sp);
__ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
@@ -1096,8 +1072,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
+ ExternalReference pending_exception(
+ IsolateAddressId::kPendingExceptionAddress, isolate());
__ Store(pending_exception, rax);
__ LoadRoot(rax, Heap::kExceptionRootIndex);
__ jmp(&exit);
@@ -1404,34 +1380,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdx : left
- // -- rax : right
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Load rcx with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(rcx, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ testb(rcx, Immediate(kSmiTagMask));
- __ Assert(not_equal, kExpectedAllocationSite);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- isolate()->factory()->allocation_site_map());
- __ Assert(equal, kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@@ -1852,7 +1800,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ j(equal, done);
// Stop if found the property.
- __ Cmp(entity_name, Handle<Name>(name));
+ __ Cmp(entity_name, name);
__ j(equal, miss);
Label good;
@@ -1869,7 +1817,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
NEGATIVE_LOOKUP);
- __ Push(Handle<Object>(name));
+ __ Push(name);
__ Push(Immediate(name->Hash()));
__ CallStub(&stub);
__ testp(r0, r0);
@@ -2074,10 +2022,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
- Label on_black;
Label need_incremental;
Label need_incremental_pop_object;
+#ifndef V8_CONCURRENT_MARKING
+ Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
@@ -2095,6 +2044,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
+#endif
// Get the value from the slot.
__ movp(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -2144,21 +2094,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ movp(rbx, MemOperand(rbp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ PopReturnAddressTo(rcx);
- int additional_offset =
- function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
- __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
- __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -2166,6 +2101,12 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
}
}
+void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
+ Zone* zone) {
+ if (tasm->isolate()->function_entry_hook() != nullptr) {
+ tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
+ }
+}
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save
@@ -2211,8 +2152,8 @@ static void CreateArrayDispatch(MacroAssembler* masm,
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -2240,25 +2181,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// rsp[0] - return address
// rsp[8] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ testb(rdx, Immediate(1));
- __ j(not_zero, &normal_sequence);
- }
-
- // look at the first argument
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, args.GetArgumentOperand(0));
- __ testp(rcx, rcx);
- __ j(zero, &normal_sequence);
+ STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(PACKED_ELEMENTS == 2);
+ STATIC_ASSERT(HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@@ -2268,13 +2196,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
+    // Is the low bit set? If so, we are holey and that is good.
+ Label normal_sequence;
+ __ testb(rdx, Immediate(1));
+ __ j(not_zero, &normal_sequence);
+
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
@@ -2290,12 +2217,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
- Smi::FromInt(kFastElementsKindPackedToHoley));
+ __ SmiAddConstant(
+ FieldOperand(rbx, AllocationSite::kTransitionInfoOrBoilerplateOffset),
+ Smi::FromInt(kFastElementsKindPackedToHoley));
__ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -2316,13 +2244,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2337,7 +2265,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2404,7 +2332,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &no_info);
// Only look at the lower 16 bits of the transition info.
- __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
+ __ movp(rdx, FieldOperand(
+ rbx, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
@@ -2496,21 +2425,21 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
__ j(equal, &done);
- __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ cmpl(rcx, Immediate(HOLEY_ELEMENTS));
__ Assert(equal,
kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
- __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ cmpl(rcx, Immediate(PACKED_ELEMENTS));
__ j(equal, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
static int Offset(ExternalReference ref0, ExternalReference ref1) {
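
The rewritten CreateArrayDispatchOneArgument above leans on the ElementsKind numbering spelled out in the STATIC_ASSERTs: packed kinds are even and the matching holey kind is the next odd value, so "holey" is just the low bit and the packed-to-holey transition is a +1 step (the incl(rdx) plus the kFastElementsKindPackedToHoley adjustment to the AllocationSite). A small standalone sketch of that invariant, using only the six values asserted above:

// Mirrors the six STATIC_ASSERTed values; not the full ElementsKind enum.
enum SimpleElementsKind {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4,
  HOLEY_DOUBLE_ELEMENTS = 5,
};

inline bool IsHoleyKind(SimpleElementsKind kind) {
  return (kind & 1) != 0;  // the low-bit test emitted as testb(rdx, Immediate(1))
}

inline SimpleElementsKind ToHoleyKind(SimpleElementsKind kind) {
  // For the even, packed values this is the same +1 step the stub applies.
  return static_cast<SimpleElementsKind>(kind | 1);
}
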
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 4240cb46ca..526adbd47d 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -288,7 +288,6 @@ class RecordWriteStub: public PlatformCodeStub {
}
}
UNREACHABLE();
- return no_reg;
}
friend class RecordWriteStub;
};
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index e16dbebd3c..e08f0756d7 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -36,8 +36,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ byte* buffer = static_cast<byte*>(base::OS::Allocate(
+ 1 * KB, &actual_size, true, isolate->heap()->GetRandomMmapAddr()));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
@@ -48,7 +48,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
- masm.GetCode(&desc);
+ masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 611a3c6c21..c57325e1e2 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -86,23 +86,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(rax.code(), params);
- output_frame->SetRegister(rbx.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@@ -142,7 +125,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kRegisterSize + kDoubleRegsSize + kFloatRegsSize;
- __ Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
+ __ Store(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()),
+ rbp);
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index a7438ad275..5458a86c3f 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -2776,7 +2776,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameInCode(byte* addr) const {
// X64 does not embed debug strings at the moment.
UNREACHABLE();
- return "";
}
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 433c3efdfb..db1d76c805 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -18,15 +18,6 @@ Register JavaScriptFrame::fp_register() { return rbp; }
Register JavaScriptFrame::context_register() { return rsi; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return rbp; }
-Register StubFailureTrampolineFrame::context_register() { return rsi; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
}
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index b6ab7ca1af..60a4297594 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -47,6 +47,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return r11; }
const Register StringCompareDescriptor::LeftRegister() { return rdx; }
const Register StringCompareDescriptor::RightRegister() { return rax; }
+const Register StringConcatDescriptor::ArgumentsCountRegister() { return rbx; }
+
const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
@@ -153,6 +155,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : arguments list (FixedArray)
+ // rcx : arguments list length (untagged)
+ Register registers[] = {rdi, rax, rbx, rcx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -162,6 +174,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : the object to spread
+ Register registers[] = {rdi, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rbx : the arguments list
+ Register registers[] = {rdi, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : arguments list (FixedArray)
+ // rcx : arguments list length (untagged)
+ Register registers[] = {rdi, rdx, rax, rbx, rcx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -172,6 +212,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the object to spread
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the arguments list
+ Register registers[] = {rdi, rdx, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -370,8 +429,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
rax, // the value to pass to the generator
rbx, // the JSGeneratorObject / JSAsyncGeneratorObject to resume
- rdx, // the resume mode (tagged)
- rcx // SuspendFlags (tagged)
+ rdx // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 7087c03973..559de47646 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -23,32 +23,24 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- isolate_(isolate),
- root_array_available_(true),
+ : TurboAssembler(isolate, buffer, size, create_code_object),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
}
static const int64_t kInvalidRootRegisterDelta = -1;
-
-int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
if (predictable_code_size() &&
(other.address() < reinterpret_cast<Address>(isolate()) ||
other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
return kInvalidRootRegisterDelta;
}
- Address roots_register_value = kRootRegisterBias +
+ Address roots_register_value =
+ kRootRegisterBias +
reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
@@ -114,8 +106,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
}
}
-
-void MacroAssembler::LoadAddress(Register destination,
+void TurboAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(source);
@@ -128,8 +119,7 @@ void MacroAssembler::LoadAddress(Register destination,
Move(destination, source);
}
-
-int MacroAssembler::LoadAddressSize(ExternalReference source) {
+int TurboAssembler::LoadAddressSize(ExternalReference source) {
if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
@@ -163,14 +153,12 @@ void MacroAssembler::PushAddress(ExternalReference source) {
Push(kScratchRegister);
}
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
DCHECK(root_array_available_);
movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
-
void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
@@ -195,15 +183,13 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
DCHECK(root_array_available_);
cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
-
-void MacroAssembler::CompareRoot(const Operand& with,
+void TurboAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
@@ -537,12 +523,15 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
bind(&done);
}
-void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+void TurboAssembler::Assert(Condition cc, BailoutReason reason) {
if (emit_debug_code()) Check(cc, reason);
}
+void TurboAssembler::AssertUnreachable(BailoutReason reason) {
+ if (emit_debug_code()) Abort(reason);
+}
-void MacroAssembler::Check(Condition cc, BailoutReason reason) {
+void TurboAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L, Label::kNear);
Abort(reason);
@@ -550,12 +539,11 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason) {
bind(&L);
}
-
-void MacroAssembler::CheckStackAlignment() {
+void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
@@ -565,8 +553,7 @@ void MacroAssembler::CheckStackAlignment() {
}
}
-
-void MacroAssembler::Abort(BailoutReason reason) {
+void TurboAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
@@ -580,12 +567,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
Move(rdx, Smi::FromInt(static_cast<int>(reason)));
- if (!has_frame_) {
+ if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
@@ -597,10 +581,14 @@ void MacroAssembler::Abort(BailoutReason reason) {
int3();
}
+void TurboAssembler::CallStubDelayed(CodeStub* stub) {
+ DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
+ call(stub);
+}
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+void MacroAssembler::CallStub(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -608,9 +596,20 @@ void MacroAssembler::TailCallStub(CodeStub* stub) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame() || !stub->SometimesSetsUpAFrame();
+}
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
+void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles) {
+ const Runtime::Function* f = Runtime::FunctionForId(fid);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Set(rax, f->nargs);
+ LoadAddress(rbx, ExternalReference(f, isolate()));
+ CallStubDelayed(new (zone) CEntryStub(nullptr, f->result_size, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@@ -726,8 +725,7 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
}
-
-void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtss2sd(dst, src, src);
@@ -736,8 +734,7 @@ void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtss2sd(dst, dst, src);
@@ -746,8 +743,7 @@ void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtsd2ss(dst, src, src);
@@ -756,8 +752,7 @@ void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtsd2ss(dst, dst, src);
@@ -766,8 +761,7 @@ void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -778,8 +772,7 @@ void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
}
}
-
-void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -790,8 +783,7 @@ void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
+void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -802,8 +794,7 @@ void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
}
}
-
-void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -814,8 +805,7 @@ void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
+void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -826,8 +816,7 @@ void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
}
}
-
-void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -838,8 +827,7 @@ void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
+void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -850,8 +838,7 @@ void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
}
}
-
-void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -862,8 +849,7 @@ void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
+void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
Label msb_set_src;
Label jmp_return;
testq(src, src);
@@ -881,8 +867,7 @@ void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
bind(&jmp_return);
}
-
-void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
+void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
Label msb_set_src;
Label jmp_return;
testq(src, src);
@@ -909,8 +894,7 @@ void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
+void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2si(dst, src);
@@ -919,8 +903,7 @@ void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
+void TurboAssembler::Cvttss2si(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2si(dst, src);
@@ -929,8 +912,7 @@ void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
+void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2si(dst, src);
@@ -939,8 +921,7 @@ void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
+void TurboAssembler::Cvttsd2si(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2si(dst, src);
@@ -949,8 +930,7 @@ void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
+void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
@@ -959,8 +939,7 @@ void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
+void TurboAssembler::Cvttss2siq(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
@@ -969,8 +948,7 @@ void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
}
}
-
-void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
+void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2siq(dst, src);
@@ -979,8 +957,7 @@ void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
+void TurboAssembler::Cvttsd2siq(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2siq(dst, src);
@@ -1026,8 +1003,7 @@ void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
}
}
-
-void MacroAssembler::Set(Register dst, int64_t x) {
+void TurboAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
} else if (is_uint32(x)) {
@@ -1039,7 +1015,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
}
-void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+void TurboAssembler::Set(const Operand& dst, intptr_t x) {
if (kPointerSize == kInt64Size) {
if (is_int32(x)) {
movp(dst, Immediate(static_cast<int32_t>(x)));
@@ -1100,20 +1076,18 @@ void MacroAssembler::SafePush(Smi* src) {
}
}
-
-Register MacroAssembler::GetSmiConstant(Smi* source) {
+Register TurboAssembler::GetSmiConstant(Smi* source) {
STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
xorl(kScratchRegister, kScratchRegister);
return kScratchRegister;
}
- LoadSmiConstant(kScratchRegister, source);
+ Move(kScratchRegister, source);
return kScratchRegister;
}
-
-void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+void TurboAssembler::Move(Register dst, Smi* source) {
STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
@@ -1123,7 +1097,6 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
}
}
-
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
@@ -1164,8 +1137,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
shlp(dst, Immediate(kSmiShift));
}
-
-void MacroAssembler::SmiToInteger32(Register dst, Register src) {
+void TurboAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
movp(dst, src);
@@ -1179,8 +1151,7 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
}
}
-
-void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
+void TurboAssembler::SmiToInteger32(Register dst, const Operand& src) {
if (SmiValuesAre32Bits()) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
} else {
@@ -1297,21 +1268,18 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
}
}
-
-Condition MacroAssembler::CheckSmi(Register src) {
+Condition TurboAssembler::CheckSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
return zero;
}
-
-Condition MacroAssembler::CheckSmi(const Operand& src) {
+Condition TurboAssembler::CheckSmi(const Operand& src) {
STATIC_ASSERT(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
return zero;
}
-
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
@@ -1429,9 +1397,7 @@ void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
j(NegateCondition(is_valid), on_invalid, near_jump);
}
-
-void MacroAssembler::JumpIfSmi(Register src,
- Label* on_smi,
+void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(smi, on_smi, near_jump);
@@ -1497,7 +1463,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
Register constant_reg = GetSmiConstant(constant);
addp(dst, constant_reg);
} else {
- LoadSmiConstant(dst, constant);
+ Move(dst, constant);
addp(dst, src);
}
}
@@ -1526,7 +1492,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
}
} else if (dst.is(src)) {
DCHECK(!dst.is(kScratchRegister));
- LoadSmiConstant(kScratchRegister, constant);
+ Move(kScratchRegister, constant);
addp(dst, kScratchRegister);
if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
j(no_overflow, bailout_label, near_jump);
@@ -1549,7 +1515,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
} else {
DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
- LoadSmiConstant(dst, constant);
+ Move(dst, constant);
addp(dst, src);
j(overflow, bailout_label, near_jump);
}
@@ -1567,13 +1533,13 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
- LoadSmiConstant(dst, constant);
+ Move(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addp(dst, src);
} else {
// Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
+ Move(dst, Smi::FromInt(-constant->value()));
addp(dst, src);
}
}
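
The Smi::kMinValue special case above relies on a two's-complement identity: for n-bit integers, -MIN equals MIN modulo 2^n, so subtracting kMinValue and adding it produce the same bit pattern and differ only in the overflow flag, which this path deliberately does not check. A quick standalone check of that identity (plain C++, unrelated to the V8 types):

#include <cstdint>

// 5 - INT32_MIN and 5 + INT32_MIN wrap to the same 32-bit pattern; the
// computation is done in uint32_t to keep the wraparound well defined.
static_assert(uint32_t{5} - static_cast<uint32_t>(INT32_MIN) ==
                  uint32_t{5} + static_cast<uint32_t>(INT32_MIN),
              "subtracting and adding the minimum value give the same bits");
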
@@ -1590,7 +1556,7 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
}
} else if (dst.is(src)) {
DCHECK(!dst.is(kScratchRegister));
- LoadSmiConstant(kScratchRegister, constant);
+ Move(kScratchRegister, constant);
subp(dst, kScratchRegister);
if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
j(no_overflow, bailout_label, near_jump);
@@ -1616,12 +1582,12 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
if (constant->value() == Smi::kMinValue) {
DCHECK(!dst.is(kScratchRegister));
movp(dst, src);
- LoadSmiConstant(kScratchRegister, constant);
+ Move(kScratchRegister, constant);
subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+ Move(dst, Smi::FromInt(-(constant->value())));
addp(dst, src);
j(overflow, bailout_label, near_jump);
}
@@ -1999,7 +1965,7 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
Register constant_reg = GetSmiConstant(constant);
andp(dst, constant_reg);
} else {
- LoadSmiConstant(dst, constant);
+ Move(dst, constant);
andp(dst, src);
}
}
@@ -2020,7 +1986,7 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
Register constant_reg = GetSmiConstant(constant);
orp(dst, constant_reg);
} else {
- LoadSmiConstant(dst, constant);
+ Move(dst, constant);
orp(dst, src);
}
}
@@ -2041,7 +2007,7 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
Register constant_reg = GetSmiConstant(constant);
xorp(dst, constant_reg);
} else {
- LoadSmiConstant(dst, constant);
+ Move(dst, constant);
xorp(dst, src);
}
}
@@ -2335,8 +2301,7 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
}
}
-
-void MacroAssembler::Push(Smi* source) {
+void TurboAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
Push(Immediate(static_cast<int32_t>(smi)));
@@ -2494,36 +2459,22 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
}
-
-void MacroAssembler::Move(Register dst, Register src) {
+void TurboAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
movp(dst, src);
}
}
-
-void MacroAssembler::Move(Register dst, Handle<Object> source) {
- AllowDeferredHandleDereference smi_check;
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- MoveHeapObject(dst, source);
- }
-}
-
-
-void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- AllowDeferredHandleDereference smi_check;
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
+void TurboAssembler::MoveNumber(Register dst, double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) {
+ Move(dst, Smi::FromInt(smi));
} else {
- MoveHeapObject(kScratchRegister, source);
- movp(dst, kScratchRegister);
+ movp_heap_number(dst, value);
}
}
-
-void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
+void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
Xorpd(dst, dst);
} else {
@@ -2538,8 +2489,7 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
}
}
-
-void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
+void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
Xorpd(dst, dst);
} else {
@@ -2568,8 +2518,7 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-
-void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Movaps(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovaps(dst, src);
@@ -2578,7 +2527,7 @@ void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
}
}
-void MacroAssembler::Movups(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Movups(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovups(dst, src);
@@ -2587,7 +2536,7 @@ void MacroAssembler::Movups(XMMRegister dst, XMMRegister src) {
}
}
-void MacroAssembler::Movups(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Movups(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovups(dst, src);
@@ -2596,7 +2545,7 @@ void MacroAssembler::Movups(XMMRegister dst, const Operand& src) {
}
}
-void MacroAssembler::Movups(const Operand& dst, XMMRegister src) {
+void TurboAssembler::Movups(const Operand& dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovups(dst, src);
@@ -2605,7 +2554,7 @@ void MacroAssembler::Movups(const Operand& dst, XMMRegister src) {
}
}
-void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovapd(dst, src);
@@ -2614,25 +2563,7 @@ void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
}
}
-void MacroAssembler::Movupd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovupd(dst, src);
- } else {
- movupd(dst, src);
- }
-}
-
-void MacroAssembler::Movupd(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovupd(dst, src);
- } else {
- movupd(dst, src);
- }
-}
-
-void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Movsd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovsd(dst, dst, src);
@@ -2641,8 +2572,7 @@ void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Movsd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovsd(dst, src);
@@ -2651,8 +2581,7 @@ void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
+void TurboAssembler::Movsd(const Operand& dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovsd(dst, src);
@@ -2661,8 +2590,7 @@ void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Movss(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovss(dst, dst, src);
@@ -2671,8 +2599,7 @@ void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Movss(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovss(dst, src);
@@ -2681,8 +2608,7 @@ void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
+void TurboAssembler::Movss(const Operand& dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovss(dst, src);
@@ -2691,8 +2617,7 @@ void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Movd(XMMRegister dst, Register src) {
+void TurboAssembler::Movd(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovd(dst, src);
@@ -2701,8 +2626,7 @@ void MacroAssembler::Movd(XMMRegister dst, Register src) {
}
}
-
-void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Movd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovd(dst, src);
@@ -2711,8 +2635,7 @@ void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Movd(Register dst, XMMRegister src) {
+void TurboAssembler::Movd(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovd(dst, src);
@@ -2721,8 +2644,7 @@ void MacroAssembler::Movd(Register dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Movq(XMMRegister dst, Register src) {
+void TurboAssembler::Movq(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovq(dst, src);
@@ -2731,8 +2653,7 @@ void MacroAssembler::Movq(XMMRegister dst, Register src) {
}
}
-
-void MacroAssembler::Movq(Register dst, XMMRegister src) {
+void TurboAssembler::Movq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovq(dst, src);
@@ -2741,7 +2662,7 @@ void MacroAssembler::Movq(Register dst, XMMRegister src) {
}
}
-void MacroAssembler::Movmskps(Register dst, XMMRegister src) {
+void TurboAssembler::Movmskps(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovmskps(dst, src);
@@ -2750,7 +2671,7 @@ void MacroAssembler::Movmskps(Register dst, XMMRegister src) {
}
}
-void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
+void TurboAssembler::Movmskpd(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vmovmskpd(dst, src);
@@ -2759,7 +2680,7 @@ void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
}
}
-void MacroAssembler::Xorps(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Xorps(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, src);
@@ -2768,7 +2689,7 @@ void MacroAssembler::Xorps(XMMRegister dst, XMMRegister src) {
}
}
-void MacroAssembler::Xorps(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Xorps(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, src);
@@ -2777,7 +2698,7 @@ void MacroAssembler::Xorps(XMMRegister dst, const Operand& src) {
}
}
-void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
+void TurboAssembler::Roundss(XMMRegister dst, XMMRegister src,
RoundingMode mode) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2787,8 +2708,7 @@ void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
}
}
-
-void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
+void TurboAssembler::Roundsd(XMMRegister dst, XMMRegister src,
RoundingMode mode) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2798,8 +2718,7 @@ void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
}
}
-
-void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
+void TurboAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vsqrtsd(dst, dst, src);
@@ -2808,8 +2727,7 @@ void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
}
}
-
-void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vsqrtsd(dst, dst, src);
@@ -2818,8 +2736,7 @@ void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
}
}
-
-void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
+void TurboAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vucomiss(src1, src2);
@@ -2828,8 +2745,7 @@ void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
}
}
-
-void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
+void TurboAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vucomiss(src1, src2);
@@ -2838,8 +2754,7 @@ void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
}
}
-
-void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
+void TurboAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vucomisd(src1, src2);
@@ -2848,8 +2763,7 @@ void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
}
}
-
-void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
+void TurboAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vucomisd(src1, src2);
@@ -2885,7 +2799,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- MoveHeapObject(kScratchRegister, source);
+ Move(kScratchRegister, Handle<HeapObject>::cast(source));
cmpp(dst, kScratchRegister);
}
}
@@ -2896,29 +2810,35 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- MoveHeapObject(kScratchRegister, source);
+ Move(kScratchRegister, Handle<HeapObject>::cast(source));
cmpp(dst, kScratchRegister);
}
}
-
-void MacroAssembler::Push(Handle<Object> source) {
+void MacroAssembler::PushObject(Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
- MoveHeapObject(kScratchRegister, source);
- Push(kScratchRegister);
+ Push(Handle<HeapObject>::cast(source));
}
}
+void TurboAssembler::Push(Handle<HeapObject> source) {
+ Move(kScratchRegister, source);
+ Push(kScratchRegister);
+}
-void MacroAssembler::MoveHeapObject(Register result,
- Handle<Object> object) {
- DCHECK(object->IsHeapObject());
- Move(result, object, RelocInfo::EMBEDDED_OBJECT);
+void TurboAssembler::Move(Register result, Handle<HeapObject> object,
+ RelocInfo::Mode rmode) {
+ movp(result, reinterpret_cast<void*>(object.address()), rmode);
}
+void TurboAssembler::Move(const Operand& dst, Handle<HeapObject> object,
+ RelocInfo::Mode rmode) {
+ Move(kScratchRegister, object, rmode);
+ movp(dst, kScratchRegister);
+}
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
@@ -2953,8 +2873,7 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements,
PushReturnAddressFrom(scratch);
}
-
-void MacroAssembler::Push(Register src) {
+void TurboAssembler::Push(Register src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
@@ -2965,8 +2884,7 @@ void MacroAssembler::Push(Register src) {
}
}
-
-void MacroAssembler::Push(const Operand& src) {
+void TurboAssembler::Push(const Operand& src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
@@ -2986,8 +2904,7 @@ void MacroAssembler::PushQuad(const Operand& src) {
}
}
-
-void MacroAssembler::Push(Immediate value) {
+void TurboAssembler::Push(Immediate value) {
if (kPointerSize == kInt64Size) {
pushq(value);
} else {
@@ -3046,21 +2963,6 @@ void MacroAssembler::PopQuad(const Operand& dst) {
}
-void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
- Register base,
- int offset) {
- DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
- offset <= SharedFunctionInfo::kSize &&
- (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
- if (kPointerSize == kInt64Size) {
- movsxlq(dst, FieldOperand(base, offset));
- } else {
- movp(dst, FieldOperand(base, offset));
- SmiToInteger32(dst, dst);
- }
-}
-
-
void MacroAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -3088,15 +2990,13 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-
-int MacroAssembler::CallSize(ExternalReference ext) {
+int TurboAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
return LoadAddressSize(ext) +
Assembler::kCallScratchRegisterInstructionLength;
}
-
-void MacroAssembler::Call(ExternalReference ext) {
+void TurboAssembler::Call(ExternalReference ext) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(ext);
#endif
@@ -3107,8 +3007,7 @@ void MacroAssembler::Call(ExternalReference ext) {
#endif
}
-
-void MacroAssembler::Call(const Operand& op) {
+void TurboAssembler::Call(const Operand& op) {
if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
@@ -3117,8 +3016,7 @@ void MacroAssembler::Call(const Operand& op) {
}
}
-
-void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
#endif
@@ -3129,23 +3027,19 @@ void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#endif
}
-
-void MacroAssembler::Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
+void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
DCHECK(RelocInfo::IsCodeTarget(rmode) ||
- rmode == RelocInfo::CODE_AGE_SEQUENCE);
- call(code_object, rmode, ast_id);
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
+ call(code_object, rmode);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
#endif
}
-
-void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
@@ -3160,8 +3054,7 @@ void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
shrq(dst, Immediate(32));
}
-
-void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
@@ -3176,8 +3069,7 @@ void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
}
-
-void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -3193,8 +3085,7 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
}
}
-
-void MacroAssembler::Lzcntl(Register dst, Register src) {
+void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntl(dst, src);
@@ -3208,8 +3099,7 @@ void MacroAssembler::Lzcntl(Register dst, Register src) {
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
-
-void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
+void TurboAssembler::Lzcntl(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntl(dst, src);
@@ -3223,8 +3113,7 @@ void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
-
-void MacroAssembler::Lzcntq(Register dst, Register src) {
+void TurboAssembler::Lzcntq(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntq(dst, src);
@@ -3238,8 +3127,7 @@ void MacroAssembler::Lzcntq(Register dst, Register src) {
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
-
-void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
+void TurboAssembler::Lzcntq(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntq(dst, src);
@@ -3253,8 +3141,7 @@ void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
-
-void MacroAssembler::Tzcntq(Register dst, Register src) {
+void TurboAssembler::Tzcntq(Register dst, Register src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntq(dst, src);
@@ -3268,8 +3155,7 @@ void MacroAssembler::Tzcntq(Register dst, Register src) {
bind(&not_zero_src);
}
-
-void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
+void TurboAssembler::Tzcntq(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntq(dst, src);
@@ -3283,8 +3169,7 @@ void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
bind(&not_zero_src);
}
-
-void MacroAssembler::Tzcntl(Register dst, Register src) {
+void TurboAssembler::Tzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntl(dst, src);
@@ -3297,8 +3182,7 @@ void MacroAssembler::Tzcntl(Register dst, Register src) {
bind(&not_zero_src);
}
-
-void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
+void TurboAssembler::Tzcntl(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntl(dst, src);
@@ -3311,8 +3195,7 @@ void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
bind(&not_zero_src);
}
-
-void MacroAssembler::Popcntl(Register dst, Register src) {
+void TurboAssembler::Popcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntl(dst, src);
@@ -3321,8 +3204,7 @@ void MacroAssembler::Popcntl(Register dst, Register src) {
UNREACHABLE();
}
-
-void MacroAssembler::Popcntl(Register dst, const Operand& src) {
+void TurboAssembler::Popcntl(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntl(dst, src);
@@ -3331,8 +3213,7 @@ void MacroAssembler::Popcntl(Register dst, const Operand& src) {
UNREACHABLE();
}
-
-void MacroAssembler::Popcntq(Register dst, Register src) {
+void TurboAssembler::Popcntq(Register dst, Register src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntq(dst, src);
@@ -3341,8 +3222,7 @@ void MacroAssembler::Popcntq(Register dst, Register src) {
UNREACHABLE();
}
-
-void MacroAssembler::Popcntq(Register dst, const Operand& src) {
+void TurboAssembler::Popcntq(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntq(dst, src);
@@ -3446,7 +3326,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
// Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
+ isolate());
Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
@@ -3456,18 +3337,15 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ ExternalReference handler_address(IsolateAddressId::kHandlerAddress,
+ isolate());
Pop(ExternalOperand(handler_address));
addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
+void TurboAssembler::Ret() { ret(0); }
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
+void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
@@ -3560,6 +3438,11 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
Cvtqsi2sd(dst, src);
}
+void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg,
+ Register input_reg, int offset) {
+ CallStubDelayed(
+ new (zone) DoubleToIStub(nullptr, input_reg, result_reg, offset, true));
+}
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
@@ -3693,8 +3576,18 @@ void MacroAssembler::AssertSmi(const Operand& object) {
}
}
+void MacroAssembler::AssertFixedArray(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAFixedArray);
+ Push(object);
+ CmpObjectType(object, FIXED_ARRAY_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAFixedArray);
+ }
+}
-void MacroAssembler::AssertZeroExtended(Register int32_register) {
+void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
DCHECK(!int32_register.is(kScratchRegister));
movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
@@ -3727,8 +3620,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
-void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
- // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
@@ -3738,15 +3630,11 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
Push(object);
movp(map, FieldOperand(object, HeapObject::kMapOffset));
- Label async, do_check;
- testb(flags, Immediate(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
- j(not_zero, &async);
-
+ Label do_check;
// Check if JSGeneratorObject
CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
- jmp(&do_check);
+ j(equal, &do_check);
- bind(&async);
// Check if JSAsyncGeneratorObject
CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -3835,7 +3723,7 @@ void MacroAssembler::MaybeDropFrames() {
RelocInfo::CODE_TARGET);
}
-void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
+void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1,
ReturnAddressState ra_state) {
@@ -3913,8 +3801,8 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- LoadSharedFunctionInfoSpecialField(
- rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
+ movsxlq(rbx,
+ FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(rbx);
InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
@@ -4097,17 +3985,17 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
bind(&skip_hook);
}
-void MacroAssembler::StubPrologue(StackFrame::Type type) {
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
}
-void MacroAssembler::Prologue(bool code_pre_aging) {
+void TurboAssembler::Prologue(bool code_pre_aging) {
PredictableCodeSizeScope predictible_code_size_scope(this,
kNoCodeAgeSequenceLength);
if (code_pre_aging) {
- // Pre-age the code.
+ // Pre-age the code.
Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
@@ -4125,15 +4013,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
movp(vector, FieldOperand(vector, Cell::kValueOffset));
}
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on x64.
- UNREACHABLE();
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
pushq(rbp);
movp(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type)));
@@ -4150,8 +4030,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
}
}
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
@@ -4204,9 +4083,9 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
movp(r14, rax); // Backup rax in callee-save register.
}
- Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
- Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
- Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
+ Store(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()), rbp);
+ Store(ExternalReference(IsolateAddressId::kContextAddress, isolate()), rsi);
+ Store(ExternalReference(IsolateAddressId::kCFunctionAddress, isolate()), rbx);
}
@@ -4235,7 +4114,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Get the required frame alignment for the OS.
const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
+ DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
DCHECK(is_int8(kFrameAlignment));
andp(rsp, Immediate(-kFrameAlignment));
}
@@ -4305,7 +4184,8 @@ void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
+ isolate());
Operand context_operand = ExternalOperand(context_address);
if (restore_context) {
movp(rsi, context_operand);
@@ -4315,7 +4195,7 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
+ ExternalReference c_entry_fp_address(IsolateAddressId::kCEntryFPAddress,
isolate());
Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
movp(c_entry_fp_operand, Immediate(0));
@@ -4410,7 +4290,7 @@ void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
Label aligned;
testl(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
- if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
+ if ((flags & PRETENURE) != 0) {
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
cmpp(result, ExternalOperand(allocation_limit));
@@ -4453,7 +4333,6 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4491,10 +4370,7 @@ void MacroAssembler::Allocate(int object_size,
cmpp(top_reg, limit_operand);
j(above, gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
- }
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
if (top_reg.is(result)) {
subp(result, Immediate(object_size - kHeapObjectTag));
@@ -4515,8 +4391,6 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
- DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4529,7 +4403,6 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4562,49 +4435,12 @@ void MacroAssembler::Allocate(Register object_size,
cmpp(result_end, limit_operand);
j(above, gc_required);
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- UpdateAllocationTopHelper(result_end, scratch, flags);
- }
+ UpdateAllocationTopHelper(result_end, scratch, flags);
// Tag the result.
addp(result, Immediate(kHeapObjectTag));
}
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register result_end, AllocationFlags flags) {
- DCHECK(!result.is(result_end));
- // Load address of new object into result.
- LoadAllocationTopHelper(result, no_reg, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
- }
-
- leap(result_end, Operand(result, object_size));
-
- UpdateAllocationTopHelper(result_end, no_reg, flags);
-
- addp(result, Immediate(kHeapObjectTag));
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, AllocationFlags flags) {
- DCHECK(!result.is(result_end));
- // Load address of new object into result.
- LoadAllocationTopHelper(result, no_reg, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
- }
-
- leap(result_end, Operand(result, object_size, times_1, 0));
-
- UpdateAllocationTopHelper(result_end, no_reg, flags);
-
- addp(result, Immediate(kHeapObjectTag));
-}
-
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
Label* gc_required,
@@ -4637,7 +4473,7 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
LoadGlobalFunctionInitialMap(constructor, scratch);
movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ movp(FieldOperand(result, JSObject::kPropertiesOrHashOffset), scratch);
movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
movp(FieldOperand(result, JSValue::kValueOffset), value);
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -4709,8 +4545,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
-
-int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
+int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On Windows 64 stack slots are reserved by the caller for all arguments
// including the ones passed in registers, and space is always allocated for
// the four register arguments even if the function takes fewer than four
@@ -4761,15 +4596,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiToInteger32(index, index);
}
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments) {
+void TurboAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = base::OS::ActivationFrameAlignment();
DCHECK(frame_alignment != 0);
DCHECK(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
movp(kScratchRegister, rsp);
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
@@ -4777,15 +4611,13 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
LoadAddress(rax, function);
CallCFunction(rax, num_arguments);
}
-
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
@@ -4851,14 +4683,9 @@ CodePatcher::~CodePatcher() {
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
+ Condition cc, Label* condition_met,
+ Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
if (scratch.is(object)) {
andp(scratch, Immediate(~Page::kPageAlignmentMask));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 2e478dc85c..1c79d06b29 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -85,9 +85,358 @@ struct SmiIndex {
ScaleFactor scale;
};
+class TurboAssembler : public Assembler {
+ public:
+ TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
+ CodeObjectRequired create_code_object)
+ : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
+ }
+ }
+
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+
+ Isolate* isolate() const { return isolate_; }
+
+ Handle<HeapObject> CodeObject() {
+ DCHECK(!code_object_.is_null());
+ return code_object_;
+ }
+
+#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
+ void macro_name(XMMRegister dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ } else { \
+ name(dst, src); \
+ } \
+ }
+#define AVX_OP2_X(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
+#define AVX_OP2_O(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
+#define AVX_OP2_XO(macro_name, name) \
+ AVX_OP2_X(macro_name, name) \
+ AVX_OP2_O(macro_name, name)
+
+ AVX_OP2_XO(Subsd, subsd)
+ AVX_OP2_XO(Divss, divss)
+ AVX_OP2_XO(Divsd, divsd)
+ AVX_OP2_XO(Xorpd, xorpd)
+ AVX_OP2_X(Pcmpeqd, pcmpeqd)
+ AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
+ AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
+
+#undef AVX_OP2_O
+#undef AVX_OP2_X
+#undef AVX_OP2_XO
+#undef AVX_OP2_WITH_TYPE
+
+ void Xorps(XMMRegister dst, XMMRegister src);
+ void Xorps(XMMRegister dst, const Operand& src);
+
+ void Movd(XMMRegister dst, Register src);
+ void Movd(XMMRegister dst, const Operand& src);
+ void Movd(Register dst, XMMRegister src);
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
+
+ void Movsd(XMMRegister dst, XMMRegister src);
+ void Movsd(XMMRegister dst, const Operand& src);
+ void Movsd(const Operand& dst, XMMRegister src);
+ void Movss(XMMRegister dst, XMMRegister src);
+ void Movss(XMMRegister dst, const Operand& src);
+ void Movss(const Operand& dst, XMMRegister src);
+
+ void PushReturnAddressFrom(Register src) { pushq(src); }
+ void PopReturnAddressTo(Register dst) { popq(dst); }
+
+ void Ret();
+
+ // Return and drop arguments from stack, where the number of arguments
+ // may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
+ // Load a register with a long value as efficiently as possible.
+ void Set(Register dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
+
+ // Operations on roots in the root-array.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
+ LoadRoot(kScratchRegister, index);
+ movp(destination, kScratchRegister);
+ }
+
+ void Movups(XMMRegister dst, XMMRegister src);
+ void Movups(XMMRegister dst, const Operand& src);
+ void Movups(const Operand& dst, XMMRegister src);
+ void Movapd(XMMRegister dst, XMMRegister src);
+ void Movaps(XMMRegister dst, XMMRegister src);
+ void Movmskpd(Register dst, XMMRegister src);
+ void Movmskps(Register dst, XMMRegister src);
+
+ void Push(Register src);
+ void Push(const Operand& src);
+ void Push(Immediate value);
+ void Push(Smi* smi);
+ void Push(Handle<HeapObject> source);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
+ // etc., not pushed. The argument count assumes all arguments are word sized.
+ // The number of slots reserved for arguments depends on platform. On Windows
+ // stack slots are reserved for the arguments passed in registers. On other
+ // platforms stack slots are only reserved for the arguments actually passed
+ // on the stack.
+ void PrepareCallCFunction(int num_arguments);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+
+ // Calculate the number of stack slots to reserve for arguments when calling a
+ // C function.
+ int ArgumentStackSlotsForCFunctionCall(int num_arguments);
+
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ void Cvtss2sd(XMMRegister dst, XMMRegister src);
+ void Cvtss2sd(XMMRegister dst, const Operand& src);
+ void Cvtsd2ss(XMMRegister dst, XMMRegister src);
+ void Cvtsd2ss(XMMRegister dst, const Operand& src);
+ void Cvttsd2si(Register dst, XMMRegister src);
+ void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttsd2siq(Register dst, XMMRegister src);
+ void Cvttsd2siq(Register dst, const Operand& src);
+ void Cvttss2si(Register dst, XMMRegister src);
+ void Cvttss2si(Register dst, const Operand& src);
+ void Cvttss2siq(Register dst, XMMRegister src);
+ void Cvttss2siq(Register dst, const Operand& src);
+ void Cvtqsi2ss(XMMRegister dst, Register src);
+ void Cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void Cvtqsi2sd(XMMRegister dst, Register src);
+ void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtlsi2ss(XMMRegister dst, Register src);
+ void Cvtlsi2ss(XMMRegister dst, const Operand& src);
+ void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
+ void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
+
+ // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
+ // hinders register renaming and makes dependence chains longer. So we use
+ // xorpd to clear the dst register before cvtsi2sd to solve this issue.
+ void Cvtlsi2sd(XMMRegister dst, Register src);
+ void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+
+ void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
+ void Sqrtsd(XMMRegister dst, XMMRegister src);
+ void Sqrtsd(XMMRegister dst, const Operand& src);
+
+ void Ucomiss(XMMRegister src1, XMMRegister src2);
+ void Ucomiss(XMMRegister src1, const Operand& src2);
+ void Ucomisd(XMMRegister src1, XMMRegister src2);
+ void Ucomisd(XMMRegister src1, const Operand& src2);
+
+ void Lzcntq(Register dst, Register src);
+ void Lzcntq(Register dst, const Operand& src);
+ void Lzcntl(Register dst, Register src);
+ void Lzcntl(Register dst, const Operand& src);
+ void Tzcntq(Register dst, Register src);
+ void Tzcntq(Register dst, const Operand& src);
+ void Tzcntl(Register dst, Register src);
+ void Tzcntl(Register dst, const Operand& src);
+ void Popcntl(Register dst, Register src);
+ void Popcntl(Register dst, const Operand& src);
+ void Popcntq(Register dst, Register src);
+ void Popcntq(Register dst, const Operand& src);
+
+ // Is the value a tagged smi.
+ Condition CheckSmi(Register src);
+ Condition CheckSmi(const Operand& src);
+
+ // Jump to label if the value is a tagged smi.
+ void JumpIfSmi(Register src, Label* on_smi,
+ Label::Distance near_jump = Label::kFar);
+
+ void Move(Register dst, Smi* source);
+
+ void Move(const Operand& dst, Smi* source) {
+ Register constant = GetSmiConstant(source);
+ movp(dst, constant);
+ }
+
+ void Move(Register dst, ExternalReference ext) {
+ movp(dst, reinterpret_cast<void*>(ext.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
+
+ void Move(XMMRegister dst, uint32_t src);
+ void Move(XMMRegister dst, uint64_t src);
+ void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
+ void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+
+ // Move if the registers are not identical.
+ void Move(Register target, Register source);
+
+ void Move(Register dst, Handle<HeapObject> source,
+ RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
+ void Move(const Operand& dst, Handle<HeapObject> source,
+ RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
+
+ // Loads a pointer into a register with a relocation mode.
+ void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
+ // This method must not be used with heap object references. The stored
+ // address is not GC safe. Use the handle version instead.
+ DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
+ movp(dst, ptr, rmode);
+ }
+
+ // Convert smi to 32-bit integer. I.e., not sign extended into
+ // high 32 bits of destination.
+ void SmiToInteger32(Register dst, Register src);
+ void SmiToInteger32(Register dst, const Operand& src);
+
+ // Loads the address of the external reference into the destination
+ // register.
+ void LoadAddress(Register destination, ExternalReference source);
+
+ void Call(const Operand& op);
+ void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ void Call(Address destination, RelocInfo::Mode rmode);
+ void Call(ExternalReference ext);
+ void Call(Label* target) { call(target); }
+
+ // The size of the code generated for different call instructions.
+ int CallSize(ExternalReference ext);
+ int CallSize(Address destination) { return kCallSequenceLength; }
+ int CallSize(Handle<Code> code_object) {
+ // Code calls use 32-bit relative addressing.
+ return kShortCallInstructionLength;
+ }
+ int CallSize(Register target) {
+ // Opcode: REX_opt FF /2 m64
+ return (target.high_bit() != 0) ? 3 : 2;
+ }
+ int CallSize(const Operand& target) {
+ // Opcode: REX_opt FF /2 m64
+ return (target.requires_rex() ? 2 : 1) + target.operand_size();
+ }
+
+ // Returns the size of the code generated by LoadAddress.
+ // Used by CallSize(ExternalReference) to find the size of a call.
+ int LoadAddressSize(ExternalReference source);
+
+ // Non-SSE2 instructions.
+ void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue(bool code_pre_aging);
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, BailoutReason reason);
+
+ // Like Assert(), but without condition.
+ // Use --debug_code to enable.
+ void AssertUnreachable(BailoutReason reason);
+
+ // Abort execution if a 64 bit register containing a 32 bit payload does not
+ // have zeros in the top 32 bits, enabled via --debug-code.
+ void AssertZeroExtended(Register reg);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, BailoutReason reason);
+
+ // Print a message to stdout and abort execution.
+ void Abort(BailoutReason msg);
+
+ // Check that the stack is aligned.
+ void CheckStackAlignment();
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on x64.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ // Removes current frame and its arguments from the stack preserving
+ // the arguments and a return address pushed to the stack for the next call.
+ // |ra_state| defines whether return address is already pushed to stack or
+ // not. Both |callee_args_count| and |caller_args_count_reg| do not include
+ // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
+ // is trashed.
+ void PrepareForTailCall(const ParameterCount& callee_args_count,
+ Register caller_args_count_reg, Register scratch0,
+ Register scratch1, ReturnAddressState ra_state);
+
+ inline bool AllowThisStubCall(CodeStub* stub);
+
+ // Call a code stub. This expects {stub} to be zone-allocated, as it does not
+ // trigger generation of the stub's code object but instead files a
+ // HeapObjectRequest that will be fulfilled after code assembly.
+ void CallStubDelayed(CodeStub* stub);
+
+ void SlowTruncateToIDelayed(Zone* zone, Register result_reg,
+ Register input_reg,
+ int offset = HeapNumber::kValueOffset -
+ kHeapObjectTag);
+
+ // Call a runtime routine.
+ void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ Move(kRootRegister, roots_array_start);
+ addp(kRootRegister, Immediate(kRootRegisterBias));
+ }
+
+ void MoveNumber(Register dst, double value);
+ void MoveNonSmi(Register dst, double value);
+
+ protected:
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+ int smi_count = 0;
+ int heap_object_count = 0;
+
+ bool root_array_available_ = true;
+
+ int64_t RootRegisterDelta(ExternalReference other);
+
+ // Returns a register holding the smi value. The register MUST NOT be
+ // modified. It may be the "smi 1 constant" register.
+ Register GetSmiConstant(Smi* value);
+
+ private:
+ bool has_frame_ = false;
+ // This handle will be patched with the code object on installation.
+ Handle<HeapObject> code_object_;
+ Isolate* const isolate_;
+};
// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
+class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
@@ -111,8 +460,6 @@ class MacroAssembler: public Assembler {
bool old_value_;
};
- Isolate* isolate() const { return isolate_; }
-
// Operand pointing to an external reference.
// May emit code to set up the scratch register. The operand is
// only guaranteed to be correct as long as the scratch register
@@ -129,21 +476,10 @@ class MacroAssembler: public Assembler {
// operation(operand, ..);
void Load(Register destination, ExternalReference source);
void Store(ExternalReference destination, Register source);
- // Loads the address of the external reference into the destination
- // register.
- void LoadAddress(Register destination, ExternalReference source);
- // Returns the size of the code generated by LoadAddress.
- // Used by CallSize(ExternalReference) to find the size of a call.
- int LoadAddressSize(ExternalReference source);
// Pushes the address of the external reference onto the stack.
void PushAddress(ExternalReference source);
// Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
- void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
- LoadRoot(kScratchRegister, index);
- movp(destination, kScratchRegister);
- }
void StoreRoot(Register source, Heap::RootListIndex index);
// Load a root value where the index (or part of it) is variable.
// The variable_offset register is added to the fixed_offset value
@@ -151,8 +487,6 @@ class MacroAssembler: public Assembler {
void LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset);
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -212,13 +546,6 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@@ -314,10 +641,6 @@ class MacroAssembler: public Assembler {
// Frame restart support.
void MaybeDropFrames();
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue(bool code_pre_aging);
-
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -350,26 +673,9 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- Move(kRootRegister, roots_array_start);
- addp(kRootRegister, Immediate(kRootRegisterBias));
- }
-
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // |ra_state| defines whether return address is already pushed to stack or
- // not. Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1, ReturnAddressState ra_state);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@@ -424,11 +730,6 @@ class MacroAssembler: public Assembler {
// Result must be a valid smi.
void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
- // Convert smi to 32-bit integer. I.e., not sign extended into
- // high 32 bits of destination.
- void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, const Operand& src);
-
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
void SmiToInteger64(Register dst, const Operand& src);
@@ -458,10 +759,6 @@ class MacroAssembler: public Assembler {
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
- // Is the value a tagged smi.
- Condition CheckSmi(Register src);
- Condition CheckSmi(const Operand& src);
-
// Is the value a non-negative tagged smi.
Condition CheckNonNegativeSmi(Register src);
@@ -503,11 +800,6 @@ class MacroAssembler: public Assembler {
void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
Label::Distance near_jump = Label::kFar);
- // Jump to label if the value is a tagged smi.
- void JumpIfSmi(Register src,
- Label* on_smi,
- Label::Distance near_jump = Label::kFar);
-
// Jump to label if the value is not a tagged smi.
void JumpIfNotSmi(Register src,
Label* on_not_smi,
@@ -717,18 +1009,6 @@ class MacroAssembler: public Assembler {
// Sets flags as a normal add.
void AddSmiField(Register dst, const Operand& src);
- // Basic Smi operations.
- void Move(Register dst, Smi* source) {
- LoadSmiConstant(dst, source);
- }
-
- void Move(const Operand& dst, Smi* source) {
- Register constant = GetSmiConstant(source);
- movp(dst, constant);
- }
-
- void Push(Smi* smi);
-
// Save away a raw integer with pointer size on the stack as two integers
// masquerading as smis so that the garbage collector skips visiting them.
void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
@@ -777,62 +1057,16 @@ class MacroAssembler: public Assembler {
void Load(Register dst, const Operand& src, Representation r);
void Store(const Operand& dst, Register src, Representation r);
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int64_t x);
- void Set(const Operand& dst, intptr_t x);
-
- void Cvtss2sd(XMMRegister dst, XMMRegister src);
- void Cvtss2sd(XMMRegister dst, const Operand& src);
- void Cvtsd2ss(XMMRegister dst, XMMRegister src);
- void Cvtsd2ss(XMMRegister dst, const Operand& src);
-
- // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
- // hinders register renaming and makes dependence chains longer. So we use
- // xorpd to clear the dst register before cvtsi2sd to solve this issue.
- void Cvtlsi2sd(XMMRegister dst, Register src);
- void Cvtlsi2sd(XMMRegister dst, const Operand& src);
-
- void Cvtlsi2ss(XMMRegister dst, Register src);
- void Cvtlsi2ss(XMMRegister dst, const Operand& src);
- void Cvtqsi2ss(XMMRegister dst, Register src);
- void Cvtqsi2ss(XMMRegister dst, const Operand& src);
-
- void Cvtqsi2sd(XMMRegister dst, Register src);
- void Cvtqsi2sd(XMMRegister dst, const Operand& src);
-
- void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
- void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
-
void Cvtsd2si(Register dst, XMMRegister src);
- void Cvttss2si(Register dst, XMMRegister src);
- void Cvttss2si(Register dst, const Operand& src);
- void Cvttsd2si(Register dst, XMMRegister src);
- void Cvttsd2si(Register dst, const Operand& src);
- void Cvttss2siq(Register dst, XMMRegister src);
- void Cvttss2siq(Register dst, const Operand& src);
- void Cvttsd2siq(Register dst, XMMRegister src);
- void Cvttsd2siq(Register dst, const Operand& src);
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- void LoadSharedFunctionInfoSpecialField(Register dst,
- Register base,
- int offset);
-
- // Handle support
- void Move(Register dst, Handle<Object> source);
- void Move(const Operand& dst, Handle<Object> source);
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Cmp(Register dst, Smi* src);
void Cmp(const Operand& dst, Smi* src);
- void Push(Handle<Object> source);
+ void PushObject(Handle<Object> source);
- // Load a heap object and handle the case of new-space objects by
- // indirecting via a global cell.
- void MoveHeapObject(Register result, Handle<Object> object);
+ // Move a Smi or HeapNumber.
+ void MoveNumber(Register dst, double value);
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -861,41 +1095,11 @@ class MacroAssembler: public Assembler {
void DropUnderReturnAddress(int stack_elements,
Register scratch = kScratchRegister);
- void Call(Label* target) { call(target); }
- void Push(Register src);
- void Push(const Operand& src);
void PushQuad(const Operand& src);
- void Push(Immediate value);
void PushImm32(int32_t imm32);
void Pop(Register dst);
void Pop(const Operand& dst);
void PopQuad(const Operand& dst);
- void PushReturnAddressFrom(Register src) { pushq(src); }
- void PopReturnAddressTo(Register dst) { popq(dst); }
- void Move(Register dst, ExternalReference ext) {
- movp(dst, reinterpret_cast<void*>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- // Loads a pointer into a register with a relocation mode.
- void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
- movp(dst, ptr, rmode);
- }
-
- void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
- AllowDeferredHandleDereference using_raw_address;
- DCHECK(!RelocInfo::IsNone(rmode));
- DCHECK(value->IsHeapObject());
- movp(dst, reinterpret_cast<void*>(value.location()), rmode);
- }
-
- void Move(XMMRegister dst, uint32_t src);
- void Move(XMMRegister dst, uint64_t src);
- void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
- void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
void macro_name(XMMRegister dst, src_type src) { \
@@ -915,14 +1119,10 @@ class MacroAssembler: public Assembler {
AVX_OP2_O(macro_name, name)
AVX_OP2_XO(Addsd, addsd)
- AVX_OP2_XO(Subsd, subsd)
AVX_OP2_XO(Mulsd, mulsd)
- AVX_OP2_XO(Divss, divss)
- AVX_OP2_XO(Divsd, divsd)
AVX_OP2_XO(Andps, andps)
AVX_OP2_XO(Andpd, andpd)
AVX_OP2_XO(Orpd, orpd)
- AVX_OP2_XO(Xorpd, xorpd)
AVX_OP2_XO(Cmpeqps, cmpeqps)
AVX_OP2_XO(Cmpltps, cmpltps)
AVX_OP2_XO(Cmpleps, cmpleps)
@@ -935,51 +1135,12 @@ class MacroAssembler: public Assembler {
AVX_OP2_XO(Cmpneqpd, cmpneqpd)
AVX_OP2_XO(Cmpnltpd, cmpnltpd)
AVX_OP2_XO(Cmpnlepd, cmpnlepd)
- AVX_OP2_X(Pcmpeqd, pcmpeqd)
- AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
- AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
#undef AVX_OP2_O
#undef AVX_OP2_X
#undef AVX_OP2_XO
#undef AVX_OP2_WITH_TYPE
- void Movsd(XMMRegister dst, XMMRegister src);
- void Movsd(XMMRegister dst, const Operand& src);
- void Movsd(const Operand& dst, XMMRegister src);
- void Movss(XMMRegister dst, XMMRegister src);
- void Movss(XMMRegister dst, const Operand& src);
- void Movss(const Operand& dst, XMMRegister src);
-
- void Movd(XMMRegister dst, Register src);
- void Movd(XMMRegister dst, const Operand& src);
- void Movd(Register dst, XMMRegister src);
- void Movq(XMMRegister dst, Register src);
- void Movq(Register dst, XMMRegister src);
-
- void Movaps(XMMRegister dst, XMMRegister src);
- void Movups(XMMRegister dst, XMMRegister src);
- void Movups(XMMRegister dst, const Operand& src);
- void Movups(const Operand& dst, XMMRegister src);
- void Movmskps(Register dst, XMMRegister src);
- void Movapd(XMMRegister dst, XMMRegister src);
- void Movupd(XMMRegister dst, const Operand& src);
- void Movupd(const Operand& dst, XMMRegister src);
- void Movmskpd(Register dst, XMMRegister src);
-
- void Xorps(XMMRegister dst, XMMRegister src);
- void Xorps(XMMRegister dst, const Operand& src);
-
- void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void Sqrtsd(XMMRegister dst, XMMRegister src);
- void Sqrtsd(XMMRegister dst, const Operand& src);
-
- void Ucomiss(XMMRegister src1, XMMRegister src2);
- void Ucomiss(XMMRegister src1, const Operand& src2);
- void Ucomisd(XMMRegister src1, XMMRegister src2);
- void Ucomisd(XMMRegister src1, const Operand& src2);
-
// ---------------------------------------------------------------------------
// SIMD macros.
void Absps(XMMRegister dst);
@@ -993,54 +1154,6 @@ class MacroAssembler: public Assembler {
void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
- void Call(Address destination, RelocInfo::Mode rmode);
- void Call(ExternalReference ext);
- void Call(const Operand& op);
- void Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // The size of the code generated for different call instructions.
- int CallSize(Address destination) {
- return kCallSequenceLength;
- }
- int CallSize(ExternalReference ext);
- int CallSize(Handle<Code> code_object) {
- // Code calls use 32-bit relative addressing.
- return kShortCallInstructionLength;
- }
- int CallSize(Register target) {
- // Opcode: REX_opt FF /2 m64
- return (target.high_bit() != 0) ? 3 : 2;
- }
- int CallSize(const Operand& target) {
- // Opcode: REX_opt FF /2 m64
- return (target.requires_rex() ? 2 : 1) + target.operand_size();
- }
-
- // Non-SSE2 instructions.
- void Pextrd(Register dst, XMMRegister src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
-
- void Lzcntq(Register dst, Register src);
- void Lzcntq(Register dst, const Operand& src);
-
- void Lzcntl(Register dst, Register src);
- void Lzcntl(Register dst, const Operand& src);
-
- void Tzcntq(Register dst, Register src);
- void Tzcntq(Register dst, const Operand& src);
-
- void Tzcntl(Register dst, Register src);
- void Tzcntl(Register dst, const Operand& src);
-
- void Popcntl(Register dst, Register src);
- void Popcntl(Register dst, const Operand& src);
-
- void Popcntq(Register dst, Register src);
- void Popcntq(Register dst, const Operand& src);
-
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -1146,9 +1259,8 @@ class MacroAssembler: public Assembler {
void AssertSmi(Register object);
void AssertSmi(const Operand& object);
- // Abort execution if a 64 bit register containing a 32 bit payload does not
- // have zeros in the top 32 bits, enabled via --debug-code.
- void AssertZeroExtended(Register reg);
+ // Abort execution if argument is not a FixedArray, enabled via --debug-code.
+ void AssertFixedArray(Register object);
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -1157,9 +1269,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
- // Abort execution if argument is not a JSGeneratorObject,
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
// enabled via --debug-code.
- void AssertGeneratorObject(Register object, Register suspend_flags);
+ void AssertGeneratorObject(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@@ -1215,15 +1327,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // FastAllocate is right now only used for folded allocations. It just
- // increments the top pointer without checking against limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register result_end,
- AllocationFlags flags);
-
- void FastAllocate(Register object_size, Register result, Register result_end,
- AllocationFlags flags);
-
// Allocate a heap number in new space with undefined value. Returns
// tagged pointer in result register, or jumps to gc_required if new
// space is full.
@@ -1268,7 +1371,9 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ // The code object is generated immediately, in contrast to
+ // TurboAssembler::CallStubDelayed.
+ void CallStub(CodeStub* stub);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
@@ -1308,41 +1413,9 @@ class MacroAssembler: public Assembler {
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // The number of slots reserved for arguments depends on platform. On Windows
- // stack slots are reserved for the arguments passed in registers. On other
- // platforms stack slots are only reserved for the arguments actually passed
- // on the stack.
- void PrepareCallCFunction(int num_arguments);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Calculate the number of stack slots to reserve for arguments when calling a
- // C function.
- int ArgumentStackSlotsForCFunctionCall(int num_arguments);
-
// ---------------------------------------------------------------------------
// Utilities
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.
@@ -1365,26 +1438,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugging
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -1392,11 +1445,6 @@ class MacroAssembler: public Assembler {
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
- void LeaveFrame(StackFrame::Type type);
-
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
@@ -1419,26 +1467,9 @@ class MacroAssembler: public Assembler {
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 12;
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
- bool generating_stub_;
- bool has_frame_;
- Isolate* isolate_;
- bool root_array_available_;
int jit_cookie_;
- // Returns a register holding the smi value. The register MUST NOT be
- // modified. It may be the "smi 1 constant" register.
- Register GetSmiConstant(Smi* value);
-
- int64_t RootRegisterDelta(ExternalReference other);
-
- // Moves the smi value to the destination register.
- void LoadSmiConstant(Register dst, Smi* value);
-
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
diff --git a/deps/v8/src/x87/OWNERS b/deps/v8/src/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
deleted file mode 100644
index 02ffffc292..0000000000
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ /dev/null
@@ -1,547 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_X87_ASSEMBLER_X87_INL_H_
-#define V8_X87_ASSEMBLER_X87_INL_H_
-
-#include "src/x87/assembler-x87.h"
-
-#include "src/assembler.h"
-#include "src/debug/debug.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return true; }
-
-bool CpuFeatures::SupportsWasmSimd128() { return false; }
-
-static const byte kCallOpcode = 0xE8;
-static const int kNoCodeAgeSequenceLength = 5;
-
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // Relocate entry.
- } else if (IsCodeAgeSequence(rmode_)) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- }
- } else if (IsDebugBreakSlot(rmode_) && IsPatchedDebugBreakSlotSequence()) {
- // Special handling of a debug break slot when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(
- pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
- *p -= delta; // Relocate entry.
- } else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // Relocate entry.
- }
-}
-
-
-Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
-Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-Address RelocInfo::constant_pool_entry_address() {
- UNREACHABLE();
- return NULL;
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
-
-HeapObject* RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Memory::Object_at(pc_));
-}
-
-Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>::cast(Memory::Object_Handle_at(pc_));
-}
-
-void RelocInfo::set_target_object(HeapObject* target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
- }
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target);
- }
-}
-
-
-Address RelocInfo::target_external_reference() {
- DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
-}
-
-
-Address RelocInfo::target_internal_reference() {
- DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
-}
-
-
-Address RelocInfo::target_internal_reference_address() {
- DCHECK(rmode_ == INTERNAL_REFERENCE);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-Address RelocInfo::target_runtime_entry(Assembler* origin) {
- DCHECK(IsRuntimeEntry(rmode_));
- return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
-}
-
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsRuntimeEntry(rmode_));
- if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
- }
-}
-
-
-Handle<Cell> RelocInfo::target_cell_handle() {
- DCHECK(rmode_ == RelocInfo::CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<Cell>(reinterpret_cast<Cell**>(address));
-}
-
-
-Cell* RelocInfo::target_cell() {
- DCHECK(rmode_ == RelocInfo::CELL);
- return Cell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(Cell* cell,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(cell->IsCell());
- DCHECK(rmode_ == RelocInfo::CELL);
- Address address = cell->address() + Cell::kValueOffset;
- Memory::Address_at(pc_) = address;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(cell->GetIsolate(), pc_, sizeof(Address));
- }
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- cell);
- }
-}
-
-Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
- DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- DCHECK(*pc_ == kCallOpcode);
- return Handle<Code>::cast(Memory::Object_Handle_at(pc_ + 1));
-}
-
-
-Code* RelocInfo::code_age_stub() {
- DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- DCHECK(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1, host_));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(*pc_ == kCallOpcode);
- DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(stub->GetIsolate(), pc_ + 1, host_,
- stub->instruction_start(),
- icache_flush_mode);
-}
-
-
-Address RelocInfo::debug_call_address() {
- DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
- return Assembler::target_address_at(location, host_);
-}
-
-void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
- DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
- Assembler::set_target_address_at(isolate, location, host_, target);
- if (host() != NULL) {
- Code* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target_code);
- }
-}
-
-void RelocInfo::WipeOut(Isolate* isolate) {
- if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = NULL;
- } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
- // Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, host_,
- pc_ + sizeof(int32_t));
- } else {
- UNREACHABLE();
- }
-}
-
-template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(host(), this);
- } else if (mode == RelocInfo::CELL) {
- visitor->VisitCellPointer(host(), this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(host(), this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(host(), this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(host(), this);
- } else if (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(host(), this);
- } else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(host(), this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::CELL) {
- StaticVisitor::VisitCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- StaticVisitor::VisitInternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()) {
- StaticVisitor::VisitDebugTarget(heap, this);
- } else if (IsRuntimeEntry(mode)) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-
-Immediate::Immediate(int x) {
- x_ = x;
- rmode_ = RelocInfo::NONE32;
-}
-
-Immediate::Immediate(Address x, RelocInfo::Mode rmode) {
- x_ = reinterpret_cast<int32_t>(x);
- rmode_ = rmode;
-}
-
-Immediate::Immediate(const ExternalReference& ext) {
- x_ = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Label* internal_offset) {
- x_ = reinterpret_cast<int32_t>(internal_offset);
- rmode_ = RelocInfo::INTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
-  // Verify all Objects referred to by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- x_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
-}
-
-
-Immediate::Immediate(Smi* value) {
- x_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Immediate::Immediate(Address addr) {
- x_ = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-void Assembler::emit(uint32_t x) {
- *reinterpret_cast<uint32_t*>(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit_q(uint64_t x) {
- *reinterpret_cast<uint64_t*>(pc_) = x;
- pc_ += sizeof(uint64_t);
-}
-
-
-void Assembler::emit(Handle<Object> handle) {
- AllowDeferredHandleDereference heap_object_check;
-  // Verify all Objects referred to by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- emit(reinterpret_cast<intptr_t>(handle.location()),
- RelocInfo::EMBEDDED_OBJECT);
- } else {
- // no relocation needed
- emit(reinterpret_cast<intptr_t>(obj));
- }
-}
-
-
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
- if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)
- && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
- RecordRelocInfo(rmode);
- }
- emit(x);
-}
-
-
-void Assembler::emit(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id) {
- AllowDeferredHandleDereference embedding_raw_address;
- emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
-}
-
-
-void Assembler::emit(const Immediate& x) {
- if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
- Label* label = reinterpret_cast<Label*>(x.x_);
- emit_code_relative_offset(label);
- return;
- }
- if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
- emit(x.x_);
-}
-
-
-void Assembler::emit_code_relative_offset(Label* label) {
- if (label->is_bound()) {
- int32_t pos;
- pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
- emit(pos);
- } else {
- emit_disp(label, Displacement::CODE_RELATIVE);
- }
-}
-
-void Assembler::emit_b(Immediate x) {
- DCHECK(x.is_int8() || x.is_uint8());
- uint8_t value = static_cast<uint8_t>(x.x_);
- *pc_++ = value;
-}
-
-void Assembler::emit_w(const Immediate& x) {
- DCHECK(RelocInfo::IsNone(x.rmode_));
- uint16_t value = static_cast<uint16_t>(x.x_);
- reinterpret_cast<uint16_t*>(pc_)[0] = value;
- pc_ += sizeof(uint16_t);
-}
-
-
-Address Assembler::target_address_at(Address pc, Address constant_pool) {
- return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
-}
-
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
- int32_t* p = reinterpret_cast<int32_t*>(pc);
- *p = target - (pc + sizeof(int32_t));
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, sizeof(int32_t));
- }
-}
-
-Address Assembler::target_address_at(Address pc, Code* code) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- return target_address_at(pc, constant_pool);
-}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode) {
- Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
-
-Displacement Assembler::disp_at(Label* L) {
- return Displacement(long_at(L->pos()));
-}
-
-
-void Assembler::disp_at_put(Label* L, Displacement disp) {
- long_at_put(L->pos(), disp.data());
-}
-
-
-void Assembler::emit_disp(Label* L, Displacement::Type type) {
- Displacement disp(L, type);
- L->link_to(pc_offset());
- emit(static_cast<int>(disp.data()));
-}
-
-
-void Assembler::emit_near_disp(Label* L) {
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- DCHECK(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- *pc_++ = disp;
-}
-
-
-void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
- Memory::Address_at(pc) = target;
-}
-
-
-void Operand::set_modrm(int mod, Register rm) {
- DCHECK((mod & -4) == 0);
- buf_[0] = mod << 6 | rm.code();
- len_ = 1;
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK(len_ == 1);
- DCHECK((scale & -4) == 0);
- // Use SIB with no index register only for base esp.
- DCHECK(!index.is(esp) || base.is(esp));
- buf_[1] = scale << 6 | index.code() << 3 | base.code();
- len_ = 2;
-}
-
-
-void Operand::set_disp8(int8_t disp) {
- DCHECK(len_ == 1 || len_ == 2);
- *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
-}
-
-
-void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
- DCHECK(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
- rmode_ = rmode;
-}
-
-Operand::Operand(Register reg) {
- // reg
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(disp, rmode);
-}
-
-
-Operand::Operand(Immediate imm) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(imm.x_, imm.rmode_);
-}
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_ASSEMBLER_X87_INL_H_
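
The inline Assembler helpers deleted above encode call and jump targets as 32-bit displacements relative to the first byte after the displacement field (see target_address_at / set_target_address_at in the hunk above). A minimal standalone C++ sketch of that pc-relative arithmetic, for illustration only; the helper names and the toy buffer below are assumptions, not V8 API:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    using Address = uint8_t*;

    // Read a target stored at |pc| as a 32-bit displacement relative to the
    // first byte after the displacement (pc + 4), as in
    // Assembler::target_address_at above.
    Address ReadPcRelativeTarget(Address pc) {
      int32_t disp;
      std::memcpy(&disp, pc, sizeof(disp));  // avoid unaligned access
      return pc + sizeof(int32_t) + disp;
    }

    // Patch the displacement at |pc| so the encoded target becomes |target|,
    // mirroring Assembler::set_target_address_at (minus the icache flush).
    void WritePcRelativeTarget(Address pc, Address target) {
      int32_t disp = static_cast<int32_t>(target - (pc + sizeof(int32_t)));
      std::memcpy(pc, &disp, sizeof(disp));
    }

    int main() {
      uint8_t buffer[64] = {};
      Address pc = buffer + 8;       // pretend a call displacement lives here
      Address target = buffer + 40;  // pretend this is the call target
      WritePcRelativeTarget(pc, target);
      assert(ReadPcRelativeTarget(pc) == target);
      return 0;
    }
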
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
deleted file mode 100644
index 08dade2a43..0000000000
--- a/deps/v8/src/x87/assembler-x87.cc
+++ /dev/null
@@ -1,2217 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "src/x87/assembler-x87.h"
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/base/bits.h"
-#include "src/base/cpu.h"
-#include "src/disassembler.h"
-#include "src/macro-assembler.h"
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-void CpuFeatures::ProbeImpl(bool cross_compile) {
- base::CPU cpu;
-
- // Only use statically determined features for cross compile (snapshot).
- if (cross_compile) return;
-}
-
-
-void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Displacement
-
-void Displacement::init(Label* L, Type type) {
- DCHECK(!L->is_bound());
- int next = 0;
- if (L->is_linked()) {
- next = L->pos();
- DCHECK(next > 0); // Displacements must be at positions > 0
- }
- // Ensure that we _never_ overflow the next field.
- DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize));
- data_ = NextField::encode(next) | TypeField::encode(type);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-
-const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::CODE_AGE_SEQUENCE |
- RelocInfo::kDebugBreakSlotMask;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on IA32 means that it is a relative address, as used by
- // branch instructions. These are also the ones that need changing when a
- // code object moves.
- return (1 << rmode_) & kApplyMask;
-}
-
-
-bool RelocInfo::IsInConstantPool() {
- return false;
-}
-
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
-Address RelocInfo::wasm_global_reference() {
- DCHECK(IsWasmGlobalReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
-uint32_t RelocInfo::wasm_memory_size_reference() {
- DCHECK(IsWasmMemorySizeReference(rmode_));
- return Memory::uint32_at(pc_);
-}
-
-uint32_t RelocInfo::wasm_function_table_size_reference() {
- DCHECK(IsWasmFunctionTableSizeReference(rmode_));
- return Memory::uint32_at(pc_);
-}
-
-void RelocInfo::unchecked_update_wasm_memory_reference(
- Isolate* isolate, Address address, ICacheFlushMode icache_flush_mode) {
- Memory::Address_at(pc_) = address;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
- }
-}
-
-void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode icache_flush_mode) {
- Memory::uint32_at(pc_) = size;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(uint32_t));
- }
-}
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
- // [base + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
- // [base]
- set_modrm(0, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
- // [base + disp8]
- set_modrm(1, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_disp8(disp);
- } else {
- // [base + disp/r]
- set_modrm(2, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- DCHECK(!index.is(esp)); // illegal addressing mode
- // [base + index*scale + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
- // [base + index*scale]
- set_modrm(0, esp);
- set_sib(scale, index, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
- // [base + index*scale + disp8]
- set_modrm(1, esp);
- set_sib(scale, index, base);
- set_disp8(disp);
- } else {
- // [base + index*scale + disp/r]
- set_modrm(2, esp);
- set_sib(scale, index, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- DCHECK(!index.is(esp)); // illegal addressing mode
- // [index*scale + disp/r]
- set_modrm(0, esp);
- set_sib(scale, index, ebp);
- set_dispr(disp, rmode);
-}
-
-
-bool Operand::is_reg(Register reg) const {
- return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
- && ((buf_[0] & 0x07) == reg.code()); // register codes match.
-}
-
-
-bool Operand::is_reg_only() const {
- return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
-}
-
-
-Register Operand::reg() const {
- DCHECK(is_reg_only());
- return Register::from_code(buf_[0] & 0x07);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Emit a single byte. Must always be inlined.
-#define EMIT(x) \
- *pc_++ = (x)
-
-Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size) {
-// Clear the buffer in debug mode unless it was provided by the
-// caller in which case we can't be sure it's okay to overwrite
-// existing code in it; see CodePatcher::CodePatcher(...).
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
-#endif
-
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->origin = this;
- desc->constant_pool_size = 0;
- desc->unwinding_info_size = 0;
- desc->unwinding_info = nullptr;
-}
-
-
-void Assembler::Align(int m) {
- DCHECK(base::bits::IsPowerOfTwo32(m));
- int mask = m - 1;
- int addr = pc_offset();
- Nop((m - (addr & mask)) & mask);
-}
-
-
-bool Assembler::IsNop(Address addr) {
- Address a = addr;
- while (*a == 0x66) a++;
- if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
- return false;
-}
-
-
-void Assembler::Nop(int bytes) {
- EnsureSpace ensure_space(this);
-
- // Older CPUs that do not support SSE2 may not support multibyte NOP
- // instructions.
- for (; bytes > 0; bytes--) {
- EMIT(0x90);
- }
- return;
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on ia32.
-}
-
-
-void Assembler::cpuid() {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA2);
-}
-
-
-void Assembler::pushad() {
- EnsureSpace ensure_space(this);
- EMIT(0x60);
-}
-
-
-void Assembler::popad() {
- EnsureSpace ensure_space(this);
- EMIT(0x61);
-}
-
-
-void Assembler::pushfd() {
- EnsureSpace ensure_space(this);
- EMIT(0x9C);
-}
-
-
-void Assembler::popfd() {
- EnsureSpace ensure_space(this);
- EMIT(0x9D);
-}
-
-
-void Assembler::push(const Immediate& x) {
- EnsureSpace ensure_space(this);
- if (x.is_int8()) {
- EMIT(0x6a);
- EMIT(x.x_);
- } else {
- EMIT(0x68);
- emit(x);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(imm32);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x50 | src.code());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(esi, src);
-}
-
-
-void Assembler::pop(Register dst) {
- DCHECK(reloc_info_writer.last_pc() != NULL);
- EnsureSpace ensure_space(this);
- EMIT(0x58 | dst.code());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x8F);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::enter(const Immediate& size) {
- EnsureSpace ensure_space(this);
- EMIT(0xC8);
- emit_w(size);
- EMIT(0);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- EMIT(0xC9);
-}
-
-
-void Assembler::mov_b(Register dst, const Operand& src) {
- CHECK(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_b(const Operand& dst, const Immediate& src) {
- EnsureSpace ensure_space(this);
- EMIT(0xC6);
- emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(src.x_));
-}
-
-
-void Assembler::mov_b(const Operand& dst, int8_t imm8) {
- EnsureSpace ensure_space(this);
- EMIT(0xC6);
- emit_operand(eax, dst);
- EMIT(imm8);
-}
-
-
-void Assembler::mov_b(const Operand& dst, Register src) {
- CHECK(src.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_w(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov_w(const Operand& dst, int16_t imm16) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0xC7);
- emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(imm16 & 0xff));
- EMIT(static_cast<int8_t>(imm16 >> 8));
-}
-
-
-void Assembler::mov_w(const Operand& dst, const Immediate& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0xC7);
- emit_operand(eax, dst);
- EMIT(static_cast<int8_t>(src.x_ & 0xff));
- EMIT(static_cast<int8_t>(src.x_ >> 8));
-}
-
-
-void Assembler::mov(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(imm32);
-}
-
-
-void Assembler::mov(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(x);
-}
-
-
-void Assembler::mov(Register dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(handle);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x89);
- EMIT(0xC0 | src.code() << 3 | dst.code());
-}
-
-
-void Assembler::mov(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(x);
-}
-
-
-void Assembler::mov(const Operand& dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(handle);
-}
-
-
-void Assembler::mov(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movsx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- EMIT(0xFC);
-}
-
-
-void Assembler::rep_movs() {
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0xA5);
-}
-
-
-void Assembler::rep_stos() {
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0xAB);
-}
-
-
-void Assembler::stos() {
- EnsureSpace ensure_space(this);
- EMIT(0xAB);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
- EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
- } else {
- EMIT(0x87);
- EMIT(0xC0 | src.code() << 3 | dst.code());
- }
-}
-
-
-void Assembler::xchg(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x87);
- emit_operand(dst, src);
-}
-
-void Assembler::xchg_b(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x86);
- emit_operand(reg, op);
-}
-
-void Assembler::xchg_w(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x87);
- emit_operand(reg, op);
-}
-
-void Assembler::lock() {
- EnsureSpace ensure_space(this);
- EMIT(0xF0);
-}
-
-void Assembler::cmpxchg(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB1);
- emit_operand(src, dst);
-}
-
-void Assembler::cmpxchg_b(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB0);
- emit_operand(src, dst);
-}
-
-void Assembler::cmpxchg_w(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xB1);
- emit_operand(src, dst);
-}
-
-void Assembler::adc(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(2, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::adc(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x13);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x03);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x01);
- emit_operand(src, dst);
-}
-
-
-void Assembler::add(const Operand& dst, const Immediate& x) {
- DCHECK(reloc_info_writer.last_pc() != NULL);
- EnsureSpace ensure_space(this);
- emit_arith(0, dst, x);
-}
-
-
-void Assembler::and_(Register dst, int32_t imm32) {
- and_(dst, Immediate(imm32));
-}
-
-
-void Assembler::and_(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(4, Operand(dst), x);
-}
-
-
-void Assembler::and_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x23);
- emit_operand(dst, src);
-}
-
-
-void Assembler::and_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(4, dst, x);
-}
-
-
-void Assembler::and_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x21);
- emit_operand(src, dst);
-}
-
-void Assembler::cmpb(const Operand& op, Immediate imm8) {
- DCHECK(imm8.is_int8() || imm8.is_uint8());
- EnsureSpace ensure_space(this);
- if (op.is_reg(eax)) {
- EMIT(0x3C);
- } else {
- EMIT(0x80);
- emit_operand(edi, op); // edi == 7
- }
- emit_b(imm8);
-}
-
-
-void Assembler::cmpb(const Operand& op, Register reg) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x38);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmpb(Register reg, const Operand& op) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x3A);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmpw(const Operand& op, Immediate imm16) {
- DCHECK(imm16.is_int16());
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x81);
- emit_operand(edi, op);
- emit_w(imm16);
-}
-
-void Assembler::cmpw(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x3B);
- emit_operand(reg, op);
-}
-
-void Assembler::cmpw(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x39);
- emit_operand(reg, op);
-}
-
-void Assembler::cmp(Register reg, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(7, Operand(reg), Immediate(imm32));
-}
-
-
-void Assembler::cmp(Register reg, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- emit_arith(7, Operand(reg), Immediate(handle));
-}
-
-
-void Assembler::cmp(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x3B);
- emit_operand(reg, op);
-}
-
-void Assembler::cmp(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- EMIT(0x39);
- emit_operand(reg, op);
-}
-
-void Assembler::cmp(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- emit_arith(7, op, imm);
-}
-
-
-void Assembler::cmp(const Operand& op, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- emit_arith(7, op, Immediate(handle));
-}
-
-
-void Assembler::cmpb_al(const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x38); // CMP r/m8, r8
- emit_operand(eax, op); // eax has same code as register al.
-}
-
-
-void Assembler::cmpw_ax(const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x39); // CMP r/m16, r16
- emit_operand(eax, op); // eax has same code as register ax.
-}
-
-
-void Assembler::dec_b(Register dst) {
- CHECK(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0xFE);
- EMIT(0xC8 | dst.code());
-}
-
-
-void Assembler::dec_b(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFE);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::dec(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x48 | dst.code());
-}
-
-
-void Assembler::dec(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- EMIT(0x99);
-}
-
-
-void Assembler::idiv(const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(edi, src);
-}
-
-
-void Assembler::div(const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(esi, src);
-}
-
-
-void Assembler::imul(Register reg) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xE8 | reg.code());
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, int32_t imm32) {
- imul(dst, Operand(src), imm32);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
- EnsureSpace ensure_space(this);
- if (is_int8(imm32)) {
- EMIT(0x6B);
- emit_operand(dst, src);
- EMIT(imm32);
- } else {
- EMIT(0x69);
- emit_operand(dst, src);
- emit(imm32);
- }
-}
-
-
-void Assembler::inc(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x40 | dst.code());
-}
-
-
-void Assembler::inc(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xE0 | src.code());
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xD8 | dst.code());
-}
-
-
-void Assembler::neg(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(ebx, dst);
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xD0 | dst.code());
-}
-
-
-void Assembler::not_(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(edx, dst);
-}
-
-
-void Assembler::or_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(1, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::or_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::or_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(1, dst, x);
-}
-
-
-void Assembler::or_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x09);
- emit_operand(src, dst);
-}
-
-
-void Assembler::rcl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::rcr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::ror(const Operand& dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- emit_operand(ecx, dst);
- } else {
- EMIT(0xC1);
- emit_operand(ecx, dst);
- EMIT(imm8);
- }
-}
-
-
-void Assembler::ror_cl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::sar(const Operand& dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- emit_operand(edi, dst);
- } else {
- EMIT(0xC1);
- emit_operand(edi, dst);
- EMIT(imm8);
- }
-}
-
-
-void Assembler::sar_cl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- emit_operand(edi, dst);
-}
-
-void Assembler::sbb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x1B);
- emit_operand(dst, src);
-}
-
-void Assembler::shld(Register dst, Register src, uint8_t shift) {
- DCHECK(is_uint5(shift));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA4);
- emit_operand(src, Operand(dst));
- EMIT(shift);
-}
-
-void Assembler::shld_cl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA5);
- emit_operand(src, Operand(dst));
-}
-
-
-void Assembler::shl(const Operand& dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- emit_operand(esp, dst);
- } else {
- EMIT(0xC1);
- emit_operand(esp, dst);
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shl_cl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- emit_operand(esp, dst);
-}
-
-void Assembler::shr(const Operand& dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- emit_operand(ebp, dst);
- } else {
- EMIT(0xC1);
- emit_operand(ebp, dst);
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shr_cl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- emit_operand(ebp, dst);
-}
-
-void Assembler::shrd(Register dst, Register src, uint8_t shift) {
- DCHECK(is_uint5(shift));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAC);
- emit_operand(dst, Operand(src));
- EMIT(shift);
-}
-
-void Assembler::shrd_cl(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAD);
- emit_operand(src, dst);
-}
-
-void Assembler::sub(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(5, dst, x);
-}
-
-
-void Assembler::sub(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x2B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::sub(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x29);
- emit_operand(src, dst);
-}
-
-
-void Assembler::test(Register reg, const Immediate& imm) {
- if (imm.is_uint8()) {
- test_b(reg, imm);
- return;
- }
-
- EnsureSpace ensure_space(this);
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
-}
-
-
-void Assembler::test(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test_b(Register reg, const Operand& op) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test(const Operand& op, const Immediate& imm) {
- if (op.is_reg_only()) {
- test(op.reg(), imm);
- return;
- }
- if (imm.is_uint8()) {
- return test_b(op, imm);
- }
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(eax, op);
- emit(imm);
-}
-
-void Assembler::test_b(Register reg, Immediate imm8) {
- DCHECK(imm8.is_uint8());
- EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (reg.is(eax)) {
- EMIT(0xA8);
- emit_b(imm8);
- } else if (reg.is_byte_register()) {
- emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.x_));
- } else {
- EMIT(0x66);
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- emit_w(imm8);
- }
-}
-
-void Assembler::test_b(const Operand& op, Immediate imm8) {
- if (op.is_reg_only()) {
- test_b(op.reg(), imm8);
- return;
- }
- EnsureSpace ensure_space(this);
- EMIT(0xF6);
- emit_operand(eax, op);
- emit_b(imm8);
-}
-
-void Assembler::test_w(Register reg, Immediate imm16) {
- DCHECK(imm16.is_int16() || imm16.is_uint16());
- EnsureSpace ensure_space(this);
- if (reg.is(eax)) {
- EMIT(0xA9);
- emit_w(imm16);
- } else {
- EMIT(0x66);
- EMIT(0xF7);
- EMIT(0xc0 | reg.code());
- emit_w(imm16);
- }
-}
-
-void Assembler::test_w(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x85);
- emit_operand(reg, op);
-}
-
-void Assembler::test_w(const Operand& op, Immediate imm16) {
- DCHECK(imm16.is_int16() || imm16.is_uint16());
- if (op.is_reg_only()) {
- test_w(op.reg(), imm16);
- return;
- }
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0xF7);
- emit_operand(eax, op);
- emit_w(imm16);
-}
-
-void Assembler::xor_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(6, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::xor_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x33);
- emit_operand(dst, src);
-}
-
-
-void Assembler::xor_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x31);
- emit_operand(src, dst);
-}
-
-
-void Assembler::xor_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(6, dst, x);
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bsr(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBD);
- emit_operand(dst, src);
-}
-
-
-void Assembler::bsf(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBC);
- emit_operand(dst, src);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- EMIT(0xF4);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- EMIT(0xCC);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- EMIT(0x90);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- DCHECK(is_uint16(imm16));
- if (imm16 == 0) {
- EMIT(0xC3);
- } else {
- EMIT(0xC2);
- EMIT(imm16 & 0xFF);
- EMIT((imm16 >> 8) & 0xFF);
- }
-}
-
-
-void Assembler::ud2() {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x0B);
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the 32bit
-// Displacement of the last instruction using the label.
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- Displacement disp = disp_at(&l);
- PrintF("@ %d ", l.pos());
- disp.print();
- PrintF("\n");
- disp.next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- EnsureSpace ensure_space(this);
- DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- Displacement disp = disp_at(L);
- int fixup_pos = L->pos();
- if (disp.type() == Displacement::CODE_ABSOLUTE) {
- long_at_put(fixup_pos, reinterpret_cast<int>(buffer_ + pos));
- internal_reference_positions_.push_back(fixup_pos);
- } else if (disp.type() == Displacement::CODE_RELATIVE) {
- // Relative to Code* heap object pointer.
- long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
- } else {
- if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
- DCHECK(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
- }
- // Relative address, relative to point after address.
- int imm32 = pos - (fixup_pos + sizeof(int32_t));
- long_at_put(fixup_pos, imm32);
- }
- disp.next(L);
- }
- while (L->is_near_linked()) {
- int fixup_pos = L->near_link_pos();
- int offset_to_next =
- static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- DCHECK(offset_to_next <= 0);
- // Relative address, relative to point after address.
- int disp = pos - fixup_pos - sizeof(int8_t);
- CHECK(0 <= disp && disp <= 127);
- set_byte_at(fixup_pos, disp);
- if (offset_to_next < 0) {
- L->link_to(fixup_pos + offset_to_next, Label::kNear);
- } else {
- L->UnuseNear();
- }
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- EnsureSpace ensure_space(this);
- DCHECK(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::call(Label* L) {
- EnsureSpace ensure_space(this);
- if (L->is_bound()) {
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit(offs - long_size);
- } else {
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- DCHECK(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- if (RelocInfo::IsRuntimeEntry(rmode)) {
- emit(reinterpret_cast<uint32_t>(entry), rmode);
- } else {
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
- }
-}
-
-
-int Assembler::CallSize(const Operand& adr) {
- // Call size is 1 (opcode) + adr.len_ (operand).
- return 1 + adr.len_;
-}
-
-
-void Assembler::call(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(edx, adr);
-}
-
-
-int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
-}
-
-
-void Assembler::call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- EnsureSpace ensure_space(this);
- DCHECK(RelocInfo::IsCodeTarget(rmode)
- || rmode == RelocInfo::CODE_AGE_SEQUENCE);
- EMIT(0xE8);
- emit(code, rmode, ast_id);
-}
-
-
-void Assembler::jmp(Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- EMIT(0xEB);
- emit_near_disp(L);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
- }
-}
-
-
-void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- DCHECK(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- if (RelocInfo::IsRuntimeEntry(rmode)) {
- emit(reinterpret_cast<uint32_t>(entry), rmode);
- } else {
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
- }
-}
-
-
-void Assembler::jmp(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- DCHECK(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(code, rmode);
-}
-
-
-void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- DCHECK(0 <= cc && static_cast<int>(cc) < 16);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- DCHECK(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 0111 tttn #8-bit disp
- EMIT(0x70 | cc);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- EMIT(0x70 | cc);
- emit_near_disp(L);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- // Note: could eliminate cond. jumps to this jump if condition
-    //       is the same; however, this seems to be a rather unlikely case.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- DCHECK((0 <= cc) && (static_cast<int>(cc) < 16));
- // 0000 1111 1000 tttn #32-bit disp.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- if (RelocInfo::IsRuntimeEntry(rmode)) {
- emit(reinterpret_cast<uint32_t>(entry), rmode);
- } else {
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
- }
-}
-
-
-void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(code, rmode);
-}
-
-
-// FPU instructions.
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fstp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xD8, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE8);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xEB);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xEE);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fst_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fldcw(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(ebp, adr);
-}
-
-
-void Assembler::fnstcw(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(edi, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fst_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- emit_operand(ebp, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- DCHECK(IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- DCHECK(IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- emit_operand(edi, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE0);
-}
-
-
-void Assembler::fsqrt() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFA);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFE);
-}
-
-
-void Assembler::fptan() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF2);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF1);
-}
-
-
-void Assembler::f2xm1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF0);
-}
-
-
-void Assembler::fscale() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFD);
-}
-
-
-void Assembler::fninit() {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE3);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fadd_i(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD8, 0xC0, i);
-}
-
-
-void Assembler::fadd_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDC);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fsub_i(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD8, 0xE0, i);
-}
-
-
-void Assembler::fsubr_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDC);
- emit_operand(ebp, adr);
-}
-
-
-void Assembler::fsub_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDC);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDA);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::fmul_i(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD8, 0xC8, i);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fmul_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDC);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::fdiv_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDC);
- emit_operand(esi, adr);
-}
-
-
-void Assembler::fdivr_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDC);
- emit_operand(edi, adr);
-}
-
-
-void Assembler::fdiv_i(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD8, 0xF0, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE4);
-}
-
-
-void Assembler::fxam() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE5);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- EMIT(0xDA);
- EMIT(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- EMIT(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- EMIT(0xDE);
- EMIT(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- EMIT(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- EMIT(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE2);
-}
-
-
-void Assembler::fnsave(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(esi, adr);
-}
-
-
-void Assembler::frstor(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::sahf() {
- EnsureSpace ensure_space(this);
- EMIT(0x9E);
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- DCHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x90 | cc);
- EMIT(0xC0 | reg.code());
-}
-
-
-void Assembler::GrowBuffer() {
- DCHECK(buffer_overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- desc.buffer_size = 2 * buffer_size_;
-
- // Some internal data structures overflow for very large buffers,
-  // so they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate()->heap()->MaxOldGenerationSize()) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.origin = this;
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
-  // sure we run into problems if we ever execute uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- MemMove(desc.buffer, buffer_, desc.instr_size);
- MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
- desc.reloc_size);
-
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate internal references.
- for (auto pos : internal_reference_positions_) {
- int32_t* p = reinterpret_cast<int32_t*>(buffer_ + pos);
- *p += pc_delta;
- }
-
- DCHECK(!buffer_overflow());
-}
-
-
-void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
- DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
- DCHECK(is_uint8(imm8));
- DCHECK((op1 & 0x01) == 0); // should be 8bit operation
- EMIT(op1);
- EMIT(op2 | dst.code());
- EMIT(imm8);
-}
-
-
-void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
- DCHECK((0 <= sel) && (sel <= 7));
- Register ireg = { sel };
- if (x.is_int8()) {
- EMIT(0x83); // using a sign-extended 8-bit immediate.
- emit_operand(ireg, dst);
- EMIT(x.x_ & 0xFF);
- } else if (dst.is_reg(eax)) {
- EMIT((sel << 3) | 0x05); // short form if the destination is eax.
- emit(x);
- } else {
- EMIT(0x81); // using a literal 32-bit immediate.
- emit_operand(ireg, dst);
- emit(x);
- }
-}
-
-
-void Assembler::emit_operand(Register reg, const Operand& adr) {
- const unsigned length = adr.len_;
- DCHECK(length > 0);
-
- // Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-
- // Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
- pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
- RecordRelocInfo(adr.rmode_);
- if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
- emit_label(*reinterpret_cast<Label**>(pc_));
- } else {
- pc_ += sizeof(int32_t);
- }
- }
-}
-
-
-void Assembler::emit_label(Label* label) {
- if (label->is_bound()) {
- internal_reference_positions_.push_back(pc_offset());
- emit(reinterpret_cast<uint32_t>(buffer_ + label->pos()));
- } else {
- emit_disp(label, Displacement::CODE_ABSOLUTE);
- }
-}
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- DCHECK(0 <= i && i < 8); // illegal stack offset
- EMIT(b1);
- EMIT(b2 + i);
-}
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- EMIT(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::dq(uint64_t data) {
- EnsureSpace ensure_space(this);
- emit_q(data);
-}
-
-
-void Assembler::dd(Label* label) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
- emit_label(label);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- DCHECK(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !serializer_enabled() && !emit_debug_code()) {
- return;
- }
- RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
- reloc_info_writer.Write(&rinfo);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
deleted file mode 100644
index 8b9cda4357..0000000000
--- a/deps/v8/src/x87/assembler-x87.h
+++ /dev/null
@@ -1,1107 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_X87_ASSEMBLER_X87_H_
-#define V8_X87_ASSEMBLER_X87_H_
-
-#include <deque>
-
-#include "src/assembler.h"
-#include "src/isolate.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-#define GENERAL_REGISTERS(V) \
- V(eax) \
- V(ecx) \
- V(edx) \
- V(ebx) \
- V(esp) \
- V(ebp) \
- V(esi) \
- V(edi)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(eax) \
- V(ecx) \
- V(edx) \
- V(ebx) \
- V(esi) \
- V(edi)
-
-#define DOUBLE_REGISTERS(V) \
- V(stX_0) \
- V(stX_1) \
- V(stX_2) \
- V(stX_3) \
- V(stX_4) \
- V(stX_5) \
- V(stX_6) \
- V(stX_7)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS DOUBLE_REGISTERS
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(stX_0) \
- V(stX_1) \
- V(stX_2) \
- V(stX_3) \
- V(stX_4) \
- V(stX_5)
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't want
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-struct Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static const int kNumRegisters = Code::kAfterLast;
-
- static Register from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kNumRegisters);
- Register r = {code};
- return r;
- }
- bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
- bool is(Register reg) const { return reg_code == reg.reg_code; }
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
- int bit() const {
- DCHECK(is_valid());
- return 1 << reg_code;
- }
-
- bool is_byte_register() const { return reg_code <= 3; }
-
- // Unfortunately we can't make this private in a struct.
- int reg_code;
-};
-
-
-#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-const Register no_reg = {Register::kCode_no_reg};
-
-static const bool kSimpleFPAliasing = true;
-static const bool kSimdMaskRegisters = false;
-
-struct X87Register {
- enum Code {
-#define REGISTER_CODE(R) kCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kAfterLast,
- kCode_no_reg = -1
- };
-
- static const int kMaxNumRegisters = Code::kAfterLast;
- static const int kMaxNumAllocatableRegisters = 6;
-
- static X87Register from_code(int code) {
- X87Register result = {code};
- return result;
- }
-
- bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
-
- int code() const {
- DCHECK(is_valid());
- return reg_code;
- }
-
- bool is(X87Register reg) const { return reg_code == reg.reg_code; }
-
- int reg_code;
-};
-
-typedef X87Register FloatRegister;
-
-typedef X87Register DoubleRegister;
-
-// TODO(x87) Define SIMD registers.
-typedef X87Register Simd128Register;
-
-#define DECLARE_REGISTER(R) \
- const DoubleRegister R = {DoubleRegister::kCode_##R};
-DOUBLE_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- }
-}
-
-
-enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- inline explicit Immediate(int x);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
- inline explicit Immediate(Smi* value);
- inline explicit Immediate(Address addr);
- inline explicit Immediate(Address x, RelocInfo::Mode rmode);
-
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
- }
- bool is_uint8() const {
- return v8::internal::is_uint8(x_) && RelocInfo::IsNone(rmode_);
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
- }
- bool is_uint16() const {
- return v8::internal::is_uint16(x_) && RelocInfo::IsNone(rmode_);
- }
-
- private:
- inline explicit Immediate(Label* value);
-
- int x_;
- RelocInfo::Mode rmode_;
-
- friend class Operand;
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_half_pointer_size = times_2,
- times_pointer_size = times_4,
- times_twice_pointer_size = times_8
-};
-
-
-class Operand BASE_EMBEDDED {
- public:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // [disp/r]
- INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
-
- // [disp/r]
- INLINE(explicit Operand(Immediate imm));
-
- // [base + disp/r]
- explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- // [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- // [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
- return Operand(index, scale, reinterpret_cast<int32_t>(table),
- RelocInfo::INTERNAL_REFERENCE);
- }
-
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(reinterpret_cast<int32_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand ForCell(Handle<Cell> cell) {
- AllowDeferredHandleDereference embedding_raw_address;
- return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::CELL);
- }
-
- static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
- return Operand(base, imm.x_, imm.rmode_);
- }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- // Returns true if this Operand is a wrapper for one register.
- bool is_reg_only() const;
-
- // Asserts that this Operand is a wrapper for one register and returns the
- // register.
- Register reg() const;
-
- private:
- // Set the ModRM byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- inline void set_modrm(int mod, Register rm);
-
- inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
-
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-//            |  next  |  type  |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type { UNCONDITIONAL_JUMP, CODE_RELATIVE, OTHER, CODE_ABSOLUTE };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
-
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on ia32 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : Assembler(IsolateData(isolate), buffer, buffer_size) {}
- Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
- virtual ~Assembler() { }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the branch/call instruction at pc.
- // The isolate argument is unused (and may be nullptr) when skipping flushing.
- inline static Address target_address_at(Address pc, Address constant_pool);
- inline static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(
- Isolate* isolate, Address pc, Code* code, Address target,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target) {
- set_target_address_at(isolate, instruction_payload, code, target);
- }
-
- // This sets the internal reference at the pc.
- inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
- RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
-
- static const int kSpecialTargetSize = kPointerSize;
-
- // Distance between the address of the code target in the call instruction
- // and the return address
- static const int kCallTargetAddressOffset = kPointerSize;
-
- static const int kCallInstructionLength = 5;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
-
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte opcode for a short unconditional jump.
- static const byte kJmpShortOpcode = 0xEB;
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
-
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // - function names correspond one-to-one to ia32 instruction mnemonics
- // - unless specified otherwise, instructions operate on 32bit operands
- // - instructions on 8bit (byte) operands/registers have a trailing '_b'
- // - instructions on 16bit (word) operands/registers have a trailing '_w'
- // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
- // NOTE ON INTERFACE: Currently, the interface is not very consistent
-  // in the sense that some operations (e.g. mov()) can be called in more
-  // than one way to generate the same instruction: The Register argument
- // can in some cases be replaced with an Operand(Register) argument.
-  // This should be cleaned up and made more orthogonal. The question
- // is: should we always use Operands instead of Registers where an
- // Operand is possible, or should we have a Register (overloaded) form
- // instead? We must be careful to make sure that the selected instruction
- // is obvious from the parameters to avoid hard-to-find code generation
- // bugs.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
- void Align(int m);
- // Insert the smallest number of zero bytes possible to align the pc offset
-  // to a multiple of m. m must be a power of 2 (>= 2).
- void DataAlign(int m);
- void Nop(int bytes = 1);
- // Aligns code to something that's optimal for a jump target for the platform.
- void CodeTargetAlign();
-
- // Stack
- void pushad();
- void popad();
-
- void pushfd();
- void popfd();
-
- void push(const Immediate& x);
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(const Immediate& size);
- void leave();
-
- // Moves
- void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
- void mov_b(Register dst, const Operand& src);
- void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, const Immediate& src);
- void mov_b(const Operand& dst, Register src);
-
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
- void mov_w(const Operand& dst, int16_t imm16);
- void mov_w(const Operand& dst, const Immediate& src);
-
-
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
-
- void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
- void movsx_b(Register dst, const Operand& src);
-
- void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
- void movsx_w(Register dst, const Operand& src);
-
- void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
- void movzx_b(Register dst, const Operand& src);
-
- void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
- void movzx_w(Register dst, const Operand& src);
-
- // Flag management.
- void cld();
-
- // Repetitive string instructions.
- void rep_movs();
- void rep_stos();
- void stos();
-
- // Exchange
- void xchg(Register dst, Register src);
- void xchg(Register dst, const Operand& src);
- void xchg_b(Register reg, const Operand& op);
- void xchg_w(Register reg, const Operand& op);
-
- // Lock prefix
- void lock();
-
- // CompareExchange
- void cmpxchg(const Operand& dst, Register src);
- void cmpxchg_b(const Operand& dst, Register src);
- void cmpxchg_w(const Operand& dst, Register src);
-
- // Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
-
- void add(Register dst, Register src) { add(dst, Operand(src)); }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, Register src);
- void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
- void add(const Operand& dst, const Immediate& x);
-
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Immediate& x);
- void and_(Register dst, Register src) { and_(dst, Operand(src)); }
- void and_(Register dst, const Operand& src);
- void and_(const Operand& dst, Register src);
- void and_(const Operand& dst, const Immediate& x);
-
- void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, Immediate imm8);
- void cmpb(Register reg, const Operand& op);
- void cmpb(const Operand& op, Register reg);
- void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& dst, Immediate src);
- void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
- void cmpw(Register dst, const Operand& src);
- void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
- void cmpw(const Operand& dst, Register src);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
- void cmp(Register reg, const Operand& op);
- void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
- void cmp(const Operand& op, Register reg);
- void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<Object> handle);
-
- void dec_b(Register dst);
- void dec_b(const Operand& dst);
-
- void dec(Register dst);
- void dec(const Operand& dst);
-
- void cdq();
-
- void idiv(Register src) { idiv(Operand(src)); }
- void idiv(const Operand& src);
- void div(Register src) { div(Operand(src)); }
- void div(const Operand& src);
-
- // Signed multiply instructions.
- void imul(Register src); // edx:eax = eax * src.
- void imul(Register dst, Register src) { imul(dst, Operand(src)); }
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
- void imul(Register dst, const Operand& src, int32_t imm32);
-
- void inc(Register dst);
- void inc(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
-
- // Unsigned multiply instruction.
- void mul(Register src); // edx:eax = eax * reg.
-
- void neg(Register dst);
- void neg(const Operand& dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
-
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, Register src) { or_(dst, Operand(src)); }
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
- void or_(const Operand& dst, const Immediate& x);
-
- void rcl(Register dst, uint8_t imm8);
- void rcr(Register dst, uint8_t imm8);
-
- void ror(Register dst, uint8_t imm8) { ror(Operand(dst), imm8); }
- void ror(const Operand& dst, uint8_t imm8);
- void ror_cl(Register dst) { ror_cl(Operand(dst)); }
- void ror_cl(const Operand& dst);
-
- void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
- void sar(const Operand& dst, uint8_t imm8);
- void sar_cl(Register dst) { sar_cl(Operand(dst)); }
- void sar_cl(const Operand& dst);
-
- void sbb(Register dst, const Operand& src);
-
- void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
- void shl(const Operand& dst, uint8_t imm8);
- void shl_cl(Register dst) { shl_cl(Operand(dst)); }
- void shl_cl(const Operand& dst);
- void shld(Register dst, Register src, uint8_t shift);
- void shld_cl(Register dst, Register src);
-
- void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
- void shr(const Operand& dst, uint8_t imm8);
- void shr_cl(Register dst) { shr_cl(Operand(dst)); }
- void shr_cl(const Operand& dst);
- void shrd(Register dst, Register src, uint8_t shift);
- void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
- void shrd_cl(const Operand& dst, Register src);
-
- void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, Register src) { sub(dst, Operand(src)); }
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
-
- void test(Register reg, const Immediate& imm);
- void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
- void test(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
- void test(const Operand& op, Register reg) { test(reg, op); }
- void test_b(Register reg, const Operand& op);
- void test_b(Register reg, Immediate imm8);
- void test_b(const Operand& op, Immediate imm8);
- void test_b(const Operand& op, Register reg) { test_b(reg, op); }
- void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
- void test_w(Register reg, const Operand& op);
- void test_w(Register reg, Immediate imm16);
- void test_w(const Operand& op, Immediate imm16);
- void test_w(const Operand& op, Register reg) { test_w(reg, op); }
- void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
-
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& dst, Register src);
- void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
- void xor_(const Operand& dst, const Immediate& x);
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(Register dst, Register src) { bts(Operand(dst), src); }
- void bts(const Operand& dst, Register src);
- void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
- void bsr(Register dst, const Operand& src);
- void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
- void bsf(Register dst, const Operand& src);
-
- // Miscellaneous
- void hlt();
- void int3();
- void nop();
- void ret(int imm16);
- void ud2();
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Calls
- void call(Label* L);
- void call(byte* entry, RelocInfo::Mode rmode);
- int CallSize(const Operand& adr);
- void call(Register reg) { call(Operand(reg)); }
- void call(const Operand& adr);
- int CallSize(Handle<Code> code, RelocInfo::Mode mode);
- void call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
-
- // Jumps
- // unconditional jump to L
- void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(Register reg) { jmp(Operand(reg)); }
- void jmp(const Operand& adr);
- void jmp(Handle<Code> code, RelocInfo::Mode rmode);
-
- // Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
-
- // Floating-point operations
- void fld(int i);
- void fstp(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fst_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fst_d(const Operand& adr);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- // The fisttp instructions require SSE3.
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
- void fsqrt();
- void fcos();
- void fsin();
- void fptan();
- void fyl2x();
- void f2xm1();
- void fscale();
- void fninit();
-
- void fadd(int i);
- void fadd_i(int i);
- void fadd_d(const Operand& adr);
- void fsub(int i);
- void fsub_i(int i);
- void fsub_d(const Operand& adr);
- void fsubr_d(const Operand& adr);
- void fmul(int i);
- void fmul_d(const Operand& adr);
- void fmul_i(int i);
- void fdiv(int i);
- void fdiv_d(const Operand& adr);
- void fdivr_d(const Operand& adr);
- void fdiv_i(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubr(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fxam();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
- void fcompp();
- void fnstsw_ax();
- void fldcw(const Operand& adr);
- void fnstcw(const Operand& adr);
- void fwait();
- void fnclex();
- void fnsave(const Operand& adr);
- void frstor(const Operand& adr);
-
- void frndint();
-
- void sahf();
- void setcc(Condition cc, Register reg);
-
- void cpuid();
-
- // TODO(lrn): Need SFENCE for movnt?
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode);
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
- // Record a deoptimization reason that can be used by a log or cpu profiler.
- // Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
- int id);
-
- // Writes a single byte or word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
- void dq(uint64_t data);
- void dp(uintptr_t data) { dd(data); }
- void dd(Label* label);
-
-  // Check if there are fewer than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool buffer_overflow() const {
- return pc_ >= reloc_info_writer.pos() - kGap;
- }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
- static bool IsNop(Address addr);
-
- int relocation_writer_size() {
- return (buffer_ + buffer_size_) - reloc_info_writer.pos();
- }
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
-
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
- void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
- ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- // No embedded constant pool support.
- UNREACHABLE();
- }
-
- protected:
- byte* addr_at(int pos) { return buffer_ + pos; }
-
-
- private:
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
- inline void emit(uint32_t x);
- inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
- inline void emit(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
- inline void emit(const Immediate& x);
- inline void emit_b(Immediate x);
- inline void emit_w(const Immediate& x);
- inline void emit_q(uint64_t x);
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
-
- void emit_operand(Register reg, const Operand& adr);
-
- void emit_label(Label* label);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
- inline void emit_near_disp(Label* L);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
-
- // Internal reference positions, required for (potential) patching in
- // GrowBuffer(); contains only those internal references whose labels
- // are already bound.
- std::deque<int> internal_reference_positions_;
-
- // code generation
- RelocInfoWriter reloc_info_writer;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- DCHECK(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_ASSEMBLER_X87_H_
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
deleted file mode 100644
index 8b6fbadcb9..0000000000
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ /dev/null
@@ -1,3490 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/code-stubs.h"
-#include "src/api-arguments.h"
-#include "src/base/bits.h"
-#include "src/bootstrapper.h"
-#include "src/codegen.h"
-#include "src/ic/handler-compiler.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-#include "src/x87/code-stubs-x87.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
- __ pop(ecx);
- __ mov(MemOperand(esp, eax, times_4, 0), edi);
- __ push(edi);
- __ push(ebx);
- __ push(ecx);
- __ add(eax, Immediate(3));
- __ TailCallRuntime(Runtime::kNewArray);
-}
-
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment();
-
- CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
- int param_count = descriptor.GetRegisterParameterCount();
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- DCHECK(param_count == 0 ||
- eax.is(descriptor.GetRegisterParameter(param_count - 1)));
- // Push arguments
- for (int i = 0; i < param_count; ++i) {
- __ push(descriptor.GetRegisterParameter(i));
- }
- __ CallExternalReference(miss, param_count);
- }
-
- __ ret(0);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ pushad();
- if (save_doubles()) {
-    // Save FPU state in m108byte.
- __ sub(esp, Immediate(108));
- __ fnsave(Operand(esp, 0));
- }
- const int argument_count = 1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, ecx);
- __ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(isolate()),
- argument_count);
- if (save_doubles()) {
-    // Restore FPU state in m108byte.
- __ frstor(Operand(esp, 0));
- __ add(esp, Immediate(108));
- }
- __ popad();
- __ ret(0);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
-  // Code pattern for loading a floating point value. The input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // the operand is in the register 'number'. Returns the operand as a
-  // floating point number on the FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-};
-
-
-void DoubleToIStub::Generate(MacroAssembler* masm) {
- Register input_reg = this->source();
- Register final_result_reg = this->destination();
- DCHECK(is_truncating());
-
- Label check_negative, process_64_bits, done, done_no_stash;
-
- int double_offset = offset();
-
- // Account for return address and saved regs if input is esp.
- if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
-
- MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
- MemOperand exponent_operand(MemOperand(input_reg,
- double_offset + kDoubleSize / 2));
-
- Register scratch1;
- {
- Register scratch_candidates[3] = { ebx, edx, edi };
- for (int i = 0; i < 3; i++) {
- scratch1 = scratch_candidates[i];
- if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
- }
- }
- // Since we must use ecx for shifts below, use some other register (eax)
- // to calculate the result if ecx is the requested return register.
- Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
-  // Save ecx if it isn't the return register (and is therefore volatile); if it
-  // is the return register, save the temp register we use in its stead for
-  // the result.
- Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
- __ push(scratch1);
- __ push(save_reg);
-
- bool stash_exponent_copy = !input_reg.is(esp);
- __ mov(scratch1, mantissa_operand);
- __ mov(ecx, exponent_operand);
- if (stash_exponent_copy) __ push(ecx);
-
- __ and_(ecx, HeapNumber::kExponentMask);
- __ shr(ecx, HeapNumber::kExponentShift);
- __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
- __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
- __ j(below, &process_64_bits);
-
- // Result is entirely in lower 32-bits of mantissa
- int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
- __ sub(ecx, Immediate(delta));
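- // ecx now holds (unbiased exponent - kPhysicalSignificandSize), the number
- // of bits to shift the low mantissa word left to form the low 32 result bits.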
- __ xor_(result_reg, result_reg);
- __ cmp(ecx, Immediate(31));
- __ j(above, &done);
- __ shl_cl(scratch1);
- __ jmp(&check_negative);
-
- __ bind(&process_64_bits);
- // Result must be extracted from shifted 32-bit mantissa
- __ sub(ecx, Immediate(delta));
- __ neg(ecx);
- if (stash_exponent_copy) {
- __ mov(result_reg, MemOperand(esp, 0));
- } else {
- __ mov(result_reg, exponent_operand);
- }
- __ and_(result_reg,
- Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
- __ add(result_reg,
- Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
- __ shrd_cl(scratch1, result_reg);
- __ shr_cl(result_reg);
- __ test(ecx, Immediate(32));
- {
- Label skip_mov;
- __ j(equal, &skip_mov, Label::kNear);
- __ mov(scratch1, result_reg);
- __ bind(&skip_mov);
- }
-
- // If the double was negative, negate the integer result.
- __ bind(&check_negative);
- __ mov(result_reg, scratch1);
- __ neg(result_reg);
- if (stash_exponent_copy) {
- __ cmp(MemOperand(esp, 0), Immediate(0));
- } else {
- __ cmp(exponent_operand, Immediate(0));
- }
- {
- Label skip_mov;
- __ j(less_equal, &skip_mov, Label::kNear);
- __ mov(result_reg, scratch1);
- __ bind(&skip_mov);
- }
-
- // Restore registers
- __ bind(&done);
- if (stash_exponent_copy) {
- __ add(esp, Immediate(kDoubleSize / 2));
- }
- __ bind(&done_no_stash);
- if (!final_result_reg.is(result_reg)) {
- DCHECK(final_result_reg.is(ecx));
- __ mov(final_result_reg, result_reg);
- }
- __ pop(save_reg);
- __ pop(scratch1);
- __ ret(0);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ JumpIfSmi(number, &load_smi, Label::kNear);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Test that both operands are numbers (smis or heap numbers); jumps to the
- // non_float label otherwise.
- __ JumpIfSmi(edx, &test_other, Label::kNear);
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ JumpIfSmi(eax, &done, Label::kNear);
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- const Register scratch = ecx;
-
- // Load the double_exponent into x87 FPU
- __ fld_d(Operand(esp, 0 * kDoubleSize + 4));
- // Load the double_base into x87 FPU
- __ fld_d(Operand(esp, 1 * kDoubleSize + 4));
-
- // Call ieee754 runtime directly.
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, scratch);
- // Put the double_base parameter in call stack
- __ fstp_d(Operand(esp, 0 * kDoubleSize));
- // Put the double_exponent parameter in call stack
- __ fstp_d(Operand(esp, 1 * kDoubleSize));
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
- }
- // Return value is in st(0) on ia32.
- __ ret(0);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- DCHECK(cc != equal);
- DCHECK((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-static void CheckInputType(MacroAssembler* masm, Register input,
- CompareICState::State expected, Label* fail) {
- Label ok;
- if (expected == CompareICState::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareICState::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, fail);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-static void BranchIfNotInternalizedString(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- __ j(not_zero, label);
-}
-
-
-void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
- Label runtime_call, check_unequal_objects;
- Condition cc = GetCondition();
-
- Label miss;
- CheckInputType(masm, edx, left(), &miss);
- CheckInputType(masm, eax, right(), &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- Label generic_heap_number_comparison;
- {
- Label not_identical;
- __ cmp(eax, edx);
- __ j(not_equal, &not_identical);
-
- if (cc != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- __ cmp(edx, isolate()->factory()->undefined_value());
- Label check_for_nan;
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Compare heap numbers in a general way,
- // to handle NaNs correctly.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->heap_number_map()));
- __ j(equal, &generic_heap_number_comparison, Label::kNear);
- if (cc != equal) {
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, Immediate(FIRST_JS_RECEIVER_TYPE));
- __ j(above_equal, &runtime_call, Label::kFar);
- // Call runtime on identical symbols since we need to throw a TypeError.
- __ cmpb(ecx, Immediate(SYMBOL_TYPE));
- __ j(equal, &runtime_call, Label::kFar);
- }
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc == equal && strict()) {
- Label slow; // Fallthrough label.
- Label not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
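- // (kSmiTagMask & eax & edx) is non-zero only when both operands have the
- // heap object tag bit set; zero means at least one of them is a smi.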
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, eax);
- __ test(ecx, edx);
- __ j(not_zero, &not_smis, Label::kNear);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTagMask, which is either zero or one.
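- // Branchless select: after the sub, ecx is 0 if eax is a heap object and ~0
- // if eax is a smi, so ebx = eax ^ ((edx ^ eax) & ecx) is the non-smi operand.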
- __ sub(ecx, Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, eax);
- __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, eax);
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow, Label::kNear);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(below, &first_non_object, Label::kNear);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- __ bind(&generic_heap_number_comparison);
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
-
- __ Move(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- DCHECK(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
-
- // Fast negative check for internalized-to-internalized equality.
- Label check_for_strings;
- if (cc == equal) {
- BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
- BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are internalized they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of one-byte strings.
- if (cc == equal) {
- StringHelper::GenerateFlatOneByteStringEquals(masm, edx, eax, ecx, ebx);
- } else {
- StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
- edi);
- }
-#ifdef DEBUG
- __ Abort(kUnexpectedFallThroughFromStringComparison);
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label return_equal, return_unequal, undetectable;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime_call);
-
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &undetectable, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &return_unequal, Label::kNear);
-
- __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
- __ j(below, &runtime_call, Label::kNear);
- __ CmpInstanceType(ecx, FIRST_JS_RECEIVER_TYPE);
- __ j(below, &runtime_call, Label::kNear);
-
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax.
- __ ret(0); // eax, edx were pushed
-
- __ bind(&undetectable);
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
-
- // If both sides are JSReceivers, then the result is false according to
- // the HTML specification, which says that only comparisons with null or
- // undefined are affected by special casing for document.all.
- __ CmpInstanceType(ebx, ODDBALL_TYPE);
- __ j(zero, &return_equal, Label::kNear);
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(not_zero, &return_unequal, Label::kNear);
-
- __ bind(&return_equal);
- __ Move(eax, Immediate(EQUAL));
- __ ret(0); // eax, edx were pushed
- }
- __ bind(&runtime_call);
-
- if (cc == equal) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(esi);
- __ Call(strict() ? isolate()->builtins()->StrictEqual()
- : isolate()->builtins()->Equal(),
- RelocInfo::CODE_TARGET);
- __ Pop(esi);
- }
- // Turn true into 0 and false into some non-zero value.
- STATIC_ASSERT(EQUAL == 0);
- __ sub(eax, Immediate(isolate()->factory()->true_value()));
- __ Ret();
- } else {
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
-
- // Restore return address on the stack.
- __ push(ecx);
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ TailCallRuntime(Runtime::kCompare);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
- // eax : number of arguments to the construct function
- // ebx : feedback vector
- // edx : slot in feedback vector (Smi)
- // edi : the function to call
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Number-of-arguments register must be smi-tagged to call out.
- __ SmiTag(eax);
- __ push(eax);
- __ push(edi);
- __ push(edx);
- __ push(ebx);
- __ push(esi);
-
- __ CallStub(stub);
-
- __ pop(esi);
- __ pop(ebx);
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
- }
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a feedback vector slot. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // eax : number of arguments to the construct function
- // ebx : feedback vector
- // edx : slot in feedback vector (Smi)
- // edi : the function to call
- Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
-
- // Load the cache state into ecx.
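- // edx is a Smi (index << 1), so scaling by times_half_pointer_size addresses
- // the feedback slot at index * kPointerSize.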
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- // We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
- // at this position in a symbol (see static asserts in feedback-vector.h).
- Label check_allocation_site;
- __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(equal, &done, Label::kFar);
- __ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
- __ j(equal, &done, Label::kFar);
- __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
- Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &check_allocation_site);
-
- // If the weak cell is cleared, we have a new chance to become monomorphic.
- __ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
- __ jmp(&megamorphic);
-
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorphic
- // sentinel, then the slot holds either some other function or an
- // AllocationSite.
- __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &miss);
-
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done, Label::kFar);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e., here the cache is not uninitialized) goes
- // megamorphic.
- __ CompareRoot(ecx, Heap::kuninitialized_symbolRootIndex);
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ mov(
- FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
- Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kFar);
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, and store it in the
- // slot.
- CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done);
-
- __ bind(&not_array_function);
- CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub);
-
- __ bind(&done);
- // Increment the call count for all function calls.
- __ add(FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // eax : number of arguments
- // ebx : feedback vector
- // edx : slot in feedback vector (Smi, for RecordCallTarget)
- // edi : constructor function
-
- Label non_function;
- // Check that function is not a smi.
- __ JumpIfSmi(edi, &non_function);
- // Check that function is a JSFunction.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_function);
-
- GenerateRecordCallTarget(masm);
-
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
-
- __ AssertUndefinedOrAllocationSite(ebx);
-
- // Pass new target to construct stub.
- __ mov(edx, edi);
-
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-
- __ bind(&non_function);
- __ mov(edx, edi);
- __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
- Register slot) {
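- // The call count lives in the slot immediately after the feedback slot,
- // hence the extra kPointerSize offset.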
- __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(1)));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
- // eax - number of arguments
- // edi - function
- // edx - slot id
- // ebx - vector
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, miss);
-
- // Reload ecx.
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, ebx, edx);
-
- __ mov(ebx, ecx);
- __ mov(edx, edi);
- ArrayConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-
- // Unreachable.
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
- // eax - number of arguments
- // edi - function
- // edx - slot id
- // ebx - vector
- Isolate* isolate = masm->isolate();
- Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
- // The checks. First, does edi match the recorded monomorphic target?
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // We don't know that we have a weak cell. We might have a private symbol
- // or an AllocationSite, but the memory is safe to examine.
- // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
- // FixedArray.
- // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
- // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
- // computed, meaning that it can't appear to be a pointer. If the low bit is
- // 0, then hash is computed, but the 0 bit prevents the field from appearing
- // to be a pointer.
- STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
- STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
- WeakCell::kValueOffset &&
- WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
- __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(not_equal, &extra_checks_or_miss);
-
- // The compare above could have been a SMI/SMI comparison. Guard against this
- // convincing us that we have a monomorphic JSFunction.
- __ JumpIfSmi(edi, &extra_checks_or_miss);
-
- __ bind(&call_function);
-
- // Increment the call count for monomorphic function calls.
- IncrementCallCount(masm, ebx, edx);
-
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
- tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&extra_checks_or_miss);
- Label uninitialized, miss, not_allocation_site;
-
- __ cmp(ecx, Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
- __ j(equal, &call);
-
- // Check if we have an allocation site.
- __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
- Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &not_allocation_site);
-
- // We have an allocation site.
- HandleArrayCase(masm, &miss);
-
- __ bind(&not_allocation_site);
-
- // The following cases attempt to handle MISS cases without going to the
- // runtime.
- if (FLAG_trace_ic) {
- __ jmp(&miss);
- }
-
- __ cmp(ecx, Immediate(FeedbackVector::UninitializedSentinel(isolate)));
- __ j(equal, &uninitialized);
-
- // We are going megamorphic. If the feedback is a JSFunction, it is fine
- // to handle it here. More complex cases are dealt with in the runtime.
- __ AssertNotSmi(ecx);
- __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &miss);
- __ mov(
- FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
- Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
-
- __ bind(&call);
-
- // Increment the call count for megamorphic function calls.
- IncrementCallCount(masm, ebx, edx);
-
- __ bind(&call_count_incremented);
-
- __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET);
-
- __ bind(&uninitialized);
-
- // We are going monomorphic, provided we actually have a JSFunction.
- __ JumpIfSmi(edi, &miss);
-
- // Goto miss case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &miss);
-
- // Make sure the function is not the Array() function, which requires special
- // behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(equal, &miss);
-
- // Make sure the function belongs to the same native context.
- __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
- __ cmp(ecx, NativeContextOperand());
- __ j(not_equal, &miss);
-
- // Store the function. Use a stub since we need a frame for allocation.
- // eax - number of arguments
- // ebx - vector
- // edx - slot
- // edi - function
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- CreateWeakCellStub create_stub(isolate);
- __ SmiTag(eax);
- __ push(eax);
- __ push(ebx);
- __ push(edx);
- __ push(edi);
- __ push(esi);
- __ CallStub(&create_stub);
- __ pop(esi);
- __ pop(edi);
- __ pop(edx);
- __ pop(ebx);
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- __ jmp(&call_function);
-
- // We are here because tracing is on or we encountered a MISS case we can't
- // handle here.
- __ bind(&miss);
- GenerateMiss(masm);
-
- __ jmp(&call_count_incremented);
-
- // Unreachable
- __ int3();
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the number of arguments.
- __ SmiTag(eax);
- __ push(eax);
-
- // Push the function and feedback info.
- __ push(edi);
- __ push(ebx);
- __ push(edx);
-
- // Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
-
- // Restore number of arguments.
- __ pop(eax);
- __ SmiUntag(eax);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- // It is important that the store buffer overflow stubs are generated first.
- CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
- CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- CreateWeakCellStub::GenerateAheadOfTime(isolate);
- BinaryOpICStub::GenerateAheadOfTime(isolate);
- BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
- StoreFastElementStub::GenerateAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
- CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
- // Stubs might already be in the snapshot; detect that and don't regenerate,
- // since regenerating would mess up the code stub initialization state.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
- save_doubles_code = *(save_doubles.GetCode());
- }
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(isolate, 1, kDontSaveFPRegs);
- stub.GetCode();
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
- //
- // If argv_in_register():
- // ecx: pointer to the first argument
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Reserve space on the stack for the three arguments passed to the call. If
- // result size is greater than can be returned in registers, also reserve
- // space for the hidden argument for the result location, and space for the
- // result itself.
- int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
-
- // Enter the exit frame that transitions from JavaScript to C++.
- if (argv_in_register()) {
- DCHECK(!save_doubles());
- DCHECK(!is_builtin_exit());
- __ EnterApiExitFrame(arg_stack_space);
-
- // Move argc and argv into the correct registers.
- __ mov(esi, ecx);
- __ mov(edi, eax);
- } else {
- __ EnterExitFrame(
- arg_stack_space, save_doubles(),
- is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
- }
-
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result size is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
- // Call C function.
- if (result_size() <= 2) {
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- } else {
- DCHECK_EQ(3, result_size());
- // Pass a pointer to the result location as the first argument.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- __ mov(Operand(esp, 1 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 2 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- }
- __ call(ebx);
-
- if (result_size() > 2) {
- DCHECK_EQ(3, result_size());
-#ifndef _WIN32
- // Restore the "hidden" argument on the stack, which was popped by the callee.
- __ sub(esp, Immediate(kPointerSize));
-#endif
- // Read result values stored on stack. Result is stored above the arguments.
- __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
- __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
- __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
- }
- // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
-
- // Check result for exception sentinel.
- Label exception_returned;
- __ cmp(eax, isolate()->factory()->exception());
- __ j(equal, &exception_returned);
-
- // Check that there is no pending exception, otherwise we
- // should have returned the exception sentinel.
- if (FLAG_debug_code) {
- __ push(edx);
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- Label okay;
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
- __ cmp(edx, Operand::StaticVariable(pending_exception_address));
- // Cannot use check here as it attempts to generate call into runtime.
- __ j(equal, &okay, Label::kNear);
- __ int3();
- __ bind(&okay);
- __ pop(edx);
- }
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles(), !argv_in_register());
- __ ret(0);
-
- // Handling of exception.
- __ bind(&exception_returned);
-
- ExternalReference pending_handler_context_address(
- Isolate::kPendingHandlerContextAddress, isolate());
- ExternalReference pending_handler_code_address(
- Isolate::kPendingHandlerCodeAddress, isolate());
- ExternalReference pending_handler_offset_address(
- Isolate::kPendingHandlerOffsetAddress, isolate());
- ExternalReference pending_handler_fp_address(
- Isolate::kPendingHandlerFPAddress, isolate());
- ExternalReference pending_handler_sp_address(
- Isolate::kPendingHandlerSPAddress, isolate());
-
- // Ask the runtime for help to determine the handler. This will set eax to
- // contain the current pending exception, don't clobber it.
- ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
- isolate());
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(3, eax);
- __ mov(Operand(esp, 0 * kPointerSize), Immediate(0)); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(0)); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- __ CallCFunction(find_handler, 3);
- }
-
- // Retrieve the handler context, SP and FP.
- __ mov(esi, Operand::StaticVariable(pending_handler_context_address));
- __ mov(esp, Operand::StaticVariable(pending_handler_sp_address));
- __ mov(ebp, Operand::StaticVariable(pending_handler_fp_address));
-
- // If the handler is a JS frame, restore the context to the frame. Note that
- // esi will be zero (no context) for non-JS frames.
- Label skip;
- __ test(esi, esi);
- __ j(zero, &skip, Label::kNear);
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- __ bind(&skip);
-
- // Compute the handler entry address and jump to it.
- __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
- __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
- // Check whether it is turbofanned exception handler code before jumping to it.
- Label not_turbo;
- __ push(eax);
- __ mov(eax, Operand(edi, Code::kKindSpecificFlags1Offset - kHeapObjectTag));
- __ and_(eax, Immediate(1 << Code::kIsTurbofannedBit));
- __ j(zero, &not_turbo);
- __ fninit();
- __ fld1();
- __ bind(&not_turbo);
- __ pop(eax);
- __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
- __ jmp(edi);
-}
-
-
-void JSEntryStub::Generate(MacroAssembler* masm) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
- // Set up frame.
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Push the stack frame type marker.
- int marker = type();
- __ push(Immediate(Smi::FromInt(marker))); // marker
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- __ push(Operand::StaticVariable(context_address)); // context
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
- __ push(Operand::StaticVariable(c_entry_fp));
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js, Label::kNear);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ jmp(&invoke, Label::kNear);
- __ bind(&not_outermost_js);
- __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, Immediate(isolate()->factory()->exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushStackHandler();
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. Notice that we cannot store a
- // reference to the trampoline code directly in this stub, because the
- // builtin stubs may not have been generated yet.
- if (type() == StackFrame::ENTRY_CONSTRUCT) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate());
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(edx);
-
- // Unlink this frame from the handler chain.
- __ PopStackHandler();
-
- __ bind(&exit);
- // Check if the current stack frame is marked as the outermost JS frame.
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- __ pop(Operand::StaticVariable(ExternalReference(
- Isolate::kCEntryFPAddress, isolate())));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(esp, Immediate(2 * kPointerSize)); // remove marker and saved context
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- // If the receiver is a smi trigger the non-string case.
- if (check_mode_ == RECEIVER_IS_UNKNOWN) {
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
- }
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- __ SmiUntag(index_);
-
- Factory* factory = masm->isolate()->factory();
- StringCharLoadGenerator::Generate(
- masm, factory, object_, index_, result_, &call_runtime_);
-
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, EmbedMode embed_mode,
- const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- masm->isolate()->factory()->heap_number_map(),
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- if (embed_mode == PART_OF_IC_HANDLER) {
- __ push(LoadWithVectorDescriptor::VectorRegister());
- __ push(LoadDescriptor::SlotRegister());
- }
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- __ CallRuntime(Runtime::kNumberToSmi);
- if (!index_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(index_, eax);
- }
- __ pop(object_);
- if (embed_mode == PART_OF_IC_HANDLER) {
- __ pop(LoadDescriptor::SlotRegister());
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- }
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ SmiTag(index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
-}
-
-void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ mov(length, FieldOperand(left, String::kLengthOffset));
- __ cmp(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ bind(&strings_not_equal);
- __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(length, length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-}
-
-
-void StringHelper::GenerateCompareFlatOneByteStrings(
- MacroAssembler* masm, Register left, Register right, Register scratch1,
- Register scratch2, Register scratch3) {
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter, Label::kNear);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, length_delta);
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- Label compare_lengths;
- __ test(min_length, min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare characters.
- Label result_not_equal;
- GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, length_delta);
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- Label result_greater;
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
- __ bind(&result_not_equal);
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-
- // Result is LESS.
- __ Move(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
-
-
-void StringHelper::GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch, Label* chars_not_equal,
- Label::Distance chars_not_equal_near) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ inc(index);
- __ j(not_zero, &loop);
-}
-
-
-void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edx : left
- // -- eax : right
- // -- esp[0] : return address
- // -----------------------------------
-
- // Load ecx with the allocation site. We stick an undefined dummy value here
- // and replace it with the real allocation site later when we instantiate this
- // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ mov(ecx, isolate()->factory()->undefined_value());
-
- // Make sure that we actually patched the allocation site.
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_equal, kExpectedAllocationSite);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- isolate()->factory()->allocation_site_map());
- __ Assert(equal, kExpectedAllocationSite);
- }
-
- // Tail call into the stub that handles binary operations with allocation
- // sites.
- BinaryOpWithAllocationSiteStub stub(isolate(), state());
- __ TailCallStub(&stub);
-}
-
-
-void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
- DCHECK_EQ(CompareICState::BOOLEAN, state());
- Label miss;
- Label::Distance const miss_distance =
- masm->emit_debug_code() ? Label::kFar : Label::kNear;
-
- __ JumpIfSmi(edx, &miss, miss_distance);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ JumpIfSmi(eax, &miss, miss_distance);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
- __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
- if (!Token::IsEqualityOp(op())) {
- __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
- __ AssertSmi(eax);
- __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
- __ AssertSmi(edx);
- __ xchg(eax, edx);
- }
- __ sub(eax, edx);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateSmis(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::SMI);
- Label miss;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &miss, Label::kNear);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ sub(eax, edx);
- } else {
- Label done;
- __ sub(edx, eax);
- __ j(no_overflow, &done, Label::kNear);
- // Correct sign of result in case of overflow.
- __ not_(edx);
- __ bind(&done);
- __ mov(eax, edx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::NUMBER);
-
- Label generic_stub, check_left;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left() == CompareICState::SMI) {
- __ JumpIfNotSmi(edx, &miss);
- }
- if (right() == CompareICState::SMI) {
- __ JumpIfNotSmi(eax, &miss);
- }
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SSE2 or CMOV is unsupported.
- __ JumpIfSmi(eax, &check_left, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
-
- __ bind(&check_left);
- __ JumpIfSmi(edx, &generic_stub, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
- CompareICState::GENERIC, CompareICState::GENERIC);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op())) {
- __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
- __ j(not_equal, &miss);
- __ JumpIfSmi(edx, &unordered);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op())) {
- __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
- __ j(equal, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::INTERNALIZED_STRING);
- DCHECK(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
-
- // Check that both operands are heap objects.
- Label miss;
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss, Label::kNear);
-
- // Check that both operands are internalized strings.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- __ j(not_zero, &miss, Label::kNear);
-
- // Internalized strings are compared by identity.
- Label done;
- __ cmp(left, right);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- DCHECK(right.is(eax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::UNIQUE_NAME);
- DCHECK(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
-
- // Check that both operands are heap objects.
- Label miss;
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss, Label::kNear);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
-
- __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
- __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
-
- // Unique names are compared by identity.
- Label done;
- __ cmp(left, right);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- DCHECK(right.is(eax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateStrings(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op());
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
- Register tmp3 = edi;
-
- // Check that both operands are heap objects.
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ mov(tmp3, tmp1);
- STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
- __ test(tmp3, Immediate(kIsNotStringMask));
- __ j(not_zero, &miss);
-
- // Fast check for identical strings.
- Label not_same;
- __ cmp(left, right);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Handle not identical strings.
- __ bind(&not_same);
-
- // Check that both strings are internalized. If they are, we're done
- // because we already know they are not identical. But in the case of
- // non-equality compare, we still need to determine the order. We
- // also know they are both strings.
- if (equality) {
- Label do_compare;
- STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsNotInternalizedMask));
- __ j(not_zero, &do_compare, Label::kNear);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- DCHECK(right.is(eax));
- __ ret(0);
- __ bind(&do_compare);
- }
-
- // Check that both strings are sequential one-byte.
- Label runtime;
- __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
-
- // Compare flat one byte strings. Returns when done.
- if (equality) {
- StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
- tmp2);
- } else {
- StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
- tmp2, tmp3);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- if (equality) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(left);
- __ Push(right);
- __ CallRuntime(Runtime::kStringEqual);
- }
- __ sub(eax, Immediate(masm->isolate()->factory()->true_value()));
- __ Ret();
- } else {
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
- __ TailCallRuntime(Runtime::kStringCompare);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
- DCHECK_EQ(CompareICState::RECEIVER, state());
- Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &miss, Label::kNear);
-
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(below, &miss, Label::kNear);
- __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(below, &miss, Label::kNear);
-
- DCHECK_EQ(equal, GetCondition());
- __ sub(eax, edx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
- Label miss;
- Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &miss, Label::kNear);
-
- __ GetWeakValue(edi, cell);
- __ cmp(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ j(not_equal, &miss, Label::kNear);
- __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ j(not_equal, &miss, Label::kNear);
-
- if (Token::IsEqualityOp(op())) {
- __ sub(eax, edx);
- __ ret(0);
- } else {
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(eax);
- __ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kCompare);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void CompareICStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx); // Preserve edx and eax.
- __ push(eax);
- __ push(edx); // And also use them as the arguments.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(op())));
- __ CallRuntime(Runtime::kCompareIC_Miss);
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
- __ pop(eax);
- __ pop(edx);
- }
-
- // Do a tail call to the rewritten stub.
- __ jmp(edi);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be a unique name and receiver must be a heap object.
-void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<Name> name,
- Register r0) {
- DCHECK(name->IsUniqueName());
-
- // If the names of the slots in the range from 1 to kProbes - 1 for the hash
- // value are not equal to the name, and the kProbes-th slot is not used (its
- // name is the undefined value), this guarantees that the hash table doesn't
- // contain the property. This holds even if some slots represent deleted
- // properties (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(index,
- Immediate(Smi::FromInt(name->Hash() +
- NameDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
-    // Stop if we found the property.
- __ cmp(entity_name, Handle<Name>(name));
- __ j(equal, miss);
-
- Label good;
- // Check for the hole and skip.
- __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &good, Label::kNear);
-
- // Check if the entry name is not a unique name.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ JumpIfNotUniqueNameInstanceType(
- FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
- __ bind(&good);
- }
-
- NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
- NEGATIVE_LOOKUP);
- __ push(Immediate(Handle<Object>(name)));
- __ push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ test(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
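
The loop above inlines the NameDictionary probe sequence: per the comment, probe i looks at index (hash + i + i*i) & (capacity - 1), and since each entry spans kEntrySize == 3 slots the index is tripled with lea(index, index, times_2), i.e. index + 2*index. A minimal stand-alone sketch of that arithmetic, assuming the probe-offset formula stated in the comment and kEntrySize == 3:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the probe-slot computation used by the negative lookup above.
    // Assumes GetProbeOffset(i) == i + i*i (as the comment states) and a
    // power-of-two capacity, so `& (capacity - 1)` is the mask step.
    constexpr uint32_t kEntrySize = 3;

    uint32_t ProbeSlot(uint32_t hash, uint32_t capacity, uint32_t probe) {
      uint32_t index = (hash + probe + probe * probe) & (capacity - 1);
      return index * kEntrySize;  // Same scaling as lea(index, index, times_2).
    }

    int main() {
      for (uint32_t i = 0; i < 4; ++i) {
        std::printf("probe %u -> slot %u\n", i, ProbeSlot(0x1234, 64, i));
      }
      return 0;
    }
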
-
-void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
- // Registers:
- // dictionary_: NameDictionary to probe.
- // result_: used as scratch.
-  //   index_: will hold the index of the entry if the lookup is successful;
-  //           may alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result();
-
- __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
- __ dec(scratch);
- __ SmiUntag(scratch);
- __ push(scratch);
-
-  // If the names of the slots in the range 1 to kProbes - 1 for the hash value
-  // are not equal to the name and the kProbes-th slot is not used (its name is
-  // the undefined value), the hash table is guaranteed not to contain the
-  // property. This holds even if some slots represent deleted properties
-  // (their names are the hole value).
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- if (i > 0) {
- __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(esp, 0));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(scratch, isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
-    // Stop if we found the property.
- __ cmp(scratch, Operand(esp, 3 * kPointerSize));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
- // If we hit a key that is not a unique name during negative
-      // lookup, we have to bail out, as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not a unique name.
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotUniqueNameInstanceType(
- FieldOperand(scratch, Map::kInstanceTypeOffset),
- &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode() == POSITIVE_LOOKUP) {
- __ mov(result(), Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ mov(result(), Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ mov(result(), Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
- stub.GetCode();
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
-}
-
-
-// Takes the input in 3 registers: address_, value_ and object_. A pointer to
-// the value has just been written into the object; now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
-  // forth between a compare instruction (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action() == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm,
- kReturnOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- argument_count);
-
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label object_is_black, need_incremental, need_incremental_pop_object;
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &object_is_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&object_is_black);
-
- // Get the value from the slot.
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- not_zero,
- &ensure_not_white,
- Label::kNear);
-
- __ jmp(&need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ JumpIfWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object, Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
- __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ mov(ebx, MemOperand(ebp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(ecx);
- int additional_offset =
- function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
- __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
- __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
-}
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (masm->isolate()->function_entry_hook() != NULL) {
- ProfileEntryHookStub stub(masm->isolate());
- masm->CallStub(&stub);
- }
-}
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Save volatile registers.
- const int kNumSavedRegisters = 3;
- __ push(eax);
- __ push(ecx);
- __ push(edx);
-
- // Calculate and push the original stack pointer.
- __ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
- __ push(eax);
-
- // Retrieve our return address and use it to calculate the calling
- // function's address.
- __ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
- __ sub(eax, Immediate(Assembler::kCallInstructionLength));
- __ push(eax);
-
- // Call the entry hook.
- DCHECK(isolate()->function_entry_hook() != NULL);
- __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
- RelocInfo::RUNTIME_ENTRY);
- __ add(esp, Immediate(2 * kPointerSize));
-
- // Restore ecx.
- __ pop(edx);
- __ pop(ecx);
- __ pop(eax);
-
- __ ret(0);
-}
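
The hook call above passes two extra values: the original esp (recomputed past the three saved registers and the return address) and the call site in the caller, obtained by subtracting the call instruction length from the return address found on the stack. A tiny sketch of that second computation, assuming kCallInstructionLength is 5, the size of an ia32 `call rel32`:

    #include <cstdint>
    #include <cstdio>

    // Illustrative: recover the address of the `call` that produced a given
    // return address. 5 bytes assumed for ia32 call rel32 (opcode + imm32).
    constexpr uintptr_t kCallInstructionLength = 5;

    uintptr_t CallSiteFromReturnAddress(uintptr_t return_address) {
      return return_address - kCallInstructionLength;
    }

    int main() {
      std::printf("%#lx\n", static_cast<unsigned long>(
                                CallSiteFromReturnAddress(0x401005)));
      return 0;
    }
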
-
-template <class T>
-static void CreateArrayDispatch(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- T stub(masm->isolate(), kind);
- __ TailCallStub(&stub);
- __ bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
-
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
- // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
- // eax - number of arguments
- // edi - constructor?
- // esp[0] - return address
- // esp[4] - last argument
- Label normal_sequence;
- if (mode == DONT_OVERRIDE) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ test_b(edx, Immediate(1));
- __ j(not_zero, &normal_sequence);
- }
-
- // look at the first argument
- __ mov(ecx, Operand(esp, kPointerSize));
- __ test(ecx, ecx);
- __ j(zero, &normal_sequence);
-
- if (mode == DISABLE_ALLOCATION_SITES) {
- ElementsKind initial = GetInitialFastElementsKind();
- ElementsKind holey_initial = GetHoleyElementsKind(initial);
-
- ArraySingleArgumentConstructorStub stub_holey(
- masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub_holey);
-
- __ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
- DISABLE_ALLOCATION_SITES);
- __ TailCallStub(&stub);
- } else if (mode == DONT_OVERRIDE) {
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry.
- __ inc(edx);
-
- if (FLAG_debug_code) {
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ Assert(equal, kExpectedAllocationSite);
- }
-
-    // Save the resulting elements kind in type info. We can't just store edx
- // in the AllocationSite::transition_info field because elements kind is
- // restricted to a portion of the field...upper bits need to be left alone.
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ add(FieldOperand(ebx, AllocationSite::kTransitionInfoOffset),
- Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
-
- __ bind(&normal_sequence);
- int last_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
- __ TailCallStub(&stub);
- __ bind(&next);
- }
-
- // If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
- } else {
- UNREACHABLE();
- }
-}
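
The STATIC_ASSERTs above fix the fast elements kinds at even values for packed variants and odd values for their holey counterparts, which is why testing the low bit of edx detects holeyness and inc(edx) turns a packed kind into the matching holey kind. A small illustration of that encoding (enum values copied from the asserts; everything else is just for the sketch):

    #include <cassert>

    // Values as pinned down by the STATIC_ASSERTs in the stub above.
    enum ElementsKindSketch {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4,
      FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    bool IsHoley(int kind) { return (kind & 1) != 0; }  // __ test_b(edx, Immediate(1))
    int ToHoley(int kind) { return kind | 1; }          // what __ inc(edx) achieves for packed kinds

    int main() {
      assert(!IsHoley(FAST_ELEMENTS));
      assert(IsHoley(FAST_HOLEY_DOUBLE_ELEMENTS));
      assert(ToHoley(FAST_SMI_ELEMENTS) == FAST_HOLEY_SMI_ELEMENTS);
      assert(ToHoley(FAST_DOUBLE_ELEMENTS) == FAST_HOLEY_DOUBLE_ELEMENTS);
      return 0;
    }
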
-
-template <class T>
-static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index =
- GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= to_index; ++i) {
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- T stub(isolate, kind);
- stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
- T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
- stub1.GetCode();
- }
- }
-}
-
-void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayNArgumentsConstructorStub stub(isolate);
- stub.GetCode();
-
- ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
- for (int i = 0; i < 2; i++) {
- // For internal arrays we only need a few things
- InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
- stubh1.GetCode();
- InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
- stubh2.GetCode();
- }
-}
-
-void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void ArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
- // -- ebx : AllocationSite or undefined
- // -- edi : constructor
- // -- edx : Original constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // This test will catch both a NULL and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
-
- // We should either have undefined in ebx or a valid AllocationSite
- __ AssertUndefinedOrAllocationSite(ebx);
- }
-
- Label subclassing;
-
- // Enter the context of the Array function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- __ cmp(edx, edi);
- __ j(not_equal, &subclassing);
-
- Label no_info;
- // If the feedback vector is the undefined value call an array constructor
- // that doesn't use AllocationSites.
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(equal, &no_info);
-
- // Only look at the lower 16 bits of the transition info.
- __ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(edx);
- STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
- GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
-
- __ bind(&no_info);
- GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
-
- // Subclassing.
- __ bind(&subclassing);
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- __ add(eax, Immediate(3));
- __ PopReturnAddressTo(ecx);
- __ Push(edx);
- __ Push(ebx);
- __ PushReturnAddressFrom(ecx);
- __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
-}
-
-void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
- ElementsKind kind) {
- Label not_zero_case, not_one_case;
- Label normal_sequence;
-
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
- __ TailCallStub(&stub0);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
-
- if (IsFastPackedElementsKind(kind)) {
- // We might need to create a holey array
- // look at the first argument
- __ mov(ecx, Operand(esp, kPointerSize));
- __ test(ecx, ecx);
- __ j(zero, &normal_sequence);
-
- InternalArraySingleArgumentConstructorStub stub1_holey(
- isolate(), GetHoleyElementsKind(kind));
- __ TailCallStub(&stub1_holey);
- }
-
- __ bind(&normal_sequence);
- InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
- __ TailCallStub(&stub1);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stubN(isolate());
- __ TailCallStub(&stubN);
-}
-
-void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // This test will catch both a NULL and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- }
-
- // Figure out the right elements kind
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Load the map's "bit field 2" into ecx. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ DecodeField<Map::ElementsKindBits>(ecx);
-
- if (FLAG_debug_code) {
- Label done;
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(equal, &done);
- __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ Assert(equal, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
- __ bind(&done);
- }
-
- Label fast_elements_case;
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(equal, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
-
- __ bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
-}
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : function
- // -- esi : context
- // -- ebp : frame pointer
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make edx point to the JavaScript frame.
- __ mov(edx, ebp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have rest parameters (only possible if we have an
- // arguments adaptor frame below the function frame).
- Label no_rest_parameters;
- __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &no_rest_parameters, Label::kNear);
-
- // Check if the arguments adaptor frame contains more arguments than
- // specified by the function's internal formal parameter count.
- Label rest_parameters;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sub(eax,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ j(greater, &rest_parameters);
-
- // Return an empty rest parameter array.
- __ bind(&no_rest_parameters);
- {
- // ----------- S t a t e -------------
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate an empty rest parameter array.
- Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
-    // Set up the rest parameter array in eax.
- __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
- __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
- __ mov(ecx, isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(Smi::FromInt(JSArray::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- }
- __ jmp(&done_allocate);
- }
-
- __ bind(&rest_parameters);
- {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
- __ lea(ebx,
- Operand(ebx, eax, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-
- // ----------- S t a t e -------------
- // -- esi : context
- // -- eax : number of rest parameters (tagged)
- // -- ebx : pointer to first rest parameters
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate space for the rest parameter array plus the backing store.
- Label allocate, done_allocate;
- __ lea(ecx, Operand(eax, times_half_pointer_size,
- JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in edx.
- __ mov(FieldOperand(edx, FixedArray::kMapOffset),
- isolate()->factory()->fixed_array_map());
- __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
- {
- Label loop, done_loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, eax);
- __ j(equal, &done_loop, Label::kNear);
- __ mov(edi, Operand(ebx, 0 * kPointerSize));
- __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- edi);
- __ sub(ebx, Immediate(1 * kPointerSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Setup the rest parameter array in edi.
- __ lea(edi,
- Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
- __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
- __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
- __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
- __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- __ mov(eax, edi);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
- __ j(greater, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(ecx);
- __ Push(eax);
- __ Push(ebx);
- __ Push(ecx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mov(edx, eax);
- __ Pop(ebx);
- __ Pop(eax);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewRestParameter.
- __ bind(&too_big_for_new_space);
- __ PopReturnAddressTo(ecx);
- // We reload the function from the caller frame due to register pressure
- // within this stub. This is the slow path, hence reloading is preferable.
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
- } else {
- __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
- }
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kNewRestParameter);
- }
-}
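
The branch to &rest_parameters above is taken only when the adaptor frame holds more actual arguments than the function's formal parameter count; the difference is the number of rest parameters, otherwise an empty array is returned. A minimal sketch of that arithmetic (names here are illustrative, not V8's):

    #include <algorithm>
    #include <cstdio>

    // Illustrative: mirrors the subtraction of the formal parameter count from
    // the adaptor frame's argument count performed above.
    int RestParameterCount(int actual_arguments, int formal_parameters) {
      return std::max(0, actual_arguments - formal_parameters);
    }

    int main() {
      // function f(a, b, ...rest) called as f(1, 2, 3, 4, 5): rest gets 3 elements.
      std::printf("%d\n", RestParameterCount(5, 2));
      return 0;
    }
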
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : function
- // -- esi : context
- // -- ebp : frame pointer
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make ecx point to the JavaScript frame.
- __ mov(ecx, ebp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
- __ bind(&ok);
- }
-
- // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- // ebx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // ecx : JavaScript frame pointer.
- // esp[0] : return address
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // No adaptor, parameter count = argument count.
- __ mov(ecx, ebx);
- __ push(ebx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ push(ebx);
- __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- // ebx = parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ mov(ebx, ecx);
-
- // Save mapped parameter count and function.
- __ bind(&try_allocate);
- __ push(edi);
- __ push(ebx);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map: has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
-
- // eax = address of new object(s) (tagged)
- // ecx = argument count (smi-tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Get the arguments map from the current native context into edi.
- Label has_mapped_parameters, instantiate;
- __ mov(edi, NativeContextOperand());
- __ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
- __ mov(
- edi,
- Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
- __ jmp(&instantiate, Label::kNear);
-
- __ bind(&has_mapped_parameters);
- __ mov(edi, Operand(edi, Context::SlotOffset(
- Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
- __ bind(&instantiate);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // edi = address of arguments map (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Copy the JS object part.
- __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
-
- // Set up the callee in-object property.
- STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
- __ mov(edi, Operand(esp, 1 * kPointerSize));
- __ AssertNotSmi(edi);
- __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(ecx);
- __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edx = address of receiver argument
- // edi = address of parameter map or backing store (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Free two registers.
- __ push(edx);
- __ push(eax);
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &skip_parameter_map);
-
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
- __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
- __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ push(ecx);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 5 * kPointerSize));
- __ sub(ebx, eax);
- __ mov(ecx, isolate()->factory()->the_hole_value());
- __ mov(edx, edi);
- __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
- // eax = loop variable (tagged)
- // ebx = mapping index (tagged)
- // ecx = the hole value
- // edx = address of parameter map (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = argument count (tagged)
- // esp[4] = address of new object (tagged)
- // esp[8] = address of receiver argument
- // esp[12] = mapped parameter count (tagged)
- // esp[16] = function
- // esp[20] = parameter count (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
- __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
- __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ test(eax, eax);
- __ j(not_zero, &parameters_loop, Label::kNear);
- __ pop(ecx);
-
- __ bind(&skip_parameter_map);
-
- // ecx = argument count (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = address of new object (tagged)
- // esp[4] = address of receiver argument
- // esp[8] = mapped parameter count (tagged)
- // esp[12] = function
- // esp[16] = parameter count (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
- __ mov(eax, Operand(edx, 0));
- __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(ebx, ecx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Restore.
- __ pop(eax); // Address of arguments object.
- __ Drop(4);
-
- // Return.
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Remove saved mapped parameter count.
- __ pop(edi); // Pop saved function.
- __ pop(eax); // Remove saved parameter count.
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
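
The single Allocate call above reserves room for up to three adjacent objects: an optional parameter map (a FixedArray with two extra header words for the context and the backing store, plus one word per mapped parameter), the backing-store FixedArray for all arguments, and the JSSloppyArgumentsObject itself. A sketch of that size computation, with kPointerSize == 4 as on this 32-bit port and the header sizes treated as assumptions standing in for the V8 constants:

    #include <cstdio>

    constexpr int kPointerSize = 4;                               // 32-bit port
    constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;       // assumption: map + length
    constexpr int kSloppyArgumentsObjectSize = 5 * kPointerSize;  // assumption: JSObject + length + callee
    constexpr int kParameterMapHeaderSize =
        kFixedArrayHeaderSize + 2 * kPointerSize;                 // as defined in the stub

    int SloppyArgumentsAllocationSize(int mapped_count, int argument_count) {
      int size = 0;
      if (mapped_count > 0) {
        size += kParameterMapHeaderSize + mapped_count * kPointerSize;  // 1. parameter map
      }
      size += kFixedArrayHeaderSize + argument_count * kPointerSize;    // 2. backing store
      size += kSloppyArgumentsObjectSize;                               // 3. arguments object
      return size;
    }

    int main() {
      std::printf("%d bytes\n", SloppyArgumentsAllocationSize(2, 3));
      return 0;
    }
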
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : function
- // -- esi : context
- // -- ebp : frame pointer
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
-
- // Make edx point to the JavaScript frame.
- __ mov(edx, ebp);
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- }
- if (FLAG_debug_code) {
- Label ok;
- __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(equal, &ok);
- __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
- __ bind(&ok);
- }
-
- // Check if we have an arguments adaptor frame below the function frame.
- Label arguments_adaptor, arguments_done;
- __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &arguments_adaptor, Label::kNear);
- {
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
- __ lea(ebx,
- Operand(edx, eax, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
- }
- __ jmp(&arguments_done, Label::kNear);
- __ bind(&arguments_adaptor);
- {
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(ebx,
- Operand(ebx, eax, times_half_pointer_size,
- StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
- }
- __ bind(&arguments_done);
-
- // ----------- S t a t e -------------
- // -- eax : number of arguments (tagged)
- // -- ebx : pointer to the first argument
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate space for the strict arguments object plus the backing store.
- Label allocate, done_allocate;
- __ lea(ecx,
- Operand(eax, times_half_pointer_size,
- JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Setup the elements array in edx.
- __ mov(FieldOperand(edx, FixedArray::kMapOffset),
- isolate()->factory()->fixed_array_map());
- __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
- {
- Label loop, done_loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, eax);
- __ j(equal, &done_loop, Label::kNear);
- __ mov(edi, Operand(ebx, 0 * kPointerSize));
- __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- edi);
- __ sub(ebx, Immediate(1 * kPointerSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
-  // Set up the strict arguments object in edi.
- __ lea(edi,
- Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
- __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
- __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
- STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
- __ mov(eax, edi);
- __ Ret();
-
- // Fall back to %AllocateInNewSpace (if not too big).
- Label too_big_for_new_space;
- __ bind(&allocate);
- __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
- __ j(greater, &too_big_for_new_space);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(ecx);
- __ Push(eax);
- __ Push(ebx);
- __ Push(ecx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ mov(edx, eax);
- __ Pop(ebx);
- __ Pop(eax);
- }
- __ jmp(&done_allocate);
-
- // Fall back to %NewStrictArguments.
- __ bind(&too_big_for_new_space);
- __ PopReturnAddressTo(ecx);
- // We reload the function from the caller frame due to register pressure
- // within this stub. This is the slow path, hence reloading is preferable.
- if (skip_stub_frame()) {
- // For Ignition we need to skip the handler/stub frame to reach the
- // JavaScript frame for the function.
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
- } else {
- __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
- }
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-static Operand ApiParameterOperand(int index) {
- return Operand(esp, index * kPointerSize);
-}
-
-
-// Prepares the stack for putting arguments on it (aligns it and so on).
-// Reserves space for the return value if needed (assumes the return value is a
-// handle). Arguments must be stored in ApiParameterOperand(0),
-// ApiParameterOperand(1), etc. Saves the context (esi). If space was reserved
-// for the return value, stores the pointer to the reserved slot into esi.
-static void PrepareCallApiFunction(MacroAssembler* masm, int argc) {
- __ EnterApiExitFrame(argc);
- if (__ emit_debug_code()) {
- __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-
-// Calls an API function. Allocates a HandleScope, extracts the returned value
-// from the handle and propagates exceptions. Clobbers ebx, edi and
-// caller-save registers. Restores the context. On return removes
-// stack_space * kPointerSize (GCed).
-static void CallApiFunctionAndReturn(MacroAssembler* masm,
- Register function_address,
- ExternalReference thunk_ref,
- Operand thunk_last_arg, int stack_space,
- Operand* stack_space_operand,
- Operand return_value_operand,
- Operand* context_restore_operand) {
- Isolate* isolate = masm->isolate();
-
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate);
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address(isolate);
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address(isolate);
-
- DCHECK(edx.is(function_address));
- // Allocate HandleScope in callee-save registers.
- __ mov(ebx, Operand::StaticVariable(next_address));
- __ mov(edi, Operand::StaticVariable(limit_address));
- __ add(Operand::StaticVariable(level_address), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, eax);
- __ mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
- 1);
- __ PopSafepointRegisters();
- }
-
-
- Label profiler_disabled;
- Label end_profiler_check;
- __ mov(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
- __ cmpb(Operand(eax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
-
- // Additional parameter is the address of the actual getter function.
- __ mov(thunk_last_arg, function_address);
- // Call the api function.
- __ mov(eax, Immediate(thunk_ref));
- __ call(eax);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function.
- __ call(function_address);
- __ bind(&end_profiler_check);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, eax);
- __ mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
- 1);
- __ PopSafepointRegisters();
- }
-
- Label prologue;
- // Load the value from ReturnValue
- __ mov(eax, return_value_operand);
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- __ bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- __ mov(Operand::StaticVariable(next_address), ebx);
- __ sub(Operand::StaticVariable(level_address), Immediate(1));
- __ Assert(above_equal, kInvalidHandleScopeLevel);
- __ cmp(edi, Operand::StaticVariable(limit_address));
- __ j(not_equal, &delete_allocated_handles);
-
- // Leave the API exit frame.
- __ bind(&leave_exit_frame);
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ mov(esi, *context_restore_operand);
- }
- if (stack_space_operand != nullptr) {
- __ mov(ebx, *stack_space_operand);
- }
- __ LeaveApiExitFrame(!restore_context);
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate);
- __ cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(isolate->factory()->the_hole_value()));
- __ j(not_equal, &promote_scheduled_exception);
-
-#if DEBUG
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = eax;
- Register map = ecx;
-
- __ JumpIfSmi(return_value, &ok, Label::kNear);
- __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- __ CmpInstanceType(map, LAST_NAME_TYPE);
- __ j(below_equal, &ok, Label::kNear);
-
- __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
- __ j(above_equal, &ok, Label::kNear);
-
- __ cmp(map, isolate->factory()->heap_number_map());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->undefined_value());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->true_value());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->false_value());
- __ j(equal, &ok, Label::kNear);
-
- __ cmp(return_value, isolate->factory()->null_value());
- __ j(equal, &ok, Label::kNear);
-
- __ Abort(kAPICallReturnedInvalidObject);
-
- __ bind(&ok);
-#endif
-
- if (stack_space_operand != nullptr) {
- DCHECK_EQ(0, stack_space);
- __ pop(ecx);
- __ add(esp, ebx);
- __ jmp(ecx);
- } else {
- __ ret(stack_space * kPointerSize);
- }
-
- // Re-throw by promoting a scheduled exception.
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions(isolate);
- __ bind(&delete_allocated_handles);
- __ mov(Operand::StaticVariable(limit_address), edi);
- __ mov(edi, eax);
- __ mov(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ mov(eax, Immediate(delete_extensions));
- __ call(eax);
- __ mov(eax, edi);
- __ jmp(&leave_exit_frame);
-}
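
The HandleScope handling above reduces to three isolate-global words: save next and limit and bump level before the call, then restore next, drop level, and take the slow path that deletes handle-block extensions only if limit moved while the callback ran. A simplified stand-alone sketch of that protocol (plain C++ stand-ins, not the V8 types):

    #include <cstddef>

    // Simplified stand-ins for the handle-scope words manipulated above.
    struct HandleScopeData {
      void** next = nullptr;
      void** limit = nullptr;
      int level = 0;
    };

    void DeleteExtensions(HandleScopeData* data) {
      // Would free the extra handle blocks allocated by the callback.
      (void)data;
    }

    template <typename Callback>
    void CallWithHandleScope(HandleScopeData* data, Callback callback) {
      void** saved_next = data->next;    // mov ebx, [next_address]
      void** saved_limit = data->limit;  // mov edi, [limit_address]
      data->level++;                     // add [level_address], 1

      callback();                        // call function_address (or the profiler thunk)

      data->next = saved_next;           // mov [next_address], ebx
      data->level--;                     // sub [level_address], 1
      if (data->limit != saved_limit) {  // cmp edi, [limit_address]
        data->limit = saved_limit;       // mov [limit_address], edi
        DeleteExtensions(data);          // delete_handle_scope_extensions
      }
    }

    int main() {
      HandleScopeData data;
      CallWithHandleScope(&data, [] { /* API callback body */ });
      return 0;
    }
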
-
-void CallApiCallbackStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : callee
- // -- ebx : call_data
- // -- ecx : holder
- // -- edx : api_function_address
- // -- esi : context
- // --
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[argc * 4] : first argument
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Register callee = edi;
- Register call_data = ebx;
- Register holder = ecx;
- Register api_function_address = edx;
- Register context = esi;
- Register return_address = eax;
-
- typedef FunctionCallbackArguments FCA;
-
- STATIC_ASSERT(FCA::kContextSaveIndex == 6);
- STATIC_ASSERT(FCA::kCalleeIndex == 5);
- STATIC_ASSERT(FCA::kDataIndex == 4);
- STATIC_ASSERT(FCA::kReturnValueOffset == 3);
- STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(FCA::kIsolateIndex == 1);
- STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kNewTargetIndex == 7);
- STATIC_ASSERT(FCA::kArgsLength == 8);
-
- __ pop(return_address);
-
- // new target
- __ PushRoot(Heap::kUndefinedValueRootIndex);
-
- // context save.
- __ push(context);
-
- // callee
- __ push(callee);
-
- // call data
- __ push(call_data);
-
- Register scratch = call_data;
- if (!call_data_undefined()) {
- // return value
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- // return value default
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- } else {
- // return value
- __ push(scratch);
- // return value default
- __ push(scratch);
- }
- // isolate
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
- // holder
- __ push(holder);
-
- __ mov(scratch, esp);
-
- // push return address
- __ push(return_address);
-
- if (!is_lazy()) {
- // load context from callee
- __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
- }
-
-  // The API function gets a reference to the v8::Arguments. If the CPU
-  // profiler is enabled, a wrapper function will be called instead and we need
-  // to pass the address of the callback as an additional parameter, so always
-  // allocate space for it.
- const int kApiArgc = 1 + 1;
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 3;
-
- PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
-
- // FunctionCallbackInfo::implicit_args_.
- __ mov(ApiParameterOperand(2), scratch);
- __ add(scratch, Immediate((argc() + FCA::kArgsLength - 1) * kPointerSize));
- // FunctionCallbackInfo::values_.
- __ mov(ApiParameterOperand(3), scratch);
- // FunctionCallbackInfo::length_.
- __ Move(ApiParameterOperand(4), Immediate(argc()));
-
- // v8::InvocationCallback's argument.
- __ lea(scratch, ApiParameterOperand(2));
- __ mov(ApiParameterOperand(0), scratch);
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_function_callback(masm->isolate());
-
- Operand context_restore_operand(ebp,
- (2 + FCA::kContextSaveIndex) * kPointerSize);
-  // Store callbacks return the first JS argument.
- int return_value_offset = 0;
- if (is_store()) {
- return_value_offset = 2 + FCA::kArgsLength;
- } else {
- return_value_offset = 2 + FCA::kReturnValueOffset;
- }
- Operand return_value_operand(ebp, return_value_offset * kPointerSize);
- int stack_space = 0;
- Operand length_operand = ApiParameterOperand(4);
- Operand* stack_space_operand = &length_operand;
- stack_space = argc() + FCA::kArgsLength + 1;
- stack_space_operand = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- ApiParameterOperand(1), stack_space,
- stack_space_operand, return_value_operand,
- &context_restore_operand);
-}
-
-
-void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- Register receiver = ApiGetterDescriptor::ReceiverRegister();
- Register holder = ApiGetterDescriptor::HolderRegister();
- Register callback = ApiGetterDescriptor::CallbackRegister();
- Register scratch = ebx;
- DCHECK(!AreAliased(receiver, holder, callback, scratch));
-
- __ pop(scratch); // Pop return address to extend the frame.
- __ push(receiver);
- __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
- __ PushRoot(Heap::kUndefinedValueRootIndex); // ReturnValue
- // ReturnValue default value
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ push(Immediate(ExternalReference::isolate_address(isolate())));
- __ push(holder);
- __ push(Immediate(Smi::kZero)); // should_throw_on_error -> false
- __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
- __ push(scratch); // Restore return address.
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Allocate v8::PropertyCallbackInfo object, arguments for callback and
- // space for optional callback address parameter (in case CPU profiler is
- // active) in non-GCed stack space.
- const int kApiArgc = 3 + 1;
-
-  // Load the address of the v8::PropertyCallbackInfo::args_ array.
- __ lea(scratch, Operand(esp, 2 * kPointerSize));
-
- PrepareCallApiFunction(masm, kApiArgc);
-  // Create the v8::PropertyCallbackInfo object on the stack and initialize
-  // its args_ field.
- Operand info_object = ApiParameterOperand(3);
- __ mov(info_object, scratch);
-
- // Name as handle.
- __ sub(scratch, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(0), scratch);
- // Arguments pointer.
- __ lea(scratch, info_object);
- __ mov(ApiParameterOperand(1), scratch);
- // Reserve space for optional callback address parameter.
- Operand thunk_last_arg = ApiParameterOperand(2);
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback(isolate());
-
- __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
- Register function_address = edx;
- __ mov(function_address,
- FieldOperand(scratch, Foreign::kForeignAddressOffset));
- // +3 is to skip prolog, return address and name handle.
- Operand return_value_operand(
- ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
- kStackUnwindSpace, nullptr, return_value_operand,
- NULL);
-}
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
deleted file mode 100644
index 9aeae46728..0000000000
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X87_CODE_STUBS_X87_H_
-#define V8_X87_CODE_STUBS_X87_H_
-
-namespace v8 {
-namespace internal {
-
-
-void ArrayNativeCode(MacroAssembler* masm,
- bool construct_call,
- Label* call_generic_code);
-
-
-class StringHelper : public AllStatic {
- public:
- // Compares two flat one byte strings and returns result in eax.
- static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat one byte strings for equality and returns result in eax.
- static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
- Register left, Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- static void GenerateOneByteCharsCompareLoop(
- MacroAssembler* masm, Register left, Register right, Register length,
- Register scratch, Label* chars_not_equal,
- Label::Distance chars_not_equal_near = Label::kFar);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class NameDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
- Register result, Register index, LookupMode mode)
- : PlatformCodeStub(isolate) {
- minor_key_ = DictionaryBits::encode(dictionary.code()) |
- ResultBits::encode(result.code()) |
- IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
- }
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<Name> name,
- Register r0);
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
-
- Register dictionary() const {
- return Register::from_code(DictionaryBits::decode(minor_key_));
- }
-
- Register result() const {
- return Register::from_code(ResultBits::decode(minor_key_));
- }
-
- Register index() const {
- return Register::from_code(IndexBits::decode(minor_key_));
- }
-
- LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
-
- class DictionaryBits: public BitField<int, 0, 3> {};
- class ResultBits: public BitField<int, 3, 3> {};
- class IndexBits: public BitField<int, 6, 3> {};
- class LookupModeBits: public BitField<LookupMode, 9, 1> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Isolate* isolate, Register object, Register value,
- Register address, RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : PlatformCodeStub(isolate),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- minor_key_ = ObjectBits::encode(object.code()) |
- ValueBits::encode(value.code()) |
- AddressBits::encode(address.code()) |
- RememberedSetActionBits::encode(remembered_set_action) |
- SaveFPRegsModeBits::encode(fp_mode);
- }
-
- RecordWriteStub(uint32_t key, Isolate* isolate)
- : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- bool SometimesSetsUpAFrame() override { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- DCHECK(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- DCHECK(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- DCHECK(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- DCHECK(GetMode(stub) == mode);
- Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
- }
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always ecx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- DCHECK(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
- if (scratch0.is(ecx)) {
- scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
- }
- if (object.is(ecx)) {
- object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(ecx)) {
- address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
- }
- DCHECK(!AreAliased(scratch0_, object_, address_, ecx));
- }
-
- void Save(MacroAssembler* masm) {
- DCHECK(!address_orig_.is(object_));
- DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
- DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
- DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->push(ecx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->mov(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->mov(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with ecx.
- if (!object_.is(object_orig_)) {
- masm->mov(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->mov(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->pop(ecx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The caller saved
- // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always ecx.
-
- Register GetRegThatIsNotEcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
- Register candidate = Register::from_code(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- inline Major MajorKey() const final { return RecordWrite; }
-
- void Generate(MacroAssembler* masm) override;
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm);
-
- void Activate(Code* code) override {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- Register object() const {
- return Register::from_code(ObjectBits::decode(minor_key_));
- }
-
- Register value() const {
- return Register::from_code(ValueBits::decode(minor_key_));
- }
-
- Register address() const {
- return Register::from_code(AddressBits::decode(minor_key_));
- }
-
- RememberedSetAction remembered_set_action() const {
- return RememberedSetActionBits::decode(minor_key_);
- }
-
- SaveFPRegsMode save_fp_regs_mode() const {
- return SaveFPRegsModeBits::decode(minor_key_);
- }
-
- class ObjectBits: public BitField<int, 0, 3> {};
- class ValueBits: public BitField<int, 3, 3> {};
- class AddressBits: public BitField<int, 6, 3> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
- class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 10, 1> {};
-
- RegisterAllocation regs_;
-
- DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_CODE_STUBS_X87_H_
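
The RecordWriteStub declared above switches between its three modes by patching the first seven bytes of its body in place: bytes 0-1 hold either a two-byte nop (cmpb al, #imm8) or a short jmp, and bytes 2-6 hold either a five-byte nop (cmpl eax, #imm32) or a near jmp, which is why Patch() flushes exactly 2 + 5 = 7 bytes. A standalone sketch of the same decoding over a plain byte buffer, purely illustrative and mirroring GetMode above:

// Decodes the incremental-marking mode from the first bytes of a record-write
// stub, as RecordWriteStub::GetMode does, but over a raw buffer.
#include <cassert>
#include <cstdint>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

Mode DecodeMode(const uint8_t* code) {
  const uint8_t kTwoByteNop = 0x3c, kTwoByteJump = 0xeb;
  const uint8_t kFiveByteNop = 0x3d, kFiveByteJump = 0xe9;
  if (code[0] == kTwoByteJump) return INCREMENTAL;  // first short jump is live
  assert(code[0] == kTwoByteNop);
  if (code[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
  assert(code[2] == kFiveByteNop);
  return STORE_BUFFER_ONLY;
}
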
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
deleted file mode 100644
index 1a827788ff..0000000000
--- a/deps/v8/src/x87/codegen-x87.cc
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/x87/codegen-x87.h"
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/heap/heap.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- DCHECK(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- DCHECK(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-#define __ masm.
-
-
-UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
- // Load double input into registers.
- __ fld_d(MemOperand(esp, 4));
- __ X87SetFPUCW(0x027F);
- __ fsqrt();
- __ X87SetFPUCW(0x037F);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-}
-
-
-// Helper functions for CreateMemMoveFunction.
-#undef __
-#define __ ACCESS_MASM(masm)
-
-enum Direction { FORWARD, BACKWARD };
-enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
-
-
-void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
-}
-
-
-#undef __
-#define __ masm.
-
-
-class LabelConverter {
- public:
- explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
- int32_t address(Label* l) const {
- return reinterpret_cast<int32_t>(buffer_) + l->pos();
- }
- private:
- byte* buffer_;
-};
-
-
-MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
- LabelConverter conv(buffer);
-
- // Generated code is put into a fixed, unmovable buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // 32-bit C declaration function calls pass arguments on stack.
-
- // Stack layout:
- // esp[12]: Third argument, size.
- // esp[8]: Second argument, source pointer.
- // esp[4]: First argument, destination pointer.
- // esp[0]: return address
-
- const int kDestinationOffset = 1 * kPointerSize;
- const int kSourceOffset = 2 * kPointerSize;
- const int kSizeOffset = 3 * kPointerSize;
-
- int stack_offset = 0; // Update if we change the stack height.
-
- Label backward, backward_much_overlap;
- Label forward_much_overlap, small_size, medium_size, pop_and_return;
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- __ cmp(dst, src);
- __ j(equal, &pop_and_return);
-
- // No SSE2.
- Label forward;
- __ cmp(count, 0);
- __ j(equal, &pop_and_return);
- __ cmp(dst, src);
- __ j(above, &backward);
- __ jmp(&forward);
- {
- // Simple forward copier.
- Label forward_loop_1byte, forward_loop_4byte;
- __ bind(&forward_loop_4byte);
- __ mov(eax, Operand(src, 0));
- __ sub(count, Immediate(4));
- __ add(src, Immediate(4));
- __ mov(Operand(dst, 0), eax);
- __ add(dst, Immediate(4));
- __ bind(&forward); // Entry point.
- __ cmp(count, 3);
- __ j(above, &forward_loop_4byte);
- __ bind(&forward_loop_1byte);
- __ cmp(count, 0);
- __ j(below_equal, &pop_and_return);
- __ mov_b(eax, Operand(src, 0));
- __ dec(count);
- __ inc(src);
- __ mov_b(Operand(dst, 0), eax);
- __ inc(dst);
- __ jmp(&forward_loop_1byte);
- }
- {
- // Simple backward copier.
- Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
- __ bind(&backward);
- __ add(src, count);
- __ add(dst, count);
- __ cmp(count, 3);
- __ j(below_equal, &entry_shortcut);
-
- __ bind(&backward_loop_4byte);
- __ sub(src, Immediate(4));
- __ sub(count, Immediate(4));
- __ mov(eax, Operand(src, 0));
- __ sub(dst, Immediate(4));
- __ mov(Operand(dst, 0), eax);
- __ cmp(count, 3);
- __ j(above, &backward_loop_4byte);
- __ bind(&backward_loop_1byte);
- __ cmp(count, 0);
- __ j(below_equal, &pop_and_return);
- __ bind(&entry_shortcut);
- __ dec(src);
- __ dec(count);
- __ mov_b(eax, Operand(src, 0));
- __ dec(dst);
- __ mov_b(Operand(dst, 0), eax);
- __ jmp(&backward_loop_1byte);
- }
-
- __ bind(&pop_and_return);
- MemMoveEmitPopAndReturn(&masm);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
- // TODO(jkummerow): It would be nice to register this code creation event
- // with the PROFILE / GDBJIT system.
- return FUNCTION_CAST<MemMoveFunction>(buffer);
-}
-
-
-#undef __
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- Label indirect_string_loaded;
- __ bind(&indirect_string_loaded);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ test(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string, thin_string;
- __ and_(result, Immediate(kStringRepresentationMask));
- __ cmp(result, Immediate(kConsStringTag));
- __ j(equal, &cons_string, Label::kNear);
- __ cmp(result, Immediate(kThinStringTag));
- __ j(equal, &thin_string, Label::kNear);
-
- // Handle slices.
- __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ SmiUntag(result);
- __ add(index, result);
- __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle thin strings.
- __ bind(&thin_string);
- __ mov(string, FieldOperand(string, ThinString::kActualOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(factory->empty_string()));
- __ j(not_equal, call_runtime);
- __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
- __ jmp(&indirect_string_loaded);
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label one_byte_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, kExternalStringExpectedButNotFound);
- }
- // Rule out short external strings.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(result, Immediate(kShortExternalStringMask));
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(result, Immediate(kStringEncodingMask));
- __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &one_byte_external, Label::kNear);
- // Two-byte string.
- __ movzx_w(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&one_byte_external);
- // One-byte string.
- __ movzx_b(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: one-byte or two-byte.
- Label one_byte;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &one_byte, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- __ movzx_w(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // One-byte string.
- // Load the byte into the result register.
- __ bind(&one_byte);
- __ movzx_b(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-
-#undef __
-
-
-CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
- USE(isolate);
- DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
- CodePatcher patcher(isolate, young_sequence_.start(),
- young_sequence_.length());
- patcher.masm()->push(ebp);
- patcher.masm()->mov(ebp, esp);
- patcher.masm()->push(esi);
- patcher.masm()->push(edi);
-}
-
-
-#ifdef DEBUG
-bool CodeAgingHelper::IsOld(byte* candidate) const {
- return *candidate == kCallOpcode;
-}
-#endif
-
-
-bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
- bool result = isolate->code_aging_helper()->IsYoung(sequence);
- DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
- return result;
-}
-
-Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
- if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- return GetAgeOfCodeAgeStub(stub);
-}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
- Code::Age age) {
- uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
- if (age == kNoAgeCodeAge) {
- isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- Assembler::FlushICache(isolate, sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(isolate, age);
- CodePatcher patcher(isolate, sequence, young_length);
- patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
- }
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
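
CreateMemMoveFunction above emits an overlap-safe copy: it copies forward (4-byte chunks, then a byte tail) when the destination lies below the source, and backward otherwise, so no temporary buffer is needed. A rough C++ equivalent of that strategy, for illustration only; real callers should simply use memmove.

// Mirrors the generated code's strategy: load-then-store 4-byte chunks,
// forward when dst < src, backward when dst > src, with a byte loop for the tail.
#include <cstddef>
#include <cstdint>
#include <cstring>

void MemMoveSketch(void* dst_v, const void* src_v, size_t count) {
  uint8_t* dst = static_cast<uint8_t*>(dst_v);
  const uint8_t* src = static_cast<const uint8_t*>(src_v);
  if (dst == src || count == 0) return;
  uint32_t chunk;
  if (dst < src) {
    while (count > 3) {                    // 4-byte forward loop
      std::memcpy(&chunk, src, 4);         // load first, ...
      std::memcpy(dst, &chunk, 4);         // ... then store, like mov eax / mov [dst]
      src += 4; dst += 4; count -= 4;
    }
    while (count--) *dst++ = *src++;       // byte tail
  } else {
    src += count; dst += count;            // start past the end
    while (count > 3) {                    // 4-byte backward loop
      src -= 4; dst -= 4; count -= 4;
      std::memcpy(&chunk, src, 4);
      std::memcpy(dst, &chunk, 4);
    }
    while (count--) *--dst = *--src;       // byte tail
  }
}
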
diff --git a/deps/v8/src/x87/codegen-x87.h b/deps/v8/src/x87/codegen-x87.h
deleted file mode 100644
index f034a9c2fa..0000000000
--- a/deps/v8/src/x87/codegen-x87.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X87_CODEGEN_X87_H_
-#define V8_X87_CODEGEN_X87_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_CODEGEN_X87_H_
diff --git a/deps/v8/src/x87/cpu-x87.cc b/deps/v8/src/x87/cpu-x87.cc
deleted file mode 100644
index 22906b31be..0000000000
--- a/deps/v8/src/x87/cpu-x87.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// CPU specific code for ia32 independent of OS goes here.
-
-#ifdef __GNUC__
-#include "src/third_party/valgrind/valgrind.h"
-#endif
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CpuFeatures::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel instruction
- // cache flushing is only necessary when multiple cores are running the
- // same code simultaneously. V8 (and JavaScript) is single-threaded, and
- // when code is patched on an Intel CPU the core performing the patching
- // will have its own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability because code patches or moves sometimes go unnoticed. One
- // solution is to run valgrind with --smc-check=all, but this comes at a big
- // performance cost. We can notify valgrind to invalidate its cache.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
- USE(res);
-#endif
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
deleted file mode 100644
index a198284da5..0000000000
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ /dev/null
@@ -1,428 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/deoptimizer.h"
-#include "src/full-codegen/full-codegen.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
-}
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
- HandleScope scope(isolate);
-
- // Compute the size of relocation information needed for the code
- // patching in Deoptimizer::PatchCodeForDeoptimization below.
- int min_reloc_size = 0;
- int prev_pc_offset = 0;
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- int pc_offset = deopt_data->Pc(i)->value();
- if (pc_offset == -1) continue;
- pc_offset = pc_offset + 1; // We will encode the pc offset after the call.
- DCHECK_GE(pc_offset, prev_pc_offset);
- int pc_delta = pc_offset - prev_pc_offset;
- // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
- // if encodable with small pc delta encoding and up to 6 bytes
- // otherwise.
- if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
- min_reloc_size += 2;
- } else {
- min_reloc_size += 6;
- }
- prev_pc_offset = pc_offset;
- }
-
- // If the relocation information is not big enough we create a new
- // relocation info object that is padded with comments to make it
- // big enough for lazy deoptimization.
- int reloc_length = code->relocation_info()->length();
- if (min_reloc_size > reloc_length) {
- int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
- // Padding needed.
- int min_padding = min_reloc_size - reloc_length;
- // Number of comments needed to take up at least that much space.
- int additional_comments =
- (min_padding + comment_reloc_size - 1) / comment_reloc_size;
- // Actual padding size.
- int padding = additional_comments * comment_reloc_size;
- // Allocate new relocation info and copy old relocation to the end
- // of the new relocation info array because relocation info is
- // written and read backwards.
- Factory* factory = isolate->factory();
- Handle<ByteArray> new_reloc =
- factory->NewByteArray(reloc_length + padding, TENURED);
- MemCopy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(), reloc_length);
- // Create a relocation writer to write the comments in the padding
- // space. Use position 0 for everything to ensure short encoding.
- RelocInfoWriter reloc_info_writer(
- new_reloc->GetDataStartAddress() + padding, 0);
- intptr_t comment_string
- = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
- for (int i = 0; i < additional_comments; ++i) {
-#ifdef DEBUG
- byte* pos_before = reloc_info_writer.pos();
-#endif
- reloc_info_writer.Write(&rinfo);
- DCHECK(RelocInfo::kMinRelocCommentSize ==
- pos_before - reloc_info_writer.pos());
- }
- // Replace relocation information on the code object.
- code->set_relocation_info(*new_reloc);
- }
-}
-
-
-void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
- Address code_start_address = code->instruction_start();
-
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->int3();
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
- osr_patcher.masm()->int3();
- }
-
- // We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte
- // array. Later on we will slide this to the start of the byte array and
- // create a filler object in the remaining space.
- ByteArray* reloc_info = code->relocation_info();
- Address reloc_end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
-
- // Since the call is a relative encoding, write new
- // reloc info. We do not need any of the existing reloc info because the
- // existing code will not be used again (we zap it in debug builds).
- //
- // Emit call to lazy deoptimization at all lazy deopt points.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- // Patch lazy deoptimization entry.
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(isolate, call_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
- // We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(isolate, call_address + 1, // 1 after the call opcode.
- RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry), NULL);
- reloc_info_writer.Write(&rinfo);
- DCHECK_GE(reloc_info_writer.pos(),
- reloc_info->address() + ByteArray::kHeaderSize);
- DCHECK(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- DCHECK(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Move the relocation info to the beginning of the byte array.
- const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
- MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
-
- // Right trim the relocation info to free up remaining space.
- const int delta = reloc_info->length() - new_reloc_length;
- if (delta > 0) {
- isolate->heap()->RightTrimFixedArray(reloc_info, delta);
- }
-}
-
-
-void Deoptimizer::SetPlatformCompiledStubRegisters(
- FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
- int params = descriptor->GetHandlerParameterCount();
- output_frame->SetRegister(eax.code(), params);
- output_frame->SetRegister(ebx.code(), handler);
-}
-
-
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < X87Register::kMaxNumRegisters; ++i) {
- Float64 double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-}
-
-#define __ masm()->
-
-void Deoptimizer::TableEntryGenerator::Generate() {
- GeneratePrologue();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize * X87Register::kMaxNumRegisters;
-
- // Reserve space for x87 fp registers.
- __ sub(esp, Immediate(kDoubleRegsSize));
-
- __ pushad();
-
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
- __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
-
- // GP registers are safe to use now.
- // Save used x87 fp registers in correct position of previous reserve space.
- Label loop, done;
- // Get the layout of x87 stack.
- __ sub(esp, Immediate(kPointerSize));
- __ fistp_s(MemOperand(esp, 0));
- __ pop(eax);
- // Preserve stack layout in edi
- __ mov(edi, eax);
- // Get the x87 stack depth, the first 3 bits.
- __ mov(ecx, eax);
- __ and_(ecx, 0x7);
- __ j(zero, &done, Label::kNear);
-
- __ bind(&loop);
- __ shr(eax, 0x3);
- __ mov(ebx, eax);
- __ and_(ebx, 0x7); // Extract the st_x index into ebx.
- // Pop TOS to the correct position. The disp(0x20) is due to pushad.
- // The st_i should be saved to (esp + ebx * kDoubleSize + 0x20).
- __ fstp_d(Operand(esp, ebx, times_8, 0x20));
- __ dec(ecx); // Decrease stack depth.
- __ j(not_zero, &loop, Label::kNear);
- __ bind(&done);
-
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kPointerSize + kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object
- // and compute the fp-to-sp delta in register edx.
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
-
- __ sub(edx, ebp);
- __ neg(edx);
-
- __ push(edi);
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
- __ mov(eax, Immediate(0));
- Label context_check;
- __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(edi, &context_check);
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
- __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
- }
-
- __ pop(edi);
-
- // Preserve deoptimizer object in register eax and get the input
- // frame descriptor pointer.
- __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(ebx, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
- // Fill in the double input registers.
- for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize;
- __ fld_d(Operand(esp, src_offset));
- __ fstp_d(Operand(ebx, dst_offset));
- }
-
- // Clear all FPU exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
- __ fnclex();
-
- // Remove the bailout id, return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
-
- // Compute a pointer to the unwinding limit in register ecx; that is
- // the first stack slot not part of the input frame.
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, esp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(edx, 0));
- __ add(edx, Immediate(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(ecx, esp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(edi);
- __ push(eax);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate()), 1);
- }
- __ pop(eax);
- __ pop(edi);
- __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: eax = current FrameDescription**, edx = one past the
- // last FrameDescription**.
- __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
- __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_4, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
- __ mov(ebx, Operand(eax, 0));
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(ecx, Immediate(sizeof(uint32_t)));
- __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ test(ecx, ecx);
- __ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(eax, edx);
- __ j(below, &outer_push_loop);
-
-
- // In case of a failed STUB, we have to restore the x87 stack.
- // x87 stack layout is in edi.
- Label loop2, done2;
- // Get the x87 stack depth, the first 3 bits.
- __ mov(ecx, edi);
- __ and_(ecx, 0x7);
- __ j(zero, &done2, Label::kNear);
-
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ bind(&loop2);
- __ mov(eax, edi);
- __ shr_cl(eax);
- __ and_(eax, 0x7);
- __ fld_d(Operand(ebx, eax, times_8, double_regs_offset));
- __ sub(ecx, Immediate(0x3));
- __ j(not_zero, &loop2, Label::kNear);
- __ bind(&done2);
-
- // Push state, pc, and continuation from the last output frame.
- __ push(Operand(ebx, FrameDescription::state_offset()));
- __ push(Operand(ebx, FrameDescription::pc_offset()));
- __ push(Operand(ebx, FrameDescription::continuation_offset()));
-
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(ebx, offset));
- }
-
- // Restore the registers from the stack.
- __ popad();
-
- // Return to the continuation point.
- __ ret(0);
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-
-void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
- SetFrameSlot(offset, value);
-}
-
-
-void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
- SetFrameSlot(offset, value);
-}
-
-
-void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- // No embedded constant pool support.
- UNREACHABLE();
-}
-
-
-#undef __
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
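
The deoptimizer entry above snapshots the x87 register stack by popping a packed "stack layout" word off the FPU stack (and keeping it in edi): the low three bits give the live stack depth, and each following three-bit group names the register sitting at the next stack slot, which is how the fstp_d loop knows where in the reserved double-register area to spill each st(i). A small illustrative decoder for that packing, assuming only the encoding the loop above implies:

// Decodes the packed x87 stack-layout word the deoptimizer walks above.
// bits 0-2: depth; bits 3-5: register at st(0); bits 6-8: register at st(1); ...
#include <cstdint>
#include <cstdio>

void DecodeX87Layout(uint32_t layout) {
  unsigned depth = layout & 0x7;
  layout >>= 3;
  for (unsigned slot = 0; slot < depth; ++slot) {
    std::printf("st(%u) holds register code %u\n", slot, layout & 0x7);
    layout >>= 3;
  }
}
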
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
deleted file mode 100644
index 657dc7be24..0000000000
--- a/deps/v8/src/x87/disasm-x87.cc
+++ /dev/null
@@ -1,1875 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <assert.h>
-#include <stdarg.h>
-#include <stdio.h>
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/base/compiler-specific.h"
-#include "src/disasm.h"
-
-namespace disasm {
-
-enum OperandOrder {
- UNSET_OP_ORDER = 0,
- REG_OPER_OP_ORDER,
- OPER_REG_OP_ORDER
-};
-
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- const char* mnem;
- OperandOrder op_order_;
-};
-
-static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
- {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER}, {0x86, "xchg_b", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER}, {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER}, {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}};
-
-static const ByteMnemonic zero_operands_instr[] = {
- {0xC3, "ret", UNSET_OP_ORDER},
- {0xC9, "leave", UNSET_OP_ORDER},
- {0x90, "nop", UNSET_OP_ORDER},
- {0xF4, "hlt", UNSET_OP_ORDER},
- {0xCC, "int3", UNSET_OP_ORDER},
- {0x60, "pushad", UNSET_OP_ORDER},
- {0x61, "popad", UNSET_OP_ORDER},
- {0x9C, "pushfd", UNSET_OP_ORDER},
- {0x9D, "popfd", UNSET_OP_ORDER},
- {0x9E, "sahf", UNSET_OP_ORDER},
- {0x99, "cdq", UNSET_OP_ORDER},
- {0x9B, "fwait", UNSET_OP_ORDER},
- {0xFC, "cld", UNSET_OP_ORDER},
- {0xAB, "stos", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- {0xE8, "call", UNSET_OP_ORDER},
- {0xE9, "jmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic short_immediate_instr[] = {
- {0x05, "add", UNSET_OP_ORDER},
- {0x0D, "or", UNSET_OP_ORDER},
- {0x15, "adc", UNSET_OP_ORDER},
- {0x25, "and", UNSET_OP_ORDER},
- {0x2D, "sub", UNSET_OP_ORDER},
- {0x35, "xor", UNSET_OP_ORDER},
- {0x3D, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-// Generally we don't want to generate these because they are subject to partial
-// register stalls. They are included for completeness and because the cmp
-// variant is used by the RecordWrite stub. Because it does not update the
-// register, it is not subject to partial register stalls.
-static ByteMnemonic byte_immediate_instr[] = {
- {0x0c, "or", UNSET_OP_ORDER},
- {0x24, "and", UNSET_OP_ORDER},
- {0x34, "xor", UNSET_OP_ORDER},
- {0x3c, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const char* const jump_conditional_mnem[] = {
- /*0*/ "jo", "jno", "jc", "jnc",
- /*4*/ "jz", "jnz", "jna", "ja",
- /*8*/ "js", "jns", "jpe", "jpo",
- /*12*/ "jl", "jnl", "jng", "jg"
-};
-
-
-static const char* const set_conditional_mnem[] = {
- /*0*/ "seto", "setno", "setc", "setnc",
- /*4*/ "setz", "setnz", "setna", "seta",
- /*8*/ "sets", "setns", "setpe", "setpo",
- /*12*/ "setl", "setnl", "setng", "setg"
-};
-
-
-static const char* const conditional_move_mnem[] = {
- /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
- /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
- /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
- /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR,
- BYTE_IMMEDIATE_INSTR
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandOrder op_order_;
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const { return instructions_[x]; }
- static InstructionTable* get_instance() {
- static InstructionTable table;
- return &table;
- }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(const ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
- SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
- SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
- SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
- SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
-}
-
-
-void InstructionTable::CopyTable(const ByteMnemonic bm[],
- InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- id->op_order_ = bm[i].op_order_;
- DCHECK_EQ(NO_INSTR, id->type); // Information not already entered.
- id->type = type;
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- DCHECK_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = mnem;
- id->type = type;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- DCHECK_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = jump_conditional_mnem[b & 0x0F];
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
-
-
-// The X87 disassembler implementation.
-class DisassemblerX87 {
- public:
- DisassemblerX87(const NameConverter& converter,
- bool abort_on_unimplemented = true)
- : converter_(converter),
- instruction_table_(InstructionTable::get_instance()),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(abort_on_unimplemented) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerX87() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- const NameConverter& converter_;
- InstructionTable* instruction_table_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
-
- enum {
- eax = 0,
- ecx = 1,
- edx = 2,
- ebx = 3,
- esp = 4,
- ebp = 5,
- esi = 6,
- edi = 7
- };
-
-
- enum ShiftOpcodeExtension {
- kROL = 0,
- kROR = 1,
- kRCL = 2,
- kRCR = 3,
- kSHL = 4,
- KSHR = 5,
- kSAR = 7
- };
-
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
-
- // Disassembler helper functions.
- static void get_modrm(byte data, int* mod, int* regop, int* rm) {
- *mod = (data >> 6) & 3;
- *regop = (data & 0x38) >> 3;
- *rm = data & 7;
- }
-
-
- static void get_sib(byte data, int* scale, int* index, int* base) {
- *scale = (data >> 6) & 3;
- *index = (data >> 3) & 7;
- *base = data & 7;
- }
-
- typedef const char* (DisassemblerX87::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
- int PrintImmediateOp(byte* data);
- int F7Instruction(byte* data);
- int D1D3C1Instruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data, const char* comment);
- int JumpConditionalShort(byte* data, const char* comment);
- int SetCC(byte* data);
- int CMov(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- UNIMPLEMENTED();
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
-
-
-void DisassemblerX87::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-int DisassemblerX87::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerX87::NameOfCPURegister;
- switch (mod) {
- case 0:
- if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == esp && base == esp && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 2;
- } else if (base == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d%s0x%x]",
- (this->*register_name)(index),
- 1 << scale,
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
- return 6;
- } else if (index != esp && base != ebp) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<int8_t*>(modrmp + 2);
- if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s%s0x%x]",
- (this->*register_name)(rm),
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
- } else {
- AppendToBuffer("[%s+%s*%d%s0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale,
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<int8_t*>(modrmp + 1);
- AppendToBuffer("[%s%s0x%x]",
- (this->*register_name)(rm),
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
- return mod == 2 ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerX87::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp, &DisassemblerX87::NameOfCPURegister);
-}
-
-
-int DisassemblerX87::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX87::NameOfByteCPURegister);
-}
-
-
-int DisassemblerX87::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX87::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerX87::PrintOperands(const char* mnem,
- OperandOrder op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- advance = PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s ", mnem);
- advance = PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerX87::PrintImmediateOp(byte* data) {
- bool sign_extension_bit = (*data & 0x02) != 0;
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0: mnem = "add"; break;
- case 1: mnem = "or"; break;
- case 2: mnem = "adc"; break;
- case 4: mnem = "and"; break;
- case 5: mnem = "sub"; break;
- case 6: mnem = "xor"; break;
- case 7: mnem = "cmp"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data+1);
- if (sign_extension_bit) {
- AppendToBuffer(",0x%x", *(data + 1 + count));
- return 1 + count + 1 /*int8*/;
- } else {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
- return 1 + count + 4 /*int32_t*/;
- }
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::F7Instruction(byte* data) {
- DCHECK_EQ(0xF7, *data);
- byte modrm = *++data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "test";
- break;
- case 2:
- mnem = "not";
- break;
- case 3:
- mnem = "neg";
- break;
- case 4:
- mnem = "mul";
- break;
- case 5:
- mnem = "imul";
- break;
- case 6:
- mnem = "div";
- break;
- case 7:
- mnem = "idiv";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data);
- if (regop == 0) {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
- count += 4;
- }
- return 1 + count;
-}
-
-
-int DisassemblerX87::D1D3C1Instruction(byte* data) {
- byte op = *data;
- DCHECK(op == 0xD1 || op == 0xD3 || op == 0xC1);
- byte modrm = *++data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int imm8 = -1;
- const char* mnem = NULL;
- switch (regop) {
- case kROL:
- mnem = "rol";
- break;
- case kROR:
- mnem = "ror";
- break;
- case kRCL:
- mnem = "rcl";
- break;
- case kRCR:
- mnem = "rcr";
- break;
- case kSHL:
- mnem = "shl";
- break;
- case KSHR:
- mnem = "shr";
- break;
- case kSAR:
- mnem = "sar";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data);
- if (op == 0xD1) {
- imm8 = 1;
- } else if (op == 0xC1) {
- imm8 = *(data + 1);
- count++;
- } else if (op == 0xD3) {
- // Shift/rotate by cl.
- }
- if (imm8 >= 0) {
- AppendToBuffer(",%d", imm8);
- } else {
- AppendToBuffer(",cl");
- }
- return 1 + count;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::JumpShort(byte* data) {
- DCHECK_EQ(0xEB, *data);
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::JumpConditional(byte* data, const char* comment) {
- DCHECK_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::JumpConditionalShort(byte* data, const char* comment) {
- byte cond = *data & 0x0F;
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::SetCC(byte* data) {
- DCHECK_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- const char* mnem = set_conditional_mnem[cond];
- AppendToBuffer("%s ", mnem);
- PrintRightByteOperand(data+2);
- return 3; // Includes 0x0F.
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::CMov(byte* data) {
- DCHECK_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_move_mnem[cond];
- int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
- return 2 + op_size; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX87::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- DCHECK_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerX87::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 2: mnem = "fst_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 5:
- mnem = "fldcw";
- break;
- case 7:
- mnem = "fnstcw";
- break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- switch (regop) {
- case 0:
- mnem = "fadd_d";
- break;
- case 1:
- mnem = "fmul_d";
- break;
- case 4:
- mnem = "fsub_d";
- break;
- case 5:
- mnem = "fsubr_d";
- break;
- case 6:
- mnem = "fdiv_d";
- break;
- case 7:
- mnem = "fdivr_d";
- break;
- default:
- UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 1: mnem = "fisttp_d"; break;
- case 2: mnem = "fst_d"; break;
- case 3: mnem = "fstp_d"; break;
- case 4:
- mnem = "frstor";
- break;
- case 6:
- mnem = "fnsave";
- break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerX87::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd_i"; break;
- case 0xE0: mnem = "fsub_i"; break;
- case 0xC8: mnem = "fmul_i"; break;
- case 0xF0: mnem = "fdiv_i"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF4: mnem = "fxtract"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFC: mnem = "frndint"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else if (modrm_byte == 0xE3) {
- mnem = "fninit";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD0: mnem = "fst"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-// Mnemonics for instructions whose opcode follows a 0x0F prefix byte.
-// Returns NULL if the instruction is not handled here.
-static const char* F0Mnem(byte f0byte) {
- switch (f0byte) {
- case 0x0B:
- return "ud2";
- case 0x18:
- return "prefetch";
- case 0xA2:
- return "cpuid";
- case 0xBE:
- return "movsx_b";
- case 0xBF:
- return "movsx_w";
- case 0xB6:
- return "movzx_b";
- case 0xB7:
- return "movzx_w";
- case 0xAF:
- return "imul";
- case 0xA4:
- return "shld";
- case 0xA5:
- return "shld";
- case 0xAD:
- return "shrd";
- case 0xAC:
- return "shrd"; // 3-operand version.
- case 0xAB:
- return "bts";
- case 0xB0:
- return "cmpxchg_b";
- case 0xB1:
- return "cmpxchg";
- case 0xBC:
- return "bsf";
- case 0xBD:
- return "bsr";
- default: return NULL;
- }
-}
-
-
-// Disassembles the instruction at '*instr' and writes it into 'out_buffer'.
-int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // starting to write at position 0
- byte* data = instr;
- // Check for hints.
- const char* branch_hint = NULL;
- // We use these two prefixes only with branch prediction
- if (*data == 0x3E /*ds*/) {
- branch_hint = "predicted taken";
- data++;
- } else if (*data == 0x2E /*cs*/) {
- branch_hint = "predicted not taken";
- data++;
- } else if (*data == 0xF0 /*lock*/) {
- AppendToBuffer("lock ");
- data++;
- }
-
- bool processed = true; // Will be set to false if the current instruction
- // is not in the instruction table.
- const InstructionDesc& idesc = instruction_table_->Get(*data);
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- AppendToBuffer("%s", idesc.mnem);
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data, branch_hint);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
- data++;
- break;
-
- case MOVE_REG_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("mov %s,%s",
- NameOfCPURegister(*data & 0x07),
- NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
- data += 2;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
- //----------------------------
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
- data += 3;
- break;
-
- case 0x6B: {
- data++;
- data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
- AppendToBuffer(",%d", *data);
- data++;
- } break;
-
- case 0x69: {
- data++;
- data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
- AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
- data += 4;
- }
- break;
-
- case 0xF6:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("test_b ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- { byte f0byte = data[1];
- const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0x18) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* suffix[] = {"nta", "1", "2", "3"};
- AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
- data += PrintRightOperand(data);
- } else if (f0byte == 0x1F && data[2] == 0) {
- AppendToBuffer("nop"); // 3 byte nop.
- data += 3;
- } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
- AppendToBuffer("nop"); // 4 byte nop.
- data += 4;
- } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
- data[4] == 0) {
- AppendToBuffer("nop"); // 5 byte nop.
- data += 5;
- } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0) {
- AppendToBuffer("nop"); // 7 byte nop.
- data += 7;
- } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0 &&
- data[7] == 0) {
- AppendToBuffer("nop"); // 8 byte nop.
- data += 8;
- } else if (f0byte == 0x0B || f0byte == 0xA2 || f0byte == 0x31) {
- AppendToBuffer("%s", f0mnem);
- data += 2;
- } else if (f0byte == 0x28) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movaps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
- const char* const pseudo_op[] = {
- "rcpps",
- "andps",
- "andnps",
- "orps",
- "xorps",
- "addps",
- "mulps",
- "cvtps2pd",
- "cvtdq2ps",
- "subps",
- "minps",
- "divps",
- "maxps",
- };
-
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,",
- pseudo_op[f0byte - 0x53],
- NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (f0byte == 0x50) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0xC6) {
- // shufps xmm, xmm/m128, imm8
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("shufps %s,%s,%d",
- NameOfXMMRegister(rm),
- NameOfXMMRegister(regop),
- static_cast<int>(imm8));
- data += 2;
- } else if ((f0byte & 0xF0) == 0x80) {
- data += JumpConditional(data, branch_hint);
- } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
- f0byte == 0xB7 || f0byte == 0xAF) {
- data += 2;
- data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
- } else if ((f0byte & 0xF0) == 0x90) {
- data += SetCC(data);
- } else if ((f0byte & 0xF0) == 0x40) {
- data += CMov(data);
- } else if (f0byte == 0xA4 || f0byte == 0xAC) {
- // shld, shrd
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- data += 2;
- AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
- NameOfCPURegister(regop), static_cast<int>(imm8));
- } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd_cl, shld_cl, bts
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else if (f0byte == 0xB0) {
- // cmpxchg_b
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else if (f0byte == 0xB1) {
- // cmpxchg
- data += 2;
- data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
- } else if (f0byte == 0xBC) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else if (f0byte == 0xBD) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x8F:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case esi: mnem = "push"; break;
- case eax: mnem = "inc"; break;
- case ecx: mnem = "dec"; break;
- case edx: mnem = "call"; break;
- case esp: mnem = "jmp"; break;
- default: mnem = "???";
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- { bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 5: mnem = "subb"; break;
- case 7: mnem = "cmpb"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- { bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x66: // prefix
- while (*data == 0x66) data++;
- if (*data == 0xf && data[1] == 0x1f) {
- AppendToBuffer("nop"); // 0x66 prefix
- } else if (*data == 0x39) {
- data++;
- data += PrintOperands("cmpw", OPER_REG_OP_ORDER, data);
- } else if (*data == 0x3B) {
- data++;
- data += PrintOperands("cmpw", REG_OPER_OP_ORDER, data);
- } else if (*data == 0x81) {
- data++;
- AppendToBuffer("cmpw ");
- data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 2;
- } else if (*data == 0x87) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xchg_w %s,", NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else if (*data == 0x89) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("mov_w ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else if (*data == 0x8B) {
- data++;
- data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
- } else if (*data == 0x90) {
- AppendToBuffer("nop"); // 0x66 prefix
- } else if (*data == 0xC7) {
- data++;
- AppendToBuffer("%s ", "mov_w");
- data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 2;
- } else if (*data == 0xF7) {
- data++;
- AppendToBuffer("%s ", "test_w");
- data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 2;
- } else if (*data == 0x0F) {
- data++;
- if (*data == 0x38) {
- data++;
- if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("ptest %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x2A) {
- // movntdqa
- UnimplementedInstruction();
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x3A) {
- data++;
- if (*data == 0x0B) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundsd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x16) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &rm, &regop);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pextrd %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(rm),
- NameOfXMMRegister(regop),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x22) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pinsrd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x2E || *data == 0x2F) {
- const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 0x3) {
- AppendToBuffer("%s %s,%s", mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- }
- } else if (*data == 0x50) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskpd %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x56) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("orpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x57) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x6E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (*data == 0x6F) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (*data == 0x70) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pshufd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x76) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pcmpeqd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x90) {
- data++;
- AppendToBuffer("nop"); // 2 byte nop.
- } else if (*data == 0xF3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psllq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x73) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- DCHECK(regop == esi || regop == edx);
- AppendToBuffer("%s %s,%d",
- (regop == esi) ? "psllq" : "psrlq",
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0xD3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psrlq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x7F) {
- AppendToBuffer("movdqa ");
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0x7E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0xDB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pand %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xE7) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 3) {
- // movntdq
- UnimplementedInstruction();
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0xEF) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pxor %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xEB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("por %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xB1) {
- data++;
- data += PrintOperands("cmpxchg_w", OPER_REG_OP_ORDER, data);
- } else {
- UnimplementedInstruction();
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xFE:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == ecx) {
- AppendToBuffer("dec_b ");
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
- data += 2;
- break;
-
- case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += D1D3C1Instruction(data);
- break;
-
- case 0xD8: // fall through
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF2:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movsd ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else {
- const char* mnem = "?";
- switch (b2) {
- case 0x2A: mnem = "cvtsi2sd"; break;
- case 0x2C: mnem = "cvttsd2si"; break;
- case 0x2D: mnem = "cvtsd2si"; break;
- case 0x51: mnem = "sqrtsd"; break;
- case 0x58: mnem = "addsd"; break;
- case 0x59: mnem = "mulsd"; break;
- case 0x5C: mnem = "subsd"; break;
- case 0x5E: mnem = "divsd"; break;
- }
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (b2 == 0x2A) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (b2 == 0x2C || b2 == 0x2D) {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0xC2) {
- // Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[data[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data += 2;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- }
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF3:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movss ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x2C) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
- AppendToBuffer("movdqu ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*(data+1) == 0xA5) {
- data += 2;
- AppendToBuffer("rep_movs");
- } else if (*(data+1) == 0xAB) {
- data += 2;
- AppendToBuffer("rep_stos");
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF7:
- data += F7Instruction(data);
- break;
-
- default:
- UnimplementedInstruction();
- }
- }
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = data - instr;
- if (instr_len == 0) {
- printf("%02x", *data);
- }
- DCHECK(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::SNPrintF(out_buffer + outp, " ");
- }
-
- outp += v8::internal::SNPrintF(out_buffer + outp, " %s", tmp_buffer_.start());
- return instr_len;
-} // NOLINT (function is too long)
-
-
-//------------------------------------------------------------------------------
-
-
-static const char* const cpu_regs[8] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-};
-
-
-static const char* const byte_cpu_regs[8] = {
- "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
-};
-
-
-static const char* const xmm_regs[8] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 8) return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // X87 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerX87 d(converter_, false /*do not crash if unimplemented*/);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The IA-32 assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-
-/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", static_cast<void*>(prev_pc));
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/frames-x87.cc b/deps/v8/src/x87/frames-x87.cc
deleted file mode 100644
index 80e30a5afe..0000000000
--- a/deps/v8/src/x87/frames-x87.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/assembler.h"
-#include "src/frames.h"
-#include "src/x87/assembler-x87-inl.h"
-#include "src/x87/assembler-x87.h"
-#include "src/x87/frames-x87.h"
-
-namespace v8 {
-namespace internal {
-
-
-Register JavaScriptFrame::fp_register() { return ebp; }
-Register JavaScriptFrame::context_register() { return esi; }
-Register JavaScriptFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
-}
-
-
-Register StubFailureTrampolineFrame::fp_register() { return ebp; }
-Register StubFailureTrampolineFrame::context_register() { return esi; }
-Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- UNREACHABLE();
- return no_reg;
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
deleted file mode 100644
index 1a378ed3ec..0000000000
--- a/deps/v8/src/x87/frames-x87.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X87_FRAMES_X87_H_
-#define V8_X87_FRAMES_X87_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Register lists
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 8;
-
-
-// Caller-saved registers
-const RegList kJSCallerSaved =
- 1 << 0 | // eax
- 1 << 1 | // ecx
- 1 << 2 | // edx
- 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
- 1 << 7; // edi - callee function
-
-const int kNumJSCallerSaved = 5;
-
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 8;
-
-// ----------------------------------------------------
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -6 * kPointerSize;
-
- static const int kNewTargetArgOffset = +2 * kPointerSize;
- static const int kFunctionArgOffset = +3 * kPointerSize;
- static const int kReceiverArgOffset = +4 * kPointerSize;
- static const int kArgcOffset = +5 * kPointerSize;
- static const int kArgvOffset = +6 * kPointerSize;
-};
-
-class ExitFrameConstants : public TypedFrameConstants {
- public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- DEFINE_TYPED_FRAME_SIZES(2);
-
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-
- static const int kConstantPoolOffset = 0; // Not used
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_FRAMES_X87_H_
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
deleted file mode 100644
index 25707a34aa..0000000000
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-const Register FastNewFunctionContextDescriptor::FunctionRegister() {
- return edi;
-}
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return eax; }
-
-const Register LoadDescriptor::ReceiverRegister() { return edx; }
-const Register LoadDescriptor::NameRegister() { return ecx; }
-const Register LoadDescriptor::SlotRegister() { return eax; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
-
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
-
-const Register StoreDescriptor::ReceiverRegister() { return edx; }
-const Register StoreDescriptor::NameRegister() { return ecx; }
-const Register StoreDescriptor::ValueRegister() { return eax; }
-const Register StoreDescriptor::SlotRegister() { return edi; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
-const Register StoreTransitionDescriptor::MapRegister() { return edi; }
-
-const Register StringCompareDescriptor::LeftRegister() { return edx; }
-const Register StringCompareDescriptor::RightRegister() { return eax; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
-const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
-
-const Register MathPowTaggedDescriptor::exponent() { return eax; }
-
-const Register MathPowIntegerDescriptor::exponent() {
- return MathPowTaggedDescriptor::exponent();
-}
-
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-
-
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // SharedFunctionInfo, vector, slot index.
- Register registers[] = {ebx, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastCloneRegExpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, eax, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax, ebx, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax, ebx, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ebx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CreateWeakCellDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ebx, edx, edi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CallFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void CallICTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, eax, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallICDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, eax, edx, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void CallConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // ebx : feedback vector
- // ecx : new target (for IsSuperConstructorCall)
- // edx : slot in feedback vector (Smi, for RecordCallTarget)
- // edi : constructor function
- // TODO(turbofan): So far we don't gather type feedback and hence skip the
- // slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {eax, edi, ecx, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edi : the target to call
- Register registers[] = {edi, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // ecx : start index (to support rest parameters)
- // edi : the target to call
- Register registers[] = {edi, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // edi : the target to call
- // ebx : allocation site or undefined
- Register registers[] = {edi, edx, eax, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ConstructTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // edi : the target to call
- Register registers[] = {edi, edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void TransitionElementsKindDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void VarArgFunctionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack parameter count (the argument count, in eax)
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx, edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- // edx -- lhs
- // eax -- rhs
- // edi -- slot id
- // ebx -- vector
- Register registers[] = {edx, eax, edi, ebx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CountOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void StringAddDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // JSFunction
- edx, // the new target
- eax, // actual number of arguments
- ebx, // expected number of arguments
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // callee
- ebx, // call_data
- ecx, // holder
- edx, // api_function_address
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- ebx, // address of first argument
- edi // the target callable to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- edx, // new target
- edi, // constructor
- ebx, // allocation site feedback
- ecx, // address of first argument
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructArrayDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- edx, // the target of the call. It is checked to be the Array function.
- ebx, // allocation site feedback
- ecx, // address of first argument
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterCEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (argc)
- ecx, // address of first argument (argv)
- ebx // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // the value to pass to the generator
- ebx, // the JSGeneratorObject to resume
- edx // the resume mode (tagged)
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
deleted file mode 100644
index e7a512cd5b..0000000000
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/base/bits.h"
-#include "src/base/division-by-constant.h"
-#include "src/bootstrapper.h"
-#include "src/codegen.h"
-#include "src/debug/debug.h"
-#include "src/runtime/runtime.h"
-#include "src/x87/frames-x87.h"
-#include "src/x87/macro-assembler-x87.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// MacroAssembler implementation.
-
-MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object)
- : Assembler(isolate, buffer, size),
- generating_stub_(false),
- has_frame_(false),
- isolate_(isolate) {
- if (create_code_object == CodeObjectRequired::kYes) {
- code_object_ =
- Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
- }
-}
-
-
-void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8()) {
- movsx_b(dst, src);
- } else if (r.IsUInteger8()) {
- movzx_b(dst, src);
- } else if (r.IsInteger16()) {
- movsx_w(dst, src);
- } else if (r.IsUInteger16()) {
- movzx_w(dst, src);
- } else {
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
- DCHECK(!r.IsDouble());
- if (r.IsInteger8() || r.IsUInteger8()) {
- mov_b(dst, src);
- } else if (r.IsInteger16() || r.IsUInteger16()) {
- mov_w(dst, src);
- } else {
- if (r.IsHeapObject()) {
- AssertNotSmi(src);
- } else if (r.IsSmi()) {
- AssertSmi(src);
- }
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
- mov(destination, isolate()->heap()->root_handle(index));
- return;
- }
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(destination, Immediate(index));
- mov(destination, Operand::StaticArray(destination,
- times_pointer_size,
- roots_array_start));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Register scratch,
- Heap::RootListIndex index) {
- DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(index));
- mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- source);
-}
-
-
-void MacroAssembler::CompareRoot(Register with,
- Register scratch,
- Heap::RootListIndex index) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(index));
- cmp(with, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- cmp(with, isolate()->heap()->root_handle(index));
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- cmp(with, isolate()->heap()->root_handle(index));
-}
-
-
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Push(isolate()->heap()->root_handle(index));
-}
-
-#define REG(Name) \
- { Register::kCode_##Name }
-
-static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
-
-#undef REG
-
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-
-void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1, Register exclusion2,
- Register exclusion3) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- for (int i = 0; i < kNumberOfSavedRegs; i++) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
- }
- }
- if (fp_mode == kSaveFPRegs) {
- // Save FPU state in m108byte.
- sub(esp, Immediate(108));
- fnsave(Operand(esp, 0));
- }
-}
-
-void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
- if (fp_mode == kSaveFPRegs) {
- // Restore FPU state in m108byte.
- frstor(Operand(esp, 0));
- add(esp, Immediate(108));
- }
-
- for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
- }
- }
-}
-
-void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
- Label* condition_met,
- Label::Distance distance) {
- CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
- condition_met, distance);
-}
-
-
-void MacroAssembler::RememberedSetHelper(
- Register object, // Only used for debug checks.
- Register addr, Register scratch, SaveFPRegsMode save_fp,
- MacroAssembler::RememberedSetFinalAction and_then) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(scratch, Operand::StaticVariable(store_buffer));
- // Store pointer to buffer.
- mov(Operand(scratch, 0), addr);
- // Increment buffer top.
- add(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- mov(Operand::StaticVariable(store_buffer), scratch);
- // Call stub on end of buffer.
- // Check for end of buffer.
- test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- DCHECK(and_then == kFallThroughAtEnd);
- j(not_equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- DCHECK(and_then == kFallThroughAtEnd);
- bind(&done);
- }
-}
-
-
-void MacroAssembler::ClampTOSToUint8(Register result_reg) {
- Label done, conv_failure;
- sub(esp, Immediate(kPointerSize));
- fnclex();
- fist_s(Operand(esp, 0));
- pop(result_reg);
- X87CheckIA();
- j(equal, &conv_failure, Label::kNear);
- test(result_reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- setcc(sign, result_reg);
- sub(result_reg, Immediate(1));
- and_(result_reg, Immediate(255));
- jmp(&done, Label::kNear);
- bind(&conv_failure);
- fnclex();
- fldz();
- fld(1);
- FCmp();
- setcc(below, result_reg); // 1 if negative, 0 if positive.
- dec_b(result_reg); // 0 if negative, 255 if positive.
- bind(&done);
-}
-
-
-void MacroAssembler::ClampUint8(Register reg) {
- Label done;
- test(reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- setcc(negative, reg); // 1 if negative, 0 if positive.
- dec_b(reg); // 0 if negative, 255 if positive.
- bind(&done);
-}
-
-
-void MacroAssembler::SlowTruncateToI(Register result_reg,
- Register input_reg,
- int offset) {
- DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
- call(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
- sub(esp, Immediate(kDoubleSize));
- fst_d(MemOperand(esp, 0));
- SlowTruncateToI(result_reg, esp, 0);
- add(esp, Immediate(kDoubleSize));
-}
-
-
-void MacroAssembler::X87TOSToI(Register result_reg,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan,
- Label* minus_zero, Label::Distance dst) {
- Label done;
- sub(esp, Immediate(kPointerSize));
- fld(0);
- fist_s(MemOperand(esp, 0));
- fild_s(MemOperand(esp, 0));
- pop(result_reg);
- FCmp();
- j(not_equal, lost_precision, dst);
- j(parity_even, is_nan, dst);
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- test(result_reg, Operand(result_reg));
- j(not_zero, &done, Label::kNear);
- // To check for minus zero, we load the value again as float, and check
- // if that is still 0.
- sub(esp, Immediate(kPointerSize));
- fst_s(MemOperand(esp, 0));
- pop(result_reg);
- test(result_reg, Operand(result_reg));
- j(not_zero, minus_zero, dst);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
- Register input_reg) {
- Label done, slow_case;
-
- SlowTruncateToI(result_reg, input_reg);
- bind(&done);
-}
-
-
-void MacroAssembler::LoadUint32NoSSE2(const Operand& src) {
- Label done;
- push(src);
- fild_s(Operand(esp, 0));
- cmp(src, Immediate(0));
- j(not_sign, &done, Label::kNear);
- ExternalReference uint32_bias =
- ExternalReference::address_of_uint32_bias();
- fld_d(Operand::StaticVariable(uint32_bias));
- faddp(1);
- bind(&done);
- add(esp, Immediate(kPointerSize));
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object, int offset, Register value, Register dst,
- SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action,
- SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so the offset must be a multiple of kPointerSize.
- DCHECK(IsAligned(offset, kPointerSize));
-
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- test_b(dst, Immediate(kPointerSize - 1));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK, pointers_to_here_check_for_value);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
- Register scratch1, Register scratch2,
- SaveFPRegsMode save_fp) {
- Label done;
-
- Register address = scratch1;
- Register value = scratch2;
- if (emit_debug_code()) {
- Label ok;
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, Immediate(kPointerSize - 1));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- DCHECK(!object.is(value));
- DCHECK(!object.is(address));
- DCHECK(!value.is(address));
- AssertNotSmi(object);
-
- if (!FLAG_incremental_marking) {
- return;
- }
-
- // Compute the address.
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
-
-  // A single check of the map's page's interesting flag suffices, since it is
- // only set during incremental collection, and then it's also guaranteed that
- // the from object's page's interesting flag is also set. This optimization
- // relies on the fact that maps can never be in new space.
- DCHECK(!isolate()->heap()->InNewSpace(*map));
- CheckPageFlagForMap(map,
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
- save_fp);
- CallStub(&stub);
-
- bind(&done);
-
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWrite(
- Register object, Register address, Register value, SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action, SmiCheck smi_check,
- PointersToHereCheck pointers_to_here_check_for_value) {
- DCHECK(!object.is(value));
- DCHECK(!object.is(address));
- DCHECK(!value.is(address));
- AssertNotSmi(object);
-
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
-
- if (emit_debug_code()) {
- Label ok;
- cmp(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
- }
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
- fp_mode);
- CallStub(&stub);
-
- bind(&done);
-
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
- mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
- }
-}
-
-void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
- Register code_entry,
- Register scratch) {
- const int offset = JSFunction::kCodeEntryOffset;
-
-  // Since a code entry (value) is always in old space, we don't need to
-  // update the remembered set. If incremental marking is off, there is
-  // nothing for us to do.
- if (!FLAG_incremental_marking) return;
-
- DCHECK(!js_function.is(code_entry));
- DCHECK(!js_function.is(scratch));
- DCHECK(!code_entry.is(scratch));
- AssertNotSmi(js_function);
-
- if (emit_debug_code()) {
- Label ok;
- lea(scratch, FieldOperand(js_function, offset));
- cmp(code_entry, Operand(scratch, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- Label done;
-
- CheckPageFlag(code_entry, scratch,
- MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
- Label::kNear);
- CheckPageFlag(js_function, scratch,
- MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
- Label::kNear);
-
- // Save input registers.
- push(js_function);
- push(code_entry);
-
- const Register dst = scratch;
- lea(dst, FieldOperand(js_function, offset));
-
- // Save caller-saved registers.
- PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
-
- int argument_count = 3;
- PrepareCallCFunction(argument_count, code_entry);
- mov(Operand(esp, 0 * kPointerSize), js_function);
- mov(Operand(esp, 1 * kPointerSize), dst); // Slot.
- mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
-
- {
- AllowExternalCallThatCantCauseGC scope(this);
- CallCFunction(
- ExternalReference::incremental_marking_record_write_code_entry_function(
- isolate()),
- argument_count);
- }
-
- // Restore caller-saved registers.
- PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
-
- // Restore input registers.
- pop(code_entry);
- pop(js_function);
-
- bind(&done);
-}
-
-void MacroAssembler::DebugBreak() {
- Move(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
- isolate())));
- CEntryStub ces(isolate(), 1);
- call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
-}
-
-void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
- if (shift >= 32) {
- mov(high, low);
- shl(high, shift - 32);
- xor_(low, low);
- } else {
- shld(high, low, shift);
- shl(low, shift);
- }
-}
-
-void MacroAssembler::ShlPair_cl(Register high, Register low) {
- shld_cl(high, low);
- shl_cl(low);
- Label done;
- test(ecx, Immediate(0x20));
- j(equal, &done, Label::kNear);
- mov(high, low);
- xor_(low, low);
- bind(&done);
-}
-
-void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
- if (shift >= 32) {
- mov(low, high);
- shr(low, shift - 32);
- xor_(high, high);
- } else {
- shrd(high, low, shift);
- shr(high, shift);
- }
-}
-
-void MacroAssembler::ShrPair_cl(Register high, Register low) {
- shrd_cl(low, high);
- shr_cl(high);
- Label done;
- test(ecx, Immediate(0x20));
- j(equal, &done, Label::kNear);
- mov(low, high);
- xor_(high, high);
- bind(&done);
-}
-
-void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
- if (shift >= 32) {
- mov(low, high);
- sar(low, shift - 32);
- sar(high, 31);
- } else {
- shrd(high, low, shift);
- sar(high, shift);
- }
-}
-
-void MacroAssembler::SarPair_cl(Register high, Register low) {
- shrd_cl(low, high);
- sar_cl(high);
- Label done;
- test(ecx, Immediate(0x20));
- j(equal, &done, Label::kNear);
- mov(low, high);
- sar(high, 31);
- bind(&done);
-}
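// --- Editorial sketch (not part of the removed V8 source) -------------------
// The *Pair helpers above implement a 64-bit shift on a (high:low) register
// pair. A plain C++ reference for the constant-shift variants (shift in
// [0, 63], <cstdint> types assumed); the _cl variants additionally handle
// x86's shift-count-mod-32 behaviour by testing bit 0x20 of ecx.
static inline uint64_t ShlPairSketch(uint32_t high, uint32_t low,
                                     unsigned shift) {
  uint64_t v = (static_cast<uint64_t>(high) << 32) | low;
  return v << shift;  // ShlPair leaves the result split back into high:low.
}
static inline uint64_t SarPairSketch(uint32_t high, uint32_t low,
                                     unsigned shift) {
  int64_t v = static_cast<int64_t>((static_cast<uint64_t>(high) << 32) | low);
  return static_cast<uint64_t>(v >> shift);  // arithmetic shift, like SarPair.
}
// -----------------------------------------------------------------------------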
-
-bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
- static const int kMaxImmediateBits = 17;
- if (!RelocInfo::IsNone(x.rmode_)) return false;
- return !is_intn(x.x_, kMaxImmediateBits);
-}
-
-
-void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
- if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Move(dst, Immediate(x.x_ ^ jit_cookie()));
- xor_(dst, jit_cookie());
- } else {
- Move(dst, x);
- }
-}
-
-
-void MacroAssembler::SafePush(const Immediate& x) {
- if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- push(Immediate(x.x_ ^ jit_cookie()));
- xor_(Operand(esp, 0), Immediate(jit_cookie()));
- } else {
- push(x);
- }
-}
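// --- Editorial sketch (not part of the removed V8 source) -------------------
// What SafeMove/SafePush achieve, in plain C++: immediates wider than the
// 17-bit limit checked by IsUnsafeImmediate are never embedded verbatim in
// the instruction stream; the code embeds value ^ cookie and XORs the cookie
// back at run time. `jit_cookie` here stands in for jit_cookie().
static inline uint32_t SafeImmediateSketch(uint32_t value, uint32_t jit_cookie) {
  uint32_t embedded = value ^ jit_cookie;  // what ends up in the code stream
  return embedded ^ jit_cookie;            // recovered at run time == value
}
// -----------------------------------------------------------------------------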
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
-}
-
-void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
- cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- CompareMap(obj, map);
- j(not_equal, fail);
-}
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- test(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::FCmp() {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
-}
-
-
-void MacroAssembler::FXamMinusZero() {
- fxam();
- push(eax);
- fnstsw_ax();
- and_(eax, Immediate(0x4700));
- // For minus zero, C3 == 1 && C1 == 1.
- cmp(eax, Immediate(0x4200));
- pop(eax);
- fstp(0);
-}
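// --- Editorial note (not part of the removed V8 source) ---------------------
// Worked bit arithmetic for FXamMinusZero above, assuming the usual x87
// status-word layout: C0 = 0x0100, C1 = 0x0200, C2 = 0x0400, C3 = 0x4000.
// The mask 0x4700 keeps exactly C3|C2|C1|C0; FXAM reports the "zero" class as
// C3=1, C2=0, C0=0 with the operand's sign in C1, so the masked value equals
// 0x4200 (C3|C1) only for -0.0.
enum : uint16_t { kC0 = 0x0100, kC1 = 0x0200, kC2 = 0x0400, kC3 = 0x4000 };
static_assert((kC3 | kC2 | kC1 | kC0) == 0x4700, "mask used by and_()");
static_assert((kC3 | kC1) == 0x4200, "compare value: zero class, negative sign");
// -----------------------------------------------------------------------------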
-
-
-void MacroAssembler::FXamSign() {
- fxam();
- push(eax);
- fnstsw_ax();
- // For negative value (including -0.0), C1 == 1.
- and_(eax, Immediate(0x0200));
- pop(eax);
- fstp(0);
-}
-
-
-void MacroAssembler::X87CheckIA() {
- push(eax);
- fnstsw_ax();
- // For #IA, IE == 1 && SF == 0.
- and_(eax, Immediate(0x0041));
- cmp(eax, Immediate(0x0001));
- pop(eax);
-}
-
-
-// rc=00B, round to nearest.
-// rc=01B, round down.
-// rc=10B, round up.
-// rc=11B, round toward zero.
-void MacroAssembler::X87SetRC(int rc) {
- sub(esp, Immediate(kPointerSize));
- fnstcw(MemOperand(esp, 0));
- and_(MemOperand(esp, 0), Immediate(0xF3FF));
- or_(MemOperand(esp, 0), Immediate(rc));
- fldcw(MemOperand(esp, 0));
- add(esp, Immediate(kPointerSize));
-}
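// --- Editorial note (not part of the removed V8 source) ---------------------
// The rounding-control field occupies bits 11:10 of the x87 control word, and
// 0xF3FF clears exactly those bits, so callers are expected to pass `rc`
// already shifted into place: 0x0000 nearest, 0x0400 down, 0x0800 up, 0x0C00
// toward zero (matching the rc=00B..11B table above).
enum : uint16_t { kRCNearest = 0x0000, kRCDown = 0x0400,
                  kRCUp = 0x0800, kRCTowardZero = 0x0C00 };
static_assert((0xF3FF | kRCTowardZero) == 0xFFFF, "0xF3FF clears bits 11:10");
// -----------------------------------------------------------------------------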
-
-
-void MacroAssembler::X87SetFPUCW(int cw) {
- RecordComment("-- X87SetFPUCW start --");
- push(Immediate(cw));
- fldcw(MemOperand(esp, 0));
- add(esp, Immediate(kPointerSize));
- RecordComment("-- X87SetFPUCW end--");
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(equal, kOperandIsNotASmi);
- }
-}
-
-
-void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAFunction);
- Push(object);
- CmpObjectType(object, JS_FUNCTION_TYPE, object);
- Pop(object);
- Check(equal, kOperandIsNotAFunction);
- }
-}
-
-
-void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotABoundFunction);
- Push(object);
- CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
- Pop(object);
- Check(equal, kOperandIsNotABoundFunction);
- }
-}
-
-void MacroAssembler::AssertGeneratorObject(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
- Push(object);
- CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
- Pop(object);
- Check(equal, kOperandIsNotAGeneratorObject);
- }
-}
-
-void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (emit_debug_code()) {
- Label done_checking;
- AssertNotSmi(object);
- cmp(object, isolate()->factory()->undefined_value());
- j(equal, &done_checking);
- cmp(FieldOperand(object, 0),
- Immediate(isolate()->factory()->allocation_site_map()));
- Assert(equal, kExpectedUndefinedOrCell);
- bind(&done_checking);
- }
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmi);
- }
-}
-
-void MacroAssembler::StubPrologue(StackFrame::Type type) {
- push(ebp); // Caller's frame pointer.
- mov(ebp, esp);
- push(Immediate(Smi::FromInt(type)));
-}
-
-
-void MacroAssembler::Prologue(bool code_pre_aging) {
- PredictableCodeSizeScope predictible_code_size_scope(this,
- kNoCodeAgeSequenceLength);
- if (code_pre_aging) {
- // Pre-age the code.
- call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
- Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
- } else {
- push(ebp); // Caller's frame pointer.
- mov(ebp, esp);
- push(esi); // Callee's context.
- push(edi); // Callee's JS function.
- }
-}
-
-void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
- mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- mov(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
- mov(vector, FieldOperand(vector, Cell::kValueOffset));
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on x87.
- UNREACHABLE();
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(ebp);
- mov(ebp, esp);
- push(Immediate(Smi::FromInt(type)));
- if (type == StackFrame::INTERNAL) {
- push(Immediate(CodeObject()));
- }
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, kCodeObjectNotProperlyPatched);
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(Smi::FromInt(type)));
- Check(equal, kStackFrameTypesMustMatch);
- }
- leave();
-}
-
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
- Register argc) {
- Push(ebp);
- Move(ebp, esp);
- Push(context);
- Push(target);
- Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
- Register argc) {
- Pop(argc);
- Pop(target);
- Pop(context);
- leave();
-}
-
-void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
- DCHECK(frame_type == StackFrame::EXIT ||
- frame_type == StackFrame::BUILTIN_EXIT);
-
- // Set up the frame structure on the stack.
- DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- push(ebp);
- mov(ebp, esp);
-
- // Reserve room for entry stack pointer and push the code object.
- push(Immediate(Smi::FromInt(frame_type)));
- DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
- push(Immediate(0)); // Saved entry sp, patched before call.
- DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
- push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), ebp);
- mov(Operand::StaticVariable(context_address), esi);
- mov(Operand::StaticVariable(c_function_address), ebx);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
- // Optionally save FPU state.
- if (save_doubles) {
- // Store FPU state to m108byte.
- int space = 108 + argc * kPointerSize;
- sub(esp, Immediate(space));
- const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- fnsave(MemOperand(ebp, offset - 108));
- } else {
- sub(esp, Immediate(argc * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = base::OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
- and_(esp, -kFrameAlignment);
- }
-
- // Patch the saved entry sp.
- mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
-}
-
-void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
- StackFrame::Type frame_type) {
- EnterExitFramePrologue(frame_type);
-
- // Set up argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, eax);
- lea(esi, Operand(ebp, eax, times_4, offset));
-
- // Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(argc, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue(StackFrame::EXIT);
- EnterExitFrameEpilogue(argc, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
- // Optionally restore FPU state.
- if (save_doubles) {
- const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- frstor(MemOperand(ebp, offset - 108));
- }
-
- if (pop_arguments) {
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
-
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(ecx);
- } else {
- // Otherwise just leave the exit frame.
- leave();
- }
-
- LeaveExitFrameEpilogue(true);
-}
-
-
-void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- if (restore_context) {
- mov(esi, Operand::StaticVariable(context_address));
- }
-#ifdef DEBUG
- mov(Operand::StaticVariable(context_address), Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
-}
-
-
-void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
- mov(esp, ebp);
- pop(ebp);
-
- LeaveExitFrameEpilogue(restore_context);
-}
-
-
-void MacroAssembler::PushStackHandler() {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-
- // Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(Operand::StaticVariable(handler_address));
-
- // Set this new handler as the current one.
- mov(Operand::StaticVariable(handler_address), esp);
-}
-
-
-void MacroAssembler::PopStackHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(Operand::StaticVariable(handler_address));
- add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
-// code-stub-hydrogen.cc
-//
-// Note: r0 will contain hash code
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
- // Xor original key with a seed.
- if (serializer_enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- mov(scratch,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- SmiUntag(scratch);
- xor_(r0, scratch);
- } else {
- int32_t seed = isolate()->heap()->HashSeed();
- xor_(r0, Immediate(seed));
- }
-
- // hash = ~hash + (hash << 15);
- mov(scratch, r0);
- not_(r0);
- shl(scratch, 15);
- add(r0, scratch);
- // hash = hash ^ (hash >> 12);
- mov(scratch, r0);
- shr(scratch, 12);
- xor_(r0, scratch);
- // hash = hash + (hash << 2);
- lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- mov(scratch, r0);
- shr(scratch, 4);
- xor_(r0, scratch);
- // hash = hash * 2057;
- imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- mov(scratch, r0);
- shr(scratch, 16);
- xor_(r0, scratch);
- and_(r0, 0x3fffffff);
-}
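// --- Editorial sketch (not part of the removed V8 source) -------------------
// The same integer-hash mix in plain C++ (<cstdint> assumed), mirroring the
// assembly above step by step; the lea computes hash + (hash << 2), i.e.
// hash * 5.
static inline uint32_t NumberHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}
// -----------------------------------------------------------------------------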
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- DCHECK(scratch.is(no_reg));
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(allocation_top));
- Check(equal, kUnexpectedAllocationTop);
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available.
- if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(allocation_top));
- } else {
- mov(scratch, Immediate(allocation_top));
- mov(result, Operand(scratch, 0));
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch,
- AllocationFlags flags) {
- if (emit_debug_code()) {
- test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, kUnalignedAllocationInNewSpace);
- }
-
- ExternalReference allocation_top =
- AllocationUtils::GetAllocationTopReference(isolate(), flags);
-
- // Update new top. Use scratch if available.
- if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(allocation_top), result_end);
- } else {
- mov(Operand(scratch, 0), result_end);
- }
-}
-
-
-void MacroAssembler::Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK(object_size <= kMaxRegularHeapObjectSize);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- mov(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- DCHECK(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
- cmp(result, Operand::StaticVariable(allocation_limit));
- j(above_equal, gc_required);
- }
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if space is exhausted.
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- if (!top_reg.is(result)) {
- mov(top_reg, result);
- }
- add(top_reg, Immediate(object_size));
- cmp(top_reg, Operand::StaticVariable(allocation_limit));
- j(above, gc_required);
-
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
- }
-
- if (top_reg.is(result)) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- // Tag the result.
- DCHECK(kHeapObjectTag == 1);
- inc(result);
- }
-}
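// --- Editorial sketch (not part of the removed V8 source) -------------------
// The inline-allocation protocol the Allocate() variants emit, written as
// plain C++ (single mutator thread assumed; names are illustrative). Where
// this sketch returns 0, the generated code jumps to gc_required instead.
static inline uintptr_t BumpAllocateSketch(uintptr_t* top, uintptr_t limit,
                                           size_t object_size) {
  uintptr_t result = *top;             // LoadAllocationTopHelper
  uintptr_t new_top = result + object_size;
  if (new_top > limit) return 0;       // cmp top, limit; j(above, gc_required)
  *top = new_top;                      // UpdateAllocationTopHelper
  return result + 1;                   // tag the result: kHeapObjectTag == 1
}
// -----------------------------------------------------------------------------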
-
-
-void MacroAssembler::Allocate(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK((flags & SIZE_IN_WORDS) == 0);
- DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- DCHECK(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
- cmp(result, Operand::StaticVariable(allocation_limit));
- j(above_equal, gc_required);
- }
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if space is exhausted.
- // We assume that element_count*element_size + header_size does not
- // overflow.
- if (element_count_type == REGISTER_VALUE_IS_SMI) {
- STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
- STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
- STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
- DCHECK(element_size >= times_2);
- DCHECK(kSmiTagSize == 1);
- element_size = static_cast<ScaleFactor>(element_size - 1);
- } else {
- DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
- }
- lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, result);
- j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(allocation_limit));
- j(above, gc_required);
-
- // Tag result.
- DCHECK(kHeapObjectTag == 1);
- inc(result);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
-}
-
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- DCHECK((flags & ALLOCATION_FOLDED) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- DCHECK(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
- cmp(result, Operand::StaticVariable(allocation_limit));
- j(above_equal, gc_required);
- }
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if space is exhausted.
- if (!object_size.is(result_end)) {
- mov(result_end, object_size);
- }
- add(result_end, result);
- cmp(result_end, Operand::StaticVariable(allocation_limit));
- j(above, gc_required);
-
- // Tag result.
- DCHECK(kHeapObjectTag == 1);
- inc(result);
-
- if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
- // The top pointer is not updated for allocation folding dominators.
- UpdateAllocationTopHelper(result_end, scratch, flags);
- }
-}
-
-void MacroAssembler::FastAllocate(int object_size, Register result,
- Register result_end, AllocationFlags flags) {
- DCHECK(!result.is(result_end));
- // Load address of new object into result.
- LoadAllocationTopHelper(result, no_reg, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- lea(result_end, Operand(result, object_size));
- UpdateAllocationTopHelper(result_end, no_reg, flags);
-
- DCHECK(kHeapObjectTag == 1);
- inc(result);
-}
-
-void MacroAssembler::FastAllocate(Register object_size, Register result,
- Register result_end, AllocationFlags flags) {
- DCHECK(!result.is(result_end));
- // Load address of new object into result.
- LoadAllocationTopHelper(result, no_reg, flags);
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- lea(result_end, Operand(result, object_size, times_1, 0));
- UpdateAllocationTopHelper(result_end, no_reg, flags);
-
- DCHECK(kHeapObjectTag == 1);
- inc(result);
-}
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- MutableMode mode) {
- // Allocate heap number in new space.
- Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- Handle<Map> map = mode == MUTABLE
- ? isolate()->factory()->mutable_heap_number_map()
- : isolate()->factory()->heap_number_map();
-
- // Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
-}
-
-void MacroAssembler::AllocateJSValue(Register result, Register constructor,
- Register value, Register scratch,
- Label* gc_required) {
- DCHECK(!result.is(constructor));
- DCHECK(!result.is(scratch));
- DCHECK(!result.is(value));
-
- // Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Initialize the JSValue.
- LoadGlobalFunctionInitialMap(constructor, scratch);
- mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
- LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- mov(FieldOperand(result, JSValue::kValueOffset), value);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-}
-
-void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
- Register end_address,
- Register filler) {
- Label loop, entry;
- jmp(&entry, Label::kNear);
- bind(&loop);
- mov(Operand(current_address, 0), filler);
- add(current_address, Immediate(kPointerSize));
- bind(&entry);
- cmp(current_address, end_address);
- j(below, &loop, Label::kNear);
-}
-
-
-void MacroAssembler::BooleanBitTest(Register object,
- int field_offset,
- int bit_index) {
- bit_index += kSmiTagSize + kSmiShiftSize;
- DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
- int byte_index = bit_index / kBitsPerByte;
- int byte_bit_index = bit_index & (kBitsPerByte - 1);
- test_b(FieldOperand(object, field_offset + byte_index),
- Immediate(1 << byte_bit_index));
-}
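// --- Editorial note (not part of the removed V8 source) ---------------------
// Worked example for BooleanBitTest above, assuming the 32-bit Smi layout
// (kSmiTagSize = 1, kSmiShiftSize = 0): bit_index = 5 becomes 5 + 1 = 6, so
// byte_index = 6 / 8 = 0 and byte_bit_index = 6 % 8 = 6, and the emitted
// instruction is test_b(<field byte 0>, Immediate(1 << 6)).
// -----------------------------------------------------------------------------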
-
-void MacroAssembler::GetMapConstructor(Register result, Register map,
- Register temp) {
- Label done, loop;
- mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
- bind(&loop);
- JumpIfSmi(result, &done, Label::kNear);
- CmpObjectType(result, MAP_TYPE, temp);
- j(not_equal, &done, Label::kNear);
- mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
- jmp(&loop);
- bind(&done);
-}
-
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- return has_frame_ || !stub->SometimesSetsUpAFrame();
-}
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles) {
- // If the expected number of arguments of the runtime function is
-  // constant, we check that the actual number of arguments matches the
- // expectation.
- CHECK(f->nargs < 0 || f->nargs == num_arguments);
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Move(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(isolate(), 1, save_doubles);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(ExternalReference ref,
- int num_arguments) {
- mov(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ref));
-
- CEntryStub stub(isolate(), 1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[8] : argument num_arguments - 1
- // ...
- // -- esp[8 * num_arguments] : argument 0 (receiver)
- //
- // For runtime functions with variable arguments:
- // -- eax : number of arguments
- // -----------------------------------
-
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- DCHECK_EQ(1, function->result_size);
- if (function->nargs >= 0) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(eax, Immediate(function->nargs));
- }
- JumpToExternalReference(ExternalReference(fid, isolate()));
-}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- bool builtin_exit_frame) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- builtin_exit_frame);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-void MacroAssembler::PrepareForTailCall(
- const ParameterCount& callee_args_count, Register caller_args_count_reg,
- Register scratch0, Register scratch1, ReturnAddressState ra_state,
- int number_of_temp_values_after_return_address) {
-#if DEBUG
- if (callee_args_count.is_reg()) {
- DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
- scratch1));
- } else {
- DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
- }
- DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
- number_of_temp_values_after_return_address == 0);
-#endif
-
- // Calculate the destination address where we will put the return address
- // after we drop current frame.
- Register new_sp_reg = scratch0;
- if (callee_args_count.is_reg()) {
- sub(caller_args_count_reg, callee_args_count.reg());
- lea(new_sp_reg,
- Operand(ebp, caller_args_count_reg, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset -
- number_of_temp_values_after_return_address * kPointerSize));
- } else {
- lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
- StandardFrameConstants::kCallerPCOffset -
- (callee_args_count.immediate() +
- number_of_temp_values_after_return_address) *
- kPointerSize));
- }
-
- if (FLAG_debug_code) {
- cmp(esp, new_sp_reg);
- Check(below, kStackAccessBelowStackPointer);
- }
-
- // Copy return address from caller's frame to current frame's return address
- // to avoid its trashing and let the following loop copy it to the right
- // place.
- Register tmp_reg = scratch1;
- if (ra_state == ReturnAddressState::kOnStack) {
- mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
- tmp_reg);
- } else {
- DCHECK(ReturnAddressState::kNotOnStack == ra_state);
- DCHECK_EQ(0, number_of_temp_values_after_return_address);
- Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
- }
-
- // Restore caller's frame pointer now as it could be overwritten by
- // the copying loop.
- mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // +2 here is to copy both receiver and return address.
- Register count_reg = caller_args_count_reg;
- if (callee_args_count.is_reg()) {
- lea(count_reg, Operand(callee_args_count.reg(),
- 2 + number_of_temp_values_after_return_address));
- } else {
- mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
- number_of_temp_values_after_return_address));
- // TODO(ishell): Unroll copying loop for small immediate values.
- }
-
- // Now copy callee arguments to the caller frame going backwards to avoid
- // callee arguments corruption (source and destination areas could overlap).
- Label loop, entry;
- jmp(&entry, Label::kNear);
- bind(&loop);
- dec(count_reg);
- mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
- mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
- bind(&entry);
- cmp(count_reg, Immediate(0));
- j(not_equal, &loop, Label::kNear);
-
- // Leave current frame.
- mov(esp, new_sp_reg);
-}
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance done_near,
- const CallWrapper& call_wrapper) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label invoke;
- if (expected.is_immediate()) {
- DCHECK(actual.is_immediate());
- mov(eax, actual.immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- mov(ebx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- mov(eax, actual.immediate());
- cmp(expected.reg(), actual.immediate());
- j(equal, &invoke);
- DCHECK(expected.reg().is(ebx));
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmp(expected.reg(), actual.reg());
- j(equal, &invoke);
- DCHECK(actual.reg().is(eax));
- DCHECK(expected.reg().is(ebx));
- } else {
- Move(eax, actual.reg());
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- call(adaptor, RelocInfo::CODE_TARGET);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- jmp(done, done_near);
- }
- } else {
- jmp(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_hook;
- ExternalReference debug_hook_active =
- ExternalReference::debug_hook_on_function_call_address(isolate());
- cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
- j(equal, &skip_hook);
- {
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
- if (expected.is_reg()) {
- SmiTag(expected.reg());
- Push(expected.reg());
- }
- if (actual.is_reg()) {
- SmiTag(actual.reg());
- Push(actual.reg());
- }
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
- if (actual.is_reg()) {
- Pop(actual.reg());
- SmiUntag(actual.reg());
- }
- if (expected.is_reg()) {
- Pop(expected.reg());
- SmiUntag(expected.reg());
- }
- }
- bind(&skip_hook);
-}
-
-
-void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function.is(edi));
- DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
-
- if (call_wrapper.NeedsDebugHookCheck()) {
- CheckDebugHook(function, new_target, expected, actual);
- }
-
- // Clear the new.target register if not given.
- if (!new_target.is_valid()) {
- mov(edx, isolate()->factory()->undefined_value());
- }
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
- Label::kNear, call_wrapper);
- if (!definitely_mismatches) {
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- call(code);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- jmp(code);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun, Register new_target,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(fun.is(edi));
- mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
- SmiUntag(ebx);
-
- ParameterCount expected(ebx);
- InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(fun.is(edi));
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- LoadHeapObject(edi, function);
- InvokeFunction(edi, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mov(dst, esi);
- }
-
- // We should not have found a with context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- cmp(FieldOperand(dst, HeapObject::kMapOffset),
- isolate()->factory()->with_context_map());
- Check(not_equal, kVariableResolvedToWithContext);
- }
-}
-
-
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- mov(dst, NativeContextOperand());
- mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
-}
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the native context from the current context.
- mov(function, NativeContextOperand());
- // Load the function from the native context.
- mov(function, ContextOperand(function, index));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
- bind(&ok);
- }
-}
-
-
-// Store the value in register src in the safepoint register stack
-// slot for register dst.
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- mov(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the lowest encoding,
- // which means that lowest encodings are furthest away from
- // the stack pointer.
- DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return kNumSafepointRegisters - reg_code - 1;
-}
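// --- Editorial note (not part of the removed V8 source) ---------------------
// Example, assuming kNumSafepointRegisters == 8 on this port: eax (code 0) is
// pushed first and therefore ends up furthest from esp, at slot
// 8 - 0 - 1 = 7, while the register with code 7 is pushed last and sits at
// slot 0.
// -----------------------------------------------------------------------------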
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- mov(result, object);
-}
-
-
-void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- cmp(reg, object);
-}
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { Push(object); }
-
-void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
- mov(value, cell);
- mov(value, FieldOperand(value, WeakCell::kValueOffset));
-}
-
-
-void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
- Label* miss) {
- GetWeakValue(value, cell);
- JumpIfSmi(value, miss);
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- add(esp, Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
- // Turn off the stack depth check when serializer is enabled to reduce the
- // code size.
- if (serializer_enabled()) return;
- // Make sure the floating point stack is either empty or has depth items.
- DCHECK(depth <= 7);
- // This is very expensive.
- DCHECK(FLAG_debug_code && FLAG_enable_slow_asserts);
-
- // The top-of-stack (tos) is 7 if there is one item pushed.
- int tos = (8 - depth) % 8;
- const int kTopMask = 0x3800;
- push(eax);
- fwait();
- fnstsw_ax();
- and_(eax, kTopMask);
- shr(eax, 11);
- cmp(eax, Immediate(tos));
- Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
- fnclex();
- pop(eax);
-}
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- add(esp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::Move(Register dst, const Immediate& x) {
- if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
- xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
-void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
- // TODO(intel): Add support for LZCNT (with ABM/BMI1).
- Label not_zero_src;
- bsr(dst, src);
- j(not_zero, &not_zero_src, Label::kNear);
- Move(dst, Immediate(63)); // 63^31 == 32
- bind(&not_zero_src);
- xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
-}
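// --- Editorial sketch (not part of the removed V8 source) -------------------
// Reference for the LZCNT emulation above: bsr yields the index of the most
// significant set bit, and 31 ^ msb == 31 - msb for msb in [0, 31]; the
// zero-input path loads 63 so that 63 ^ 31 == 32.
static inline unsigned LzcntSketch(uint32_t x) {
  if (x == 0) return 63 ^ 31;   // == 32, the LZCNT result for a zero input
  unsigned msb = 0;             // what bsr would produce
  while (x >>= 1) ++msb;
  return msb ^ 31;
}
// -----------------------------------------------------------------------------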
-
-
-void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
- // TODO(intel): Add support for TZCNT (with ABM/BMI1).
- Label not_zero_src;
- bsf(dst, src);
- j(not_zero, &not_zero_src, Label::kNear);
- Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
- bind(&not_zero_src);
-}
-
-
-void MacroAssembler::Popcnt(Register dst, const Operand& src) {
- // TODO(intel): Add support for POPCNT (with POPCNT)
- // if (CpuFeatures::IsSupported(POPCNT)) {
- // CpuFeatureScope scope(this, POPCNT);
- // popcnt(dst, src);
- // return;
- // }
- UNREACHABLE();
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- DCHECK(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- inc(operand);
- } else {
- add(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- DCHECK(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- dec(operand);
- } else {
- sub(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::IncrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- DCHECK(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- IncrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DecrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- DCHECK(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- DecrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
- if (emit_debug_code()) Check(cc, reason);
-}
-
-
-
-void MacroAssembler::Check(Condition cc, BailoutReason reason) {
- Label L;
- j(cc, &L);
- Abort(reason);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = base::OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
- Label alignment_as_expected;
- test(esp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::Abort(BailoutReason reason) {
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-
- if (FLAG_trap_on_abort) {
- int3();
- return;
- }
-#endif
-
- // Check if Abort() has already been initialized.
- DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
-
- Move(edx, Smi::FromInt(static_cast<int>(reason)));
-
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
- } else {
- Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
- }
- // will not return here
- int3();
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- mov(dst, FieldOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::LoadAccessor(Register dst, Register holder,
- int accessor_index,
- AccessorComponent accessor) {
- mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
- LoadInstanceDescriptors(dst, dst);
- mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
- int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
- : AccessorPair::kSetterOffset;
- mov(dst, FieldOperand(dst, offset));
-}
-
-void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that both objects are not smis.
- STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, object1);
- and_(scratch1, object2);
- JumpIfSmi(scratch1, failure);
-
- // Load instance type for both strings.
- mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
- mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
- movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat one-byte strings.
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- // Interleave bits from both instance types and compare them in one check.
- const int kShift = 8;
- DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
- and_(scratch1, kFlatOneByteStringMask);
- and_(scratch2, kFlatOneByteStringMask);
- shl(scratch2, kShift);
- or_(scratch1, scratch2);
- cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
- j(not_equal, failure);
-}
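// --- Editorial sketch (not part of the removed V8 source) -------------------
// The interleaving trick above in plain C++: both masked instance types are
// packed into one word so a single compare checks that both strings are flat
// (sequential) one-byte strings.
static inline bool BothFlatOneByteSketch(uint8_t type1, uint8_t type2,
                                         uint8_t mask, uint8_t tag) {
  uint32_t combined =
      (type1 & mask) | (static_cast<uint32_t>(type2 & mask) << 8);
  return combined == (tag | (static_cast<uint32_t>(tag) << 8));
}
// -----------------------------------------------------------------------------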
-
-
-void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
- Label* not_unique_name,
- Label::Distance distance) {
- STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- Label succeed;
- test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
- j(zero, &succeed);
- cmpb(operand, Immediate(SYMBOL_TYPE));
- j(not_equal, not_unique_name, distance);
-
- bind(&succeed);
-}
-
-
-void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- Label is_object;
- JumpIfNotSmi(string, &is_object, Label::kNear);
- Abort(kNonObject);
- bind(&is_object);
-
- push(value);
- mov(value, FieldOperand(string, HeapObject::kMapOffset));
- movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmp(value, Immediate(encoding_mask));
- pop(value);
- Check(equal, kUnexpectedStringType);
-
-  // The index is assumed to be untagged coming in; tag it to compare with the
-  // string length without using a temp register. It is restored at the end of
-  // this function.
- SmiTag(index);
- Check(no_overflow, kIndexIsTooLarge);
-
- cmp(index, FieldOperand(string, String::kLengthOffset));
- Check(less, kIndexIsTooLarge);
-
- cmp(index, Immediate(Smi::kZero));
- Check(greater_equal, kIndexIsNegative);
-
- // Restore the index
- SmiUntag(index);
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = base::OS::ActivationFrameAlignment();
- if (frame_alignment != 0) {
- // Make stack end at alignment and make room for num_arguments words
- // and the original value of esp.
- mov(scratch, esp);
- sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
- and_(esp, -frame_alignment);
- mov(Operand(esp, num_arguments * kPointerSize), scratch);
- } else {
- sub(esp, Immediate(num_arguments * kPointerSize));
- }
-}
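// --- Editorial note (not part of the removed V8 source) ---------------------
// `and_(esp, -frame_alignment)` rounds esp down to a multiple of the
// (power-of-two) alignment, e.g. with frame_alignment = 16:
//   esp = 0x0012ff38  ->  0x0012ff38 & 0xfffffff0 = 0x0012ff30.
// The original esp is stashed in the extra slot above the arguments so that
// CallCFunction can restore it after the call returns.
// -----------------------------------------------------------------------------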
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- // Trashing eax is ok as it will be the return value.
- mov(eax, Immediate(function));
- CallCFunction(eax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- DCHECK(has_frame());
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(function);
- if (base::OS::ActivationFrameAlignment() != 0) {
- mov(esp, Operand(esp, num_arguments * kPointerSize));
- } else {
- add(esp, Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
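
The debug-only AreAliased helper compares the number of valid registers passed in with the population count of their combined bit set; any collapse in the count means two arguments name the same register. An equivalent standalone check over plain register codes (illustrative, not the V8 RegList type):

    #include <bitset>
    #include <initializer_list>

    // Returns true if any two register codes alias, ignoring the sentinel -1
    // used here for "no register".
    bool AreAliased(std::initializer_list<int> codes) {
      std::bitset<32> seen;
      int valid = 0;
      for (int code : codes) {
        if (code < 0) continue;  // skip no_reg entries
        ++valid;
        seen.set(code);
      }
      // Distinct codes equal the valid count exactly when nothing aliases.
      return static_cast<int>(seen.count()) != valid;
    }
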
-
-
-CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
- : address_(address),
- size_(size),
- masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap in order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- Assembler::FlushICache(masm_.isolate(), address_, size_);
-
- // Check that the code was patched as expected.
- DCHECK(masm_.pc_ == address_ + size_);
- DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- DCHECK(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- } else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- DCHECK(cc == zero || cc == not_zero);
- Page* page = Page::FromAddress(map->address());
- DCHECK(!serializer_enabled()); // Serializer cannot match page_flags.
- ExternalReference reference(ExternalReference::page_flags(page));
- // The inlined static address check of the page's flags relies
- // on maps never being compacted.
- DCHECK(!isolate()->heap()->mark_compact_collector()->
- IsOnEvacuationCandidate(*map));
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand::StaticVariable(reference), Immediate(mask));
- } else {
- test(Operand::StaticVariable(reference), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
- 1); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit) {
- DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
- add(mask_scratch, mask_scratch); // Shift left 1 by adding.
- j(zero, &word_boundary, Label::kNear);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- jmp(&other_color, Label::kNear);
-
- bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
- Immediate(1));
-
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
- mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- and_(bitmap_reg, addr_reg);
- mov(ecx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shr(ecx, shift);
- and_(ecx,
- (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
-
- add(bitmap_reg, ecx);
- mov(ecx, addr_reg);
- shr(ecx, kPointerSizeLog2);
- and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
- mov(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
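
GetMarkBits derives two values from an object address: the address of the bitmap cell holding its mark bits (page base plus a scaled cell index) and a one-bit mask for the bit within that cell. A sketch of the same index arithmetic in C++, with the page and cell geometry assumed rather than taken from the V8 headers, and with the MemoryChunk header offset (added by the macro at the use site) omitted:

    #include <cstdint>

    // Assumed geometry for illustration: 512 KB pages, 32-bit bitmap cells,
    // one mark bit per 4-byte word (ia32).
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;
    constexpr int kPointerSizeLog2 = 2;   // 4-byte words
    constexpr int kBitsPerCellLog2 = 5;   // 32 bits per bitmap cell
    constexpr int kBytesPerCellLog2 = 2;  // 4 bytes per bitmap cell

    void GetMarkBits(uintptr_t addr, uintptr_t* cell_addr, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;  // start of the page
      int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
      uintptr_t cell_offset = ((addr & kPageAlignmentMask) >> shift) &
                              ~((uintptr_t{1} << kBytesPerCellLog2) - 1);
      *cell_addr = page + cell_offset;              // bitmap cell with the mark bits
      int bit = static_cast<int>((addr >> kPointerSizeLog2) &
                                 ((1u << kBitsPerCellLog2) - 1));
      *mask = 1u << bit;                            // bit for this word within the cell
    }
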
-
-
-void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
- Register mask_scratch, Label* value_is_white,
- Label::Distance distance) {
- DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, value_is_white, Label::kNear);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- mov(dst, FieldOperand(map, Map::kBitField3Offset));
- and_(dst, Immediate(Map::EnumLengthBits::kMask));
- SmiTag(dst);
-}
-
-
-void MacroAssembler::CheckEnumCache(Label* call_runtime) {
- Label next, start;
- mov(ecx, eax);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
- j(equal, call_runtime);
-
- jmp(&start);
-
- bind(&next);
- mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::kZero));
- j(not_equal, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register ecx contains the current JS
- // object we've reached through the prototype chain.
- Label no_elements;
- mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
- cmp(ecx, isolate()->factory()->empty_fixed_array());
- j(equal, &no_elements);
-
- // Second chance, the object may be using the empty slow element dictionary.
- cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
- j(not_equal, call_runtime);
-
- bind(&no_elements);
- mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- cmp(ecx, isolate()->factory()->null_value());
- j(not_equal, &next);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationMemento(
- Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found) {
- Label map_check;
- Label top_check;
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoLastWordOffset =
- kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
-
- // Bail out if the object is not in new space.
- JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
- // If the object is in new space, we need to check whether it is on the same
- // page as the current top.
- lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
- xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
- j(zero, &top_check);
- // The object is on a different page than allocation top. Bail out if the
- // object sits on the page boundary as no memento can follow and we cannot
- // touch the memory following it.
- lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
- xor_(scratch_reg, receiver_reg);
- test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
- j(not_zero, no_memento_found);
- // Continue with the actual map check.
- jmp(&map_check);
- // If top is on the same page as the current object, we need to check whether
- // we are below top.
- bind(&top_check);
- lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
- cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater_equal, no_memento_found);
- // Memento map check.
- bind(&map_check);
- mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
- cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
-}
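
The memento test hinges on a page-identity check: XORing two addresses and masking off the in-page bits yields zero exactly when both lie on the same page. The sequence above uses it twice, once against the allocation top and once against the receiver itself (to catch an object ending on a page boundary). A one-line predicate capturing the idea, with the page mask assumed:

    #include <cstdint>

    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed 512 KB pages

    // True iff a and b are on the same page, i.e. their page-base bits agree.
    inline bool OnSamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }
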
-
-void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
- DCHECK(!dividend.is(eax));
- DCHECK(!dividend.is(edx));
- base::MagicNumbersForDivision<uint32_t> mag =
- base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
- mov(eax, Immediate(mag.multiplier));
- imul(dividend);
- bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
- if (divisor > 0 && neg) add(edx, dividend);
- if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
- if (mag.shift > 0) sar(edx, mag.shift);
- mov(eax, dividend);
- shr(eax, 31);
- add(edx, eax);
-}
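
TruncatingDiv is signed division by a compile-time constant via a magic-number multiply: the high half of the product (left in edx by imul) is corrected by the dividend when the multiplier's sign requires it, arithmetically shifted, and finally bumped by the dividend's sign bit so the quotient truncates toward zero. A scalar sketch of the same fix-up sequence; the magic pair is normally produced by SignedDivisionByConstant and is hard-coded here for divisor 7 only to keep the sketch self-contained:

    #include <cstdint>

    // Truncating division by 7 with its 32-bit magic pair (0x92492493, shift 2).
    // Assumes arithmetic right shift of negative values, as on common compilers.
    int32_t DivideBy7(int32_t n) {
      const uint32_t multiplier = 0x92492493u;
      const int shift = 2;
      // High 32 bits of the signed 64-bit product, as imul leaves them in edx.
      int32_t hi = static_cast<int32_t>(
          (static_cast<int64_t>(n) * static_cast<int32_t>(multiplier)) >> 32);
      hi += n;                               // multiplier is negative: add dividend back
      hi >>= shift;                          // arithmetic shift (sar)
      hi += static_cast<uint32_t>(n) >> 31;  // +1 for negative dividends
      return hi;
    }

    // e.g. DivideBy7(20) == 2, DivideBy7(21) == 3, DivideBy7(-20) == -2
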
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
deleted file mode 100644
index 63c8dad194..0000000000
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ /dev/null
@@ -1,914 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X87_MACRO_ASSEMBLER_X87_H_
-#define V8_X87_MACRO_ASSEMBLER_X87_H_
-
-#include "src/assembler.h"
-#include "src/bailout-reason.h"
-#include "src/frames.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {Register::kCode_eax};
-const Register kReturnRegister1 = {Register::kCode_edx};
-const Register kReturnRegister2 = {Register::kCode_edi};
-const Register kJSFunctionRegister = {Register::kCode_edi};
-const Register kContextRegister = {Register::kCode_esi};
-const Register kAllocateSizeRegister = {Register::kCode_edx};
-const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
-const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
-const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
-const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
-const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
-const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
-
-// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterDispatchTableSpillSlot = -1;
-
-// Convenience for platform-independent signatures. We do not normally
-// distinguish memory operands from other operands on ia32.
-typedef Operand MemOperand;
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum PointersToHereCheck {
- kPointersToHereMaybeInteresting,
- kPointersToHereAreAlwaysInteresting
-};
-
-enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
-
-enum class ReturnAddressState { kOnStack, kNotOnStack };
-
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
- Register reg4 = no_reg, Register reg5 = no_reg,
- Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg);
-#endif
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- MacroAssembler(Isolate* isolate, void* buffer, int size,
- CodeObjectRequired create_code_object);
-
- Isolate* isolate() const { return isolate_; }
-
- void Load(Register dst, const Operand& src, Representation r);
- void Store(Register src, const Operand& dst, Representation r);
-
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int32_t x) {
- if (x == 0) {
- xor_(dst, dst);
- } else {
- mov(dst, Immediate(x));
- }
- }
- void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
-
- // Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
- void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
- void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
- // These methods can only be used with constant roots (i.e. non-writable
- // and not in new space).
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index);
-
- // Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kFar) {
- CompareRoot(with, index);
- j(equal, if_equal, if_equal_distance);
- }
- void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
- Label* if_equal,
- Label::Distance if_equal_distance = Label::kFar) {
- CompareRoot(with, index);
- j(equal, if_equal, if_equal_distance);
- }
-
- // Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, Heap::RootListIndex index,
- Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kFar) {
- CompareRoot(with, index);
- j(not_equal, if_not_equal, if_not_equal_distance);
- }
- void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
- Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kFar) {
- CompareRoot(with, index);
- j(not_equal, if_not_equal, if_not_equal_distance);
- }
-
- // These functions do not arrange the registers in any particular order so
- // they are not useful for calls that can cause a GC. The caller can
- // exclude up to 3 registers that do not need to be saved and restored.
- void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
-
- // ---------------------------------------------------------------------------
- // GC Support
- enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- void CheckPageFlagForMap(
- Handle<Map> map, int mask, Condition cc, Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, zero, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_zero, branch, distance);
- }
-
- // Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object, Register scratch0, Register scratch1,
- Label* has_color, Label::Distance has_color_distance,
- int first_bit, int second_bit);
-
- void JumpIfBlack(Register object, Register scratch0, Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Checks the color of an object. If the object is white we jump to the
- // incremental marker.
- void JumpIfWhite(Register value, Register scratch1, Register scratch2,
- Label* value_is_white, Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context, int offset, Register value, Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting) {
- RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
- remembered_set_action, smi_check,
- pointers_to_here_check_for_value);
- }
-
- // For page containing |object| mark region covering |address|
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update the
- // write barrier if the value is a smi.
- void RecordWrite(
- Register object, Register address, Register value, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK,
- PointersToHereCheck pointers_to_here_check_for_value =
- kPointersToHereMaybeInteresting);
-
- // Notify the garbage collector that we wrote a code entry into a
- // JSFunction. Only scratch is clobbered by the operation.
- void RecordWriteCodeEntryField(Register js_function, Register code_entry,
- Register scratch);
-
- // For page containing |object| mark the region covering the object's map
- // dirty. |object| is the object being stored into, |map| is the Map object
- // that was stored.
- void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
- Register scratch2, SaveFPRegsMode save_fp);
-
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue(bool code_pre_aging);
-
- // Enter specific kind of exit frame. Expects the number of
- // arguments in register eax and sets up the number of arguments in
- // register edi and the pointer to the first argument in register
- // esi.
- void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
-
- void EnterApiExitFrame(int argc);
-
- // Leave the current exit frame. Expects the return value in
- // register eax:edx (untouched) and the pointer to the first
- // argument in register esi (if pop_arguments == true).
- void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
-
- // Leave the current exit frame. Expects the return value in
- // register eax (untouched).
- void LeaveApiExitFrame(bool restore_context);
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { pushad(); }
- void PopSafepointRegisters() { popad(); }
- // Store the value in register/immediate src in the safepoint
- // register stack slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void StoreToSafepointRegisterSlot(Register dst, Immediate src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- // Nop, because x87 does not have a root register.
- void InitializeRootRegister() {}
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void CmpHeapObject(Register reg, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, Immediate(object));
- }
- }
-
- void CmpObject(Register reg, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- CmpHeapObject(reg, Handle<HeapObject>::cast(object));
- } else {
- cmp(reg, Immediate(object));
- }
- }
-
- void GetWeakValue(Register value, Handle<WeakCell> cell);
- void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Removes current frame and its arguments from the stack preserving
- // the arguments and a return address pushed to the stack for the next call.
- // |ra_state| defines whether return address is already pushed to stack or
- // not. Both |callee_args_count| and |caller_args_count_reg| do not include
- // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
- // is trashed. |number_of_temp_values_after_return_address| specifies
- // the number of words pushed to the stack after the return address. This is
- // to allow "allocation" of scratch registers that this function requires
- // by saving their values on the stack.
- void PrepareForTailCall(const ParameterCount& callee_args_count,
- Register caller_args_count_reg, Register scratch0,
- Register scratch1, ReturnAddressState ra_state,
- int number_of_temp_values_after_return_address);
-
- // Invoke the JavaScript function code by either calling or jumping.
-
- void InvokeFunctionCode(Register function, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
-
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function, Register new_target,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
-
- void InvokeFunction(Register function, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
-
- void ShlPair(Register high, Register low, uint8_t imm8);
- void ShlPair_cl(Register high, Register low);
- void ShrPair(Register high, Register low, uint8_t imm8);
- void ShrPair_cl(Register high, Register src);
- void SarPair(Register high, Register low, uint8_t imm8);
- void SarPair_cl(Register high, Register low);
-
- // Expression support
- // Support for constant splitting.
- bool IsUnsafeImmediate(const Immediate& x);
- void SafeMove(Register dst, const Immediate& x);
- void SafePush(const Immediate& x);
-
- // Compare object type for heap object.
- // Incoming register is heap_object and outgoing register is map.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Compare an object's map with the specified map.
- void CompareMap(Register obj, Handle<Map> map);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj, Handle<Map> map, Label* fail,
- SmiCheckType smi_check_type);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object, Register map,
- Register instance_type);
-
- // FCmp is similar to integer cmp, but requires unsigned
- // jcc instructions (je, ja, jae, jb, jbe, and jz).
- void FCmp();
- void FXamMinusZero();
- void FXamSign();
- void X87CheckIA();
- void X87SetRC(int rc);
- void X87SetFPUCW(int cw);
-
- void ClampUint8(Register reg);
- void ClampTOSToUint8(Register result_reg);
-
- void SlowTruncateToI(Register result_reg, Register input_reg,
- int offset = HeapNumber::kValueOffset - kHeapObjectTag);
-
- void TruncateHeapNumberToI(Register result_reg, Register input_reg);
- void TruncateX87TOSToI(Register result_reg);
-
- void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan, Label* minus_zero,
- Label::Distance dst = Label::kFar);
-
- // Smi tagging support.
- void SmiTag(Register reg) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- add(reg, reg);
- }
- void SmiUntag(Register reg) {
- sar(reg, kSmiTagSize);
- }
-
- // Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, Label* is_smi) {
- STATIC_ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- STATIC_ASSERT(kSmiTag == 0);
- j(not_carry, is_smi);
- }
-
- void LoadUint32NoSSE2(Register src) {
- LoadUint32NoSSE2(Operand(src));
- }
- void LoadUint32NoSSE2(const Operand& src);
-
- // Jump if the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value, Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label, distance);
- }
- // Jump if the operand is not a smi.
- inline void JumpIfNotSmi(Operand value, Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(not_zero, smi_label, distance);
- }
- // Jump if the value cannot be represented by a smi.
- inline void JumpIfNotValidSmiValue(Register value, Register scratch,
- Label* on_invalid,
- Label::Distance distance = Label::kFar) {
- mov(scratch, value);
- add(scratch, Immediate(0x40000000U));
- j(sign, on_invalid, distance);
- }
-
- // Jump if the unsigned integer value cannot be represented by a smi.
- inline void JumpIfUIntNotValidSmiValue(
- Register value, Label* on_invalid,
- Label::Distance distance = Label::kFar) {
- cmp(value, Immediate(0x40000000U));
- j(above_equal, on_invalid, distance);
- }
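
Both smi-range checks above use the same trick: a value fits in a 31-bit smi iff it lies in [-2^30, 2^30 - 1], so adding 0x40000000 maps the valid range onto [0, 2^31 - 1] and the sign flag marks an overflow; for unsigned inputs a plain compare against 2^30 suffices. Expressed as plain predicates (a sketch, not the generated code):

    #include <cstdint>

    // Signed value fits in a 31-bit smi iff value + 2^30 does not set the sign bit.
    inline bool IsValidSmi(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
    }

    // Unsigned value fits iff it is below 2^30.
    inline bool IsValidUintSmi(uint32_t value) {
      return value < 0x40000000u;
    }
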
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
- void LoadAccessor(Register dst, Register holder, int accessor_index,
- AccessorComponent accessor);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = Field::kMask >> Field::kShift;
- if (shift != 0) {
- sar(reg, shift);
- }
- and_(reg, Immediate(mask));
- }
-
- template<typename Field>
- void DecodeFieldToSmi(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
- STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
- STATIC_ASSERT(kSmiTag == 0);
- if (shift < kSmiTagSize) {
- shl(reg, kSmiTagSize - shift);
- } else if (shift > kSmiTagSize) {
- sar(reg, shift - kSmiTagSize);
- }
- and_(reg, Immediate(mask));
- }
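
DecodeField and DecodeFieldToSmi turn an encoded bit field (shift plus mask) back into either a raw integer or a smi, folding the smi tag shift into the field shift so only one shift instruction is emitted. The arithmetic, spelled out for an assumed field layout:

    #include <cstdint>

    // Assumed example layout: a 5-bit field starting at bit 3 of the raw word,
    // and the one-bit smi tag used on ia32/x87.
    constexpr int kShift = 3;
    constexpr uint32_t kFieldMask = 0x1Fu << kShift;
    constexpr int kSmiTagSize = 1;

    // Plain decode: shift the field down to bit 0, then mask.
    inline uint32_t DecodeField(uint32_t raw) {
      return (raw >> kShift) & (kFieldMask >> kShift);
    }

    // Decode straight to a smi: the field shift and the smi tag shift are folded
    // into one net shift, then the smi-shifted mask is applied.
    inline uint32_t DecodeFieldToSmi(uint32_t raw) {
      int net = kSmiTagSize - kShift;
      uint32_t v = net >= 0 ? raw << net : raw >> -net;
      return v & ((kFieldMask >> kShift) << kSmiTagSize);
    }
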
-
- // Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
-
- // Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object);
-
- // Abort execution if argument is not a JSBoundFunction,
- // enabled via --debug-code.
- void AssertBoundFunction(Register object);
-
- // Abort execution if argument is not a JSGeneratorObject,
- // enabled via --debug-code.
- void AssertGeneratorObject(Register object);
-
- // Abort execution if argument is not undefined or an AllocationSite, enabled
- // via --debug-code.
- void AssertUndefinedOrAllocationSite(Register object);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new stack handler and link it into stack handler chain.
- void PushStackHandler();
-
- // Unlink the stack handler on top of the stack from the stack handler chain.
- void PopStackHandler();
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- void GetNumberHash(Register r0, Register scratch);
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space or old space. If the given space
- // is exhausted control continues at the gc_required label. The allocated
- // object is returned in result and end of the new object is returned in
- // result_end. The register scratch can be passed as no_reg in which case
- // an additional object reference will be added to the reloc info. The
- // returned pointers in result and result_end have not yet been tagged as
- // heap objects. If result_contains_top_on_entry is true the content of
- // result is known to be the allocation top on entry (could be result_end
- // from a previous call). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void Allocate(int object_size, Register result, Register result_end,
- Register scratch, Label* gc_required, AllocationFlags flags);
-
- void Allocate(int header_size, ScaleFactor element_size,
- Register element_count, RegisterValueType element_count_type,
- Register result, Register result_end, Register scratch,
- Label* gc_required, AllocationFlags flags);
-
- void Allocate(Register object_size, Register result, Register result_end,
- Register scratch, Label* gc_required, AllocationFlags flags);
-
- // FastAllocate is currently only used for folded allocations. It just
- // increments the top pointer without checking against the limit. This can only
- // be done if it was proved earlier that the allocation will succeed.
- void FastAllocate(int object_size, Register result, Register result_end,
- AllocationFlags flags);
- void FastAllocate(Register object_size, Register result, Register result_end,
- AllocationFlags flags);
-
- // Allocate a heap number in new space with undefined value. The
- // register scratch2 can be passed as no_reg; the others must be
- // valid registers. Returns tagged pointer in result register, or
- // jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
- Label* gc_required, MutableMode mode = IMMUTABLE);
-
- // Allocate and initialize a JSValue wrapper with the specified {constructor}
- // and {value}.
- void AllocateJSValue(Register result, Register constructor, Register value,
- Register scratch, Label* gc_required);
-
- // Initialize fields with filler values. Fields starting at |current_address|
- // not including |end_address| are overwritten with the value in |filler|. At
- // the end of the loop, |current_address| takes the value of |end_address|.
- void InitializeFieldsWithFiller(Register current_address,
- Register end_address, Register filler);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check a boolean-bit of a Smi field.
- void BooleanBitTest(Register object, int field_offset, int bit_index);
-
- // Machine code version of Map::GetConstructor().
- // |temp| holds |result|'s map when done.
- void GetMapConstructor(Register result, Register map, Register temp);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Tail call a code stub (jump). Generate the code if necessary.
- void TailCallStub(CodeStub* stub);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, kSaveFPRegs);
- }
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, save_doubles);
- }
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
- }
-
- // Convenience function: call an external reference.
- void CallExternalReference(ExternalReference ref, int num_arguments);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext,
- bool builtin_exit_frame = false);
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- // Emit code that loads |parameter_index|'th parameter from the stack to
- // the register according to the CallInterfaceDescriptor definition.
- // |sp_to_ra_offset_in_words| specifies the number of words pushed
- // below the caller's sp (on x87 it's at least the return address).
- template <class Descriptor>
- void LoadParameterFromStack(
- Register reg, typename Descriptor::ParameterIndices parameter_index,
- int sp_to_ra_offset_in_words = 1) {
- DCHECK(Descriptor::kPassLastArgsOnStack);
- DCHECK_LT(parameter_index, Descriptor::kParameterCount);
- DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
- parameter_index);
- int offset = (Descriptor::kParameterCount - parameter_index - 1 +
- sp_to_ra_offset_in_words) *
- kPointerSize;
- mov(reg, Operand(esp, offset));
- }
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the esp register.
- void Drop(int element_count);
-
- void Call(Label* target) { call(target); }
- void Call(Handle<Code> target, RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None()) {
- call(target, rmode, id);
- }
- void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
- void Push(Register src) { push(src); }
- void Push(const Operand& src) { push(src); }
- void Push(Immediate value) { push(value); }
- void Pop(Register dst) { pop(dst); }
- void Pop(const Operand& dst) { pop(dst); }
- void PushReturnAddressFrom(Register src) { push(src); }
- void PopReturnAddressTo(Register dst) { pop(dst); }
-
- void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
- void Lzcnt(Register dst, const Operand& src);
-
- void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
- void Tzcnt(Register dst, const Operand& src);
-
- void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
- void Popcnt(Register dst, const Operand& src);
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Move a constant into a destination using the most efficient encoding.
- void Move(Register dst, const Immediate& x);
- void Move(const Operand& dst, const Immediate& x);
-
- void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
- void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
-
- // Push a handle value.
- void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Immediate(smi)); }
-
- Handle<Object> CodeObject() {
- DCHECK(!code_object_.is_null());
- return code_object_;
- }
-
- // Insert code to verify that the x87 stack has the specified depth (0-7)
- void VerifyX87StackDepth(uint32_t depth);
-
- // Emit code for a truncating division by a constant. The dividend register is
- // unchanged, the result is in edx, and eax gets clobbered.
- void TruncatingDiv(Register dividend, int32_t divisor);
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
- void IncrementCounter(Condition cc, StatsCounter* counter, int value);
- void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
-
- // Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // ---------------------------------------------------------------------------
- // String utilities.
-
- // Checks if both objects are sequential one-byte strings, and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialOneByteStrings(
- Register object1, Register object2, Register scratch1, Register scratch2,
- Label* on_not_flat_one_byte_strings);
-
- // Checks if the given register or operand is a unique name
- void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
- Label::Distance distance = Label::kFar) {
- JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
- }
-
- void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
- Label::Distance distance = Label::kFar);
-
- void EmitSeqStringSetCharCheck(Register string, Register index,
- Register value, uint32_t encoding_mask);
-
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
- // Load the type feedback vector from a JavaScript frame.
- void EmitLoadFeedbackVector(Register vector);
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
- void LeaveFrame(StackFrame::Type type);
-
- void EnterBuiltinFrame(Register context, Register target, Register argc);
- void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
- // Expects object in eax and returns map with validated enum cache
- // in eax. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Label* call_runtime);
-
- // AllocationMemento support. Arrays may have an associated
- // AllocationMemento object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal.
- void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* no_memento_found);
-
- private:
- bool generating_stub_;
- bool has_frame_;
- Isolate* isolate_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual, Label* done,
- bool* definitely_mismatches, InvokeFlag flag,
- Label::Distance done_distance,
- const CallWrapper& call_wrapper);
-
- void EnterExitFramePrologue(StackFrame::Type frame_type);
- void EnterExitFrameEpilogue(int argc, bool save_doubles);
-
- void LeaveExitFrameEpilogue(bool restore_context);
-
- // Allocation support helpers.
- void LoadAllocationTopHelper(Register result, Register scratch,
- AllocationFlags flags);
-
- void UpdateAllocationTopHelper(Register result_end, Register scratch,
- AllocationFlags flags);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object, Register scratch, Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses ecx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
- Register mask_reg);
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion.
-class CodePatcher {
- public:
- CodePatcher(Isolate* isolate, byte* address, int size);
- ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
-
-inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
-}
-
-inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-inline Operand ContextOperand(Register context, Register index) {
- return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
-}
-
-inline Operand NativeContextOperand() {
- return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
-}
-
-#define ACCESS_MASM(masm) masm->
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_MACRO_ASSEMBLER_X87_H_
diff --git a/deps/v8/src/x87/simulator-x87.cc b/deps/v8/src/x87/simulator-x87.cc
deleted file mode 100644
index cb5652b581..0000000000
--- a/deps/v8/src/x87/simulator-x87.cc
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/x87/simulator-x87.h"
-
-// Since there is no simulator for the ia32 architecture this file is empty.
diff --git a/deps/v8/src/x87/simulator-x87.h b/deps/v8/src/x87/simulator-x87.h
deleted file mode 100644
index 667f0fd6d7..0000000000
--- a/deps/v8/src/x87/simulator-x87.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_X87_SIMULATOR_X87_H_
-#define V8_X87_SIMULATOR_X87_H_
-
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the ia32 architecture the only thing we can
-// do is to call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ia32 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
- uintptr_t try_catch_address) {
- USE(isolate);
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_X87_SIMULATOR_X87_H_
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index c06306309d..08381b31f1 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -85,10 +85,10 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = malloc(bytes);
if (memory) {
base::AtomicWord current =
- base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
- base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
+ base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
+ base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
while (current > max) {
- max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
+ max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
}
}
return reinterpret_cast<Segment*>(memory);
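
The renamed Relaxed_* helpers keep the accounting semantics unchanged: current usage is bumped atomically, and the observed maximum is raised with a compare-and-swap loop that retries until the stored max is at least the new value. The same pattern with std::atomic, as a rough illustration of what the loop does (not V8's atomicops API):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> current_usage{0};
    std::atomic<size_t> max_usage{0};

    void RecordAllocation(size_t bytes) {
      size_t current =
          current_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
      size_t max = max_usage.load(std::memory_order_relaxed);
      // Raise the recorded maximum; a failed CAS reloads the latest max and retries.
      while (current > max &&
             !max_usage.compare_exchange_weak(max, current,
                                              std::memory_order_relaxed)) {
      }
    }
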
@@ -105,22 +105,22 @@ void AccountingAllocator::ReturnSegment(Segment* segment) {
}
void AccountingAllocator::FreeSegment(Segment* memory) {
- base::NoBarrier_AtomicIncrement(
- &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
+ base::Relaxed_AtomicIncrement(&current_memory_usage_,
+ -static_cast<base::AtomicWord>(memory->size()));
memory->ZapHeader();
free(memory);
}
size_t AccountingAllocator::GetCurrentMemoryUsage() const {
- return base::NoBarrier_Load(&current_memory_usage_);
+ return base::Relaxed_Load(&current_memory_usage_);
}
size_t AccountingAllocator::GetMaxMemoryUsage() const {
- return base::NoBarrier_Load(&max_memory_usage_);
+ return base::Relaxed_Load(&max_memory_usage_);
}
size_t AccountingAllocator::GetCurrentPoolSize() const {
- return base::NoBarrier_Load(&current_pool_size_);
+ return base::Relaxed_Load(&current_pool_size_);
}
Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
@@ -145,7 +145,7 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
segment->set_next(nullptr);
unused_segments_sizes_[power]--;
- base::NoBarrier_AtomicIncrement(
+ base::Relaxed_AtomicIncrement(
&current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
}
}
@@ -179,7 +179,7 @@ bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
segment->set_next(unused_segments_heads_[power]);
unused_segments_heads_[power] = segment;
- base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
+ base::Relaxed_AtomicIncrement(&current_pool_size_, size);
unused_segments_sizes_[power]++;
}
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
index 65128c6f70..53d30b3826 100644
--- a/deps/v8/src/zone/accounting-allocator.h
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -20,10 +20,7 @@ namespace internal {
class V8_EXPORT_PRIVATE AccountingAllocator {
public:
- static const size_t kMaxPoolSizeLowMemoryDevice = 8ul * KB;
- static const size_t kMaxPoolSizeMediumMemoryDevice = 8ul * KB;
- static const size_t kMaxPoolSizeHighMemoryDevice = 8ul * KB;
- static const size_t kMaxPoolSizeHugeMemoryDevice = 8ul * KB;
+ static const size_t kMaxPoolSize = 8ul * KB;
AccountingAllocator();
virtual ~AccountingAllocator();
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 1f83ae24a3..f399899464 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -6,6 +6,7 @@
#define V8_SRC_ZONE_ZONE_CONTAINERS_H_
#include <deque>
+#include <forward_list>
#include <list>
#include <map>
#include <queue>
@@ -44,7 +45,7 @@ class ZoneVector : public std::vector<T, ZoneAllocator<T>> {
: std::vector<T, ZoneAllocator<T>>(first, last, ZoneAllocator<T>(zone)) {}
};
-// A wrapper subclass std::deque to make it easy to construct one
+// A wrapper subclass for std::deque to make it easy to construct one
// that uses a zone allocator.
template <typename T>
class ZoneDeque : public std::deque<T, RecyclingZoneAllocator<T>> {
@@ -55,7 +56,7 @@ class ZoneDeque : public std::deque<T, RecyclingZoneAllocator<T>> {
RecyclingZoneAllocator<T>(zone)) {}
};
-// A wrapper subclass std::list to make it easy to construct one
+// A wrapper subclass for std::list to make it easy to construct one
// that uses a zone allocator.
// TODO(mstarzinger): This should be renamed to ZoneList once we got rid of our
// own home-grown ZoneList that actually is a ZoneVector.
@@ -67,7 +68,17 @@ class ZoneLinkedList : public std::list<T, ZoneAllocator<T>> {
: std::list<T, ZoneAllocator<T>>(ZoneAllocator<T>(zone)) {}
};
-// A wrapper subclass std::priority_queue to make it easy to construct one
+// A wrapper subclass for std::forward_list to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneForwardList : public std::forward_list<T, ZoneAllocator<T>> {
+ public:
+ // Constructs an empty list.
+ explicit ZoneForwardList(Zone* zone)
+ : std::forward_list<T, ZoneAllocator<T>>(ZoneAllocator<T>(zone)) {}
+};
+
+// A wrapper subclass for std::priority_queue to make it easy to construct one
// that uses a zone allocator.
template <typename T, typename Compare = std::less<T>>
class ZonePriorityQueue
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
index 641c740abb..b3c3688461 100644
--- a/deps/v8/src/zone/zone-handle-set.h
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -134,6 +134,10 @@ class ZoneHandleSet final {
return static_cast<size_t>(set.data_);
}
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
private:
typedef ZoneList<T**> List;
@@ -159,6 +163,50 @@ class ZoneHandleSet final {
intptr_t data_;
};
+template <typename T>
+class ZoneHandleSet<T>::const_iterator {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+ typedef Handle<T> value_type;
+
+ const_iterator(const const_iterator& other)
+ : set_(other.set_), current_(other.current_) {}
+
+ Handle<T> operator*() const { return (*set_)[current_]; }
+ bool operator==(const const_iterator& other) const {
+ return set_ == other.set_ && current_ == other.current_;
+ }
+ bool operator!=(const const_iterator& other) const {
+ return !(*this == other);
+ }
+ const_iterator& operator++() {
+ DCHECK(current_ < set_->size());
+ current_ += 1;
+ return *this;
+ }
+ const_iterator operator++(int);
+
+ private:
+ friend class ZoneHandleSet<T>;
+
+ explicit const_iterator(const ZoneHandleSet<T>* set, size_t current)
+ : set_(set), current_(current) {}
+
+ const ZoneHandleSet<T>* set_;
+ size_t current_;
+};
+
+template <typename T>
+typename ZoneHandleSet<T>::const_iterator ZoneHandleSet<T>::begin() const {
+ return ZoneHandleSet<T>::const_iterator(this, 0);
+}
+
+template <typename T>
+typename ZoneHandleSet<T>::const_iterator ZoneHandleSet<T>::end() const {
+ return ZoneHandleSet<T>::const_iterator(this, size());
+}
+
} // namespace internal
} // namespace v8
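
The new const_iterator simply walks indices 0..size() and dereferences through the set's operator[], which is what makes the begin()/end() pair usable with range-based for and standard algorithms. A toy container below mirrors that iteration scheme as a runnable illustration; it is not the V8 type, only an analogue of the (container pointer, index) iterator pattern added above:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // A toy set with the same iteration scheme as ZoneHandleSet: the iterator
    // stores (container pointer, index) and dereferences via operator[].
    class SmallSet {
     public:
      void insert(int v) { data_.push_back(v); }
      size_t size() const { return data_.size(); }
      int operator[](size_t i) const { return data_[i]; }

      class const_iterator {
       public:
        int operator*() const { return (*set_)[current_]; }
        bool operator!=(const const_iterator& o) const { return current_ != o.current_; }
        const_iterator& operator++() { ++current_; return *this; }
       private:
        friend class SmallSet;
        const_iterator(const SmallSet* set, size_t current)
            : set_(set), current_(current) {}
        const SmallSet* set_;
        size_t current_;
      };

      const_iterator begin() const { return const_iterator(this, 0); }
      const_iterator end() const { return const_iterator(this, size()); }

     private:
      std::vector<int> data_;
    };

    int main() {
      SmallSet s;
      s.insert(1);
      s.insert(2);
      for (int v : s) std::cout << v << "\n";  // range-for uses begin()/end()
    }
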